From 06fbc29eaaf23d5377e5f846ebf29d412e7c36bb Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Fri, 30 Jan 2026 11:33:08 -0800 Subject: [PATCH 01/72] v0 --- apps/sim/app/api/copilot/chat/route.ts | 439 ++------- apps/sim/app/api/v1/copilot/chat/route.ts | 81 ++ .../components/tool-call/tool-call.tsx | 32 + apps/sim/lib/copilot/orchestrator/config.ts | 23 + apps/sim/lib/copilot/orchestrator/index.ts | 181 ++++ .../lib/copilot/orchestrator/persistence.ts | 138 +++ .../lib/copilot/orchestrator/sse-handlers.ts | 342 +++++++ .../lib/copilot/orchestrator/sse-parser.ts | 72 ++ .../lib/copilot/orchestrator/tool-executor.ts | 239 +++++ apps/sim/lib/copilot/orchestrator/types.ts | 127 +++ apps/sim/stores/panel/copilot/store.ts | 20 + docs/COPILOT_SERVER_REFACTOR.md | 927 ++++++++++++++++++ 12 files changed, 2243 insertions(+), 378 deletions(-) create mode 100644 apps/sim/app/api/v1/copilot/chat/route.ts create mode 100644 apps/sim/lib/copilot/orchestrator/config.ts create mode 100644 apps/sim/lib/copilot/orchestrator/index.ts create mode 100644 apps/sim/lib/copilot/orchestrator/persistence.ts create mode 100644 apps/sim/lib/copilot/orchestrator/sse-handlers.ts create mode 100644 apps/sim/lib/copilot/orchestrator/sse-parser.ts create mode 100644 apps/sim/lib/copilot/orchestrator/tool-executor.ts create mode 100644 apps/sim/lib/copilot/orchestrator/types.ts create mode 100644 docs/COPILOT_SERVER_REFACTOR.md diff --git a/apps/sim/app/api/copilot/chat/route.ts b/apps/sim/app/api/copilot/chat/route.ts index 72c959d9a4..b6fdcb9a6b 100644 --- a/apps/sim/app/api/copilot/chat/route.ts +++ b/apps/sim/app/api/copilot/chat/route.ts @@ -7,7 +7,7 @@ import { z } from 'zod' import { getSession } from '@/lib/auth' import { generateChatTitle } from '@/lib/copilot/chat-title' import { getCopilotModel } from '@/lib/copilot/config' -import { SIM_AGENT_API_URL_DEFAULT, SIM_AGENT_VERSION } from '@/lib/copilot/constants' +import { SIM_AGENT_VERSION } from '@/lib/copilot/constants' import { 
COPILOT_MODEL_IDS, COPILOT_REQUEST_MODES } from '@/lib/copilot/models' import { authenticateCopilotRequestSessionOnly, @@ -23,10 +23,10 @@ import { CopilotFiles } from '@/lib/uploads' import { createFileContent } from '@/lib/uploads/utils/file-utils' import { tools } from '@/tools/registry' import { getLatestVersionTools, stripVersionSuffix } from '@/tools/utils' +import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator' const logger = createLogger('CopilotChatAPI') -const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT const FileAttachmentSchema = z.object({ id: z.string(), @@ -473,77 +473,19 @@ export async function POST(req: NextRequest) { }) } catch {} - const simAgentResponse = await fetch(`${SIM_AGENT_API_URL}/api/chat-completion-streaming`, { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), - }, - body: JSON.stringify(requestPayload), - }) - - if (!simAgentResponse.ok) { - if (simAgentResponse.status === 401 || simAgentResponse.status === 402) { - // Rethrow status only; client will render appropriate assistant message - return new NextResponse(null, { status: simAgentResponse.status }) - } - - const errorText = await simAgentResponse.text().catch(() => '') - logger.error(`[${tracker.requestId}] Sim agent API error:`, { - status: simAgentResponse.status, - error: errorText, - }) - - return NextResponse.json( - { error: `Sim agent API error: ${simAgentResponse.statusText}` }, - { status: simAgentResponse.status } - ) - } - - // If streaming is requested, forward the stream and update chat later - if (stream && simAgentResponse.body) { - // Create user message to save - const userMessage = { - id: userMessageIdToUse, // Consistent ID used for request and persistence - role: 'user', - content: message, - timestamp: new Date().toISOString(), - ...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }), - 
...(Array.isArray(contexts) && contexts.length > 0 && { contexts }), - ...(Array.isArray(contexts) && - contexts.length > 0 && { - contentBlocks: [{ type: 'contexts', contexts: contexts as any, timestamp: Date.now() }], - }), - } - - // Create a pass-through stream that captures the response + if (stream) { const transformedStream = new ReadableStream({ async start(controller) { const encoder = new TextEncoder() - let assistantContent = '' - const toolCalls: any[] = [] - let buffer = '' - const isFirstDone = true - let responseIdFromStart: string | undefined - let responseIdFromDone: string | undefined - // Track tool call progress to identify a safe done event - const announcedToolCallIds = new Set() - const startedToolExecutionIds = new Set() - const completedToolExecutionIds = new Set() - let lastDoneResponseId: string | undefined - let lastSafeDoneResponseId: string | undefined - - // Send chatId as first event + if (actualChatId) { - const chatIdEvent = `data: ${JSON.stringify({ - type: 'chat_id', - chatId: actualChatId, - })}\n\n` - controller.enqueue(encoder.encode(chatIdEvent)) - logger.debug(`[${tracker.requestId}] Sent initial chatId event to client`) + controller.enqueue( + encoder.encode( + `data: ${JSON.stringify({ type: 'chat_id', chatId: actualChatId })}\n\n` + ) + ) } - // Start title generation in parallel if needed if (actualChatId && !currentChat?.title && conversationHistory.length === 0) { generateChatTitle(message) .then(async (title) => { @@ -555,311 +497,61 @@ export async function POST(req: NextRequest) { updatedAt: new Date(), }) .where(eq(copilotChats.id, actualChatId!)) - - const titleEvent = `data: ${JSON.stringify({ - type: 'title_updated', - title: title, - })}\n\n` - controller.enqueue(encoder.encode(titleEvent)) - logger.info(`[${tracker.requestId}] Generated and saved title: ${title}`) + controller.enqueue( + encoder.encode(`data: ${JSON.stringify({ type: 'title_updated', title })}\n\n`) + ) } }) .catch((error) => { 
logger.error(`[${tracker.requestId}] Title generation failed:`, error) }) - } else { - logger.debug(`[${tracker.requestId}] Skipping title generation`) } - // Forward the sim agent stream and capture assistant response - const reader = simAgentResponse.body!.getReader() - const decoder = new TextDecoder() - try { - while (true) { - const { done, value } = await reader.read() - if (done) { - break - } - - // Decode and parse SSE events for logging and capturing content - const decodedChunk = decoder.decode(value, { stream: true }) - buffer += decodedChunk - - const lines = buffer.split('\n') - buffer = lines.pop() || '' // Keep incomplete line in buffer - - for (const line of lines) { - if (line.trim() === '') continue // Skip empty lines - - if (line.startsWith('data: ') && line.length > 6) { - try { - const jsonStr = line.slice(6) - - // Check if the JSON string is unusually large (potential streaming issue) - if (jsonStr.length > 50000) { - // 50KB limit - logger.warn(`[${tracker.requestId}] Large SSE event detected`, { - size: jsonStr.length, - preview: `${jsonStr.substring(0, 100)}...`, - }) - } - - const event = JSON.parse(jsonStr) - - // Log different event types comprehensively - switch (event.type) { - case 'content': - if (event.data) { - assistantContent += event.data - } - break - - case 'reasoning': - logger.debug( - `[${tracker.requestId}] Reasoning chunk received (${(event.data || event.content || '').length} chars)` - ) - break - - case 'tool_call': - if (!event.data?.partial) { - toolCalls.push(event.data) - if (event.data?.id) { - announcedToolCallIds.add(event.data.id) - } - } - break - - case 'tool_generating': - if (event.toolCallId) { - startedToolExecutionIds.add(event.toolCallId) - } - break - - case 'tool_result': - if (event.toolCallId) { - completedToolExecutionIds.add(event.toolCallId) - } - break - - case 'tool_error': - logger.error(`[${tracker.requestId}] Tool error:`, { - toolCallId: event.toolCallId, - toolName: event.toolName, - 
error: event.error, - success: event.success, - }) - if (event.toolCallId) { - completedToolExecutionIds.add(event.toolCallId) - } - break - - case 'start': - if (event.data?.responseId) { - responseIdFromStart = event.data.responseId - } - break - - case 'done': - if (event.data?.responseId) { - responseIdFromDone = event.data.responseId - lastDoneResponseId = responseIdFromDone - - // Mark this done as safe only if no tool call is currently in progress or pending - const announced = announcedToolCallIds.size - const completed = completedToolExecutionIds.size - const started = startedToolExecutionIds.size - const hasToolInProgress = announced > completed || started > completed - if (!hasToolInProgress) { - lastSafeDoneResponseId = responseIdFromDone - } - } - break - - case 'error': - break - - default: - } - - // Emit to client: rewrite 'error' events into user-friendly assistant message - if (event?.type === 'error') { - try { - const displayMessage: string = - (event?.data && (event.data.displayMessage as string)) || - 'Sorry, I encountered an error. Please try again.' 
- const formatted = `_${displayMessage}_` - // Accumulate so it persists to DB as assistant content - assistantContent += formatted - // Send as content chunk - try { - controller.enqueue( - encoder.encode( - `data: ${JSON.stringify({ type: 'content', data: formatted })}\n\n` - ) - ) - } catch (enqueueErr) { - reader.cancel() - break - } - // Then close this response cleanly for the client - try { - controller.enqueue( - encoder.encode(`data: ${JSON.stringify({ type: 'done' })}\n\n`) - ) - } catch (enqueueErr) { - reader.cancel() - break - } - } catch {} - // Do not forward the original error event - } else { - // Forward original event to client - try { - controller.enqueue(encoder.encode(`data: ${jsonStr}\n\n`)) - } catch (enqueueErr) { - reader.cancel() - break - } - } - } catch (e) { - // Enhanced error handling for large payloads and parsing issues - const lineLength = line.length - const isLargePayload = lineLength > 10000 - - if (isLargePayload) { - logger.error( - `[${tracker.requestId}] Failed to parse large SSE event (${lineLength} chars)`, - { - error: e, - preview: `${line.substring(0, 200)}...`, - size: lineLength, - } - ) - } else { - logger.warn( - `[${tracker.requestId}] Failed to parse SSE event: "${line.substring(0, 200)}..."`, - e - ) - } - } - } else if (line.trim() && line !== 'data: [DONE]') { - logger.debug(`[${tracker.requestId}] Non-SSE line from sim agent: "${line}"`) - } - } - } - - // Process any remaining buffer - if (buffer.trim()) { - logger.debug(`[${tracker.requestId}] Processing remaining buffer: "${buffer}"`) - if (buffer.startsWith('data: ')) { + const result = await orchestrateCopilotStream(requestPayload, { + userId: authenticatedUserId, + workflowId, + chatId: actualChatId, + autoExecuteTools: true, + interactive: true, + onEvent: async (event) => { try { - const jsonStr = buffer.slice(6) - const event = JSON.parse(jsonStr) - if (event.type === 'content' && event.data) { - assistantContent += event.data - } - // Forward 
remaining event, applying same error rewrite behavior - if (event?.type === 'error') { - const displayMessage: string = - (event?.data && (event.data.displayMessage as string)) || - 'Sorry, I encountered an error. Please try again.' - const formatted = `_${displayMessage}_` - assistantContent += formatted - try { - controller.enqueue( - encoder.encode( - `data: ${JSON.stringify({ type: 'content', data: formatted })}\n\n` - ) - ) - controller.enqueue( - encoder.encode(`data: ${JSON.stringify({ type: 'done' })}\n\n`) - ) - } catch (enqueueErr) { - reader.cancel() - } - } else { - try { - controller.enqueue(encoder.encode(`data: ${jsonStr}\n\n`)) - } catch (enqueueErr) { - reader.cancel() - } - } - } catch (e) { - logger.warn(`[${tracker.requestId}] Failed to parse final buffer: "${buffer}"`) + controller.enqueue(encoder.encode(`data: ${JSON.stringify(event)}\n\n`)) + } catch { + controller.error('Failed to forward SSE event') } - } - } - - // Log final streaming summary - logger.info(`[${tracker.requestId}] Streaming complete summary:`, { - totalContentLength: assistantContent.length, - toolCallsCount: toolCalls.length, - hasContent: assistantContent.length > 0, - toolNames: toolCalls.map((tc) => tc?.name).filter(Boolean), + }, }) - // NOTE: Messages are saved by the client via update-messages endpoint with full contentBlocks. - // Server only updates conversationId here to avoid overwriting client's richer save. 
- if (currentChat) { - // Persist only a safe conversationId to avoid continuing from a state that expects tool outputs - const previousConversationId = currentChat?.conversationId as string | undefined - const responseId = lastSafeDoneResponseId || previousConversationId || undefined - - if (responseId) { - await db - .update(copilotChats) - .set({ - updatedAt: new Date(), - conversationId: responseId, - }) - .where(eq(copilotChats.id, actualChatId!)) - - logger.info( - `[${tracker.requestId}] Updated conversationId for chat ${actualChatId}`, - { - updatedConversationId: responseId, - } - ) - } + if (currentChat && result.conversationId) { + await db + .update(copilotChats) + .set({ + updatedAt: new Date(), + conversationId: result.conversationId, + }) + .where(eq(copilotChats.id, actualChatId!)) } } catch (error) { - logger.error(`[${tracker.requestId}] Error processing stream:`, error) - - // Send an error event to the client before closing so it knows what happened - try { - const errorMessage = - error instanceof Error && error.message === 'terminated' - ? 'Connection to AI service was interrupted. Please try again.' - : 'An unexpected error occurred while processing the response.' 
- const encoder = new TextEncoder() - - // Send error as content so it shows in the chat - controller.enqueue( - encoder.encode( - `data: ${JSON.stringify({ type: 'content', data: `\n\n_${errorMessage}_` })}\n\n` - ) - ) - // Send done event to properly close the stream on client - controller.enqueue(encoder.encode(`data: ${JSON.stringify({ type: 'done' })}\n\n`)) - } catch (enqueueError) { - // Stream might already be closed, that's ok - logger.warn( - `[${tracker.requestId}] Could not send error event to client:`, - enqueueError + logger.error(`[${tracker.requestId}] Orchestration error:`, error) + controller.enqueue( + encoder.encode( + `data: ${JSON.stringify({ + type: 'error', + data: { + displayMessage: + 'An unexpected error occurred while processing the response.', + }, + })}\n\n` ) - } + ) } finally { - try { - controller.close() - } catch { - // Controller might already be closed - } + controller.close() } }, }) - const response = new Response(transformedStream, { + return new Response(transformedStream, { headers: { 'Content-Type': 'text/event-stream', 'Cache-Control': 'no-cache', @@ -867,43 +559,31 @@ export async function POST(req: NextRequest) { 'X-Accel-Buffering': 'no', }, }) + } - logger.info(`[${tracker.requestId}] Returning streaming response to client`, { - duration: tracker.getDuration(), - chatId: actualChatId, - headers: { - 'Content-Type': 'text/event-stream', - 'Cache-Control': 'no-cache', - Connection: 'keep-alive', - }, - }) + const nonStreamingResult = await orchestrateCopilotStream(requestPayload, { + userId: authenticatedUserId, + workflowId, + chatId: actualChatId, + autoExecuteTools: true, + interactive: true, + }) - return response + const responseData = { + content: nonStreamingResult.content, + toolCalls: nonStreamingResult.toolCalls, + model: selectedModel, + provider: providerConfig?.provider || env.COPILOT_PROVIDER || 'openai', } - // For non-streaming responses - const responseData = await simAgentResponse.json() - 
logger.info(`[${tracker.requestId}] Non-streaming response from sim agent:`, { + logger.info(`[${tracker.requestId}] Non-streaming response from orchestrator:`, { hasContent: !!responseData.content, contentLength: responseData.content?.length || 0, model: responseData.model, provider: responseData.provider, toolCallsCount: responseData.toolCalls?.length || 0, - hasTokens: !!responseData.tokens, }) - // Log tool calls if present - if (responseData.toolCalls?.length > 0) { - responseData.toolCalls.forEach((toolCall: any) => { - logger.info(`[${tracker.requestId}] Tool call in response:`, { - id: toolCall.id, - name: toolCall.name, - success: toolCall.success, - result: `${JSON.stringify(toolCall.result).substring(0, 200)}...`, - }) - }) - } - // Save messages if we have a chat if (currentChat && responseData.content) { const userMessage = { @@ -955,6 +635,9 @@ export async function POST(req: NextRequest) { .set({ messages: updatedMessages, updatedAt: new Date(), + ...(nonStreamingResult.conversationId + ? 
{ conversationId: nonStreamingResult.conversationId } + : {}), }) .where(eq(copilotChats.id, actualChatId!)) } diff --git a/apps/sim/app/api/v1/copilot/chat/route.ts b/apps/sim/app/api/v1/copilot/chat/route.ts new file mode 100644 index 0000000000..8cd1e0104f --- /dev/null +++ b/apps/sim/app/api/v1/copilot/chat/route.ts @@ -0,0 +1,81 @@ +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { authenticateV1Request } from '@/app/api/v1/auth' +import { getCopilotModel } from '@/lib/copilot/config' +import { SIM_AGENT_VERSION } from '@/lib/copilot/constants' +import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator' + +const logger = createLogger('CopilotHeadlessAPI') + +const RequestSchema = z.object({ + message: z.string().min(1, 'message is required'), + workflowId: z.string().min(1, 'workflowId is required'), + chatId: z.string().optional(), + mode: z.enum(['agent', 'ask', 'plan']).optional().default('agent'), + model: z.string().optional(), + autoExecuteTools: z.boolean().optional().default(true), + timeout: z.number().optional().default(300000), +}) + +/** + * POST /api/v1/copilot/chat + * Headless copilot endpoint for server-side orchestration. + */ +export async function POST(req: NextRequest) { + const auth = await authenticateV1Request(req) + if (!auth.authenticated || !auth.userId) { + return NextResponse.json({ success: false, error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + try { + const body = await req.json() + const parsed = RequestSchema.parse(body) + const defaults = getCopilotModel('chat') + const selectedModel = parsed.model || defaults.model + + const requestPayload = { + message: parsed.message, + workflowId: parsed.workflowId, + userId: auth.userId, + stream: true, + streamToolCalls: true, + model: selectedModel, + mode: parsed.mode, + messageId: crypto.randomUUID(), + version: SIM_AGENT_VERSION, + ...(parsed.chatId ? 
{ chatId: parsed.chatId } : {}), + } + + const result = await orchestrateCopilotStream(requestPayload, { + userId: auth.userId, + workflowId: parsed.workflowId, + chatId: parsed.chatId, + autoExecuteTools: parsed.autoExecuteTools, + timeout: parsed.timeout, + interactive: false, + }) + + return NextResponse.json({ + success: result.success, + content: result.content, + toolCalls: result.toolCalls, + chatId: result.chatId, + conversationId: result.conversationId, + error: result.error, + }) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { success: false, error: 'Invalid request', details: error.errors }, + { status: 400 } + ) + } + + logger.error('Headless copilot request failed', { + error: error instanceof Error ? error.message : String(error), + }) + return NextResponse.json({ success: false, error: 'Internal server error' }, { status: 500 }) + } +} + diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx index f6ee0679a6..5edc292713 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx @@ -1,5 +1,6 @@ 'use client' +import { createLogger } from '@sim/logger' import { memo, useEffect, useMemo, useRef, useState } from 'react' import clsx from 'clsx' import { ChevronUp, LayoutList } from 'lucide-react' @@ -26,6 +27,7 @@ import { getBlock } from '@/blocks/registry' import type { CopilotToolCall } from '@/stores/panel' import { useCopilotStore } from '@/stores/panel' import { CLASS_TOOL_METADATA } from '@/stores/panel/copilot/store' +import { COPILOT_SERVER_ORCHESTRATED } from '@/lib/copilot/orchestrator/config' import 
type { SubAgentContentBlock } from '@/stores/panel/copilot/types' import { useWorkflowStore } from '@/stores/workflows/workflow/store' @@ -1257,12 +1259,36 @@ function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean { return false } +const toolCallLogger = createLogger('CopilotToolCall') + +async function sendToolDecision(toolCallId: string, status: 'accepted' | 'rejected') { + try { + await fetch('/api/copilot/confirm', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ toolCallId, status }), + }) + } catch (error) { + toolCallLogger.warn('Failed to send tool decision', { + toolCallId, + status, + error: error instanceof Error ? error.message : String(error), + }) + } +} + async function handleRun( toolCall: CopilotToolCall, setToolCallState: any, onStateChange?: any, editedParams?: any ) { + if (COPILOT_SERVER_ORCHESTRATED) { + setToolCallState(toolCall, 'executing') + onStateChange?.('executing') + await sendToolDecision(toolCall.id, 'accepted') + return + } const instance = getClientTool(toolCall.id) if (!instance && isIntegrationTool(toolCall.name)) { @@ -1307,6 +1333,12 @@ async function handleRun( } async function handleSkip(toolCall: CopilotToolCall, setToolCallState: any, onStateChange?: any) { + if (COPILOT_SERVER_ORCHESTRATED) { + setToolCallState(toolCall, 'rejected') + onStateChange?.('rejected') + await sendToolDecision(toolCall.id, 'rejected') + return + } const instance = getClientTool(toolCall.id) if (!instance && isIntegrationTool(toolCall.name)) { diff --git a/apps/sim/lib/copilot/orchestrator/config.ts b/apps/sim/lib/copilot/orchestrator/config.ts new file mode 100644 index 0000000000..3f6eb99876 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/config.ts @@ -0,0 +1,23 @@ +/** + * Feature flag for server-side copilot orchestration. 
+ */ +export const COPILOT_SERVER_ORCHESTRATED = true + +export const INTERRUPT_TOOL_NAMES = [ + 'set_global_workflow_variables', + 'run_workflow', + 'manage_mcp_tool', + 'manage_custom_tool', + 'deploy_mcp', + 'deploy_chat', + 'deploy_api', + 'create_workspace_mcp_server', + 'set_environment_variables', + 'make_api_request', + 'oauth_request_access', + 'navigate_ui', + 'knowledge_base', +] as const + +export const INTERRUPT_TOOL_SET = new Set(INTERRUPT_TOOL_NAMES) + diff --git a/apps/sim/lib/copilot/orchestrator/index.ts b/apps/sim/lib/copilot/orchestrator/index.ts new file mode 100644 index 0000000000..24746c24d9 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/index.ts @@ -0,0 +1,181 @@ +import { createLogger } from '@sim/logger' +import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' +import { env } from '@/lib/core/config/env' +import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser' +import { + handleSubagentRouting, + sseHandlers, + subAgentHandlers, +} from '@/lib/copilot/orchestrator/sse-handlers' +import { prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor' +import type { + ExecutionContext, + OrchestratorOptions, + OrchestratorResult, + SSEEvent, + StreamingContext, + ToolCallSummary, +} from '@/lib/copilot/orchestrator/types' + +const logger = createLogger('CopilotOrchestrator') +const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT + +export interface OrchestrateStreamOptions extends OrchestratorOptions { + userId: string + workflowId: string + chatId?: string +} + +/** + * Orchestrate a copilot SSE stream and execute tool calls server-side. 
+ */ +export async function orchestrateCopilotStream( + requestPayload: Record, + options: OrchestrateStreamOptions +): Promise { + const { userId, workflowId, chatId, timeout = 300000, abortSignal } = options + const execContext = await prepareExecutionContext(userId, workflowId) + + const context: StreamingContext = { + chatId, + conversationId: undefined, + messageId: requestPayload?.messageId || crypto.randomUUID(), + accumulatedContent: '', + contentBlocks: [], + toolCalls: new Map(), + currentThinkingBlock: null, + isInThinkingBlock: false, + subAgentParentToolCallId: undefined, + subAgentContent: {}, + subAgentToolCalls: {}, + pendingContent: '', + streamComplete: false, + wasAborted: false, + errors: [], + } + + try { + const response = await fetch(`${SIM_AGENT_API_URL}/api/chat-completion-streaming`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), + }, + body: JSON.stringify(requestPayload), + signal: abortSignal, + }) + + if (!response.ok) { + const errorText = await response.text().catch(() => '') + throw new Error(`Copilot backend error (${response.status}): ${errorText || response.statusText}`) + } + + if (!response.body) { + throw new Error('Copilot backend response missing body') + } + + const reader = response.body.getReader() + const decoder = new TextDecoder() + + const timeoutId = setTimeout(() => { + context.errors.push('Request timed out') + context.streamComplete = true + reader.cancel().catch(() => {}) + }, timeout) + + try { + for await (const event of parseSSEStream(reader, decoder, abortSignal)) { + if (abortSignal?.aborted) { + context.wasAborted = true + break + } + + await forwardEvent(event, options) + + if (event.type === 'subagent_start') { + const toolCallId = event.data?.tool_call_id + if (toolCallId) { + context.subAgentParentToolCallId = toolCallId + context.subAgentContent[toolCallId] = '' + context.subAgentToolCalls[toolCallId] = [] + 
} + continue + } + + if (event.type === 'subagent_end') { + context.subAgentParentToolCallId = undefined + continue + } + + if (handleSubagentRouting(event, context)) { + const handler = subAgentHandlers[event.type] + if (handler) { + await handler(event, context, execContext, options) + } + if (context.streamComplete) break + continue + } + + const handler = sseHandlers[event.type] + if (handler) { + await handler(event, context, execContext, options) + } + if (context.streamComplete) break + } + } finally { + clearTimeout(timeoutId) + } + + const result = buildResult(context) + await options.onComplete?.(result) + return result + } catch (error) { + const err = error instanceof Error ? error : new Error('Copilot orchestration failed') + logger.error('Copilot orchestration failed', { error: err.message }) + await options.onError?.(err) + return { + success: false, + content: '', + contentBlocks: [], + toolCalls: [], + chatId: context.chatId, + conversationId: context.conversationId, + error: err.message, + } + } +} + +async function forwardEvent(event: SSEEvent, options: OrchestratorOptions): Promise { + try { + await options.onEvent?.(event) + } catch (error) { + logger.warn('Failed to forward SSE event', { + type: event.type, + error: error instanceof Error ? error.message : String(error), + }) + } +} + +function buildResult(context: StreamingContext): OrchestratorResult { + const toolCalls: ToolCallSummary[] = Array.from(context.toolCalls.values()).map((toolCall) => ({ + id: toolCall.id, + name: toolCall.name, + status: toolCall.status, + params: toolCall.params, + result: toolCall.result?.output, + error: toolCall.error, + durationMs: + toolCall.endTime && toolCall.startTime ? 
toolCall.endTime - toolCall.startTime : undefined, + })) + + return { + success: context.errors.length === 0, + content: context.accumulatedContent, + contentBlocks: context.contentBlocks, + toolCalls, + chatId: context.chatId, + conversationId: context.conversationId, + errors: context.errors.length ? context.errors : undefined, + } +} + diff --git a/apps/sim/lib/copilot/orchestrator/persistence.ts b/apps/sim/lib/copilot/orchestrator/persistence.ts new file mode 100644 index 0000000000..d7b015f00c --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/persistence.ts @@ -0,0 +1,138 @@ +import { db } from '@sim/db' +import { copilotChats } from '@sim/db/schema' +import { createLogger } from '@sim/logger' +import { and, eq } from 'drizzle-orm' +import { getRedisClient } from '@/lib/core/config/redis' + +const logger = createLogger('CopilotOrchestratorPersistence') + +/** + * Create a new copilot chat record. + */ +export async function createChat(params: { + userId: string + workflowId: string + model: string +}): Promise<{ id: string }> { + const [chat] = await db + .insert(copilotChats) + .values({ + userId: params.userId, + workflowId: params.workflowId, + model: params.model, + messages: [], + }) + .returning({ id: copilotChats.id }) + + return { id: chat.id } +} + +/** + * Load an existing chat for a user. + */ +export async function loadChat(chatId: string, userId: string) { + const [chat] = await db + .select() + .from(copilotChats) + .where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, userId))) + .limit(1) + + return chat || null +} + +/** + * Save chat messages and metadata. + */ +export async function saveMessages( + chatId: string, + messages: any[], + options?: { + title?: string + conversationId?: string + planArtifact?: string | null + config?: { mode?: string; model?: string } + } +): Promise { + await db + .update(copilotChats) + .set({ + messages, + updatedAt: new Date(), + ...(options?.title ? 
{ title: options.title } : {}), + ...(options?.conversationId ? { conversationId: options.conversationId } : {}), + ...(options?.planArtifact !== undefined ? { planArtifact: options.planArtifact } : {}), + ...(options?.config ? { config: options.config } : {}), + }) + .where(eq(copilotChats.id, chatId)) +} + +/** + * Update the conversationId for a chat without overwriting messages. + */ +export async function updateChatConversationId(chatId: string, conversationId: string): Promise { + await db + .update(copilotChats) + .set({ + conversationId, + updatedAt: new Date(), + }) + .where(eq(copilotChats.id, chatId)) +} + +/** + * Set a tool call confirmation status in Redis. + */ +export async function setToolConfirmation( + toolCallId: string, + status: 'accepted' | 'rejected' | 'background' | 'pending', + message?: string +): Promise { + const redis = getRedisClient() + if (!redis) { + logger.warn('Redis client not available for tool confirmation') + return false + } + + const key = `tool_call:${toolCallId}` + const payload = { + status, + message: message || null, + timestamp: new Date().toISOString(), + } + + try { + await redis.set(key, JSON.stringify(payload), 'EX', 86400) + return true + } catch (error) { + logger.error('Failed to set tool confirmation', { + toolCallId, + error: error instanceof Error ? error.message : String(error), + }) + return false + } +} + +/** + * Get a tool call confirmation status from Redis. + */ +export async function getToolConfirmation(toolCallId: string): Promise<{ + status: string + message?: string + timestamp?: string +} | null> { + const redis = getRedisClient() + if (!redis) return null + + try { + const data = await redis.get(`tool_call:${toolCallId}`) + if (!data) return null + return JSON.parse(data) as { status: string; message?: string; timestamp?: string } + } catch (error) { + logger.error('Failed to read tool confirmation', { + toolCallId, + error: error instanceof Error ? 
error.message : String(error), + }) + return null + } +} + diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts new file mode 100644 index 0000000000..101b281381 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts @@ -0,0 +1,342 @@ +import { createLogger } from '@sim/logger' +import type { + ContentBlock, + ExecutionContext, + OrchestratorOptions, + SSEEvent, + StreamingContext, + ToolCallState, +} from '@/lib/copilot/orchestrator/types' +import { executeToolServerSide, markToolComplete } from '@/lib/copilot/orchestrator/tool-executor' +import { getToolConfirmation } from '@/lib/copilot/orchestrator/persistence' +import { INTERRUPT_TOOL_SET } from '@/lib/copilot/orchestrator/config' + +const logger = createLogger('CopilotSseHandlers') + +export type SSEHandler = ( + event: SSEEvent, + context: StreamingContext, + execContext: ExecutionContext, + options: OrchestratorOptions +) => void | Promise + +function addContentBlock( + context: StreamingContext, + block: Omit +): void { + context.contentBlocks.push({ + ...block, + timestamp: Date.now(), + }) +} + +async function executeToolAndReport( + toolCallId: string, + context: StreamingContext, + execContext: ExecutionContext, + options?: OrchestratorOptions +): Promise { + const toolCall = context.toolCalls.get(toolCallId) + if (!toolCall) return + + if (toolCall.status === 'executing') return + + toolCall.status = 'executing' + try { + const result = await executeToolServerSide(toolCall, execContext) + toolCall.status = result.success ? 'success' : 'error' + toolCall.result = result + toolCall.error = result.error + toolCall.endTime = Date.now() + + await markToolComplete( + toolCall.id, + toolCall.name, + result.success ? 200 : 500, + result.error || (result.success ? 
'Tool completed' : 'Tool failed'), + result.output + ) + + await options?.onEvent?.({ + type: 'tool_result', + toolCallId: toolCall.id, + data: { + id: toolCall.id, + name: toolCall.name, + success: result.success, + result: result.output, + }, + }) + } catch (error) { + toolCall.status = 'error' + toolCall.error = error instanceof Error ? error.message : String(error) + toolCall.endTime = Date.now() + + await markToolComplete(toolCall.id, toolCall.name, 500, toolCall.error) + + await options?.onEvent?.({ + type: 'tool_error', + toolCallId: toolCall.id, + data: { + id: toolCall.id, + name: toolCall.name, + error: toolCall.error, + }, + }) + } +} + +async function waitForToolDecision( + toolCallId: string, + timeoutMs: number +): Promise<{ status: string; message?: string } | null> { + const start = Date.now() + while (Date.now() - start < timeoutMs) { + const decision = await getToolConfirmation(toolCallId) + if (decision?.status) { + return decision + } + await new Promise((resolve) => setTimeout(resolve, 100)) + } + return null +} + +export const sseHandlers: Record = { + chat_id: (event, context) => { + context.chatId = event.data?.chatId + }, + title_updated: () => {}, + tool_result: (event, context) => { + const toolCallId = event.toolCallId || event.data?.id + if (!toolCallId) return + const current = context.toolCalls.get(toolCallId) + if (!current) return + + const success = event.data?.success ?? event.data?.result?.success + current.status = success ? 
'success' : 'error' + current.endTime = Date.now() + if (event.data?.result || event.data?.data) { + current.result = { + success: !!success, + output: event.data?.result || event.data?.data, + } + } + }, + tool_error: (event, context) => { + const toolCallId = event.toolCallId || event.data?.id + if (!toolCallId) return + const current = context.toolCalls.get(toolCallId) + if (!current) return + current.status = 'error' + current.error = event.data?.error || 'Tool execution failed' + current.endTime = Date.now() + }, + tool_generating: (event, context) => { + const toolCallId = event.toolCallId || event.data?.toolCallId || event.data?.id + const toolName = event.toolName || event.data?.toolName || event.data?.name + if (!toolCallId || !toolName) return + if (!context.toolCalls.has(toolCallId)) { + context.toolCalls.set(toolCallId, { + id: toolCallId, + name: toolName, + status: 'pending', + startTime: Date.now(), + }) + } + }, + tool_call: async (event, context, execContext, options) => { + const toolData = event.data || {} + const toolCallId = toolData.id || event.toolCallId + const toolName = toolData.name || event.toolName + if (!toolCallId || !toolName) return + + const args = toolData.arguments || toolData.input || event.data?.input + const isPartial = toolData.partial === true + const existing = context.toolCalls.get(toolCallId) + const toolCall: ToolCallState = existing + ? 
{ ...existing, status: 'pending', params: args || existing.params } + : { + id: toolCallId, + name: toolName, + status: 'pending', + params: args, + startTime: Date.now(), + } + + context.toolCalls.set(toolCallId, toolCall) + addContentBlock(context, { type: 'tool_call', toolCall }) + + if (isPartial) return + + const isInterruptTool = INTERRUPT_TOOL_SET.has(toolName) + const isInteractive = options.interactive === true + + if (isInterruptTool && isInteractive) { + const decision = await waitForToolDecision(toolCallId, options.timeout || 600000) + if (decision?.status === 'accepted' || decision?.status === 'success') { + await executeToolAndReport(toolCallId, context, execContext, options) + return + } + + if (decision?.status === 'rejected' || decision?.status === 'error') { + toolCall.status = 'rejected' + toolCall.endTime = Date.now() + await markToolComplete( + toolCall.id, + toolCall.name, + 400, + decision.message || 'Tool execution rejected', + { skipped: true, reason: 'user_rejected' } + ) + await options.onEvent?.({ + type: 'tool_result', + toolCallId: toolCall.id, + data: { + id: toolCall.id, + name: toolCall.name, + success: false, + result: { skipped: true, reason: 'user_rejected' }, + }, + }) + return + } + + if (decision?.status === 'background') { + toolCall.status = 'skipped' + toolCall.endTime = Date.now() + await markToolComplete( + toolCall.id, + toolCall.name, + 202, + decision.message || 'Tool execution moved to background', + { background: true } + ) + await options.onEvent?.({ + type: 'tool_result', + toolCallId: toolCall.id, + data: { + id: toolCall.id, + name: toolCall.name, + success: true, + result: { background: true }, + }, + }) + return + } + } + + if (options.autoExecuteTools !== false) { + await executeToolAndReport(toolCallId, context, execContext, options) + } + }, + reasoning: (event, context) => { + const phase = event.data?.phase || event.data?.data?.phase + if (phase === 'start') { + context.isInThinkingBlock = true + 
context.currentThinkingBlock = { + type: 'thinking', + content: '', + timestamp: Date.now(), + } + return + } + if (phase === 'end') { + if (context.currentThinkingBlock) { + context.contentBlocks.push(context.currentThinkingBlock) + } + context.isInThinkingBlock = false + context.currentThinkingBlock = null + return + } + const chunk = typeof event.data === 'string' ? event.data : event.data?.data || event.data?.content + if (!chunk || !context.currentThinkingBlock) return + context.currentThinkingBlock.content = `${context.currentThinkingBlock.content || ''}${chunk}` + }, + content: (event, context) => { + const chunk = typeof event.data === 'string' ? event.data : event.data?.content || event.data?.data + if (!chunk) return + context.accumulatedContent += chunk + addContentBlock(context, { type: 'text', content: chunk }) + }, + done: (event, context) => { + if (event.data?.responseId) { + context.conversationId = event.data.responseId + } + context.streamComplete = true + }, + start: (event, context) => { + if (event.data?.responseId) { + context.conversationId = event.data.responseId + } + }, + error: (event, context) => { + const message = + event.data?.message || event.data?.error || (typeof event.data === 'string' ? event.data : null) + if (message) { + context.errors.push(message) + } + context.streamComplete = true + }, +} + +export const subAgentHandlers: Record = { + content: (event, context) => { + const parentToolCallId = context.subAgentParentToolCallId + if (!parentToolCallId || !event.data) return + const chunk = typeof event.data === 'string' ? 
event.data : event.data?.content || '' + if (!chunk) return + context.subAgentContent[parentToolCallId] = (context.subAgentContent[parentToolCallId] || '') + chunk + addContentBlock(context, { type: 'subagent_text', content: chunk }) + }, + tool_call: async (event, context, execContext, options) => { + const parentToolCallId = context.subAgentParentToolCallId + if (!parentToolCallId) return + const toolData = event.data || {} + const toolCallId = toolData.id || event.toolCallId + const toolName = toolData.name || event.toolName + if (!toolCallId || !toolName) return + const isPartial = toolData.partial === true + const args = toolData.arguments || toolData.input || event.data?.input + + const toolCall: ToolCallState = { + id: toolCallId, + name: toolName, + status: 'pending', + params: args, + startTime: Date.now(), + } + if (!context.subAgentToolCalls[parentToolCallId]) { + context.subAgentToolCalls[parentToolCallId] = [] + } + context.subAgentToolCalls[parentToolCallId].push(toolCall) + + if (isPartial) return + if (options.autoExecuteTools !== false) { + await executeToolAndReport(toolCallId, context, execContext, options) + } + }, + tool_result: (event, context) => { + const parentToolCallId = context.subAgentParentToolCallId + if (!parentToolCallId) return + const toolCallId = event.toolCallId || event.data?.id + if (!toolCallId) return + const toolCalls = context.subAgentToolCalls[parentToolCallId] || [] + const toolCall = toolCalls.find((tc) => tc.id === toolCallId) + if (!toolCall) return + toolCall.status = event.data?.success ? 
'success' : 'error' + toolCall.endTime = Date.now() + }, +} + +export function handleSubagentRouting(event: SSEEvent, context: StreamingContext): boolean { + if (!event.subagent) return false + if (!context.subAgentParentToolCallId) { + logger.warn('Subagent event missing parent tool call', { + type: event.type, + subagent: event.subagent, + }) + return false + } + return true +} + diff --git a/apps/sim/lib/copilot/orchestrator/sse-parser.ts b/apps/sim/lib/copilot/orchestrator/sse-parser.ts new file mode 100644 index 0000000000..06873289ec --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/sse-parser.ts @@ -0,0 +1,72 @@ +import { createLogger } from '@sim/logger' +import type { SSEEvent } from '@/lib/copilot/orchestrator/types' + +const logger = createLogger('CopilotSseParser') + +/** + * Parses SSE streams from the copilot backend into typed events. + */ +export async function* parseSSEStream( + reader: ReadableStreamDefaultReader, + decoder: TextDecoder, + abortSignal?: AbortSignal +): AsyncGenerator { + let buffer = '' + + try { + while (true) { + if (abortSignal?.aborted) { + logger.info('SSE stream aborted by signal') + break + } + + const { done, value } = await reader.read() + if (done) break + + buffer += decoder.decode(value, { stream: true }) + const lines = buffer.split('\n') + buffer = lines.pop() || '' + + for (const line of lines) { + if (!line.trim()) continue + if (!line.startsWith('data: ')) continue + + const jsonStr = line.slice(6) + if (jsonStr === '[DONE]') continue + + try { + const event = JSON.parse(jsonStr) as SSEEvent + if (event?.type) { + yield event + } + } catch (error) { + logger.warn('Failed to parse SSE event', { + preview: jsonStr.slice(0, 200), + error: error instanceof Error ? 
error.message : String(error), + }) + } + } + } + + if (buffer.trim() && buffer.startsWith('data: ')) { + try { + const event = JSON.parse(buffer.slice(6)) as SSEEvent + if (event?.type) { + yield event + } + } catch (error) { + logger.warn('Failed to parse final SSE buffer', { + preview: buffer.slice(0, 200), + error: error instanceof Error ? error.message : String(error), + }) + } + } + } finally { + try { + reader.releaseLock() + } catch { + logger.warn('Failed to release SSE reader lock') + } + } +} + diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor.ts b/apps/sim/lib/copilot/orchestrator/tool-executor.ts new file mode 100644 index 0000000000..fe288f2446 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor.ts @@ -0,0 +1,239 @@ +import { db } from '@sim/db' +import { account, workflow } from '@sim/db/schema' +import { createLogger } from '@sim/logger' +import { and, eq } from 'drizzle-orm' +import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils' +import { resolveEnvVarReferences } from '@/executor/utils/reference-validation' +import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' +import { generateRequestId } from '@/lib/core/utils/request' +import { env } from '@/lib/core/config/env' +import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' +import { executeTool } from '@/tools' +import { getTool, resolveToolId } from '@/tools/utils' +import { routeExecution } from '@/lib/copilot/tools/server/router' +import type { ExecutionContext, ToolCallResult, ToolCallState } from '@/lib/copilot/orchestrator/types' + +const logger = createLogger('CopilotToolExecutor') +const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT + +const SERVER_TOOLS = new Set([ + 'get_blocks_and_tools', + 'get_blocks_metadata', + 'get_block_options', + 'get_block_config', + 'get_trigger_blocks', + 'edit_workflow', + 'get_workflow_console', + 'search_documentation', + 'search_online', + 'set_environment_variables', 
+ 'get_credentials', + 'make_api_request', + 'knowledge_base', +]) + +/** + * Execute a tool server-side without calling internal routes. + */ +export async function executeToolServerSide( + toolCall: ToolCallState, + context: ExecutionContext +): Promise { + const toolName = toolCall.name + const resolvedToolName = resolveToolId(toolName) + + if (SERVER_TOOLS.has(toolName)) { + return executeServerToolDirect(toolName, toolCall.params || {}, context) + } + + const toolConfig = getTool(resolvedToolName) + if (!toolConfig) { + logger.warn('Tool not found in registry', { toolName, resolvedToolName }) + return { + success: false, + error: `Tool not found: ${toolName}`, + } + } + + return executeIntegrationToolDirect(toolCall, toolConfig, context) +} + +/** + * Execute a server tool directly via the server tool router. + */ +async function executeServerToolDirect( + toolName: string, + params: Record, + context: ExecutionContext +): Promise { + try { + const result = await routeExecution(toolName, params, { userId: context.userId }) + return { success: true, output: result } + } catch (error) { + logger.error('Server tool execution failed', { + toolName, + error: error instanceof Error ? error.message : String(error), + }) + return { + success: false, + error: error instanceof Error ? error.message : 'Server tool execution failed', + } + } +} + +/** + * Execute an integration tool directly via the tools registry. + */ +async function executeIntegrationToolDirect( + toolCall: ToolCallState, + toolConfig: any, + context: ExecutionContext +): Promise { + const { userId, workflowId } = context + const toolName = resolveToolId(toolCall.name) + const toolArgs = toolCall.params || {} + + let workspaceId = context.workspaceId + if (!workspaceId && workflowId) { + const workflowResult = await db + .select({ workspaceId: workflow.workspaceId }) + .from(workflow) + .where(eq(workflow.id, workflowId)) + .limit(1) + workspaceId = workflowResult[0]?.workspaceId ?? 
undefined + } + + const decryptedEnvVars = + context.decryptedEnvVars || (await getEffectiveDecryptedEnv(userId, workspaceId)) + + const executionParams: Record = resolveEnvVarReferences( + toolArgs, + decryptedEnvVars, + { deep: true } + ) as Record + + if (toolConfig.oauth?.required && toolConfig.oauth.provider) { + const provider = toolConfig.oauth.provider + const accounts = await db + .select() + .from(account) + .where(and(eq(account.providerId, provider), eq(account.userId, userId))) + .limit(1) + + if (!accounts.length) { + return { + success: false, + error: `No ${provider} account connected. Please connect your account first.`, + } + } + + const acc = accounts[0] + const requestId = generateRequestId() + const { accessToken } = await refreshTokenIfNeeded(requestId, acc as any, acc.id) + + if (!accessToken) { + return { + success: false, + error: `OAuth token not available for ${provider}. Please reconnect your account.`, + } + } + + executionParams.accessToken = accessToken + } + + if (toolConfig.params?.apiKey?.required && !executionParams.apiKey) { + return { + success: false, + error: `API key not provided for ${toolName}. Use {{YOUR_API_KEY_ENV_VAR}} to reference your environment variable.`, + } + } + + executionParams._context = { + workflowId, + userId, + } + + if (toolName === 'function_execute') { + executionParams.envVars = decryptedEnvVars + executionParams.workflowVariables = {} + executionParams.blockData = {} + executionParams.blockNameMapping = {} + executionParams.language = executionParams.language || 'javascript' + executionParams.timeout = executionParams.timeout || 30000 + } + + const result = await executeTool(toolName, executionParams) + + return { + success: result.success, + output: result.output, + error: result.error, + } +} + +/** + * Notify the copilot backend that a tool has completed. 
+ */ +export async function markToolComplete( + toolCallId: string, + toolName: string, + status: number, + message?: any, + data?: any +): Promise { + try { + const response = await fetch(`${SIM_AGENT_API_URL}/api/tools/mark-complete`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), + }, + body: JSON.stringify({ + id: toolCallId, + name: toolName, + status, + message, + data, + }), + }) + + if (!response.ok) { + logger.warn('Mark-complete call failed', { toolCallId, status: response.status }) + return false + } + + return true + } catch (error) { + logger.error('Mark-complete call failed', { + toolCallId, + error: error instanceof Error ? error.message : String(error), + }) + return false + } +} + +/** + * Prepare execution context with cached environment values. + */ +export async function prepareExecutionContext( + userId: string, + workflowId: string +): Promise { + let workspaceId: string | undefined + const workflowResult = await db + .select({ workspaceId: workflow.workspaceId }) + .from(workflow) + .where(eq(workflow.id, workflowId)) + .limit(1) + workspaceId = workflowResult[0]?.workspaceId ?? 
undefined + + const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId) + + return { + userId, + workflowId, + workspaceId, + decryptedEnvVars, + } +} + diff --git a/apps/sim/lib/copilot/orchestrator/types.ts b/apps/sim/lib/copilot/orchestrator/types.ts new file mode 100644 index 0000000000..f4adbdeead --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/types.ts @@ -0,0 +1,127 @@ +import type { CopilotProviderConfig } from '@/lib/copilot/types' + +export type SSEEventType = + | 'chat_id' + | 'title_updated' + | 'content' + | 'reasoning' + | 'tool_call' + | 'tool_generating' + | 'tool_result' + | 'tool_error' + | 'subagent_start' + | 'subagent_end' + | 'done' + | 'error' + | 'start' + +export interface SSEEvent { + type: SSEEventType + data?: any + subagent?: string + toolCallId?: string + toolName?: string +} + +export type ToolCallStatus = 'pending' | 'executing' | 'success' | 'error' | 'skipped' | 'rejected' + +export interface ToolCallState { + id: string + name: string + status: ToolCallStatus + params?: Record + result?: ToolCallResult + error?: string + startTime?: number + endTime?: number +} + +export interface ToolCallResult { + success: boolean + output?: any + error?: string +} + +export type ContentBlockType = 'text' | 'thinking' | 'tool_call' | 'subagent_text' + +export interface ContentBlock { + type: ContentBlockType + content?: string + toolCall?: ToolCallState + timestamp: number +} + +export interface StreamingContext { + chatId?: string + conversationId?: string + messageId: string + accumulatedContent: string + contentBlocks: ContentBlock[] + toolCalls: Map + currentThinkingBlock: ContentBlock | null + isInThinkingBlock: boolean + subAgentParentToolCallId?: string + subAgentContent: Record + subAgentToolCalls: Record + pendingContent: string + streamComplete: boolean + wasAborted: boolean + errors: string[] +} + +export interface OrchestratorRequest { + message: string + workflowId: string + userId: string + chatId?: string 
+ mode?: 'agent' | 'ask' | 'plan' + model?: string + conversationId?: string + contexts?: Array<{ type: string; content: string }> + fileAttachments?: any[] + commands?: string[] + provider?: CopilotProviderConfig + streamToolCalls?: boolean + version?: string + prefetch?: boolean + userName?: string +} + +export interface OrchestratorOptions { + autoExecuteTools?: boolean + timeout?: number + onEvent?: (event: SSEEvent) => void | Promise + onComplete?: (result: OrchestratorResult) => void | Promise + onError?: (error: Error) => void | Promise + abortSignal?: AbortSignal + interactive?: boolean +} + +export interface OrchestratorResult { + success: boolean + content: string + contentBlocks: ContentBlock[] + toolCalls: ToolCallSummary[] + chatId?: string + conversationId?: string + error?: string + errors?: string[] +} + +export interface ToolCallSummary { + id: string + name: string + status: ToolCallStatus + params?: Record + result?: any + error?: string + durationMs?: number +} + +export interface ExecutionContext { + userId: string + workflowId: string + workspaceId?: string + decryptedEnvVars?: Record +} + diff --git a/apps/sim/stores/panel/copilot/store.ts b/apps/sim/stores/panel/copilot/store.ts index e368d412e1..3cdff056bd 100644 --- a/apps/sim/stores/panel/copilot/store.ts +++ b/apps/sim/stores/panel/copilot/store.ts @@ -54,6 +54,7 @@ import { TestClientTool } from '@/lib/copilot/tools/client/other/test' import { TourClientTool } from '@/lib/copilot/tools/client/other/tour' import { WorkflowClientTool } from '@/lib/copilot/tools/client/other/workflow' import { createExecutionContext, getTool } from '@/lib/copilot/tools/client/registry' +import { COPILOT_SERVER_ORCHESTRATED } from '@/lib/copilot/orchestrator/config' import { GetCredentialsClientTool } from '@/lib/copilot/tools/client/user/get-credentials' import { SetEnvironmentVariablesClientTool } from '@/lib/copilot/tools/client/user/set-environment-variables' import { CheckDeploymentStatusClientTool } 
from '@/lib/copilot/tools/client/workflow/check-deployment-status' @@ -1198,6 +1199,18 @@ const sseHandlers: Record = { } } catch {} } + + if (COPILOT_SERVER_ORCHESTRATED && current.name === 'edit_workflow') { + try { + const resultPayload = + data?.result || data?.data?.result || data?.data?.data || data?.data || {} + const workflowState = resultPayload?.workflowState + if (workflowState) { + const diffStore = useWorkflowDiffStore.getState() + void diffStore.setProposedChanges(workflowState) + } + } catch {} + } } // Update inline content block state @@ -1362,6 +1375,10 @@ const sseHandlers: Record = { return } + if (COPILOT_SERVER_ORCHESTRATED) { + return + } + // Prefer interface-based registry to determine interrupt and execute try { const def = name ? getTool(name) : undefined @@ -3820,6 +3837,9 @@ export const useCopilotStore = create()( setEnabledModels: (models) => set({ enabledModels: models }), executeIntegrationTool: async (toolCallId: string) => { + if (COPILOT_SERVER_ORCHESTRATED) { + return + } const { toolCallsById, workflowId } = get() const toolCall = toolCallsById[toolCallId] if (!toolCall || !workflowId) return diff --git a/docs/COPILOT_SERVER_REFACTOR.md b/docs/COPILOT_SERVER_REFACTOR.md new file mode 100644 index 0000000000..a58e5aa6ad --- /dev/null +++ b/docs/COPILOT_SERVER_REFACTOR.md @@ -0,0 +1,927 @@ +# Copilot Server-Side Refactor Plan + +> **Goal**: Move copilot orchestration logic from the browser (React/Zustand) to the Next.js server, enabling both headless API access and a simplified interactive client. + +## Table of Contents + +1. [Executive Summary](#executive-summary) +2. [Current Architecture](#current-architecture) +3. [Target Architecture](#target-architecture) +4. [Scope & Boundaries](#scope--boundaries) +5. [Module Design](#module-design) +6. [Implementation Plan](#implementation-plan) +7. [API Contracts](#api-contracts) +8. [Migration Strategy](#migration-strategy) +9. [Testing Strategy](#testing-strategy) +10. 
[Risks & Mitigations](#risks--mitigations) +11. [File Inventory](#file-inventory) + +--- + +## Executive Summary + +### Problem + +The current copilot implementation in Sim has all orchestration logic in the browser: +- SSE stream parsing happens in the React client +- Tool execution is triggered from the browser +- OAuth tokens are sent to the client +- No headless/API access is possible +- The Zustand store is ~4,200 lines of complex async logic + +### Solution + +Move orchestration to the Next.js server: +- Server parses SSE from copilot backend +- Server executes tools directly (no HTTP round-trips) +- Server forwards events to client (if attached) +- Headless API returns JSON response +- Client store becomes a thin UI layer (~600 lines) + +### Benefits + +| Aspect | Before | After | +|--------|--------|-------| +| Security | OAuth tokens in browser | Tokens stay server-side | +| Headless access | Not possible | Full API support | +| Store complexity | ~4,200 lines | ~600 lines | +| Tool execution | Browser-initiated | Server-side | +| Testing | Complex async | Simple state | +| Bundle size | Large (tool classes) | Minimal | + +--- + +## Current Architecture + +``` +┌─────────────────────────────────────────────────────────────────────────────┐ +│ BROWSER (React) │ +├─────────────────────────────────────────────────────────────────────────────┤ +│ │ +│ ┌─────────────────────────────────────────────────────────────────────────┐│ +│ │ Copilot Store (4,200 lines) ││ +│ │ ││ +│ │ • SSE stream parsing (parseSSEStream) ││ +│ │ • Event handlers (sseHandlers, subAgentSSEHandlers) ││ +│ │ • Tool execution logic ││ +│ │ • Client tool instantiation ││ +│ │ • Content block processing ││ +│ │ • State management ││ +│ │ • UI state ││ +│ └─────────────────────────────────────────────────────────────────────────┘│ +│ │ │ +│ │ HTTP calls for tool execution │ +│ ▼ │ +└─────────────────────────────────────────────────────────────────────────────┘ + │ + ▼ 
+┌─────────────────────────────────────────────────────────────────────────────┐ +│ NEXT.JS SERVER │ +├─────────────────────────────────────────────────────────────────────────────┤ +│ │ +│ /api/copilot/chat - Proxy to copilot backend (pass-through) │ +│ /api/copilot/execute-tool - Execute integration tools │ +│ /api/copilot/confirm - Update Redis with tool status │ +│ /api/copilot/tools/mark-complete - Notify copilot backend │ +│ /api/copilot/execute-copilot-server-tool - Execute server tools │ +│ │ +└─────────────────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────────────────┐ +│ COPILOT BACKEND (Go) │ +│ copilot.sim.ai │ +├─────────────────────────────────────────────────────────────────────────────┤ +│ │ +│ • LLM orchestration │ +│ • Subagent system (plan, edit, debug, etc.) │ +│ • Tool definitions │ +│ • Conversation management │ +│ • SSE streaming │ +│ │ +└─────────────────────────────────────────────────────────────────────────────┘ +``` + +### Current Flow (Interactive) + +1. User sends message in UI +2. Store calls `/api/copilot/chat` +3. Chat route proxies to copilot backend, streams SSE back +4. **Store parses SSE in browser** +5. On `tool_call` event: + - Store decides if tool needs confirmation + - Store calls `/api/copilot/execute-tool` or `/api/copilot/execute-copilot-server-tool` + - Store calls `/api/copilot/tools/mark-complete` +6. Store updates UI state + +### Problems with Current Flow + +1. **No headless access**: Must have browser client +2. **Security**: OAuth tokens sent to browser for tool execution +3. **Complexity**: All orchestration logic in Zustand store +4. **Performance**: Multiple HTTP round-trips from browser +5. **Reliability**: Browser can disconnect mid-operation +6. 
**Testing**: Hard to test async browser logic + +--- + +## Target Architecture + +``` +┌─────────────────────────────────────────────────────────────────────────────┐ +│ BROWSER (React) │ +├─────────────────────────────────────────────────────────────────────────────┤ +│ │ +│ ┌─────────────────────────────────────────────────────────────────────────┐│ +│ │ Copilot Store (~600 lines) ││ +│ │ ││ +│ │ • UI state (messages, toolCalls display) ││ +│ │ • Event listener (receive server events) ││ +│ │ • User actions (send message, confirm/reject) ││ +│ │ • Simple API calls ││ +│ └─────────────────────────────────────────────────────────────────────────┘│ +│ │ │ +│ │ SSE events from server │ +│ │ │ +└─────────────────────────────────────────────────────────────────────────────┘ + ▲ + │ (Optional - headless mode has no client) + │ +┌─────────────────────────────────────────────────────────────────────────────┐ +│ NEXT.JS SERVER │ +├─────────────────────────────────────────────────────────────────────────────┤ +│ │ +│ ┌─────────────────────────────────────────────────────────────────────────┐│ +│ │ Orchestrator Module (NEW) ││ +│ │ lib/copilot/orchestrator/ ││ +│ │ ││ +│ │ • SSE stream parsing ││ +│ │ • Event handlers ││ +│ │ • Tool execution (direct function calls) ││ +│ │ • Response building ││ +│ │ • Event forwarding (to client if attached) ││ +│ └─────────────────────────────────────────────────────────────────────────┘│ +│ │ │ +│ ┌──────┴──────┐ │ +│ │ │ │ +│ ▼ ▼ │ +│ /api/copilot/chat /api/v1/copilot/chat │ +│ (Interactive) (Headless) │ +│ - Session auth - API key auth │ +│ - SSE to client - JSON response │ +│ │ +└─────────────────────────────────────────────────────────────────────────────┘ + │ + │ (Single external HTTP call) + ▼ +┌─────────────────────────────────────────────────────────────────────────────┐ +│ COPILOT BACKEND (Go) │ +│ (UNCHANGED - no modifications) │ +└─────────────────────────────────────────────────────────────────────────────┘ +``` + +### Target 
Flow (Headless) + +1. External client calls `POST /api/v1/copilot/chat` with API key +2. Orchestrator calls copilot backend +3. **Server parses SSE stream** +4. **Server executes tools directly** (no HTTP) +5. Server notifies copilot backend (mark-complete) +6. Server returns JSON response + +### Target Flow (Interactive) + +1. User sends message in UI +2. Store calls `/api/copilot/chat` +3. **Server orchestrates everything** +4. Server forwards events to client via SSE +5. Client just updates UI from events +6. Server returns when complete + +--- + +## Scope & Boundaries + +### In Scope + +| Item | Description | +|------|-------------| +| Orchestrator module | New module in `lib/copilot/orchestrator/` | +| Headless API route | New route `POST /api/v1/copilot/chat` | +| SSE parsing | Move from store to server | +| Tool execution | Direct function calls on server | +| Event forwarding | SSE to client (interactive mode) | +| Store simplification | Reduce to UI-only logic | + +### Out of Scope + +| Item | Reason | +|------|--------| +| Copilot backend (Go) | Separate repo, working correctly | +| Tool definitions | Already work, just called differently | +| LLM providers | Handled by copilot backend | +| Subagent system | Handled by copilot backend | + +### Boundaries + +``` + ┌─────────────────────────────────────┐ + │ MODIFICATION ZONE │ + │ │ + ┌────────────────┼─────────────────────────────────────┼────────────────┐ + │ │ │ │ + │ UNCHANGED │ apps/sim/ │ UNCHANGED │ + │ │ ├── lib/copilot/orchestrator/ │ │ + │ copilot/ │ │ └── (NEW) │ apps/sim/ │ + │ (Go backend) │ ├── app/api/v1/copilot/ │ tools/ │ + │ │ │ └── (NEW) │ (definitions)│ + │ │ ├── app/api/copilot/chat/ │ │ + │ │ │ └── (MODIFIED) │ │ + │ │ └── stores/panel/copilot/ │ │ + │ │ └── (SIMPLIFIED) │ │ + │ │ │ │ + └────────────────┼─────────────────────────────────────┼────────────────┘ + │ │ + └─────────────────────────────────────┘ +``` + +--- + +## Module Design + +### Directory Structure + +``` 
+apps/sim/lib/copilot/orchestrator/ +├── index.ts # Main orchestrator function +├── types.ts # Type definitions +├── sse-parser.ts # Parse SSE stream from copilot backend +├── sse-handlers.ts # Handle each SSE event type +├── tool-executor.ts # Execute tools directly (no HTTP) +├── persistence.ts # Database and Redis operations +└── response-builder.ts # Build final response +``` + +### Module Responsibilities + +#### `types.ts` + +Defines all types used by the orchestrator: + +```typescript +// SSE Events +interface SSEEvent { type, data, subagent?, toolCallId?, toolName? } +type SSEEventType = 'content' | 'tool_call' | 'tool_result' | 'done' | ... + +// Tool State +interface ToolCallState { id, name, status, params?, result?, error? } +type ToolCallStatus = 'pending' | 'executing' | 'success' | 'error' | 'skipped' + +// Streaming Context (internal state during orchestration) +interface StreamingContext { + chatId?, conversationId?, messageId + accumulatedContent, contentBlocks + toolCalls: Map + streamComplete, errors[] +} + +// Orchestrator API +interface OrchestratorRequest { message, workflowId, userId, chatId?, mode?, ... } +interface OrchestratorOptions { autoExecuteTools?, onEvent?, timeout?, ... } +interface OrchestratorResult { success, content, toolCalls[], chatId?, error? } + +// Execution Context (passed to tool executors) +interface ExecutionContext { userId, workflowId, workspaceId?, decryptedEnvVars? 
}
+```
+
+#### `sse-parser.ts`
+
+Parses SSE stream into typed events:
+
+```typescript
+async function* parseSSEStream(
+  reader: ReadableStreamDefaultReader<Uint8Array>,
+  decoder: TextDecoder,
+  abortSignal?: AbortSignal
+): AsyncGenerator<SSEEvent>
+```
+
+- Handles buffering for partial lines
+- Parses JSON from `data:` lines
+- Yields typed `SSEEvent` objects
+- Supports abort signal
+
+#### `sse-handlers.ts`
+
+Handles each SSE event type:
+
+```typescript
+const sseHandlers: Record<SSEEventType, SSEHandler> = {
+  content: (event, context) => { /* append to accumulated content */ },
+  tool_call: async (event, context, execContext, options) => {
+    /* track tool, execute if autoExecuteTools */
+  },
+  tool_result: (event, context) => { /* update tool status */ },
+  tool_generating: (event, context) => { /* create pending tool */ },
+  reasoning: (event, context) => { /* handle thinking blocks */ },
+  done: (event, context) => { /* mark stream complete */ },
+  error: (event, context) => { /* record error */ },
+  // ... etc
+}
+
+const subAgentHandlers: Record<SSEEventType, SSEHandler> = {
+  // Handlers for events within subagent context
+}
+```
+
+#### `tool-executor.ts`
+
+Executes tools directly without HTTP:
+
+```typescript
+// Main entry point
+async function executeToolServerSide(
+  toolCall: ToolCallState,
+  context: ExecutionContext
+): Promise<ToolCallResult>
+
+// Server tools (edit_workflow, search_documentation, etc.)
+async function executeServerToolDirect(
+  toolName: string,
+  params: Record<string, any>,
+  context: ExecutionContext
+): Promise<ToolCallResult>
+
+// Integration tools (slack_send, gmail_read, etc.)
+async function executeIntegrationToolDirect(
+  toolCallId: string,
+  toolName: string,
+  toolConfig: ToolConfig,
+  params: Record<string, any>,
+  context: ExecutionContext
+): Promise<ToolCallResult>
+
+// Notify copilot backend (external HTTP - required)
+async function markToolComplete(
+  toolCallId: string,
+  toolName: string,
+  status: number,
+  message?: any,
+  data?: any
+): Promise<void>
+
+// Prepare cached context for tool execution
+async function prepareExecutionContext(
+  userId: string,
+  workflowId: string
+): Promise<ExecutionContext>
+```
+
+**Key principle**: Internal tool execution uses direct function calls. Only `markToolComplete` makes HTTP call (to copilot backend - external).
+
+#### `persistence.ts`
+
+Database and Redis operations:
+
+```typescript
+// Chat persistence
+async function createChat(params): Promise<{ id: string }>
+async function loadChat(chatId, userId): Promise<Chat | null>
+async function saveMessages(chatId, messages, options?): Promise<void>
+async function updateChatConversationId(chatId, conversationId): Promise<void>
+
+// Tool confirmation (Redis)
+async function setToolConfirmation(toolCallId, status, message?): Promise<void>
+async function getToolConfirmation(toolCallId): Promise<ToolConfirmation | null>
+```
+
+#### `index.ts`
+
+Main orchestrator function:
+
+```typescript
+async function orchestrateCopilotRequest(
+  request: OrchestratorRequest,
+  options: OrchestratorOptions = {}
+): Promise<OrchestratorResult> {
+
+  // 1. Prepare execution context (cache env vars, etc.)
+  const execContext = await prepareExecutionContext(userId, workflowId)
+
+  // 2. Handle chat creation/loading
+  let chatId = await resolveChat(request)
+
+  // 3. Build request payload for copilot backend
+  const payload = buildCopilotPayload(request)
+
+  // 4. Call copilot backend
+  const response = await fetch(COPILOT_URL, { body: JSON.stringify(payload) })
+
+  // 5. Create streaming context
+  const context = createStreamingContext(chatId)
+
+  // 6.
Parse and handle SSE stream + for await (const event of parseSSEStream(response.body)) { + // Forward to client if attached + options.onEvent?.(event) + + // Handle event + const handler = getHandler(event) + await handler(event, context, execContext, options) + + if (context.streamComplete) break + } + + // 7. Persist to database + await persistChat(chatId, context) + + // 8. Build and return result + return buildResult(context) +} +``` + +--- + +## Implementation Plan + +### Phase 1: Create Orchestrator Module (3-4 days) + +**Goal**: Build the orchestrator module that can run independently. + +#### Tasks + +1. **Create `types.ts`** (~200 lines) + - [ ] Define SSE event types + - [ ] Define tool call state types + - [ ] Define streaming context type + - [ ] Define orchestrator request/response types + - [ ] Define execution context type + +2. **Create `sse-parser.ts`** (~80 lines) + - [ ] Extract parsing logic from store.ts + - [ ] Add abort signal support + - [ ] Add error handling + +3. **Create `persistence.ts`** (~120 lines) + - [ ] Extract DB operations from chat route + - [ ] Extract Redis operations from confirm route + - [ ] Add chat creation/loading + - [ ] Add message saving + +4. **Create `tool-executor.ts`** (~300 lines) + - [ ] Create `executeToolServerSide()` main entry + - [ ] Create `executeServerToolDirect()` for server tools + - [ ] Create `executeIntegrationToolDirect()` for integration tools + - [ ] Create `markToolComplete()` for copilot backend notification + - [ ] Create `prepareExecutionContext()` for caching + - [ ] Handle OAuth token resolution + - [ ] Handle env var resolution + +5. **Create `sse-handlers.ts`** (~350 lines) + - [ ] Extract handlers from store.ts + - [ ] Adapt for server-side context + - [ ] Add tool execution integration + - [ ] Add subagent handlers + +6. 
**Create `index.ts`** (~250 lines) + - [ ] Create `orchestrateCopilotRequest()` main function + - [ ] Wire together all modules + - [ ] Add timeout handling + - [ ] Add abort signal support + - [ ] Add event forwarding + +#### Deliverables + +- Complete `lib/copilot/orchestrator/` module +- Unit tests for each component +- Integration test for full orchestration + +### Phase 2: Create Headless API Route (1 day) + +**Goal**: Create API endpoint for headless copilot access. + +#### Tasks + +1. **Create route** `app/api/v1/copilot/chat/route.ts` (~100 lines) + - [ ] Add API key authentication + - [ ] Parse and validate request + - [ ] Call orchestrator + - [ ] Return JSON response + +2. **Add to API documentation** + - [ ] Document request format + - [ ] Document response format + - [ ] Document error codes + +#### Deliverables + +- Working `POST /api/v1/copilot/chat` endpoint +- API documentation +- E2E test + +### Phase 3: Wire Interactive Route (2 days) + +**Goal**: Use orchestrator for existing interactive flow. + +#### Tasks + +1. **Modify `/api/copilot/chat/route.ts`** + - [ ] Add feature flag for new vs old flow + - [ ] Call orchestrator with `onEvent` callback + - [ ] Forward events to client via SSE + - [ ] Maintain backward compatibility + +2. **Test both flows** + - [ ] Verify interactive works with new orchestrator + - [ ] Verify old flow still works (feature flag off) + +#### Deliverables + +- Interactive route using orchestrator +- Feature flag for gradual rollout +- No breaking changes + +### Phase 4: Simplify Client Store (2-3 days) + +**Goal**: Remove orchestration logic from client, keep UI-only. + +#### Tasks + +1. **Create simplified store** (new file or gradual refactor) + - [ ] Keep: UI state, messages, tool display + - [ ] Keep: Simple API calls + - [ ] Keep: Event listener + - [ ] Remove: SSE parsing + - [ ] Remove: Tool execution logic + - [ ] Remove: Client tool instantiators + +2. 
**Update components** + - [ ] Update components to use simplified store + - [ ] Remove tool execution from UI components + - [ ] Simplify tool display components + +3. **Remove dead code** + - [ ] Remove unused imports + - [ ] Remove unused helper functions + - [ ] Remove client tool classes (if no longer needed) + +#### Deliverables + +- Simplified store (~600 lines) +- Updated components +- Reduced bundle size + +### Phase 5: Testing & Polish (2-3 days) + +#### Tasks + +1. **E2E testing** + - [ ] Test headless API with various prompts + - [ ] Test interactive with various prompts + - [ ] Test tool execution scenarios + - [ ] Test error handling + - [ ] Test abort/timeout scenarios + +2. **Performance testing** + - [ ] Compare latency (old vs new) + - [ ] Check memory usage + - [ ] Check for connection issues + +3. **Documentation** + - [ ] Update developer docs + - [ ] Add architecture diagram + - [ ] Document new API + +#### Deliverables + +- Comprehensive test suite +- Performance benchmarks +- Complete documentation + +--- + +## API Contracts + +### Headless API + +#### Request + +```http +POST /api/v1/copilot/chat +Content-Type: application/json +X-API-Key: sim_xxx + +{ + "message": "Create a Slack notification workflow", + "workflowId": "wf_abc123", + "chatId": "chat_xyz", // Optional: continue existing chat + "mode": "agent", // Optional: "agent" | "ask" | "plan" + "model": "claude-4-sonnet", // Optional + "autoExecuteTools": true, // Optional: default true + "timeout": 300000 // Optional: default 5 minutes +} +``` + +#### Response (Success) + +```json +{ + "success": true, + "content": "I've created a Slack notification workflow that...", + "toolCalls": [ + { + "id": "tc_001", + "name": "search_patterns", + "status": "success", + "params": { "query": "slack notification" }, + "result": { "patterns": [...] }, + "durationMs": 234 + }, + { + "id": "tc_002", + "name": "edit_workflow", + "status": "success", + "params": { "operations": [...] 
}, + "result": { "blocksAdded": 3 }, + "durationMs": 1523 + } + ], + "chatId": "chat_xyz", + "conversationId": "conv_123" +} +``` + +#### Response (Error) + +```json +{ + "success": false, + "error": "Workflow not found", + "content": "", + "toolCalls": [] +} +``` + +#### Error Codes + +| Status | Error | Description | +|--------|-------|-------------| +| 400 | Invalid request | Missing required fields | +| 401 | Unauthorized | Invalid or missing API key | +| 404 | Workflow not found | Workflow ID doesn't exist | +| 500 | Internal error | Server-side failure | +| 504 | Timeout | Request exceeded timeout | + +### Interactive API (Existing - Modified) + +The existing `/api/copilot/chat` endpoint continues to work but now uses the orchestrator internally. SSE events forwarded to client remain the same format. + +--- + +## Migration Strategy + +### Rollout Plan + +``` +Week 1: Phase 1 (Orchestrator) +├── Day 1-2: Types + SSE Parser +├── Day 3: Tool Executor +└── Day 4-5: Handlers + Main Orchestrator + +Week 2: Phase 2-3 (Routes) +├── Day 1: Headless API route +├── Day 2-3: Wire interactive route +└── Day 4-5: Testing both modes + +Week 3: Phase 4-5 (Cleanup) +├── Day 1-3: Simplify store +├── Day 4: Testing +└── Day 5: Documentation +``` + +### Feature Flags + +```typescript +// lib/copilot/config.ts + +export const COPILOT_FLAGS = { + // Use new orchestrator for interactive mode + USE_SERVER_ORCHESTRATOR: process.env.COPILOT_USE_SERVER_ORCHESTRATOR === 'true', + + // Enable headless API + ENABLE_HEADLESS_API: process.env.COPILOT_ENABLE_HEADLESS_API === 'true', +} +``` + +### Rollback Plan + +If issues arise: +1. Set `COPILOT_USE_SERVER_ORCHESTRATOR=false` +2. Interactive mode falls back to old client-side flow +3. 
Headless API returns 503 Service Unavailable + +--- + +## Testing Strategy + +### Unit Tests + +``` +lib/copilot/orchestrator/ +├── __tests__/ +│ ├── sse-parser.test.ts +│ ├── sse-handlers.test.ts +│ ├── tool-executor.test.ts +│ ├── persistence.test.ts +│ └── index.test.ts +``` + +#### SSE Parser Tests + +```typescript +describe('parseSSEStream', () => { + it('parses content events') + it('parses tool_call events') + it('handles partial lines') + it('handles malformed JSON') + it('respects abort signal') +}) +``` + +#### Tool Executor Tests + +```typescript +describe('executeToolServerSide', () => { + it('executes server tools directly') + it('executes integration tools with OAuth') + it('resolves env var references') + it('handles tool not found') + it('handles execution errors') +}) +``` + +### Integration Tests + +```typescript +describe('orchestrateCopilotRequest', () => { + it('handles simple message without tools') + it('handles message with single tool call') + it('handles message with multiple tool calls') + it('handles subagent tool calls') + it('handles stream errors') + it('respects timeout') + it('forwards events to callback') +}) +``` + +### E2E Tests + +```typescript +describe('POST /api/v1/copilot/chat', () => { + it('returns 401 without API key') + it('returns 400 with invalid request') + it('executes simple ask query') + it('executes workflow modification') + it('handles tool execution') +}) +``` + +--- + +## Risks & Mitigations + +### Risk 1: Breaking Interactive Mode + +**Risk**: Refactoring could break existing interactive copilot. + +**Mitigation**: +- Feature flag for gradual rollout +- Keep old code path available +- Extensive E2E testing +- Staged deployment (internal → beta → production) + +### Risk 2: Tool Execution Differences + +**Risk**: Tool behavior differs between client and server execution. 
+ +**Mitigation**: +- Reuse existing tool execution logic (same functions) +- Compare outputs in parallel testing +- Log discrepancies for investigation + +### Risk 3: Performance Regression + +**Risk**: Server-side orchestration could be slower. + +**Mitigation**: +- Actually should be faster (no browser round-trips) +- Benchmark before/after +- Profile critical paths + +### Risk 4: Memory Usage + +**Risk**: Server accumulates state during long-running requests. + +**Mitigation**: +- Set reasonable timeouts +- Clean up context after request +- Monitor memory in production + +### Risk 5: Connection Issues + +**Risk**: Long-running SSE connections could drop. + +**Mitigation**: +- Implement reconnection logic +- Save checkpoints to resume +- Handle partial completions gracefully + +--- + +## File Inventory + +### New Files + +| File | Lines | Description | +|------|-------|-------------| +| `lib/copilot/orchestrator/types.ts` | ~200 | Type definitions | +| `lib/copilot/orchestrator/sse-parser.ts` | ~80 | SSE stream parsing | +| `lib/copilot/orchestrator/sse-handlers.ts` | ~350 | Event handlers | +| `lib/copilot/orchestrator/tool-executor.ts` | ~300 | Tool execution | +| `lib/copilot/orchestrator/persistence.ts` | ~120 | DB/Redis operations | +| `lib/copilot/orchestrator/index.ts` | ~250 | Main orchestrator | +| `app/api/v1/copilot/chat/route.ts` | ~100 | Headless API | +| **Total New** | **~1,400** | | + +### Modified Files + +| File | Change | +|------|--------| +| `app/api/copilot/chat/route.ts` | Use orchestrator (optional) | +| `stores/panel/copilot/store.ts` | Simplify to ~600 lines | + +### Deleted Code (from store.ts) + +| Section | Lines Removed | +|---------|---------------| +| SSE parsing logic | ~150 | +| `sseHandlers` object | ~750 | +| `subAgentSSEHandlers` | ~280 | +| Tool execution logic | ~400 | +| Client tool instantiators | ~120 | +| Content block helpers | ~200 | +| Streaming context | ~100 | +| **Total Removed** | **~2,000** | + +### Net Change + 
+``` +New code: +1,400 lines (orchestrator module) +Removed code: -2,000 lines (from store) +Modified code: ~200 lines (route changes) +─────────────────────────────────────── +Net change: -400 lines (cleaner, more maintainable) +``` + +--- + +## Appendix: Code Extraction Map + +### From `stores/panel/copilot/store.ts` + +| Source Lines | Destination | Notes | +|--------------|-------------|-------| +| 900-1050 (parseSSEStream) | `sse-parser.ts` | Adapt for server | +| 1120-1867 (sseHandlers) | `sse-handlers.ts` | Remove Zustand deps | +| 1940-2217 (subAgentSSEHandlers) | `sse-handlers.ts` | Merge with above | +| 1365-1583 (tool execution) | `tool-executor.ts` | Direct calls | +| 330-380 (StreamingContext) | `types.ts` | Clean up | +| 3328-3648 (handleStreamingResponse) | `index.ts` | Main loop | + +### From `app/api/copilot/execute-tool/route.ts` + +| Source Lines | Destination | Notes | +|--------------|-------------|-------| +| 30-247 (POST handler) | `tool-executor.ts` | Extract core logic | + +### From `app/api/copilot/confirm/route.ts` + +| Source Lines | Destination | Notes | +|--------------|-------------|-------| +| 28-89 (updateToolCallStatus) | `persistence.ts` | Redis operations | + +--- + +## Approval & Sign-off + +- [ ] Technical review complete +- [ ] Security review complete +- [ ] Performance impact assessed +- [ ] Rollback plan approved +- [ ] Testing plan approved + +--- + +*Document created: January 2026* +*Last updated: January 2026* + From cb3618a104087fa62b7e6ecc7b98ed313dbffbd8 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Fri, 30 Jan 2026 12:15:41 -0800 Subject: [PATCH 02/72] v1 --- apps/sim/lib/copilot/orchestrator/config.ts | 19 + .../lib/copilot/orchestrator/sse-handlers.ts | 6 +- .../lib/copilot/orchestrator/tool-executor.ts | 1182 ++++++++++++++++- 3 files changed, 1204 insertions(+), 3 deletions(-) diff --git a/apps/sim/lib/copilot/orchestrator/config.ts b/apps/sim/lib/copilot/orchestrator/config.ts index 
3f6eb99876..69901d6961 100644 --- a/apps/sim/lib/copilot/orchestrator/config.ts +++ b/apps/sim/lib/copilot/orchestrator/config.ts @@ -21,3 +21,22 @@ export const INTERRUPT_TOOL_NAMES = [ export const INTERRUPT_TOOL_SET = new Set(INTERRUPT_TOOL_NAMES) +export const SUBAGENT_TOOL_NAMES = [ + 'debug', + 'edit', + 'plan', + 'test', + 'deploy', + 'auth', + 'research', + 'knowledge', + 'custom_tool', + 'tour', + 'info', + 'workflow', + 'evaluate', + 'superagent', +] as const + +export const SUBAGENT_TOOL_SET = new Set(SUBAGENT_TOOL_NAMES) + diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts index 101b281381..269f2e43ba 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts @@ -9,7 +9,7 @@ import type { } from '@/lib/copilot/orchestrator/types' import { executeToolServerSide, markToolComplete } from '@/lib/copilot/orchestrator/tool-executor' import { getToolConfirmation } from '@/lib/copilot/orchestrator/persistence' -import { INTERRUPT_TOOL_SET } from '@/lib/copilot/orchestrator/config' +import { INTERRUPT_TOOL_SET, SUBAGENT_TOOL_SET } from '@/lib/copilot/orchestrator/config' const logger = createLogger('CopilotSseHandlers') @@ -168,6 +168,10 @@ export const sseHandlers: Record = { if (isPartial) return + if (SUBAGENT_TOOL_SET.has(toolName)) { + return + } + const isInterruptTool = INTERRUPT_TOOL_SET.has(toolName) const isInteractive = options.interactive === true diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor.ts b/apps/sim/lib/copilot/orchestrator/tool-executor.ts index fe288f2446..3d5ec4d696 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor.ts @@ -1,16 +1,37 @@ import { db } from '@sim/db' -import { account, workflow } from '@sim/db/schema' +import { + account, + chat, + customTools, + permissions, + workflow, + workflowMcpServer, + workflowMcpTool, +} from 
'@sim/db/schema' import { createLogger } from '@sim/logger' -import { and, eq } from 'drizzle-orm' +import { and, asc, desc, eq, inArray, isNull, or } from 'drizzle-orm' import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils' +import { checkChatAccess, checkWorkflowAccessForChatCreation } from '@/app/api/chat/utils' import { resolveEnvVarReferences } from '@/executor/utils/reference-validation' +import { normalizeName } from '@/executor/constants' import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' import { generateRequestId } from '@/lib/core/utils/request' import { env } from '@/lib/core/config/env' import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' +import { listWorkspaceFiles } from '@/lib/uploads/contexts/workspace' +import { mcpService } from '@/lib/mcp/service' +import { sanitizeForCopilot } from '@/lib/workflows/sanitization/json-sanitizer' +import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils' +import { executeWorkflow } from '@/lib/workflows/executor/execute-workflow' +import { deployWorkflow, undeployWorkflow } from '@/lib/workflows/persistence/utils' +import { BlockPathCalculator } from '@/lib/workflows/blocks/block-path-calculator' +import { getBlockOutputPaths } from '@/lib/workflows/blocks/block-outputs' +import { isInputDefinitionTrigger } from '@/lib/workflows/triggers/input-definition-triggers' +import { hasValidStartBlock } from '@/lib/workflows/triggers/trigger-utils.server' import { executeTool } from '@/tools' import { getTool, resolveToolId } from '@/tools/utils' import { routeExecution } from '@/lib/copilot/tools/server/router' +import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema' import type { ExecutionContext, ToolCallResult, ToolCallState } from '@/lib/copilot/orchestrator/types' const logger = createLogger('CopilotToolExecutor') @@ -32,6 +53,24 @@ const SERVER_TOOLS = new Set([ 'knowledge_base', ]) +const SIM_WORKFLOW_TOOLS = new Set([ + 
'get_user_workflow',
+  'get_workflow_from_name',
+  'list_user_workflows',
+  'get_workflow_data',
+  'get_block_outputs',
+  'get_block_upstream_references',
+  'run_workflow',
+  'set_global_workflow_variables',
+  'deploy_api',
+  'deploy_chat',
+  'deploy_mcp',
+  'redeploy',
+  'check_deployment_status',
+  'list_workspace_mcp_servers',
+  'create_workspace_mcp_server',
+])
+
 /**
  * Execute a tool server-side without calling internal routes.
  */
@@ -46,6 +85,10 @@ export async function executeToolServerSide(
     return executeServerToolDirect(toolName, toolCall.params || {}, context)
   }
 
+  if (SIM_WORKFLOW_TOOLS.has(toolName)) {
+    return executeSimWorkflowTool(toolName, toolCall.params || {}, context)
+  }
+
   const toolConfig = getTool(resolvedToolName)
   if (!toolConfig) {
     logger.warn('Tool not found in registry', { toolName, resolvedToolName })
@@ -171,6 +214,1141 @@ async function executeIntegrationToolDirect(
   }
 }
 
+async function executeSimWorkflowTool(
+  toolName: string,
+  params: Record<string, any>,
+  context: ExecutionContext
+): Promise<ToolCallResult> {
+  switch (toolName) {
+    case 'get_user_workflow':
+      return executeGetUserWorkflow(params, context)
+    case 'get_workflow_from_name':
+      return executeGetWorkflowFromName(params, context)
+    case 'list_user_workflows':
+      return executeListUserWorkflows(context)
+    case 'get_workflow_data':
+      return executeGetWorkflowData(params, context)
+    case 'get_block_outputs':
+      return executeGetBlockOutputs(params, context)
+    case 'get_block_upstream_references':
+      return executeGetBlockUpstreamReferences(params, context)
+    case 'run_workflow':
+      return executeRunWorkflow(params, context)
+    case 'set_global_workflow_variables':
+      return executeSetGlobalWorkflowVariables(params, context)
+    case 'deploy_api':
+      return executeDeployApi(params, context)
+    case 'deploy_chat':
+      return executeDeployChat(params, context)
+    case 'deploy_mcp':
+      return executeDeployMcp(params, context)
+    case 'redeploy':
+      return executeRedeploy(context)
+    case 'check_deployment_status':
return executeCheckDeploymentStatus(params, context)
+    case 'list_workspace_mcp_servers':
+      return executeListWorkspaceMcpServers(params, context)
+    case 'create_workspace_mcp_server':
+      return executeCreateWorkspaceMcpServer(params, context)
+    default:
+      return { success: false, error: `Unsupported workflow tool: ${toolName}` }
+  }
+}
+
+async function ensureWorkflowAccess(workflowId: string, userId: string): Promise<{
+  workflow: typeof workflow.$inferSelect
+  workspaceId?: string | null
+}> {
+  const [workflowRecord] = await db
+    .select()
+    .from(workflow)
+    .where(eq(workflow.id, workflowId))
+    .limit(1)
+  if (!workflowRecord) {
+    throw new Error(`Workflow ${workflowId} not found`)
+  }
+
+  if (workflowRecord.userId === userId) {
+    return { workflow: workflowRecord, workspaceId: workflowRecord.workspaceId }
+  }
+
+  if (workflowRecord.workspaceId) {
+    const [permissionRow] = await db
+      .select({ permissionType: permissions.permissionType })
+      .from(permissions)
+      .where(
+        and(
+          eq(permissions.entityType, 'workspace'),
+          eq(permissions.entityId, workflowRecord.workspaceId),
+          eq(permissions.userId, userId)
+        )
+      )
+      .limit(1)
+    if (permissionRow) {
+      return { workflow: workflowRecord, workspaceId: workflowRecord.workspaceId }
+    }
+  }
+
+  throw new Error('Unauthorized workflow access')
+}
+
+async function executeGetUserWorkflow(
+  params: Record<string, any>,
+  context: ExecutionContext
+): Promise<ToolCallResult> {
+  try {
+    const workflowId = params.workflowId || context.workflowId
+    if (!workflowId) {
+      return { success: false, error: 'workflowId is required' }
+    }
+
+    await ensureWorkflowAccess(workflowId, context.userId)
+
+    const normalized = await loadWorkflowFromNormalizedTables(workflowId)
+    if (!normalized) {
+      return { success: false, error: 'Workflow has no normalized data' }
+    }
+
+    const workflowState = {
+      blocks: normalized.blocks || {},
+      edges: normalized.edges || [],
+      loops: normalized.loops || {},
+      parallels: normalized.parallels || {},
+    }
+    const sanitized =
sanitizeForCopilot(workflowState) + const userWorkflow = JSON.stringify(sanitized, null, 2) + + return { success: true, output: { userWorkflow } } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +async function executeGetWorkflowFromName( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowName = typeof params.workflow_name === 'string' ? params.workflow_name.trim() : '' + if (!workflowName) { + return { success: false, error: 'workflow_name is required' } + } + + const workspaceIds = await db + .select({ entityId: permissions.entityId }) + .from(permissions) + .where(and(eq(permissions.userId, context.userId), eq(permissions.entityType, 'workspace'))) + + const workspaceIdList = workspaceIds.map((row) => row.entityId) + + const workflowConditions = [eq(workflow.userId, context.userId)] + if (workspaceIdList.length > 0) { + workflowConditions.push(inArray(workflow.workspaceId, workspaceIdList)) + } + const workflows = await db + .select() + .from(workflow) + .where(or(...workflowConditions)) + + const match = workflows.find( + (w) => String(w.name || '').trim().toLowerCase() === workflowName.toLowerCase() + ) + if (!match) { + return { success: false, error: `Workflow not found: ${workflowName}` } + } + + const normalized = await loadWorkflowFromNormalizedTables(match.id) + if (!normalized) { + return { success: false, error: 'Workflow has no normalized data' } + } + + const workflowState = { + blocks: normalized.blocks || {}, + edges: normalized.edges || [], + loops: normalized.loops || {}, + parallels: normalized.parallels || {}, + } + const sanitized = sanitizeForCopilot(workflowState) + const userWorkflow = JSON.stringify(sanitized, null, 2) + + return { success: true, output: { userWorkflow } } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +async function executeListUserWorkflows(context: ExecutionContext): Promise { + try { + const workspaceIds = await db + .select({ entityId: permissions.entityId }) + .from(permissions) + .where(and(eq(permissions.userId, context.userId), eq(permissions.entityType, 'workspace'))) + + const workspaceIdList = workspaceIds.map((row) => row.entityId) + + const workflowConditions = [eq(workflow.userId, context.userId)] + if (workspaceIdList.length > 0) { + workflowConditions.push(inArray(workflow.workspaceId, workspaceIdList)) + } + const workflows = await db + .select() + .from(workflow) + .where(or(...workflowConditions)) + .orderBy(asc(workflow.sortOrder), asc(workflow.createdAt), asc(workflow.id)) + + const names = workflows + .map((w) => (typeof w.name === 'string' ? w.name : null)) + .filter((n): n is string => Boolean(n)) + + return { success: true, output: { workflow_names: names } } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +async function executeGetWorkflowData( + params: Record, + context: ExecutionContext +): Promise { + try { + const dataType = params.data_type + if (!dataType) { + return { success: false, error: 'data_type is required' } + } + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + + const { workflow: workflowRecord, workspaceId } = await ensureWorkflowAccess( + workflowId, + context.userId + ) + + if (dataType === 'global_variables') { + const variablesRecord = (workflowRecord.variables as Record) || {} + const variables = Object.values(variablesRecord).map((v: any) => ({ + id: String(v?.id || ''), + name: String(v?.name || ''), + value: v?.value, + })) + return { success: true, output: { variables } } + } + + if (dataType === 'custom_tools') { + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + const conditions = [ + eq(customTools.workspaceId, workspaceId), + and(eq(customTools.userId, context.userId), isNull(customTools.workspaceId)), + ] + const toolsRows = await db + .select() + .from(customTools) + .where(or(...conditions)) + .orderBy(desc(customTools.createdAt)) + + const customToolsData = toolsRows.map((tool) => ({ + id: String(tool.id || ''), + title: String(tool.title || ''), + functionName: String((tool.schema as any)?.function?.name || ''), + description: String((tool.schema as any)?.function?.description || ''), + parameters: (tool.schema as any)?.function?.parameters, + })) + + return { success: true, output: { customTools: customToolsData } } + } + + if (dataType === 'mcp_tools') { + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + const tools = await mcpService.discoverTools(context.userId, workspaceId, false) + const mcpTools = tools.map((tool) => ({ + name: String(tool.name || ''), + serverId: String(tool.serverId || ''), + serverName: 
String(tool.serverName || ''), + description: String(tool.description || ''), + inputSchema: tool.inputSchema, + })) + return { success: true, output: { mcpTools } } + } + + if (dataType === 'files') { + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + const files = await listWorkspaceFiles(workspaceId) + const fileResults = files.map((file) => ({ + id: String(file.id || ''), + name: String(file.name || ''), + key: String(file.key || ''), + path: String(file.path || ''), + size: Number(file.size || 0), + type: String(file.type || ''), + uploadedAt: String(file.uploadedAt || ''), + })) + return { success: true, output: { files: fileResults } } + } + + return { success: false, error: `Unknown data_type: ${dataType}` } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +async function executeGetBlockOutputs( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + await ensureWorkflowAccess(workflowId, context.userId) + + const normalized = await loadWorkflowFromNormalizedTables(workflowId) + if (!normalized) { + return { success: false, error: 'Workflow has no normalized data' } + } + + const blocks = normalized.blocks || {} + const loops = normalized.loops || {} + const parallels = normalized.parallels || {} + const blockIds = Array.isArray(params.blockIds) && params.blockIds.length > 0 + ? 
params.blockIds + : Object.keys(blocks) + + const results: Array<{ + blockId: string + blockName: string + blockType: string + outputs: string[] + insideSubflowOutputs?: string[] + outsideSubflowOutputs?: string[] + triggerMode?: boolean + }> = [] + + for (const blockId of blockIds) { + const block = blocks[blockId] + if (!block?.type) continue + const blockName = block.name || block.type + + if (block.type === 'loop' || block.type === 'parallel') { + const insidePaths = getSubflowInsidePaths(block.type, blockId, loops, parallels) + results.push({ + blockId, + blockName, + blockType: block.type, + outputs: [], + insideSubflowOutputs: formatOutputsWithPrefix(insidePaths, blockName), + outsideSubflowOutputs: formatOutputsWithPrefix(['results'], blockName), + triggerMode: block.triggerMode, + }) + continue + } + + const outputs = getBlockOutputPaths(block.type, block.subBlocks, block.triggerMode) + results.push({ + blockId, + blockName, + blockType: block.type, + outputs: formatOutputsWithPrefix(outputs, blockName), + triggerMode: block.triggerMode, + }) + } + + const variables = await getWorkflowVariablesForTool(workflowId) + + const payload = { blocks: results, variables } + return { success: true, output: payload } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +async function executeGetBlockUpstreamReferences( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + if (!Array.isArray(params.blockIds) || params.blockIds.length === 0) { + return { success: false, error: 'blockIds array is required' } + } + + await ensureWorkflowAccess(workflowId, context.userId) + const normalized = await loadWorkflowFromNormalizedTables(workflowId) + if (!normalized) { + return { success: false, error: 'Workflow has no normalized data' } + } + + const blocks = normalized.blocks || {} + const edges = normalized.edges || [] + const loops = normalized.loops || {} + const parallels = normalized.parallels || {} + + const graphEdges = edges.map((edge: any) => ({ source: edge.source, target: edge.target })) + const variableOutputs = await getWorkflowVariablesForTool(workflowId) + + const results: any[] = [] + + for (const blockId of params.blockIds) { + const targetBlock = blocks[blockId] + if (!targetBlock) continue + + const insideSubflows: Array<{ blockId: string; blockName: string; blockType: string }> = [] + const containingLoopIds = new Set() + const containingParallelIds = new Set() + + Object.values(loops as Record).forEach((loop) => { + if (loop?.nodes?.includes(blockId)) { + containingLoopIds.add(loop.id) + const loopBlock = blocks[loop.id] + if (loopBlock) { + insideSubflows.push({ + blockId: loop.id, + blockName: loopBlock.name || loopBlock.type, + blockType: 'loop', + }) + } + } + }) + + Object.values(parallels as Record).forEach((parallel) => { + if (parallel?.nodes?.includes(blockId)) { + containingParallelIds.add(parallel.id) + const parallelBlock = blocks[parallel.id] + if (parallelBlock) { + insideSubflows.push({ + blockId: parallel.id, + blockName: parallelBlock.name || parallelBlock.type, + blockType: 'parallel', + }) + } + } + 
}) + + const ancestorIds = BlockPathCalculator.findAllPathNodes(graphEdges, blockId) + const accessibleIds = new Set(ancestorIds) + accessibleIds.add(blockId) + + const starterBlock = Object.values(blocks).find((b: any) => isInputDefinitionTrigger(b.type)) + if (starterBlock && ancestorIds.includes((starterBlock as any).id)) { + accessibleIds.add((starterBlock as any).id) + } + + containingLoopIds.forEach((loopId) => { + accessibleIds.add(loopId) + loops[loopId]?.nodes?.forEach((nodeId: string) => accessibleIds.add(nodeId)) + }) + + containingParallelIds.forEach((parallelId) => { + accessibleIds.add(parallelId) + parallels[parallelId]?.nodes?.forEach((nodeId: string) => accessibleIds.add(nodeId)) + }) + + const accessibleBlocks: any[] = [] + + for (const accessibleBlockId of accessibleIds) { + const block = blocks[accessibleBlockId] + if (!block?.type) continue + const canSelfReference = + block.type === 'approval' || block.type === 'human_in_the_loop' + if (accessibleBlockId === blockId && !canSelfReference) continue + + const blockName = block.name || block.type + let accessContext: 'inside' | 'outside' | undefined + let outputPaths: string[] + + if (block.type === 'loop' || block.type === 'parallel') { + const isInside = + (block.type === 'loop' && containingLoopIds.has(accessibleBlockId)) || + (block.type === 'parallel' && containingParallelIds.has(accessibleBlockId)) + accessContext = isInside ? 'inside' : 'outside' + outputPaths = isInside + ? 
getSubflowInsidePaths(block.type, accessibleBlockId, loops, parallels) + : ['results'] + } else { + outputPaths = getBlockOutputPaths(block.type, block.subBlocks, block.triggerMode) + } + + const formattedOutputs = formatOutputsWithPrefix(outputPaths, blockName) + const entry: any = { + blockId: accessibleBlockId, + blockName, + blockType: block.type, + outputs: formattedOutputs, + } + if (block.triggerMode) entry.triggerMode = true + if (accessContext) entry.accessContext = accessContext + accessibleBlocks.push(entry) + } + + results.push({ + blockId, + blockName: targetBlock.name || targetBlock.type, + blockType: targetBlock.type, + accessibleBlocks, + insideSubflows, + variables: variableOutputs, + }) + } + + const payload = { results } + return { success: true, output: payload } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +async function executeRunWorkflow( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + + const result = await executeWorkflow( + { + id: workflowRecord.id, + userId: workflowRecord.userId, + workspaceId: workflowRecord.workspaceId, + variables: workflowRecord.variables || {}, + }, + generateRequestId(), + params.workflow_input || params.input || undefined, + context.userId + ) + + return { + success: result.success, + output: { + executionId: result.executionId, + success: result.success, + output: result.output, + logs: result.logs, + }, + error: result.success ? undefined : result.error || 'Workflow execution failed', + } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +async function executeSetGlobalWorkflowVariables( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const operations = Array.isArray(params.operations) ? params.operations : [] + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + + const currentVarsRecord = (workflowRecord.variables as Record) || {} + const byName: Record = {} + Object.values(currentVarsRecord).forEach((v: any) => { + if (v && typeof v === 'object' && v.id && v.name) byName[String(v.name)] = v + }) + + for (const op of operations) { + const key = String(op?.name || '') + if (!key) continue + const nextType = op?.type || byName[key]?.type || 'plain' + const coerceValue = (value: any, type: string) => { + if (value === undefined) return value + if (type === 'number') { + const n = Number(value) + return Number.isNaN(n) ? 
value : n + } + if (type === 'boolean') { + const v = String(value).trim().toLowerCase() + if (v === 'true') return true + if (v === 'false') return false + return value + } + if (type === 'array' || type === 'object') { + try { + const parsed = JSON.parse(String(value)) + if (type === 'array' && Array.isArray(parsed)) return parsed + if (type === 'object' && parsed && typeof parsed === 'object' && !Array.isArray(parsed)) + return parsed + } catch {} + return value + } + return value + } + + if (op.operation === 'delete') { + delete byName[key] + continue + } + const typedValue = coerceValue(op.value, nextType) + if (op.operation === 'add') { + byName[key] = { + id: crypto.randomUUID(), + workflowId, + name: key, + type: nextType, + value: typedValue, + } + continue + } + if (op.operation === 'edit') { + if (!byName[key]) { + byName[key] = { + id: crypto.randomUUID(), + workflowId, + name: key, + type: nextType, + value: typedValue, + } + } else { + byName[key] = { + ...byName[key], + type: nextType, + value: typedValue, + } + } + } + } + + const nextVarsRecord = Object.fromEntries( + Object.values(byName).map((v: any) => [String(v.id), v]) + ) + + await db + .update(workflow) + .set({ variables: nextVarsRecord, updatedAt: new Date() }) + .where(eq(workflow.id, workflowId)) + + return { success: true, output: { updated: Object.values(byName).length } } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +async function executeDeployApi( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const action = params.action === 'undeploy' ? 
'undeploy' : 'deploy' + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + + if (action === 'undeploy') { + const result = await undeployWorkflow({ workflowId }) + if (!result.success) { + return { success: false, error: result.error || 'Failed to undeploy workflow' } + } + return { success: true, output: { workflowId, isDeployed: false } } + } + + const result = await deployWorkflow({ + workflowId, + deployedBy: context.userId, + workflowName: workflowRecord.name || undefined, + }) + if (!result.success) { + return { success: false, error: result.error || 'Failed to deploy workflow' } + } + + return { + success: true, + output: { workflowId, isDeployed: true, deployedAt: result.deployedAt, version: result.version }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +async function executeDeployChat( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + + const action = params.action === 'undeploy' ? 
'undeploy' : 'deploy' + if (action === 'undeploy') { + const existing = await db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1) + if (!existing.length) { + return { success: false, error: 'No active chat deployment found for this workflow' } + } + const { hasAccess } = await checkChatAccess(existing[0].id, context.userId) + if (!hasAccess) { + return { success: false, error: 'Unauthorized chat access' } + } + await db.delete(chat).where(eq(chat.id, existing[0].id)) + return { success: true, output: { success: true, action: 'undeploy', isDeployed: false } } + } + + const { hasAccess } = await checkWorkflowAccessForChatCreation(workflowId, context.userId) + if (!hasAccess) { + return { success: false, error: 'Workflow not found or access denied' } + } + + const existing = await db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1) + const existingDeployment = existing[0] || null + + const identifier = String(params.identifier || existingDeployment?.identifier || '').trim() + const title = String(params.title || existingDeployment?.title || '').trim() + if (!identifier || !title) { + return { success: false, error: 'Chat identifier and title are required' } + } + + const identifierPattern = /^[a-z0-9-]+$/ + if (!identifierPattern.test(identifier)) { + return { success: false, error: 'Identifier can only contain lowercase letters, numbers, and hyphens' } + } + + const existingIdentifier = await db + .select() + .from(chat) + .where(eq(chat.identifier, identifier)) + .limit(1) + if (existingIdentifier.length > 0 && existingIdentifier[0].id !== existingDeployment?.id) { + return { success: false, error: 'Identifier already in use' } + } + + const deployResult = await deployWorkflow({ + workflowId, + deployedBy: context.userId, + }) + if (!deployResult.success) { + return { success: false, error: deployResult.error || 'Failed to deploy workflow' } + } + + const payload = { + workflowId, + identifier, + title, + description: 
String(params.description || existingDeployment?.description || ''), + customizations: { + primaryColor: + params.customizations?.primaryColor || + existingDeployment?.customizations?.primaryColor || + 'var(--brand-primary-hover-hex)', + welcomeMessage: + params.customizations?.welcomeMessage || + existingDeployment?.customizations?.welcomeMessage || + 'Hi there! How can I help you today?', + }, + authType: params.authType || existingDeployment?.authType || 'public', + password: params.password, + allowedEmails: params.allowedEmails || existingDeployment?.allowedEmails || [], + outputConfigs: params.outputConfigs || existingDeployment?.outputConfigs || [], + } + + if (existingDeployment) { + await db + .update(chat) + .set({ + identifier: payload.identifier, + title: payload.title, + description: payload.description, + customizations: payload.customizations, + authType: payload.authType, + password: payload.password || existingDeployment.password, + allowedEmails: + payload.authType === 'email' || payload.authType === 'sso' ? payload.allowedEmails : [], + outputConfigs: payload.outputConfigs, + updatedAt: new Date(), + }) + .where(eq(chat.id, existingDeployment.id)) + } else { + await db.insert(chat).values({ + id: crypto.randomUUID(), + workflowId, + userId: context.userId, + identifier: payload.identifier, + title: payload.title, + description: payload.description, + customizations: payload.customizations, + isActive: true, + authType: payload.authType, + password: payload.password || null, + allowedEmails: + payload.authType === 'email' || payload.authType === 'sso' ? payload.allowedEmails : [], + outputConfigs: payload.outputConfigs, + createdAt: new Date(), + updatedAt: new Date(), + }) + } + + return { success: true, output: { success: true, action: 'deploy', isDeployed: true, identifier } } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +async function executeDeployMcp( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const workspaceId = workflowRecord.workspaceId + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + + if (!workflowRecord.isDeployed) { + return { + success: false, + error: 'Workflow must be deployed before adding as an MCP tool. Use deploy_api first.', + } + } + + const serverId = params.serverId + if (!serverId) { + return { + success: false, + error: 'serverId is required. Use list_workspace_mcp_servers to get available servers.', + } + } + + const existingTool = await db + .select() + .from(workflowMcpTool) + .where(and(eq(workflowMcpTool.serverId, serverId), eq(workflowMcpTool.workflowId, workflowId))) + .limit(1) + + const toolName = sanitizeToolName(params.toolName || workflowRecord.name || `workflow_${workflowId}`) + const toolDescription = + params.toolDescription || workflowRecord.description || `Execute ${workflowRecord.name} workflow` + const parameterSchema = params.parameterSchema || {} + + if (existingTool.length > 0) { + const toolId = existingTool[0].id + await db + .update(workflowMcpTool) + .set({ + toolName, + toolDescription, + parameterSchema, + updatedAt: new Date(), + }) + .where(eq(workflowMcpTool.id, toolId)) + return { success: true, output: { toolId, toolName, toolDescription, updated: true } } + } + + const toolId = crypto.randomUUID() + await db.insert(workflowMcpTool).values({ + id: toolId, + serverId, + workflowId, + toolName, + toolDescription, + parameterSchema, + createdAt: new Date(), + updatedAt: new Date(), + }) + + return { success: true, output: { toolId, toolName, toolDescription, updated: false } } + } 
catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +async function executeRedeploy(context: ExecutionContext): Promise { + try { + const workflowId = context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + await ensureWorkflowAccess(workflowId, context.userId) + + const result = await deployWorkflow({ workflowId, deployedBy: context.userId }) + if (!result.success) { + return { success: false, error: result.error || 'Failed to redeploy workflow' } + } + return { + success: true, + output: { workflowId, deployedAt: result.deployedAt || null, version: result.version }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +async function executeCheckDeploymentStatus( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const workspaceId = workflowRecord.workspaceId + + const [apiDeploy, chatDeploy] = await Promise.all([ + db + .select() + .from(workflow) + .where(eq(workflow.id, workflowId)) + .limit(1), + db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1), + ]) + + const isApiDeployed = apiDeploy[0]?.isDeployed || false + const apiDetails = { + isDeployed: isApiDeployed, + deployedAt: apiDeploy[0]?.deployedAt || null, + endpoint: isApiDeployed ? `/api/workflows/${workflowId}/execute` : null, + apiKey: workflowRecord.workspaceId ? 'Workspace API keys' : 'Personal API keys', + needsRedeployment: false, + } + + const isChatDeployed = !!chatDeploy[0] + const chatDetails = { + isDeployed: isChatDeployed, + chatId: chatDeploy[0]?.id || null, + identifier: chatDeploy[0]?.identifier || null, + chatUrl: isChatDeployed ? 
`/chat/${chatDeploy[0]?.identifier}` : null, + title: chatDeploy[0]?.title || null, + description: chatDeploy[0]?.description || null, + authType: chatDeploy[0]?.authType || null, + allowedEmails: chatDeploy[0]?.allowedEmails || null, + outputConfigs: chatDeploy[0]?.outputConfigs || null, + welcomeMessage: chatDeploy[0]?.customizations?.welcomeMessage || null, + primaryColor: chatDeploy[0]?.customizations?.primaryColor || null, + hasPassword: Boolean(chatDeploy[0]?.password), + } + + const mcpDetails = { isDeployed: false, servers: [] as any[] } + if (workspaceId) { + const servers = await db + .select({ + serverId: workflowMcpServer.id, + serverName: workflowMcpServer.name, + toolName: workflowMcpTool.toolName, + toolDescription: workflowMcpTool.toolDescription, + parameterSchema: workflowMcpTool.parameterSchema, + toolId: workflowMcpTool.id, + }) + .from(workflowMcpTool) + .innerJoin(workflowMcpServer, eq(workflowMcpTool.serverId, workflowMcpServer.id)) + .where(eq(workflowMcpTool.workflowId, workflowId)) + + if (servers.length > 0) { + mcpDetails.isDeployed = true + mcpDetails.servers = servers + } + } + + const isDeployed = apiDetails.isDeployed || chatDetails.isDeployed || mcpDetails.isDeployed + return { + success: true, + output: { isDeployed, api: apiDetails, chat: chatDetails, mcp: mcpDetails }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +async function executeListWorkspaceMcpServers( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const workspaceId = workflowRecord.workspaceId + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + + const servers = await db + .select({ + id: workflowMcpServer.id, + name: workflowMcpServer.name, + description: workflowMcpServer.description, + }) + .from(workflowMcpServer) + .where(eq(workflowMcpServer.workspaceId, workspaceId)) + + const serverIds = servers.map((server) => server.id) + const tools = + serverIds.length > 0 + ? await db + .select({ + serverId: workflowMcpTool.serverId, + toolName: workflowMcpTool.toolName, + }) + .from(workflowMcpTool) + .where(inArray(workflowMcpTool.serverId, serverIds)) + : [] + + const toolNamesByServer: Record = {} + for (const tool of tools) { + if (!toolNamesByServer[tool.serverId]) { + toolNamesByServer[tool.serverId] = [] + } + toolNamesByServer[tool.serverId].push(tool.toolName) + } + + const serversWithToolNames = servers.map((server) => ({ + ...server, + toolCount: toolNamesByServer[server.id]?.length || 0, + toolNames: toolNamesByServer[server.id] || [], + })) + + return { success: true, output: { servers: serversWithToolNames, count: servers.length } } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +async function executeCreateWorkspaceMcpServer( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const workspaceId = workflowRecord.workspaceId + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + + const name = params.name?.trim() + if (!name) { + return { success: false, error: 'name is required' } + } + + const serverId = crypto.randomUUID() + const [server] = await db + .insert(workflowMcpServer) + .values({ + id: serverId, + workspaceId, + createdBy: context.userId, + name, + description: params.description?.trim() || null, + isPublic: params.isPublic ?? false, + createdAt: new Date(), + updatedAt: new Date(), + }) + .returning() + + const workflowIds: string[] = params.workflowIds || [] + const addedTools: Array<{ workflowId: string; toolName: string }> = [] + + if (workflowIds.length > 0) { + const workflows = await db + .select() + .from(workflow) + .where(inArray(workflow.id, workflowIds)) + + for (const wf of workflows) { + if (wf.workspaceId !== workspaceId || !wf.isDeployed) { + continue + } + const hasStartBlock = await hasValidStartBlock(wf.id) + if (!hasStartBlock) { + continue + } + const toolName = sanitizeToolName(wf.name || `workflow_${wf.id}`) + await db.insert(workflowMcpTool).values({ + id: crypto.randomUUID(), + serverId, + workflowId: wf.id, + toolName, + toolDescription: wf.description || `Execute ${wf.name} workflow`, + parameterSchema: {}, + createdAt: new Date(), + updatedAt: new Date(), + }) + addedTools.push({ workflowId: wf.id, toolName }) + } + } + + return { success: true, output: { server, addedTools } } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +async function getWorkflowVariablesForTool( + workflowId: string +): Promise> { + const [workflowRecord] = await db + .select({ variables: workflow.variables }) + .from(workflow) + .where(eq(workflow.id, workflowId)) + .limit(1) + + const variablesRecord = (workflowRecord?.variables as Record) || {} + return Object.values(variablesRecord) + .filter((v: any) => v?.name && String(v.name).trim() !== '') + .map((v: any) => ({ + id: String(v.id || ''), + name: String(v.name || ''), + type: String(v.type || 'plain'), + tag: `variable.${normalizeName(String(v.name || ''))}`, + })) +} + +function getSubflowInsidePaths( + blockType: 'loop' | 'parallel', + blockId: string, + loops: Record, + parallels: Record +): string[] { + const paths = ['index'] + if (blockType === 'loop') { + const loopType = loops[blockId]?.loopType || 'for' + if (loopType === 'forEach') { + paths.push('currentItem', 'items') + } + } else { + const parallelType = parallels[blockId]?.parallelType || 'count' + if (parallelType === 'collection') { + paths.push('currentItem', 'items') + } + } + return paths +} + +function formatOutputsWithPrefix(paths: string[], blockName: string): string[] { + const normalizedName = normalizeName(blockName) + return paths.map((path) => `${normalizedName}.${path}`) +} + /** * Notify the copilot backend that a tool has completed. 
*/ From 8b7b331267930e86658f1544f2e7bc64fe52b4b7 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Fri, 30 Jan 2026 16:24:27 -0800 Subject: [PATCH 03/72] Basic ss tes --- apps/sim/app/api/copilot/chat/route.ts | 68 ++++++++++++++++- apps/sim/app/api/v1/copilot/chat/route.ts | 75 ++++++++++++++++++- .../lib/copilot/orchestrator/tool-executor.ts | 36 ++++++++- 3 files changed, 167 insertions(+), 12 deletions(-) diff --git a/apps/sim/app/api/copilot/chat/route.ts b/apps/sim/app/api/copilot/chat/route.ts index b6fdcb9a6b..635e106c7a 100644 --- a/apps/sim/app/api/copilot/chat/route.ts +++ b/apps/sim/app/api/copilot/chat/route.ts @@ -1,7 +1,7 @@ import { db } from '@sim/db' -import { copilotChats } from '@sim/db/schema' +import { copilotChats, permissions, workflow } from '@sim/db/schema' import { createLogger } from '@sim/logger' -import { and, desc, eq } from 'drizzle-orm' +import { and, asc, desc, eq, inArray, or } from 'drizzle-orm' import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { getSession } from '@/lib/auth' @@ -40,7 +40,8 @@ const ChatMessageSchema = z.object({ message: z.string().min(1, 'Message is required'), userMessageId: z.string().optional(), // ID from frontend for the user message chatId: z.string().optional(), - workflowId: z.string().min(1, 'Workflow ID is required'), + workflowId: z.string().optional(), + workflowName: z.string().optional(), model: z.enum(COPILOT_MODEL_IDS).optional().default('claude-4.5-opus'), mode: z.enum(COPILOT_REQUEST_MODES).optional().default('agent'), prefetch: z.boolean().optional(), @@ -78,6 +79,54 @@ const ChatMessageSchema = z.object({ commands: z.array(z.string()).optional(), }) +async function resolveWorkflowId( + userId: string, + workflowId?: string, + workflowName?: string +): Promise<{ workflowId: string; workflowName?: string } | null> { + // If workflowId provided, use it directly + if (workflowId) { + return { workflowId } + } + + // Get user's accessible workflows 
+ const workspaceIds = await db + .select({ entityId: permissions.entityId }) + .from(permissions) + .where(and(eq(permissions.userId, userId), eq(permissions.entityType, 'workspace'))) + + const workspaceIdList = workspaceIds.map((row) => row.entityId) + + const workflowConditions = [eq(workflow.userId, userId)] + if (workspaceIdList.length > 0) { + workflowConditions.push(inArray(workflow.workspaceId, workspaceIdList)) + } + + const workflows = await db + .select() + .from(workflow) + .where(or(...workflowConditions)) + .orderBy(asc(workflow.sortOrder), asc(workflow.createdAt), asc(workflow.id)) + + if (workflows.length === 0) { + return null + } + + // If workflowName provided, find matching workflow + if (workflowName) { + const match = workflows.find( + (w) => String(w.name || '').trim().toLowerCase() === workflowName.toLowerCase() + ) + if (match) { + return { workflowId: match.id, workflowName: match.name || undefined } + } + return null + } + + // Default to first workflow + return { workflowId: workflows[0].id, workflowName: workflows[0].name || undefined } +} + /** * POST /api/copilot/chat * Send messages to sim agent and handle chat persistence @@ -100,7 +149,8 @@ export async function POST(req: NextRequest) { message, userMessageId, chatId, - workflowId, + workflowId: providedWorkflowId, + workflowName, model, mode, prefetch, @@ -113,6 +163,16 @@ export async function POST(req: NextRequest) { contexts, commands, } = ChatMessageSchema.parse(body) + + // Resolve workflowId - if not provided, use first workflow or find by name + const resolved = await resolveWorkflowId(authenticatedUserId, providedWorkflowId, workflowName) + if (!resolved) { + return createBadRequestResponse( + 'No workflows found. Create a workflow first or provide a valid workflowId.' 
+ ) + } + const workflowId = resolved.workflowId + // Ensure we have a consistent user message ID for this request const userMessageIdToUse = userMessageId || crypto.randomUUID() try { diff --git a/apps/sim/app/api/v1/copilot/chat/route.ts b/apps/sim/app/api/v1/copilot/chat/route.ts index 8cd1e0104f..412ed80528 100644 --- a/apps/sim/app/api/v1/copilot/chat/route.ts +++ b/apps/sim/app/api/v1/copilot/chat/route.ts @@ -1,4 +1,7 @@ +import { db } from '@sim/db' +import { permissions, workflow } from '@sim/db/schema' import { createLogger } from '@sim/logger' +import { and, asc, eq, inArray, or } from 'drizzle-orm' import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { authenticateV1Request } from '@/app/api/v1/auth' @@ -10,17 +13,71 @@ const logger = createLogger('CopilotHeadlessAPI') const RequestSchema = z.object({ message: z.string().min(1, 'message is required'), - workflowId: z.string().min(1, 'workflowId is required'), + workflowId: z.string().optional(), + workflowName: z.string().optional(), chatId: z.string().optional(), - mode: z.enum(['agent', 'ask', 'plan']).optional().default('agent'), + mode: z.enum(['agent', 'ask', 'plan', 'fast']).optional().default('fast'), model: z.string().optional(), autoExecuteTools: z.boolean().optional().default(true), timeout: z.number().optional().default(300000), }) +async function resolveWorkflowId( + userId: string, + workflowId?: string, + workflowName?: string +): Promise<{ workflowId: string; workflowName?: string } | null> { + // If workflowId provided, use it directly + if (workflowId) { + return { workflowId } + } + + // Get user's accessible workflows + const workspaceIds = await db + .select({ entityId: permissions.entityId }) + .from(permissions) + .where(and(eq(permissions.userId, userId), eq(permissions.entityType, 'workspace'))) + + const workspaceIdList = workspaceIds.map((row) => row.entityId) + + const workflowConditions = [eq(workflow.userId, userId)] + if 
(workspaceIdList.length > 0) { + workflowConditions.push(inArray(workflow.workspaceId, workspaceIdList)) + } + + const workflows = await db + .select() + .from(workflow) + .where(or(...workflowConditions)) + .orderBy(asc(workflow.sortOrder), asc(workflow.createdAt), asc(workflow.id)) + + if (workflows.length === 0) { + return null + } + + // If workflowName provided, find matching workflow + if (workflowName) { + const match = workflows.find( + (w) => String(w.name || '').trim().toLowerCase() === workflowName.toLowerCase() + ) + if (match) { + return { workflowId: match.id, workflowName: match.name || undefined } + } + return null + } + + // Default to first workflow + return { workflowId: workflows[0].id, workflowName: workflows[0].name || undefined } +} + /** * POST /api/v1/copilot/chat * Headless copilot endpoint for server-side orchestration. + * + * workflowId is optional - if not provided: + * - If workflowName is provided, finds that workflow + * - Otherwise uses the user's first workflow as context + * - The copilot can still operate on any workflow using list_user_workflows */ export async function POST(req: NextRequest) { const auth = await authenticateV1Request(req) @@ -34,9 +91,18 @@ export async function POST(req: NextRequest) { const defaults = getCopilotModel('chat') const selectedModel = parsed.model || defaults.model + // Resolve workflow ID + const resolved = await resolveWorkflowId(auth.userId, parsed.workflowId, parsed.workflowName) + if (!resolved) { + return NextResponse.json( + { success: false, error: 'No workflows found. Create a workflow first or provide a valid workflowId.' 
}, + { status: 400 } + ) + } + const requestPayload = { message: parsed.message, - workflowId: parsed.workflowId, + workflowId: resolved.workflowId, userId: auth.userId, stream: true, streamToolCalls: true, @@ -44,12 +110,13 @@ export async function POST(req: NextRequest) { mode: parsed.mode, messageId: crypto.randomUUID(), version: SIM_AGENT_VERSION, + headless: true, // Enable cross-workflow operations via workflowId params ...(parsed.chatId ? { chatId: parsed.chatId } : {}), } const result = await orchestrateCopilotStream(requestPayload, { userId: auth.userId, - workflowId: parsed.workflowId, + workflowId: resolved.workflowId, chatId: parsed.chatId, autoExecuteTools: parsed.autoExecuteTools, timeout: parsed.timeout, diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor.ts b/apps/sim/lib/copilot/orchestrator/tool-executor.ts index 3d5ec4d696..15638b0a7f 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor.ts @@ -302,7 +302,10 @@ async function executeGetUserWorkflow( return { success: false, error: 'workflowId is required' } } - await ensureWorkflowAccess(workflowId, context.userId) + const { workflow: workflowRecord, workspaceId } = await ensureWorkflowAccess( + workflowId, + context.userId + ) const normalized = await loadWorkflowFromNormalizedTables(workflowId) if (!normalized) { @@ -318,7 +321,16 @@ async function executeGetUserWorkflow( const sanitized = sanitizeForCopilot(workflowState) const userWorkflow = JSON.stringify(sanitized, null, 2) - return { success: true, output: { userWorkflow } } + // Return workflow ID so copilot can use it for subsequent tool calls + return { + success: true, + output: { + workflowId, + workflowName: workflowRecord.name || '', + workspaceId, + userWorkflow, + }, + } } catch (error) { return { success: false, error: error instanceof Error ? 
error.message : String(error) } } @@ -371,7 +383,16 @@ async function executeGetWorkflowFromName( const sanitized = sanitizeForCopilot(workflowState) const userWorkflow = JSON.stringify(sanitized, null, 2) - return { success: true, output: { userWorkflow } } + // Return workflow ID and workspaceId so copilot can use them for subsequent tool calls + return { + success: true, + output: { + workflowId: match.id, + workflowName: match.name || '', + workspaceId: match.workspaceId, + userWorkflow, + }, + } } catch (error) { return { success: false, error: error instanceof Error ? error.message : String(error) } } @@ -396,11 +417,18 @@ async function executeListUserWorkflows(context: ExecutionContext): Promise (typeof w.name === 'string' ? w.name : null)) .filter((n): n is string => Boolean(n)) - return { success: true, output: { workflow_names: names } } + const workflowList = workflows.map((w) => ({ + workflowId: w.id, + workflowName: w.name || '', + workspaceId: w.workspaceId, + })) + + return { success: true, output: { workflow_names: names, workflows: workflowList } } } catch (error) { return { success: false, error: error instanceof Error ? 
error.message : String(error) } } From 301e25c81db8355cd01389d39243360c31c51416 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Fri, 30 Jan 2026 16:52:23 -0800 Subject: [PATCH 04/72] Ss tests --- apps/sim/app/api/v1/copilot/chat/route.ts | 19 ++-- .../lib/copilot/orchestrator/sse-handlers.ts | 86 +++++++++++++++++-- .../tools/server/workflow/edit-workflow.ts | 36 +++++++- 3 files changed, 128 insertions(+), 13 deletions(-) diff --git a/apps/sim/app/api/v1/copilot/chat/route.ts b/apps/sim/app/api/v1/copilot/chat/route.ts index 412ed80528..cab197ad5b 100644 --- a/apps/sim/app/api/v1/copilot/chat/route.ts +++ b/apps/sim/app/api/v1/copilot/chat/route.ts @@ -7,6 +7,7 @@ import { z } from 'zod' import { authenticateV1Request } from '@/app/api/v1/auth' import { getCopilotModel } from '@/lib/copilot/config' import { SIM_AGENT_VERSION } from '@/lib/copilot/constants' +import { COPILOT_REQUEST_MODES } from '@/lib/copilot/models' import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator' const logger = createLogger('CopilotHeadlessAPI') @@ -16,7 +17,7 @@ const RequestSchema = z.object({ workflowId: z.string().optional(), workflowName: z.string().optional(), chatId: z.string().optional(), - mode: z.enum(['agent', 'ask', 'plan', 'fast']).optional().default('fast'), + mode: z.enum(COPILOT_REQUEST_MODES).optional().default('agent'), model: z.string().optional(), autoExecuteTools: z.boolean().optional().default(true), timeout: z.number().optional().default(300000), @@ -100,6 +101,14 @@ export async function POST(req: NextRequest) { ) } + // Transform mode to transport mode (same as client API) + // build and agent both map to 'agent' on the backend + const effectiveMode = parsed.mode === 'agent' ? 'build' : parsed.mode + const transportMode = effectiveMode === 'build' ? 
'agent' : effectiveMode + + // Always generate a chatId - required for artifacts system to work with subagents + const chatId = parsed.chatId || crypto.randomUUID() + const requestPayload = { message: parsed.message, workflowId: resolved.workflowId, @@ -107,17 +116,17 @@ export async function POST(req: NextRequest) { stream: true, streamToolCalls: true, model: selectedModel, - mode: parsed.mode, + mode: transportMode, messageId: crypto.randomUUID(), version: SIM_AGENT_VERSION, headless: true, // Enable cross-workflow operations via workflowId params - ...(parsed.chatId ? { chatId: parsed.chatId } : {}), + chatId, } const result = await orchestrateCopilotStream(requestPayload, { userId: auth.userId, workflowId: resolved.workflowId, - chatId: parsed.chatId, + chatId, autoExecuteTools: parsed.autoExecuteTools, timeout: parsed.timeout, interactive: false, @@ -127,7 +136,7 @@ export async function POST(req: NextRequest) { success: result.success, content: result.content, toolCalls: result.toolCalls, - chatId: result.chatId, + chatId: result.chatId || chatId, // Return the chatId for conversation continuity conversationId: result.conversationId, error: result.error, }) diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts index 269f2e43ba..e3b26df538 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts @@ -13,6 +13,21 @@ import { INTERRUPT_TOOL_SET, SUBAGENT_TOOL_SET } from '@/lib/copilot/orchestrato const logger = createLogger('CopilotSseHandlers') +/** + * Respond tools are internal to the copilot's subagent system. + * They're used by subagents to signal completion and should NOT be executed by the sim side. + * The copilot backend handles these internally. 
+ */ +const RESPOND_TOOL_SET = new Set([ + 'plan_respond', + 'edit_respond', + 'debug_respond', + 'info_respond', + 'research_respond', + 'deploy_respond', + 'superagent_respond', +]) + export type SSEHandler = ( event: SSEEvent, context: StreamingContext, @@ -112,15 +127,26 @@ export const sseHandlers: Record = { const current = context.toolCalls.get(toolCallId) if (!current) return - const success = event.data?.success ?? event.data?.result?.success + // Determine success: explicit success field, or if there's result data without explicit failure + const hasExplicitSuccess = event.data?.success !== undefined || event.data?.result?.success !== undefined + const explicitSuccess = event.data?.success ?? event.data?.result?.success + const hasResultData = event.data?.result !== undefined || event.data?.data !== undefined + const hasError = !!event.data?.error || !!event.data?.result?.error + + // If explicitly set, use that; otherwise infer from data presence + const success = hasExplicitSuccess ? !!explicitSuccess : (hasResultData && !hasError) + current.status = success ? 
'success' : 'error' current.endTime = Date.now() - if (event.data?.result || event.data?.data) { + if (hasResultData) { current.result = { - success: !!success, + success, output: event.data?.result || event.data?.data, } } + if (hasError) { + current.error = event.data?.error || event.data?.result?.error + } }, tool_error: (event, context) => { const toolCallId = event.toolCallId || event.data?.id @@ -168,10 +194,17 @@ export const sseHandlers: Record = { if (isPartial) return + // Subagent tools are executed by the copilot backend, not sim side if (SUBAGENT_TOOL_SET.has(toolName)) { return } + // Respond tools are internal to copilot's subagent system - skip execution + // The copilot backend handles these internally to signal subagent completion + if (RESPOND_TOOL_SET.has(toolName)) { + return + } + const isInterruptTool = INTERRUPT_TOOL_SET.has(toolName) const isInteractive = options.interactive === true @@ -309,12 +342,21 @@ export const subAgentHandlers: Record = { params: args, startTime: Date.now(), } + + // Store in both places - subAgentToolCalls for tracking and toolCalls for executeToolAndReport if (!context.subAgentToolCalls[parentToolCallId]) { context.subAgentToolCalls[parentToolCallId] = [] } context.subAgentToolCalls[parentToolCallId].push(toolCall) + context.toolCalls.set(toolCallId, toolCall) if (isPartial) return + + // Respond tools are internal to copilot's subagent system - skip execution + if (RESPOND_TOOL_SET.has(toolName)) { + return + } + if (options.autoExecuteTools !== false) { await executeToolAndReport(toolCallId, context, execContext, options) } @@ -324,11 +366,41 @@ export const subAgentHandlers: Record = { if (!parentToolCallId) return const toolCallId = event.toolCallId || event.data?.id if (!toolCallId) return + + // Update in subAgentToolCalls const toolCalls = context.subAgentToolCalls[parentToolCallId] || [] - const toolCall = toolCalls.find((tc) => tc.id === toolCallId) - if (!toolCall) return - toolCall.status = 
event.data?.success ? 'success' : 'error' - toolCall.endTime = Date.now() + const subAgentToolCall = toolCalls.find((tc) => tc.id === toolCallId) + + // Also update in main toolCalls (where we added it for execution) + const mainToolCall = context.toolCalls.get(toolCallId) + + // Use same success inference logic as main handler + const hasExplicitSuccess = + event.data?.success !== undefined || event.data?.result?.success !== undefined + const explicitSuccess = event.data?.success ?? event.data?.result?.success + const hasResultData = event.data?.result !== undefined || event.data?.data !== undefined + const hasError = !!event.data?.error || !!event.data?.result?.error + const success = hasExplicitSuccess ? !!explicitSuccess : hasResultData && !hasError + + const status = success ? 'success' : 'error' + const endTime = Date.now() + const result = hasResultData + ? { success, output: event.data?.result || event.data?.data } + : undefined + + if (subAgentToolCall) { + subAgentToolCall.status = status + subAgentToolCall.endTime = endTime + if (result) subAgentToolCall.result = result + if (hasError) subAgentToolCall.error = event.data?.error || event.data?.result?.error + } + + if (mainToolCall) { + mainToolCall.status = status + mainToolCall.endTime = endTime + if (result) mainToolCall.result = result + if (hasError) mainToolCall.error = event.data?.error || event.data?.result?.error + } }, } diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts index 66bb54ffaa..c7c5b0c4da 100644 --- a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts +++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts @@ -8,7 +8,10 @@ import { validateSelectorIds } from '@/lib/copilot/validation/selector-validator import type { PermissionGroupConfig } from '@/lib/permission-groups/types' import { getBlockOutputs } from '@/lib/workflows/blocks/block-outputs' import { extractAndPersistCustomTools 
} from '@/lib/workflows/persistence/custom-tools-persistence' -import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils' +import { + loadWorkflowFromNormalizedTables, + saveWorkflowToNormalizedTables, +} from '@/lib/workflows/persistence/utils' import { isValidKey } from '@/lib/workflows/sanitization/key-validation' import { validateWorkflowState } from '@/lib/workflows/sanitization/validation' import { buildCanonicalIndex, isCanonicalPair } from '@/lib/workflows/subblocks/visibility' @@ -3162,6 +3165,37 @@ export const editWorkflowServerTool: BaseServerTool = { const skippedMessages = skippedItems.length > 0 ? skippedItems.map((item) => item.reason) : undefined + // Persist the workflow state to the database + const finalWorkflowState = validation.sanitizedState || modifiedWorkflowState + const workflowStateForDb = { + blocks: finalWorkflowState.blocks, + edges: finalWorkflowState.edges, + loops: generateLoopBlocks(finalWorkflowState.blocks as any), + parallels: generateParallelBlocks(finalWorkflowState.blocks as any), + lastSaved: Date.now(), + isDeployed: false, + } + + const saveResult = await saveWorkflowToNormalizedTables(workflowId, workflowStateForDb as any) + if (!saveResult.success) { + logger.error('Failed to persist workflow state to database', { + workflowId, + error: saveResult.error, + }) + throw new Error(`Failed to save workflow: ${saveResult.error}`) + } + + // Update workflow's lastSynced timestamp + await db + .update(workflowTable) + .set({ + lastSynced: new Date(), + updatedAt: new Date(), + }) + .where(eq(workflowTable.id, workflowId)) + + logger.info('Workflow state persisted to database', { workflowId }) + // Return the modified workflow state for the client to convert to YAML if needed return { success: true, From 4c821d03f5e50d73c08e9b55b560a6bf0fbba3b7 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Fri, 30 Jan 2026 17:01:15 -0800 Subject: [PATCH 05/72] Stuff --- 
.../lib/copilot/orchestrator/sse-handlers.ts | 6 ++++ .../tools/server/workflow/edit-workflow.ts | 30 +++++++++++++++---- 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts index e3b26df538..dbe512b725 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts @@ -202,6 +202,9 @@ export const sseHandlers: Record = { // Respond tools are internal to copilot's subagent system - skip execution // The copilot backend handles these internally to signal subagent completion if (RESPOND_TOOL_SET.has(toolName)) { + toolCall.status = 'success' + toolCall.endTime = Date.now() + toolCall.result = { success: true, output: 'Internal respond tool - handled by copilot backend' } return } @@ -354,6 +357,9 @@ export const subAgentHandlers: Record = { // Respond tools are internal to copilot's subagent system - skip execution if (RESPOND_TOOL_SET.has(toolName)) { + toolCall.status = 'success' + toolCall.endTime = Date.now() + toolCall.result = { success: true, output: 'Internal respond tool - handled by copilot backend' } return } diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts index c7c5b0c4da..1caa1b16f5 100644 --- a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts +++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts @@ -8,6 +8,7 @@ import { validateSelectorIds } from '@/lib/copilot/validation/selector-validator import type { PermissionGroupConfig } from '@/lib/permission-groups/types' import { getBlockOutputs } from '@/lib/workflows/blocks/block-outputs' import { extractAndPersistCustomTools } from '@/lib/workflows/persistence/custom-tools-persistence' +import { applyAutoLayout } from '@/lib/workflows/autolayout' import { loadWorkflowFromNormalizedTables, saveWorkflowToNormalizedTables, @@ -3167,11 
+3168,30 @@ export const editWorkflowServerTool: BaseServerTool = { // Persist the workflow state to the database const finalWorkflowState = validation.sanitizedState || modifiedWorkflowState + + // Apply autolayout to position blocks properly + const layoutResult = applyAutoLayout(finalWorkflowState.blocks, finalWorkflowState.edges, { + horizontalSpacing: 250, + verticalSpacing: 100, + padding: { x: 100, y: 100 }, + }) + + const layoutedBlocks = layoutResult.success && layoutResult.blocks + ? layoutResult.blocks + : finalWorkflowState.blocks + + if (!layoutResult.success) { + logger.warn('Autolayout failed, using default positions', { + workflowId, + error: layoutResult.error, + }) + } + const workflowStateForDb = { - blocks: finalWorkflowState.blocks, + blocks: layoutedBlocks, edges: finalWorkflowState.edges, - loops: generateLoopBlocks(finalWorkflowState.blocks as any), - parallels: generateParallelBlocks(finalWorkflowState.blocks as any), + loops: generateLoopBlocks(layoutedBlocks as any), + parallels: generateParallelBlocks(layoutedBlocks as any), lastSaved: Date.now(), isDeployed: false, } @@ -3196,10 +3216,10 @@ export const editWorkflowServerTool: BaseServerTool = { logger.info('Workflow state persisted to database', { workflowId }) - // Return the modified workflow state for the client to convert to YAML if needed + // Return the modified workflow state with autolayout applied return { success: true, - workflowState: validation.sanitizedState || modifiedWorkflowState, + workflowState: { ...finalWorkflowState, blocks: layoutedBlocks }, // Include input validation errors so the LLM can see what was rejected ...(inputErrors && { inputValidationErrors: inputErrors, From 6cd8f1d5fcd6450ac8588e047a84891b4807cd39 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Sat, 31 Jan 2026 11:38:26 -0800 Subject: [PATCH 06/72] Add mcp --- apps/sim/app/api/mcp/copilot/route.ts | 517 ++++++++++++++++++ .../lib/copilot/orchestrator/sse-handlers.ts | 1 + 
apps/sim/lib/copilot/orchestrator/subagent.ts | 239 ++++++++ .../lib/copilot/orchestrator/tool-executor.ts | 273 ++++++++- apps/sim/lib/copilot/orchestrator/types.ts | 2 + 5 files changed, 1027 insertions(+), 5 deletions(-) create mode 100644 apps/sim/app/api/mcp/copilot/route.ts create mode 100644 apps/sim/lib/copilot/orchestrator/subagent.ts diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts new file mode 100644 index 0000000000..6e4d6a9a73 --- /dev/null +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -0,0 +1,517 @@ +import { + type CallToolResult, + ErrorCode, + type InitializeResult, + isJSONRPCNotification, + isJSONRPCRequest, + type JSONRPCError, + type JSONRPCMessage, + type JSONRPCResponse, + type ListToolsResult, + type RequestId, +} from '@modelcontextprotocol/sdk/types.js' +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { checkHybridAuth } from '@/lib/auth/hybrid' +import { getCopilotModel } from '@/lib/copilot/config' +import { orchestrateSubagentStream } from '@/lib/copilot/orchestrator/subagent' +import { executeToolServerSide, prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor' + +const logger = createLogger('CopilotMcpAPI') + +export const dynamic = 'force-dynamic' + +/** + * Direct tools that execute immediately without LLM orchestration. + * These are fast database queries that don't need AI reasoning. + */ +const DIRECT_TOOL_DEFS: Array<{ + name: string + description: string + inputSchema: { type: 'object'; properties?: Record; required?: string[] } + toolId: string +}> = [ + { + name: 'list_workflows', + toolId: 'list_user_workflows', + description: 'List all workflows the user has access to. 
Returns workflow IDs, names, and workspace info.', + inputSchema: { + type: 'object', + properties: { + workspaceId: { + type: 'string', + description: 'Optional workspace ID to filter workflows.', + }, + folderId: { + type: 'string', + description: 'Optional folder ID to filter workflows.', + }, + }, + }, + }, + { + name: 'list_workspaces', + toolId: 'list_user_workspaces', + description: 'List all workspaces the user has access to. Returns workspace IDs, names, and roles.', + inputSchema: { + type: 'object', + properties: {}, + }, + }, + { + name: 'list_folders', + toolId: 'list_folders', + description: 'List all folders in a workspace.', + inputSchema: { + type: 'object', + properties: { + workspaceId: { + type: 'string', + description: 'Workspace ID to list folders from.', + }, + }, + required: ['workspaceId'], + }, + }, + { + name: 'get_workflow', + toolId: 'get_workflow_from_name', + description: 'Get a workflow by name or ID. Returns the full workflow definition.', + inputSchema: { + type: 'object', + properties: { + name: { + type: 'string', + description: 'Workflow name to search for.', + }, + workflowId: { + type: 'string', + description: 'Workflow ID to retrieve directly.', + }, + }, + }, + }, +] + +const SUBAGENT_TOOL_DEFS: Array<{ + name: string + description: string + inputSchema: { type: 'object'; properties?: Record; required?: string[] } + agentId: string +}> = [ + { + name: 'copilot_discovery', + agentId: 'discovery', + description: `Find workflows by their contents or functionality when the user doesn't know the exact name or ID. 
+ +USE THIS WHEN: +- User describes a workflow by what it does: "the one that sends emails", "my Slack notification workflow" +- User refers to workflow contents: "the workflow with the OpenAI block" +- User needs to search/match workflows by functionality or description + +DO NOT USE (use direct tools instead): +- User knows the workflow name → use get_workflow +- User wants to list all workflows → use list_workflows +- User wants to list workspaces → use list_workspaces +- User wants to list folders → use list_folders`, + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + workspaceId: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, + { + name: 'copilot_plan', + agentId: 'plan', + description: 'Plan workflow changes by gathering required information.', + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + workflowId: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, + { + name: 'copilot_edit', + agentId: 'edit', + description: 'Execute a workflow plan and apply edits.', + inputSchema: { + type: 'object', + properties: { + message: { type: 'string' }, + workflowId: { type: 'string' }, + plan: { type: 'object' }, + context: { type: 'object' }, + }, + required: ['workflowId'], + }, + }, + { + name: 'copilot_debug', + agentId: 'debug', + description: 'Diagnose errors or unexpected workflow behavior.', + inputSchema: { + type: 'object', + properties: { + error: { type: 'string' }, + workflowId: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['error'], + }, + }, + { + name: 'copilot_deploy', + agentId: 'deploy', + description: 'Deploy or manage workflow deployments.', + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + workflowId: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, + { + name: 'copilot_auth', + agentId: 'auth', + 
description: 'Handle OAuth connection flows.', + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, + { + name: 'copilot_knowledge', + agentId: 'knowledge', + description: 'Create and manage knowledge bases.', + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, + { + name: 'copilot_custom_tool', + agentId: 'custom_tool', + description: 'Create or manage custom tools.', + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, + { + name: 'copilot_info', + agentId: 'info', + description: 'Inspect blocks, outputs, and workflow metadata.', + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + workflowId: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, + { + name: 'copilot_workflow', + agentId: 'workflow', + description: 'Manage workflow environment and configuration.', + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + workflowId: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, + { + name: 'copilot_research', + agentId: 'research', + description: 'Research external APIs and documentation.', + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, + { + name: 'copilot_tour', + agentId: 'tour', + description: 'Explain platform features and usage.', + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, + { + name: 'copilot_test', + agentId: 'test', + description: 'Run workflows and verify outputs.', + inputSchema: { + type: 'object', + 
properties: { + request: { type: 'string' }, + workflowId: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, + { + name: 'copilot_superagent', + agentId: 'superagent', + description: 'Execute direct external actions (email, Slack, etc.).', + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, +] + +function createResponse(id: RequestId, result: unknown): JSONRPCResponse { + return { + jsonrpc: '2.0', + id, + result: result as JSONRPCResponse['result'], + } +} + +function createError(id: RequestId, code: ErrorCode | number, message: string): JSONRPCError { + return { + jsonrpc: '2.0', + id, + error: { code, message }, + } +} + +export async function GET() { + return NextResponse.json({ + name: 'copilot-subagents', + version: '1.0.0', + protocolVersion: '2024-11-05', + capabilities: { tools: {} }, + }) +} + +export async function POST(request: NextRequest) { + try { + const auth = await checkHybridAuth(request, { requireWorkflowId: false }) + if (!auth.success || !auth.userId) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) + } + + const body = (await request.json()) as JSONRPCMessage + + if (isJSONRPCNotification(body)) { + return new NextResponse(null, { status: 202 }) + } + + if (!isJSONRPCRequest(body)) { + return NextResponse.json( + createError(0, ErrorCode.InvalidRequest, 'Invalid JSON-RPC message'), + { status: 400 } + ) + } + + const { id, method, params } = body + + switch (method) { + case 'initialize': { + const result: InitializeResult = { + protocolVersion: '2024-11-05', + capabilities: { tools: {} }, + serverInfo: { name: 'copilot-subagents', version: '1.0.0' }, + } + return NextResponse.json(createResponse(id, result)) + } + case 'ping': + return NextResponse.json(createResponse(id, {})) + case 'tools/list': + return handleToolsList(id) + case 'tools/call': + return handleToolsCall( + id, 
+ params as { name: string; arguments?: Record }, + auth.userId + ) + default: + return NextResponse.json( + createError(id, ErrorCode.MethodNotFound, `Method not found: ${method}`), + { status: 404 } + ) + } + } catch (error) { + logger.error('Error handling MCP request', { error }) + return NextResponse.json(createError(0, ErrorCode.InternalError, 'Internal error'), { + status: 500, + }) + } +} + +async function handleToolsList(id: RequestId): Promise { + const directTools = DIRECT_TOOL_DEFS.map((tool) => ({ + name: tool.name, + description: tool.description, + inputSchema: tool.inputSchema, + })) + + const subagentTools = SUBAGENT_TOOL_DEFS.map((tool) => ({ + name: tool.name, + description: tool.description, + inputSchema: tool.inputSchema, + })) + + const result: ListToolsResult = { + tools: [...directTools, ...subagentTools], + } + + return NextResponse.json(createResponse(id, result)) +} + +async function handleToolsCall( + id: RequestId, + params: { name: string; arguments?: Record }, + userId: string +): Promise { + const args = params.arguments || {} + + // Check if this is a direct tool (fast, no LLM) + const directTool = DIRECT_TOOL_DEFS.find((tool) => tool.name === params.name) + if (directTool) { + return handleDirectToolCall(id, directTool, args, userId) + } + + // Check if this is a subagent tool (slower, uses LLM) + const subagentTool = SUBAGENT_TOOL_DEFS.find((tool) => tool.name === params.name) + if (subagentTool) { + return handleSubagentToolCall(id, subagentTool, args, userId) + } + + return NextResponse.json( + createError(id, ErrorCode.MethodNotFound, `Tool not found: ${params.name}`), + { status: 404 } + ) + } + +async function handleDirectToolCall( + id: RequestId, + toolDef: (typeof DIRECT_TOOL_DEFS)[number], + args: Record, + userId: string +): Promise { + try { + const execContext = await prepareExecutionContext(userId, (args.workflowId as string) || '') + + const toolCall = { + id: crypto.randomUUID(), + name: toolDef.toolId, + status: 
'pending' as const, + params: args as Record, + startTime: Date.now(), + } + + const result = await executeToolServerSide(toolCall, execContext) + + const response: CallToolResult = { + content: [ + { + type: 'text', + text: JSON.stringify(result.output ?? result, null, 2), + }, + ], + isError: !result.success, + } + + return NextResponse.json(createResponse(id, response)) + } catch (error) { + logger.error('Direct tool execution failed', { tool: toolDef.name, error }) + return NextResponse.json( + createError(id, ErrorCode.InternalError, `Tool execution failed: ${error}`), + { status: 500 } + ) + } +} + +async function handleSubagentToolCall( + id: RequestId, + toolDef: (typeof SUBAGENT_TOOL_DEFS)[number], + args: Record, + userId: string +): Promise { + const requestText = + (args.request as string) || + (args.message as string) || + (args.error as string) || + JSON.stringify(args) + + const context = (args.context as Record) || {} + if (args.plan && !context.plan) { + context.plan = args.plan + } + + const { model } = getCopilotModel('chat') + + const result = await orchestrateSubagentStream( + toolDef.agentId, + { + message: requestText, + workflowId: args.workflowId, + workspaceId: args.workspaceId, + context, + model, + }, + { + userId, + workflowId: args.workflowId as string | undefined, + workspaceId: args.workspaceId as string | undefined, + } + ) + + const response: CallToolResult = { + content: [ + { + type: 'text', + text: JSON.stringify(result, null, 2), + }, + ], + isError: !result.success, + } + + return NextResponse.json(createResponse(id, response)) +} + diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts index dbe512b725..d65da8e89a 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts @@ -26,6 +26,7 @@ const RESPOND_TOOL_SET = new Set([ 'research_respond', 'deploy_respond', 'superagent_respond', + 'discovery_respond', ]) 
export type SSEHandler = ( diff --git a/apps/sim/lib/copilot/orchestrator/subagent.ts b/apps/sim/lib/copilot/orchestrator/subagent.ts new file mode 100644 index 0000000000..bdc69fd68b --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/subagent.ts @@ -0,0 +1,239 @@ +import { createLogger } from '@sim/logger' +import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' +import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser' +import { + sseHandlers, + subAgentHandlers, + handleSubagentRouting, +} from '@/lib/copilot/orchestrator/sse-handlers' +import { prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor' +import type { + ExecutionContext, + OrchestratorOptions, + SSEEvent, + StreamingContext, + ToolCallSummary, +} from '@/lib/copilot/orchestrator/types' +import { env } from '@/lib/core/config/env' +import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' + +const logger = createLogger('CopilotSubagentOrchestrator') +const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT + +export interface SubagentOrchestratorOptions extends OrchestratorOptions { + userId: string + workflowId?: string + workspaceId?: string +} + +export interface SubagentOrchestratorResult { + success: boolean + content: string + toolCalls: ToolCallSummary[] + structuredResult?: { + type?: string + summary?: string + data?: any + success?: boolean + } + error?: string + errors?: string[] +} + +export async function orchestrateSubagentStream( + agentId: string, + requestPayload: Record, + options: SubagentOrchestratorOptions +): Promise { + const { userId, workflowId, workspaceId, timeout = 300000, abortSignal } = options + const execContext = await buildExecutionContext(userId, workflowId, workspaceId) + + const context: StreamingContext = { + chatId: undefined, + conversationId: undefined, + messageId: requestPayload?.messageId || crypto.randomUUID(), + accumulatedContent: '', + contentBlocks: [], + toolCalls: new Map(), 
+ currentThinkingBlock: null, + isInThinkingBlock: false, + subAgentParentToolCallId: undefined, + subAgentContent: {}, + subAgentToolCalls: {}, + pendingContent: '', + streamComplete: false, + wasAborted: false, + errors: [], + } + + let structuredResult: SubagentOrchestratorResult['structuredResult'] + + try { + const response = await fetch(`${SIM_AGENT_API_URL}/api/subagent/${agentId}`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), + }, + body: JSON.stringify({ ...requestPayload, stream: true, userId }), + signal: abortSignal, + }) + + if (!response.ok) { + const errorText = await response.text().catch(() => '') + throw new Error( + `Copilot backend error (${response.status}): ${errorText || response.statusText}` + ) + } + + if (!response.body) { + throw new Error('Copilot backend response missing body') + } + + const reader = response.body.getReader() + const decoder = new TextDecoder() + + const timeoutId = setTimeout(() => { + context.errors.push('Request timed out') + context.streamComplete = true + reader.cancel().catch(() => {}) + }, timeout) + + try { + for await (const event of parseSSEStream(reader, decoder, abortSignal)) { + if (abortSignal?.aborted) { + context.wasAborted = true + break + } + + await forwardEvent(event, options) + + if (event.type === 'structured_result' || event.type === 'subagent_result') { + structuredResult = normalizeStructuredResult(event.data) + context.streamComplete = true + continue + } + + // Handle subagent_start/subagent_end events to track nested subagent calls + if (event.type === 'subagent_start') { + const toolCallId = event.data?.tool_call_id + if (toolCallId) { + context.subAgentParentToolCallId = toolCallId + context.subAgentContent[toolCallId] = '' + context.subAgentToolCalls[toolCallId] = [] + } + continue + } + + if (event.type === 'subagent_end') { + context.subAgentParentToolCallId = undefined + continue + } + + // 
For direct subagent calls, events may have the subagent field set (e.g., subagent: "discovery") + // but no subagent_start event because this IS the top-level agent. Skip subagent routing + // for events where the subagent field matches the current agentId - these are top-level events. + const isTopLevelSubagentEvent = event.subagent === agentId && !context.subAgentParentToolCallId + + // Only route to subagent handlers for nested subagent events (not matching current agentId) + if (!isTopLevelSubagentEvent && handleSubagentRouting(event, context)) { + const handler = subAgentHandlers[event.type] + if (handler) { + await handler(event, context, execContext, options) + } + if (context.streamComplete) break + continue + } + + // Process as a regular SSE event (including top-level subagent events) + const handler = sseHandlers[event.type] + if (handler) { + await handler(event, context, execContext, options) + } + if (context.streamComplete) break + } + } finally { + clearTimeout(timeoutId) + } + + const result = buildResult(context, structuredResult) + await options.onComplete?.(result) + return result + } catch (error) { + const err = error instanceof Error ? error : new Error('Subagent orchestration failed') + logger.error('Subagent orchestration failed', { error: err.message, agentId }) + await options.onError?.(err) + return { + success: false, + content: context.accumulatedContent, + toolCalls: [], + error: err.message, + } + } +} + +async function forwardEvent(event: SSEEvent, options: OrchestratorOptions): Promise { + try { + await options.onEvent?.(event) + } catch (error) { + logger.warn('Failed to forward SSE event', { + type: event.type, + error: error instanceof Error ? 
error.message : String(error), + }) + } +} + +function normalizeStructuredResult(data: any): SubagentOrchestratorResult['structuredResult'] { + if (!data || typeof data !== 'object') { + return undefined + } + return { + type: data.result_type || data.type, + summary: data.summary, + data: data.data ?? data, + success: data.success, + } +} + +async function buildExecutionContext( + userId: string, + workflowId?: string, + workspaceId?: string +): Promise { + if (workflowId) { + return prepareExecutionContext(userId, workflowId) + } + + const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId) + return { + userId, + workflowId: workflowId || '', + workspaceId, + decryptedEnvVars, + } +} + +function buildResult( + context: StreamingContext, + structuredResult?: SubagentOrchestratorResult['structuredResult'] +): SubagentOrchestratorResult { + const toolCalls: ToolCallSummary[] = Array.from(context.toolCalls.values()).map((toolCall) => ({ + id: toolCall.id, + name: toolCall.name, + status: toolCall.status, + params: toolCall.params, + result: toolCall.result?.output, + error: toolCall.error, + durationMs: + toolCall.endTime && toolCall.startTime ? toolCall.endTime - toolCall.startTime : undefined, + })) + + return { + success: context.errors.length === 0 && !context.wasAborted, + content: context.accumulatedContent, + toolCalls, + structuredResult, + errors: context.errors.length ? 
context.errors : undefined, + } +} + diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor.ts b/apps/sim/lib/copilot/orchestrator/tool-executor.ts index 15638b0a7f..e2611c90df 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor.ts @@ -5,11 +5,13 @@ import { customTools, permissions, workflow, + workflowFolder, workflowMcpServer, workflowMcpTool, + workspace, } from '@sim/db/schema' import { createLogger } from '@sim/logger' -import { and, asc, desc, eq, inArray, isNull, or } from 'drizzle-orm' +import { and, asc, desc, eq, inArray, isNull, max, or } from 'drizzle-orm' import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils' import { checkChatAccess, checkWorkflowAccessForChatCreation } from '@/app/api/chat/utils' import { resolveEnvVarReferences } from '@/executor/utils/reference-validation' @@ -21,9 +23,14 @@ import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' import { listWorkspaceFiles } from '@/lib/uploads/contexts/workspace' import { mcpService } from '@/lib/mcp/service' import { sanitizeForCopilot } from '@/lib/workflows/sanitization/json-sanitizer' -import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils' +import { buildDefaultWorkflowArtifacts } from '@/lib/workflows/defaults' +import { + deployWorkflow, + loadWorkflowFromNormalizedTables, + saveWorkflowToNormalizedTables, + undeployWorkflow, +} from '@/lib/workflows/persistence/utils' import { executeWorkflow } from '@/lib/workflows/executor/execute-workflow' -import { deployWorkflow, undeployWorkflow } from '@/lib/workflows/persistence/utils' import { BlockPathCalculator } from '@/lib/workflows/blocks/block-path-calculator' import { getBlockOutputPaths } from '@/lib/workflows/blocks/block-outputs' import { isInputDefinitionTrigger } from '@/lib/workflows/triggers/input-definition-triggers' @@ -57,6 +64,10 @@ const SIM_WORKFLOW_TOOLS = new Set([ 'get_user_workflow', 
'get_workflow_from_name', 'list_user_workflows', + 'list_user_workspaces', + 'list_folders', + 'create_workflow', + 'create_folder', 'get_workflow_data', 'get_block_outputs', 'get_block_upstream_references', @@ -225,7 +236,15 @@ async function executeSimWorkflowTool( case 'get_workflow_from_name': return executeGetWorkflowFromName(params, context) case 'list_user_workflows': - return executeListUserWorkflows(context) + return executeListUserWorkflows(params, context) + case 'list_user_workspaces': + return executeListUserWorkspaces(context) + case 'list_folders': + return executeListFolders(params, context) + case 'create_workflow': + return executeCreateWorkflow(params, context) + case 'create_folder': + return executeCreateFolder(params, context) case 'get_workflow_data': return executeGetWorkflowData(params, context) case 'get_block_outputs': @@ -292,6 +311,61 @@ async function ensureWorkflowAccess(workflowId: string, userId: string): Promise throw new Error('Unauthorized workflow access') } +async function getDefaultWorkspaceId(userId: string): Promise { + const workspaces = await db + .select({ workspaceId: workspace.id }) + .from(permissions) + .innerJoin(workspace, eq(permissions.entityId, workspace.id)) + .where(and(eq(permissions.userId, userId), eq(permissions.entityType, 'workspace'))) + .orderBy(desc(workspace.createdAt)) + .limit(1) + + const workspaceId = workspaces[0]?.workspaceId + if (!workspaceId) { + throw new Error('No workspace found for user') + } + + return workspaceId +} + +async function ensureWorkspaceAccess( + workspaceId: string, + userId: string, + requireWrite: boolean +): Promise { + const [row] = await db + .select({ + permissionType: permissions.permissionType, + ownerId: workspace.ownerId, + }) + .from(permissions) + .innerJoin(workspace, eq(permissions.entityId, workspace.id)) + .where( + and( + eq(permissions.entityType, 'workspace'), + eq(permissions.entityId, workspaceId), + eq(permissions.userId, userId) + ) + ) + .limit(1) + 
+ if (!row) { + throw new Error(`Workspace ${workspaceId} not found`) + } + + const isOwner = row.ownerId === userId + const permissionType = row.permissionType + const canWrite = isOwner || permissionType === 'admin' || permissionType === 'write' + + if (requireWrite && !canWrite) { + throw new Error('Write or admin access required for this workspace') + } + + if (!requireWrite && !canWrite && permissionType !== 'read') { + throw new Error('Access denied to workspace') + } +} + async function executeGetUserWorkflow( params: Record, context: ExecutionContext @@ -398,8 +472,14 @@ async function executeGetWorkflowFromName( } } -async function executeListUserWorkflows(context: ExecutionContext): Promise { +async function executeListUserWorkflows( + params: Record, + context: ExecutionContext +): Promise { try { + const workspaceId = params?.workspaceId as string | undefined + const folderId = params?.folderId as string | undefined + const workspaceIds = await db .select({ entityId: permissions.entityId }) .from(permissions) @@ -411,6 +491,12 @@ async function executeListUserWorkflows(context: ExecutionContext): Promise 0) { workflowConditions.push(inArray(workflow.workspaceId, workspaceIdList)) } + if (workspaceId) { + workflowConditions.push(eq(workflow.workspaceId, workspaceId)) + } + if (folderId) { + workflowConditions.push(eq(workflow.folderId, folderId)) + } const workflows = await db .select() .from(workflow) @@ -426,6 +512,7 @@ async function executeListUserWorkflows(context: ExecutionContext): Promise { + try { + const workspaces = await db + .select({ + workspaceId: workspace.id, + workspaceName: workspace.name, + ownerId: workspace.ownerId, + permissionType: permissions.permissionType, + }) + .from(permissions) + .innerJoin(workspace, eq(permissions.entityId, workspace.id)) + .where(and(eq(permissions.userId, context.userId), eq(permissions.entityType, 'workspace'))) + .orderBy(desc(workspace.createdAt)) + + const output = workspaces.map((row) => ({ + 
workspaceId: row.workspaceId, + workspaceName: row.workspaceName, + role: row.ownerId === context.userId ? 'owner' : row.permissionType, + })) + + return { success: true, output: { workspaces: output } } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +async function executeListFolders( + params: Record, + context: ExecutionContext +): Promise { + try { + const workspaceId = (params?.workspaceId as string | undefined) || + (await getDefaultWorkspaceId(context.userId)) + + await ensureWorkspaceAccess(workspaceId, context.userId, false) + + const folders = await db + .select({ + folderId: workflowFolder.id, + folderName: workflowFolder.name, + parentId: workflowFolder.parentId, + sortOrder: workflowFolder.sortOrder, + }) + .from(workflowFolder) + .where(eq(workflowFolder.workspaceId, workspaceId)) + .orderBy(asc(workflowFolder.sortOrder), asc(workflowFolder.createdAt)) + + return { + success: true, + output: { + workspaceId, + folders, + }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +async function executeCreateWorkflow( + params: Record, + context: ExecutionContext +): Promise { + try { + const name = typeof params?.name === 'string' ? params.name.trim() : '' + if (!name) { + return { success: false, error: 'name is required' } + } + + const workspaceId = params?.workspaceId || (await getDefaultWorkspaceId(context.userId)) + const folderId = params?.folderId || null + const description = typeof params?.description === 'string' ? params.description : null + + await ensureWorkspaceAccess(workspaceId, context.userId, true) + + const workflowId = crypto.randomUUID() + const now = new Date() + + const folderCondition = folderId ? 
eq(workflow.folderId, folderId) : isNull(workflow.folderId) + const [maxResult] = await db + .select({ maxOrder: max(workflow.sortOrder) }) + .from(workflow) + .where(and(eq(workflow.workspaceId, workspaceId), folderCondition)) + const sortOrder = (maxResult?.maxOrder ?? 0) + 1 + + await db.insert(workflow).values({ + id: workflowId, + userId: context.userId, + workspaceId, + folderId, + sortOrder, + name, + description, + color: '#3972F6', + lastSynced: now, + createdAt: now, + updatedAt: now, + isDeployed: false, + runCount: 0, + variables: {}, + }) + + const { workflowState } = buildDefaultWorkflowArtifacts() + const saveResult = await saveWorkflowToNormalizedTables(workflowId, workflowState) + if (!saveResult.success) { + throw new Error(saveResult.error || 'Failed to save workflow state') + } + + return { + success: true, + output: { + workflowId, + workflowName: name, + workspaceId, + folderId, + }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +async function executeCreateFolder( + params: Record, + context: ExecutionContext +): Promise { + try { + const name = typeof params?.name === 'string' ? params.name.trim() : '' + if (!name) { + return { success: false, error: 'name is required' } + } + + const workspaceId = params?.workspaceId || (await getDefaultWorkspaceId(context.userId)) + const parentId = params?.parentId || null + + await ensureWorkspaceAccess(workspaceId, context.userId, true) + + const [maxOrder] = await db + .select({ maxOrder: max(workflowFolder.sortOrder) }) + .from(workflowFolder) + .where( + and( + eq(workflowFolder.workspaceId, workspaceId), + parentId ? eq(workflowFolder.parentId, parentId) : isNull(workflowFolder.parentId) + ) + ) + .limit(1) + + const sortOrder = (maxOrder?.maxOrder ?? 
0) + 1 + const folderId = crypto.randomUUID() + + await db.insert(workflowFolder).values({ + id: folderId, + name, + userId: context.userId, + workspaceId, + parentId, + color: '#6B7280', + sortOrder, + }) + + return { + success: true, + output: { + folderId, + folderName: name, + workspaceId, + parentId, + sortOrder, + }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + async function executeGetWorkflowData( params: Record, context: ExecutionContext diff --git a/apps/sim/lib/copilot/orchestrator/types.ts b/apps/sim/lib/copilot/orchestrator/types.ts index f4adbdeead..12cdee9da2 100644 --- a/apps/sim/lib/copilot/orchestrator/types.ts +++ b/apps/sim/lib/copilot/orchestrator/types.ts @@ -11,6 +11,8 @@ export type SSEEventType = | 'tool_error' | 'subagent_start' | 'subagent_end' + | 'structured_result' + | 'subagent_result' | 'done' | 'error' | 'start' From 793c87799ef4b1247ec9c9a54e175c785a501783 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Sat, 31 Jan 2026 12:26:06 -0800 Subject: [PATCH 07/72] mcp v1 --- apps/sim/app/api/mcp/copilot/route.ts | 342 ++++++++++++++++-- .../lib/copilot/orchestrator/sse-handlers.ts | 14 + .../lib/copilot/orchestrator/tool-executor.ts | 9 +- .../tools/server/workflow/edit-workflow.ts | 100 +++++ 4 files changed, 434 insertions(+), 31 deletions(-) diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index 6e4d6a9a73..042a4983d8 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -21,6 +21,152 @@ const logger = createLogger('CopilotMcpAPI') export const dynamic = 'force-dynamic' +/** + * MCP Server instructions that guide LLMs on how to use the Sim copilot tools. + * This is included in the initialize response to help external LLMs understand + * the workflow lifecycle and best practices. 
+ */ +const MCP_SERVER_INSTRUCTIONS = ` +## Sim Workflow Copilot - Usage Guide + +You are interacting with Sim's workflow automation platform. These tools orchestrate specialized AI agents that build workflows. Follow these guidelines carefully. + +--- + +## Platform Knowledge + +Sim is a workflow automation platform. Workflows are visual pipelines of blocks. + +### Block Types + +**Core Logic:** +- **Agent** - The heart of Sim (LLM block with tools, memory, structured output, knowledge bases) +- **Function** - JavaScript code execution +- **Condition** - If/else branching +- **Router** - AI-powered content-based routing +- **Loop** - While/do-while iteration +- **Parallel** - Simultaneous execution +- **API** - HTTP requests + +**Integrations (3rd Party):** +- OAuth: Slack, Gmail, Google Calendar, Sheets, Outlook, Linear, GitHub, Notion +- API: Stripe, Twilio, SendGrid, any REST API + +### The Agent Block + +The Agent block is the core of intelligent workflows: +- **Tools** - Add integrations, custom tools, web search to give it capabilities +- **Memory** - Multi-turn conversations with persistent context +- **Structured Output** - JSON schema for reliable parsing +- **Knowledge Bases** - RAG-powered document retrieval + +**Design principle:** Put tools INSIDE agents rather than using standalone tool blocks. 
+ 
+### Triggers
+
+| Type | Description |
+|------|-------------|
+| Manual/Chat | User sends message in UI (start block: input, files, conversationId) |
+| API | REST endpoint with custom input schema |
+| Webhook | External services POST to trigger URL |
+| Schedule | Cron-based (hourly, daily, weekly) |
+
+### Deployments
+
+| Type | Trigger | Use Case |
+|------|---------|----------|
+| API | Start block | REST endpoint for programmatic access |
+| Chat | Start block | Managed chat UI with auth options |
+| MCP | Start block | Expose as MCP tool for AI agents |
+| General | Schedule/Webhook | Activate triggers to run automatically |
+
+**Undeployed workflows only run in the builder UI.**
+
+### Variable Syntax
+
+Reference outputs from previous blocks: \`<blockname.output>\`
+Reference environment variables: \`{{ENV_VAR_NAME}}\`
+
+Rules:
+- Block names must be lowercase, no spaces, no special characters
+- Use dot notation for nested fields: \`<blockname.output.field>\`
+
+---
+
+## Workflow Lifecycle
+
+1. **Create**: For NEW workflows, FIRST call create_workflow to get a workflowId
+2. **Plan**: Use copilot_plan with the workflowId to plan the workflow
+3. **Edit**: Use copilot_edit with the workflowId AND the plan to build the workflow
+4. **Deploy**: ALWAYS deploy after building using copilot_deploy before testing/running
+5. **Test**: Use copilot_test to verify the workflow works correctly
+6. **Share**: Provide the user with the workflow URL after completion
+
+---
+
+## CRITICAL: Always Pass workflowId
+
+- For NEW workflows: Call create_workflow FIRST, then use the returned workflowId
+- For EXISTING workflows: Pass the workflowId to all copilot tools
+- copilot_plan, copilot_edit, copilot_deploy, copilot_test, copilot_debug all REQUIRE workflowId
+
+---
+
+## CRITICAL: How to Handle Plans
+
+The copilot_plan tool returns a structured plan object. You MUST:
+
+1. **Do NOT modify the plan**: Pass the plan object EXACTLY as returned to copilot_edit
+2. 
**Do NOT interpret or summarize the plan**: The edit agent needs the raw plan data
+3. **Pass the plan in the context.plan field**: \`{ "context": { "plan": <plan object> } }\`
+4. **Include ALL plan data**: Block configurations, connections, credentials, everything
+
+Example flow:
+\`\`\`
+1. copilot_plan({ request: "build a workflow...", workflowId: "abc123" })
+   -> Returns: { "plan": { "blocks": [...], "connections": [...], ... } }
+
+2. copilot_edit({
+     workflowId: "abc123",
+     message: "Execute the plan",
+     context: { "plan": <plan object from step 1> }
+   })
+\`\`\`
+
+**Why this matters**: The plan contains technical details (block IDs, field mappings, API schemas) that the edit agent needs verbatim. Summarizing or rephrasing loses critical information.
+
+---
+
+## CRITICAL: Error Handling
+
+**If the user says "doesn't work", "broke", "failed", "error" → ALWAYS use copilot_debug FIRST.**
+
+Don't guess. Don't plan. Debug first to find the actual problem.
+
+---
+
+## Important Rules
+
+- ALWAYS deploy a workflow before attempting to run or test it
+- Workflows must be deployed to have an "active deployment" for execution
+- After building, call copilot_deploy with the appropriate deployment type (api, chat, or mcp)
+- Return the workflow URL to the user so they can access it in Sim
+
+---
+
+## Quick Operations (use direct tools)
+- list_workflows, list_workspaces, list_folders, get_workflow: Fast database queries
+- create_workflow: Create new workflow and get workflowId (CALL THIS FIRST for new workflows)
+- create_folder: Create new resources
+
+## Workflow Building (use copilot tools)
+- copilot_plan: Plan workflow changes (REQUIRES workflowId) - returns a plan object
+- copilot_edit: Execute the plan (REQUIRES workflowId AND plan from copilot_plan)
+- copilot_deploy: Deploy workflows (REQUIRES workflowId)
+- copilot_test: Test workflow execution (REQUIRES workflowId)
+- copilot_debug: Diagnose errors (REQUIRES workflowId) - USE THIS FIRST for issues
+`
+
 /**
  * Direct tools that execute 
immediately without LLM orchestration. * These are fast database queries that don't need AI reasoning. @@ -91,6 +237,56 @@ const DIRECT_TOOL_DEFS: Array<{ }, }, }, + { + name: 'create_workflow', + toolId: 'create_workflow', + description: 'Create a new workflow. Returns the new workflow ID.', + inputSchema: { + type: 'object', + properties: { + name: { + type: 'string', + description: 'Name for the new workflow.', + }, + workspaceId: { + type: 'string', + description: 'Optional workspace ID. Uses default workspace if not provided.', + }, + folderId: { + type: 'string', + description: 'Optional folder ID to place the workflow in.', + }, + description: { + type: 'string', + description: 'Optional description for the workflow.', + }, + }, + required: ['name'], + }, + }, + { + name: 'create_folder', + toolId: 'create_folder', + description: 'Create a new folder in a workspace.', + inputSchema: { + type: 'object', + properties: { + name: { + type: 'string', + description: 'Name for the new folder.', + }, + workspaceId: { + type: 'string', + description: 'Optional workspace ID. Uses default workspace if not provided.', + }, + parentId: { + type: 'string', + description: 'Optional parent folder ID for nested folders.', + }, + }, + required: ['name'], + }, + }, ] const SUBAGENT_TOOL_DEFS: Array<{ @@ -127,28 +323,71 @@ DO NOT USE (use direct tools instead): { name: 'copilot_plan', agentId: 'plan', - description: 'Plan workflow changes by gathering required information.', + description: `Plan workflow changes by gathering required information. 
+ +USE THIS WHEN: +- Building a new workflow +- Modifying an existing workflow +- You need to understand what blocks and integrations are available +- The workflow requires multiple blocks or connections + +WORKFLOW ID (REQUIRED): +- For NEW workflows: First call create_workflow to get a workflowId, then pass it here +- For EXISTING workflows: Always pass the workflowId parameter + +This tool gathers information about available blocks, credentials, and the current workflow state. + +RETURNS: A plan object containing block configurations, connections, and technical details. +IMPORTANT: Pass the returned plan EXACTLY to copilot_edit - do not modify or summarize it.`, inputSchema: { type: 'object', properties: { - request: { type: 'string' }, - workflowId: { type: 'string' }, + request: { type: 'string', description: 'What you want to build or modify in the workflow.' }, + workflowId: { + type: 'string', + description: 'REQUIRED. The workflow ID. For new workflows, call create_workflow first to get this.', + }, context: { type: 'object' }, }, - required: ['request'], + required: ['request', 'workflowId'], }, }, { name: 'copilot_edit', agentId: 'edit', - description: 'Execute a workflow plan and apply edits.', + description: `Execute a workflow plan and apply edits. 
+ +USE THIS WHEN: +- You have a plan from copilot_plan that needs to be executed +- Building or modifying a workflow based on the plan +- Making changes to blocks, connections, or configurations + +WORKFLOW ID (REQUIRED): +- You MUST provide the workflowId parameter +- For new workflows, get the workflowId from create_workflow first + +PLAN (REQUIRED): +- Pass the EXACT plan object from copilot_plan in the context.plan field +- Do NOT modify, summarize, or interpret the plan - pass it verbatim +- The plan contains technical details the edit agent needs exactly as-is + +IMPORTANT: After copilot_edit completes, you MUST call copilot_deploy before the workflow can be run or tested.`, inputSchema: { type: 'object', properties: { - message: { type: 'string' }, - workflowId: { type: 'string' }, - plan: { type: 'object' }, - context: { type: 'object' }, + message: { type: 'string', description: 'Optional additional instructions for the edit.' }, + workflowId: { + type: 'string', + description: 'REQUIRED. The workflow ID to edit. Get this from create_workflow for new workflows.', + }, + plan: { + type: 'object', + description: 'The plan object from copilot_plan. Pass it EXACTLY as returned, do not modify.', + }, + context: { + type: 'object', + description: 'Additional context. Put the plan in context.plan if not using the plan field directly.', + }, }, required: ['workflowId'], }, @@ -156,29 +395,54 @@ DO NOT USE (use direct tools instead): { name: 'copilot_debug', agentId: 'debug', - description: 'Diagnose errors or unexpected workflow behavior.', + description: `Diagnose errors or unexpected workflow behavior. + +WORKFLOW ID (REQUIRED): Always provide the workflowId of the workflow to debug.`, inputSchema: { type: 'object', properties: { - error: { type: 'string' }, - workflowId: { type: 'string' }, + error: { type: 'string', description: 'The error message or description of the issue.' }, + workflowId: { type: 'string', description: 'REQUIRED. 
The workflow ID to debug.' }, context: { type: 'object' }, }, - required: ['error'], + required: ['error', 'workflowId'], }, }, { name: 'copilot_deploy', agentId: 'deploy', - description: 'Deploy or manage workflow deployments.', + description: `Deploy or manage workflow deployments. + +CRITICAL: You MUST deploy a workflow after building before it can be run or tested. +Workflows without an active deployment will fail with "no active deployment" error. + +WORKFLOW ID (REQUIRED): +- Always provide the workflowId parameter +- This must match the workflow you built with copilot_edit + +USE THIS: +- After copilot_edit completes to activate the workflow +- To update deployment settings +- To redeploy after making changes + +DEPLOYMENT TYPES: +- "deploy as api" - REST API endpoint +- "deploy as chat" - Chat interface +- "deploy as mcp" - MCP server`, inputSchema: { type: 'object', properties: { - request: { type: 'string' }, - workflowId: { type: 'string' }, + request: { + type: 'string', + description: 'The deployment request, e.g. "deploy as api" or "deploy as chat"', + }, + workflowId: { + type: 'string', + description: 'REQUIRED. The workflow ID to deploy.', + }, context: { type: 'object' }, }, - required: ['request'], + required: ['request', 'workflowId'], }, }, { @@ -277,15 +541,29 @@ DO NOT USE (use direct tools instead): { name: 'copilot_test', agentId: 'test', - description: 'Run workflows and verify outputs.', + description: `Run workflows and verify outputs. + +PREREQUISITE: The workflow MUST be deployed first using copilot_deploy. +Undeployed workflows will fail with "no active deployment" error. 
+ +WORKFLOW ID (REQUIRED): +- Always provide the workflowId parameter + +USE THIS: +- After deploying to verify the workflow works correctly +- To test with sample inputs +- To validate workflow behavior before sharing with user`, inputSchema: { type: 'object', properties: { request: { type: 'string' }, - workflowId: { type: 'string' }, + workflowId: { + type: 'string', + description: 'REQUIRED. The workflow ID to test.', + }, context: { type: 'object' }, }, - required: ['request'], + required: ['request', 'workflowId'], }, }, { @@ -355,7 +633,8 @@ export async function POST(request: NextRequest) { const result: InitializeResult = { protocolVersion: '2024-11-05', capabilities: { tools: {} }, - serverInfo: { name: 'copilot-subagents', version: '1.0.0' }, + serverInfo: { name: 'sim-copilot', version: '1.0.0' }, + instructions: MCP_SERVER_INSTRUCTIONS, } return NextResponse.json(createResponse(id, result)) } @@ -391,9 +670,9 @@ async function handleToolsList(id: RequestId): Promise { })) const subagentTools = SUBAGENT_TOOL_DEFS.map((tool) => ({ - name: tool.name, - description: tool.description, - inputSchema: tool.inputSchema, + name: tool.name, + description: tool.description, + inputSchema: tool.inputSchema, })) const result: ListToolsResult = { @@ -416,17 +695,17 @@ async function handleToolsCall( return handleDirectToolCall(id, directTool, args, userId) } - // Check if this is a subagent tool (slower, uses LLM) + // Check if this is a subagent tool (uses LLM orchestration) const subagentTool = SUBAGENT_TOOL_DEFS.find((tool) => tool.name === params.name) if (subagentTool) { return handleSubagentToolCall(id, subagentTool, args, userId) } - return NextResponse.json( - createError(id, ErrorCode.MethodNotFound, `Tool not found: ${params.name}`), - { status: 404 } - ) - } + return NextResponse.json( + createError(id, ErrorCode.MethodNotFound, `Tool not found: ${params.name}`), + { status: 404 } + ) +} async function handleDirectToolCall( id: RequestId, @@ -494,6 +773,9 
@@ async function handleSubagentToolCall( workspaceId: args.workspaceId, context, model, + // Signal to the copilot backend that this is a headless request + // so it can enforce workflowId requirements on tools + headless: true, }, { userId, diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts index d65da8e89a..c738674be6 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts @@ -65,6 +65,20 @@ async function executeToolAndReport( toolCall.error = result.error toolCall.endTime = Date.now() + // If create_workflow was successful, update the execution context with the new workflowId + // This ensures subsequent tools in the same stream have access to the workflowId + if ( + toolCall.name === 'create_workflow' && + result.success && + result.output?.workflowId && + !execContext.workflowId + ) { + execContext.workflowId = result.output.workflowId + if (result.output.workspaceId) { + execContext.workspaceId = result.output.workspaceId + } + } + await markToolComplete( toolCall.id, toolCall.name, diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor.ts b/apps/sim/lib/copilot/orchestrator/tool-executor.ts index e2611c90df..e7ec7717a9 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor.ts @@ -121,7 +121,14 @@ async function executeServerToolDirect( context: ExecutionContext ): Promise { try { - const result = await routeExecution(toolName, params, { userId: context.userId }) + // Inject workflowId from context if not provided in params + // This is needed for tools like set_environment_variables that require workflowId + const enrichedParams = { ...params } + if (!enrichedParams.workflowId && context.workflowId) { + enrichedParams.workflowId = context.workflowId + } + + const result = await routeExecution(toolName, enrichedParams, { userId: context.userId }) return { success: 
true, output: result } } catch (error) { logger.error('Server tool execution failed', { diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts index 1caa1b16f5..a060acfb91 100644 --- a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts +++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts @@ -1403,6 +1403,101 @@ function filterDisallowedTools( return allowedTools } +/** + * Normalizes block IDs in operations to ensure they are valid UUIDs. + * The LLM may generate human-readable IDs like "web_search" or "research_agent" + * which need to be converted to proper UUIDs for database compatibility. + * + * Returns the normalized operations and a mapping from old IDs to new UUIDs. + */ +function normalizeBlockIdsInOperations(operations: EditWorkflowOperation[]): { + normalizedOperations: EditWorkflowOperation[] + idMapping: Map +} { + const logger = createLogger('EditWorkflowServerTool') + const idMapping = new Map() + + // First pass: collect all non-UUID block_ids from add/insert operations + for (const op of operations) { + if (op.operation_type === 'add' || op.operation_type === 'insert_into_subflow') { + if (op.block_id && !UUID_REGEX.test(op.block_id)) { + const newId = crypto.randomUUID() + idMapping.set(op.block_id, newId) + logger.debug('Normalizing block ID', { oldId: op.block_id, newId }) + } + } + } + + if (idMapping.size === 0) { + return { normalizedOperations: operations, idMapping } + } + + logger.info('Normalizing block IDs in operations', { + normalizedCount: idMapping.size, + mappings: Object.fromEntries(idMapping), + }) + + // Helper to replace an ID if it's in the mapping + const replaceId = (id: string | undefined): string | undefined => { + if (!id) return id + return idMapping.get(id) ?? 
id + } + + // Second pass: update all references to use new UUIDs + const normalizedOperations = operations.map((op) => { + const normalized: EditWorkflowOperation = { + ...op, + block_id: replaceId(op.block_id) ?? op.block_id, + } + + if (op.params) { + normalized.params = { ...op.params } + + // Update subflowId references (for insert_into_subflow) + if (normalized.params.subflowId) { + normalized.params.subflowId = replaceId(normalized.params.subflowId) + } + + // Update connection references + if (normalized.params.connections) { + const normalizedConnections: Record = {} + for (const [handle, targets] of Object.entries(normalized.params.connections)) { + if (typeof targets === 'string') { + normalizedConnections[handle] = replaceId(targets) + } else if (Array.isArray(targets)) { + normalizedConnections[handle] = targets.map((t) => { + if (typeof t === 'string') return replaceId(t) + if (t && typeof t === 'object' && t.block) { + return { ...t, block: replaceId(t.block) } + } + return t + }) + } else if (targets && typeof targets === 'object' && (targets as any).block) { + normalizedConnections[handle] = { ...targets, block: replaceId((targets as any).block) } + } else { + normalizedConnections[handle] = targets + } + } + normalized.params.connections = normalizedConnections + } + + // Update nestedNodes block IDs + if (normalized.params.nestedNodes) { + const normalizedNestedNodes: Record = {} + for (const [childId, childBlock] of Object.entries(normalized.params.nestedNodes)) { + const newChildId = replaceId(childId) ?? 
childId + normalizedNestedNodes[newChildId] = childBlock + } + normalized.params.nestedNodes = normalizedNestedNodes + } + } + + return normalized + }) + + return { normalizedOperations, idMapping } +} + /** * Apply operations directly to the workflow JSON state */ @@ -1422,6 +1517,11 @@ function applyOperationsToWorkflowState( // Log initial state const logger = createLogger('EditWorkflowServerTool') + + // Normalize block IDs to UUIDs before processing + const { normalizedOperations } = normalizeBlockIdsInOperations(operations) + operations = normalizedOperations + logger.info('Applying operations to workflow:', { totalOperations: operations.length, operationTypes: operations.reduce((acc: any, op) => { From e04f3796bcf6d46e94eadebe2ccc5d61475fa51f Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Sat, 31 Jan 2026 16:00:53 -0800 Subject: [PATCH 08/72] Improvement --- apps/sim/app/api/mcp/copilot/route.ts | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index 042a4983d8..f22f631f56 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -784,11 +784,36 @@ async function handleSubagentToolCall( } ) + // When a respond tool (plan_respond, edit_respond, etc.) was used, + // return only the structured result - not the full result with all internal tool calls. + // This provides clean output for MCP consumers. + let responseData: unknown + if (result.structuredResult) { + responseData = { + success: result.structuredResult.success ?? 
result.success, + type: result.structuredResult.type, + summary: result.structuredResult.summary, + data: result.structuredResult.data, + } + } else if (result.error) { + responseData = { + success: false, + error: result.error, + errors: result.errors, + } + } else { + // Fallback: return content if no structured result + responseData = { + success: result.success, + content: result.content, + } + } + const response: CallToolResult = { content: [ { type: 'text', - text: JSON.stringify(result, null, 2), + text: JSON.stringify(responseData, null, 2), }, ], isError: !result.success, From 4d84c54c527d43e1097af3fea2651404cbfcc5f9 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Tue, 3 Feb 2026 12:56:57 -0800 Subject: [PATCH 09/72] Fix --- apps/sim/app/api/mcp/copilot/route.ts | 48 +++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index f22f631f56..4ecb328da1 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -295,6 +295,54 @@ const SUBAGENT_TOOL_DEFS: Array<{ inputSchema: { type: 'object'; properties?: Record; required?: string[] } agentId: string }> = [ + { + name: 'copilot_build', + agentId: 'build', + description: `Build a workflow end-to-end in a single step. This is the fast mode equivalent for headless/MCP usage. 
+ +USE THIS WHEN: +- Building a new workflow from scratch +- Modifying an existing workflow +- You want to gather information and build in one pass without separate plan→edit steps + +WORKFLOW ID (REQUIRED): +- For NEW workflows: First call create_workflow to get a workflowId, then pass it here +- For EXISTING workflows: Always pass the workflowId parameter + +CAN DO: +- Gather information about blocks, credentials, patterns +- Search documentation and patterns for best practices +- Add, modify, or remove blocks +- Configure block settings and connections +- Set environment variables and workflow variables + +CANNOT DO: +- Run or test workflows (use copilot_test separately after deploying) +- Deploy workflows (use copilot_deploy separately) + +WORKFLOW: +1. Call create_workflow to get a workflowId (for new workflows) +2. Call copilot_build with the request and workflowId +3. Build agent gathers info and builds in one pass +4. Call copilot_deploy to deploy the workflow +5. Optionally call copilot_test to verify it works`, + inputSchema: { + type: 'object', + properties: { + request: { + type: 'string', + description: 'What you want to build or modify in the workflow.', + }, + workflowId: { + type: 'string', + description: + 'REQUIRED. The workflow ID. 
For new workflows, call create_workflow first to get this.', + }, + context: { type: 'object' }, + }, + required: ['request', 'workflowId'], + }, + }, { name: 'copilot_discovery', agentId: 'discovery', From 8f17bc48ab0f8ce4b3744038fc5b5068e02a0b51 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Tue, 3 Feb 2026 15:32:40 -0800 Subject: [PATCH 10/72] BROKEN --- apps/sim/app/api/copilot/chat/route.ts | 59 +- apps/sim/app/api/mcp/copilot/route.ts | 463 +--------------- apps/sim/app/api/v1/copilot/chat/route.ts | 54 +- .../components/tool-call/tool-call.tsx | 134 +---- apps/sim/lib/copilot/orchestrator/config.ts | 5 - .../lib/copilot/orchestrator/tool-executor.ts | 107 ++-- .../client/workflow/get-workflow-from-name.ts | 18 +- .../client/workflow/list-user-workflows.ts | 5 +- apps/sim/lib/copilot/tools/mcp/definitions.ts | 466 ++++++++++++++++ .../copilot/tools/shared/workflow-utils.ts | 37 ++ apps/sim/lib/workflows/utils.ts | 46 +- apps/sim/stores/panel/copilot/store.ts | 511 +----------------- apps/sim/stores/panel/copilot/types.ts | 2 - 13 files changed, 630 insertions(+), 1277 deletions(-) create mode 100644 apps/sim/lib/copilot/tools/mcp/definitions.ts create mode 100644 apps/sim/lib/copilot/tools/shared/workflow-utils.ts diff --git a/apps/sim/app/api/copilot/chat/route.ts b/apps/sim/app/api/copilot/chat/route.ts index 635e106c7a..a3e6766a41 100644 --- a/apps/sim/app/api/copilot/chat/route.ts +++ b/apps/sim/app/api/copilot/chat/route.ts @@ -1,7 +1,7 @@ import { db } from '@sim/db' -import { copilotChats, permissions, workflow } from '@sim/db/schema' +import { copilotChats } from '@sim/db/schema' import { createLogger } from '@sim/logger' -import { and, asc, desc, eq, inArray, or } from 'drizzle-orm' +import { and, desc, eq } from 'drizzle-orm' import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { getSession } from '@/lib/auth' @@ -21,6 +21,7 @@ import type { CopilotProviderConfig } from '@/lib/copilot/types' import { 
env } from '@/lib/core/config/env' import { CopilotFiles } from '@/lib/uploads' import { createFileContent } from '@/lib/uploads/utils/file-utils' +import { resolveWorkflowIdForUser } from '@/lib/workflows/utils' import { tools } from '@/tools/registry' import { getLatestVersionTools, stripVersionSuffix } from '@/tools/utils' import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator' @@ -79,54 +80,6 @@ const ChatMessageSchema = z.object({ commands: z.array(z.string()).optional(), }) -async function resolveWorkflowId( - userId: string, - workflowId?: string, - workflowName?: string -): Promise<{ workflowId: string; workflowName?: string } | null> { - // If workflowId provided, use it directly - if (workflowId) { - return { workflowId } - } - - // Get user's accessible workflows - const workspaceIds = await db - .select({ entityId: permissions.entityId }) - .from(permissions) - .where(and(eq(permissions.userId, userId), eq(permissions.entityType, 'workspace'))) - - const workspaceIdList = workspaceIds.map((row) => row.entityId) - - const workflowConditions = [eq(workflow.userId, userId)] - if (workspaceIdList.length > 0) { - workflowConditions.push(inArray(workflow.workspaceId, workspaceIdList)) - } - - const workflows = await db - .select() - .from(workflow) - .where(or(...workflowConditions)) - .orderBy(asc(workflow.sortOrder), asc(workflow.createdAt), asc(workflow.id)) - - if (workflows.length === 0) { - return null - } - - // If workflowName provided, find matching workflow - if (workflowName) { - const match = workflows.find( - (w) => String(w.name || '').trim().toLowerCase() === workflowName.toLowerCase() - ) - if (match) { - return { workflowId: match.id, workflowName: match.name || undefined } - } - return null - } - - // Default to first workflow - return { workflowId: workflows[0].id, workflowName: workflows[0].name || undefined } -} - /** * POST /api/copilot/chat * Send messages to sim agent and handle chat persistence @@ -165,7 +118,11 @@ 
export async function POST(req: NextRequest) { } = ChatMessageSchema.parse(body) // Resolve workflowId - if not provided, use first workflow or find by name - const resolved = await resolveWorkflowId(authenticatedUserId, providedWorkflowId, workflowName) + const resolved = await resolveWorkflowIdForUser( + authenticatedUserId, + providedWorkflowId, + workflowName + ) if (!resolved) { return createBadRequestResponse( 'No workflows found. Create a workflow first or provide a valid workflowId.' diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index 4ecb328da1..b797ccca53 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -15,6 +15,7 @@ import { type NextRequest, NextResponse } from 'next/server' import { checkHybridAuth } from '@/lib/auth/hybrid' import { getCopilotModel } from '@/lib/copilot/config' import { orchestrateSubagentStream } from '@/lib/copilot/orchestrator/subagent' +import { DIRECT_TOOL_DEFS, SUBAGENT_TOOL_DEFS } from '@/lib/copilot/tools/mcp/definitions' import { executeToolServerSide, prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor' const logger = createLogger('CopilotMcpAPI') @@ -167,468 +168,6 @@ Don't guess. Don't plan. Debug first to find the actual problem. - copilot_debug: Diagnose errors (REQUIRES workflowId) - USE THIS FIRST for issues ` -/** - * Direct tools that execute immediately without LLM orchestration. - * These are fast database queries that don't need AI reasoning. - */ -const DIRECT_TOOL_DEFS: Array<{ - name: string - description: string - inputSchema: { type: 'object'; properties?: Record; required?: string[] } - toolId: string -}> = [ - { - name: 'list_workflows', - toolId: 'list_user_workflows', - description: 'List all workflows the user has access to. 
Returns workflow IDs, names, and workspace info.', - inputSchema: { - type: 'object', - properties: { - workspaceId: { - type: 'string', - description: 'Optional workspace ID to filter workflows.', - }, - folderId: { - type: 'string', - description: 'Optional folder ID to filter workflows.', - }, - }, - }, - }, - { - name: 'list_workspaces', - toolId: 'list_user_workspaces', - description: 'List all workspaces the user has access to. Returns workspace IDs, names, and roles.', - inputSchema: { - type: 'object', - properties: {}, - }, - }, - { - name: 'list_folders', - toolId: 'list_folders', - description: 'List all folders in a workspace.', - inputSchema: { - type: 'object', - properties: { - workspaceId: { - type: 'string', - description: 'Workspace ID to list folders from.', - }, - }, - required: ['workspaceId'], - }, - }, - { - name: 'get_workflow', - toolId: 'get_workflow_from_name', - description: 'Get a workflow by name or ID. Returns the full workflow definition.', - inputSchema: { - type: 'object', - properties: { - name: { - type: 'string', - description: 'Workflow name to search for.', - }, - workflowId: { - type: 'string', - description: 'Workflow ID to retrieve directly.', - }, - }, - }, - }, - { - name: 'create_workflow', - toolId: 'create_workflow', - description: 'Create a new workflow. Returns the new workflow ID.', - inputSchema: { - type: 'object', - properties: { - name: { - type: 'string', - description: 'Name for the new workflow.', - }, - workspaceId: { - type: 'string', - description: 'Optional workspace ID. 
Uses default workspace if not provided.', - }, - folderId: { - type: 'string', - description: 'Optional folder ID to place the workflow in.', - }, - description: { - type: 'string', - description: 'Optional description for the workflow.', - }, - }, - required: ['name'], - }, - }, - { - name: 'create_folder', - toolId: 'create_folder', - description: 'Create a new folder in a workspace.', - inputSchema: { - type: 'object', - properties: { - name: { - type: 'string', - description: 'Name for the new folder.', - }, - workspaceId: { - type: 'string', - description: 'Optional workspace ID. Uses default workspace if not provided.', - }, - parentId: { - type: 'string', - description: 'Optional parent folder ID for nested folders.', - }, - }, - required: ['name'], - }, - }, -] - -const SUBAGENT_TOOL_DEFS: Array<{ - name: string - description: string - inputSchema: { type: 'object'; properties?: Record; required?: string[] } - agentId: string -}> = [ - { - name: 'copilot_build', - agentId: 'build', - description: `Build a workflow end-to-end in a single step. This is the fast mode equivalent for headless/MCP usage. - -USE THIS WHEN: -- Building a new workflow from scratch -- Modifying an existing workflow -- You want to gather information and build in one pass without separate plan→edit steps - -WORKFLOW ID (REQUIRED): -- For NEW workflows: First call create_workflow to get a workflowId, then pass it here -- For EXISTING workflows: Always pass the workflowId parameter - -CAN DO: -- Gather information about blocks, credentials, patterns -- Search documentation and patterns for best practices -- Add, modify, or remove blocks -- Configure block settings and connections -- Set environment variables and workflow variables - -CANNOT DO: -- Run or test workflows (use copilot_test separately after deploying) -- Deploy workflows (use copilot_deploy separately) - -WORKFLOW: -1. Call create_workflow to get a workflowId (for new workflows) -2. 
Call copilot_build with the request and workflowId -3. Build agent gathers info and builds in one pass -4. Call copilot_deploy to deploy the workflow -5. Optionally call copilot_test to verify it works`, - inputSchema: { - type: 'object', - properties: { - request: { - type: 'string', - description: 'What you want to build or modify in the workflow.', - }, - workflowId: { - type: 'string', - description: - 'REQUIRED. The workflow ID. For new workflows, call create_workflow first to get this.', - }, - context: { type: 'object' }, - }, - required: ['request', 'workflowId'], - }, - }, - { - name: 'copilot_discovery', - agentId: 'discovery', - description: `Find workflows by their contents or functionality when the user doesn't know the exact name or ID. - -USE THIS WHEN: -- User describes a workflow by what it does: "the one that sends emails", "my Slack notification workflow" -- User refers to workflow contents: "the workflow with the OpenAI block" -- User needs to search/match workflows by functionality or description - -DO NOT USE (use direct tools instead): -- User knows the workflow name → use get_workflow -- User wants to list all workflows → use list_workflows -- User wants to list workspaces → use list_workspaces -- User wants to list folders → use list_folders`, - inputSchema: { - type: 'object', - properties: { - request: { type: 'string' }, - workspaceId: { type: 'string' }, - context: { type: 'object' }, - }, - required: ['request'], - }, - }, - { - name: 'copilot_plan', - agentId: 'plan', - description: `Plan workflow changes by gathering required information. 
- -USE THIS WHEN: -- Building a new workflow -- Modifying an existing workflow -- You need to understand what blocks and integrations are available -- The workflow requires multiple blocks or connections - -WORKFLOW ID (REQUIRED): -- For NEW workflows: First call create_workflow to get a workflowId, then pass it here -- For EXISTING workflows: Always pass the workflowId parameter - -This tool gathers information about available blocks, credentials, and the current workflow state. - -RETURNS: A plan object containing block configurations, connections, and technical details. -IMPORTANT: Pass the returned plan EXACTLY to copilot_edit - do not modify or summarize it.`, - inputSchema: { - type: 'object', - properties: { - request: { type: 'string', description: 'What you want to build or modify in the workflow.' }, - workflowId: { - type: 'string', - description: 'REQUIRED. The workflow ID. For new workflows, call create_workflow first to get this.', - }, - context: { type: 'object' }, - }, - required: ['request', 'workflowId'], - }, - }, - { - name: 'copilot_edit', - agentId: 'edit', - description: `Execute a workflow plan and apply edits. - -USE THIS WHEN: -- You have a plan from copilot_plan that needs to be executed -- Building or modifying a workflow based on the plan -- Making changes to blocks, connections, or configurations - -WORKFLOW ID (REQUIRED): -- You MUST provide the workflowId parameter -- For new workflows, get the workflowId from create_workflow first - -PLAN (REQUIRED): -- Pass the EXACT plan object from copilot_plan in the context.plan field -- Do NOT modify, summarize, or interpret the plan - pass it verbatim -- The plan contains technical details the edit agent needs exactly as-is - -IMPORTANT: After copilot_edit completes, you MUST call copilot_deploy before the workflow can be run or tested.`, - inputSchema: { - type: 'object', - properties: { - message: { type: 'string', description: 'Optional additional instructions for the edit.' 
}, - workflowId: { - type: 'string', - description: 'REQUIRED. The workflow ID to edit. Get this from create_workflow for new workflows.', - }, - plan: { - type: 'object', - description: 'The plan object from copilot_plan. Pass it EXACTLY as returned, do not modify.', - }, - context: { - type: 'object', - description: 'Additional context. Put the plan in context.plan if not using the plan field directly.', - }, - }, - required: ['workflowId'], - }, - }, - { - name: 'copilot_debug', - agentId: 'debug', - description: `Diagnose errors or unexpected workflow behavior. - -WORKFLOW ID (REQUIRED): Always provide the workflowId of the workflow to debug.`, - inputSchema: { - type: 'object', - properties: { - error: { type: 'string', description: 'The error message or description of the issue.' }, - workflowId: { type: 'string', description: 'REQUIRED. The workflow ID to debug.' }, - context: { type: 'object' }, - }, - required: ['error', 'workflowId'], - }, - }, - { - name: 'copilot_deploy', - agentId: 'deploy', - description: `Deploy or manage workflow deployments. - -CRITICAL: You MUST deploy a workflow after building before it can be run or tested. -Workflows without an active deployment will fail with "no active deployment" error. - -WORKFLOW ID (REQUIRED): -- Always provide the workflowId parameter -- This must match the workflow you built with copilot_edit - -USE THIS: -- After copilot_edit completes to activate the workflow -- To update deployment settings -- To redeploy after making changes - -DEPLOYMENT TYPES: -- "deploy as api" - REST API endpoint -- "deploy as chat" - Chat interface -- "deploy as mcp" - MCP server`, - inputSchema: { - type: 'object', - properties: { - request: { - type: 'string', - description: 'The deployment request, e.g. "deploy as api" or "deploy as chat"', - }, - workflowId: { - type: 'string', - description: 'REQUIRED. 
The workflow ID to deploy.', - }, - context: { type: 'object' }, - }, - required: ['request', 'workflowId'], - }, - }, - { - name: 'copilot_auth', - agentId: 'auth', - description: 'Handle OAuth connection flows.', - inputSchema: { - type: 'object', - properties: { - request: { type: 'string' }, - context: { type: 'object' }, - }, - required: ['request'], - }, - }, - { - name: 'copilot_knowledge', - agentId: 'knowledge', - description: 'Create and manage knowledge bases.', - inputSchema: { - type: 'object', - properties: { - request: { type: 'string' }, - context: { type: 'object' }, - }, - required: ['request'], - }, - }, - { - name: 'copilot_custom_tool', - agentId: 'custom_tool', - description: 'Create or manage custom tools.', - inputSchema: { - type: 'object', - properties: { - request: { type: 'string' }, - context: { type: 'object' }, - }, - required: ['request'], - }, - }, - { - name: 'copilot_info', - agentId: 'info', - description: 'Inspect blocks, outputs, and workflow metadata.', - inputSchema: { - type: 'object', - properties: { - request: { type: 'string' }, - workflowId: { type: 'string' }, - context: { type: 'object' }, - }, - required: ['request'], - }, - }, - { - name: 'copilot_workflow', - agentId: 'workflow', - description: 'Manage workflow environment and configuration.', - inputSchema: { - type: 'object', - properties: { - request: { type: 'string' }, - workflowId: { type: 'string' }, - context: { type: 'object' }, - }, - required: ['request'], - }, - }, - { - name: 'copilot_research', - agentId: 'research', - description: 'Research external APIs and documentation.', - inputSchema: { - type: 'object', - properties: { - request: { type: 'string' }, - context: { type: 'object' }, - }, - required: ['request'], - }, - }, - { - name: 'copilot_tour', - agentId: 'tour', - description: 'Explain platform features and usage.', - inputSchema: { - type: 'object', - properties: { - request: { type: 'string' }, - context: { type: 'object' }, - }, - 
required: ['request'], - }, - }, - { - name: 'copilot_test', - agentId: 'test', - description: `Run workflows and verify outputs. - -PREREQUISITE: The workflow MUST be deployed first using copilot_deploy. -Undeployed workflows will fail with "no active deployment" error. - -WORKFLOW ID (REQUIRED): -- Always provide the workflowId parameter - -USE THIS: -- After deploying to verify the workflow works correctly -- To test with sample inputs -- To validate workflow behavior before sharing with user`, - inputSchema: { - type: 'object', - properties: { - request: { type: 'string' }, - workflowId: { - type: 'string', - description: 'REQUIRED. The workflow ID to test.', - }, - context: { type: 'object' }, - }, - required: ['request', 'workflowId'], - }, - }, - { - name: 'copilot_superagent', - agentId: 'superagent', - description: 'Execute direct external actions (email, Slack, etc.).', - inputSchema: { - type: 'object', - properties: { - request: { type: 'string' }, - context: { type: 'object' }, - }, - required: ['request'], - }, - }, -] - function createResponse(id: RequestId, result: unknown): JSONRPCResponse { return { jsonrpc: '2.0', diff --git a/apps/sim/app/api/v1/copilot/chat/route.ts b/apps/sim/app/api/v1/copilot/chat/route.ts index cab197ad5b..517959c44e 100644 --- a/apps/sim/app/api/v1/copilot/chat/route.ts +++ b/apps/sim/app/api/v1/copilot/chat/route.ts @@ -1,7 +1,4 @@ -import { db } from '@sim/db' -import { permissions, workflow } from '@sim/db/schema' import { createLogger } from '@sim/logger' -import { and, asc, eq, inArray, or } from 'drizzle-orm' import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { authenticateV1Request } from '@/app/api/v1/auth' @@ -9,6 +6,7 @@ import { getCopilotModel } from '@/lib/copilot/config' import { SIM_AGENT_VERSION } from '@/lib/copilot/constants' import { COPILOT_REQUEST_MODES } from '@/lib/copilot/models' import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator' +import { 
resolveWorkflowIdForUser } from '@/lib/workflows/utils' const logger = createLogger('CopilotHeadlessAPI') @@ -23,54 +21,6 @@ const RequestSchema = z.object({ timeout: z.number().optional().default(300000), }) -async function resolveWorkflowId( - userId: string, - workflowId?: string, - workflowName?: string -): Promise<{ workflowId: string; workflowName?: string } | null> { - // If workflowId provided, use it directly - if (workflowId) { - return { workflowId } - } - - // Get user's accessible workflows - const workspaceIds = await db - .select({ entityId: permissions.entityId }) - .from(permissions) - .where(and(eq(permissions.userId, userId), eq(permissions.entityType, 'workspace'))) - - const workspaceIdList = workspaceIds.map((row) => row.entityId) - - const workflowConditions = [eq(workflow.userId, userId)] - if (workspaceIdList.length > 0) { - workflowConditions.push(inArray(workflow.workspaceId, workspaceIdList)) - } - - const workflows = await db - .select() - .from(workflow) - .where(or(...workflowConditions)) - .orderBy(asc(workflow.sortOrder), asc(workflow.createdAt), asc(workflow.id)) - - if (workflows.length === 0) { - return null - } - - // If workflowName provided, find matching workflow - if (workflowName) { - const match = workflows.find( - (w) => String(w.name || '').trim().toLowerCase() === workflowName.toLowerCase() - ) - if (match) { - return { workflowId: match.id, workflowName: match.name || undefined } - } - return null - } - - // Default to first workflow - return { workflowId: workflows[0].id, workflowName: workflows[0].name || undefined } -} - /** * POST /api/v1/copilot/chat * Headless copilot endpoint for server-side orchestration. 
@@ -93,7 +43,7 @@ export async function POST(req: NextRequest) { const selectedModel = parsed.model || defaults.model // Resolve workflow ID - const resolved = await resolveWorkflowId(auth.userId, parsed.workflowId, parsed.workflowName) + const resolved = await resolveWorkflowIdForUser(auth.userId, parsed.workflowId, parsed.workflowName) if (!resolved) { return NextResponse.json( { success: false, error: 'No workflows found. Create a workflow first or provide a valid workflowId.' }, diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx index 5edc292713..9d575cfd5d 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx @@ -27,7 +27,6 @@ import { getBlock } from '@/blocks/registry' import type { CopilotToolCall } from '@/stores/panel' import { useCopilotStore } from '@/stores/panel' import { CLASS_TOOL_METADATA } from '@/stores/panel/copilot/store' -import { COPILOT_SERVER_ORCHESTRATED } from '@/lib/copilot/orchestrator/config' import type { SubAgentContentBlock } from '@/stores/panel/copilot/types' import { useWorkflowStore } from '@/stores/workflows/workflow/store' @@ -1261,7 +1260,7 @@ function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean { const toolCallLogger = createLogger('CopilotToolCall') -async function sendToolDecision(toolCallId: string, status: 'accepted' | 'rejected') { +async function sendToolDecision(toolCallId: string, status: 'accepted' | 'rejected' | 'background') { try { await fetch('/api/copilot/confirm', { method: 'POST', @@ -1283,105 +1282,15 @@ async function handleRun( onStateChange?: any, editedParams?: any ) { - if 
(COPILOT_SERVER_ORCHESTRATED) { - setToolCallState(toolCall, 'executing') - onStateChange?.('executing') - await sendToolDecision(toolCall.id, 'accepted') - return - } - const instance = getClientTool(toolCall.id) - - if (!instance && isIntegrationTool(toolCall.name)) { - onStateChange?.('executing') - try { - await useCopilotStore.getState().executeIntegrationTool(toolCall.id) - } catch (e) { - setToolCallState(toolCall, 'error', { error: e instanceof Error ? e.message : String(e) }) - onStateChange?.('error') - try { - await fetch('/api/copilot/tools/mark-complete', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - id: toolCall.id, - name: toolCall.name, - status: 500, - message: e instanceof Error ? e.message : 'Tool execution failed', - data: { error: e instanceof Error ? e.message : String(e) }, - }), - }) - } catch { - console.error('[handleRun] Failed to notify backend of tool error:', toolCall.id) - } - } - return - } - - if (!instance) return - try { - const mergedParams = - editedParams || - (toolCall as any).params || - (toolCall as any).parameters || - (toolCall as any).input || - {} - await instance.handleAccept?.(mergedParams) - onStateChange?.('executing') - } catch (e) { - setToolCallState(toolCall, 'error', { error: e instanceof Error ? e.message : String(e) }) - } + setToolCallState(toolCall, 'executing', editedParams ? 
{ params: editedParams } : undefined) + onStateChange?.('executing') + await sendToolDecision(toolCall.id, 'accepted') } async function handleSkip(toolCall: CopilotToolCall, setToolCallState: any, onStateChange?: any) { - if (COPILOT_SERVER_ORCHESTRATED) { - setToolCallState(toolCall, 'rejected') - onStateChange?.('rejected') - await sendToolDecision(toolCall.id, 'rejected') - return - } - const instance = getClientTool(toolCall.id) - - if (!instance && isIntegrationTool(toolCall.name)) { - setToolCallState(toolCall, 'rejected') - onStateChange?.('rejected') - - let notified = false - for (let attempt = 0; attempt < 3 && !notified; attempt++) { - try { - const res = await fetch('/api/copilot/tools/mark-complete', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - id: toolCall.id, - name: toolCall.name, - status: 400, - message: 'Tool execution skipped by user', - data: { skipped: true, reason: 'user_skipped' }, - }), - }) - if (res.ok) { - notified = true - } - } catch (e) { - if (attempt < 2) { - await new Promise((resolve) => setTimeout(resolve, 500)) - } - } - } - - if (!notified) { - console.error('[handleSkip] Failed to notify backend after 3 attempts:', toolCall.id) - } - return - } - - if (instance) { - try { - await instance.handleReject?.() - } catch {} - } setToolCallState(toolCall, 'rejected') onStateChange?.('rejected') + await sendToolDecision(toolCall.id, 'rejected') } function getDisplayName(toolCall: CopilotToolCall): string { @@ -1541,7 +1450,7 @@ export function ToolCall({ // Check if this integration tool is auto-allowed // Subscribe to autoAllowedTools so we re-render when it changes const autoAllowedTools = useCopilotStore((s) => s.autoAllowedTools) - const { removeAutoAllowedTool } = useCopilotStore() + const { removeAutoAllowedTool, setToolCallState } = useCopilotStore() const isAutoAllowed = isIntegrationTool(toolCall.name) && autoAllowedTools.includes(toolCall.name) // Update edited params when 
toolCall params change (deep comparison to avoid resetting user edits on ref change) @@ -2243,16 +2152,9 @@ export function ToolCall({
- {/* Show loading state until fully initialized */} - {!isInitialized ? ( + {/* Show loading state until fully initialized, but skip if actively streaming (resume case) */} + {!isInitialized && !isSendingMessage ? (

Loading copilot

diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/hooks/use-copilot-initialization.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/hooks/use-copilot-initialization.ts index 48a3ead804..1ffe80216a 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/hooks/use-copilot-initialization.ts +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/hooks/use-copilot-initialization.ts @@ -14,6 +14,7 @@ interface UseCopilotInitializationProps { loadAutoAllowedTools: () => Promise currentChat: any isSendingMessage: boolean + resumeActiveStream: () => Promise } /** @@ -32,11 +33,13 @@ export function useCopilotInitialization(props: UseCopilotInitializationProps) { loadAutoAllowedTools, currentChat, isSendingMessage, + resumeActiveStream, } = props const [isInitialized, setIsInitialized] = useState(false) const lastWorkflowIdRef = useRef(null) const hasMountedRef = useRef(false) + const hasResumedRef = useRef(false) /** Initialize on mount - loads chats if needed. 
Never loads during streaming */ useEffect(() => { @@ -105,6 +108,16 @@ export function useCopilotInitialization(props: UseCopilotInitializationProps) { isSendingMessage, ]) + /** Try to resume active stream on mount - runs early, before waiting for chats */ + useEffect(() => { + if (hasResumedRef.current || isSendingMessage) return + hasResumedRef.current = true + // Resume immediately on mount - don't wait for isInitialized + resumeActiveStream().catch((err) => { + logger.warn('[Copilot] Failed to resume active stream', err) + }) + }, [isSendingMessage, resumeActiveStream]) + /** Load auto-allowed tools once on mount - runs immediately, independent of workflow */ const hasLoadedAutoAllowedToolsRef = useRef(false) useEffect(() => { diff --git a/apps/sim/lib/copilot/api.ts b/apps/sim/lib/copilot/api.ts index c680f9751c..089d6bac72 100644 --- a/apps/sim/lib/copilot/api.ts +++ b/apps/sim/lib/copilot/api.ts @@ -82,6 +82,7 @@ export interface SendMessageRequest { executionId?: string }> commands?: string[] + resumeFromEventId?: number } /** @@ -120,7 +121,7 @@ export async function sendStreamingMessage( request: SendMessageRequest ): Promise { try { - const { abortSignal, ...requestBody } = request + const { abortSignal, resumeFromEventId, ...requestBody } = request try { const preview = Array.isArray((requestBody as any).contexts) ? (requestBody as any).contexts.map((c: any) => ({ @@ -136,8 +137,51 @@ export async function sendStreamingMessage( ? 
(requestBody as any).contexts.length : 0, contextsPreview: preview, + resumeFromEventId, }) } catch {} + + const streamId = request.userMessageId + if (typeof resumeFromEventId === 'number') { + if (!streamId) { + return { + success: false, + error: 'streamId is required to resume a stream', + status: 400, + } + } + const url = `/api/copilot/chat/stream?streamId=${encodeURIComponent( + streamId + )}&from=${encodeURIComponent(String(resumeFromEventId))}` + const response = await fetch(url, { + method: 'GET', + signal: abortSignal, + credentials: 'include', + }) + + if (!response.ok) { + const errorMessage = await handleApiError(response, 'Failed to resume streaming message') + return { + success: false, + error: errorMessage, + status: response.status, + } + } + + if (!response.body) { + return { + success: false, + error: 'No response body received', + status: 500, + } + } + + return { + success: true, + stream: response.body, + } + } + const response = await fetch('/api/copilot/chat', { method: 'POST', headers: { 'Content-Type': 'application/json' }, diff --git a/apps/sim/lib/copilot/orchestrator/stream-buffer.ts b/apps/sim/lib/copilot/orchestrator/stream-buffer.ts new file mode 100644 index 0000000000..11f6518705 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/stream-buffer.ts @@ -0,0 +1,152 @@ +import { createLogger } from '@sim/logger' +import { getRedisClient } from '@/lib/core/config/redis' + +const logger = createLogger('CopilotStreamBuffer') + +const STREAM_TTL_SECONDS = 60 * 60 +const STREAM_EVENT_LIMIT = 5000 + +function getStreamKeyPrefix(streamId: string) { + return `copilot_stream:${streamId}` +} + +function getEventsKey(streamId: string) { + return `${getStreamKeyPrefix(streamId)}:events` +} + +function getSeqKey(streamId: string) { + return `${getStreamKeyPrefix(streamId)}:seq` +} + +function getMetaKey(streamId: string) { + return `${getStreamKeyPrefix(streamId)}:meta` +} + +export type StreamStatus = 'active' | 'complete' | 'error' + +export 
type StreamMeta = { + status: StreamStatus + userId?: string + updatedAt?: string + error?: string +} + +export type StreamEventEntry = { + eventId: number + streamId: string + event: Record +} + +export async function resetStreamBuffer(streamId: string): Promise { + const redis = getRedisClient() + if (!redis) return + try { + await redis.del(getEventsKey(streamId), getSeqKey(streamId), getMetaKey(streamId)) + } catch (error) { + logger.warn('Failed to reset stream buffer', { + streamId, + error: error instanceof Error ? error.message : String(error), + }) + } +} + +export async function setStreamMeta( + streamId: string, + meta: StreamMeta +): Promise { + const redis = getRedisClient() + if (!redis) return + try { + const payload: Record = { + status: meta.status, + updatedAt: meta.updatedAt || new Date().toISOString(), + } + if (meta.userId) payload.userId = meta.userId + if (meta.error) payload.error = meta.error + await redis.hset(getMetaKey(streamId), payload) + await redis.expire(getMetaKey(streamId), STREAM_TTL_SECONDS) + } catch (error) { + logger.warn('Failed to update stream meta', { + streamId, + error: error instanceof Error ? error.message : String(error), + }) + } +} + +export async function getStreamMeta(streamId: string): Promise { + const redis = getRedisClient() + if (!redis) return null + try { + const meta = await redis.hgetall(getMetaKey(streamId)) + if (!meta || Object.keys(meta).length === 0) return null + return meta as StreamMeta + } catch (error) { + logger.warn('Failed to read stream meta', { + streamId, + error: error instanceof Error ? 
error.message : String(error), + }) + return null + } +} + +export async function appendStreamEvent( + streamId: string, + event: Record +): Promise { + const redis = getRedisClient() + if (!redis) { + return { eventId: 0, streamId, event } + } + + try { + const nextId = await redis.incr(getSeqKey(streamId)) + const entry: StreamEventEntry = { eventId: nextId, streamId, event } + await redis.zadd(getEventsKey(streamId), nextId, JSON.stringify(entry)) + + const count = await redis.zcard(getEventsKey(streamId)) + if (count > STREAM_EVENT_LIMIT) { + const trimCount = count - STREAM_EVENT_LIMIT + if (trimCount > 0) { + await redis.zremrangebyrank(getEventsKey(streamId), 0, trimCount - 1) + } + } + + await redis.expire(getEventsKey(streamId), STREAM_TTL_SECONDS) + await redis.expire(getSeqKey(streamId), STREAM_TTL_SECONDS) + + return entry + } catch (error) { + logger.warn('Failed to append stream event', { + streamId, + error: error instanceof Error ? error.message : String(error), + }) + return { eventId: 0, streamId, event } + } +} + +export async function readStreamEvents( + streamId: string, + afterEventId: number +): Promise { + const redis = getRedisClient() + if (!redis) return [] + try { + const raw = await redis.zrangebyscore(getEventsKey(streamId), afterEventId + 1, '+inf') + return raw + .map((entry) => { + try { + return JSON.parse(entry) as StreamEventEntry + } catch { + return null + } + }) + .filter((entry): entry is StreamEventEntry => Boolean(entry)) + } catch (error) { + logger.warn('Failed to read stream events', { + streamId, + error: error instanceof Error ? 
error.message : String(error), + }) + return [] + } +} + diff --git a/apps/sim/stores/panel/copilot/store.ts b/apps/sim/stores/panel/copilot/store.ts index 55b366ecdf..bfb6e2c8e8 100644 --- a/apps/sim/stores/panel/copilot/store.ts +++ b/apps/sim/stores/panel/copilot/store.ts @@ -80,6 +80,7 @@ import { subscriptionKeys } from '@/hooks/queries/subscription' import type { ChatContext, CopilotMessage, + CopilotStreamInfo, CopilotStore, CopilotToolCall, MessageFileAttachment, @@ -92,6 +93,69 @@ import type { WorkflowState } from '@/stores/workflows/workflow/types' const logger = createLogger('CopilotStore') +const STREAM_STORAGE_KEY = 'copilot_active_stream' + +/** + * Flag set on beforeunload to suppress continue option during page refresh/close. + * Aborts during unload should NOT show the continue button. + */ +let isPageUnloading = false +if (typeof window !== 'undefined') { + window.addEventListener('beforeunload', () => { + isPageUnloading = true + }) +} + +function readActiveStreamFromStorage(): CopilotStreamInfo | null { + if (typeof window === 'undefined') return null + try { + const raw = window.sessionStorage.getItem(STREAM_STORAGE_KEY) + logger.info('[Copilot] Reading stream from storage', { + hasRaw: !!raw, + rawPreview: raw ? raw.substring(0, 100) : null, + }) + if (!raw) return null + const parsed = JSON.parse(raw) as CopilotStreamInfo + return parsed?.streamId ? 
parsed : null + } catch (e) { + logger.warn('[Copilot] Failed to read stream from storage', { error: String(e) }) + return null + } +} + +function writeActiveStreamToStorage(info: CopilotStreamInfo | null): void { + if (typeof window === 'undefined') return + try { + if (!info) { + logger.info('[Copilot] Clearing stream from storage', { + isPageUnloading, + stack: new Error().stack?.split('\n').slice(1, 4).join(' <- '), + }) + window.sessionStorage.removeItem(STREAM_STORAGE_KEY) + return + } + logger.info('[Copilot] Writing stream to storage', { + streamId: info.streamId, + lastEventId: info.lastEventId, + }) + window.sessionStorage.setItem(STREAM_STORAGE_KEY, JSON.stringify(info)) + } catch {} +} + +function updateActiveStreamEventId( + get: () => CopilotStore, + set: (next: Partial) => void, + streamId: string, + eventId: number +): void { + const current = get().activeStream + if (!current || current.streamId !== streamId) return + if (eventId <= (current.lastEventId || 0)) return + const next = { ...current, lastEventId: eventId } + set({ activeStream: next }) + writeActiveStreamToStorage(next) +} + // On module load, clear any lingering diff preview (fresh page refresh) try { const diffStore = useWorkflowDiffStore.getState() @@ -1033,6 +1097,28 @@ function appendContinueOptionBlock(blocks: any[]): any[] { ] } +function stripContinueOption(content: string): string { + if (!content || !content.includes(CONTINUE_OPTIONS_TAG)) return content + const next = content.replace(CONTINUE_OPTIONS_TAG, '') + return next.replace(/\n{2,}\s*$/g, '\n').trimEnd() +} + +function stripContinueOptionFromBlocks(blocks: any[]): any[] { + if (!Array.isArray(blocks)) return blocks + return blocks.flatMap((block) => { + if ( + block?.type === TEXT_BLOCK_TYPE && + typeof block.content === 'string' && + block.content.includes(CONTINUE_OPTIONS_TAG) + ) { + const nextContent = stripContinueOption(block.content) + if (!nextContent.trim()) return [] + return [{ ...block, content: nextContent 
}] + } + return [block] + }) +} + function beginThinkingBlock(context: StreamingContext) { if (!context.currentThinkingBlock) { context.currentThinkingBlock = contentBlockPool.get() @@ -1118,12 +1204,18 @@ function appendSubAgentText(context: StreamingContext, parentToolCallId: string, } const sseHandlers: Record = { - chat_id: async (data, context, get) => { + chat_id: async (data, context, get, set) => { context.newChatId = data.chatId - const { currentChat } = get() + const { currentChat, activeStream } = get() if (!currentChat && context.newChatId) { await get().handleNewChatCreation(context.newChatId) } + // Update activeStream with chatId for resume purposes + if (activeStream && context.newChatId && !activeStream.chatId) { + const updatedStream = { ...activeStream, chatId: context.newChatId } + set({ activeStream: updatedStream }) + writeActiveStreamToStorage(updatedStream) + } }, title_updated: (_data, _context, get, set) => { const title = _data.title @@ -2072,6 +2164,7 @@ const initialState = { toolCallsById: {} as Record, suppressAutoSelect: false, autoAllowedTools: [] as string[], + activeStream: null as CopilotStreamInfo | null, messageQueue: [] as import('./types').QueuedMessage[], suppressAbortContinueOption: false, sensitiveCredentialIds: new Set(), @@ -2492,6 +2585,22 @@ export const useCopilotStore = create()( currentUserMessageId: userMessage.id, })) + const activeStream: CopilotStreamInfo = { + streamId: userMessage.id, + workflowId, + chatId: currentChat?.id, + userMessageId: userMessage.id, + assistantMessageId: streamingMessage.id, + lastEventId: 0, + resumeAttempts: 0, + userMessageContent: message, + fileAttachments, + contexts, + startedAt: Date.now(), + } + set({ activeStream }) + writeActiveStreamToStorage(activeStream) + if (isFirstMessage) { const optimisticTitle = message.length > 50 ? 
`${message.substring(0, 47)}...` : message set((state) => ({ @@ -2616,6 +2725,8 @@ export const useCopilotStore = create()( isSendingMessage: false, abortController: null, })) + set({ activeStream: null }) + writeActiveStreamToStorage(null) } } catch (error) { if (error instanceof Error && error.name === 'AbortError') return @@ -2629,14 +2740,240 @@ export const useCopilotStore = create()( isSendingMessage: false, abortController: null, })) + set({ activeStream: null }) + writeActiveStreamToStorage(null) } }, + resumeActiveStream: async () => { + const stored = get().activeStream || readActiveStreamFromStorage() + logger.info('[Copilot] Resume check', { + hasStored: !!stored, + streamId: stored?.streamId, + lastEventId: stored?.lastEventId, + storedWorkflowId: stored?.workflowId, + storedChatId: stored?.chatId, + currentWorkflowId: get().workflowId, + isSendingMessage: get().isSendingMessage, + resumeAttempts: stored?.resumeAttempts, + }) + if (!stored || !stored.streamId) return false + if (get().isSendingMessage) return false + if (get().workflowId && stored.workflowId !== get().workflowId) return false + + if (stored.resumeAttempts >= 3) { + logger.warn('[Copilot] Too many resume attempts, giving up') + return false + } + + const nextStream: CopilotStreamInfo = { + ...stored, + resumeAttempts: (stored.resumeAttempts || 0) + 1, + } + set({ activeStream: nextStream }) + writeActiveStreamToStorage(nextStream) + + // Load existing chat messages from database if we have a chatId but no messages + let messages = get().messages + // Track if this is a fresh page load (no messages in memory) + const isFreshResume = messages.length === 0 + if (isFreshResume && nextStream.chatId) { + try { + logger.info('[Copilot] Loading chat for resume', { chatId: nextStream.chatId }) + const response = await fetch(`/api/copilot/chat?chatId=${nextStream.chatId}`) + if (response.ok) { + const data = await response.json() + if (data.success && data.chat) { + const normalizedMessages = 
normalizeMessagesForUI(data.chat.messages || []) + const toolCallsById = buildToolCallsById(normalizedMessages) + set({ + currentChat: data.chat, + messages: normalizedMessages, + toolCallsById, + streamingPlanContent: data.chat.planArtifact || '', + }) + messages = normalizedMessages + logger.info('[Copilot] Loaded chat for resume', { + chatId: nextStream.chatId, + messageCount: normalizedMessages.length, + }) + } + } + } catch (e) { + logger.warn('[Copilot] Failed to load chat for resume', { error: String(e) }) + } + } + + // ALWAYS fetch buffered events when resuming (to ensure we have complete content) + let bufferedContent = '' + if (nextStream.lastEventId > 0) { + try { + logger.info('[Copilot] Fetching buffered events', { + streamId: nextStream.streamId, + lastEventId: nextStream.lastEventId, + isFreshResume, + }) + const batchUrl = `/api/copilot/chat/stream?streamId=${encodeURIComponent( + nextStream.streamId + )}&from=0&to=${nextStream.lastEventId}&batch=true` + const batchResponse = await fetch(batchUrl, { credentials: 'include' }) + if (batchResponse.ok) { + const batchData = await batchResponse.json() + if (batchData.success && Array.isArray(batchData.events)) { + // Extract text content from buffered events + for (const entry of batchData.events) { + const event = entry.event + if (event?.type === 'content' && typeof event.data === 'string') { + bufferedContent += event.data + } + } + logger.info('[Copilot] Loaded buffered content', { + eventCount: batchData.events.length, + contentLength: bufferedContent.length, + contentPreview: bufferedContent.slice(0, 100), + }) + } else { + logger.warn('[Copilot] Batch response missing events', { + success: batchData.success, + hasEvents: Array.isArray(batchData.events), + }) + } + } else { + logger.warn('[Copilot] Failed to fetch buffered events', { + status: batchResponse.status, + }) + } + } catch (e) { + logger.warn('[Copilot] Failed to fetch buffered events', { error: String(e) }) + } + } + + let nextMessages 
= messages + let cleanedExisting = false + nextMessages = nextMessages.map((m) => { + if (m.id !== nextStream.assistantMessageId) return m + const hasContinueTag = + (typeof m.content === 'string' && m.content.includes(CONTINUE_OPTIONS_TAG)) || + (Array.isArray(m.contentBlocks) && + m.contentBlocks.some( + (b: any) => + b?.type === TEXT_BLOCK_TYPE && + typeof b.content === 'string' && + b.content.includes(CONTINUE_OPTIONS_TAG) + )) + if (!hasContinueTag) return m + cleanedExisting = true + return { + ...m, + content: stripContinueOption(m.content || ''), + contentBlocks: stripContinueOptionFromBlocks(m.contentBlocks || []), + } + }) + + if (!messages.some((m) => m.id === nextStream.userMessageId)) { + const userMessage = createUserMessage( + nextStream.userMessageContent || '', + nextStream.fileAttachments, + nextStream.contexts, + nextStream.userMessageId + ) + nextMessages = [...nextMessages, userMessage] + } + + if (!nextMessages.some((m) => m.id === nextStream.assistantMessageId)) { + // Create assistant message with buffered content pre-loaded + const assistantMessage: CopilotMessage = { + ...createStreamingMessage(), + id: nextStream.assistantMessageId, + content: bufferedContent, + contentBlocks: bufferedContent + ? [{ type: TEXT_BLOCK_TYPE, content: bufferedContent, timestamp: Date.now() }] + : [], + } + nextMessages = [...nextMessages, assistantMessage] + } else if (bufferedContent) { + // Update existing assistant message with buffered content + nextMessages = nextMessages.map((m) => + m.id === nextStream.assistantMessageId + ? 
{ + ...m, + content: bufferedContent, + contentBlocks: [ + { type: TEXT_BLOCK_TYPE, content: bufferedContent, timestamp: Date.now() }, + ], + } + : m + ) + } + + if (cleanedExisting || nextMessages !== messages || bufferedContent) { + set({ messages: nextMessages, currentUserMessageId: nextStream.userMessageId }) + } else { + set({ currentUserMessageId: nextStream.userMessageId }) + } + + const abortController = new AbortController() + set({ isSendingMessage: true, abortController }) + + try { + logger.info('[Copilot] Attempting to resume stream', { + streamId: nextStream.streamId, + lastEventId: nextStream.lastEventId, + isFreshResume, + bufferedContentLength: bufferedContent.length, + assistantMessageId: nextStream.assistantMessageId, + chatId: nextStream.chatId, + }) + const result = await sendStreamingMessage({ + message: nextStream.userMessageContent || '', + userMessageId: nextStream.userMessageId, + workflowId: nextStream.workflowId, + chatId: nextStream.chatId || get().currentChat?.id || undefined, + mode: get().mode === 'ask' ? 'ask' : get().mode === 'plan' ? 
'plan' : 'agent', + model: get().selectedModel, + prefetch: get().agentPrefetch, + stream: true, + resumeFromEventId: nextStream.lastEventId, + abortSignal: abortController.signal, + }) + + logger.info('[Copilot] Resume stream result', { + success: result.success, + hasStream: !!result.stream, + error: result.error, + }) + + if (result.success && result.stream) { + await get().handleStreamingResponse( + result.stream, + nextStream.assistantMessageId, + true, + nextStream.userMessageId + ) + return true + } + set({ isSendingMessage: false, abortController: null }) + } catch (error) { + // Handle AbortError gracefully - expected when user aborts + if (error instanceof Error && (error.name === 'AbortError' || error.message.includes('aborted'))) { + logger.info('[Copilot] Resume stream aborted by user') + set({ isSendingMessage: false, abortController: null }) + return false + } + logger.error('[Copilot] Failed to resume stream', { + error: error instanceof Error ? error.message : String(error), + }) + set({ isSendingMessage: false, abortController: null }) + } + return false + }, + // Abort streaming abortMessage: (options?: { suppressContinueOption?: boolean }) => { const { abortController, isSendingMessage, messages } = get() if (!isSendingMessage || !abortController) return - const suppressContinueOption = options?.suppressContinueOption === true + // Suppress continue option if explicitly requested OR if page is unloading (refresh/close) + const suppressContinueOption = options?.suppressContinueOption === true || isPageUnloading set({ isAborting: true, suppressAbortContinueOption: suppressContinueOption }) try { abortController.abort() @@ -2678,6 +3015,13 @@ export const useCopilotStore = create()( }) } + // Only clear active stream for user-initiated aborts, NOT page unload + // During page unload, keep the stream info so we can resume after refresh + if (!isPageUnloading) { + set({ activeStream: null }) + writeActiveStreamToStorage(null) + } + // Immediately put 
all in-progress tools into aborted state abortAllInProgressTools(set, get) @@ -2704,6 +3048,11 @@ export const useCopilotStore = create()( } } catch { set({ isSendingMessage: false, isAborting: false }) + // Only clear active stream for user-initiated aborts, NOT page unload + if (!isPageUnloading) { + set({ activeStream: null }) + writeActiveStreamToStorage(null) + } } }, @@ -3051,15 +3400,42 @@ export const useCopilotStore = create()( subAgentToolCalls: {}, subAgentBlocks: {}, } + if (isContinuation) { + context.suppressContinueOption = true + } if (isContinuation) { const { messages } = get() const existingMessage = messages.find((m) => m.id === assistantMessageId) + logger.info('[Copilot] Continuation init', { + hasMessage: !!existingMessage, + contentLength: existingMessage?.content?.length || 0, + contentPreview: existingMessage?.content?.slice(0, 100) || '', + contentBlocksCount: existingMessage?.contentBlocks?.length || 0, + }) if (existingMessage) { - if (existingMessage.content) context.accumulatedContent.append(existingMessage.content) - context.contentBlocks = existingMessage.contentBlocks - ? 
[...existingMessage.contentBlocks] - : [] + // Initialize with existing text content (should be buffered content we set earlier) + const existingContent = existingMessage.content || '' + if (existingContent) { + context.accumulatedContent.append(existingContent) + } + // Create fresh text block with existing content (don't reuse to avoid mutation issues) + if (existingContent) { + const textBlock = contentBlockPool.get() + textBlock.type = TEXT_BLOCK_TYPE + textBlock.content = existingContent + textBlock.timestamp = Date.now() + context.contentBlocks = [textBlock] + context.currentTextBlock = textBlock + } + // Copy over any non-text blocks (tool calls, thinking, etc) from existing message + if (existingMessage.contentBlocks) { + for (const block of existingMessage.contentBlocks) { + if (block.type !== TEXT_BLOCK_TYPE) { + context.contentBlocks.push({ ...block }) + } + } + } } } @@ -3074,7 +3450,8 @@ export const useCopilotStore = create()( if (abortController?.signal.aborted) { context.wasAborted = true const { suppressAbortContinueOption } = get() - context.suppressContinueOption = suppressAbortContinueOption === true + // Suppress continue option if explicitly requested OR if page is unloading (refresh/close) + context.suppressContinueOption = suppressAbortContinueOption === true || isPageUnloading if (suppressAbortContinueOption) { set({ suppressAbortContinueOption: false }) } @@ -3085,6 +3462,12 @@ export const useCopilotStore = create()( break } + const eventId = typeof data?.eventId === 'number' ? data.eventId : undefined + const streamId = typeof data?.streamId === 'string' ? 
data.streamId : undefined + if (eventId && streamId) { + updateActiveStreamEventId(get, set, streamId, eventId) + } + // Log SSE events for debugging logger.info('[SSE] Received event', { type: data.type, @@ -3202,10 +3585,20 @@ export const useCopilotStore = create()( : block ) } + if (isContinuation) { + sanitizedContentBlocks = stripContinueOptionFromBlocks(sanitizedContentBlocks) + } if (context.wasAborted && !context.suppressContinueOption) { sanitizedContentBlocks = appendContinueOptionBlock(sanitizedContentBlocks) } + if (!context.streamComplete && !context.wasAborted) { + const resumed = await get().resumeActiveStream() + if (resumed) { + return + } + } + if (context.contentBlocks) { context.contentBlocks.forEach((block) => { if (block.type === TEXT_BLOCK_TYPE || block.type === THINKING_BLOCK_TYPE) { @@ -3215,10 +3608,13 @@ export const useCopilotStore = create()( } const finalContent = stripTodoTags(context.accumulatedContent.toString()) + const finalContentStripped = isContinuation + ? stripContinueOption(finalContent) + : finalContent const finalContentWithOptions = context.wasAborted && !context.suppressContinueOption ? appendContinueOption(finalContent) - : finalContent + : finalContentStripped set((state) => { const snapshotId = state.currentUserMessageId const nextSnapshots = @@ -3229,7 +3625,7 @@ export const useCopilotStore = create()( return updated })() : state.messageSnapshots - return { + const nextState: Partial = { messages: state.messages.map((msg) => msg.id === assistantMessageId ? 
{ @@ -3245,8 +3641,15 @@ export const useCopilotStore = create()( currentUserMessageId: null, messageSnapshots: nextSnapshots, } + return nextState }) + // Only clear active stream if stream completed normally or user aborted (not page unload) + if ((context.streamComplete || context.wasAborted) && !isPageUnloading) { + set({ activeStream: null }) + writeActiveStreamToStorage(null) + } + if (context.newChatId && !get().currentChat) { await get().handleNewChatCreation(context.newChatId) } diff --git a/apps/sim/stores/panel/copilot/types.ts b/apps/sim/stores/panel/copilot/types.ts index 6736f5a60d..706ff29480 100644 --- a/apps/sim/stores/panel/copilot/types.ts +++ b/apps/sim/stores/panel/copilot/types.ts @@ -33,6 +33,20 @@ export interface CopilotToolCall { subAgentStreaming?: boolean } +export interface CopilotStreamInfo { + streamId: string + workflowId: string + chatId?: string + userMessageId: string + assistantMessageId: string + lastEventId: number + resumeAttempts: number + userMessageContent: string + fileAttachments?: MessageFileAttachment[] + contexts?: ChatContext[] + startedAt: number +} + export interface MessageFileAttachment { id: string key: string @@ -154,6 +168,9 @@ export interface CopilotState { // Auto-allowed integration tools (tools that can run without confirmation) autoAllowedTools: string[] + // Active stream metadata for reconnect/replay + activeStream: CopilotStreamInfo | null + // Message queue for messages sent while another is in progress messageQueue: QueuedMessage[] @@ -194,6 +211,7 @@ export interface CopilotActions { toolCallState: 'accepted' | 'rejected' | 'error', toolCallId?: string ) => void + resumeActiveStream: () => Promise setToolCallState: (toolCall: any, newState: ClientToolCallState, options?: any) => void updateToolCallParams: (toolCallId: string, params: Record) => void sendDocsMessage: (query: string, options?: { stream?: boolean; topK?: number }) => Promise From addc7608cf29826d523c31e85a2f1ceaef4c6705 Mon Sep 17 
00:00:00 2001 From: Siddharth Ganesan Date: Tue, 3 Feb 2026 16:36:09 -0800 Subject: [PATCH 12/72] Streaming --- apps/sim/stores/panel/copilot/store.ts | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/apps/sim/stores/panel/copilot/store.ts b/apps/sim/stores/panel/copilot/store.ts index bfb6e2c8e8..e9f33f1c68 100644 --- a/apps/sim/stores/panel/copilot/store.ts +++ b/apps/sim/stores/panel/copilot/store.ts @@ -2804,33 +2804,38 @@ export const useCopilotStore = create()( } } - // ALWAYS fetch buffered events when resuming (to ensure we have complete content) + // Fetch ALL buffered events and display instantly, then continue streaming from highest event let bufferedContent = '' + let resumeFromEventId = nextStream.lastEventId if (nextStream.lastEventId > 0) { try { - logger.info('[Copilot] Fetching buffered events', { + logger.info('[Copilot] Fetching all buffered events', { streamId: nextStream.streamId, - lastEventId: nextStream.lastEventId, - isFreshResume, + savedLastEventId: nextStream.lastEventId, }) + // Fetch ALL events (no 'to' limit) so we get everything buffered while disconnected const batchUrl = `/api/copilot/chat/stream?streamId=${encodeURIComponent( nextStream.streamId - )}&from=0&to=${nextStream.lastEventId}&batch=true` + )}&from=0&batch=true` const batchResponse = await fetch(batchUrl, { credentials: 'include' }) if (batchResponse.ok) { const batchData = await batchResponse.json() if (batchData.success && Array.isArray(batchData.events)) { - // Extract text content from buffered events + // Extract text content and track highest event ID for (const entry of batchData.events) { const event = entry.event if (event?.type === 'content' && typeof event.data === 'string') { bufferedContent += event.data } + // Track highest event ID so we resume from there (not the old lastEventId) + if (typeof entry.eventId === 'number' && entry.eventId > resumeFromEventId) { + resumeFromEventId = entry.eventId + } } - 
logger.info('[Copilot] Loaded buffered content', { + logger.info('[Copilot] Loaded buffered content instantly', { eventCount: batchData.events.length, contentLength: bufferedContent.length, - contentPreview: bufferedContent.slice(0, 100), + resumeFromEventId, }) } else { logger.warn('[Copilot] Batch response missing events', { @@ -2918,7 +2923,8 @@ export const useCopilotStore = create()( try { logger.info('[Copilot] Attempting to resume stream', { streamId: nextStream.streamId, - lastEventId: nextStream.lastEventId, + savedLastEventId: nextStream.lastEventId, + resumeFromEventId, isFreshResume, bufferedContentLength: bufferedContent.length, assistantMessageId: nextStream.assistantMessageId, @@ -2933,7 +2939,7 @@ export const useCopilotStore = create()( model: get().selectedModel, prefetch: get().agentPrefetch, stream: true, - resumeFromEventId: nextStream.lastEventId, + resumeFromEventId, abortSignal: abortController.signal, }) From a58a61b0010a16a467bc90468ef910c412102376 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Tue, 3 Feb 2026 16:40:22 -0800 Subject: [PATCH 13/72] Fix abort --- apps/sim/stores/panel/copilot/store.ts | 39 ++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/apps/sim/stores/panel/copilot/store.ts b/apps/sim/stores/panel/copilot/store.ts index e9f33f1c68..9e8d3b137e 100644 --- a/apps/sim/stores/panel/copilot/store.ts +++ b/apps/sim/stores/panel/copilot/store.ts @@ -2034,6 +2034,38 @@ function stopStreamingUpdates() { streamingUpdateQueue.clear() } +/** Flush pending streaming updates immediately (apply them to state before clearing) */ +function flushStreamingUpdates(set: any) { + if (streamingUpdateRAF !== null) { + cancelAnimationFrame(streamingUpdateRAF) + streamingUpdateRAF = null + } + if (streamingUpdateQueue.size === 0) return + + const updates = new Map(streamingUpdateQueue) + streamingUpdateQueue.clear() + + set((state: CopilotStore) => { + if (updates.size === 0) return state + return { + 
messages: state.messages.map((msg) => { + const update = updates.get(msg.id) + if (update) { + return { + ...msg, + content: '', + contentBlocks: + update.contentBlocks.length > 0 + ? createOptimizedContentBlocks(update.contentBlocks) + : [], + } + } + return msg + }), + } + }) +} + function createOptimizedContentBlocks(contentBlocks: any[]): any[] { const result: any[] = new Array(contentBlocks.length) for (let i = 0; i < contentBlocks.length; i++) { @@ -2983,8 +3015,11 @@ export const useCopilotStore = create()( set({ isAborting: true, suppressAbortContinueOption: suppressContinueOption }) try { abortController.abort() - stopStreamingUpdates() - const lastMessage = messages[messages.length - 1] + // Flush pending streaming updates to preserve content before stopping + flushStreamingUpdates(set) + // Re-read messages after flush to get the latest content + const { messages: updatedMessages } = get() + const lastMessage = updatedMessages[updatedMessages.length - 1] if (lastMessage && lastMessage.role === 'assistant') { const textContent = lastMessage.contentBlocks From be7fb8fc39213c3d5e933d449a129858184f55eb Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Tue, 3 Feb 2026 18:33:47 -0800 Subject: [PATCH 14/72] Things are broken --- apps/sim/stores/panel/copilot/store.ts | 425 ++++++++++++++++--------- apps/sim/stores/panel/copilot/types.ts | 3 +- 2 files changed, 271 insertions(+), 157 deletions(-) diff --git a/apps/sim/stores/panel/copilot/store.ts b/apps/sim/stores/panel/copilot/store.ts index 9e8d3b137e..39a933dd10 100644 --- a/apps/sim/stores/panel/copilot/store.ts +++ b/apps/sim/stores/panel/copilot/store.ts @@ -134,12 +134,18 @@ function writeActiveStreamToStorage(info: CopilotStreamInfo | null): void { window.sessionStorage.removeItem(STREAM_STORAGE_KEY) return } + const payload = JSON.stringify(info) + window.sessionStorage.setItem(STREAM_STORAGE_KEY, payload) + const verified = window.sessionStorage.getItem(STREAM_STORAGE_KEY) === payload 
logger.info('[Copilot] Writing stream to storage', { streamId: info.streamId, lastEventId: info.lastEventId, + userMessageContent: info.userMessageContent?.slice(0, 30), + verified, }) - window.sessionStorage.setItem(STREAM_STORAGE_KEY, JSON.stringify(info)) - } catch {} + } catch (e) { + logger.error('[Copilot] Failed to write stream to storage', { error: String(e) }) + } } function updateActiveStreamEventId( @@ -1046,6 +1052,7 @@ interface StreamingContext { subAgentToolCalls: Record /** Track subagent streaming blocks per parent tool call */ subAgentBlocks: Record + suppressStreamingUpdates?: boolean } type SSEHandler = ( @@ -2018,6 +2025,92 @@ const subAgentSSEHandlers: Record = { }, } +async function applySseEvent( + data: any, + context: StreamingContext, + get: () => CopilotStore, + set: (next: Partial | ((state: CopilotStore) => Partial)) => void +): Promise { + if (data.type === 'subagent_start') { + const toolCallId = data.data?.tool_call_id + if (toolCallId) { + context.subAgentParentToolCallId = toolCallId + const { toolCallsById } = get() + const parentToolCall = toolCallsById[toolCallId] + if (parentToolCall) { + const updatedToolCall: CopilotToolCall = { + ...parentToolCall, + subAgentStreaming: true, + } + const updatedMap = { ...toolCallsById, [toolCallId]: updatedToolCall } + set({ toolCallsById: updatedMap }) + } + logger.info('[SSE] Subagent session started', { + subagent: data.subagent, + parentToolCallId: toolCallId, + }) + } + return true + } + + if (data.type === 'subagent_end') { + const parentToolCallId = context.subAgentParentToolCallId + if (parentToolCallId) { + const { toolCallsById } = get() + const parentToolCall = toolCallsById[parentToolCallId] + if (parentToolCall) { + const updatedToolCall: CopilotToolCall = { + ...parentToolCall, + subAgentContent: context.subAgentContent[parentToolCallId] || '', + subAgentToolCalls: context.subAgentToolCalls[parentToolCallId] || [], + subAgentBlocks: context.subAgentBlocks[parentToolCallId] || 
[], + subAgentStreaming: false, + } + const updatedMap = { ...toolCallsById, [parentToolCallId]: updatedToolCall } + set({ toolCallsById: updatedMap }) + logger.info('[SSE] Subagent session ended', { + subagent: data.subagent, + parentToolCallId, + contentLength: context.subAgentContent[parentToolCallId]?.length || 0, + toolCallCount: context.subAgentToolCalls[parentToolCallId]?.length || 0, + }) + } + } + context.subAgentParentToolCallId = undefined + return true + } + + if (data.subagent) { + const parentToolCallId = context.subAgentParentToolCallId + if (!parentToolCallId) { + logger.warn('[SSE] Subagent event without parent tool call ID', { + type: data.type, + subagent: data.subagent, + }) + return true + } + + logger.info('[SSE] Processing subagent event', { + type: data.type, + subagent: data.subagent, + parentToolCallId, + hasHandler: !!subAgentSSEHandlers[data.type], + }) + + const subAgentHandler = subAgentSSEHandlers[data.type] + if (subAgentHandler) { + await subAgentHandler(data, context, get, set) + } else { + logger.warn('[SSE] No handler for subagent event type', { type: data.type }) + } + return !context.streamComplete + } + + const handler = sseHandlers[data.type] || sseHandlers.default + await handler(data, context, get, set) + return !context.streamComplete +} + // Debounced UI update queue for smoother streaming const streamingUpdateQueue = new Map() let streamingUpdateRAF: number | null = null @@ -2034,7 +2127,7 @@ function stopStreamingUpdates() { streamingUpdateQueue.clear() } -/** Flush pending streaming updates immediately (apply them to state before clearing) */ +/** Flush pending streaming updates immediately (apply them to state before clearing). 
*/ function flushStreamingUpdates(set: any) { if (streamingUpdateRAF !== null) { cancelAnimationFrame(streamingUpdateRAF) @@ -2066,6 +2159,76 @@ function flushStreamingUpdates(set: any) { }) } +function cloneContentBlocks(blocks: any[]): any[] { + if (!Array.isArray(blocks)) return [] + return blocks.map((block) => (block ? { ...block } : block)) +} + +function extractTextFromBlocks(blocks: any[]): string { + if (!Array.isArray(blocks)) return '' + return blocks + .filter((block) => block?.type === TEXT_BLOCK_TYPE && typeof block.content === 'string') + .map((block) => block.content) + .join('') +} + +function appendTextToBlocks(blocks: any[], text: string): any[] { + const nextBlocks = cloneContentBlocks(blocks) + if (!text) return nextBlocks + const lastIndex = nextBlocks.length - 1 + const lastBlock = nextBlocks[lastIndex] + if (lastBlock?.type === TEXT_BLOCK_TYPE) { + const current = typeof lastBlock.content === 'string' ? lastBlock.content : '' + nextBlocks[lastIndex] = { ...lastBlock, content: current + text } + return nextBlocks + } + nextBlocks.push({ type: TEXT_BLOCK_TYPE, content: text, timestamp: Date.now() }) + return nextBlocks +} + +function findLastTextBlock(blocks: any[]): any | null { + if (!Array.isArray(blocks) || blocks.length === 0) return null + const lastBlock = blocks[blocks.length - 1] + return lastBlock?.type === TEXT_BLOCK_TYPE ? lastBlock : null +} + +function replaceTextBlocks(blocks: any[], text: string): any[] { + const next: any[] = [] + let inserted = false + for (const block of blocks || []) { + if (block?.type === TEXT_BLOCK_TYPE) { + if (!inserted && text) { + next.push({ type: TEXT_BLOCK_TYPE, content: text, timestamp: Date.now() }) + inserted = true + } + continue + } + next.push(block ? 
{ ...block } : block) + } + if (!inserted && text) { + next.push({ type: TEXT_BLOCK_TYPE, content: text, timestamp: Date.now() }) + } + return next +} + +function createStreamingContext(messageId: string): StreamingContext { + return { + messageId, + accumulatedContent: new StringBuilder(), + contentBlocks: [], + currentTextBlock: null, + isInThinkingBlock: false, + currentThinkingBlock: null, + isInDesignWorkflowBlock: false, + designWorkflowContent: '', + pendingContent: '', + doneEventCount: 0, + subAgentContent: {}, + subAgentToolCalls: {}, + subAgentBlocks: {}, + } +} + function createOptimizedContentBlocks(contentBlocks: any[]): any[] { const result: any[] = new Array(contentBlocks.length) for (let i = 0; i < contentBlocks.length; i++) { @@ -2074,8 +2237,9 @@ function createOptimizedContentBlocks(contentBlocks: any[]): any[] { } return result } - +`` function updateStreamingMessage(set: any, context: StreamingContext) { + if (context.suppressStreamingUpdates) return const now = performance.now() streamingUpdateQueue.set(context.messageId, context) const timeSinceLastBatch = now - lastBatchTime @@ -2617,6 +2781,8 @@ export const useCopilotStore = create()( currentUserMessageId: userMessage.id, })) + // Create new stream info and write to storage BEFORE starting the stream + // This ensures that if the user refreshes, they get the correct stream const activeStream: CopilotStreamInfo = { streamId: userMessage.id, workflowId, @@ -2630,6 +2796,12 @@ export const useCopilotStore = create()( contexts, startedAt: Date.now(), } + logger.info('[Copilot] Creating new active stream', { + streamId: activeStream.streamId, + workflowId: activeStream.workflowId, + chatId: activeStream.chatId, + userMessageContent: message.slice(0, 50), + }) set({ activeStream }) writeActiveStreamToStorage(activeStream) @@ -2712,7 +2884,8 @@ export const useCopilotStore = create()( result.stream, streamingMessage.id, false, - userMessage.id + userMessage.id, + nextAbortController.signal ) 
set({ chatsLastLoadedAt: null, chatsLoadedForWorkflow: null }) } else { @@ -2778,13 +2951,18 @@ export const useCopilotStore = create()( }, resumeActiveStream: async () => { - const stored = get().activeStream || readActiveStreamFromStorage() + const inMemoryStream = get().activeStream + const storedStream = readActiveStreamFromStorage() + const stored = inMemoryStream || storedStream logger.info('[Copilot] Resume check', { - hasStored: !!stored, + hasInMemory: !!inMemoryStream, + hasStored: !!storedStream, + usingStream: inMemoryStream ? 'memory' : storedStream ? 'storage' : 'none', streamId: stored?.streamId, lastEventId: stored?.lastEventId, storedWorkflowId: stored?.workflowId, storedChatId: stored?.chatId, + userMessageContent: stored?.userMessageContent?.slice(0, 50), currentWorkflowId: get().workflowId, isSendingMessage: get().isSendingMessage, resumeAttempts: stored?.resumeAttempts, @@ -2798,7 +2976,7 @@ export const useCopilotStore = create()( return false } - const nextStream: CopilotStreamInfo = { + let nextStream: CopilotStreamInfo = { ...stored, resumeAttempts: (stored.resumeAttempts || 0) + 1, } @@ -2836,8 +3014,8 @@ export const useCopilotStore = create()( } } - // Fetch ALL buffered events and display instantly, then continue streaming from highest event let bufferedContent = '' + let replayBlocks: any[] | null = null let resumeFromEventId = nextStream.lastEventId if (nextStream.lastEventId > 0) { try { @@ -2845,7 +3023,6 @@ export const useCopilotStore = create()( streamId: nextStream.streamId, savedLastEventId: nextStream.lastEventId, }) - // Fetch ALL events (no 'to' limit) so we get everything buffered while disconnected const batchUrl = `/api/copilot/chat/stream?streamId=${encodeURIComponent( nextStream.streamId )}&from=0&batch=true` @@ -2853,17 +3030,19 @@ export const useCopilotStore = create()( if (batchResponse.ok) { const batchData = await batchResponse.json() if (batchData.success && Array.isArray(batchData.events)) { - // Extract text 
content and track highest event ID + const replayContext = createStreamingContext(nextStream.assistantMessageId) + replayContext.suppressStreamingUpdates = true for (const entry of batchData.events) { const event = entry.event - if (event?.type === 'content' && typeof event.data === 'string') { - bufferedContent += event.data + if (event) { + await applySseEvent(event, replayContext, get, set) } - // Track highest event ID so we resume from there (not the old lastEventId) if (typeof entry.eventId === 'number' && entry.eventId > resumeFromEventId) { resumeFromEventId = entry.eventId } } + bufferedContent = replayContext.accumulatedContent.toString() + replayBlocks = replayContext.contentBlocks logger.info('[Copilot] Loaded buffered content instantly', { eventCount: batchData.events.length, contentLength: bufferedContent.length, @@ -2884,6 +3063,11 @@ export const useCopilotStore = create()( logger.warn('[Copilot] Failed to fetch buffered events', { error: String(e) }) } } + if (resumeFromEventId > nextStream.lastEventId) { + nextStream = { ...nextStream, lastEventId: resumeFromEventId } + set({ activeStream: nextStream }) + writeActiveStreamToStorage(nextStream) + } let nextMessages = messages let cleanedExisting = false @@ -2918,29 +3102,44 @@ export const useCopilotStore = create()( } if (!nextMessages.some((m) => m.id === nextStream.assistantMessageId)) { - // Create assistant message with buffered content pre-loaded const assistantMessage: CopilotMessage = { ...createStreamingMessage(), id: nextStream.assistantMessageId, content: bufferedContent, - contentBlocks: bufferedContent - ? [{ type: TEXT_BLOCK_TYPE, content: bufferedContent, timestamp: Date.now() }] - : [], + contentBlocks: + replayBlocks && replayBlocks.length > 0 + ? replayBlocks + : bufferedContent + ? 
[{ type: TEXT_BLOCK_TYPE, content: bufferedContent, timestamp: Date.now() }] + : [], } nextMessages = [...nextMessages, assistantMessage] - } else if (bufferedContent) { - // Update existing assistant message with buffered content - nextMessages = nextMessages.map((m) => - m.id === nextStream.assistantMessageId - ? { - ...m, - content: bufferedContent, - contentBlocks: [ - { type: TEXT_BLOCK_TYPE, content: bufferedContent, timestamp: Date.now() }, - ], - } - : m - ) + } else if (bufferedContent || (replayBlocks && replayBlocks.length > 0)) { + nextMessages = nextMessages.map((m) => { + if (m.id !== nextStream.assistantMessageId) return m + let nextBlocks = replayBlocks && replayBlocks.length > 0 ? replayBlocks : null + if (!nextBlocks) { + const existingBlocks = Array.isArray(m.contentBlocks) ? m.contentBlocks : [] + const existingText = extractTextFromBlocks(existingBlocks) + if (existingText && bufferedContent.startsWith(existingText)) { + const delta = bufferedContent.slice(existingText.length) + nextBlocks = delta + ? appendTextToBlocks(existingBlocks, delta) + : cloneContentBlocks(existingBlocks) + } else if (!existingText && existingBlocks.length === 0) { + nextBlocks = bufferedContent + ? 
[{ type: TEXT_BLOCK_TYPE, content: bufferedContent, timestamp: Date.now() }] + : [] + } else { + nextBlocks = replaceTextBlocks(existingBlocks, bufferedContent) + } + } + return { + ...m, + content: bufferedContent, + contentBlocks: nextBlocks || [], + } + }) } if (cleanedExisting || nextMessages !== messages || bufferedContent) { @@ -2986,7 +3185,8 @@ export const useCopilotStore = create()( result.stream, nextStream.assistantMessageId, true, - nextStream.userMessageId + nextStream.userMessageId, + abortController.signal ) return true } @@ -3015,9 +3215,7 @@ export const useCopilotStore = create()( set({ isAborting: true, suppressAbortContinueOption: suppressContinueOption }) try { abortController.abort() - // Flush pending streaming updates to preserve content before stopping flushStreamingUpdates(set) - // Re-read messages after flush to get the latest content const { messages: updatedMessages } = get() const lastMessage = updatedMessages[updatedMessages.length - 1] if (lastMessage && lastMessage.role === 'assistant') { @@ -3121,7 +3319,13 @@ export const useCopilotStore = create()( abortSignal: abortController.signal, }) if (result.success && result.stream) { - await get().handleStreamingResponse(result.stream, newAssistantMessage.id, false) + await get().handleStreamingResponse( + result.stream, + newAssistantMessage.id, + false, + undefined, + abortController.signal + ) } else { if (result.error === 'Request was aborted') return const errorMessage = createErrorMessage( @@ -3420,27 +3624,15 @@ export const useCopilotStore = create()( stream: ReadableStream, assistantMessageId: string, isContinuation = false, - triggerUserMessageId?: string + triggerUserMessageId?: string, + abortSignal?: AbortSignal ) => { const reader = stream.getReader() const decoder = new TextDecoder() const startTimeMs = Date.now() + const expectedStreamId = triggerUserMessageId - const context: StreamingContext = { - messageId: assistantMessageId, - accumulatedContent: new 
StringBuilder(), - contentBlocks: [], - currentTextBlock: null, - isInThinkingBlock: false, - currentThinkingBlock: null, - isInDesignWorkflowBlock: false, - designWorkflowContent: '', - pendingContent: '', - doneEventCount: 0, - subAgentContent: {}, - subAgentToolCalls: {}, - subAgentBlocks: {}, - } + const context = createStreamingContext(assistantMessageId) if (isContinuation) { context.suppressContinueOption = true } @@ -3455,27 +3647,25 @@ export const useCopilotStore = create()( contentBlocksCount: existingMessage?.contentBlocks?.length || 0, }) if (existingMessage) { - // Initialize with existing text content (should be buffered content we set earlier) - const existingContent = existingMessage.content || '' - if (existingContent) { - context.accumulatedContent.append(existingContent) - } - // Create fresh text block with existing content (don't reuse to avoid mutation issues) - if (existingContent) { + const existingBlocks = Array.isArray(existingMessage.contentBlocks) + ? existingMessage.contentBlocks + : [] + if (existingBlocks.length > 0) { + const existingText = extractTextFromBlocks(existingBlocks) + if (existingText) { + context.accumulatedContent.append(existingText) + } + const clonedBlocks = cloneContentBlocks(existingBlocks) + context.contentBlocks = clonedBlocks + context.currentTextBlock = findLastTextBlock(clonedBlocks) + } else if (existingMessage.content) { const textBlock = contentBlockPool.get() textBlock.type = TEXT_BLOCK_TYPE - textBlock.content = existingContent + textBlock.content = existingMessage.content textBlock.timestamp = Date.now() context.contentBlocks = [textBlock] context.currentTextBlock = textBlock - } - // Copy over any non-text blocks (tool calls, thinking, etc) from existing message - if (existingMessage.contentBlocks) { - for (const block of existingMessage.contentBlocks) { - if (block.type !== TEXT_BLOCK_TYPE) { - context.contentBlocks.push({ ...block }) - } - } + 
context.accumulatedContent.append(existingMessage.content) } } } @@ -3487,24 +3677,30 @@ export const useCopilotStore = create()( try { for await (const data of parseSSEStream(reader, decoder)) { - const { abortController } = get() - if (abortController?.signal.aborted) { + if (abortSignal?.aborted) { context.wasAborted = true const { suppressAbortContinueOption } = get() - // Suppress continue option if explicitly requested OR if page is unloading (refresh/close) context.suppressContinueOption = suppressAbortContinueOption === true || isPageUnloading if (suppressAbortContinueOption) { set({ suppressAbortContinueOption: false }) } context.pendingContent = '' finalizeThinkingBlock(context) - stopStreamingUpdates() + flushStreamingUpdates(set) reader.cancel() break } const eventId = typeof data?.eventId === 'number' ? data.eventId : undefined const streamId = typeof data?.streamId === 'string' ? data.streamId : undefined + if (expectedStreamId && streamId && streamId !== expectedStreamId) { + logger.warn('[SSE] Ignoring event for mismatched stream', { + expectedStreamId, + streamId, + type: data.type, + }) + continue + } if (eventId && streamId) { updateActiveStreamEventId(get, set, streamId, eventId) } @@ -3520,91 +3716,8 @@ export const useCopilotStore = create()( : JSON.stringify(data.data)?.substring(0, 100), }) - // Handle subagent_start to track parent tool call - if (data.type === 'subagent_start') { - const toolCallId = data.data?.tool_call_id - if (toolCallId) { - context.subAgentParentToolCallId = toolCallId - // Mark the parent tool call as streaming - const { toolCallsById } = get() - const parentToolCall = toolCallsById[toolCallId] - if (parentToolCall) { - const updatedToolCall: CopilotToolCall = { - ...parentToolCall, - subAgentStreaming: true, - } - const updatedMap = { ...toolCallsById, [toolCallId]: updatedToolCall } - set({ toolCallsById: updatedMap }) - } - logger.info('[SSE] Subagent session started', { - subagent: data.subagent, - 
parentToolCallId: toolCallId, - }) - } - continue - } - - // Handle subagent_end to finalize subagent content - if (data.type === 'subagent_end') { - const parentToolCallId = context.subAgentParentToolCallId - if (parentToolCallId) { - // Mark subagent streaming as complete - const { toolCallsById } = get() - const parentToolCall = toolCallsById[parentToolCallId] - if (parentToolCall) { - const updatedToolCall: CopilotToolCall = { - ...parentToolCall, - subAgentContent: context.subAgentContent[parentToolCallId] || '', - subAgentToolCalls: context.subAgentToolCalls[parentToolCallId] || [], - subAgentBlocks: context.subAgentBlocks[parentToolCallId] || [], - subAgentStreaming: false, // Done streaming - } - const updatedMap = { ...toolCallsById, [parentToolCallId]: updatedToolCall } - set({ toolCallsById: updatedMap }) - logger.info('[SSE] Subagent session ended', { - subagent: data.subagent, - parentToolCallId, - contentLength: context.subAgentContent[parentToolCallId]?.length || 0, - toolCallCount: context.subAgentToolCalls[parentToolCallId]?.length || 0, - }) - } - } - context.subAgentParentToolCallId = undefined - continue - } - - // Check if this is a subagent event (has subagent field) - if (data.subagent) { - const parentToolCallId = context.subAgentParentToolCallId - if (!parentToolCallId) { - logger.warn('[SSE] Subagent event without parent tool call ID', { - type: data.type, - subagent: data.subagent, - }) - continue - } - - logger.info('[SSE] Processing subagent event', { - type: data.type, - subagent: data.subagent, - parentToolCallId, - hasHandler: !!subAgentSSEHandlers[data.type], - }) - - const subAgentHandler = subAgentSSEHandlers[data.type] - if (subAgentHandler) { - await subAgentHandler(data, context, get, set) - } else { - logger.warn('[SSE] No handler for subagent event type', { type: data.type }) - } - // Skip regular handlers for subagent events - if (context.streamComplete) break - continue - } - - const handler = sseHandlers[data.type] || 
sseHandlers.default - await handler(data, context, get, set) - if (context.streamComplete) break + const shouldContinue = await applySseEvent(data, context, get, set) + if (!shouldContinue) break } if (!context.wasAborted && sseHandlers.stream_end) { diff --git a/apps/sim/stores/panel/copilot/types.ts b/apps/sim/stores/panel/copilot/types.ts index 706ff29480..9357ebd6bd 100644 --- a/apps/sim/stores/panel/copilot/types.ts +++ b/apps/sim/stores/panel/copilot/types.ts @@ -246,7 +246,8 @@ export interface CopilotActions { stream: ReadableStream, messageId: string, isContinuation?: boolean, - triggerUserMessageId?: string + triggerUserMessageId?: string, + abortSignal?: AbortSignal ) => Promise handleNewChatCreation: (newChatId: string) => Promise loadAutoAllowedTools: () => Promise From b034b1c939e5c693a9e520e05b85825e54d9143e Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Tue, 3 Feb 2026 19:19:26 -0800 Subject: [PATCH 15/72] Streaming seems to work but copilot is dumb --- apps/sim/stores/panel/copilot/store.ts | 18 ++++++++++++++++-- apps/sim/stores/workflow-diff/store.ts | 8 ++++++++ apps/sim/stores/workflow-diff/utils.ts | 18 +++++++++++++++++- 3 files changed, 41 insertions(+), 3 deletions(-) diff --git a/apps/sim/stores/panel/copilot/store.ts b/apps/sim/stores/panel/copilot/store.ts index 39a933dd10..9d7bdb6dc2 100644 --- a/apps/sim/stores/panel/copilot/store.ts +++ b/apps/sim/stores/panel/copilot/store.ts @@ -1303,11 +1303,25 @@ const sseHandlers: Record = { const resultPayload = data?.result || data?.data?.result || data?.data?.data || data?.data || {} const workflowState = resultPayload?.workflowState + logger.info('[SSE] edit_workflow result received', { + hasWorkflowState: !!workflowState, + blockCount: workflowState ? Object.keys(workflowState.blocks || {}).length : 0, + edgeCount: workflowState?.edges?.length ?? 
0, + }) if (workflowState) { const diffStore = useWorkflowDiffStore.getState() - void diffStore.setProposedChanges(workflowState) + // Await the diff application to catch any errors + diffStore.setProposedChanges(workflowState).catch((err) => { + logger.error('[SSE] Failed to apply edit_workflow diff', { + error: err instanceof Error ? err.message : String(err), + }) + }) } - } catch {} + } catch (err) { + logger.error('[SSE] edit_workflow result handling failed', { + error: err instanceof Error ? err.message : String(err), + }) + } } } diff --git a/apps/sim/stores/workflow-diff/store.ts b/apps/sim/stores/workflow-diff/store.ts index 285be7e110..abd57d0aec 100644 --- a/apps/sim/stores/workflow-diff/store.ts +++ b/apps/sim/stores/workflow-diff/store.ts @@ -121,6 +121,13 @@ export const useWorkflowDiffStore = create `${e.source} -> ${e.target}`), + }) const workflowStore = useWorkflowStore.getState() - workflowStore.replaceWorkflowState(cloneWorkflowState(workflowState), options) + const cloned = cloneWorkflowState(workflowState) + logger.info('[applyWorkflowStateToStores] Cloned state edges', { + clonedEdgeCount: cloned.edges?.length ?? 0, + }) + workflowStore.replaceWorkflowState(cloned, options) const subBlockValues = extractSubBlockValues(workflowState) useSubBlockStore.getState().setWorkflowValues(workflowId, subBlockValues) + + // Verify what's in the store after apply + const afterState = workflowStore.getWorkflowState() + logger.info('[applyWorkflowStateToStores] After apply', { + afterEdgeCount: afterState.edges?.length ?? 
0, + }) } export function captureBaselineSnapshot(workflowId: string): WorkflowState { From 8c48cdd068b0083e74635ff61e2d4b03fb545bdc Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Wed, 4 Feb 2026 11:16:19 -0800 Subject: [PATCH 16/72] Fix edge issue --- apps/sim/lib/copilot/orchestrator/index.ts | 45 ++++- .../lib/copilot/orchestrator/sse-handlers.ts | 172 ++++++++++++++---- apps/sim/lib/copilot/orchestrator/subagent.ts | 50 ++++- apps/sim/lib/copilot/orchestrator/types.ts | 2 + 4 files changed, 227 insertions(+), 42 deletions(-) diff --git a/apps/sim/lib/copilot/orchestrator/index.ts b/apps/sim/lib/copilot/orchestrator/index.ts index 24746c24d9..e3a0b35fc5 100644 --- a/apps/sim/lib/copilot/orchestrator/index.ts +++ b/apps/sim/lib/copilot/orchestrator/index.ts @@ -3,9 +3,14 @@ import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' import { env } from '@/lib/core/config/env' import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser' import { + getToolCallIdFromEvent, handleSubagentRouting, + markToolCallSeen, + markToolResultSeen, sseHandlers, subAgentHandlers, + wasToolCallSeen, + wasToolResultSeen, } from '@/lib/copilot/orchestrator/sse-handlers' import { prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor' import type { @@ -90,7 +95,45 @@ export async function orchestrateCopilotStream( break } - await forwardEvent(event, options) + // Skip tool_result events for tools the sim-side already executed. + // The sim-side emits its own tool_result with complete data. + // For server-side tools (not executed by sim), we still forward the Go backend's tool_result. + const toolCallId = getToolCallIdFromEvent(event) + const eventData = + typeof event.data === 'string' + ? 
(() => { + try { + return JSON.parse(event.data) + } catch { + return undefined + } + })() + : event.data + + const isPartialToolCall = event.type === 'tool_call' && eventData?.partial === true + + const shouldSkipToolCall = + event.type === 'tool_call' && + !!toolCallId && + !isPartialToolCall && + (wasToolResultSeen(toolCallId) || wasToolCallSeen(toolCallId)) + + if (event.type === 'tool_call' && toolCallId && !isPartialToolCall && !shouldSkipToolCall) { + markToolCallSeen(toolCallId) + } + + const shouldSkipToolResult = + event.type === 'tool_result' && + (() => { + if (!toolCallId) return false + if (wasToolResultSeen(toolCallId)) return true + markToolResultSeen(toolCallId) + return false + })() + + if (!shouldSkipToolCall && !shouldSkipToolResult) { + await forwardEvent(event, options) + } if (event.type === 'subagent_start') { const toolCallId = event.data?.tool_call_id diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts index c738674be6..0198599c96 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts @@ -13,6 +13,64 @@ import { INTERRUPT_TOOL_SET, SUBAGENT_TOOL_SET } from '@/lib/copilot/orchestrato const logger = createLogger('CopilotSseHandlers') +/** + * Tracks tool call IDs for which a tool_call has already been forwarded/emitted (non-partial). + */ +const seenToolCalls = new Set() + +/** + * Tracks tool call IDs for which a tool_result has already been emitted or forwarded. 
+ */ +const seenToolResults = new Set() + +export function markToolCallSeen(toolCallId: string): void { + seenToolCalls.add(toolCallId) + setTimeout(() => { + seenToolCalls.delete(toolCallId) + }, 5 * 60 * 1000) +} + +export function wasToolCallSeen(toolCallId: string): boolean { + return seenToolCalls.has(toolCallId) +} + +type EventDataObject = Record | undefined + +const parseEventData = (data: unknown): EventDataObject => { + if (!data) return undefined + if (typeof data !== 'string') return data as EventDataObject + try { + return JSON.parse(data) as EventDataObject + } catch { + return undefined + } +} + +const getEventData = (event: SSEEvent): EventDataObject => parseEventData(event.data) + +export function getToolCallIdFromEvent(event: SSEEvent): string | undefined { + const data = getEventData(event) + return event.toolCallId || data?.id || data?.toolCallId +} + +/** + * Mark a tool call as executed by the sim-side. + * This prevents the Go backend's duplicate tool_result from being forwarded. + */ +export function markToolResultSeen(toolCallId: string): void { + seenToolResults.add(toolCallId) + setTimeout(() => { + seenToolResults.delete(toolCallId) + }, 5 * 60 * 1000) +} + +/** + * Check if a tool call was executed by the sim-side. + */ +export function wasToolResultSeen(toolCallId: string): boolean { + return seenToolResults.has(toolCallId) +} + /** * Respond tools are internal to the copilot's subagent system. * They're used by subagents to signal completion and should NOT be executed by the sim side. 
@@ -56,6 +114,7 @@ async function executeToolAndReport( if (!toolCall) return if (toolCall.status === 'executing') return + if (wasToolResultSeen(toolCall.id)) return toolCall.status = 'executing' try { @@ -79,6 +138,8 @@ async function executeToolAndReport( } } + markToolResultSeen(toolCall.id) + await markToolComplete( toolCall.id, toolCall.name, @@ -90,8 +151,11 @@ async function executeToolAndReport( await options?.onEvent?.({ type: 'tool_result', toolCallId: toolCall.id, + toolName: toolCall.name, + success: result.success, + result: result.output, data: { - id: toolCall.id, + id: toolCall.id, name: toolCall.name, success: result.success, result: result.output, @@ -102,6 +166,8 @@ async function executeToolAndReport( toolCall.error = error instanceof Error ? error.message : String(error) toolCall.endTime = Date.now() + markToolResultSeen(toolCall.id) + await markToolComplete(toolCall.id, toolCall.name, 500, toolCall.error) await options?.onEvent?.({ @@ -137,16 +203,17 @@ export const sseHandlers: Record = { }, title_updated: () => {}, tool_result: (event, context) => { - const toolCallId = event.toolCallId || event.data?.id + const data = getEventData(event) + const toolCallId = event.toolCallId || data?.id if (!toolCallId) return const current = context.toolCalls.get(toolCallId) if (!current) return // Determine success: explicit success field, or if there's result data without explicit failure - const hasExplicitSuccess = event.data?.success !== undefined || event.data?.result?.success !== undefined - const explicitSuccess = event.data?.success ?? event.data?.result?.success - const hasResultData = event.data?.result !== undefined || event.data?.data !== undefined - const hasError = !!event.data?.error || !!event.data?.result?.error + const hasExplicitSuccess = data?.success !== undefined || data?.result?.success !== undefined + const explicitSuccess = data?.success ?? 
data?.result?.success + const hasResultData = data?.result !== undefined || data?.data !== undefined + const hasError = !!data?.error || !!data?.result?.error // If explicitly set, use that; otherwise infer from data presence const success = hasExplicitSuccess ? !!explicitSuccess : (hasResultData && !hasError) @@ -156,25 +223,27 @@ export const sseHandlers: Record = { if (hasResultData) { current.result = { success, - output: event.data?.result || event.data?.data, + output: data?.result || data?.data, } } if (hasError) { - current.error = event.data?.error || event.data?.result?.error + current.error = data?.error || data?.result?.error } }, tool_error: (event, context) => { - const toolCallId = event.toolCallId || event.data?.id + const data = getEventData(event) + const toolCallId = event.toolCallId || data?.id if (!toolCallId) return const current = context.toolCalls.get(toolCallId) if (!current) return current.status = 'error' - current.error = event.data?.error || 'Tool execution failed' + current.error = data?.error || 'Tool execution failed' current.endTime = Date.now() }, tool_generating: (event, context) => { - const toolCallId = event.toolCallId || event.data?.toolCallId || event.data?.id - const toolName = event.toolName || event.data?.toolName || event.data?.name + const data = getEventData(event) + const toolCallId = event.toolCallId || data?.toolCallId || data?.id + const toolName = event.toolName || data?.toolName || data?.name if (!toolCallId || !toolName) return if (!context.toolCalls.has(toolCallId)) { context.toolCalls.set(toolCallId, { @@ -186,7 +255,7 @@ export const sseHandlers: Record = { } }, tool_call: async (event, context, execContext, options) => { - const toolData = event.data || {} + const toolData = getEventData(event) || {} const toolCallId = toolData.id || event.toolCallId const toolName = toolData.name || event.toolName if (!toolCallId || !toolName) return @@ -194,20 +263,35 @@ export const sseHandlers: Record = { const args = 
toolData.arguments || toolData.input || event.data?.input const isPartial = toolData.partial === true const existing = context.toolCalls.get(toolCallId) - const toolCall: ToolCallState = existing - ? { ...existing, status: 'pending', params: args || existing.params } - : { - id: toolCallId, - name: toolName, - status: 'pending', - params: args, - startTime: Date.now(), - } - - context.toolCalls.set(toolCallId, toolCall) - addContentBlock(context, { type: 'tool_call', toolCall }) + + // If we've already completed this tool call, ignore late/duplicate tool_call events + // to avoid resetting UI/state back to pending and re-executing. + if (existing?.endTime || (existing && existing.status !== 'pending' && existing.status !== 'executing')) { + if (!existing.params && args) { + existing.params = args + } + return + } + + if (existing) { + if (args && !existing.params) existing.params = args + } else { + context.toolCalls.set(toolCallId, { + id: toolCallId, + name: toolName, + status: 'pending', + params: args, + startTime: Date.now(), + }) + const created = context.toolCalls.get(toolCallId)! 
+ addContentBlock(context, { type: 'tool_call', toolCall: created }) + } if (isPartial) return + if (wasToolResultSeen(toolCallId)) return + + const toolCall = context.toolCalls.get(toolCallId) + if (!toolCall) return // Subagent tools are executed by the copilot backend, not sim side if (SUBAGENT_TOOL_SET.has(toolName)) { @@ -243,6 +327,7 @@ export const sseHandlers: Record = { decision.message || 'Tool execution rejected', { skipped: true, reason: 'user_rejected' } ) + markToolResultSeen(toolCall.id) await options.onEvent?.({ type: 'tool_result', toolCallId: toolCall.id, @@ -266,6 +351,7 @@ export const sseHandlers: Record = { decision.message || 'Tool execution moved to background', { background: true } ) + markToolResultSeen(toolCall.id) await options.onEvent?.({ type: 'tool_result', toolCallId: toolCall.id, @@ -346,13 +432,19 @@ export const subAgentHandlers: Record = { tool_call: async (event, context, execContext, options) => { const parentToolCallId = context.subAgentParentToolCallId if (!parentToolCallId) return - const toolData = event.data || {} + const toolData = getEventData(event) || {} const toolCallId = toolData.id || event.toolCallId const toolName = toolData.name || event.toolName if (!toolCallId || !toolName) return const isPartial = toolData.partial === true const args = toolData.arguments || toolData.input || event.data?.input + const existing = context.toolCalls.get(toolCallId) + // Ignore late/duplicate tool_call events once we already have a result + if (wasToolResultSeen(toolCallId) || existing?.endTime) { + return + } + const toolCall: ToolCallState = { id: toolCallId, name: toolName, @@ -361,12 +453,16 @@ export const subAgentHandlers: Record = { startTime: Date.now(), } - // Store in both places - subAgentToolCalls for tracking and toolCalls for executeToolAndReport + // Store in both places - but do NOT overwrite existing tool call state for the same id if (!context.subAgentToolCalls[parentToolCallId]) { 
context.subAgentToolCalls[parentToolCallId] = [] } - context.subAgentToolCalls[parentToolCallId].push(toolCall) - context.toolCalls.set(toolCallId, toolCall) + if (!context.subAgentToolCalls[parentToolCallId].some((tc) => tc.id === toolCallId)) { + context.subAgentToolCalls[parentToolCallId].push(toolCall) + } + if (!context.toolCalls.has(toolCallId)) { + context.toolCalls.set(toolCallId, toolCall) + } if (isPartial) return @@ -385,7 +481,8 @@ export const subAgentHandlers: Record = { tool_result: (event, context) => { const parentToolCallId = context.subAgentParentToolCallId if (!parentToolCallId) return - const toolCallId = event.toolCallId || event.data?.id + const data = getEventData(event) + const toolCallId = event.toolCallId || data?.id if (!toolCallId) return // Update in subAgentToolCalls @@ -396,31 +493,30 @@ export const subAgentHandlers: Record = { const mainToolCall = context.toolCalls.get(toolCallId) // Use same success inference logic as main handler - const hasExplicitSuccess = - event.data?.success !== undefined || event.data?.result?.success !== undefined - const explicitSuccess = event.data?.success ?? event.data?.result?.success - const hasResultData = event.data?.result !== undefined || event.data?.data !== undefined - const hasError = !!event.data?.error || !!event.data?.result?.error + const hasExplicitSuccess = data?.success !== undefined || data?.result?.success !== undefined + const explicitSuccess = data?.success ?? data?.result?.success + const hasResultData = data?.result !== undefined || data?.data !== undefined + const hasError = !!data?.error || !!data?.result?.error const success = hasExplicitSuccess ? !!explicitSuccess : hasResultData && !hasError const status = success ? 'success' : 'error' const endTime = Date.now() const result = hasResultData - ? { success, output: event.data?.result || event.data?.data } + ? 
{ success, output: data?.result || data?.data } : undefined if (subAgentToolCall) { subAgentToolCall.status = status subAgentToolCall.endTime = endTime if (result) subAgentToolCall.result = result - if (hasError) subAgentToolCall.error = event.data?.error || event.data?.result?.error + if (hasError) subAgentToolCall.error = data?.error || data?.result?.error } if (mainToolCall) { mainToolCall.status = status mainToolCall.endTime = endTime if (result) mainToolCall.result = result - if (hasError) mainToolCall.error = event.data?.error || event.data?.result?.error + if (hasError) mainToolCall.error = data?.error || data?.result?.error } }, } diff --git a/apps/sim/lib/copilot/orchestrator/subagent.ts b/apps/sim/lib/copilot/orchestrator/subagent.ts index bdc69fd68b..3d3795f7bd 100644 --- a/apps/sim/lib/copilot/orchestrator/subagent.ts +++ b/apps/sim/lib/copilot/orchestrator/subagent.ts @@ -2,9 +2,14 @@ import { createLogger } from '@sim/logger' import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser' import { + getToolCallIdFromEvent, + handleSubagentRouting, + markToolCallSeen, + markToolResultSeen, sseHandlers, subAgentHandlers, - handleSubagentRouting, + wasToolCallSeen, + wasToolResultSeen, } from '@/lib/copilot/orchestrator/sse-handlers' import { prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor' import type { @@ -20,10 +25,11 @@ import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' const logger = createLogger('CopilotSubagentOrchestrator') const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT -export interface SubagentOrchestratorOptions extends OrchestratorOptions { +export interface SubagentOrchestratorOptions extends Omit { userId: string workflowId?: string workspaceId?: string + onComplete?: (result: SubagentOrchestratorResult) => void | Promise } export interface SubagentOrchestratorResult { @@ -106,7 +112,45 @@ export 
async function orchestrateSubagentStream( break } - await forwardEvent(event, options) + // Skip tool_result events for tools the sim-side already executed. + // The sim-side emits its own tool_result with complete data. + // For server-side tools (not executed by sim), we still forward the Go backend's tool_result. + const toolCallId = getToolCallIdFromEvent(event) + const eventData = + typeof event.data === 'string' + ? (() => { + try { + return JSON.parse(event.data) + } catch { + return undefined + } + })() + : event.data + + const isPartialToolCall = event.type === 'tool_call' && eventData?.partial === true + + const shouldSkipToolCall = + event.type === 'tool_call' && + !!toolCallId && + !isPartialToolCall && + (wasToolResultSeen(toolCallId) || wasToolCallSeen(toolCallId)) + + if (event.type === 'tool_call' && toolCallId && !isPartialToolCall && !shouldSkipToolCall) { + markToolCallSeen(toolCallId) + } + + const shouldSkipToolResult = + event.type === 'tool_result' && + (() => { + if (!toolCallId) return false + if (wasToolResultSeen(toolCallId)) return true + markToolResultSeen(toolCallId) + return false + })() + + if (!shouldSkipToolCall && !shouldSkipToolResult) { + await forwardEvent(event, options) + } if (event.type === 'structured_result' || event.type === 'subagent_result') { structuredResult = normalizeStructuredResult(event.data) diff --git a/apps/sim/lib/copilot/orchestrator/types.ts b/apps/sim/lib/copilot/orchestrator/types.ts index 12cdee9da2..52966d0b39 100644 --- a/apps/sim/lib/copilot/orchestrator/types.ts +++ b/apps/sim/lib/copilot/orchestrator/types.ts @@ -23,6 +23,8 @@ export interface SSEEvent { subagent?: string toolCallId?: string toolName?: string + success?: boolean + result?: any } export type ToolCallStatus = 'pending' | 'executing' | 'success' | 'error' | 'skipped' | 'rejected' From d8daf3a24879919a96d4ecd1917c96905e480334 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Wed, 4 Feb 2026 11:27:45 -0800 Subject: [PATCH 17/72] 
LUAAAA --- apps/sim/lib/copilot/orchestrator/index.ts | 49 ++++++++-------- .../lib/copilot/orchestrator/sse-handlers.ts | 44 ++++++++++++++- .../lib/copilot/orchestrator/stream-buffer.ts | 48 ++++++++++------ apps/sim/lib/copilot/orchestrator/subagent.ts | 56 +++++++++---------- 4 files changed, 126 insertions(+), 71 deletions(-) diff --git a/apps/sim/lib/copilot/orchestrator/index.ts b/apps/sim/lib/copilot/orchestrator/index.ts index e3a0b35fc5..f2286de75b 100644 --- a/apps/sim/lib/copilot/orchestrator/index.ts +++ b/apps/sim/lib/copilot/orchestrator/index.ts @@ -7,6 +7,7 @@ import { handleSubagentRouting, markToolCallSeen, markToolResultSeen, + normalizeSseEvent, sseHandlers, subAgentHandlers, wasToolCallSeen, @@ -95,35 +96,33 @@ export async function orchestrateCopilotStream( break } + const normalizedEvent = normalizeSseEvent(event) + // Skip tool_result events for tools the sim-side already executed. // The sim-side emits its own tool_result with complete data. // For server-side tools (not executed by sim), we still forward the Go backend's tool_result. - const toolCallId = getToolCallIdFromEvent(event) - const eventData = - typeof event.data === 'string' - ? 
(() => { - try { - return JSON.parse(event.data) - } catch { - return undefined - } - })() - : event.data - - const isPartialToolCall = event.type === 'tool_call' && eventData?.partial === true + const toolCallId = getToolCallIdFromEvent(normalizedEvent) + const eventData = normalizedEvent.data + + const isPartialToolCall = normalizedEvent.type === 'tool_call' && eventData?.partial === true const shouldSkipToolCall = - event.type === 'tool_call' && + normalizedEvent.type === 'tool_call' && !!toolCallId && !isPartialToolCall && (wasToolResultSeen(toolCallId) || wasToolCallSeen(toolCallId)) - if (event.type === 'tool_call' && toolCallId && !isPartialToolCall && !shouldSkipToolCall) { + if ( + normalizedEvent.type === 'tool_call' && + toolCallId && + !isPartialToolCall && + !shouldSkipToolCall + ) { markToolCallSeen(toolCallId) } const shouldSkipToolResult = - event.type === 'tool_result' && + normalizedEvent.type === 'tool_result' && (() => { if (!toolCallId) return false if (wasToolResultSeen(toolCallId)) return true @@ -132,11 +131,11 @@ export async function orchestrateCopilotStream( })() if (!shouldSkipToolCall && !shouldSkipToolResult) { - await forwardEvent(event, options) + await forwardEvent(normalizedEvent, options) } - if (event.type === 'subagent_start') { - const toolCallId = event.data?.tool_call_id + if (normalizedEvent.type === 'subagent_start') { + const toolCallId = normalizedEvent.data?.tool_call_id if (toolCallId) { context.subAgentParentToolCallId = toolCallId context.subAgentContent[toolCallId] = '' @@ -145,23 +144,23 @@ export async function orchestrateCopilotStream( continue } - if (event.type === 'subagent_end') { + if (normalizedEvent.type === 'subagent_end') { context.subAgentParentToolCallId = undefined continue } - if (handleSubagentRouting(event, context)) { - const handler = subAgentHandlers[event.type] + if (handleSubagentRouting(normalizedEvent, context)) { + const handler = subAgentHandlers[normalizedEvent.type] if (handler) { - await 
handler(event, context, execContext, options) + await handler(normalizedEvent, context, execContext, options) } if (context.streamComplete) break continue } - const handler = sseHandlers[event.type] + const handler = sseHandlers[normalizedEvent.type] if (handler) { - await handler(event, context, execContext, options) + await handler(normalizedEvent, context, execContext, options) } if (context.streamComplete) break } diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts index 0198599c96..4ad1673459 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts @@ -38,7 +38,9 @@ type EventDataObject = Record | undefined const parseEventData = (data: unknown): EventDataObject => { if (!data) return undefined - if (typeof data !== 'string') return data as EventDataObject + if (typeof data !== 'string') { + return data as EventDataObject + } try { return JSON.parse(data) as EventDataObject } catch { @@ -46,13 +48,51 @@ const parseEventData = (data: unknown): EventDataObject => { } } -const getEventData = (event: SSEEvent): EventDataObject => parseEventData(event.data) +const hasToolFields = (data: EventDataObject): boolean => { + if (!data) return false + return ( + data.id !== undefined || + data.toolCallId !== undefined || + data.name !== undefined || + data.success !== undefined || + data.result !== undefined || + data.arguments !== undefined + ) +} + +const getEventData = (event: SSEEvent): EventDataObject => { + const topLevel = parseEventData(event.data) + if (!topLevel) return undefined + if (hasToolFields(topLevel)) return topLevel + const nested = parseEventData(topLevel.data) + return nested || topLevel +} export function getToolCallIdFromEvent(event: SSEEvent): string | undefined { const data = getEventData(event) return event.toolCallId || data?.id || data?.toolCallId } +/** Normalizes SSE events so tool metadata is available at the top 
level. */ +export function normalizeSseEvent(event: SSEEvent): SSEEvent { + if (!event) return event + const data = getEventData(event) + if (!data) return event + const toolCallId = event.toolCallId || data.id || data.toolCallId + const toolName = event.toolName || data.name || data.toolName + const success = event.success ?? data.success + const result = event.result ?? data.result + const normalizedData = typeof event.data === 'string' ? data : event.data + return { + ...event, + data: normalizedData, + toolCallId, + toolName, + success, + result, + } +} + /** * Mark a tool call as executed by the sim-side. * This prevents the Go backend's duplicate tool_result from being forwarded. diff --git a/apps/sim/lib/copilot/orchestrator/stream-buffer.ts b/apps/sim/lib/copilot/orchestrator/stream-buffer.ts index 11f6518705..b1b37bb552 100644 --- a/apps/sim/lib/copilot/orchestrator/stream-buffer.ts +++ b/apps/sim/lib/copilot/orchestrator/stream-buffer.ts @@ -6,6 +6,25 @@ const logger = createLogger('CopilotStreamBuffer') const STREAM_TTL_SECONDS = 60 * 60 const STREAM_EVENT_LIMIT = 5000 +const APPEND_STREAM_EVENT_LUA = ` +local seqKey = KEYS[1] +local eventsKey = KEYS[2] +local ttl = tonumber(ARGV[1]) +local limit = tonumber(ARGV[2]) +local streamId = ARGV[3] +local eventJson = ARGV[4] + +local id = redis.call('INCR', seqKey) +local entry = '{"eventId":' .. id .. ',"streamId":' .. cjson.encode(streamId) .. ',"event":' .. eventJson .. 
'}' +redis.call('ZADD', eventsKey, id, entry) +redis.call('EXPIRE', eventsKey, ttl) +redis.call('EXPIRE', seqKey, ttl) +if limit > 0 then + redis.call('ZREMRANGEBYRANK', eventsKey, 0, -limit-1) +end +return id +` + function getStreamKeyPrefix(streamId: string) { return `copilot_stream:${streamId}` } @@ -99,22 +118,19 @@ export async function appendStreamEvent( } try { - const nextId = await redis.incr(getSeqKey(streamId)) - const entry: StreamEventEntry = { eventId: nextId, streamId, event } - await redis.zadd(getEventsKey(streamId), nextId, JSON.stringify(entry)) - - const count = await redis.zcard(getEventsKey(streamId)) - if (count > STREAM_EVENT_LIMIT) { - const trimCount = count - STREAM_EVENT_LIMIT - if (trimCount > 0) { - await redis.zremrangebyrank(getEventsKey(streamId), 0, trimCount - 1) - } - } - - await redis.expire(getEventsKey(streamId), STREAM_TTL_SECONDS) - await redis.expire(getSeqKey(streamId), STREAM_TTL_SECONDS) - - return entry + const eventJson = JSON.stringify(event) + const nextId = await redis.eval( + APPEND_STREAM_EVENT_LUA, + 2, + getSeqKey(streamId), + getEventsKey(streamId), + STREAM_TTL_SECONDS, + STREAM_EVENT_LIMIT, + streamId, + eventJson + ) + const eventId = typeof nextId === 'number' ? nextId : Number(nextId) + return { eventId, streamId, event } } catch (error) { logger.warn('Failed to append stream event', { streamId, diff --git a/apps/sim/lib/copilot/orchestrator/subagent.ts b/apps/sim/lib/copilot/orchestrator/subagent.ts index 3d3795f7bd..1d649eb3c9 100644 --- a/apps/sim/lib/copilot/orchestrator/subagent.ts +++ b/apps/sim/lib/copilot/orchestrator/subagent.ts @@ -6,6 +6,7 @@ import { handleSubagentRouting, markToolCallSeen, markToolResultSeen, + normalizeSseEvent, sseHandlers, subAgentHandlers, wasToolCallSeen, @@ -112,35 +113,33 @@ export async function orchestrateSubagentStream( break } + const normalizedEvent = normalizeSseEvent(event) + // Skip tool_result events for tools the sim-side already executed. 
// The sim-side emits its own tool_result with complete data. // For server-side tools (not executed by sim), we still forward the Go backend's tool_result. - const toolCallId = getToolCallIdFromEvent(event) - const eventData = - typeof event.data === 'string' - ? (() => { - try { - return JSON.parse(event.data) - } catch { - return undefined - } - })() - : event.data - - const isPartialToolCall = event.type === 'tool_call' && eventData?.partial === true + const toolCallId = getToolCallIdFromEvent(normalizedEvent) + const eventData = normalizedEvent.data + + const isPartialToolCall = normalizedEvent.type === 'tool_call' && eventData?.partial === true const shouldSkipToolCall = - event.type === 'tool_call' && + normalizedEvent.type === 'tool_call' && !!toolCallId && !isPartialToolCall && (wasToolResultSeen(toolCallId) || wasToolCallSeen(toolCallId)) - if (event.type === 'tool_call' && toolCallId && !isPartialToolCall && !shouldSkipToolCall) { + if ( + normalizedEvent.type === 'tool_call' && + toolCallId && + !isPartialToolCall && + !shouldSkipToolCall + ) { markToolCallSeen(toolCallId) } const shouldSkipToolResult = - event.type === 'tool_result' && + normalizedEvent.type === 'tool_result' && (() => { if (!toolCallId) return false if (wasToolResultSeen(toolCallId)) return true @@ -149,18 +148,18 @@ export async function orchestrateSubagentStream( })() if (!shouldSkipToolCall && !shouldSkipToolResult) { - await forwardEvent(event, options) + await forwardEvent(normalizedEvent, options) } - if (event.type === 'structured_result' || event.type === 'subagent_result') { - structuredResult = normalizeStructuredResult(event.data) + if (normalizedEvent.type === 'structured_result' || normalizedEvent.type === 'subagent_result') { + structuredResult = normalizeStructuredResult(normalizedEvent.data) context.streamComplete = true continue } // Handle subagent_start/subagent_end events to track nested subagent calls - if (event.type === 'subagent_start') { - const toolCallId = 
event.data?.tool_call_id + if (normalizedEvent.type === 'subagent_start') { + const toolCallId = normalizedEvent.data?.tool_call_id if (toolCallId) { context.subAgentParentToolCallId = toolCallId context.subAgentContent[toolCallId] = '' @@ -169,7 +168,7 @@ export async function orchestrateSubagentStream( continue } - if (event.type === 'subagent_end') { + if (normalizedEvent.type === 'subagent_end') { context.subAgentParentToolCallId = undefined continue } @@ -177,22 +176,23 @@ export async function orchestrateSubagentStream( // For direct subagent calls, events may have the subagent field set (e.g., subagent: "discovery") // but no subagent_start event because this IS the top-level agent. Skip subagent routing // for events where the subagent field matches the current agentId - these are top-level events. - const isTopLevelSubagentEvent = event.subagent === agentId && !context.subAgentParentToolCallId + const isTopLevelSubagentEvent = + normalizedEvent.subagent === agentId && !context.subAgentParentToolCallId // Only route to subagent handlers for nested subagent events (not matching current agentId) - if (!isTopLevelSubagentEvent && handleSubagentRouting(event, context)) { - const handler = subAgentHandlers[event.type] + if (!isTopLevelSubagentEvent && handleSubagentRouting(normalizedEvent, context)) { + const handler = subAgentHandlers[normalizedEvent.type] if (handler) { - await handler(event, context, execContext, options) + await handler(normalizedEvent, context, execContext, options) } if (context.streamComplete) break continue } // Process as a regular SSE event (including top-level subagent events) - const handler = sseHandlers[event.type] + const handler = sseHandlers[normalizedEvent.type] if (handler) { - await handler(event, context, execContext, options) + await handler(normalizedEvent, context, execContext, options) } if (context.streamComplete) break } From cbd7bb660d07fc83958395c0586d8cef3a897e95 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan 
Date: Wed, 4 Feb 2026 12:33:53 -0800 Subject: [PATCH 18/72] Fix stream buffer --- apps/sim/app/api/copilot/chat/route.ts | 36 ++++++- .../lib/copilot/orchestrator/stream-buffer.ts | 98 +++++++++++++++++++ apps/sim/stores/panel/copilot/store.ts | 2 +- 3 files changed, 131 insertions(+), 5 deletions(-) diff --git a/apps/sim/app/api/copilot/chat/route.ts b/apps/sim/app/api/copilot/chat/route.ts index 3b2dee11ba..198300cdc9 100644 --- a/apps/sim/app/api/copilot/chat/route.ts +++ b/apps/sim/app/api/copilot/chat/route.ts @@ -26,7 +26,7 @@ import { tools } from '@/tools/registry' import { getLatestVersionTools, stripVersionSuffix } from '@/tools/utils' import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator' import { - appendStreamEvent, + createStreamEventWriter, resetStreamBuffer, setStreamMeta, } from '@/lib/copilot/orchestrator/stream-buffer' @@ -497,24 +497,44 @@ export async function POST(req: NextRequest) { if (stream) { const streamId = userMessageIdToUse + let eventWriter: ReturnType | null = null + let clientDisconnected = false const transformedStream = new ReadableStream({ async start(controller) { const encoder = new TextEncoder() await resetStreamBuffer(streamId) await setStreamMeta(streamId, { status: 'active', userId: authenticatedUserId }) + eventWriter = createStreamEventWriter(streamId) + + const shouldFlushEvent = (event: Record) => + event.type === 'tool_call' || + event.type === 'tool_result' || + event.type === 'tool_error' || + event.type === 'subagent_end' || + event.type === 'structured_result' || + event.type === 'subagent_result' || + event.type === 'done' || + event.type === 'error' const pushEvent = async (event: Record) => { - const entry = await appendStreamEvent(streamId, event) + if (!eventWriter) return + const entry = await eventWriter.write(event) + if (shouldFlushEvent(event)) { + await eventWriter.flush() + } const payload = { ...event, eventId: entry.eventId, streamId, } try { - controller.enqueue(encoder.encode(`data: 
${JSON.stringify(payload)}\n\n`)) + if (!clientDisconnected) { + controller.enqueue(encoder.encode(`data: ${JSON.stringify(payload)}\n\n`)) + } } catch { - // Client disconnected - keep buffering + clientDisconnected = true + await eventWriter.flush() } } @@ -562,9 +582,11 @@ export async function POST(req: NextRequest) { }) .where(eq(copilotChats.id, actualChatId!)) } + await eventWriter.close() await setStreamMeta(streamId, { status: 'complete', userId: authenticatedUserId }) } catch (error) { logger.error(`[${tracker.requestId}] Orchestration error:`, error) + await eventWriter.close() await setStreamMeta(streamId, { status: 'error', userId: authenticatedUserId, @@ -580,6 +602,12 @@ export async function POST(req: NextRequest) { controller.close() } }, + async cancel() { + clientDisconnected = true + if (eventWriter) { + await eventWriter.flush() + } + }, }) return new Response(transformedStream, { diff --git a/apps/sim/lib/copilot/orchestrator/stream-buffer.ts b/apps/sim/lib/copilot/orchestrator/stream-buffer.ts index b1b37bb552..2360478419 100644 --- a/apps/sim/lib/copilot/orchestrator/stream-buffer.ts +++ b/apps/sim/lib/copilot/orchestrator/stream-buffer.ts @@ -5,6 +5,9 @@ const logger = createLogger('CopilotStreamBuffer') const STREAM_TTL_SECONDS = 60 * 60 const STREAM_EVENT_LIMIT = 5000 +const STREAM_RESERVE_BATCH = 200 +const STREAM_FLUSH_INTERVAL_MS = 15 +const STREAM_FLUSH_MAX_BATCH = 200 const APPEND_STREAM_EVENT_LUA = ` local seqKey = KEYS[1] @@ -56,6 +59,12 @@ export type StreamEventEntry = { event: Record } +export type StreamEventWriter = { + write: (event: Record) => Promise + flush: () => Promise + close: () => Promise +} + export async function resetStreamBuffer(streamId: string): Promise { const redis = getRedisClient() if (!redis) return @@ -140,6 +149,95 @@ export async function appendStreamEvent( } } +export function createStreamEventWriter(streamId: string): StreamEventWriter { + const redis = getRedisClient() + if (!redis) { + return { + 
write: async (event) => ({ eventId: 0, streamId, event }), + flush: async () => {}, + close: async () => {}, + } + } + + let pending: StreamEventEntry[] = [] + let nextEventId = 0 + let maxReservedId = 0 + let flushTimer: ReturnType | null = null + let isFlushing = false + + const scheduleFlush = () => { + if (flushTimer) return + flushTimer = setTimeout(() => { + flushTimer = null + void flush() + }, STREAM_FLUSH_INTERVAL_MS) + } + + const reserveIds = async (minCount: number) => { + const reserveCount = Math.max(STREAM_RESERVE_BATCH, minCount) + const newMax = await redis.incrby(getSeqKey(streamId), reserveCount) + const startId = newMax - reserveCount + 1 + if (nextEventId === 0 || nextEventId > maxReservedId) { + nextEventId = startId + maxReservedId = newMax + } + } + + const flush = async () => { + if (isFlushing || pending.length === 0) return + isFlushing = true + const batch = pending + pending = [] + try { + const key = getEventsKey(streamId) + const zaddArgs: (string | number)[] = [] + for (const entry of batch) { + zaddArgs.push(entry.eventId, JSON.stringify(entry)) + } + const pipeline = redis.pipeline() + pipeline.zadd(key, ...(zaddArgs as any)) + pipeline.expire(key, STREAM_TTL_SECONDS) + pipeline.expire(getSeqKey(streamId), STREAM_TTL_SECONDS) + pipeline.zremrangebyrank(key, 0, -STREAM_EVENT_LIMIT - 1) + await pipeline.exec() + } catch (error) { + logger.warn('Failed to flush stream events', { + streamId, + error: error instanceof Error ? 
error.message : String(error), + }) + pending = batch.concat(pending) + } finally { + isFlushing = false + if (pending.length > 0) scheduleFlush() + } + } + + const write = async (event: Record) => { + if (nextEventId === 0 || nextEventId > maxReservedId) { + await reserveIds(1) + } + const eventId = nextEventId++ + const entry: StreamEventEntry = { eventId, streamId, event } + pending.push(entry) + if (pending.length >= STREAM_FLUSH_MAX_BATCH) { + await flush() + } else { + scheduleFlush() + } + return entry + } + + const close = async () => { + if (flushTimer) { + clearTimeout(flushTimer) + flushTimer = null + } + await flush() + } + + return { write, flush, close } +} + export async function readStreamEvents( streamId: string, afterEventId: number diff --git a/apps/sim/stores/panel/copilot/store.ts b/apps/sim/stores/panel/copilot/store.ts index 9d7bdb6dc2..95cbfee486 100644 --- a/apps/sim/stores/panel/copilot/store.ts +++ b/apps/sim/stores/panel/copilot/store.ts @@ -3039,7 +3039,7 @@ export const useCopilotStore = create()( }) const batchUrl = `/api/copilot/chat/stream?streamId=${encodeURIComponent( nextStream.streamId - )}&from=0&batch=true` + )}&from=0&to=${encodeURIComponent(String(nextStream.lastEventId))}&batch=true` const batchResponse = await fetch(batchUrl, { credentials: 'include' }) if (batchResponse.ok) { const batchData = await batchResponse.json() From 89782f67301278255b629ab6fe31737541b383c5 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Wed, 4 Feb 2026 12:35:12 -0800 Subject: [PATCH 19/72] Fix lint --- apps/sim/app/api/copilot/chat/route.ts | 13 +-- apps/sim/app/api/copilot/chat/stream/route.ts | 11 +- apps/sim/app/api/mcp/copilot/route.ts | 6 +- apps/sim/app/api/v1/copilot/chat/route.ts | 21 +++- .../components/tool-call/tool-call.tsx | 7 +- apps/sim/lib/copilot/orchestrator/config.ts | 1 - apps/sim/lib/copilot/orchestrator/index.ts | 13 ++- .../lib/copilot/orchestrator/persistence.ts | 6 +- .../lib/copilot/orchestrator/sse-handlers.ts | 
66 ++++++----- .../lib/copilot/orchestrator/sse-parser.ts | 1 - .../lib/copilot/orchestrator/stream-buffer.ts | 6 +- apps/sim/lib/copilot/orchestrator/subagent.ts | 11 +- .../lib/copilot/orchestrator/tool-executor.ts | 107 ++++++++++-------- apps/sim/lib/copilot/orchestrator/types.ts | 1 - apps/sim/lib/copilot/tools/mcp/definitions.ts | 24 ++-- .../tools/server/workflow/edit-workflow.ts | 7 +- .../copilot/tools/shared/workflow-utils.ts | 5 +- apps/sim/lib/workflows/utils.ts | 5 +- apps/sim/stores/panel/copilot/store.ts | 14 ++- 19 files changed, 187 insertions(+), 138 deletions(-) diff --git a/apps/sim/app/api/copilot/chat/route.ts b/apps/sim/app/api/copilot/chat/route.ts index 198300cdc9..2a4cb4fe62 100644 --- a/apps/sim/app/api/copilot/chat/route.ts +++ b/apps/sim/app/api/copilot/chat/route.ts @@ -9,6 +9,12 @@ import { generateChatTitle } from '@/lib/copilot/chat-title' import { getCopilotModel } from '@/lib/copilot/config' import { SIM_AGENT_VERSION } from '@/lib/copilot/constants' import { COPILOT_MODEL_IDS, COPILOT_REQUEST_MODES } from '@/lib/copilot/models' +import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator' +import { + createStreamEventWriter, + resetStreamBuffer, + setStreamMeta, +} from '@/lib/copilot/orchestrator/stream-buffer' import { authenticateCopilotRequestSessionOnly, createBadRequestResponse, @@ -24,16 +30,9 @@ import { createFileContent } from '@/lib/uploads/utils/file-utils' import { resolveWorkflowIdForUser } from '@/lib/workflows/utils' import { tools } from '@/tools/registry' import { getLatestVersionTools, stripVersionSuffix } from '@/tools/utils' -import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator' -import { - createStreamEventWriter, - resetStreamBuffer, - setStreamMeta, -} from '@/lib/copilot/orchestrator/stream-buffer' const logger = createLogger('CopilotChatAPI') - const FileAttachmentSchema = z.object({ id: z.string(), key: z.string(), diff --git a/apps/sim/app/api/copilot/chat/stream/route.ts 
b/apps/sim/app/api/copilot/chat/stream/route.ts index e04271171f..c1fd6fb22f 100644 --- a/apps/sim/app/api/copilot/chat/stream/route.ts +++ b/apps/sim/app/api/copilot/chat/stream/route.ts @@ -1,12 +1,12 @@ -import { type NextRequest, NextResponse } from 'next/server' import { createLogger } from '@sim/logger' -import { SSE_HEADERS } from '@/lib/core/utils/sse' -import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers' +import { type NextRequest, NextResponse } from 'next/server' import { getStreamMeta, readStreamEvents, type StreamMeta, } from '@/lib/copilot/orchestrator/stream-buffer' +import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers' +import { SSE_HEADERS } from '@/lib/core/utils/sse' const logger = createLogger('CopilotChatStreamAPI') const POLL_INTERVAL_MS = 250 @@ -56,9 +56,7 @@ export async function GET(request: NextRequest) { // Batch mode: return all buffered events as JSON if (batchMode) { const events = await readStreamEvents(streamId, fromEventId) - const filteredEvents = toEventId - ? events.filter((e) => e.eventId <= toEventId) - : events + const filteredEvents = toEventId ? 
events.filter((e) => e.eventId <= toEventId) : events logger.info('[Resume] Batch response', { streamId, fromEventId, @@ -130,4 +128,3 @@ export async function GET(request: NextRequest) { return new Response(stream, { headers: SSE_HEADERS }) } - diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index b797ccca53..ee297b5b96 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -15,8 +15,11 @@ import { type NextRequest, NextResponse } from 'next/server' import { checkHybridAuth } from '@/lib/auth/hybrid' import { getCopilotModel } from '@/lib/copilot/config' import { orchestrateSubagentStream } from '@/lib/copilot/orchestrator/subagent' +import { + executeToolServerSide, + prepareExecutionContext, +} from '@/lib/copilot/orchestrator/tool-executor' import { DIRECT_TOOL_DEFS, SUBAGENT_TOOL_DEFS } from '@/lib/copilot/tools/mcp/definitions' -import { executeToolServerSide, prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor' const logger = createLogger('CopilotMcpAPI') @@ -408,4 +411,3 @@ async function handleSubagentToolCall( return NextResponse.json(createResponse(id, response)) } - diff --git a/apps/sim/app/api/v1/copilot/chat/route.ts b/apps/sim/app/api/v1/copilot/chat/route.ts index 517959c44e..57def1fb56 100644 --- a/apps/sim/app/api/v1/copilot/chat/route.ts +++ b/apps/sim/app/api/v1/copilot/chat/route.ts @@ -1,12 +1,12 @@ import { createLogger } from '@sim/logger' import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' -import { authenticateV1Request } from '@/app/api/v1/auth' import { getCopilotModel } from '@/lib/copilot/config' import { SIM_AGENT_VERSION } from '@/lib/copilot/constants' import { COPILOT_REQUEST_MODES } from '@/lib/copilot/models' import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator' import { resolveWorkflowIdForUser } from '@/lib/workflows/utils' +import { authenticateV1Request } from 
'@/app/api/v1/auth' const logger = createLogger('CopilotHeadlessAPI') @@ -24,7 +24,7 @@ const RequestSchema = z.object({ /** * POST /api/v1/copilot/chat * Headless copilot endpoint for server-side orchestration. - * + * * workflowId is optional - if not provided: * - If workflowName is provided, finds that workflow * - Otherwise uses the user's first workflow as context @@ -33,7 +33,10 @@ const RequestSchema = z.object({ export async function POST(req: NextRequest) { const auth = await authenticateV1Request(req) if (!auth.authenticated || !auth.userId) { - return NextResponse.json({ success: false, error: auth.error || 'Unauthorized' }, { status: 401 }) + return NextResponse.json( + { success: false, error: auth.error || 'Unauthorized' }, + { status: 401 } + ) } try { @@ -43,10 +46,17 @@ export async function POST(req: NextRequest) { const selectedModel = parsed.model || defaults.model // Resolve workflow ID - const resolved = await resolveWorkflowIdForUser(auth.userId, parsed.workflowId, parsed.workflowName) + const resolved = await resolveWorkflowIdForUser( + auth.userId, + parsed.workflowId, + parsed.workflowName + ) if (!resolved) { return NextResponse.json( - { success: false, error: 'No workflows found. Create a workflow first or provide a valid workflowId.' }, + { + success: false, + error: 'No workflows found. 
Create a workflow first or provide a valid workflowId.', + }, { status: 400 } ) } @@ -104,4 +114,3 @@ export async function POST(req: NextRequest) { return NextResponse.json({ success: false, error: 'Internal server error' }, { status: 500 }) } } - diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx index 9d575cfd5d..8d0e59eff3 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx @@ -1,7 +1,7 @@ 'use client' -import { createLogger } from '@sim/logger' import { memo, useEffect, useMemo, useRef, useState } from 'react' +import { createLogger } from '@sim/logger' import clsx from 'clsx' import { ChevronUp, LayoutList } from 'lucide-react' import Editor from 'react-simple-code-editor' @@ -1260,7 +1260,10 @@ function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean { const toolCallLogger = createLogger('CopilotToolCall') -async function sendToolDecision(toolCallId: string, status: 'accepted' | 'rejected' | 'background') { +async function sendToolDecision( + toolCallId: string, + status: 'accepted' | 'rejected' | 'background' +) { try { await fetch('/api/copilot/confirm', { method: 'POST', diff --git a/apps/sim/lib/copilot/orchestrator/config.ts b/apps/sim/lib/copilot/orchestrator/config.ts index 80c22d436b..9e2dc7221a 100644 --- a/apps/sim/lib/copilot/orchestrator/config.ts +++ b/apps/sim/lib/copilot/orchestrator/config.ts @@ -34,4 +34,3 @@ export const SUBAGENT_TOOL_NAMES = [ ] as const export const SUBAGENT_TOOL_SET = new Set(SUBAGENT_TOOL_NAMES) - diff --git a/apps/sim/lib/copilot/orchestrator/index.ts 
b/apps/sim/lib/copilot/orchestrator/index.ts index f2286de75b..e990612be6 100644 --- a/apps/sim/lib/copilot/orchestrator/index.ts +++ b/apps/sim/lib/copilot/orchestrator/index.ts @@ -1,7 +1,5 @@ import { createLogger } from '@sim/logger' import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' -import { env } from '@/lib/core/config/env' -import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser' import { getToolCallIdFromEvent, handleSubagentRouting, @@ -13,15 +11,16 @@ import { wasToolCallSeen, wasToolResultSeen, } from '@/lib/copilot/orchestrator/sse-handlers' +import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser' import { prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor' import type { - ExecutionContext, OrchestratorOptions, OrchestratorResult, SSEEvent, StreamingContext, ToolCallSummary, } from '@/lib/copilot/orchestrator/types' +import { env } from '@/lib/core/config/env' const logger = createLogger('CopilotOrchestrator') const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT @@ -73,7 +72,9 @@ export async function orchestrateCopilotStream( if (!response.ok) { const errorText = await response.text().catch(() => '') - throw new Error(`Copilot backend error (${response.status}): ${errorText || response.statusText}`) + throw new Error( + `Copilot backend error (${response.status}): ${errorText || response.statusText}` + ) } if (!response.body) { @@ -104,7 +105,8 @@ export async function orchestrateCopilotStream( const toolCallId = getToolCallIdFromEvent(normalizedEvent) const eventData = normalizedEvent.data - const isPartialToolCall = normalizedEvent.type === 'tool_call' && eventData?.partial === true + const isPartialToolCall = + normalizedEvent.type === 'tool_call' && eventData?.partial === true const shouldSkipToolCall = normalizedEvent.type === 'tool_call' && @@ -220,4 +222,3 @@ function buildResult(context: StreamingContext): OrchestratorResult { errors: 
context.errors.length ? context.errors : undefined, } } - diff --git a/apps/sim/lib/copilot/orchestrator/persistence.ts b/apps/sim/lib/copilot/orchestrator/persistence.ts index d7b015f00c..418b652a50 100644 --- a/apps/sim/lib/copilot/orchestrator/persistence.ts +++ b/apps/sim/lib/copilot/orchestrator/persistence.ts @@ -69,7 +69,10 @@ export async function saveMessages( /** * Update the conversationId for a chat without overwriting messages. */ -export async function updateChatConversationId(chatId: string, conversationId: string): Promise { +export async function updateChatConversationId( + chatId: string, + conversationId: string +): Promise { await db .update(copilotChats) .set({ @@ -135,4 +138,3 @@ export async function getToolConfirmation(toolCallId: string): Promise<{ return null } } - diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts index 4ad1673459..9a3d0f1b9d 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts @@ -1,4 +1,7 @@ import { createLogger } from '@sim/logger' +import { INTERRUPT_TOOL_SET, SUBAGENT_TOOL_SET } from '@/lib/copilot/orchestrator/config' +import { getToolConfirmation } from '@/lib/copilot/orchestrator/persistence' +import { executeToolServerSide, markToolComplete } from '@/lib/copilot/orchestrator/tool-executor' import type { ContentBlock, ExecutionContext, @@ -7,9 +10,6 @@ import type { StreamingContext, ToolCallState, } from '@/lib/copilot/orchestrator/types' -import { executeToolServerSide, markToolComplete } from '@/lib/copilot/orchestrator/tool-executor' -import { getToolConfirmation } from '@/lib/copilot/orchestrator/persistence' -import { INTERRUPT_TOOL_SET, SUBAGENT_TOOL_SET } from '@/lib/copilot/orchestrator/config' const logger = createLogger('CopilotSseHandlers') @@ -25,9 +25,12 @@ const seenToolResults = new Set() export function markToolCallSeen(toolCallId: string): void { 
seenToolCalls.add(toolCallId) - setTimeout(() => { - seenToolCalls.delete(toolCallId) - }, 5 * 60 * 1000) + setTimeout( + () => { + seenToolCalls.delete(toolCallId) + }, + 5 * 60 * 1000 + ) } export function wasToolCallSeen(toolCallId: string): boolean { @@ -99,9 +102,12 @@ export function normalizeSseEvent(event: SSEEvent): SSEEvent { */ export function markToolResultSeen(toolCallId: string): void { seenToolResults.add(toolCallId) - setTimeout(() => { - seenToolResults.delete(toolCallId) - }, 5 * 60 * 1000) + setTimeout( + () => { + seenToolResults.delete(toolCallId) + }, + 5 * 60 * 1000 + ) } /** @@ -134,10 +140,7 @@ export type SSEHandler = ( options: OrchestratorOptions ) => void | Promise -function addContentBlock( - context: StreamingContext, - block: Omit -): void { +function addContentBlock(context: StreamingContext, block: Omit): void { context.contentBlocks.push({ ...block, timestamp: Date.now(), @@ -195,7 +198,7 @@ async function executeToolAndReport( success: result.success, result: result.output, data: { - id: toolCall.id, + id: toolCall.id, name: toolCall.name, success: result.success, result: result.output, @@ -256,7 +259,7 @@ export const sseHandlers: Record = { const hasError = !!data?.error || !!data?.result?.error // If explicitly set, use that; otherwise infer from data presence - const success = hasExplicitSuccess ? !!explicitSuccess : (hasResultData && !hasError) + const success = hasExplicitSuccess ? !!explicitSuccess : hasResultData && !hasError current.status = success ? 'success' : 'error' current.endTime = Date.now() @@ -306,7 +309,10 @@ export const sseHandlers: Record = { // If we've already completed this tool call, ignore late/duplicate tool_call events // to avoid resetting UI/state back to pending and re-executing. 
- if (existing?.endTime || (existing && existing.status !== 'pending' && existing.status !== 'executing')) { + if ( + existing?.endTime || + (existing && existing.status !== 'pending' && existing.status !== 'executing') + ) { if (!existing.params && args) { existing.params = args } @@ -343,7 +349,10 @@ export const sseHandlers: Record = { if (RESPOND_TOOL_SET.has(toolName)) { toolCall.status = 'success' toolCall.endTime = Date.now() - toolCall.result = { success: true, output: 'Internal respond tool - handled by copilot backend' } + toolCall.result = { + success: true, + output: 'Internal respond tool - handled by copilot backend', + } return } @@ -429,12 +438,14 @@ export const sseHandlers: Record = { context.currentThinkingBlock = null return } - const chunk = typeof event.data === 'string' ? event.data : event.data?.data || event.data?.content + const chunk = + typeof event.data === 'string' ? event.data : event.data?.data || event.data?.content if (!chunk || !context.currentThinkingBlock) return context.currentThinkingBlock.content = `${context.currentThinkingBlock.content || ''}${chunk}` }, content: (event, context) => { - const chunk = typeof event.data === 'string' ? event.data : event.data?.content || event.data?.data + const chunk = + typeof event.data === 'string' ? event.data : event.data?.content || event.data?.data if (!chunk) return context.accumulatedContent += chunk addContentBlock(context, { type: 'text', content: chunk }) @@ -452,7 +463,9 @@ export const sseHandlers: Record = { }, error: (event, context) => { const message = - event.data?.message || event.data?.error || (typeof event.data === 'string' ? event.data : null) + event.data?.message || + event.data?.error || + (typeof event.data === 'string' ? event.data : null) if (message) { context.errors.push(message) } @@ -466,7 +479,8 @@ export const subAgentHandlers: Record = { if (!parentToolCallId || !event.data) return const chunk = typeof event.data === 'string' ? 
event.data : event.data?.content || '' if (!chunk) return - context.subAgentContent[parentToolCallId] = (context.subAgentContent[parentToolCallId] || '') + chunk + context.subAgentContent[parentToolCallId] = + (context.subAgentContent[parentToolCallId] || '') + chunk addContentBlock(context, { type: 'subagent_text', content: chunk }) }, tool_call: async (event, context, execContext, options) => { @@ -510,7 +524,10 @@ export const subAgentHandlers: Record = { if (RESPOND_TOOL_SET.has(toolName)) { toolCall.status = 'success' toolCall.endTime = Date.now() - toolCall.result = { success: true, output: 'Internal respond tool - handled by copilot backend' } + toolCall.result = { + success: true, + output: 'Internal respond tool - handled by copilot backend', + } return } @@ -541,9 +558,7 @@ export const subAgentHandlers: Record = { const status = success ? 'success' : 'error' const endTime = Date.now() - const result = hasResultData - ? { success, output: data?.result || data?.data } - : undefined + const result = hasResultData ? 
{ success, output: data?.result || data?.data } : undefined if (subAgentToolCall) { subAgentToolCall.status = status @@ -572,4 +587,3 @@ export function handleSubagentRouting(event: SSEEvent, context: StreamingContext } return true } - diff --git a/apps/sim/lib/copilot/orchestrator/sse-parser.ts b/apps/sim/lib/copilot/orchestrator/sse-parser.ts index 06873289ec..8ab50365c4 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-parser.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-parser.ts @@ -69,4 +69,3 @@ export async function* parseSSEStream( } } } - diff --git a/apps/sim/lib/copilot/orchestrator/stream-buffer.ts b/apps/sim/lib/copilot/orchestrator/stream-buffer.ts index 2360478419..24621ee571 100644 --- a/apps/sim/lib/copilot/orchestrator/stream-buffer.ts +++ b/apps/sim/lib/copilot/orchestrator/stream-buffer.ts @@ -78,10 +78,7 @@ export async function resetStreamBuffer(streamId: string): Promise { } } -export async function setStreamMeta( - streamId: string, - meta: StreamMeta -): Promise { +export async function setStreamMeta(streamId: string, meta: StreamMeta): Promise { const redis = getRedisClient() if (!redis) return try { @@ -263,4 +260,3 @@ export async function readStreamEvents( return [] } } - diff --git a/apps/sim/lib/copilot/orchestrator/subagent.ts b/apps/sim/lib/copilot/orchestrator/subagent.ts index 1d649eb3c9..fa1f3d36ad 100644 --- a/apps/sim/lib/copilot/orchestrator/subagent.ts +++ b/apps/sim/lib/copilot/orchestrator/subagent.ts @@ -1,6 +1,5 @@ import { createLogger } from '@sim/logger' import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' -import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser' import { getToolCallIdFromEvent, handleSubagentRouting, @@ -12,6 +11,7 @@ import { wasToolCallSeen, wasToolResultSeen, } from '@/lib/copilot/orchestrator/sse-handlers' +import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser' import { prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor' import 
type { ExecutionContext, @@ -121,7 +121,8 @@ export async function orchestrateSubagentStream( const toolCallId = getToolCallIdFromEvent(normalizedEvent) const eventData = normalizedEvent.data - const isPartialToolCall = normalizedEvent.type === 'tool_call' && eventData?.partial === true + const isPartialToolCall = + normalizedEvent.type === 'tool_call' && eventData?.partial === true const shouldSkipToolCall = normalizedEvent.type === 'tool_call' && @@ -151,7 +152,10 @@ export async function orchestrateSubagentStream( await forwardEvent(normalizedEvent, options) } - if (normalizedEvent.type === 'structured_result' || normalizedEvent.type === 'subagent_result') { + if ( + normalizedEvent.type === 'structured_result' || + normalizedEvent.type === 'subagent_result' + ) { structuredResult = normalizeStructuredResult(normalizedEvent.data) context.streamComplete = true continue @@ -280,4 +284,3 @@ function buildResult( errors: context.errors.length ? context.errors : undefined, } } - diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor.ts b/apps/sim/lib/copilot/orchestrator/tool-executor.ts index fc223f1019..8675263951 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor.ts @@ -12,38 +12,42 @@ import { } from '@sim/db/schema' import { createLogger } from '@sim/logger' import { and, asc, desc, eq, inArray, isNull, max, or } from 'drizzle-orm' -import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils' -import { checkChatAccess, checkWorkflowAccessForChatCreation } from '@/app/api/chat/utils' -import { resolveEnvVarReferences } from '@/executor/utils/reference-validation' -import { normalizeName } from '@/executor/constants' import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' -import { generateRequestId } from '@/lib/core/utils/request' -import { env } from '@/lib/core/config/env' -import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' -import { listWorkspaceFiles } 
from '@/lib/uploads/contexts/workspace' -import { mcpService } from '@/lib/mcp/service' +import type { + ExecutionContext, + ToolCallResult, + ToolCallState, +} from '@/lib/copilot/orchestrator/types' +import { routeExecution } from '@/lib/copilot/tools/server/router' import { extractWorkflowNames, formatNormalizedWorkflowForCopilot, normalizeWorkflowName, } from '@/lib/copilot/tools/shared/workflow-utils' +import { env } from '@/lib/core/config/env' +import { generateRequestId } from '@/lib/core/utils/request' +import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' +import { mcpService } from '@/lib/mcp/service' +import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema' +import { listWorkspaceFiles } from '@/lib/uploads/contexts/workspace' +import { getBlockOutputPaths } from '@/lib/workflows/blocks/block-outputs' +import { BlockPathCalculator } from '@/lib/workflows/blocks/block-path-calculator' import { buildDefaultWorkflowArtifacts } from '@/lib/workflows/defaults' +import { executeWorkflow } from '@/lib/workflows/executor/execute-workflow' import { deployWorkflow, loadWorkflowFromNormalizedTables, saveWorkflowToNormalizedTables, undeployWorkflow, } from '@/lib/workflows/persistence/utils' -import { executeWorkflow } from '@/lib/workflows/executor/execute-workflow' -import { BlockPathCalculator } from '@/lib/workflows/blocks/block-path-calculator' -import { getBlockOutputPaths } from '@/lib/workflows/blocks/block-outputs' import { isInputDefinitionTrigger } from '@/lib/workflows/triggers/input-definition-triggers' import { hasValidStartBlock } from '@/lib/workflows/triggers/trigger-utils.server' +import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils' +import { checkChatAccess, checkWorkflowAccessForChatCreation } from '@/app/api/chat/utils' +import { normalizeName } from '@/executor/constants' +import { resolveEnvVarReferences } from '@/executor/utils/reference-validation' import { executeTool } from '@/tools' import { getTool, 
resolveToolId } from '@/tools/utils' -import { routeExecution } from '@/lib/copilot/tools/server/router' -import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema' -import type { ExecutionContext, ToolCallResult, ToolCallState } from '@/lib/copilot/orchestrator/types' const logger = createLogger('CopilotToolExecutor') const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT @@ -171,11 +175,9 @@ async function executeIntegrationToolDirect( const decryptedEnvVars = context.decryptedEnvVars || (await getEffectiveDecryptedEnv(userId, workspaceId)) - const executionParams: Record = resolveEnvVarReferences( - toolArgs, - decryptedEnvVars, - { deep: true } - ) as Record + const executionParams: Record = resolveEnvVarReferences(toolArgs, decryptedEnvVars, { + deep: true, + }) as Record if (toolConfig.oauth?.required && toolConfig.oauth.provider) { const provider = toolConfig.oauth.provider @@ -285,7 +287,10 @@ async function executeSimWorkflowTool( } } -async function ensureWorkflowAccess(workflowId: string, userId: string): Promise<{ +async function ensureWorkflowAccess( + workflowId: string, + userId: string +): Promise<{ workflow: typeof workflow.$inferSelect workspaceId?: string | null }> { @@ -538,8 +543,8 @@ async function executeListFolders( context: ExecutionContext ): Promise { try { - const workspaceId = (params?.workspaceId as string | undefined) || - (await getDefaultWorkspaceId(context.userId)) + const workspaceId = + (params?.workspaceId as string | undefined) || (await getDefaultWorkspaceId(context.userId)) await ensureWorkspaceAccess(workspaceId, context.userId, false) @@ -794,9 +799,10 @@ async function executeGetBlockOutputs( const blocks = normalized.blocks || {} const loops = normalized.loops || {} const parallels = normalized.parallels || {} - const blockIds = Array.isArray(params.blockIds) && params.blockIds.length > 0 - ? 
params.blockIds - : Object.keys(blocks) + const blockIds = + Array.isArray(params.blockIds) && params.blockIds.length > 0 + ? params.blockIds + : Object.keys(blocks) const results: Array<{ blockId: string @@ -935,8 +941,7 @@ async function executeGetBlockUpstreamReferences( for (const accessibleBlockId of accessibleIds) { const block = blocks[accessibleBlockId] if (!block?.type) continue - const canSelfReference = - block.type === 'approval' || block.type === 'human_in_the_loop' + const canSelfReference = block.type === 'approval' || block.type === 'human_in_the_loop' if (accessibleBlockId === blockId && !canSelfReference) continue const blockName = block.name || block.type @@ -1149,7 +1154,12 @@ async function executeDeployApi( return { success: true, - output: { workflowId, isDeployed: true, deployedAt: result.deployedAt, version: result.version }, + output: { + workflowId, + isDeployed: true, + deployedAt: result.deployedAt, + version: result.version, + }, } } catch (error) { return { success: false, error: error instanceof Error ? error.message : String(error) } @@ -1196,7 +1206,10 @@ async function executeDeployChat( const identifierPattern = /^[a-z0-9-]+$/ if (!identifierPattern.test(identifier)) { - return { success: false, error: 'Identifier can only contain lowercase letters, numbers, and hyphens' } + return { + success: false, + error: 'Identifier can only contain lowercase letters, numbers, and hyphens', + } } const existingIdentifier = await db @@ -1273,7 +1286,10 @@ async function executeDeployChat( }) } - return { success: true, output: { success: true, action: 'deploy', isDeployed: true, identifier } } + return { + success: true, + output: { success: true, action: 'deploy', isDeployed: true, identifier }, + } } catch (error) { return { success: false, error: error instanceof Error ? 
error.message : String(error) } } @@ -1313,12 +1329,18 @@ async function executeDeployMcp( const existingTool = await db .select() .from(workflowMcpTool) - .where(and(eq(workflowMcpTool.serverId, serverId), eq(workflowMcpTool.workflowId, workflowId))) + .where( + and(eq(workflowMcpTool.serverId, serverId), eq(workflowMcpTool.workflowId, workflowId)) + ) .limit(1) - const toolName = sanitizeToolName(params.toolName || workflowRecord.name || `workflow_${workflowId}`) + const toolName = sanitizeToolName( + params.toolName || workflowRecord.name || `workflow_${workflowId}` + ) const toolDescription = - params.toolDescription || workflowRecord.description || `Execute ${workflowRecord.name} workflow` + params.toolDescription || + workflowRecord.description || + `Execute ${workflowRecord.name} workflow` const parameterSchema = params.parameterSchema || {} if (existingTool.length > 0) { @@ -1387,11 +1409,7 @@ async function executeCheckDeploymentStatus( const workspaceId = workflowRecord.workspaceId const [apiDeploy, chatDeploy] = await Promise.all([ - db - .select() - .from(workflow) - .where(eq(workflow.id, workflowId)) - .limit(1), + db.select().from(workflow).where(eq(workflow.id, workflowId)).limit(1), db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1), ]) @@ -1546,10 +1564,7 @@ async function executeCreateWorkspaceMcpServer( const addedTools: Array<{ workflowId: string; toolName: string }> = [] if (workflowIds.length > 0) { - const workflows = await db - .select() - .from(workflow) - .where(inArray(workflow.id, workflowIds)) + const workflows = await db.select().from(workflow).where(inArray(workflow.id, workflowIds)) for (const wf of workflows) { if (wf.workspaceId !== workspaceId || !wf.isDeployed) { @@ -1559,7 +1574,7 @@ async function executeCreateWorkspaceMcpServer( if (!hasStartBlock) { continue } - const toolName = sanitizeToolName(wf.name || `workflow_${wf.id}`) + const toolName = sanitizeToolName(wf.name || `workflow_${wf.id}`) await 
db.insert(workflowMcpTool).values({ id: crypto.randomUUID(), serverId, @@ -1674,13 +1689,12 @@ export async function prepareExecutionContext( userId: string, workflowId: string ): Promise { - let workspaceId: string | undefined const workflowResult = await db .select({ workspaceId: workflow.workspaceId }) .from(workflow) .where(eq(workflow.id, workflowId)) .limit(1) - workspaceId = workflowResult[0]?.workspaceId ?? undefined + const workspaceId = workflowResult[0]?.workspaceId ?? undefined const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId) @@ -1691,4 +1705,3 @@ export async function prepareExecutionContext( decryptedEnvVars, } } - diff --git a/apps/sim/lib/copilot/orchestrator/types.ts b/apps/sim/lib/copilot/orchestrator/types.ts index 52966d0b39..d1f58b5886 100644 --- a/apps/sim/lib/copilot/orchestrator/types.ts +++ b/apps/sim/lib/copilot/orchestrator/types.ts @@ -128,4 +128,3 @@ export interface ExecutionContext { workspaceId?: string decryptedEnvVars?: Record } - diff --git a/apps/sim/lib/copilot/tools/mcp/definitions.ts b/apps/sim/lib/copilot/tools/mcp/definitions.ts index 18e076ee9e..4ce44089bb 100644 --- a/apps/sim/lib/copilot/tools/mcp/definitions.ts +++ b/apps/sim/lib/copilot/tools/mcp/definitions.ts @@ -20,7 +20,8 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ { name: 'list_workflows', toolId: 'list_user_workflows', - description: 'List all workflows the user has access to. Returns workflow IDs, names, and workspace info.', + description: + 'List all workflows the user has access to. Returns workflow IDs, names, and workspace info.', inputSchema: { type: 'object', properties: { @@ -38,7 +39,8 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ { name: 'list_workspaces', toolId: 'list_user_workspaces', - description: 'List all workspaces the user has access to. Returns workspace IDs, names, and roles.', + description: + 'List all workspaces the user has access to. 
Returns workspace IDs, names, and roles.', inputSchema: { type: 'object', properties: {}, @@ -225,10 +227,14 @@ IMPORTANT: Pass the returned plan EXACTLY to copilot_edit - do not modify or sum inputSchema: { type: 'object', properties: { - request: { type: 'string', description: 'What you want to build or modify in the workflow.' }, + request: { + type: 'string', + description: 'What you want to build or modify in the workflow.', + }, workflowId: { type: 'string', - description: 'REQUIRED. The workflow ID. For new workflows, call create_workflow first to get this.', + description: + 'REQUIRED. The workflow ID. For new workflows, call create_workflow first to get this.', }, context: { type: 'object' }, }, @@ -261,15 +267,18 @@ IMPORTANT: After copilot_edit completes, you MUST call copilot_deploy before the message: { type: 'string', description: 'Optional additional instructions for the edit.' }, workflowId: { type: 'string', - description: 'REQUIRED. The workflow ID to edit. Get this from create_workflow for new workflows.', + description: + 'REQUIRED. The workflow ID to edit. Get this from create_workflow for new workflows.', }, plan: { type: 'object', - description: 'The plan object from copilot_plan. Pass it EXACTLY as returned, do not modify.', + description: + 'The plan object from copilot_plan. Pass it EXACTLY as returned, do not modify.', }, context: { type: 'object', - description: 'Additional context. Put the plan in context.plan if not using the plan field directly.', + description: + 'Additional context. 
Put the plan in context.plan if not using the plan field directly.', }, }, required: ['workflowId'], @@ -463,4 +472,3 @@ USE THIS: }, }, ] - diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts index a060acfb91..7a22c8075b 100644 --- a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts +++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts @@ -6,9 +6,9 @@ import { eq } from 'drizzle-orm' import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' import { validateSelectorIds } from '@/lib/copilot/validation/selector-validator' import type { PermissionGroupConfig } from '@/lib/permission-groups/types' +import { applyAutoLayout } from '@/lib/workflows/autolayout' import { getBlockOutputs } from '@/lib/workflows/blocks/block-outputs' import { extractAndPersistCustomTools } from '@/lib/workflows/persistence/custom-tools-persistence' -import { applyAutoLayout } from '@/lib/workflows/autolayout' import { loadWorkflowFromNormalizedTables, saveWorkflowToNormalizedTables, @@ -3276,9 +3276,8 @@ export const editWorkflowServerTool: BaseServerTool = { padding: { x: 100, y: 100 }, }) - const layoutedBlocks = layoutResult.success && layoutResult.blocks - ? layoutResult.blocks - : finalWorkflowState.blocks + const layoutedBlocks = + layoutResult.success && layoutResult.blocks ? 
layoutResult.blocks : finalWorkflowState.blocks if (!layoutResult.success) { logger.warn('Autolayout failed, using default positions', { diff --git a/apps/sim/lib/copilot/tools/shared/workflow-utils.ts b/apps/sim/lib/copilot/tools/shared/workflow-utils.ts index 7d8f659ce1..2f033a8830 100644 --- a/apps/sim/lib/copilot/tools/shared/workflow-utils.ts +++ b/apps/sim/lib/copilot/tools/shared/workflow-utils.ts @@ -26,7 +26,9 @@ export function formatNormalizedWorkflowForCopilot( } export function normalizeWorkflowName(name?: string | null): string { - return String(name || '').trim().toLowerCase() + return String(name || '') + .trim() + .toLowerCase() } export function extractWorkflowNames(workflows: Array<{ name?: string | null }>): string[] { @@ -34,4 +36,3 @@ export function extractWorkflowNames(workflows: Array<{ name?: string | null }>) .map((workflow) => (typeof workflow?.name === 'string' ? workflow.name : null)) .filter((name): name is string => Boolean(name)) } - diff --git a/apps/sim/lib/workflows/utils.ts b/apps/sim/lib/workflows/utils.ts index cfa9467497..7f952510f1 100644 --- a/apps/sim/lib/workflows/utils.ts +++ b/apps/sim/lib/workflows/utils.ts @@ -48,7 +48,10 @@ export async function resolveWorkflowIdForUser( if (workflowName) { const match = workflows.find( - (w) => String(w.name || '').trim().toLowerCase() === workflowName.toLowerCase() + (w) => + String(w.name || '') + .trim() + .toLowerCase() === workflowName.toLowerCase() ) if (match) { return { workflowId: match.id, workflowName: match.name || undefined } diff --git a/apps/sim/stores/panel/copilot/store.ts b/apps/sim/stores/panel/copilot/store.ts index 95cbfee486..0db3d68c09 100644 --- a/apps/sim/stores/panel/copilot/store.ts +++ b/apps/sim/stores/panel/copilot/store.ts @@ -80,8 +80,8 @@ import { subscriptionKeys } from '@/hooks/queries/subscription' import type { ChatContext, CopilotMessage, - CopilotStreamInfo, CopilotStore, + CopilotStreamInfo, CopilotToolCall, MessageFileAttachment, } from 
'@/stores/panel/copilot/types' @@ -2251,7 +2251,7 @@ function createOptimizedContentBlocks(contentBlocks: any[]): any[] { } return result } -`` +;`` function updateStreamingMessage(set: any, context: StreamingContext) { if (context.suppressStreamingUpdates) return const now = performance.now() @@ -3124,8 +3124,8 @@ export const useCopilotStore = create()( replayBlocks && replayBlocks.length > 0 ? replayBlocks : bufferedContent - ? [{ type: TEXT_BLOCK_TYPE, content: bufferedContent, timestamp: Date.now() }] - : [], + ? [{ type: TEXT_BLOCK_TYPE, content: bufferedContent, timestamp: Date.now() }] + : [], } nextMessages = [...nextMessages, assistantMessage] } else if (bufferedContent || (replayBlocks && replayBlocks.length > 0)) { @@ -3207,7 +3207,10 @@ export const useCopilotStore = create()( set({ isSendingMessage: false, abortController: null }) } catch (error) { // Handle AbortError gracefully - expected when user aborts - if (error instanceof Error && (error.name === 'AbortError' || error.message.includes('aborted'))) { + if ( + error instanceof Error && + (error.name === 'AbortError' || error.message.includes('aborted')) + ) { logger.info('[Copilot] Resume stream aborted by user') set({ isSendingMessage: false, abortController: null }) return false @@ -4123,7 +4126,6 @@ export const useCopilotStore = create()( logger.info('[AutoAllowedTools] API returned', { toolId, tools: data.autoAllowedTools }) set({ autoAllowedTools: data.autoAllowedTools || [] }) logger.info('[AutoAllowedTools] Added tool to store', { toolId }) - } } catch (err) { logger.error('[AutoAllowedTools] Failed to add tool', { toolId, error: err }) From 9a5a4947644f6f495522fa0cb78517bf92fde829 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Thu, 5 Feb 2026 11:20:58 -0800 Subject: [PATCH 20/72] Checkpoint --- apps/sim/lib/copilot/orchestrator/index.ts | 48 +- .../copilot/orchestrator/sse-handlers.test.ts | 95 ++ .../lib/copilot/orchestrator/sse-handlers.ts | 109 +- 
.../copilot/orchestrator/sse-utils.test.ts | 43 + .../sim/lib/copilot/orchestrator/sse-utils.ts | 119 ++ .../orchestrator/stream-buffer.test.ts | 118 ++ .../lib/copilot/orchestrator/stream-buffer.ts | 58 +- apps/sim/lib/copilot/orchestrator/subagent.ts | 48 +- .../lib/copilot/orchestrator/tool-executor.ts | 1504 +---------------- .../orchestrator/tool-executor/access.ts | 130 ++ .../tool-executor/deployment-tools.ts | 479 ++++++ .../tool-executor/integration-tools.ts | 100 ++ .../tool-executor/workflow-tools.ts | 769 +++++++++ .../client/blocks/get-blocks-and-tools.ts | 3 +- apps/sim/lib/core/config/env.ts | 5 + apps/sim/stores/panel/copilot/store.ts | 11 + bun.lock | 1 - docs/COPILOT_SERVER_REFACTOR.md | 9 + 18 files changed, 1969 insertions(+), 1680 deletions(-) create mode 100644 apps/sim/lib/copilot/orchestrator/sse-handlers.test.ts create mode 100644 apps/sim/lib/copilot/orchestrator/sse-utils.test.ts create mode 100644 apps/sim/lib/copilot/orchestrator/sse-utils.ts create mode 100644 apps/sim/lib/copilot/orchestrator/stream-buffer.test.ts create mode 100644 apps/sim/lib/copilot/orchestrator/tool-executor/access.ts create mode 100644 apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools.ts create mode 100644 apps/sim/lib/copilot/orchestrator/tool-executor/integration-tools.ts create mode 100644 apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools.ts diff --git a/apps/sim/lib/copilot/orchestrator/index.ts b/apps/sim/lib/copilot/orchestrator/index.ts index e990612be6..0fe0abe622 100644 --- a/apps/sim/lib/copilot/orchestrator/index.ts +++ b/apps/sim/lib/copilot/orchestrator/index.ts @@ -1,16 +1,11 @@ import { createLogger } from '@sim/logger' import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' +import { handleSubagentRouting, sseHandlers, subAgentHandlers } from '@/lib/copilot/orchestrator/sse-handlers' import { - getToolCallIdFromEvent, - handleSubagentRouting, - markToolCallSeen, - markToolResultSeen, normalizeSseEvent, - 
sseHandlers, - subAgentHandlers, - wasToolCallSeen, - wasToolResultSeen, -} from '@/lib/copilot/orchestrator/sse-handlers' + shouldSkipToolCallEvent, + shouldSkipToolResultEvent, +} from '@/lib/copilot/orchestrator/sse-utils' import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser' import { prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor' import type { @@ -99,38 +94,9 @@ export async function orchestrateCopilotStream( const normalizedEvent = normalizeSseEvent(event) - // Skip tool_result events for tools the sim-side already executed. - // The sim-side emits its own tool_result with complete data. - // For server-side tools (not executed by sim), we still forward the Go backend's tool_result. - const toolCallId = getToolCallIdFromEvent(normalizedEvent) - const eventData = normalizedEvent.data - - const isPartialToolCall = - normalizedEvent.type === 'tool_call' && eventData?.partial === true - - const shouldSkipToolCall = - normalizedEvent.type === 'tool_call' && - !!toolCallId && - !isPartialToolCall && - (wasToolResultSeen(toolCallId) || wasToolCallSeen(toolCallId)) - - if ( - normalizedEvent.type === 'tool_call' && - toolCallId && - !isPartialToolCall && - !shouldSkipToolCall - ) { - markToolCallSeen(toolCallId) - } - - const shouldSkipToolResult = - normalizedEvent.type === 'tool_result' && - (() => { - if (!toolCallId) return false - if (wasToolResultSeen(toolCallId)) return true - markToolResultSeen(toolCallId) - return false - })() + // Skip duplicate tool events to prevent state regressions. 
+ const shouldSkipToolCall = shouldSkipToolCallEvent(normalizedEvent) + const shouldSkipToolResult = shouldSkipToolResultEvent(normalizedEvent) if (!shouldSkipToolCall && !shouldSkipToolResult) { await forwardEvent(normalizedEvent, options) diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers.test.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers.test.ts new file mode 100644 index 0000000000..f9368ec69c --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers.test.ts @@ -0,0 +1,95 @@ +/** + * @vitest-environment node + */ +import { beforeEach, describe, expect, it, vi } from 'vitest' +import { loggerMock } from '@sim/testing' + +vi.mock('@sim/logger', () => loggerMock) + +const executeToolServerSide = vi.fn() +const markToolComplete = vi.fn() + +vi.mock('@/lib/copilot/orchestrator/tool-executor', () => ({ + executeToolServerSide, + markToolComplete, +})) + +import { sseHandlers } from '@/lib/copilot/orchestrator/sse-handlers' +import type { ExecutionContext, StreamingContext } from '@/lib/copilot/orchestrator/types' + +describe('sse-handlers tool lifecycle', () => { + let context: StreamingContext + let execContext: ExecutionContext + + beforeEach(() => { + vi.clearAllMocks() + context = { + chatId: undefined, + conversationId: undefined, + messageId: 'msg-1', + accumulatedContent: '', + contentBlocks: [], + toolCalls: new Map(), + currentThinkingBlock: null, + isInThinkingBlock: false, + subAgentParentToolCallId: undefined, + subAgentContent: {}, + subAgentToolCalls: {}, + pendingContent: '', + streamComplete: false, + wasAborted: false, + errors: [], + } + execContext = { + userId: 'user-1', + workflowId: 'workflow-1', + } + }) + + it('executes tool_call and emits tool_result + mark-complete', async () => { + executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } }) + markToolComplete.mockResolvedValueOnce(true) + const onEvent = vi.fn() + + await sseHandlers.tool_call( + { + type: 'tool_call', + data: { id: 'tool-1', 
name: 'get_user_workflow', arguments: { workflowId: 'workflow-1' } }, + } as any, + context, + execContext, + { onEvent, interactive: false, timeout: 1000 } + ) + + expect(executeToolServerSide).toHaveBeenCalledTimes(1) + expect(markToolComplete).toHaveBeenCalledTimes(1) + expect(onEvent).toHaveBeenCalledWith( + expect.objectContaining({ + type: 'tool_result', + toolCallId: 'tool-1', + success: true, + }) + ) + + const updated = context.toolCalls.get('tool-1') + expect(updated?.status).toBe('success') + expect(updated?.result?.output).toEqual({ ok: true }) + }) + + it('skips duplicate tool_call after result', async () => { + executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } }) + markToolComplete.mockResolvedValueOnce(true) + + const event = { + type: 'tool_call', + data: { id: 'tool-dup', name: 'get_user_workflow', arguments: { workflowId: 'workflow-1' } }, + } + + await sseHandlers.tool_call(event as any, context, execContext, { interactive: false }) + await sseHandlers.tool_call(event as any, context, execContext, { interactive: false }) + + expect(executeToolServerSide).toHaveBeenCalledTimes(1) + expect(markToolComplete).toHaveBeenCalledTimes(1) + }) +}) + diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts index 9a3d0f1b9d..0f5f3df1ae 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts @@ -1,6 +1,11 @@ import { createLogger } from '@sim/logger' import { INTERRUPT_TOOL_SET, SUBAGENT_TOOL_SET } from '@/lib/copilot/orchestrator/config' import { getToolConfirmation } from '@/lib/copilot/orchestrator/persistence' +import { + getEventData, + markToolResultSeen, + wasToolResultSeen, +} from '@/lib/copilot/orchestrator/sse-utils' import { executeToolServerSide, markToolComplete } from '@/lib/copilot/orchestrator/tool-executor' import type { ContentBlock, @@ -13,109 +18,7 @@ import type { const logger = 
createLogger('CopilotSseHandlers') -/** - * Tracks tool call IDs for which a tool_call has already been forwarded/emitted (non-partial). - */ -const seenToolCalls = new Set() - -/** - * Tracks tool call IDs for which a tool_result has already been emitted or forwarded. - */ -const seenToolResults = new Set() - -export function markToolCallSeen(toolCallId: string): void { - seenToolCalls.add(toolCallId) - setTimeout( - () => { - seenToolCalls.delete(toolCallId) - }, - 5 * 60 * 1000 - ) -} - -export function wasToolCallSeen(toolCallId: string): boolean { - return seenToolCalls.has(toolCallId) -} - -type EventDataObject = Record | undefined - -const parseEventData = (data: unknown): EventDataObject => { - if (!data) return undefined - if (typeof data !== 'string') { - return data as EventDataObject - } - try { - return JSON.parse(data) as EventDataObject - } catch { - return undefined - } -} - -const hasToolFields = (data: EventDataObject): boolean => { - if (!data) return false - return ( - data.id !== undefined || - data.toolCallId !== undefined || - data.name !== undefined || - data.success !== undefined || - data.result !== undefined || - data.arguments !== undefined - ) -} - -const getEventData = (event: SSEEvent): EventDataObject => { - const topLevel = parseEventData(event.data) - if (!topLevel) return undefined - if (hasToolFields(topLevel)) return topLevel - const nested = parseEventData(topLevel.data) - return nested || topLevel -} - -export function getToolCallIdFromEvent(event: SSEEvent): string | undefined { - const data = getEventData(event) - return event.toolCallId || data?.id || data?.toolCallId -} - -/** Normalizes SSE events so tool metadata is available at the top level. 
*/ -export function normalizeSseEvent(event: SSEEvent): SSEEvent { - if (!event) return event - const data = getEventData(event) - if (!data) return event - const toolCallId = event.toolCallId || data.id || data.toolCallId - const toolName = event.toolName || data.name || data.toolName - const success = event.success ?? data.success - const result = event.result ?? data.result - const normalizedData = typeof event.data === 'string' ? data : event.data - return { - ...event, - data: normalizedData, - toolCallId, - toolName, - success, - result, - } -} - -/** - * Mark a tool call as executed by the sim-side. - * This prevents the Go backend's duplicate tool_result from being forwarded. - */ -export function markToolResultSeen(toolCallId: string): void { - seenToolResults.add(toolCallId) - setTimeout( - () => { - seenToolResults.delete(toolCallId) - }, - 5 * 60 * 1000 - ) -} - -/** - * Check if a tool call was executed by the sim-side. - */ -export function wasToolResultSeen(toolCallId: string): boolean { - return seenToolResults.has(toolCallId) -} +// Normalization + dedupe helpers live in sse-utils to keep server/client in sync. /** * Respond tools are internal to the copilot's subagent system. 
diff --git a/apps/sim/lib/copilot/orchestrator/sse-utils.test.ts b/apps/sim/lib/copilot/orchestrator/sse-utils.test.ts new file mode 100644 index 0000000000..37b748a7f7 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/sse-utils.test.ts @@ -0,0 +1,43 @@ +/** + * @vitest-environment node + */ +import { describe, expect, it } from 'vitest' +import { + normalizeSseEvent, + shouldSkipToolCallEvent, + shouldSkipToolResultEvent, +} from '@/lib/copilot/orchestrator/sse-utils' + +describe('sse-utils', () => { + it.concurrent('normalizes tool fields from string data', () => { + const event = { + type: 'tool_result', + data: JSON.stringify({ + id: 'tool_1', + name: 'edit_workflow', + success: true, + result: { ok: true }, + }), + } + + const normalized = normalizeSseEvent(event as any) + + expect(normalized.toolCallId).toBe('tool_1') + expect(normalized.toolName).toBe('edit_workflow') + expect(normalized.success).toBe(true) + expect(normalized.result).toEqual({ ok: true }) + }) + + it.concurrent('dedupes tool_call events', () => { + const event = { type: 'tool_call', data: { id: 'tool_call_1', name: 'plan' } } + expect(shouldSkipToolCallEvent(event as any)).toBe(false) + expect(shouldSkipToolCallEvent(event as any)).toBe(true) + }) + + it.concurrent('dedupes tool_result events', () => { + const event = { type: 'tool_result', data: { id: 'tool_result_1', name: 'plan' } } + expect(shouldSkipToolResultEvent(event as any)).toBe(false) + expect(shouldSkipToolResultEvent(event as any)).toBe(true) + }) +}) + diff --git a/apps/sim/lib/copilot/orchestrator/sse-utils.ts b/apps/sim/lib/copilot/orchestrator/sse-utils.ts new file mode 100644 index 0000000000..792a42aba7 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/sse-utils.ts @@ -0,0 +1,119 @@ +import type { SSEEvent } from '@/lib/copilot/orchestrator/types' + +type EventDataObject = Record | undefined + +const DEFAULT_TOOL_EVENT_TTL_MS = 5 * 60 * 1000 + +/** + * In-memory tool event dedupe. 
+ * + * NOTE: These sets are process-local only. In a multi-instance setup (e.g., ECS), + * each task maintains its own dedupe cache, so duplicates can still appear across tasks. + */ +const seenToolCalls = new Set() +const seenToolResults = new Set() + +const parseEventData = (data: unknown): EventDataObject => { + if (!data) return undefined + if (typeof data !== 'string') { + return data as EventDataObject + } + try { + return JSON.parse(data) as EventDataObject + } catch { + return undefined + } +} + +const hasToolFields = (data: EventDataObject): boolean => { + if (!data) return false + return ( + data.id !== undefined || + data.toolCallId !== undefined || + data.name !== undefined || + data.success !== undefined || + data.result !== undefined || + data.arguments !== undefined + ) +} + +export const getEventData = (event: SSEEvent): EventDataObject => { + const topLevel = parseEventData(event.data) + if (!topLevel) return undefined + if (hasToolFields(topLevel)) return topLevel + const nested = parseEventData(topLevel.data) + return nested || topLevel +} + +export function getToolCallIdFromEvent(event: SSEEvent): string | undefined { + const data = getEventData(event) + return event.toolCallId || data?.id || data?.toolCallId +} + +/** Normalizes SSE events so tool metadata is available at the top level. */ +export function normalizeSseEvent(event: SSEEvent): SSEEvent { + if (!event) return event + const data = getEventData(event) + if (!data) return event + const toolCallId = event.toolCallId || data.id || data.toolCallId + const toolName = event.toolName || data.name || data.toolName + const success = event.success ?? data.success + const result = event.result ?? data.result + const normalizedData = typeof event.data === 'string' ? 
data : event.data + return { + ...event, + data: normalizedData, + toolCallId, + toolName, + success, + result, + } +} + +export function markToolCallSeen(toolCallId: string, ttlMs: number = DEFAULT_TOOL_EVENT_TTL_MS): void { + seenToolCalls.add(toolCallId) + setTimeout(() => { + seenToolCalls.delete(toolCallId) + }, ttlMs) +} + +export function wasToolCallSeen(toolCallId: string): boolean { + return seenToolCalls.has(toolCallId) +} + +export function markToolResultSeen( + toolCallId: string, + ttlMs: number = DEFAULT_TOOL_EVENT_TTL_MS +): void { + seenToolResults.add(toolCallId) + setTimeout(() => { + seenToolResults.delete(toolCallId) + }, ttlMs) +} + +export function wasToolResultSeen(toolCallId: string): boolean { + return seenToolResults.has(toolCallId) +} + +export function shouldSkipToolCallEvent(event: SSEEvent): boolean { + if (event.type !== 'tool_call') return false + const toolCallId = getToolCallIdFromEvent(event) + if (!toolCallId) return false + const eventData = getEventData(event) + if (eventData?.partial === true) return false + if (wasToolResultSeen(toolCallId) || wasToolCallSeen(toolCallId)) { + return true + } + markToolCallSeen(toolCallId) + return false +} + +export function shouldSkipToolResultEvent(event: SSEEvent): boolean { + if (event.type !== 'tool_result') return false + const toolCallId = getToolCallIdFromEvent(event) + if (!toolCallId) return false + if (wasToolResultSeen(toolCallId)) return true + markToolResultSeen(toolCallId) + return false +} + diff --git a/apps/sim/lib/copilot/orchestrator/stream-buffer.test.ts b/apps/sim/lib/copilot/orchestrator/stream-buffer.test.ts new file mode 100644 index 0000000000..6e834c629c --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/stream-buffer.test.ts @@ -0,0 +1,118 @@ +/** + * @vitest-environment node + */ +import { beforeEach, describe, expect, it, vi } from 'vitest' +import { loggerMock } from '@sim/testing' + +vi.mock('@sim/logger', () => loggerMock) + +type StoredEntry = { score: 
number; value: string } + +const createRedisStub = () => { + const events = new Map() + const counters = new Map() + + const readEntries = (key: string, min: number, max: number) => { + const list = events.get(key) || [] + return list + .filter((entry) => entry.score >= min && entry.score <= max) + .sort((a, b) => a.score - b.score) + .map((entry) => entry.value) + } + + return { + del: vi.fn().mockResolvedValue(1), + hset: vi.fn().mockResolvedValue(1), + hgetall: vi.fn().mockResolvedValue({}), + expire: vi.fn().mockResolvedValue(1), + eval: vi.fn().mockImplementation( + ( + _lua: string, + _keysCount: number, + seqKey: string, + eventsKey: string, + _ttl: number, + _limit: number, + streamId: string, + eventJson: string + ) => { + const current = counters.get(seqKey) || 0 + const next = current + 1 + counters.set(seqKey, next) + const entry = JSON.stringify({ eventId: next, streamId, event: JSON.parse(eventJson) }) + const list = events.get(eventsKey) || [] + list.push({ score: next, value: entry }) + events.set(eventsKey, list) + return next + } + ), + incrby: vi.fn().mockImplementation((key: string, amount: number) => { + const current = counters.get(key) || 0 + const next = current + amount + counters.set(key, next) + return next + }), + zrangebyscore: vi.fn().mockImplementation((key: string, min: string, max: string) => { + const minVal = Number(min) + const maxVal = max === '+inf' ? 
Number.POSITIVE_INFINITY : Number(max) + return Promise.resolve(readEntries(key, minVal, maxVal)) + }), + pipeline: vi.fn().mockImplementation(() => { + const api = { + zadd: vi.fn().mockImplementation((key: string, ...args: Array) => { + const list = events.get(key) || [] + for (let i = 0; i < args.length; i += 2) { + list.push({ score: Number(args[i]), value: String(args[i + 1]) }) + } + events.set(key, list) + return api + }), + expire: vi.fn().mockReturnValue(api), + zremrangebyrank: vi.fn().mockReturnValue(api), + exec: vi.fn().mockResolvedValue([]), + } + return api + }), + } +} + +let mockRedis: ReturnType + +vi.mock('@/lib/core/config/redis', () => ({ + getRedisClient: () => mockRedis, +})) + +import { + appendStreamEvent, + createStreamEventWriter, + readStreamEvents, +} from '@/lib/copilot/orchestrator/stream-buffer' + +describe('stream-buffer', () => { + beforeEach(() => { + mockRedis = createRedisStub() + vi.clearAllMocks() + }) + + it.concurrent('replays events after a given event id', async () => { + await appendStreamEvent('stream-1', { type: 'content', data: 'hello' }) + await appendStreamEvent('stream-1', { type: 'content', data: 'world' }) + + const allEvents = await readStreamEvents('stream-1', 0) + expect(allEvents.map((entry) => entry.event.data)).toEqual(['hello', 'world']) + + const replayed = await readStreamEvents('stream-1', 1) + expect(replayed.map((entry) => entry.event.data)).toEqual(['world']) + }) + + it.concurrent('flushes buffered events for resume', async () => { + const writer = createStreamEventWriter('stream-2') + await writer.write({ type: 'content', data: 'a' }) + await writer.write({ type: 'content', data: 'b' }) + await writer.flush() + + const events = await readStreamEvents('stream-2', 0) + expect(events.map((entry) => entry.event.data)).toEqual(['a', 'b']) + }) +}) + diff --git a/apps/sim/lib/copilot/orchestrator/stream-buffer.ts b/apps/sim/lib/copilot/orchestrator/stream-buffer.ts index 24621ee571..29fd8f55b2 100644 --- 
a/apps/sim/lib/copilot/orchestrator/stream-buffer.ts +++ b/apps/sim/lib/copilot/orchestrator/stream-buffer.ts @@ -1,13 +1,40 @@ import { createLogger } from '@sim/logger' +import { env } from '@/lib/core/config/env' import { getRedisClient } from '@/lib/core/config/redis' const logger = createLogger('CopilotStreamBuffer') -const STREAM_TTL_SECONDS = 60 * 60 -const STREAM_EVENT_LIMIT = 5000 -const STREAM_RESERVE_BATCH = 200 -const STREAM_FLUSH_INTERVAL_MS = 15 -const STREAM_FLUSH_MAX_BATCH = 200 +const STREAM_DEFAULTS = { + ttlSeconds: 60 * 60, + eventLimit: 5000, + reserveBatch: 200, + flushIntervalMs: 15, + flushMaxBatch: 200, +} + +export type StreamBufferConfig = { + ttlSeconds: number + eventLimit: number + reserveBatch: number + flushIntervalMs: number + flushMaxBatch: number +} + +const parseNumber = (value: number | string | undefined, fallback: number): number => { + if (typeof value === 'number' && Number.isFinite(value)) return value + const parsed = Number(value) + return Number.isFinite(parsed) ? 
parsed : fallback +} + +export function getStreamBufferConfig(): StreamBufferConfig { + return { + ttlSeconds: parseNumber(env.COPILOT_STREAM_TTL_SECONDS, STREAM_DEFAULTS.ttlSeconds), + eventLimit: parseNumber(env.COPILOT_STREAM_EVENT_LIMIT, STREAM_DEFAULTS.eventLimit), + reserveBatch: parseNumber(env.COPILOT_STREAM_RESERVE_BATCH, STREAM_DEFAULTS.reserveBatch), + flushIntervalMs: parseNumber(env.COPILOT_STREAM_FLUSH_INTERVAL_MS, STREAM_DEFAULTS.flushIntervalMs), + flushMaxBatch: parseNumber(env.COPILOT_STREAM_FLUSH_MAX_BATCH, STREAM_DEFAULTS.flushMaxBatch), + } +} const APPEND_STREAM_EVENT_LUA = ` local seqKey = KEYS[1] @@ -82,6 +109,7 @@ export async function setStreamMeta(streamId: string, meta: StreamMeta): Promise const redis = getRedisClient() if (!redis) return try { + const config = getStreamBufferConfig() const payload: Record = { status: meta.status, updatedAt: meta.updatedAt || new Date().toISOString(), @@ -89,7 +117,7 @@ export async function setStreamMeta(streamId: string, meta: StreamMeta): Promise if (meta.userId) payload.userId = meta.userId if (meta.error) payload.error = meta.error await redis.hset(getMetaKey(streamId), payload) - await redis.expire(getMetaKey(streamId), STREAM_TTL_SECONDS) + await redis.expire(getMetaKey(streamId), config.ttlSeconds) } catch (error) { logger.warn('Failed to update stream meta', { streamId, @@ -124,14 +152,15 @@ export async function appendStreamEvent( } try { + const config = getStreamBufferConfig() const eventJson = JSON.stringify(event) const nextId = await redis.eval( APPEND_STREAM_EVENT_LUA, 2, getSeqKey(streamId), getEventsKey(streamId), - STREAM_TTL_SECONDS, - STREAM_EVENT_LIMIT, + config.ttlSeconds, + config.eventLimit, streamId, eventJson ) @@ -156,6 +185,7 @@ export function createStreamEventWriter(streamId: string): StreamEventWriter { } } + const config = getStreamBufferConfig() let pending: StreamEventEntry[] = [] let nextEventId = 0 let maxReservedId = 0 @@ -167,11 +197,11 @@ export function 
createStreamEventWriter(streamId: string): StreamEventWriter { flushTimer = setTimeout(() => { flushTimer = null void flush() - }, STREAM_FLUSH_INTERVAL_MS) + }, config.flushIntervalMs) } const reserveIds = async (minCount: number) => { - const reserveCount = Math.max(STREAM_RESERVE_BATCH, minCount) + const reserveCount = Math.max(config.reserveBatch, minCount) const newMax = await redis.incrby(getSeqKey(streamId), reserveCount) const startId = newMax - reserveCount + 1 if (nextEventId === 0 || nextEventId > maxReservedId) { @@ -193,9 +223,9 @@ export function createStreamEventWriter(streamId: string): StreamEventWriter { } const pipeline = redis.pipeline() pipeline.zadd(key, ...(zaddArgs as any)) - pipeline.expire(key, STREAM_TTL_SECONDS) - pipeline.expire(getSeqKey(streamId), STREAM_TTL_SECONDS) - pipeline.zremrangebyrank(key, 0, -STREAM_EVENT_LIMIT - 1) + pipeline.expire(key, config.ttlSeconds) + pipeline.expire(getSeqKey(streamId), config.ttlSeconds) + pipeline.zremrangebyrank(key, 0, -config.eventLimit - 1) await pipeline.exec() } catch (error) { logger.warn('Failed to flush stream events', { @@ -216,7 +246,7 @@ export function createStreamEventWriter(streamId: string): StreamEventWriter { const eventId = nextEventId++ const entry: StreamEventEntry = { eventId, streamId, event } pending.push(entry) - if (pending.length >= STREAM_FLUSH_MAX_BATCH) { + if (pending.length >= config.flushMaxBatch) { await flush() } else { scheduleFlush() diff --git a/apps/sim/lib/copilot/orchestrator/subagent.ts b/apps/sim/lib/copilot/orchestrator/subagent.ts index fa1f3d36ad..17079e4d58 100644 --- a/apps/sim/lib/copilot/orchestrator/subagent.ts +++ b/apps/sim/lib/copilot/orchestrator/subagent.ts @@ -1,16 +1,11 @@ import { createLogger } from '@sim/logger' import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' +import { handleSubagentRouting, sseHandlers, subAgentHandlers } from '@/lib/copilot/orchestrator/sse-handlers' import { - getToolCallIdFromEvent, - 
handleSubagentRouting, - markToolCallSeen, - markToolResultSeen, normalizeSseEvent, - sseHandlers, - subAgentHandlers, - wasToolCallSeen, - wasToolResultSeen, -} from '@/lib/copilot/orchestrator/sse-handlers' + shouldSkipToolCallEvent, + shouldSkipToolResultEvent, +} from '@/lib/copilot/orchestrator/sse-utils' import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser' import { prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor' import type { @@ -115,38 +110,9 @@ export async function orchestrateSubagentStream( const normalizedEvent = normalizeSseEvent(event) - // Skip tool_result events for tools the sim-side already executed. - // The sim-side emits its own tool_result with complete data. - // For server-side tools (not executed by sim), we still forward the Go backend's tool_result. - const toolCallId = getToolCallIdFromEvent(normalizedEvent) - const eventData = normalizedEvent.data - - const isPartialToolCall = - normalizedEvent.type === 'tool_call' && eventData?.partial === true - - const shouldSkipToolCall = - normalizedEvent.type === 'tool_call' && - !!toolCallId && - !isPartialToolCall && - (wasToolResultSeen(toolCallId) || wasToolCallSeen(toolCallId)) - - if ( - normalizedEvent.type === 'tool_call' && - toolCallId && - !isPartialToolCall && - !shouldSkipToolCall - ) { - markToolCallSeen(toolCallId) - } - - const shouldSkipToolResult = - normalizedEvent.type === 'tool_result' && - (() => { - if (!toolCallId) return false - if (wasToolResultSeen(toolCallId)) return true - markToolResultSeen(toolCallId) - return false - })() + // Skip duplicate tool events to prevent state regressions. 
+ const shouldSkipToolCall = shouldSkipToolCallEvent(normalizedEvent) + const shouldSkipToolResult = shouldSkipToolResultEvent(normalizedEvent) if (!shouldSkipToolCall && !shouldSkipToolResult) { await forwardEvent(normalizedEvent, options) diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor.ts b/apps/sim/lib/copilot/orchestrator/tool-executor.ts index 8675263951..1c04181cd4 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor.ts @@ -1,17 +1,7 @@ import { db } from '@sim/db' -import { - account, - chat, - customTools, - permissions, - workflow, - workflowFolder, - workflowMcpServer, - workflowMcpTool, - workspace, -} from '@sim/db/schema' +import { workflow } from '@sim/db/schema' import { createLogger } from '@sim/logger' -import { and, asc, desc, eq, inArray, isNull, max, or } from 'drizzle-orm' +import { eq } from 'drizzle-orm' import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' import type { ExecutionContext, @@ -19,34 +9,32 @@ import type { ToolCallState, } from '@/lib/copilot/orchestrator/types' import { routeExecution } from '@/lib/copilot/tools/server/router' -import { - extractWorkflowNames, - formatNormalizedWorkflowForCopilot, - normalizeWorkflowName, -} from '@/lib/copilot/tools/shared/workflow-utils' import { env } from '@/lib/core/config/env' -import { generateRequestId } from '@/lib/core/utils/request' import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' -import { mcpService } from '@/lib/mcp/service' -import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema' -import { listWorkspaceFiles } from '@/lib/uploads/contexts/workspace' -import { getBlockOutputPaths } from '@/lib/workflows/blocks/block-outputs' -import { BlockPathCalculator } from '@/lib/workflows/blocks/block-path-calculator' -import { buildDefaultWorkflowArtifacts } from '@/lib/workflows/defaults' -import { executeWorkflow } from '@/lib/workflows/executor/execute-workflow' 
+import { executeIntegrationToolDirect } from '@/lib/copilot/orchestrator/tool-executor/integration-tools' import { - deployWorkflow, - loadWorkflowFromNormalizedTables, - saveWorkflowToNormalizedTables, - undeployWorkflow, -} from '@/lib/workflows/persistence/utils' -import { isInputDefinitionTrigger } from '@/lib/workflows/triggers/input-definition-triggers' -import { hasValidStartBlock } from '@/lib/workflows/triggers/trigger-utils.server' -import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils' -import { checkChatAccess, checkWorkflowAccessForChatCreation } from '@/app/api/chat/utils' -import { normalizeName } from '@/executor/constants' -import { resolveEnvVarReferences } from '@/executor/utils/reference-validation' -import { executeTool } from '@/tools' + executeGetBlockOutputs, + executeGetBlockUpstreamReferences, + executeGetUserWorkflow, + executeGetWorkflowData, + executeGetWorkflowFromName, + executeListFolders, + executeListUserWorkflows, + executeListUserWorkspaces, + executeCreateWorkflow, + executeCreateFolder, + executeRunWorkflow, + executeSetGlobalWorkflowVariables, +} from '@/lib/copilot/orchestrator/tool-executor/workflow-tools' +import { + executeCheckDeploymentStatus, + executeCreateWorkspaceMcpServer, + executeDeployApi, + executeDeployChat, + executeDeployMcp, + executeListWorkspaceMcpServers, + executeRedeploy, +} from '@/lib/copilot/orchestrator/tool-executor/deployment-tools' import { getTool, resolveToolId } from '@/tools/utils' const logger = createLogger('CopilotToolExecutor') @@ -150,94 +138,6 @@ async function executeServerToolDirect( } } -/** - * Execute an integration tool directly via the tools registry. 
- */ -async function executeIntegrationToolDirect( - toolCall: ToolCallState, - toolConfig: any, - context: ExecutionContext -): Promise { - const { userId, workflowId } = context - const toolName = resolveToolId(toolCall.name) - const toolArgs = toolCall.params || {} - - let workspaceId = context.workspaceId - if (!workspaceId && workflowId) { - const workflowResult = await db - .select({ workspaceId: workflow.workspaceId }) - .from(workflow) - .where(eq(workflow.id, workflowId)) - .limit(1) - workspaceId = workflowResult[0]?.workspaceId ?? undefined - } - - const decryptedEnvVars = - context.decryptedEnvVars || (await getEffectiveDecryptedEnv(userId, workspaceId)) - - const executionParams: Record = resolveEnvVarReferences(toolArgs, decryptedEnvVars, { - deep: true, - }) as Record - - if (toolConfig.oauth?.required && toolConfig.oauth.provider) { - const provider = toolConfig.oauth.provider - const accounts = await db - .select() - .from(account) - .where(and(eq(account.providerId, provider), eq(account.userId, userId))) - .limit(1) - - if (!accounts.length) { - return { - success: false, - error: `No ${provider} account connected. Please connect your account first.`, - } - } - - const acc = accounts[0] - const requestId = generateRequestId() - const { accessToken } = await refreshTokenIfNeeded(requestId, acc as any, acc.id) - - if (!accessToken) { - return { - success: false, - error: `OAuth token not available for ${provider}. Please reconnect your account.`, - } - } - - executionParams.accessToken = accessToken - } - - if (toolConfig.params?.apiKey?.required && !executionParams.apiKey) { - return { - success: false, - error: `API key not provided for ${toolName}. 
Use {{YOUR_API_KEY_ENV_VAR}} to reference your environment variable.`, - } - } - - executionParams._context = { - workflowId, - userId, - } - - if (toolName === 'function_execute') { - executionParams.envVars = decryptedEnvVars - executionParams.workflowVariables = {} - executionParams.blockData = {} - executionParams.blockNameMapping = {} - executionParams.language = executionParams.language || 'javascript' - executionParams.timeout = executionParams.timeout || 30000 - } - - const result = await executeTool(toolName, executionParams) - - return { - success: result.success, - output: result.output, - error: result.error, - } -} - async function executeSimWorkflowTool( toolName: string, params: Record, @@ -287,1360 +187,6 @@ async function executeSimWorkflowTool( } } -async function ensureWorkflowAccess( - workflowId: string, - userId: string -): Promise<{ - workflow: typeof workflow.$inferSelect - workspaceId?: string | null -}> { - const [workflowRecord] = await db - .select() - .from(workflow) - .where(eq(workflow.id, workflowId)) - .limit(1) - if (!workflowRecord) { - throw new Error(`Workflow ${workflowId} not found`) - } - - if (workflowRecord.userId === userId) { - return { workflow: workflowRecord, workspaceId: workflowRecord.workspaceId } - } - - if (workflowRecord.workspaceId) { - const [permissionRow] = await db - .select({ permissionType: permissions.permissionType }) - .from(permissions) - .where( - and( - eq(permissions.entityType, 'workspace'), - eq(permissions.entityId, workflowRecord.workspaceId), - eq(permissions.userId, userId) - ) - ) - .limit(1) - if (permissionRow) { - return { workflow: workflowRecord, workspaceId: workflowRecord.workspaceId } - } - } - - throw new Error('Unauthorized workflow access') -} - -async function getDefaultWorkspaceId(userId: string): Promise { - const workspaces = await db - .select({ workspaceId: workspace.id }) - .from(permissions) - .innerJoin(workspace, eq(permissions.entityId, workspace.id)) - 
.where(and(eq(permissions.userId, userId), eq(permissions.entityType, 'workspace'))) - .orderBy(desc(workspace.createdAt)) - .limit(1) - - const workspaceId = workspaces[0]?.workspaceId - if (!workspaceId) { - throw new Error('No workspace found for user') - } - - return workspaceId -} - -async function ensureWorkspaceAccess( - workspaceId: string, - userId: string, - requireWrite: boolean -): Promise { - const [row] = await db - .select({ - permissionType: permissions.permissionType, - ownerId: workspace.ownerId, - }) - .from(permissions) - .innerJoin(workspace, eq(permissions.entityId, workspace.id)) - .where( - and( - eq(permissions.entityType, 'workspace'), - eq(permissions.entityId, workspaceId), - eq(permissions.userId, userId) - ) - ) - .limit(1) - - if (!row) { - throw new Error(`Workspace ${workspaceId} not found`) - } - - const isOwner = row.ownerId === userId - const permissionType = row.permissionType - const canWrite = isOwner || permissionType === 'admin' || permissionType === 'write' - - if (requireWrite && !canWrite) { - throw new Error('Write or admin access required for this workspace') - } - - if (!requireWrite && !canWrite && permissionType !== 'read') { - throw new Error('Access denied to workspace') - } -} - -async function getAccessibleWorkflowsForUser( - userId: string, - options?: { workspaceId?: string; folderId?: string } -) { - const workspaceIds = await db - .select({ entityId: permissions.entityId }) - .from(permissions) - .where(and(eq(permissions.userId, userId), eq(permissions.entityType, 'workspace'))) - - const workspaceIdList = workspaceIds.map((row) => row.entityId) - - const workflowConditions = [eq(workflow.userId, userId)] - if (workspaceIdList.length > 0) { - workflowConditions.push(inArray(workflow.workspaceId, workspaceIdList)) - } - if (options?.workspaceId) { - workflowConditions.push(eq(workflow.workspaceId, options.workspaceId)) - } - if (options?.folderId) { - workflowConditions.push(eq(workflow.folderId, 
options.folderId)) - } - - return db - .select() - .from(workflow) - .where(or(...workflowConditions)) - .orderBy(asc(workflow.sortOrder), asc(workflow.createdAt), asc(workflow.id)) -} - -async function executeGetUserWorkflow( - params: Record, - context: ExecutionContext -): Promise { - try { - const workflowId = params.workflowId || context.workflowId - if (!workflowId) { - return { success: false, error: 'workflowId is required' } - } - - const { workflow: workflowRecord, workspaceId } = await ensureWorkflowAccess( - workflowId, - context.userId - ) - - const normalized = await loadWorkflowFromNormalizedTables(workflowId) - const userWorkflow = formatNormalizedWorkflowForCopilot(normalized) - if (!userWorkflow) { - return { success: false, error: 'Workflow has no normalized data' } - } - - // Return workflow ID so copilot can use it for subsequent tool calls - return { - success: true, - output: { - workflowId, - workflowName: workflowRecord.name || '', - workspaceId, - userWorkflow, - }, - } - } catch (error) { - return { success: false, error: error instanceof Error ? error.message : String(error) } - } -} - -async function executeGetWorkflowFromName( - params: Record, - context: ExecutionContext -): Promise { - try { - const workflowName = typeof params.workflow_name === 'string' ? 
params.workflow_name.trim() : '' - if (!workflowName) { - return { success: false, error: 'workflow_name is required' } - } - - const workflows = await getAccessibleWorkflowsForUser(context.userId) - - const targetName = normalizeWorkflowName(workflowName) - const match = workflows.find((w) => normalizeWorkflowName(w.name) === targetName) - if (!match) { - return { success: false, error: `Workflow not found: ${workflowName}` } - } - - const normalized = await loadWorkflowFromNormalizedTables(match.id) - const userWorkflow = formatNormalizedWorkflowForCopilot(normalized) - if (!userWorkflow) { - return { success: false, error: 'Workflow has no normalized data' } - } - - // Return workflow ID and workspaceId so copilot can use them for subsequent tool calls - return { - success: true, - output: { - workflowId: match.id, - workflowName: match.name || '', - workspaceId: match.workspaceId, - userWorkflow, - }, - } - } catch (error) { - return { success: false, error: error instanceof Error ? error.message : String(error) } - } -} - -async function executeListUserWorkflows( - params: Record, - context: ExecutionContext -): Promise { - try { - const workspaceId = params?.workspaceId as string | undefined - const folderId = params?.folderId as string | undefined - - const workflows = await getAccessibleWorkflowsForUser(context.userId, { workspaceId, folderId }) - - // Return both names (for backward compatibility) and full workflow info with IDs - const names = extractWorkflowNames(workflows) - - const workflowList = workflows.map((w) => ({ - workflowId: w.id, - workflowName: w.name || '', - workspaceId: w.workspaceId, - folderId: w.folderId, - })) - - return { success: true, output: { workflow_names: names, workflows: workflowList } } - } catch (error) { - return { success: false, error: error instanceof Error ? 
error.message : String(error) } - } -} - -async function executeListUserWorkspaces(context: ExecutionContext): Promise { - try { - const workspaces = await db - .select({ - workspaceId: workspace.id, - workspaceName: workspace.name, - ownerId: workspace.ownerId, - permissionType: permissions.permissionType, - }) - .from(permissions) - .innerJoin(workspace, eq(permissions.entityId, workspace.id)) - .where(and(eq(permissions.userId, context.userId), eq(permissions.entityType, 'workspace'))) - .orderBy(desc(workspace.createdAt)) - - const output = workspaces.map((row) => ({ - workspaceId: row.workspaceId, - workspaceName: row.workspaceName, - role: row.ownerId === context.userId ? 'owner' : row.permissionType, - })) - - return { success: true, output: { workspaces: output } } - } catch (error) { - return { success: false, error: error instanceof Error ? error.message : String(error) } - } -} - -async function executeListFolders( - params: Record, - context: ExecutionContext -): Promise { - try { - const workspaceId = - (params?.workspaceId as string | undefined) || (await getDefaultWorkspaceId(context.userId)) - - await ensureWorkspaceAccess(workspaceId, context.userId, false) - - const folders = await db - .select({ - folderId: workflowFolder.id, - folderName: workflowFolder.name, - parentId: workflowFolder.parentId, - sortOrder: workflowFolder.sortOrder, - }) - .from(workflowFolder) - .where(eq(workflowFolder.workspaceId, workspaceId)) - .orderBy(asc(workflowFolder.sortOrder), asc(workflowFolder.createdAt)) - - return { - success: true, - output: { - workspaceId, - folders, - }, - } - } catch (error) { - return { success: false, error: error instanceof Error ? error.message : String(error) } - } -} - -async function executeCreateWorkflow( - params: Record, - context: ExecutionContext -): Promise { - try { - const name = typeof params?.name === 'string' ? 
params.name.trim() : '' - if (!name) { - return { success: false, error: 'name is required' } - } - - const workspaceId = params?.workspaceId || (await getDefaultWorkspaceId(context.userId)) - const folderId = params?.folderId || null - const description = typeof params?.description === 'string' ? params.description : null - - await ensureWorkspaceAccess(workspaceId, context.userId, true) - - const workflowId = crypto.randomUUID() - const now = new Date() - - const folderCondition = folderId ? eq(workflow.folderId, folderId) : isNull(workflow.folderId) - const [maxResult] = await db - .select({ maxOrder: max(workflow.sortOrder) }) - .from(workflow) - .where(and(eq(workflow.workspaceId, workspaceId), folderCondition)) - const sortOrder = (maxResult?.maxOrder ?? 0) + 1 - - await db.insert(workflow).values({ - id: workflowId, - userId: context.userId, - workspaceId, - folderId, - sortOrder, - name, - description, - color: '#3972F6', - lastSynced: now, - createdAt: now, - updatedAt: now, - isDeployed: false, - runCount: 0, - variables: {}, - }) - - const { workflowState } = buildDefaultWorkflowArtifacts() - const saveResult = await saveWorkflowToNormalizedTables(workflowId, workflowState) - if (!saveResult.success) { - throw new Error(saveResult.error || 'Failed to save workflow state') - } - - return { - success: true, - output: { - workflowId, - workflowName: name, - workspaceId, - folderId, - }, - } - } catch (error) { - return { success: false, error: error instanceof Error ? error.message : String(error) } - } -} - -async function executeCreateFolder( - params: Record, - context: ExecutionContext -): Promise { - try { - const name = typeof params?.name === 'string' ? 
params.name.trim() : '' - if (!name) { - return { success: false, error: 'name is required' } - } - - const workspaceId = params?.workspaceId || (await getDefaultWorkspaceId(context.userId)) - const parentId = params?.parentId || null - - await ensureWorkspaceAccess(workspaceId, context.userId, true) - - const [maxOrder] = await db - .select({ maxOrder: max(workflowFolder.sortOrder) }) - .from(workflowFolder) - .where( - and( - eq(workflowFolder.workspaceId, workspaceId), - parentId ? eq(workflowFolder.parentId, parentId) : isNull(workflowFolder.parentId) - ) - ) - .limit(1) - - const sortOrder = (maxOrder?.maxOrder ?? 0) + 1 - const folderId = crypto.randomUUID() - - await db.insert(workflowFolder).values({ - id: folderId, - name, - userId: context.userId, - workspaceId, - parentId, - color: '#6B7280', - sortOrder, - }) - - return { - success: true, - output: { - folderId, - folderName: name, - workspaceId, - parentId, - sortOrder, - }, - } - } catch (error) { - return { success: false, error: error instanceof Error ? 
error.message : String(error) } - } -} - -async function executeGetWorkflowData( - params: Record, - context: ExecutionContext -): Promise { - try { - const dataType = params.data_type - if (!dataType) { - return { success: false, error: 'data_type is required' } - } - const workflowId = params.workflowId || context.workflowId - if (!workflowId) { - return { success: false, error: 'workflowId is required' } - } - - const { workflow: workflowRecord, workspaceId } = await ensureWorkflowAccess( - workflowId, - context.userId - ) - - if (dataType === 'global_variables') { - const variablesRecord = (workflowRecord.variables as Record) || {} - const variables = Object.values(variablesRecord).map((v: any) => ({ - id: String(v?.id || ''), - name: String(v?.name || ''), - value: v?.value, - })) - return { success: true, output: { variables } } - } - - if (dataType === 'custom_tools') { - if (!workspaceId) { - return { success: false, error: 'workspaceId is required' } - } - const conditions = [ - eq(customTools.workspaceId, workspaceId), - and(eq(customTools.userId, context.userId), isNull(customTools.workspaceId)), - ] - const toolsRows = await db - .select() - .from(customTools) - .where(or(...conditions)) - .orderBy(desc(customTools.createdAt)) - - const customToolsData = toolsRows.map((tool) => ({ - id: String(tool.id || ''), - title: String(tool.title || ''), - functionName: String((tool.schema as any)?.function?.name || ''), - description: String((tool.schema as any)?.function?.description || ''), - parameters: (tool.schema as any)?.function?.parameters, - })) - - return { success: true, output: { customTools: customToolsData } } - } - - if (dataType === 'mcp_tools') { - if (!workspaceId) { - return { success: false, error: 'workspaceId is required' } - } - const tools = await mcpService.discoverTools(context.userId, workspaceId, false) - const mcpTools = tools.map((tool) => ({ - name: String(tool.name || ''), - serverId: String(tool.serverId || ''), - serverName: 
String(tool.serverName || ''), - description: String(tool.description || ''), - inputSchema: tool.inputSchema, - })) - return { success: true, output: { mcpTools } } - } - - if (dataType === 'files') { - if (!workspaceId) { - return { success: false, error: 'workspaceId is required' } - } - const files = await listWorkspaceFiles(workspaceId) - const fileResults = files.map((file) => ({ - id: String(file.id || ''), - name: String(file.name || ''), - key: String(file.key || ''), - path: String(file.path || ''), - size: Number(file.size || 0), - type: String(file.type || ''), - uploadedAt: String(file.uploadedAt || ''), - })) - return { success: true, output: { files: fileResults } } - } - - return { success: false, error: `Unknown data_type: ${dataType}` } - } catch (error) { - return { success: false, error: error instanceof Error ? error.message : String(error) } - } -} - -async function executeGetBlockOutputs( - params: Record, - context: ExecutionContext -): Promise { - try { - const workflowId = params.workflowId || context.workflowId - if (!workflowId) { - return { success: false, error: 'workflowId is required' } - } - await ensureWorkflowAccess(workflowId, context.userId) - - const normalized = await loadWorkflowFromNormalizedTables(workflowId) - if (!normalized) { - return { success: false, error: 'Workflow has no normalized data' } - } - - const blocks = normalized.blocks || {} - const loops = normalized.loops || {} - const parallels = normalized.parallels || {} - const blockIds = - Array.isArray(params.blockIds) && params.blockIds.length > 0 - ? 
params.blockIds - : Object.keys(blocks) - - const results: Array<{ - blockId: string - blockName: string - blockType: string - outputs: string[] - insideSubflowOutputs?: string[] - outsideSubflowOutputs?: string[] - triggerMode?: boolean - }> = [] - - for (const blockId of blockIds) { - const block = blocks[blockId] - if (!block?.type) continue - const blockName = block.name || block.type - - if (block.type === 'loop' || block.type === 'parallel') { - const insidePaths = getSubflowInsidePaths(block.type, blockId, loops, parallels) - results.push({ - blockId, - blockName, - blockType: block.type, - outputs: [], - insideSubflowOutputs: formatOutputsWithPrefix(insidePaths, blockName), - outsideSubflowOutputs: formatOutputsWithPrefix(['results'], blockName), - triggerMode: block.triggerMode, - }) - continue - } - - const outputs = getBlockOutputPaths(block.type, block.subBlocks, block.triggerMode) - results.push({ - blockId, - blockName, - blockType: block.type, - outputs: formatOutputsWithPrefix(outputs, blockName), - triggerMode: block.triggerMode, - }) - } - - const variables = await getWorkflowVariablesForTool(workflowId) - - const payload = { blocks: results, variables } - return { success: true, output: payload } - } catch (error) { - return { success: false, error: error instanceof Error ? 
error.message : String(error) } - } -} - -async function executeGetBlockUpstreamReferences( - params: Record, - context: ExecutionContext -): Promise { - try { - const workflowId = params.workflowId || context.workflowId - if (!workflowId) { - return { success: false, error: 'workflowId is required' } - } - if (!Array.isArray(params.blockIds) || params.blockIds.length === 0) { - return { success: false, error: 'blockIds array is required' } - } - - await ensureWorkflowAccess(workflowId, context.userId) - const normalized = await loadWorkflowFromNormalizedTables(workflowId) - if (!normalized) { - return { success: false, error: 'Workflow has no normalized data' } - } - - const blocks = normalized.blocks || {} - const edges = normalized.edges || [] - const loops = normalized.loops || {} - const parallels = normalized.parallels || {} - - const graphEdges = edges.map((edge: any) => ({ source: edge.source, target: edge.target })) - const variableOutputs = await getWorkflowVariablesForTool(workflowId) - - const results: any[] = [] - - for (const blockId of params.blockIds) { - const targetBlock = blocks[blockId] - if (!targetBlock) continue - - const insideSubflows: Array<{ blockId: string; blockName: string; blockType: string }> = [] - const containingLoopIds = new Set() - const containingParallelIds = new Set() - - Object.values(loops as Record).forEach((loop) => { - if (loop?.nodes?.includes(blockId)) { - containingLoopIds.add(loop.id) - const loopBlock = blocks[loop.id] - if (loopBlock) { - insideSubflows.push({ - blockId: loop.id, - blockName: loopBlock.name || loopBlock.type, - blockType: 'loop', - }) - } - } - }) - - Object.values(parallels as Record).forEach((parallel) => { - if (parallel?.nodes?.includes(blockId)) { - containingParallelIds.add(parallel.id) - const parallelBlock = blocks[parallel.id] - if (parallelBlock) { - insideSubflows.push({ - blockId: parallel.id, - blockName: parallelBlock.name || parallelBlock.type, - blockType: 'parallel', - }) - } - } - 
}) - - const ancestorIds = BlockPathCalculator.findAllPathNodes(graphEdges, blockId) - const accessibleIds = new Set(ancestorIds) - accessibleIds.add(blockId) - - const starterBlock = Object.values(blocks).find((b: any) => isInputDefinitionTrigger(b.type)) - if (starterBlock && ancestorIds.includes((starterBlock as any).id)) { - accessibleIds.add((starterBlock as any).id) - } - - containingLoopIds.forEach((loopId) => { - accessibleIds.add(loopId) - loops[loopId]?.nodes?.forEach((nodeId: string) => accessibleIds.add(nodeId)) - }) - - containingParallelIds.forEach((parallelId) => { - accessibleIds.add(parallelId) - parallels[parallelId]?.nodes?.forEach((nodeId: string) => accessibleIds.add(nodeId)) - }) - - const accessibleBlocks: any[] = [] - - for (const accessibleBlockId of accessibleIds) { - const block = blocks[accessibleBlockId] - if (!block?.type) continue - const canSelfReference = block.type === 'approval' || block.type === 'human_in_the_loop' - if (accessibleBlockId === blockId && !canSelfReference) continue - - const blockName = block.name || block.type - let accessContext: 'inside' | 'outside' | undefined - let outputPaths: string[] - - if (block.type === 'loop' || block.type === 'parallel') { - const isInside = - (block.type === 'loop' && containingLoopIds.has(accessibleBlockId)) || - (block.type === 'parallel' && containingParallelIds.has(accessibleBlockId)) - accessContext = isInside ? 'inside' : 'outside' - outputPaths = isInside - ? 
getSubflowInsidePaths(block.type, accessibleBlockId, loops, parallels) - : ['results'] - } else { - outputPaths = getBlockOutputPaths(block.type, block.subBlocks, block.triggerMode) - } - - const formattedOutputs = formatOutputsWithPrefix(outputPaths, blockName) - const entry: any = { - blockId: accessibleBlockId, - blockName, - blockType: block.type, - outputs: formattedOutputs, - } - if (block.triggerMode) entry.triggerMode = true - if (accessContext) entry.accessContext = accessContext - accessibleBlocks.push(entry) - } - - results.push({ - blockId, - blockName: targetBlock.name || targetBlock.type, - blockType: targetBlock.type, - accessibleBlocks, - insideSubflows, - variables: variableOutputs, - }) - } - - const payload = { results } - return { success: true, output: payload } - } catch (error) { - return { success: false, error: error instanceof Error ? error.message : String(error) } - } -} - -async function executeRunWorkflow( - params: Record, - context: ExecutionContext -): Promise { - try { - const workflowId = params.workflowId || context.workflowId - if (!workflowId) { - return { success: false, error: 'workflowId is required' } - } - - const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) - - const result = await executeWorkflow( - { - id: workflowRecord.id, - userId: workflowRecord.userId, - workspaceId: workflowRecord.workspaceId, - variables: workflowRecord.variables || {}, - }, - generateRequestId(), - params.workflow_input || params.input || undefined, - context.userId - ) - - return { - success: result.success, - output: { - executionId: result.executionId, - success: result.success, - output: result.output, - logs: result.logs, - }, - error: result.success ? undefined : result.error || 'Workflow execution failed', - } - } catch (error) { - return { success: false, error: error instanceof Error ? 
error.message : String(error) } - } -} - -async function executeSetGlobalWorkflowVariables( - params: Record, - context: ExecutionContext -): Promise { - try { - const workflowId = params.workflowId || context.workflowId - if (!workflowId) { - return { success: false, error: 'workflowId is required' } - } - const operations = Array.isArray(params.operations) ? params.operations : [] - const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) - - const currentVarsRecord = (workflowRecord.variables as Record) || {} - const byName: Record = {} - Object.values(currentVarsRecord).forEach((v: any) => { - if (v && typeof v === 'object' && v.id && v.name) byName[String(v.name)] = v - }) - - for (const op of operations) { - const key = String(op?.name || '') - if (!key) continue - const nextType = op?.type || byName[key]?.type || 'plain' - const coerceValue = (value: any, type: string) => { - if (value === undefined) return value - if (type === 'number') { - const n = Number(value) - return Number.isNaN(n) ? 
value : n - } - if (type === 'boolean') { - const v = String(value).trim().toLowerCase() - if (v === 'true') return true - if (v === 'false') return false - return value - } - if (type === 'array' || type === 'object') { - try { - const parsed = JSON.parse(String(value)) - if (type === 'array' && Array.isArray(parsed)) return parsed - if (type === 'object' && parsed && typeof parsed === 'object' && !Array.isArray(parsed)) - return parsed - } catch {} - return value - } - return value - } - - if (op.operation === 'delete') { - delete byName[key] - continue - } - const typedValue = coerceValue(op.value, nextType) - if (op.operation === 'add') { - byName[key] = { - id: crypto.randomUUID(), - workflowId, - name: key, - type: nextType, - value: typedValue, - } - continue - } - if (op.operation === 'edit') { - if (!byName[key]) { - byName[key] = { - id: crypto.randomUUID(), - workflowId, - name: key, - type: nextType, - value: typedValue, - } - } else { - byName[key] = { - ...byName[key], - type: nextType, - value: typedValue, - } - } - } - } - - const nextVarsRecord = Object.fromEntries( - Object.values(byName).map((v: any) => [String(v.id), v]) - ) - - await db - .update(workflow) - .set({ variables: nextVarsRecord, updatedAt: new Date() }) - .where(eq(workflow.id, workflowId)) - - return { success: true, output: { updated: Object.values(byName).length } } - } catch (error) { - return { success: false, error: error instanceof Error ? error.message : String(error) } - } -} - -async function executeDeployApi( - params: Record, - context: ExecutionContext -): Promise { - try { - const workflowId = params.workflowId || context.workflowId - if (!workflowId) { - return { success: false, error: 'workflowId is required' } - } - const action = params.action === 'undeploy' ? 
'undeploy' : 'deploy' - const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) - - if (action === 'undeploy') { - const result = await undeployWorkflow({ workflowId }) - if (!result.success) { - return { success: false, error: result.error || 'Failed to undeploy workflow' } - } - return { success: true, output: { workflowId, isDeployed: false } } - } - - const result = await deployWorkflow({ - workflowId, - deployedBy: context.userId, - workflowName: workflowRecord.name || undefined, - }) - if (!result.success) { - return { success: false, error: result.error || 'Failed to deploy workflow' } - } - - return { - success: true, - output: { - workflowId, - isDeployed: true, - deployedAt: result.deployedAt, - version: result.version, - }, - } - } catch (error) { - return { success: false, error: error instanceof Error ? error.message : String(error) } - } -} - -async function executeDeployChat( - params: Record, - context: ExecutionContext -): Promise { - try { - const workflowId = params.workflowId || context.workflowId - if (!workflowId) { - return { success: false, error: 'workflowId is required' } - } - - const action = params.action === 'undeploy' ? 
'undeploy' : 'deploy' - if (action === 'undeploy') { - const existing = await db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1) - if (!existing.length) { - return { success: false, error: 'No active chat deployment found for this workflow' } - } - const { hasAccess } = await checkChatAccess(existing[0].id, context.userId) - if (!hasAccess) { - return { success: false, error: 'Unauthorized chat access' } - } - await db.delete(chat).where(eq(chat.id, existing[0].id)) - return { success: true, output: { success: true, action: 'undeploy', isDeployed: false } } - } - - const { hasAccess } = await checkWorkflowAccessForChatCreation(workflowId, context.userId) - if (!hasAccess) { - return { success: false, error: 'Workflow not found or access denied' } - } - - const existing = await db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1) - const existingDeployment = existing[0] || null - - const identifier = String(params.identifier || existingDeployment?.identifier || '').trim() - const title = String(params.title || existingDeployment?.title || '').trim() - if (!identifier || !title) { - return { success: false, error: 'Chat identifier and title are required' } - } - - const identifierPattern = /^[a-z0-9-]+$/ - if (!identifierPattern.test(identifier)) { - return { - success: false, - error: 'Identifier can only contain lowercase letters, numbers, and hyphens', - } - } - - const existingIdentifier = await db - .select() - .from(chat) - .where(eq(chat.identifier, identifier)) - .limit(1) - if (existingIdentifier.length > 0 && existingIdentifier[0].id !== existingDeployment?.id) { - return { success: false, error: 'Identifier already in use' } - } - - const deployResult = await deployWorkflow({ - workflowId, - deployedBy: context.userId, - }) - if (!deployResult.success) { - return { success: false, error: deployResult.error || 'Failed to deploy workflow' } - } - - const payload = { - workflowId, - identifier, - title, - description: 
String(params.description || existingDeployment?.description || ''), - customizations: { - primaryColor: - params.customizations?.primaryColor || - existingDeployment?.customizations?.primaryColor || - 'var(--brand-primary-hover-hex)', - welcomeMessage: - params.customizations?.welcomeMessage || - existingDeployment?.customizations?.welcomeMessage || - 'Hi there! How can I help you today?', - }, - authType: params.authType || existingDeployment?.authType || 'public', - password: params.password, - allowedEmails: params.allowedEmails || existingDeployment?.allowedEmails || [], - outputConfigs: params.outputConfigs || existingDeployment?.outputConfigs || [], - } - - if (existingDeployment) { - await db - .update(chat) - .set({ - identifier: payload.identifier, - title: payload.title, - description: payload.description, - customizations: payload.customizations, - authType: payload.authType, - password: payload.password || existingDeployment.password, - allowedEmails: - payload.authType === 'email' || payload.authType === 'sso' ? payload.allowedEmails : [], - outputConfigs: payload.outputConfigs, - updatedAt: new Date(), - }) - .where(eq(chat.id, existingDeployment.id)) - } else { - await db.insert(chat).values({ - id: crypto.randomUUID(), - workflowId, - userId: context.userId, - identifier: payload.identifier, - title: payload.title, - description: payload.description, - customizations: payload.customizations, - isActive: true, - authType: payload.authType, - password: payload.password || null, - allowedEmails: - payload.authType === 'email' || payload.authType === 'sso' ? payload.allowedEmails : [], - outputConfigs: payload.outputConfigs, - createdAt: new Date(), - updatedAt: new Date(), - }) - } - - return { - success: true, - output: { success: true, action: 'deploy', isDeployed: true, identifier }, - } - } catch (error) { - return { success: false, error: error instanceof Error ? 
error.message : String(error) } - } -} - -async function executeDeployMcp( - params: Record, - context: ExecutionContext -): Promise { - try { - const workflowId = params.workflowId || context.workflowId - if (!workflowId) { - return { success: false, error: 'workflowId is required' } - } - - const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) - const workspaceId = workflowRecord.workspaceId - if (!workspaceId) { - return { success: false, error: 'workspaceId is required' } - } - - if (!workflowRecord.isDeployed) { - return { - success: false, - error: 'Workflow must be deployed before adding as an MCP tool. Use deploy_api first.', - } - } - - const serverId = params.serverId - if (!serverId) { - return { - success: false, - error: 'serverId is required. Use list_workspace_mcp_servers to get available servers.', - } - } - - const existingTool = await db - .select() - .from(workflowMcpTool) - .where( - and(eq(workflowMcpTool.serverId, serverId), eq(workflowMcpTool.workflowId, workflowId)) - ) - .limit(1) - - const toolName = sanitizeToolName( - params.toolName || workflowRecord.name || `workflow_${workflowId}` - ) - const toolDescription = - params.toolDescription || - workflowRecord.description || - `Execute ${workflowRecord.name} workflow` - const parameterSchema = params.parameterSchema || {} - - if (existingTool.length > 0) { - const toolId = existingTool[0].id - await db - .update(workflowMcpTool) - .set({ - toolName, - toolDescription, - parameterSchema, - updatedAt: new Date(), - }) - .where(eq(workflowMcpTool.id, toolId)) - return { success: true, output: { toolId, toolName, toolDescription, updated: true } } - } - - const toolId = crypto.randomUUID() - await db.insert(workflowMcpTool).values({ - id: toolId, - serverId, - workflowId, - toolName, - toolDescription, - parameterSchema, - createdAt: new Date(), - updatedAt: new Date(), - }) - - return { success: true, output: { toolId, toolName, toolDescription, updated: 
false } } - } catch (error) { - return { success: false, error: error instanceof Error ? error.message : String(error) } - } -} - -async function executeRedeploy(context: ExecutionContext): Promise { - try { - const workflowId = context.workflowId - if (!workflowId) { - return { success: false, error: 'workflowId is required' } - } - await ensureWorkflowAccess(workflowId, context.userId) - - const result = await deployWorkflow({ workflowId, deployedBy: context.userId }) - if (!result.success) { - return { success: false, error: result.error || 'Failed to redeploy workflow' } - } - return { - success: true, - output: { workflowId, deployedAt: result.deployedAt || null, version: result.version }, - } - } catch (error) { - return { success: false, error: error instanceof Error ? error.message : String(error) } - } -} - -async function executeCheckDeploymentStatus( - params: Record, - context: ExecutionContext -): Promise { - try { - const workflowId = params.workflowId || context.workflowId - if (!workflowId) { - return { success: false, error: 'workflowId is required' } - } - const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) - const workspaceId = workflowRecord.workspaceId - - const [apiDeploy, chatDeploy] = await Promise.all([ - db.select().from(workflow).where(eq(workflow.id, workflowId)).limit(1), - db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1), - ]) - - const isApiDeployed = apiDeploy[0]?.isDeployed || false - const apiDetails = { - isDeployed: isApiDeployed, - deployedAt: apiDeploy[0]?.deployedAt || null, - endpoint: isApiDeployed ? `/api/workflows/${workflowId}/execute` : null, - apiKey: workflowRecord.workspaceId ? 
'Workspace API keys' : 'Personal API keys', - needsRedeployment: false, - } - - const isChatDeployed = !!chatDeploy[0] - const chatDetails = { - isDeployed: isChatDeployed, - chatId: chatDeploy[0]?.id || null, - identifier: chatDeploy[0]?.identifier || null, - chatUrl: isChatDeployed ? `/chat/${chatDeploy[0]?.identifier}` : null, - title: chatDeploy[0]?.title || null, - description: chatDeploy[0]?.description || null, - authType: chatDeploy[0]?.authType || null, - allowedEmails: chatDeploy[0]?.allowedEmails || null, - outputConfigs: chatDeploy[0]?.outputConfigs || null, - welcomeMessage: chatDeploy[0]?.customizations?.welcomeMessage || null, - primaryColor: chatDeploy[0]?.customizations?.primaryColor || null, - hasPassword: Boolean(chatDeploy[0]?.password), - } - - const mcpDetails = { isDeployed: false, servers: [] as any[] } - if (workspaceId) { - const servers = await db - .select({ - serverId: workflowMcpServer.id, - serverName: workflowMcpServer.name, - toolName: workflowMcpTool.toolName, - toolDescription: workflowMcpTool.toolDescription, - parameterSchema: workflowMcpTool.parameterSchema, - toolId: workflowMcpTool.id, - }) - .from(workflowMcpTool) - .innerJoin(workflowMcpServer, eq(workflowMcpTool.serverId, workflowMcpServer.id)) - .where(eq(workflowMcpTool.workflowId, workflowId)) - - if (servers.length > 0) { - mcpDetails.isDeployed = true - mcpDetails.servers = servers - } - } - - const isDeployed = apiDetails.isDeployed || chatDetails.isDeployed || mcpDetails.isDeployed - return { - success: true, - output: { isDeployed, api: apiDetails, chat: chatDetails, mcp: mcpDetails }, - } - } catch (error) { - return { success: false, error: error instanceof Error ? 
error.message : String(error) } - } -} - -async function executeListWorkspaceMcpServers( - params: Record, - context: ExecutionContext -): Promise { - try { - const workflowId = params.workflowId || context.workflowId - if (!workflowId) { - return { success: false, error: 'workflowId is required' } - } - const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) - const workspaceId = workflowRecord.workspaceId - if (!workspaceId) { - return { success: false, error: 'workspaceId is required' } - } - - const servers = await db - .select({ - id: workflowMcpServer.id, - name: workflowMcpServer.name, - description: workflowMcpServer.description, - }) - .from(workflowMcpServer) - .where(eq(workflowMcpServer.workspaceId, workspaceId)) - - const serverIds = servers.map((server) => server.id) - const tools = - serverIds.length > 0 - ? await db - .select({ - serverId: workflowMcpTool.serverId, - toolName: workflowMcpTool.toolName, - }) - .from(workflowMcpTool) - .where(inArray(workflowMcpTool.serverId, serverIds)) - : [] - - const toolNamesByServer: Record = {} - for (const tool of tools) { - if (!toolNamesByServer[tool.serverId]) { - toolNamesByServer[tool.serverId] = [] - } - toolNamesByServer[tool.serverId].push(tool.toolName) - } - - const serversWithToolNames = servers.map((server) => ({ - ...server, - toolCount: toolNamesByServer[server.id]?.length || 0, - toolNames: toolNamesByServer[server.id] || [], - })) - - return { success: true, output: { servers: serversWithToolNames, count: servers.length } } - } catch (error) { - return { success: false, error: error instanceof Error ? 
error.message : String(error) } - } -} - -async function executeCreateWorkspaceMcpServer( - params: Record, - context: ExecutionContext -): Promise { - try { - const workflowId = params.workflowId || context.workflowId - if (!workflowId) { - return { success: false, error: 'workflowId is required' } - } - const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) - const workspaceId = workflowRecord.workspaceId - if (!workspaceId) { - return { success: false, error: 'workspaceId is required' } - } - - const name = params.name?.trim() - if (!name) { - return { success: false, error: 'name is required' } - } - - const serverId = crypto.randomUUID() - const [server] = await db - .insert(workflowMcpServer) - .values({ - id: serverId, - workspaceId, - createdBy: context.userId, - name, - description: params.description?.trim() || null, - isPublic: params.isPublic ?? false, - createdAt: new Date(), - updatedAt: new Date(), - }) - .returning() - - const workflowIds: string[] = params.workflowIds || [] - const addedTools: Array<{ workflowId: string; toolName: string }> = [] - - if (workflowIds.length > 0) { - const workflows = await db.select().from(workflow).where(inArray(workflow.id, workflowIds)) - - for (const wf of workflows) { - if (wf.workspaceId !== workspaceId || !wf.isDeployed) { - continue - } - const hasStartBlock = await hasValidStartBlock(wf.id) - if (!hasStartBlock) { - continue - } - const toolName = sanitizeToolName(wf.name || `workflow_${wf.id}`) - await db.insert(workflowMcpTool).values({ - id: crypto.randomUUID(), - serverId, - workflowId: wf.id, - toolName, - toolDescription: wf.description || `Execute ${wf.name} workflow`, - parameterSchema: {}, - createdAt: new Date(), - updatedAt: new Date(), - }) - addedTools.push({ workflowId: wf.id, toolName }) - } - } - - return { success: true, output: { server, addedTools } } - } catch (error) { - return { success: false, error: error instanceof Error ? 
error.message : String(error) } - } -} - -async function getWorkflowVariablesForTool( - workflowId: string -): Promise> { - const [workflowRecord] = await db - .select({ variables: workflow.variables }) - .from(workflow) - .where(eq(workflow.id, workflowId)) - .limit(1) - - const variablesRecord = (workflowRecord?.variables as Record) || {} - return Object.values(variablesRecord) - .filter((v: any) => v?.name && String(v.name).trim() !== '') - .map((v: any) => ({ - id: String(v.id || ''), - name: String(v.name || ''), - type: String(v.type || 'plain'), - tag: `variable.${normalizeName(String(v.name || ''))}`, - })) -} - -function getSubflowInsidePaths( - blockType: 'loop' | 'parallel', - blockId: string, - loops: Record, - parallels: Record -): string[] { - const paths = ['index'] - if (blockType === 'loop') { - const loopType = loops[blockId]?.loopType || 'for' - if (loopType === 'forEach') { - paths.push('currentItem', 'items') - } - } else { - const parallelType = parallels[blockId]?.parallelType || 'count' - if (parallelType === 'collection') { - paths.push('currentItem', 'items') - } - } - return paths -} - -function formatOutputsWithPrefix(paths: string[], blockName: string): string[] { - const normalizedName = normalizeName(blockName) - return paths.map((path) => `${normalizedName}.${path}`) -} - /** * Notify the copilot backend that a tool has completed. 
*/ diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/access.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/access.ts new file mode 100644 index 0000000000..0f3f32492d --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/access.ts @@ -0,0 +1,130 @@ +import { db } from '@sim/db' +import { permissions, workflow, workspace } from '@sim/db/schema' +import { and, asc, desc, eq, inArray, or } from 'drizzle-orm' + +type WorkflowRecord = typeof workflow.$inferSelect + +export async function ensureWorkflowAccess( + workflowId: string, + userId: string +): Promise<{ + workflow: WorkflowRecord + workspaceId?: string | null +}> { + const [workflowRecord] = await db + .select() + .from(workflow) + .where(eq(workflow.id, workflowId)) + .limit(1) + if (!workflowRecord) { + throw new Error(`Workflow ${workflowId} not found`) + } + + if (workflowRecord.userId === userId) { + return { workflow: workflowRecord, workspaceId: workflowRecord.workspaceId } + } + + if (workflowRecord.workspaceId) { + const [permissionRow] = await db + .select({ permissionType: permissions.permissionType }) + .from(permissions) + .where( + and( + eq(permissions.entityType, 'workspace'), + eq(permissions.entityId, workflowRecord.workspaceId), + eq(permissions.userId, userId) + ) + ) + .limit(1) + if (permissionRow) { + return { workflow: workflowRecord, workspaceId: workflowRecord.workspaceId } + } + } + + throw new Error('Unauthorized workflow access') +} + +export async function getDefaultWorkspaceId(userId: string): Promise { + const workspaces = await db + .select({ workspaceId: workspace.id }) + .from(permissions) + .innerJoin(workspace, eq(permissions.entityId, workspace.id)) + .where(and(eq(permissions.userId, userId), eq(permissions.entityType, 'workspace'))) + .orderBy(desc(workspace.createdAt)) + .limit(1) + + const workspaceId = workspaces[0]?.workspaceId + if (!workspaceId) { + throw new Error('No workspace found for user') + } + + return workspaceId +} + +export 
async function ensureWorkspaceAccess( + workspaceId: string, + userId: string, + requireWrite: boolean +): Promise { + const [row] = await db + .select({ + permissionType: permissions.permissionType, + ownerId: workspace.ownerId, + }) + .from(permissions) + .innerJoin(workspace, eq(permissions.entityId, workspace.id)) + .where( + and( + eq(permissions.entityType, 'workspace'), + eq(permissions.entityId, workspaceId), + eq(permissions.userId, userId) + ) + ) + .limit(1) + + if (!row) { + throw new Error(`Workspace ${workspaceId} not found`) + } + + const isOwner = row.ownerId === userId + const permissionType = row.permissionType + const canWrite = isOwner || permissionType === 'admin' || permissionType === 'write' + + if (requireWrite && !canWrite) { + throw new Error('Write or admin access required for this workspace') + } + + if (!requireWrite && !canWrite && permissionType !== 'read') { + throw new Error('Access denied to workspace') + } +} + +export async function getAccessibleWorkflowsForUser( + userId: string, + options?: { workspaceId?: string; folderId?: string } +) { + const workspaceIds = await db + .select({ entityId: permissions.entityId }) + .from(permissions) + .where(and(eq(permissions.userId, userId), eq(permissions.entityType, 'workspace'))) + + const workspaceIdList = workspaceIds.map((row) => row.entityId) + + const workflowConditions = [eq(workflow.userId, userId)] + if (workspaceIdList.length > 0) { + workflowConditions.push(inArray(workflow.workspaceId, workspaceIdList)) + } + if (options?.workspaceId) { + workflowConditions.push(eq(workflow.workspaceId, options.workspaceId)) + } + if (options?.folderId) { + workflowConditions.push(eq(workflow.folderId, options.folderId)) + } + + return db + .select() + .from(workflow) + .where(or(...workflowConditions)) + .orderBy(asc(workflow.sortOrder), asc(workflow.createdAt), asc(workflow.id)) +} + diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools.ts 
b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools.ts new file mode 100644 index 0000000000..fdc962382b --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools.ts @@ -0,0 +1,479 @@ +import crypto from 'crypto' +import { db } from '@sim/db' +import { chat, workflow, workflowMcpServer, workflowMcpTool } from '@sim/db/schema' +import { and, eq, inArray } from 'drizzle-orm' +import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types' +import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema' +import { deployWorkflow, undeployWorkflow } from '@/lib/workflows/persistence/utils' +import { hasValidStartBlock } from '@/lib/workflows/triggers/trigger-utils.server' +import { checkChatAccess, checkWorkflowAccessForChatCreation } from '@/app/api/chat/utils' +import { ensureWorkflowAccess } from './access' + +export async function executeDeployApi( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const action = params.action === 'undeploy' ? 
'undeploy' : 'deploy' + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + + if (action === 'undeploy') { + const result = await undeployWorkflow({ workflowId }) + if (!result.success) { + return { success: false, error: result.error || 'Failed to undeploy workflow' } + } + return { success: true, output: { workflowId, isDeployed: false } } + } + + const result = await deployWorkflow({ + workflowId, + deployedBy: context.userId, + workflowName: workflowRecord.name || undefined, + }) + if (!result.success) { + return { success: false, error: result.error || 'Failed to deploy workflow' } + } + + return { + success: true, + output: { + workflowId, + isDeployed: true, + deployedAt: result.deployedAt, + version: result.version, + }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeDeployChat( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + + const action = params.action === 'undeploy' ? 
'undeploy' : 'deploy' + if (action === 'undeploy') { + const existing = await db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1) + if (!existing.length) { + return { success: false, error: 'No active chat deployment found for this workflow' } + } + const { hasAccess } = await checkChatAccess(existing[0].id, context.userId) + if (!hasAccess) { + return { success: false, error: 'Unauthorized chat access' } + } + await db.delete(chat).where(eq(chat.id, existing[0].id)) + return { success: true, output: { success: true, action: 'undeploy', isDeployed: false } } + } + + const { hasAccess } = await checkWorkflowAccessForChatCreation(workflowId, context.userId) + if (!hasAccess) { + return { success: false, error: 'Workflow not found or access denied' } + } + + const existing = await db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1) + const existingDeployment = existing[0] || null + + const identifier = String(params.identifier || existingDeployment?.identifier || '').trim() + const title = String(params.title || existingDeployment?.title || '').trim() + if (!identifier || !title) { + return { success: false, error: 'Chat identifier and title are required' } + } + + const identifierPattern = /^[a-z0-9-]+$/ + if (!identifierPattern.test(identifier)) { + return { + success: false, + error: 'Identifier can only contain lowercase letters, numbers, and hyphens', + } + } + + const existingIdentifier = await db + .select() + .from(chat) + .where(eq(chat.identifier, identifier)) + .limit(1) + if (existingIdentifier.length > 0 && existingIdentifier[0].id !== existingDeployment?.id) { + return { success: false, error: 'Identifier already in use' } + } + + const deployResult = await deployWorkflow({ + workflowId, + deployedBy: context.userId, + }) + if (!deployResult.success) { + return { success: false, error: deployResult.error || 'Failed to deploy workflow' } + } + + const payload = { + workflowId, + identifier, + title, + description: 
String(params.description || existingDeployment?.description || ''), + customizations: { + primaryColor: + params.customizations?.primaryColor || + existingDeployment?.customizations?.primaryColor || + 'var(--brand-primary-hover-hex)', + welcomeMessage: + params.customizations?.welcomeMessage || + existingDeployment?.customizations?.welcomeMessage || + 'Hi there! How can I help you today?', + }, + authType: params.authType || existingDeployment?.authType || 'public', + password: params.password, + allowedEmails: params.allowedEmails || existingDeployment?.allowedEmails || [], + outputConfigs: params.outputConfigs || existingDeployment?.outputConfigs || [], + } + + if (existingDeployment) { + await db + .update(chat) + .set({ + identifier: payload.identifier, + title: payload.title, + description: payload.description, + customizations: payload.customizations, + authType: payload.authType, + password: payload.password || existingDeployment.password, + allowedEmails: + payload.authType === 'email' || payload.authType === 'sso' ? payload.allowedEmails : [], + outputConfigs: payload.outputConfigs, + updatedAt: new Date(), + }) + .where(eq(chat.id, existingDeployment.id)) + } else { + await db.insert(chat).values({ + id: crypto.randomUUID(), + workflowId, + userId: context.userId, + identifier: payload.identifier, + title: payload.title, + description: payload.description, + customizations: payload.customizations, + isActive: true, + authType: payload.authType, + password: payload.password || null, + allowedEmails: + payload.authType === 'email' || payload.authType === 'sso' ? payload.allowedEmails : [], + outputConfigs: payload.outputConfigs, + createdAt: new Date(), + updatedAt: new Date(), + }) + } + + return { + success: true, + output: { success: true, action: 'deploy', isDeployed: true, identifier }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeDeployMcp( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const workspaceId = workflowRecord.workspaceId + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + + if (!workflowRecord.isDeployed) { + return { + success: false, + error: 'Workflow must be deployed before adding as an MCP tool. Use deploy_api first.', + } + } + + const serverId = params.serverId + if (!serverId) { + return { + success: false, + error: 'serverId is required. Use list_workspace_mcp_servers to get available servers.', + } + } + + const existingTool = await db + .select() + .from(workflowMcpTool) + .where(and(eq(workflowMcpTool.serverId, serverId), eq(workflowMcpTool.workflowId, workflowId))) + .limit(1) + + const toolName = sanitizeToolName( + params.toolName || workflowRecord.name || `workflow_${workflowId}` + ) + const toolDescription = + params.toolDescription || workflowRecord.description || `Execute ${workflowRecord.name} workflow` + const parameterSchema = params.parameterSchema || {} + + if (existingTool.length > 0) { + const toolId = existingTool[0].id + await db + .update(workflowMcpTool) + .set({ + toolName, + toolDescription, + parameterSchema, + updatedAt: new Date(), + }) + .where(eq(workflowMcpTool.id, toolId)) + return { success: true, output: { toolId, toolName, toolDescription, updated: true } } + } + + const toolId = crypto.randomUUID() + await db.insert(workflowMcpTool).values({ + id: toolId, + serverId, + workflowId, + toolName, + toolDescription, + parameterSchema, + createdAt: new Date(), + updatedAt: new Date(), + }) + + return { success: true, output: { toolId, toolName, toolDescription, updated: 
false } } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeRedeploy(context: ExecutionContext): Promise { + try { + const workflowId = context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + await ensureWorkflowAccess(workflowId, context.userId) + + const result = await deployWorkflow({ workflowId, deployedBy: context.userId }) + if (!result.success) { + return { success: false, error: result.error || 'Failed to redeploy workflow' } + } + return { + success: true, + output: { workflowId, deployedAt: result.deployedAt || null, version: result.version }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeCheckDeploymentStatus( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const workspaceId = workflowRecord.workspaceId + + const [apiDeploy, chatDeploy] = await Promise.all([ + db.select().from(workflow).where(eq(workflow.id, workflowId)).limit(1), + db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1), + ]) + + const isApiDeployed = apiDeploy[0]?.isDeployed || false + const apiDetails = { + isDeployed: isApiDeployed, + deployedAt: apiDeploy[0]?.deployedAt || null, + endpoint: isApiDeployed ? `/api/workflows/${workflowId}/execute` : null, + apiKey: workflowRecord.workspaceId ? 
'Workspace API keys' : 'Personal API keys', + needsRedeployment: false, + } + + const isChatDeployed = !!chatDeploy[0] + const chatDetails = { + isDeployed: isChatDeployed, + chatId: chatDeploy[0]?.id || null, + identifier: chatDeploy[0]?.identifier || null, + chatUrl: isChatDeployed ? `/chat/${chatDeploy[0]?.identifier}` : null, + title: chatDeploy[0]?.title || null, + description: chatDeploy[0]?.description || null, + authType: chatDeploy[0]?.authType || null, + allowedEmails: chatDeploy[0]?.allowedEmails || null, + outputConfigs: chatDeploy[0]?.outputConfigs || null, + welcomeMessage: chatDeploy[0]?.customizations?.welcomeMessage || null, + primaryColor: chatDeploy[0]?.customizations?.primaryColor || null, + hasPassword: Boolean(chatDeploy[0]?.password), + } + + const mcpDetails = { isDeployed: false, servers: [] as any[] } + if (workspaceId) { + const servers = await db + .select({ + serverId: workflowMcpServer.id, + serverName: workflowMcpServer.name, + toolName: workflowMcpTool.toolName, + toolDescription: workflowMcpTool.toolDescription, + parameterSchema: workflowMcpTool.parameterSchema, + toolId: workflowMcpTool.id, + }) + .from(workflowMcpTool) + .innerJoin(workflowMcpServer, eq(workflowMcpTool.serverId, workflowMcpServer.id)) + .where(eq(workflowMcpTool.workflowId, workflowId)) + + if (servers.length > 0) { + mcpDetails.isDeployed = true + mcpDetails.servers = servers + } + } + + const isDeployed = apiDetails.isDeployed || chatDetails.isDeployed || mcpDetails.isDeployed + return { + success: true, + output: { isDeployed, api: apiDetails, chat: chatDetails, mcp: mcpDetails }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeListWorkspaceMcpServers( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const workspaceId = workflowRecord.workspaceId + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + + const servers = await db + .select({ + id: workflowMcpServer.id, + name: workflowMcpServer.name, + description: workflowMcpServer.description, + }) + .from(workflowMcpServer) + .where(eq(workflowMcpServer.workspaceId, workspaceId)) + + const serverIds = servers.map((server) => server.id) + const tools = + serverIds.length > 0 + ? await db + .select({ + serverId: workflowMcpTool.serverId, + toolName: workflowMcpTool.toolName, + }) + .from(workflowMcpTool) + .where(inArray(workflowMcpTool.serverId, serverIds)) + : [] + + const toolNamesByServer: Record = {} + for (const tool of tools) { + if (!toolNamesByServer[tool.serverId]) { + toolNamesByServer[tool.serverId] = [] + } + toolNamesByServer[tool.serverId].push(tool.toolName) + } + + const serversWithToolNames = servers.map((server) => ({ + ...server, + toolCount: toolNamesByServer[server.id]?.length || 0, + toolNames: toolNamesByServer[server.id] || [], + })) + + return { success: true, output: { servers: serversWithToolNames, count: servers.length } } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeCreateWorkspaceMcpServer( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const workspaceId = workflowRecord.workspaceId + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + + const name = params.name?.trim() + if (!name) { + return { success: false, error: 'name is required' } + } + + const serverId = crypto.randomUUID() + const [server] = await db + .insert(workflowMcpServer) + .values({ + id: serverId, + workspaceId, + createdBy: context.userId, + name, + description: params.description?.trim() || null, + isPublic: params.isPublic ?? false, + createdAt: new Date(), + updatedAt: new Date(), + }) + .returning() + + const workflowIds: string[] = params.workflowIds || [] + const addedTools: Array<{ workflowId: string; toolName: string }> = [] + + if (workflowIds.length > 0) { + const workflows = await db.select().from(workflow).where(inArray(workflow.id, workflowIds)) + + for (const wf of workflows) { + if (wf.workspaceId !== workspaceId || !wf.isDeployed) { + continue + } + const hasStartBlock = await hasValidStartBlock(wf.id) + if (!hasStartBlock) { + continue + } + const toolName = sanitizeToolName(wf.name || `workflow_${wf.id}`) + await db.insert(workflowMcpTool).values({ + id: crypto.randomUUID(), + serverId, + workflowId: wf.id, + toolName, + toolDescription: wf.description || `Execute ${wf.name} workflow`, + parameterSchema: {}, + createdAt: new Date(), + updatedAt: new Date(), + }) + addedTools.push({ workflowId: wf.id, toolName }) + } + } + + return { success: true, output: { server, addedTools } } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/integration-tools.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/integration-tools.ts new file mode 100644 index 0000000000..44a10d7af3 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/integration-tools.ts @@ -0,0 +1,100 @@ +import { db } from '@sim/db' +import { account, workflow } from '@sim/db/schema' +import { and, eq } from 'drizzle-orm' +import type { + ExecutionContext, + ToolCallResult, + ToolCallState, +} from '@/lib/copilot/orchestrator/types' +import { generateRequestId } from '@/lib/core/utils/request' +import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' +import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils' +import { resolveEnvVarReferences } from '@/executor/utils/reference-validation' +import { executeTool } from '@/tools' +import { resolveToolId } from '@/tools/utils' + +export async function executeIntegrationToolDirect( + toolCall: ToolCallState, + toolConfig: any, + context: ExecutionContext +): Promise { + const { userId, workflowId } = context + const toolName = resolveToolId(toolCall.name) + const toolArgs = toolCall.params || {} + + let workspaceId = context.workspaceId + if (!workspaceId && workflowId) { + const workflowResult = await db + .select({ workspaceId: workflow.workspaceId }) + .from(workflow) + .where(eq(workflow.id, workflowId)) + .limit(1) + workspaceId = workflowResult[0]?.workspaceId ?? 
undefined + } + + const decryptedEnvVars = + context.decryptedEnvVars || (await getEffectiveDecryptedEnv(userId, workspaceId)) + + const executionParams: Record = resolveEnvVarReferences(toolArgs, decryptedEnvVars, { + deep: true, + }) as Record + + if (toolConfig.oauth?.required && toolConfig.oauth.provider) { + const provider = toolConfig.oauth.provider + const accounts = await db + .select() + .from(account) + .where(and(eq(account.providerId, provider), eq(account.userId, userId))) + .limit(1) + + if (!accounts.length) { + return { + success: false, + error: `No ${provider} account connected. Please connect your account first.`, + } + } + + const acc = accounts[0] + const requestId = generateRequestId() + const { accessToken } = await refreshTokenIfNeeded(requestId, acc as any, acc.id) + + if (!accessToken) { + return { + success: false, + error: `OAuth token not available for ${provider}. Please reconnect your account.`, + } + } + + executionParams.accessToken = accessToken + } + + if (toolConfig.params?.apiKey?.required && !executionParams.apiKey) { + return { + success: false, + error: `API key not provided for ${toolName}. 
Use {{YOUR_API_KEY_ENV_VAR}} to reference your environment variable.`, + } + } + + executionParams._context = { + workflowId, + userId, + } + + if (toolName === 'function_execute') { + executionParams.envVars = decryptedEnvVars + executionParams.workflowVariables = {} + executionParams.blockData = {} + executionParams.blockNameMapping = {} + executionParams.language = executionParams.language || 'javascript' + executionParams.timeout = executionParams.timeout || 30000 + } + + const result = await executeTool(toolName, executionParams) + + return { + success: result.success, + output: result.output, + error: result.error, + } +} + diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools.ts new file mode 100644 index 0000000000..0adc1a7685 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools.ts @@ -0,0 +1,769 @@ +import crypto from 'crypto' +import { db } from '@sim/db' +import { customTools, permissions, workflow, workflowFolder, workspace } from '@sim/db/schema' +import { and, asc, desc, eq, inArray, isNull, max, or } from 'drizzle-orm' +import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types' +import { + extractWorkflowNames, + formatNormalizedWorkflowForCopilot, + normalizeWorkflowName, +} from '@/lib/copilot/tools/shared/workflow-utils' +import { generateRequestId } from '@/lib/core/utils/request' +import { mcpService } from '@/lib/mcp/service' +import { listWorkspaceFiles } from '@/lib/uploads/contexts/workspace' +import { getBlockOutputPaths } from '@/lib/workflows/blocks/block-outputs' +import { BlockPathCalculator } from '@/lib/workflows/blocks/block-path-calculator' +import { buildDefaultWorkflowArtifacts } from '@/lib/workflows/defaults' +import { executeWorkflow } from '@/lib/workflows/executor/execute-workflow' +import { + loadWorkflowFromNormalizedTables, + saveWorkflowToNormalizedTables, +} from 
'@/lib/workflows/persistence/utils' +import { isInputDefinitionTrigger } from '@/lib/workflows/triggers/input-definition-triggers' +import { ensureWorkflowAccess, ensureWorkspaceAccess, getAccessibleWorkflowsForUser, getDefaultWorkspaceId } from './access' +import { normalizeName } from '@/executor/constants' + +export async function executeGetUserWorkflow( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + + const { workflow: workflowRecord, workspaceId } = await ensureWorkflowAccess( + workflowId, + context.userId + ) + + const normalized = await loadWorkflowFromNormalizedTables(workflowId) + const userWorkflow = formatNormalizedWorkflowForCopilot(normalized) + if (!userWorkflow) { + return { success: false, error: 'Workflow has no normalized data' } + } + + return { + success: true, + output: { + workflowId, + workflowName: workflowRecord.name || '', + workspaceId, + userWorkflow, + }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeGetWorkflowFromName( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowName = typeof params.workflow_name === 'string' ? 
params.workflow_name.trim() : '' + if (!workflowName) { + return { success: false, error: 'workflow_name is required' } + } + + const workflows = await getAccessibleWorkflowsForUser(context.userId) + + const targetName = normalizeWorkflowName(workflowName) + const match = workflows.find((w) => normalizeWorkflowName(w.name) === targetName) + if (!match) { + return { success: false, error: `Workflow not found: ${workflowName}` } + } + + const normalized = await loadWorkflowFromNormalizedTables(match.id) + const userWorkflow = formatNormalizedWorkflowForCopilot(normalized) + if (!userWorkflow) { + return { success: false, error: 'Workflow has no normalized data' } + } + + return { + success: true, + output: { + workflowId: match.id, + workflowName: match.name || '', + workspaceId: match.workspaceId, + userWorkflow, + }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeListUserWorkflows( + params: Record, + context: ExecutionContext +): Promise { + try { + const workspaceId = params?.workspaceId as string | undefined + const folderId = params?.folderId as string | undefined + + const workflows = await getAccessibleWorkflowsForUser(context.userId, { workspaceId, folderId }) + + const names = extractWorkflowNames(workflows) + + const workflowList = workflows.map((w) => ({ + workflowId: w.id, + workflowName: w.name || '', + workspaceId: w.workspaceId, + folderId: w.folderId, + })) + + return { success: true, output: { workflow_names: names, workflows: workflowList } } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeListUserWorkspaces(context: ExecutionContext): Promise { + try { + const workspaces = await db + .select({ + workspaceId: workspace.id, + workspaceName: workspace.name, + ownerId: workspace.ownerId, + permissionType: permissions.permissionType, + }) + .from(permissions) + .innerJoin(workspace, eq(permissions.entityId, workspace.id)) + .where(and(eq(permissions.userId, context.userId), eq(permissions.entityType, 'workspace'))) + .orderBy(desc(workspace.createdAt)) + + const output = workspaces.map((row) => ({ + workspaceId: row.workspaceId, + workspaceName: row.workspaceName, + role: row.ownerId === context.userId ? 'owner' : row.permissionType, + })) + + return { success: true, output: { workspaces: output } } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeListFolders( + params: Record, + context: ExecutionContext +): Promise { + try { + const workspaceId = + (params?.workspaceId as string | undefined) || (await getDefaultWorkspaceId(context.userId)) + + await ensureWorkspaceAccess(workspaceId, context.userId, false) + + const folders = await db + .select({ + folderId: workflowFolder.id, + folderName: workflowFolder.name, + parentId: workflowFolder.parentId, + sortOrder: workflowFolder.sortOrder, + }) + .from(workflowFolder) + .where(eq(workflowFolder.workspaceId, workspaceId)) + .orderBy(asc(workflowFolder.sortOrder), asc(workflowFolder.createdAt)) + + return { + success: true, + output: { + workspaceId, + folders, + }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeCreateWorkflow( + params: Record, + context: ExecutionContext +): Promise { + try { + const name = typeof params?.name === 'string' ? 
params.name.trim() : '' + if (!name) { + return { success: false, error: 'name is required' } + } + + const workspaceId = params?.workspaceId || (await getDefaultWorkspaceId(context.userId)) + const folderId = params?.folderId || null + const description = typeof params?.description === 'string' ? params.description : null + + await ensureWorkspaceAccess(workspaceId, context.userId, true) + + const workflowId = crypto.randomUUID() + const now = new Date() + + const folderCondition = folderId ? eq(workflow.folderId, folderId) : isNull(workflow.folderId) + const [maxResult] = await db + .select({ maxOrder: max(workflow.sortOrder) }) + .from(workflow) + .where(and(eq(workflow.workspaceId, workspaceId), folderCondition)) + const sortOrder = (maxResult?.maxOrder ?? 0) + 1 + + await db.insert(workflow).values({ + id: workflowId, + userId: context.userId, + workspaceId, + folderId, + sortOrder, + name, + description, + color: '#3972F6', + lastSynced: now, + createdAt: now, + updatedAt: now, + isDeployed: false, + runCount: 0, + variables: {}, + }) + + const { workflowState } = buildDefaultWorkflowArtifacts() + const saveResult = await saveWorkflowToNormalizedTables(workflowId, workflowState) + if (!saveResult.success) { + throw new Error(saveResult.error || 'Failed to save workflow state') + } + + return { + success: true, + output: { + workflowId, + workflowName: name, + workspaceId, + folderId, + }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeCreateFolder( + params: Record, + context: ExecutionContext +): Promise { + try { + const name = typeof params?.name === 'string' ? 
params.name.trim() : '' + if (!name) { + return { success: false, error: 'name is required' } + } + + const workspaceId = params?.workspaceId || (await getDefaultWorkspaceId(context.userId)) + const parentId = params?.parentId || null + + await ensureWorkspaceAccess(workspaceId, context.userId, true) + + const [maxResult] = await db + .select({ maxOrder: max(workflowFolder.sortOrder) }) + .from(workflowFolder) + .where( + and( + eq(workflowFolder.workspaceId, workspaceId), + parentId ? eq(workflowFolder.parentId, parentId) : isNull(workflowFolder.parentId) + ) + ) + const sortOrder = (maxResult?.maxOrder ?? 0) + 1 + + const folderId = crypto.randomUUID() + await db.insert(workflowFolder).values({ + id: folderId, + workspaceId, + parentId, + name, + sortOrder, + createdAt: new Date(), + updatedAt: new Date(), + }) + + return { success: true, output: { folderId, name, workspaceId, parentId } } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeGetWorkflowData( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + const dataType = params.data_type || params.dataType || '' + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + if (!dataType) { + return { success: false, error: 'data_type is required' } + } + + const { workflow: workflowRecord, workspaceId } = await ensureWorkflowAccess( + workflowId, + context.userId + ) + + if (dataType === 'global_variables') { + const variablesRecord = (workflowRecord.variables as Record) || {} + const variables = Object.values(variablesRecord).map((v: any) => ({ + id: String(v?.id || ''), + name: String(v?.name || ''), + value: v?.value, + })) + return { success: true, output: { variables } } + } + + if (dataType === 'custom_tools') { + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + 
const conditions = [ + eq(customTools.workspaceId, workspaceId), + and(eq(customTools.userId, context.userId), isNull(customTools.workspaceId)), + ] + const toolsRows = await db + .select() + .from(customTools) + .where(or(...conditions)) + .orderBy(desc(customTools.createdAt)) + + const customToolsData = toolsRows.map((tool) => ({ + id: String(tool.id || ''), + title: String(tool.title || ''), + functionName: String((tool.schema as any)?.function?.name || ''), + description: String((tool.schema as any)?.function?.description || ''), + parameters: (tool.schema as any)?.function?.parameters, + })) + + return { success: true, output: { customTools: customToolsData } } + } + + if (dataType === 'mcp_tools') { + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + const tools = await mcpService.discoverTools(context.userId, workspaceId, false) + const mcpTools = tools.map((tool) => ({ + name: String(tool.name || ''), + serverId: String(tool.serverId || ''), + serverName: String(tool.serverName || ''), + description: String(tool.description || ''), + inputSchema: tool.inputSchema, + })) + return { success: true, output: { mcpTools } } + } + + if (dataType === 'files') { + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + const files = await listWorkspaceFiles(workspaceId) + const fileResults = files.map((file) => ({ + id: String(file.id || ''), + name: String(file.name || ''), + key: String(file.key || ''), + path: String(file.path || ''), + size: Number(file.size || 0), + type: String(file.type || ''), + uploadedAt: String(file.uploadedAt || ''), + })) + return { success: true, output: { files: fileResults } } + } + + return { success: false, error: `Unknown data_type: ${dataType}` } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeGetBlockOutputs( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + await ensureWorkflowAccess(workflowId, context.userId) + + const normalized = await loadWorkflowFromNormalizedTables(workflowId) + if (!normalized) { + return { success: false, error: 'Workflow has no normalized data' } + } + + const blocks = normalized.blocks || {} + const loops = normalized.loops || {} + const parallels = normalized.parallels || {} + const blockIds = + Array.isArray(params.blockIds) && params.blockIds.length > 0 + ? params.blockIds + : Object.keys(blocks) + + const results: Array<{ + blockId: string + blockName: string + blockType: string + outputs: string[] + insideSubflowOutputs?: string[] + outsideSubflowOutputs?: string[] + triggerMode?: boolean + }> = [] + + for (const blockId of blockIds) { + const block = blocks[blockId] + if (!block?.type) continue + const blockName = block.name || block.type + + if (block.type === 'loop' || block.type === 'parallel') { + const insidePaths = getSubflowInsidePaths(block.type, blockId, loops, parallels) + results.push({ + blockId, + blockName, + blockType: block.type, + outputs: [], + insideSubflowOutputs: formatOutputsWithPrefix(insidePaths, blockName), + outsideSubflowOutputs: formatOutputsWithPrefix(['results'], blockName), + triggerMode: block.triggerMode, + }) + continue + } + + const outputs = getBlockOutputPaths(block.type, block.subBlocks, block.triggerMode) + results.push({ + blockId, + blockName, + blockType: block.type, + outputs: formatOutputsWithPrefix(outputs, blockName), + triggerMode: block.triggerMode, + }) + } + + const variables = await getWorkflowVariablesForTool(workflowId) + + const payload = { blocks: results, variables } + return { success: true, output: payload } + } catch (error) { 
+ return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeGetBlockUpstreamReferences( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + if (!Array.isArray(params.blockIds) || params.blockIds.length === 0) { + return { success: false, error: 'blockIds array is required' } + } + await ensureWorkflowAccess(workflowId, context.userId) + + const normalized = await loadWorkflowFromNormalizedTables(workflowId) + if (!normalized) { + return { success: false, error: 'Workflow has no normalized data' } + } + + const blocks = normalized.blocks || {} + const edges = normalized.edges || [] + const loops = normalized.loops || {} + const parallels = normalized.parallels || {} + + const graphEdges = edges.map((edge: any) => ({ source: edge.source, target: edge.target })) + const variableOutputs = await getWorkflowVariablesForTool(workflowId) + + const results: any[] = [] + + for (const blockId of params.blockIds) { + const targetBlock = blocks[blockId] + if (!targetBlock) continue + + const insideSubflows: Array<{ blockId: string; blockName: string; blockType: string }> = [] + const containingLoopIds = new Set() + const containingParallelIds = new Set() + + Object.values(loops as Record).forEach((loop) => { + if (loop?.nodes?.includes(blockId)) { + containingLoopIds.add(loop.id) + const loopBlock = blocks[loop.id] + if (loopBlock) { + insideSubflows.push({ + blockId: loop.id, + blockName: loopBlock.name || loopBlock.type, + blockType: 'loop', + }) + } + } + }) + + Object.values(parallels as Record).forEach((parallel) => { + if (parallel?.nodes?.includes(blockId)) { + containingParallelIds.add(parallel.id) + const parallelBlock = blocks[parallel.id] + if (parallelBlock) { + insideSubflows.push({ + blockId: parallel.id, + blockName: 
parallelBlock.name || parallelBlock.type, + blockType: 'parallel', + }) + } + } + }) + + const ancestorIds = BlockPathCalculator.findAllPathNodes(graphEdges, blockId) + const accessibleIds = new Set(ancestorIds) + accessibleIds.add(blockId) + + const starterBlock = Object.values(blocks).find((b: any) => isInputDefinitionTrigger(b.type)) + if (starterBlock && ancestorIds.includes((starterBlock as any).id)) { + accessibleIds.add((starterBlock as any).id) + } + + containingLoopIds.forEach((loopId) => { + accessibleIds.add(loopId) + loops[loopId]?.nodes?.forEach((nodeId: string) => accessibleIds.add(nodeId)) + }) + + containingParallelIds.forEach((parallelId) => { + accessibleIds.add(parallelId) + parallels[parallelId]?.nodes?.forEach((nodeId: string) => accessibleIds.add(nodeId)) + }) + + const accessibleBlocks: any[] = [] + + for (const accessibleBlockId of accessibleIds) { + const block = blocks[accessibleBlockId] + if (!block?.type) continue + const canSelfReference = block.type === 'approval' || block.type === 'human_in_the_loop' + if (accessibleBlockId === blockId && !canSelfReference) continue + + const blockName = block.name || block.type + let accessContext: 'inside' | 'outside' | undefined + let outputPaths: string[] + + if (block.type === 'loop' || block.type === 'parallel') { + const isInside = + (block.type === 'loop' && containingLoopIds.has(accessibleBlockId)) || + (block.type === 'parallel' && containingParallelIds.has(accessibleBlockId)) + accessContext = isInside ? 'inside' : 'outside' + outputPaths = isInside + ? 
getSubflowInsidePaths(block.type, accessibleBlockId, loops, parallels) + : ['results'] + } else { + outputPaths = getBlockOutputPaths(block.type, block.subBlocks, block.triggerMode) + } + + const formattedOutputs = formatOutputsWithPrefix(outputPaths, blockName) + const entry: any = { + blockId: accessibleBlockId, + blockName, + blockType: block.type, + outputs: formattedOutputs, + } + if (block.triggerMode) entry.triggerMode = true + if (accessContext) entry.accessContext = accessContext + accessibleBlocks.push(entry) + } + + results.push({ + blockId, + blockName: targetBlock.name || targetBlock.type, + blockType: targetBlock.type, + accessibleBlocks, + insideSubflows, + variables: variableOutputs, + }) + } + + const payload = { results } + return { success: true, output: payload } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeRunWorkflow( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + + const result = await executeWorkflow( + { + id: workflowRecord.id, + userId: workflowRecord.userId, + workspaceId: workflowRecord.workspaceId, + variables: workflowRecord.variables || {}, + }, + generateRequestId(), + params.workflow_input || params.input || undefined, + context.userId + ) + + return { + success: result.success, + output: { + executionId: result.executionId, + success: result.success, + output: result.output, + logs: result.logs, + }, + error: result.success ? undefined : result.error || 'Workflow execution failed', + } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeSetGlobalWorkflowVariables( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const operations = Array.isArray(params.operations) ? params.operations : [] + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + + const currentVarsRecord = (workflowRecord.variables as Record) || {} + const byName: Record = {} + Object.values(currentVarsRecord).forEach((v: any) => { + if (v && typeof v === 'object' && v.id && v.name) byName[String(v.name)] = v + }) + + for (const op of operations) { + const key = String(op?.name || '') + if (!key) continue + const nextType = op?.type || byName[key]?.type || 'plain' + const coerceValue = (value: any, type: string) => { + if (value === undefined) return value + if (type === 'number') { + const n = Number(value) + return Number.isNaN(n) ? 
value : n + } + if (type === 'boolean') { + const v = String(value).trim().toLowerCase() + if (v === 'true') return true + if (v === 'false') return false + return value + } + if (type === 'array' || type === 'object') { + try { + const parsed = JSON.parse(String(value)) + if (type === 'array' && Array.isArray(parsed)) return parsed + if (type === 'object' && parsed && typeof parsed === 'object' && !Array.isArray(parsed)) + return parsed + } catch {} + return value + } + return value + } + + if (op.operation === 'delete') { + delete byName[key] + continue + } + const typedValue = coerceValue(op.value, nextType) + if (op.operation === 'add') { + byName[key] = { + id: crypto.randomUUID(), + workflowId, + name: key, + type: nextType, + value: typedValue, + } + continue + } + if (op.operation === 'edit') { + if (!byName[key]) { + byName[key] = { + id: crypto.randomUUID(), + workflowId, + name: key, + type: nextType, + value: typedValue, + } + } else { + byName[key] = { + ...byName[key], + type: nextType, + value: typedValue, + } + } + } + } + + const nextVarsRecord = Object.fromEntries( + Object.values(byName).map((v: any) => [String(v.id), v]) + ) + + await db + .update(workflow) + .set({ variables: nextVarsRecord, updatedAt: new Date() }) + .where(eq(workflow.id, workflowId)) + + return { success: true, output: { updated: Object.values(byName).length } } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +async function getWorkflowVariablesForTool( + workflowId: string +): Promise> { + const [workflowRecord] = await db + .select({ variables: workflow.variables }) + .from(workflow) + .where(eq(workflow.id, workflowId)) + .limit(1) + + const variablesRecord = (workflowRecord?.variables as Record) || {} + return Object.values(variablesRecord) + .filter((v: any) => v?.name && String(v.name).trim() !== '') + .map((v: any) => ({ + id: String(v.id || ''), + name: String(v.name || ''), + type: String(v.type || 'plain'), + tag: `variable.${normalizeName(String(v.name || ''))}`, + })) +} + +function getSubflowInsidePaths( + blockType: 'loop' | 'parallel', + blockId: string, + loops: Record, + parallels: Record +): string[] { + const paths = ['index'] + if (blockType === 'loop') { + const loopType = loops[blockId]?.loopType || 'for' + if (loopType === 'forEach') { + paths.push('currentItem', 'items') + } + } else { + const parallelType = parallels[blockId]?.parallelType || 'count' + if (parallelType === 'collection') { + paths.push('currentItem', 'items') + } + } + return paths +} + +function formatOutputsWithPrefix(paths: string[], blockName: string): string[] { + const normalizedName = normalizeName(blockName) + return paths.map((path) => `${normalizedName}.${path}`) +} + diff --git a/apps/sim/lib/copilot/tools/client/blocks/get-blocks-and-tools.ts b/apps/sim/lib/copilot/tools/client/blocks/get-blocks-and-tools.ts index d57cb1d24e..7532ca6c49 100644 --- a/apps/sim/lib/copilot/tools/client/blocks/get-blocks-and-tools.ts +++ b/apps/sim/lib/copilot/tools/client/blocks/get-blocks-and-tools.ts @@ -48,7 +48,8 @@ export class GetBlocksAndToolsClientTool extends BaseClientTool { const parsed = ExecuteResponseSuccessSchema.parse(json) const result = GetBlocksAndToolsResult.parse(parsed.result) - await this.markToolComplete(200, 'Successfully retrieved blocks and tools', result) + // TODO: Temporarily sending empty data to test 403 issue + 
await this.markToolComplete(200, 'Successfully retrieved blocks and tools', {}) this.setState(ClientToolCallState.success) } catch (error: any) { const message = error instanceof Error ? error.message : String(error) diff --git a/apps/sim/lib/core/config/env.ts b/apps/sim/lib/core/config/env.ts index 685cf0e9da..0299ade0e2 100644 --- a/apps/sim/lib/core/config/env.ts +++ b/apps/sim/lib/core/config/env.ts @@ -35,6 +35,11 @@ export const env = createEnv({ SIM_AGENT_API_URL: z.string().url().optional(), // URL for internal sim agent API AGENT_INDEXER_URL: z.string().url().optional(), // URL for agent training data indexer AGENT_INDEXER_API_KEY: z.string().min(1).optional(), // API key for agent indexer authentication + COPILOT_STREAM_TTL_SECONDS: z.number().optional(), // Redis TTL for copilot SSE buffer + COPILOT_STREAM_EVENT_LIMIT: z.number().optional(), // Max events retained per stream + COPILOT_STREAM_RESERVE_BATCH: z.number().optional(), // Event ID reservation batch size + COPILOT_STREAM_FLUSH_INTERVAL_MS: z.number().optional(), // Buffer flush interval in ms + COPILOT_STREAM_FLUSH_MAX_BATCH: z.number().optional(), // Max events per flush batch // Database & Storage REDIS_URL: z.string().url().optional(), // Redis connection string for caching/sessions diff --git a/apps/sim/stores/panel/copilot/store.ts b/apps/sim/stores/panel/copilot/store.ts index 0db3d68c09..a17044afb8 100644 --- a/apps/sim/stores/panel/copilot/store.ts +++ b/apps/sim/stores/panel/copilot/store.ts @@ -5,6 +5,11 @@ import { create } from 'zustand' import { devtools } from 'zustand/middleware' import { type CopilotChat, sendStreamingMessage } from '@/lib/copilot/api' import type { CopilotTransportMode } from '@/lib/copilot/models' +import { + normalizeSseEvent, + shouldSkipToolCallEvent, + shouldSkipToolResultEvent, +} from '@/lib/copilot/orchestrator/sse-utils' import type { BaseClientToolMetadata, ClientToolDisplay, @@ -2045,6 +2050,12 @@ async function applySseEvent( get: () => 
CopilotStore, set: (next: Partial | ((state: CopilotStore) => Partial)) => void ): Promise { + const normalizedEvent = normalizeSseEvent(data) + if (shouldSkipToolCallEvent(normalizedEvent) || shouldSkipToolResultEvent(normalizedEvent)) { + return true + } + data = normalizedEvent + if (data.type === 'subagent_start') { const toolCallId = data.data?.tool_call_id if (toolCallId) { diff --git a/bun.lock b/bun.lock index defa6c36f1..4b18dedf8a 100644 --- a/bun.lock +++ b/bun.lock @@ -1,6 +1,5 @@ { "lockfileVersion": 1, - "configVersion": 0, "workspaces": { "": { "name": "simstudio", diff --git a/docs/COPILOT_SERVER_REFACTOR.md b/docs/COPILOT_SERVER_REFACTOR.md index a58e5aa6ad..3184fa3a15 100644 --- a/docs/COPILOT_SERVER_REFACTOR.md +++ b/docs/COPILOT_SERVER_REFACTOR.md @@ -836,6 +836,15 @@ describe('POST /api/v1/copilot/chat', () => { - Save checkpoints to resume - Handle partial completions gracefully +### Risk 6: Process-Local Dedupe + +**Risk**: Tool call/result dedupe caches are in-memory and scoped to a single process, so duplicate events can still appear across ECS tasks. 
+ +**Mitigation**: +- Treat dedupe as best-effort, not global +- Prefer idempotent state updates on the client +- Use Redis-backed stream replay for authoritative ordering + --- ## File Inventory From 7402b3853755a286a60c7bafe975c41b27195782 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Thu, 5 Feb 2026 12:01:29 -0800 Subject: [PATCH 21/72] Initial temp state, in the middle of a refactor --- .../api/copilot/api-keys/generate/route.ts | 5 +- apps/sim/app/api/copilot/api-keys/route.ts | 6 +- apps/sim/app/api/copilot/confirm/route.ts | 46 +--- apps/sim/app/api/copilot/credentials/route.ts | 26 ++ .../execute-copilot-server-tool/route.ts | 6 + .../sim/app/api/copilot/execute-tool/route.ts | 247 ----------------- apps/sim/app/api/copilot/stats/route.ts | 4 +- .../api/copilot/tools/mark-complete/route.ts | 123 --------- apps/sim/app/api/mcp/copilot/route.ts | 92 ++++++- apps/sim/lib/copilot/constants.ts | 5 + apps/sim/lib/copilot/orchestrator/config.ts | 21 ++ apps/sim/lib/copilot/orchestrator/index.ts | 9 +- .../lib/copilot/orchestrator/persistence.ts | 112 -------- .../lib/copilot/orchestrator/sse-handlers.ts | 67 ++--- .../sim/lib/copilot/orchestrator/sse-utils.ts | 10 +- apps/sim/lib/copilot/orchestrator/subagent.ts | 10 +- .../deploy.ts} | 218 +-------------- .../tool-executor/deployment-tools/index.ts | 2 + .../tool-executor/deployment-tools/manage.ts | 211 ++++++++++++++ .../index.ts} | 9 +- .../tool-executor/workflow-tools/index.ts | 2 + .../tool-executor/workflow-tools/mutations.ts | 251 +++++++++++++++++ .../queries.ts} | 258 +----------------- apps/sim/lib/copilot/orchestrator/types.ts | 24 +- .../sim/lib/copilot/tools/client/base-tool.ts | 64 +---- .../tools/client/blocks/get-block-config.ts | 49 +--- .../tools/client/blocks/get-block-options.ts | 55 +--- .../client/blocks/get-blocks-and-tools.ts | 33 +-- .../client/blocks/get-blocks-metadata.ts | 42 +-- .../tools/client/blocks/get-trigger-blocks.ts | 37 +-- 
.../tools/client/knowledge/knowledge-base.ts | 51 +--- .../tools/client/other/make-api-request.ts | 32 +-- .../client/other/search-documentation.ts | 35 +-- .../tools/client/user/get-credentials.ts | 40 +-- .../client/user/set-environment-variables.ts | 45 +-- .../client/workflow/get-workflow-console.ts | 60 +--- apps/sim/stores/panel/copilot/store.ts | 6 +- 37 files changed, 759 insertions(+), 1554 deletions(-) create mode 100644 apps/sim/app/api/copilot/credentials/route.ts delete mode 100644 apps/sim/app/api/copilot/execute-tool/route.ts delete mode 100644 apps/sim/app/api/copilot/tools/mark-complete/route.ts rename apps/sim/lib/copilot/orchestrator/tool-executor/{deployment-tools.ts => deployment-tools/deploy.ts} (55%) create mode 100644 apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/index.ts create mode 100644 apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/manage.ts rename apps/sim/lib/copilot/orchestrator/{tool-executor.ts => tool-executor/index.ts} (94%) create mode 100644 apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/index.ts create mode 100644 apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts rename apps/sim/lib/copilot/orchestrator/tool-executor/{workflow-tools.ts => workflow-tools/queries.ts} (69%) diff --git a/apps/sim/app/api/copilot/api-keys/generate/route.ts b/apps/sim/app/api/copilot/api-keys/generate/route.ts index db890bdca3..27971cede7 100644 --- a/apps/sim/app/api/copilot/api-keys/generate/route.ts +++ b/apps/sim/app/api/copilot/api-keys/generate/route.ts @@ -1,7 +1,7 @@ import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { getSession } from '@/lib/auth' -import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' +import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' import { env } from '@/lib/core/config/env' const GenerateApiKeySchema = z.object({ @@ -17,9 +17,6 @@ export async function POST(req: NextRequest) { 
const userId = session.user.id - // Move environment variable access inside the function - const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT - const body = await req.json().catch(() => ({})) const validationResult = GenerateApiKeySchema.safeParse(body) diff --git a/apps/sim/app/api/copilot/api-keys/route.ts b/apps/sim/app/api/copilot/api-keys/route.ts index f3e25ac825..02d0d5be2b 100644 --- a/apps/sim/app/api/copilot/api-keys/route.ts +++ b/apps/sim/app/api/copilot/api-keys/route.ts @@ -1,6 +1,6 @@ import { type NextRequest, NextResponse } from 'next/server' import { getSession } from '@/lib/auth' -import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' +import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' import { env } from '@/lib/core/config/env' export async function GET(request: NextRequest) { @@ -12,8 +12,6 @@ export async function GET(request: NextRequest) { const userId = session.user.id - const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT - const res = await fetch(`${SIM_AGENT_API_URL}/api/validate-key/get-api-keys`, { method: 'POST', headers: { @@ -68,8 +66,6 @@ export async function DELETE(request: NextRequest) { return NextResponse.json({ error: 'id is required' }, { status: 400 }) } - const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT - const res = await fetch(`${SIM_AGENT_API_URL}/api/validate-key/delete`, { method: 'POST', headers: { diff --git a/apps/sim/app/api/copilot/confirm/route.ts b/apps/sim/app/api/copilot/confirm/route.ts index 9fd5476c9e..01b6672a38 100644 --- a/apps/sim/app/api/copilot/confirm/route.ts +++ b/apps/sim/app/api/copilot/confirm/route.ts @@ -23,7 +23,8 @@ const ConfirmationSchema = z.object({ }) /** - * Update tool call status in Redis + * Write the user's tool decision to Redis. The server-side orchestrator's + * waitForToolDecision() polls Redis for this value. 
*/ async function updateToolCallStatus( toolCallId: string, @@ -32,57 +33,24 @@ async function updateToolCallStatus( ): Promise { const redis = getRedisClient() if (!redis) { - logger.warn('updateToolCallStatus: Redis client not available') + logger.warn('Redis client not available for tool confirmation') return false } try { const key = `tool_call:${toolCallId}` - const timeout = 600000 // 10 minutes timeout for user confirmation - const pollInterval = 100 // Poll every 100ms - const startTime = Date.now() - - logger.info('Polling for tool call in Redis', { toolCallId, key, timeout }) - - // Poll until the key exists or timeout - while (Date.now() - startTime < timeout) { - const exists = await redis.exists(key) - if (exists) { - break - } - - // Wait before next poll - await new Promise((resolve) => setTimeout(resolve, pollInterval)) - } - - // Final check if key exists after polling - const exists = await redis.exists(key) - if (!exists) { - logger.warn('Tool call not found in Redis after polling timeout', { - toolCallId, - key, - timeout, - pollDuration: Date.now() - startTime, - }) - return false - } - - // Store both status and message as JSON - const toolCallData = { + const payload = { status, message: message || null, timestamp: new Date().toISOString(), } - - await redis.set(key, JSON.stringify(toolCallData), 'EX', 86400) // Keep 24 hour expiry - + await redis.set(key, JSON.stringify(payload), 'EX', 86400) return true } catch (error) { - logger.error('Failed to update tool call status in Redis', { + logger.error('Failed to update tool call status', { toolCallId, status, - message, - error: error instanceof Error ? error.message : 'Unknown error', + error: error instanceof Error ? 
error.message : String(error), }) return false } diff --git a/apps/sim/app/api/copilot/credentials/route.ts b/apps/sim/app/api/copilot/credentials/route.ts new file mode 100644 index 0000000000..acc99958f9 --- /dev/null +++ b/apps/sim/app/api/copilot/credentials/route.ts @@ -0,0 +1,26 @@ +import { type NextRequest, NextResponse } from 'next/server' +import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers' +import { routeExecution } from '@/lib/copilot/tools/server/router' + +/** + * GET /api/copilot/credentials + * Returns connected OAuth credentials for the authenticated user. + * Used by the copilot store for credential masking. + */ +export async function GET(_req: NextRequest) { + const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly() + if (!isAuthenticated || !userId) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) + } + + try { + const result = await routeExecution('get_credentials', {}, { userId }) + return NextResponse.json({ success: true, result }) + } catch (error) { + return NextResponse.json( + { success: false, error: error instanceof Error ? error.message : 'Failed to load credentials' }, + { status: 500 } + ) + } +} + diff --git a/apps/sim/app/api/copilot/execute-copilot-server-tool/route.ts b/apps/sim/app/api/copilot/execute-copilot-server-tool/route.ts index 5627ae8976..3d6ab2e3a9 100644 --- a/apps/sim/app/api/copilot/execute-copilot-server-tool/route.ts +++ b/apps/sim/app/api/copilot/execute-copilot-server-tool/route.ts @@ -17,6 +17,12 @@ const ExecuteSchema = z.object({ payload: z.unknown().optional(), }) +/** + * @deprecated Transitional route used by the legacy client-side tool execution path + * (Zustand store → client tool classes → this route). Will be removed once the + * interactive browser path is fully migrated to server-side orchestration. + * New server-side code should use lib/copilot/orchestrator/tool-executor directly. 
+ */ export async function POST(req: NextRequest) { const tracker = createRequestTracker() try { diff --git a/apps/sim/app/api/copilot/execute-tool/route.ts b/apps/sim/app/api/copilot/execute-tool/route.ts deleted file mode 100644 index d134d28eb8..0000000000 --- a/apps/sim/app/api/copilot/execute-tool/route.ts +++ /dev/null @@ -1,247 +0,0 @@ -import { db } from '@sim/db' -import { account, workflow } from '@sim/db/schema' -import { createLogger } from '@sim/logger' -import { and, eq } from 'drizzle-orm' -import { type NextRequest, NextResponse } from 'next/server' -import { z } from 'zod' -import { getSession } from '@/lib/auth' -import { - createBadRequestResponse, - createInternalServerErrorResponse, - createRequestTracker, - createUnauthorizedResponse, -} from '@/lib/copilot/request-helpers' -import { generateRequestId } from '@/lib/core/utils/request' -import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' -import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils' -import { resolveEnvVarReferences } from '@/executor/utils/reference-validation' -import { executeTool } from '@/tools' -import { getTool, resolveToolId } from '@/tools/utils' - -const logger = createLogger('CopilotExecuteToolAPI') - -const ExecuteToolSchema = z.object({ - toolCallId: z.string(), - toolName: z.string(), - arguments: z.record(z.any()).optional().default({}), - workflowId: z.string().optional(), -}) - -export async function POST(req: NextRequest) { - const tracker = createRequestTracker() - - try { - const session = await getSession() - if (!session?.user?.id) { - return createUnauthorizedResponse() - } - - const userId = session.user.id - const body = await req.json() - - try { - const preview = JSON.stringify(body).slice(0, 300) - logger.debug(`[${tracker.requestId}] Incoming execute-tool request`, { preview }) - } catch {} - - const { toolCallId, toolName, arguments: toolArgs, workflowId } = ExecuteToolSchema.parse(body) - - const resolvedToolName = 
resolveToolId(toolName) - - logger.info(`[${tracker.requestId}] Executing tool`, { - toolCallId, - toolName, - resolvedToolName, - workflowId, - hasArgs: Object.keys(toolArgs).length > 0, - }) - - const toolConfig = getTool(resolvedToolName) - if (!toolConfig) { - // Find similar tool names to help debug - const { tools: allTools } = await import('@/tools/registry') - const allToolNames = Object.keys(allTools) - const prefix = toolName.split('_').slice(0, 2).join('_') - const similarTools = allToolNames - .filter((name) => name.startsWith(`${prefix.split('_')[0]}_`)) - .slice(0, 10) - - logger.warn(`[${tracker.requestId}] Tool not found in registry`, { - toolName, - prefix, - similarTools, - totalToolsInRegistry: allToolNames.length, - }) - return NextResponse.json( - { - success: false, - error: `Tool not found: ${toolName}. Similar tools: ${similarTools.join(', ')}`, - toolCallId, - }, - { status: 404 } - ) - } - - // Get the workspaceId from the workflow (env vars are stored at workspace level) - let workspaceId: string | undefined - if (workflowId) { - const workflowResult = await db - .select({ workspaceId: workflow.workspaceId }) - .from(workflow) - .where(eq(workflow.id, workflowId)) - .limit(1) - workspaceId = workflowResult[0]?.workspaceId ?? 
undefined - } - - // Get decrypted environment variables early so we can resolve all {{VAR}} references - const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId) - - logger.info(`[${tracker.requestId}] Fetched environment variables`, { - workflowId, - workspaceId, - envVarCount: Object.keys(decryptedEnvVars).length, - envVarKeys: Object.keys(decryptedEnvVars), - }) - - // Build execution params starting with LLM-provided arguments - // Resolve all {{ENV_VAR}} references in the arguments (deep for nested objects) - const executionParams: Record = resolveEnvVarReferences( - toolArgs, - decryptedEnvVars, - { deep: true } - ) as Record - - logger.info(`[${tracker.requestId}] Resolved env var references in arguments`, { - toolName, - originalArgKeys: Object.keys(toolArgs), - resolvedArgKeys: Object.keys(executionParams), - }) - - // Resolve OAuth access token if required - if (toolConfig.oauth?.required && toolConfig.oauth.provider) { - const provider = toolConfig.oauth.provider - logger.info(`[${tracker.requestId}] Resolving OAuth token`, { provider }) - - try { - // Find the account for this provider and user - const accounts = await db - .select() - .from(account) - .where(and(eq(account.providerId, provider), eq(account.userId, userId))) - .limit(1) - - if (accounts.length > 0) { - const acc = accounts[0] - const requestId = generateRequestId() - const { accessToken } = await refreshTokenIfNeeded(requestId, acc as any, acc.id) - - if (accessToken) { - executionParams.accessToken = accessToken - logger.info(`[${tracker.requestId}] OAuth token resolved`, { provider }) - } else { - logger.warn(`[${tracker.requestId}] No access token available`, { provider }) - return NextResponse.json( - { - success: false, - error: `OAuth token not available for ${provider}. 
Please reconnect your account.`, - toolCallId, - }, - { status: 400 } - ) - } - } else { - logger.warn(`[${tracker.requestId}] No account found for provider`, { provider }) - return NextResponse.json( - { - success: false, - error: `No ${provider} account connected. Please connect your account first.`, - toolCallId, - }, - { status: 400 } - ) - } - } catch (error) { - logger.error(`[${tracker.requestId}] Failed to resolve OAuth token`, { - provider, - error: error instanceof Error ? error.message : String(error), - }) - return NextResponse.json( - { - success: false, - error: `Failed to get OAuth token for ${provider}`, - toolCallId, - }, - { status: 500 } - ) - } - } - - // Check if tool requires an API key that wasn't resolved via {{ENV_VAR}} reference - const needsApiKey = toolConfig.params?.apiKey?.required - - if (needsApiKey && !executionParams.apiKey) { - logger.warn(`[${tracker.requestId}] No API key found for tool`, { toolName }) - return NextResponse.json( - { - success: false, - error: `API key not provided for ${toolName}. 
Use {{YOUR_API_KEY_ENV_VAR}} to reference your environment variable.`, - toolCallId, - }, - { status: 400 } - ) - } - - // Add execution context - executionParams._context = { - workflowId, - userId, - } - - // Special handling for function_execute - inject environment variables - if (toolName === 'function_execute') { - executionParams.envVars = decryptedEnvVars - executionParams.workflowVariables = {} // No workflow variables in copilot context - executionParams.blockData = {} // No block data in copilot context - executionParams.blockNameMapping = {} // No block mapping in copilot context - executionParams.language = executionParams.language || 'javascript' - executionParams.timeout = executionParams.timeout || 30000 - - logger.info(`[${tracker.requestId}] Injected env vars for function_execute`, { - envVarCount: Object.keys(decryptedEnvVars).length, - }) - } - - // Execute the tool - logger.info(`[${tracker.requestId}] Executing tool with resolved credentials`, { - toolName, - hasAccessToken: !!executionParams.accessToken, - hasApiKey: !!executionParams.apiKey, - }) - - const result = await executeTool(resolvedToolName, executionParams) - - logger.info(`[${tracker.requestId}] Tool execution complete`, { - toolName, - success: result.success, - hasOutput: !!result.output, - }) - - return NextResponse.json({ - success: true, - toolCallId, - result: { - success: result.success, - output: result.output, - error: result.error, - }, - }) - } catch (error) { - if (error instanceof z.ZodError) { - logger.debug(`[${tracker.requestId}] Zod validation error`, { issues: error.issues }) - return createBadRequestResponse('Invalid request body for execute-tool') - } - logger.error(`[${tracker.requestId}] Failed to execute tool:`, error) - const errorMessage = error instanceof Error ? 
error.message : 'Failed to execute tool' - return createInternalServerErrorResponse(errorMessage) - } -} diff --git a/apps/sim/app/api/copilot/stats/route.ts b/apps/sim/app/api/copilot/stats/route.ts index ea52c1c58b..493f6e4ec9 100644 --- a/apps/sim/app/api/copilot/stats/route.ts +++ b/apps/sim/app/api/copilot/stats/route.ts @@ -1,6 +1,6 @@ import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' -import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' +import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' import { authenticateCopilotRequestSessionOnly, createBadRequestResponse, @@ -10,8 +10,6 @@ import { } from '@/lib/copilot/request-helpers' import { env } from '@/lib/core/config/env' -const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT - const BodySchema = z.object({ messageId: z.string(), diffCreated: z.boolean(), diff --git a/apps/sim/app/api/copilot/tools/mark-complete/route.ts b/apps/sim/app/api/copilot/tools/mark-complete/route.ts deleted file mode 100644 index 1ada484e5b..0000000000 --- a/apps/sim/app/api/copilot/tools/mark-complete/route.ts +++ /dev/null @@ -1,123 +0,0 @@ -import { createLogger } from '@sim/logger' -import { type NextRequest, NextResponse } from 'next/server' -import { z } from 'zod' -import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' -import { - authenticateCopilotRequestSessionOnly, - createBadRequestResponse, - createInternalServerErrorResponse, - createRequestTracker, - createUnauthorizedResponse, -} from '@/lib/copilot/request-helpers' -import { env } from '@/lib/core/config/env' - -const logger = createLogger('CopilotMarkToolCompleteAPI') - -const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT - -const MarkCompleteSchema = z.object({ - id: z.string(), - name: z.string(), - status: z.number().int(), - message: z.any().optional(), - data: z.any().optional(), -}) - -/** - * POST /api/copilot/tools/mark-complete - * Proxy to 
Sim Agent: POST /api/tools/mark-complete - */ -export async function POST(req: NextRequest) { - const tracker = createRequestTracker() - - try { - const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly() - if (!isAuthenticated || !userId) { - return createUnauthorizedResponse() - } - - const body = await req.json() - - // Log raw body shape for diagnostics (avoid dumping huge payloads) - try { - const bodyPreview = JSON.stringify(body).slice(0, 300) - logger.debug(`[${tracker.requestId}] Incoming mark-complete raw body preview`, { - preview: `${bodyPreview}${bodyPreview.length === 300 ? '...' : ''}`, - }) - } catch {} - - const parsed = MarkCompleteSchema.parse(body) - - const messagePreview = (() => { - try { - const s = - typeof parsed.message === 'string' ? parsed.message : JSON.stringify(parsed.message) - return s ? `${s.slice(0, 200)}${s.length > 200 ? '...' : ''}` : undefined - } catch { - return undefined - } - })() - - logger.info(`[${tracker.requestId}] Forwarding tool mark-complete`, { - userId, - toolCallId: parsed.id, - toolName: parsed.name, - status: parsed.status, - hasMessage: parsed.message !== undefined, - hasData: parsed.data !== undefined, - messagePreview, - agentUrl: `${SIM_AGENT_API_URL}/api/tools/mark-complete`, - }) - - const agentRes = await fetch(`${SIM_AGENT_API_URL}/api/tools/mark-complete`, { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), - }, - body: JSON.stringify(parsed), - }) - - // Attempt to parse agent response JSON - let agentJson: any = null - let agentText: string | null = null - try { - agentJson = await agentRes.json() - } catch (_) { - try { - agentText = await agentRes.text() - } catch {} - } - - logger.info(`[${tracker.requestId}] Agent responded to mark-complete`, { - status: agentRes.status, - ok: agentRes.ok, - responseJsonPreview: agentJson ? 
JSON.stringify(agentJson).slice(0, 300) : undefined, - responseTextPreview: agentText ? agentText.slice(0, 300) : undefined, - }) - - if (agentRes.ok) { - return NextResponse.json({ success: true }) - } - - const errorMessage = - agentJson?.error || agentText || `Agent responded with status ${agentRes.status}` - const status = agentRes.status >= 500 ? 500 : 400 - - logger.warn(`[${tracker.requestId}] Mark-complete failed`, { - status, - error: errorMessage, - }) - - return NextResponse.json({ success: false, error: errorMessage }, { status }) - } catch (error) { - if (error instanceof z.ZodError) { - logger.warn(`[${tracker.requestId}] Invalid mark-complete request body`, { - issues: error.issues, - }) - return createBadRequestResponse('Invalid request body for mark-complete') - } - logger.error(`[${tracker.requestId}] Failed to proxy mark-complete:`, error) - return createInternalServerErrorResponse('Failed to mark tool as complete') - } -} diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index ee297b5b96..6f36be94d4 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -14,12 +14,15 @@ import { createLogger } from '@sim/logger' import { type NextRequest, NextResponse } from 'next/server' import { checkHybridAuth } from '@/lib/auth/hybrid' import { getCopilotModel } from '@/lib/copilot/config' +import { SIM_AGENT_VERSION } from '@/lib/copilot/constants' +import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator' import { orchestrateSubagentStream } from '@/lib/copilot/orchestrator/subagent' import { executeToolServerSide, prepareExecutionContext, } from '@/lib/copilot/orchestrator/tool-executor' import { DIRECT_TOOL_DEFS, SUBAGENT_TOOL_DEFS } from '@/lib/copilot/tools/mcp/definitions' +import { resolveWorkflowIdForUser } from '@/lib/workflows/utils' const logger = createLogger('CopilotMcpAPI') @@ -336,12 +339,95 @@ async function handleDirectToolCall( } } +/** + * 
Build mode uses the main chat orchestrator with the 'fast' command instead of + * the subagent endpoint. In Go, 'build' is not a registered subagent — it's a mode + * (ModeFast) on the main chat processor that bypasses subagent orchestration and + * executes all tools directly. + */ +async function handleBuildToolCall( + id: RequestId, + args: Record, + userId: string +): Promise { + try { + const requestText = (args.request as string) || JSON.stringify(args) + const { model } = getCopilotModel('chat') + const workflowId = args.workflowId as string | undefined + + const resolved = workflowId + ? { workflowId } + : await resolveWorkflowIdForUser(userId) + + if (!resolved?.workflowId) { + const response: CallToolResult = { + content: [{ type: 'text', text: JSON.stringify({ success: false, error: 'workflowId is required for build. Call create_workflow first.' }, null, 2) }], + isError: true, + } + return NextResponse.json(createResponse(id, response)) + } + + const chatId = crypto.randomUUID() + const context = (args.context as Record) || {} + + const requestPayload = { + message: requestText, + workflowId: resolved.workflowId, + userId, + stream: true, + streamToolCalls: true, + model, + mode: 'agent', + commands: ['fast'], + messageId: crypto.randomUUID(), + version: SIM_AGENT_VERSION, + headless: true, + chatId, + context, + } + + const result = await orchestrateCopilotStream(requestPayload, { + userId, + workflowId: resolved.workflowId, + chatId, + autoExecuteTools: true, + timeout: 300000, + interactive: false, + }) + + const responseData = { + success: result.success, + content: result.content, + toolCalls: result.toolCalls, + error: result.error, + } + + const response: CallToolResult = { + content: [{ type: 'text', text: JSON.stringify(responseData, null, 2) }], + isError: !result.success, + } + + return NextResponse.json(createResponse(id, response)) + } catch (error) { + logger.error('Build tool call failed', { error }) + return NextResponse.json( + 
createError(id, ErrorCode.InternalError, `Build failed: ${error}`), + { status: 500 } + ) + } +} + async function handleSubagentToolCall( id: RequestId, toolDef: (typeof SUBAGENT_TOOL_DEFS)[number], args: Record, userId: string ): Promise { + // Build mode uses the main chat endpoint, not the subagent endpoint + if (toolDef.agentId === 'build') { + return handleBuildToolCall(id, args, userId) + } + const requestText = (args.request as string) || (args.message as string) || @@ -363,8 +449,6 @@ async function handleSubagentToolCall( workspaceId: args.workspaceId, context, model, - // Signal to the copilot backend that this is a headless request - // so it can enforce workflowId requirements on tools headless: true, }, { @@ -374,9 +458,6 @@ async function handleSubagentToolCall( } ) - // When a respond tool (plan_respond, edit_respond, etc.) was used, - // return only the structured result - not the full result with all internal tool calls. - // This provides clean output for MCP consumers. let responseData: unknown if (result.structuredResult) { responseData = { @@ -392,7 +473,6 @@ async function handleSubagentToolCall( errors: result.errors, } } else { - // Fallback: return content if no structured result responseData = { success: result.success, content: result.content, diff --git a/apps/sim/lib/copilot/constants.ts b/apps/sim/lib/copilot/constants.ts index e4b1f3a5df..7a45127ebf 100644 --- a/apps/sim/lib/copilot/constants.ts +++ b/apps/sim/lib/copilot/constants.ts @@ -1,2 +1,7 @@ +import { env } from '@/lib/core/config/env' + export const SIM_AGENT_API_URL_DEFAULT = 'https://copilot.sim.ai' export const SIM_AGENT_VERSION = '1.0.3' + +/** Resolved copilot backend URL — reads from env with fallback to default. 
*/ +export const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT diff --git a/apps/sim/lib/copilot/orchestrator/config.ts b/apps/sim/lib/copilot/orchestrator/config.ts index 9e2dc7221a..6658ca6b93 100644 --- a/apps/sim/lib/copilot/orchestrator/config.ts +++ b/apps/sim/lib/copilot/orchestrator/config.ts @@ -19,6 +19,7 @@ export const INTERRUPT_TOOL_SET = new Set(INTERRUPT_TOOL_NAMES) export const SUBAGENT_TOOL_NAMES = [ 'debug', 'edit', + 'build', 'plan', 'test', 'deploy', @@ -31,6 +32,26 @@ export const SUBAGENT_TOOL_NAMES = [ 'workflow', 'evaluate', 'superagent', + 'discovery', ] as const export const SUBAGENT_TOOL_SET = new Set(SUBAGENT_TOOL_NAMES) + +/** + * Respond tools are internal to the copilot's subagent system. + * They're used by subagents to signal completion and should NOT be executed by the sim side. + * The copilot backend handles these internally. + */ +export const RESPOND_TOOL_NAMES = [ + 'plan_respond', + 'edit_respond', + 'build_respond', + 'debug_respond', + 'info_respond', + 'research_respond', + 'deploy_respond', + 'superagent_respond', + 'discovery_respond', +] as const + +export const RESPOND_TOOL_SET = new Set(RESPOND_TOOL_NAMES) diff --git a/apps/sim/lib/copilot/orchestrator/index.ts b/apps/sim/lib/copilot/orchestrator/index.ts index 0fe0abe622..1f3a54ee9e 100644 --- a/apps/sim/lib/copilot/orchestrator/index.ts +++ b/apps/sim/lib/copilot/orchestrator/index.ts @@ -1,6 +1,7 @@ import { createLogger } from '@sim/logger' -import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' +import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' import { handleSubagentRouting, sseHandlers, subAgentHandlers } from '@/lib/copilot/orchestrator/sse-handlers' +import { env } from '@/lib/core/config/env' import { normalizeSseEvent, shouldSkipToolCallEvent, @@ -15,10 +16,7 @@ import type { StreamingContext, ToolCallSummary, } from '@/lib/copilot/orchestrator/types' -import { env } from '@/lib/core/config/env' - const logger 
= createLogger('CopilotOrchestrator') -const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT export interface OrchestrateStreamOptions extends OrchestratorOptions { userId: string @@ -103,7 +101,8 @@ export async function orchestrateCopilotStream( } if (normalizedEvent.type === 'subagent_start') { - const toolCallId = normalizedEvent.data?.tool_call_id + const eventData = normalizedEvent.data as Record | undefined + const toolCallId = eventData?.tool_call_id as string | undefined if (toolCallId) { context.subAgentParentToolCallId = toolCallId context.subAgentContent[toolCallId] = '' diff --git a/apps/sim/lib/copilot/orchestrator/persistence.ts b/apps/sim/lib/copilot/orchestrator/persistence.ts index 418b652a50..f42d16e37e 100644 --- a/apps/sim/lib/copilot/orchestrator/persistence.ts +++ b/apps/sim/lib/copilot/orchestrator/persistence.ts @@ -1,120 +1,8 @@ -import { db } from '@sim/db' -import { copilotChats } from '@sim/db/schema' import { createLogger } from '@sim/logger' -import { and, eq } from 'drizzle-orm' import { getRedisClient } from '@/lib/core/config/redis' const logger = createLogger('CopilotOrchestratorPersistence') -/** - * Create a new copilot chat record. - */ -export async function createChat(params: { - userId: string - workflowId: string - model: string -}): Promise<{ id: string }> { - const [chat] = await db - .insert(copilotChats) - .values({ - userId: params.userId, - workflowId: params.workflowId, - model: params.model, - messages: [], - }) - .returning({ id: copilotChats.id }) - - return { id: chat.id } -} - -/** - * Load an existing chat for a user. - */ -export async function loadChat(chatId: string, userId: string) { - const [chat] = await db - .select() - .from(copilotChats) - .where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, userId))) - .limit(1) - - return chat || null -} - -/** - * Save chat messages and metadata. 
- */ -export async function saveMessages( - chatId: string, - messages: any[], - options?: { - title?: string - conversationId?: string - planArtifact?: string | null - config?: { mode?: string; model?: string } - } -): Promise { - await db - .update(copilotChats) - .set({ - messages, - updatedAt: new Date(), - ...(options?.title ? { title: options.title } : {}), - ...(options?.conversationId ? { conversationId: options.conversationId } : {}), - ...(options?.planArtifact !== undefined ? { planArtifact: options.planArtifact } : {}), - ...(options?.config ? { config: options.config } : {}), - }) - .where(eq(copilotChats.id, chatId)) -} - -/** - * Update the conversationId for a chat without overwriting messages. - */ -export async function updateChatConversationId( - chatId: string, - conversationId: string -): Promise { - await db - .update(copilotChats) - .set({ - conversationId, - updatedAt: new Date(), - }) - .where(eq(copilotChats.id, chatId)) -} - -/** - * Set a tool call confirmation status in Redis. - */ -export async function setToolConfirmation( - toolCallId: string, - status: 'accepted' | 'rejected' | 'background' | 'pending', - message?: string -): Promise { - const redis = getRedisClient() - if (!redis) { - logger.warn('Redis client not available for tool confirmation') - return false - } - - const key = `tool_call:${toolCallId}` - const payload = { - status, - message: message || null, - timestamp: new Date().toISOString(), - } - - try { - await redis.set(key, JSON.stringify(payload), 'EX', 86400) - return true - } catch (error) { - logger.error('Failed to set tool confirmation', { - toolCallId, - error: error instanceof Error ? error.message : String(error), - }) - return false - } -} - /** * Get a tool call confirmation status from Redis. 
*/ diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts index 0f5f3df1ae..597de39b41 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers.ts @@ -1,7 +1,12 @@ import { createLogger } from '@sim/logger' -import { INTERRUPT_TOOL_SET, SUBAGENT_TOOL_SET } from '@/lib/copilot/orchestrator/config' +import { + INTERRUPT_TOOL_SET, + RESPOND_TOOL_SET, + SUBAGENT_TOOL_SET, +} from '@/lib/copilot/orchestrator/config' import { getToolConfirmation } from '@/lib/copilot/orchestrator/persistence' import { + asRecord, getEventData, markToolResultSeen, wasToolResultSeen, @@ -20,22 +25,6 @@ const logger = createLogger('CopilotSseHandlers') // Normalization + dedupe helpers live in sse-utils to keep server/client in sync. -/** - * Respond tools are internal to the copilot's subagent system. - * They're used by subagents to signal completion and should NOT be executed by the sim side. - * The copilot backend handles these internally. 
- */ -const RESPOND_TOOL_SET = new Set([ - 'plan_respond', - 'edit_respond', - 'debug_respond', - 'info_respond', - 'research_respond', - 'deploy_respond', - 'superagent_respond', - 'discovery_respond', -]) - export type SSEHandler = ( event: SSEEvent, context: StreamingContext, @@ -72,15 +61,16 @@ async function executeToolAndReport( // If create_workflow was successful, update the execution context with the new workflowId // This ensures subsequent tools in the same stream have access to the workflowId + const output = asRecord(result.output) if ( toolCall.name === 'create_workflow' && result.success && - result.output?.workflowId && + output.workflowId && !execContext.workflowId ) { - execContext.workflowId = result.output.workflowId - if (result.output.workspaceId) { - execContext.workspaceId = result.output.workspaceId + execContext.workflowId = output.workflowId as string + if (output.workspaceId) { + execContext.workspaceId = output.workspaceId as string } } @@ -145,7 +135,7 @@ async function waitForToolDecision( export const sseHandlers: Record = { chat_id: (event, context) => { - context.chatId = event.data?.chatId + context.chatId = asRecord(event.data).chatId }, title_updated: () => {}, tool_result: (event, context) => { @@ -206,7 +196,7 @@ export const sseHandlers: Record = { const toolName = toolData.name || event.toolName if (!toolCallId || !toolName) return - const args = toolData.arguments || toolData.input || event.data?.input + const args = toolData.arguments || toolData.input || asRecord(event.data).input const isPartial = toolData.partial === true const existing = context.toolCalls.get(toolCallId) @@ -323,7 +313,7 @@ export const sseHandlers: Record = { } }, reasoning: (event, context) => { - const phase = event.data?.phase || event.data?.data?.phase + const phase = asRecord(event.data).phase || asRecord(asRecord(event.data).data).phase if (phase === 'start') { context.isInThinkingBlock = true context.currentThinkingBlock = { @@ -341,34 +331,35 
@@ export const sseHandlers: Record = { context.currentThinkingBlock = null return } - const chunk = - typeof event.data === 'string' ? event.data : event.data?.data || event.data?.content + const d = asRecord(event.data) + const chunk = typeof event.data === 'string' ? event.data : d.data || d.content if (!chunk || !context.currentThinkingBlock) return context.currentThinkingBlock.content = `${context.currentThinkingBlock.content || ''}${chunk}` }, content: (event, context) => { - const chunk = - typeof event.data === 'string' ? event.data : event.data?.content || event.data?.data + const d = asRecord(event.data) + const chunk = typeof event.data === 'string' ? event.data : d.content || d.data if (!chunk) return context.accumulatedContent += chunk - addContentBlock(context, { type: 'text', content: chunk }) + addContentBlock(context, { type: 'text', content: chunk as string }) }, done: (event, context) => { - if (event.data?.responseId) { - context.conversationId = event.data.responseId + const d = asRecord(event.data) + if (d.responseId) { + context.conversationId = d.responseId as string } context.streamComplete = true }, start: (event, context) => { - if (event.data?.responseId) { - context.conversationId = event.data.responseId + const d = asRecord(event.data) + if (d.responseId) { + context.conversationId = d.responseId as string } }, error: (event, context) => { + const d = asRecord(event.data) const message = - event.data?.message || - event.data?.error || - (typeof event.data === 'string' ? event.data : null) + d.message || d.error || (typeof event.data === 'string' ? event.data : null) if (message) { context.errors.push(message) } @@ -380,7 +371,7 @@ export const subAgentHandlers: Record = { content: (event, context) => { const parentToolCallId = context.subAgentParentToolCallId if (!parentToolCallId || !event.data) return - const chunk = typeof event.data === 'string' ? 
event.data : event.data?.content || '' + const chunk = typeof event.data === 'string' ? event.data : asRecord(event.data).content || '' if (!chunk) return context.subAgentContent[parentToolCallId] = (context.subAgentContent[parentToolCallId] || '') + chunk @@ -394,7 +385,7 @@ export const subAgentHandlers: Record = { const toolName = toolData.name || event.toolName if (!toolCallId || !toolName) return const isPartial = toolData.partial === true - const args = toolData.arguments || toolData.input || event.data?.input + const args = toolData.arguments || toolData.input || asRecord(event.data).input const existing = context.toolCalls.get(toolCallId) // Ignore late/duplicate tool_call events once we already have a result diff --git a/apps/sim/lib/copilot/orchestrator/sse-utils.ts b/apps/sim/lib/copilot/orchestrator/sse-utils.ts index 792a42aba7..0dd805decb 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-utils.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-utils.ts @@ -2,6 +2,10 @@ import type { SSEEvent } from '@/lib/copilot/orchestrator/types' type EventDataObject = Record | undefined +/** Safely cast event.data to a record for property access. */ +export const asRecord = (data: unknown): Record => + (data && typeof data === 'object' && !Array.isArray(data) ? 
data : {}) as Record + const DEFAULT_TOOL_EVENT_TTL_MS = 5 * 60 * 1000 /** @@ -45,7 +49,7 @@ export const getEventData = (event: SSEEvent): EventDataObject => { return nested || topLevel } -export function getToolCallIdFromEvent(event: SSEEvent): string | undefined { +function getToolCallIdFromEvent(event: SSEEvent): string | undefined { const data = getEventData(event) return event.toolCallId || data?.id || data?.toolCallId } @@ -70,14 +74,14 @@ export function normalizeSseEvent(event: SSEEvent): SSEEvent { } } -export function markToolCallSeen(toolCallId: string, ttlMs: number = DEFAULT_TOOL_EVENT_TTL_MS): void { +function markToolCallSeen(toolCallId: string, ttlMs: number = DEFAULT_TOOL_EVENT_TTL_MS): void { seenToolCalls.add(toolCallId) setTimeout(() => { seenToolCalls.delete(toolCallId) }, ttlMs) } -export function wasToolCallSeen(toolCallId: string): boolean { +function wasToolCallSeen(toolCallId: string): boolean { return seenToolCalls.has(toolCallId) } diff --git a/apps/sim/lib/copilot/orchestrator/subagent.ts b/apps/sim/lib/copilot/orchestrator/subagent.ts index 17079e4d58..80e71d672e 100644 --- a/apps/sim/lib/copilot/orchestrator/subagent.ts +++ b/apps/sim/lib/copilot/orchestrator/subagent.ts @@ -1,6 +1,7 @@ import { createLogger } from '@sim/logger' -import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' +import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' import { handleSubagentRouting, sseHandlers, subAgentHandlers } from '@/lib/copilot/orchestrator/sse-handlers' +import { env } from '@/lib/core/config/env' import { normalizeSseEvent, shouldSkipToolCallEvent, @@ -15,11 +16,9 @@ import type { StreamingContext, ToolCallSummary, } from '@/lib/copilot/orchestrator/types' -import { env } from '@/lib/core/config/env' import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' const logger = createLogger('CopilotSubagentOrchestrator') -const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT export interface 
SubagentOrchestratorOptions extends Omit { userId: string @@ -77,7 +76,7 @@ export async function orchestrateSubagentStream( 'Content-Type': 'application/json', ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), }, - body: JSON.stringify({ ...requestPayload, stream: true, userId }), + body: JSON.stringify({ ...requestPayload, stream: true }), signal: abortSignal, }) @@ -129,7 +128,8 @@ export async function orchestrateSubagentStream( // Handle subagent_start/subagent_end events to track nested subagent calls if (normalizedEvent.type === 'subagent_start') { - const toolCallId = normalizedEvent.data?.tool_call_id + const eventData = normalizedEvent.data as Record | undefined + const toolCallId = eventData?.tool_call_id as string | undefined if (toolCallId) { context.subAgentParentToolCallId = toolCallId context.subAgentContent[toolCallId] = '' diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/deploy.ts similarity index 55% rename from apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools.ts rename to apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/deploy.ts index fdc962382b..aad2ed2148 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/deploy.ts @@ -1,13 +1,12 @@ import crypto from 'crypto' import { db } from '@sim/db' -import { chat, workflow, workflowMcpServer, workflowMcpTool } from '@sim/db/schema' -import { and, eq, inArray } from 'drizzle-orm' +import { chat, workflow, workflowMcpTool } from '@sim/db/schema' +import { and, eq } from 'drizzle-orm' import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types' import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema' import { deployWorkflow, undeployWorkflow } from '@/lib/workflows/persistence/utils' -import { hasValidStartBlock } from 
'@/lib/workflows/triggers/trigger-utils.server' import { checkChatAccess, checkWorkflowAccessForChatCreation } from '@/app/api/chat/utils' -import { ensureWorkflowAccess } from './access' +import { ensureWorkflowAccess } from '../access' export async function executeDeployApi( params: Record, @@ -115,6 +114,11 @@ export async function executeDeployChat( return { success: false, error: deployResult.error || 'Failed to deploy workflow' } } + const existingCustomizations = + (existingDeployment?.customizations as + | { primaryColor?: string; welcomeMessage?: string } + | undefined) || {} + const payload = { workflowId, identifier, @@ -122,12 +126,10 @@ export async function executeDeployChat( description: String(params.description || existingDeployment?.description || ''), customizations: { primaryColor: - params.customizations?.primaryColor || - existingDeployment?.customizations?.primaryColor || + params.customizations?.primaryColor || existingCustomizations.primaryColor || 'var(--brand-primary-hover-hex)', welcomeMessage: - params.customizations?.welcomeMessage || - existingDeployment?.customizations?.welcomeMessage || + params.customizations?.welcomeMessage || existingCustomizations.welcomeMessage || 'Hi there! 
How can I help you today?', }, authType: params.authType || existingDeployment?.authType || 'public', @@ -277,203 +279,3 @@ export async function executeRedeploy(context: ExecutionContext): Promise, - context: ExecutionContext -): Promise { - try { - const workflowId = params.workflowId || context.workflowId - if (!workflowId) { - return { success: false, error: 'workflowId is required' } - } - const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) - const workspaceId = workflowRecord.workspaceId - - const [apiDeploy, chatDeploy] = await Promise.all([ - db.select().from(workflow).where(eq(workflow.id, workflowId)).limit(1), - db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1), - ]) - - const isApiDeployed = apiDeploy[0]?.isDeployed || false - const apiDetails = { - isDeployed: isApiDeployed, - deployedAt: apiDeploy[0]?.deployedAt || null, - endpoint: isApiDeployed ? `/api/workflows/${workflowId}/execute` : null, - apiKey: workflowRecord.workspaceId ? 'Workspace API keys' : 'Personal API keys', - needsRedeployment: false, - } - - const isChatDeployed = !!chatDeploy[0] - const chatDetails = { - isDeployed: isChatDeployed, - chatId: chatDeploy[0]?.id || null, - identifier: chatDeploy[0]?.identifier || null, - chatUrl: isChatDeployed ? 
`/chat/${chatDeploy[0]?.identifier}` : null, - title: chatDeploy[0]?.title || null, - description: chatDeploy[0]?.description || null, - authType: chatDeploy[0]?.authType || null, - allowedEmails: chatDeploy[0]?.allowedEmails || null, - outputConfigs: chatDeploy[0]?.outputConfigs || null, - welcomeMessage: chatDeploy[0]?.customizations?.welcomeMessage || null, - primaryColor: chatDeploy[0]?.customizations?.primaryColor || null, - hasPassword: Boolean(chatDeploy[0]?.password), - } - - const mcpDetails = { isDeployed: false, servers: [] as any[] } - if (workspaceId) { - const servers = await db - .select({ - serverId: workflowMcpServer.id, - serverName: workflowMcpServer.name, - toolName: workflowMcpTool.toolName, - toolDescription: workflowMcpTool.toolDescription, - parameterSchema: workflowMcpTool.parameterSchema, - toolId: workflowMcpTool.id, - }) - .from(workflowMcpTool) - .innerJoin(workflowMcpServer, eq(workflowMcpTool.serverId, workflowMcpServer.id)) - .where(eq(workflowMcpTool.workflowId, workflowId)) - - if (servers.length > 0) { - mcpDetails.isDeployed = true - mcpDetails.servers = servers - } - } - - const isDeployed = apiDetails.isDeployed || chatDetails.isDeployed || mcpDetails.isDeployed - return { - success: true, - output: { isDeployed, api: apiDetails, chat: chatDetails, mcp: mcpDetails }, - } - } catch (error) { - return { success: false, error: error instanceof Error ? 
error.message : String(error) } - } -} - -export async function executeListWorkspaceMcpServers( - params: Record, - context: ExecutionContext -): Promise { - try { - const workflowId = params.workflowId || context.workflowId - if (!workflowId) { - return { success: false, error: 'workflowId is required' } - } - const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) - const workspaceId = workflowRecord.workspaceId - if (!workspaceId) { - return { success: false, error: 'workspaceId is required' } - } - - const servers = await db - .select({ - id: workflowMcpServer.id, - name: workflowMcpServer.name, - description: workflowMcpServer.description, - }) - .from(workflowMcpServer) - .where(eq(workflowMcpServer.workspaceId, workspaceId)) - - const serverIds = servers.map((server) => server.id) - const tools = - serverIds.length > 0 - ? await db - .select({ - serverId: workflowMcpTool.serverId, - toolName: workflowMcpTool.toolName, - }) - .from(workflowMcpTool) - .where(inArray(workflowMcpTool.serverId, serverIds)) - : [] - - const toolNamesByServer: Record = {} - for (const tool of tools) { - if (!toolNamesByServer[tool.serverId]) { - toolNamesByServer[tool.serverId] = [] - } - toolNamesByServer[tool.serverId].push(tool.toolName) - } - - const serversWithToolNames = servers.map((server) => ({ - ...server, - toolCount: toolNamesByServer[server.id]?.length || 0, - toolNames: toolNamesByServer[server.id] || [], - })) - - return { success: true, output: { servers: serversWithToolNames, count: servers.length } } - } catch (error) { - return { success: false, error: error instanceof Error ? 
error.message : String(error) } - } -} - -export async function executeCreateWorkspaceMcpServer( - params: Record, - context: ExecutionContext -): Promise { - try { - const workflowId = params.workflowId || context.workflowId - if (!workflowId) { - return { success: false, error: 'workflowId is required' } - } - const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) - const workspaceId = workflowRecord.workspaceId - if (!workspaceId) { - return { success: false, error: 'workspaceId is required' } - } - - const name = params.name?.trim() - if (!name) { - return { success: false, error: 'name is required' } - } - - const serverId = crypto.randomUUID() - const [server] = await db - .insert(workflowMcpServer) - .values({ - id: serverId, - workspaceId, - createdBy: context.userId, - name, - description: params.description?.trim() || null, - isPublic: params.isPublic ?? false, - createdAt: new Date(), - updatedAt: new Date(), - }) - .returning() - - const workflowIds: string[] = params.workflowIds || [] - const addedTools: Array<{ workflowId: string; toolName: string }> = [] - - if (workflowIds.length > 0) { - const workflows = await db.select().from(workflow).where(inArray(workflow.id, workflowIds)) - - for (const wf of workflows) { - if (wf.workspaceId !== workspaceId || !wf.isDeployed) { - continue - } - const hasStartBlock = await hasValidStartBlock(wf.id) - if (!hasStartBlock) { - continue - } - const toolName = sanitizeToolName(wf.name || `workflow_${wf.id}`) - await db.insert(workflowMcpTool).values({ - id: crypto.randomUUID(), - serverId, - workflowId: wf.id, - toolName, - toolDescription: wf.description || `Execute ${wf.name} workflow`, - parameterSchema: {}, - createdAt: new Date(), - updatedAt: new Date(), - }) - addedTools.push({ workflowId: wf.id, toolName }) - } - } - - return { success: true, output: { server, addedTools } } - } catch (error) { - return { success: false, error: error instanceof Error ? 
error.message : String(error) } - } -} - diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/index.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/index.ts new file mode 100644 index 0000000000..9e490922b1 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/index.ts @@ -0,0 +1,2 @@ +export * from './deploy' +export * from './manage' diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/manage.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/manage.ts new file mode 100644 index 0000000000..4e6db4af3b --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/manage.ts @@ -0,0 +1,211 @@ +import crypto from 'crypto' +import { db } from '@sim/db' +import { chat, workflow, workflowMcpServer, workflowMcpTool } from '@sim/db/schema' +import { eq, inArray } from 'drizzle-orm' +import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types' +import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema' +import { hasValidStartBlock } from '@/lib/workflows/triggers/trigger-utils.server' +import { ensureWorkflowAccess } from '../access' + +export async function executeCheckDeploymentStatus( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const workspaceId = workflowRecord.workspaceId + + const [apiDeploy, chatDeploy] = await Promise.all([ + db.select().from(workflow).where(eq(workflow.id, workflowId)).limit(1), + db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1), + ]) + + const isApiDeployed = apiDeploy[0]?.isDeployed || false + const apiDetails = { + isDeployed: isApiDeployed, + deployedAt: apiDeploy[0]?.deployedAt || 
null, + endpoint: isApiDeployed ? `/api/workflows/${workflowId}/execute` : null, + apiKey: workflowRecord.workspaceId ? 'Workspace API keys' : 'Personal API keys', + needsRedeployment: false, + } + + const isChatDeployed = !!chatDeploy[0] + const chatCustomizations = + (chatDeploy[0]?.customizations as + | { welcomeMessage?: string; primaryColor?: string } + | undefined) || {} + const chatDetails = { + isDeployed: isChatDeployed, + chatId: chatDeploy[0]?.id || null, + identifier: chatDeploy[0]?.identifier || null, + chatUrl: isChatDeployed ? `/chat/${chatDeploy[0]?.identifier}` : null, + title: chatDeploy[0]?.title || null, + description: chatDeploy[0]?.description || null, + authType: chatDeploy[0]?.authType || null, + allowedEmails: chatDeploy[0]?.allowedEmails || null, + outputConfigs: chatDeploy[0]?.outputConfigs || null, + welcomeMessage: chatCustomizations.welcomeMessage || null, + primaryColor: chatCustomizations.primaryColor || null, + hasPassword: Boolean(chatDeploy[0]?.password), + } + + const mcpDetails = { isDeployed: false, servers: [] as any[] } + if (workspaceId) { + const servers = await db + .select({ + serverId: workflowMcpServer.id, + serverName: workflowMcpServer.name, + toolName: workflowMcpTool.toolName, + toolDescription: workflowMcpTool.toolDescription, + parameterSchema: workflowMcpTool.parameterSchema, + toolId: workflowMcpTool.id, + }) + .from(workflowMcpTool) + .innerJoin(workflowMcpServer, eq(workflowMcpTool.serverId, workflowMcpServer.id)) + .where(eq(workflowMcpTool.workflowId, workflowId)) + + if (servers.length > 0) { + mcpDetails.isDeployed = true + mcpDetails.servers = servers + } + } + + const isDeployed = apiDetails.isDeployed || chatDetails.isDeployed || mcpDetails.isDeployed + return { + success: true, + output: { isDeployed, api: apiDetails, chat: chatDetails, mcp: mcpDetails }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeListWorkspaceMcpServers( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const workspaceId = workflowRecord.workspaceId + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + + const servers = await db + .select({ + id: workflowMcpServer.id, + name: workflowMcpServer.name, + description: workflowMcpServer.description, + }) + .from(workflowMcpServer) + .where(eq(workflowMcpServer.workspaceId, workspaceId)) + + const serverIds = servers.map((server) => server.id) + const tools = + serverIds.length > 0 + ? await db + .select({ + serverId: workflowMcpTool.serverId, + toolName: workflowMcpTool.toolName, + }) + .from(workflowMcpTool) + .where(inArray(workflowMcpTool.serverId, serverIds)) + : [] + + const toolNamesByServer: Record = {} + for (const tool of tools) { + if (!toolNamesByServer[tool.serverId]) { + toolNamesByServer[tool.serverId] = [] + } + toolNamesByServer[tool.serverId].push(tool.toolName) + } + + const serversWithToolNames = servers.map((server) => ({ + ...server, + toolCount: toolNamesByServer[server.id]?.length || 0, + toolNames: toolNamesByServer[server.id] || [], + })) + + return { success: true, output: { servers: serversWithToolNames, count: servers.length } } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeCreateWorkspaceMcpServer( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const workspaceId = workflowRecord.workspaceId + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + + const name = params.name?.trim() + if (!name) { + return { success: false, error: 'name is required' } + } + + const serverId = crypto.randomUUID() + const [server] = await db + .insert(workflowMcpServer) + .values({ + id: serverId, + workspaceId, + createdBy: context.userId, + name, + description: params.description?.trim() || null, + isPublic: params.isPublic ?? false, + createdAt: new Date(), + updatedAt: new Date(), + }) + .returning() + + const workflowIds: string[] = params.workflowIds || [] + const addedTools: Array<{ workflowId: string; toolName: string }> = [] + + if (workflowIds.length > 0) { + const workflows = await db.select().from(workflow).where(inArray(workflow.id, workflowIds)) + + for (const wf of workflows) { + if (wf.workspaceId !== workspaceId || !wf.isDeployed) { + continue + } + const hasStartBlock = await hasValidStartBlock(wf.id) + if (!hasStartBlock) { + continue + } + const toolName = sanitizeToolName(wf.name || `workflow_${wf.id}`) + await db.insert(workflowMcpTool).values({ + id: crypto.randomUUID(), + serverId, + workflowId: wf.id, + toolName, + toolDescription: wf.description || `Execute ${wf.name} workflow`, + parameterSchema: {}, + createdAt: new Date(), + updatedAt: new Date(), + }) + addedTools.push({ workflowId: wf.id, toolName }) + } + } + + return { success: true, output: { server, addedTools } } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts similarity index 94% rename from apps/sim/lib/copilot/orchestrator/tool-executor.ts rename to apps/sim/lib/copilot/orchestrator/tool-executor/index.ts index 1c04181cd4..fc839f6120 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts @@ -2,7 +2,7 @@ import { db } from '@sim/db' import { workflow } from '@sim/db/schema' import { createLogger } from '@sim/logger' import { eq } from 'drizzle-orm' -import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' +import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' import type { ExecutionContext, ToolCallResult, @@ -11,7 +11,7 @@ import type { import { routeExecution } from '@/lib/copilot/tools/server/router' import { env } from '@/lib/core/config/env' import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' -import { executeIntegrationToolDirect } from '@/lib/copilot/orchestrator/tool-executor/integration-tools' +import { executeIntegrationToolDirect } from './integration-tools' import { executeGetBlockOutputs, executeGetBlockUpstreamReferences, @@ -25,7 +25,7 @@ import { executeCreateFolder, executeRunWorkflow, executeSetGlobalWorkflowVariables, -} from '@/lib/copilot/orchestrator/tool-executor/workflow-tools' +} from './workflow-tools' import { executeCheckDeploymentStatus, executeCreateWorkspaceMcpServer, @@ -34,11 +34,10 @@ import { executeDeployMcp, executeListWorkspaceMcpServers, executeRedeploy, -} from '@/lib/copilot/orchestrator/tool-executor/deployment-tools' +} from './deployment-tools' import { getTool, resolveToolId } from '@/tools/utils' const logger = createLogger('CopilotToolExecutor') -const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT const SERVER_TOOLS = new Set([ 'get_blocks_and_tools', diff --git 
a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/index.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/index.ts new file mode 100644 index 0000000000..938d84e7b5 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/index.ts @@ -0,0 +1,2 @@ +export * from './queries' +export * from './mutations' diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts new file mode 100644 index 0000000000..ed4b51cc0a --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts @@ -0,0 +1,251 @@ +import crypto from 'crypto' +import { db } from '@sim/db' +import { workflow, workflowFolder } from '@sim/db/schema' +import { and, eq, isNull, max } from 'drizzle-orm' +import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types' +import { generateRequestId } from '@/lib/core/utils/request' +import { buildDefaultWorkflowArtifacts } from '@/lib/workflows/defaults' +import { executeWorkflow } from '@/lib/workflows/executor/execute-workflow' +import { saveWorkflowToNormalizedTables } from '@/lib/workflows/persistence/utils' +import { ensureWorkflowAccess, ensureWorkspaceAccess, getDefaultWorkspaceId } from '../access' + +export async function executeCreateWorkflow( + params: Record, + context: ExecutionContext +): Promise { + try { + const name = typeof params?.name === 'string' ? params.name.trim() : '' + if (!name) { + return { success: false, error: 'name is required' } + } + + const workspaceId = params?.workspaceId || (await getDefaultWorkspaceId(context.userId)) + const folderId = params?.folderId || null + const description = typeof params?.description === 'string' ? 
params.description : null + + await ensureWorkspaceAccess(workspaceId, context.userId, true) + + const workflowId = crypto.randomUUID() + const now = new Date() + + const folderCondition = folderId ? eq(workflow.folderId, folderId) : isNull(workflow.folderId) + const [maxResult] = await db + .select({ maxOrder: max(workflow.sortOrder) }) + .from(workflow) + .where(and(eq(workflow.workspaceId, workspaceId), folderCondition)) + const sortOrder = (maxResult?.maxOrder ?? 0) + 1 + + await db.insert(workflow).values({ + id: workflowId, + userId: context.userId, + workspaceId, + folderId, + sortOrder, + name, + description, + color: '#3972F6', + lastSynced: now, + createdAt: now, + updatedAt: now, + isDeployed: false, + runCount: 0, + variables: {}, + }) + + const { workflowState } = buildDefaultWorkflowArtifacts() + const saveResult = await saveWorkflowToNormalizedTables(workflowId, workflowState) + if (!saveResult.success) { + throw new Error(saveResult.error || 'Failed to save workflow state') + } + + return { + success: true, + output: { + workflowId, + workflowName: name, + workspaceId, + folderId, + }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeCreateFolder( + params: Record, + context: ExecutionContext +): Promise { + try { + const name = typeof params?.name === 'string' ? params.name.trim() : '' + if (!name) { + return { success: false, error: 'name is required' } + } + + const workspaceId = params?.workspaceId || (await getDefaultWorkspaceId(context.userId)) + const parentId = params?.parentId || null + + await ensureWorkspaceAccess(workspaceId, context.userId, true) + + const [maxResult] = await db + .select({ maxOrder: max(workflowFolder.sortOrder) }) + .from(workflowFolder) + .where( + and( + eq(workflowFolder.workspaceId, workspaceId), + parentId ? 
eq(workflowFolder.parentId, parentId) : isNull(workflowFolder.parentId) + ) + ) + const sortOrder = (maxResult?.maxOrder ?? 0) + 1 + + const folderId = crypto.randomUUID() + await db.insert(workflowFolder).values({ + id: folderId, + userId: context.userId, + workspaceId, + parentId, + name, + sortOrder, + createdAt: new Date(), + updatedAt: new Date(), + }) + + return { success: true, output: { folderId, name, workspaceId, parentId } } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeRunWorkflow( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + + const result = await executeWorkflow( + { + id: workflowRecord.id, + userId: workflowRecord.userId, + workspaceId: workflowRecord.workspaceId, + variables: workflowRecord.variables || {}, + }, + generateRequestId(), + params.workflow_input || params.input || undefined, + context.userId + ) + + return { + success: result.success, + output: { + executionId: result.metadata?.executionId, + success: result.success, + output: result.output, + logs: result.logs, + }, + error: result.success ? undefined : result.error || 'Workflow execution failed', + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeSetGlobalWorkflowVariables( + params: Record, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const operations = Array.isArray(params.operations) ? 
params.operations : [] + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + + const currentVarsRecord = (workflowRecord.variables as Record) || {} + const byName: Record = {} + Object.values(currentVarsRecord).forEach((v: any) => { + if (v && typeof v === 'object' && v.id && v.name) byName[String(v.name)] = v + }) + + for (const op of operations) { + const key = String(op?.name || '') + if (!key) continue + const nextType = op?.type || byName[key]?.type || 'plain' + const coerceValue = (value: any, type: string) => { + if (value === undefined) return value + if (type === 'number') { + const n = Number(value) + return Number.isNaN(n) ? value : n + } + if (type === 'boolean') { + const v = String(value).trim().toLowerCase() + if (v === 'true') return true + if (v === 'false') return false + return value + } + if (type === 'array' || type === 'object') { + try { + const parsed = JSON.parse(String(value)) + if (type === 'array' && Array.isArray(parsed)) return parsed + if (type === 'object' && parsed && typeof parsed === 'object' && !Array.isArray(parsed)) + return parsed + } catch {} + return value + } + return value + } + + if (op.operation === 'delete') { + delete byName[key] + continue + } + const typedValue = coerceValue(op.value, nextType) + if (op.operation === 'add') { + byName[key] = { + id: crypto.randomUUID(), + workflowId, + name: key, + type: nextType, + value: typedValue, + } + continue + } + if (op.operation === 'edit') { + if (!byName[key]) { + byName[key] = { + id: crypto.randomUUID(), + workflowId, + name: key, + type: nextType, + value: typedValue, + } + } else { + byName[key] = { + ...byName[key], + type: nextType, + value: typedValue, + } + } + } + } + + const nextVarsRecord = Object.fromEntries( + Object.values(byName).map((v: any) => [String(v.id), v]) + ) + + await db + .update(workflow) + .set({ variables: nextVarsRecord, updatedAt: new Date() }) + .where(eq(workflow.id, workflowId)) + + return { 
success: true, output: { updated: Object.values(byName).length } } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts similarity index 69% rename from apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools.ts rename to apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts index 0adc1a7685..7bbb8bd38b 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts @@ -1,26 +1,24 @@ -import crypto from 'crypto' import { db } from '@sim/db' import { customTools, permissions, workflow, workflowFolder, workspace } from '@sim/db/schema' -import { and, asc, desc, eq, inArray, isNull, max, or } from 'drizzle-orm' +import { and, asc, desc, eq, isNull, or } from 'drizzle-orm' import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types' import { extractWorkflowNames, formatNormalizedWorkflowForCopilot, normalizeWorkflowName, } from '@/lib/copilot/tools/shared/workflow-utils' -import { generateRequestId } from '@/lib/core/utils/request' import { mcpService } from '@/lib/mcp/service' import { listWorkspaceFiles } from '@/lib/uploads/contexts/workspace' import { getBlockOutputPaths } from '@/lib/workflows/blocks/block-outputs' import { BlockPathCalculator } from '@/lib/workflows/blocks/block-path-calculator' -import { buildDefaultWorkflowArtifacts } from '@/lib/workflows/defaults' -import { executeWorkflow } from '@/lib/workflows/executor/execute-workflow' -import { - loadWorkflowFromNormalizedTables, - saveWorkflowToNormalizedTables, -} from '@/lib/workflows/persistence/utils' +import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils' import { isInputDefinitionTrigger } from 
'@/lib/workflows/triggers/input-definition-triggers' -import { ensureWorkflowAccess, ensureWorkspaceAccess, getAccessibleWorkflowsForUser, getDefaultWorkspaceId } from './access' +import { + ensureWorkflowAccess, + ensureWorkspaceAccess, + getAccessibleWorkflowsForUser, + getDefaultWorkspaceId, +} from '../access' import { normalizeName } from '@/executor/constants' export async function executeGetUserWorkflow( @@ -180,112 +178,6 @@ export async function executeListFolders( } } -export async function executeCreateWorkflow( - params: Record, - context: ExecutionContext -): Promise { - try { - const name = typeof params?.name === 'string' ? params.name.trim() : '' - if (!name) { - return { success: false, error: 'name is required' } - } - - const workspaceId = params?.workspaceId || (await getDefaultWorkspaceId(context.userId)) - const folderId = params?.folderId || null - const description = typeof params?.description === 'string' ? params.description : null - - await ensureWorkspaceAccess(workspaceId, context.userId, true) - - const workflowId = crypto.randomUUID() - const now = new Date() - - const folderCondition = folderId ? eq(workflow.folderId, folderId) : isNull(workflow.folderId) - const [maxResult] = await db - .select({ maxOrder: max(workflow.sortOrder) }) - .from(workflow) - .where(and(eq(workflow.workspaceId, workspaceId), folderCondition)) - const sortOrder = (maxResult?.maxOrder ?? 
0) + 1 - - await db.insert(workflow).values({ - id: workflowId, - userId: context.userId, - workspaceId, - folderId, - sortOrder, - name, - description, - color: '#3972F6', - lastSynced: now, - createdAt: now, - updatedAt: now, - isDeployed: false, - runCount: 0, - variables: {}, - }) - - const { workflowState } = buildDefaultWorkflowArtifacts() - const saveResult = await saveWorkflowToNormalizedTables(workflowId, workflowState) - if (!saveResult.success) { - throw new Error(saveResult.error || 'Failed to save workflow state') - } - - return { - success: true, - output: { - workflowId, - workflowName: name, - workspaceId, - folderId, - }, - } - } catch (error) { - return { success: false, error: error instanceof Error ? error.message : String(error) } - } -} - -export async function executeCreateFolder( - params: Record, - context: ExecutionContext -): Promise { - try { - const name = typeof params?.name === 'string' ? params.name.trim() : '' - if (!name) { - return { success: false, error: 'name is required' } - } - - const workspaceId = params?.workspaceId || (await getDefaultWorkspaceId(context.userId)) - const parentId = params?.parentId || null - - await ensureWorkspaceAccess(workspaceId, context.userId, true) - - const [maxResult] = await db - .select({ maxOrder: max(workflowFolder.sortOrder) }) - .from(workflowFolder) - .where( - and( - eq(workflowFolder.workspaceId, workspaceId), - parentId ? eq(workflowFolder.parentId, parentId) : isNull(workflowFolder.parentId) - ) - ) - const sortOrder = (maxResult?.maxOrder ?? 0) + 1 - - const folderId = crypto.randomUUID() - await db.insert(workflowFolder).values({ - id: folderId, - workspaceId, - parentId, - name, - sortOrder, - createdAt: new Date(), - updatedAt: new Date(), - }) - - return { success: true, output: { folderId, name, workspaceId, parentId } } - } catch (error) { - return { success: false, error: error instanceof Error ? 
error.message : String(error) } - } -} - export async function executeGetWorkflowData( params: Record, context: ExecutionContext @@ -587,140 +479,6 @@ export async function executeGetBlockUpstreamReferences( } } -export async function executeRunWorkflow( - params: Record, - context: ExecutionContext -): Promise { - try { - const workflowId = params.workflowId || context.workflowId - if (!workflowId) { - return { success: false, error: 'workflowId is required' } - } - - const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) - - const result = await executeWorkflow( - { - id: workflowRecord.id, - userId: workflowRecord.userId, - workspaceId: workflowRecord.workspaceId, - variables: workflowRecord.variables || {}, - }, - generateRequestId(), - params.workflow_input || params.input || undefined, - context.userId - ) - - return { - success: result.success, - output: { - executionId: result.executionId, - success: result.success, - output: result.output, - logs: result.logs, - }, - error: result.success ? undefined : result.error || 'Workflow execution failed', - } - } catch (error) { - return { success: false, error: error instanceof Error ? error.message : String(error) } - } -} - -export async function executeSetGlobalWorkflowVariables( - params: Record, - context: ExecutionContext -): Promise { - try { - const workflowId = params.workflowId || context.workflowId - if (!workflowId) { - return { success: false, error: 'workflowId is required' } - } - const operations = Array.isArray(params.operations) ? 
params.operations : [] - const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) - - const currentVarsRecord = (workflowRecord.variables as Record) || {} - const byName: Record = {} - Object.values(currentVarsRecord).forEach((v: any) => { - if (v && typeof v === 'object' && v.id && v.name) byName[String(v.name)] = v - }) - - for (const op of operations) { - const key = String(op?.name || '') - if (!key) continue - const nextType = op?.type || byName[key]?.type || 'plain' - const coerceValue = (value: any, type: string) => { - if (value === undefined) return value - if (type === 'number') { - const n = Number(value) - return Number.isNaN(n) ? value : n - } - if (type === 'boolean') { - const v = String(value).trim().toLowerCase() - if (v === 'true') return true - if (v === 'false') return false - return value - } - if (type === 'array' || type === 'object') { - try { - const parsed = JSON.parse(String(value)) - if (type === 'array' && Array.isArray(parsed)) return parsed - if (type === 'object' && parsed && typeof parsed === 'object' && !Array.isArray(parsed)) - return parsed - } catch {} - return value - } - return value - } - - if (op.operation === 'delete') { - delete byName[key] - continue - } - const typedValue = coerceValue(op.value, nextType) - if (op.operation === 'add') { - byName[key] = { - id: crypto.randomUUID(), - workflowId, - name: key, - type: nextType, - value: typedValue, - } - continue - } - if (op.operation === 'edit') { - if (!byName[key]) { - byName[key] = { - id: crypto.randomUUID(), - workflowId, - name: key, - type: nextType, - value: typedValue, - } - } else { - byName[key] = { - ...byName[key], - type: nextType, - value: typedValue, - } - } - } - } - - const nextVarsRecord = Object.fromEntries( - Object.values(byName).map((v: any) => [String(v.id), v]) - ) - - await db - .update(workflow) - .set({ variables: nextVarsRecord, updatedAt: new Date() }) - .where(eq(workflow.id, workflowId)) - - return { 
success: true, output: { updated: Object.values(byName).length } } - } catch (error) { - return { success: false, error: error instanceof Error ? error.message : String(error) } - } -} - async function getWorkflowVariablesForTool( workflowId: string ): Promise> { diff --git a/apps/sim/lib/copilot/orchestrator/types.ts b/apps/sim/lib/copilot/orchestrator/types.ts index d1f58b5886..dd321bab31 100644 --- a/apps/sim/lib/copilot/orchestrator/types.ts +++ b/apps/sim/lib/copilot/orchestrator/types.ts @@ -19,12 +19,12 @@ export type SSEEventType = export interface SSEEvent { type: SSEEventType - data?: any + data?: unknown subagent?: string toolCallId?: string toolName?: string success?: boolean - result?: any + result?: unknown } export type ToolCallStatus = 'pending' | 'executing' | 'success' | 'error' | 'skipped' | 'rejected' @@ -33,16 +33,16 @@ export interface ToolCallState { id: string name: string status: ToolCallStatus - params?: Record + params?: Record result?: ToolCallResult error?: string startTime?: number endTime?: number } -export interface ToolCallResult { +export interface ToolCallResult { success: boolean - output?: any + output?: T error?: string } @@ -73,6 +73,14 @@ export interface StreamingContext { errors: string[] } +export interface FileAttachment { + id: string + key: string + name: string + mimeType: string + size: number +} + export interface OrchestratorRequest { message: string workflowId: string @@ -82,7 +90,7 @@ export interface OrchestratorRequest { model?: string conversationId?: string contexts?: Array<{ type: string; content: string }> - fileAttachments?: any[] + fileAttachments?: FileAttachment[] commands?: string[] provider?: CopilotProviderConfig streamToolCalls?: boolean @@ -116,8 +124,8 @@ export interface ToolCallSummary { id: string name: string status: ToolCallStatus - params?: Record - result?: any + params?: Record + result?: unknown error?: string durationMs?: number } diff --git 
a/apps/sim/lib/copilot/tools/client/base-tool.ts b/apps/sim/lib/copilot/tools/client/base-tool.ts index 8d7d396f9c..d3640bea0d 100644 --- a/apps/sim/lib/copilot/tools/client/base-tool.ts +++ b/apps/sim/lib/copilot/tools/client/base-tool.ts @@ -147,67 +147,13 @@ export class BaseClientTool { } /** - * Mark a tool as complete on the server (proxies to server-side route). - * Once called, the tool is considered complete and won't be marked again. + * Mark a tool as complete. Tool completion is now handled server-side by the + * orchestrator (which calls the Go backend directly). Client tools are retained + * for UI display only — this method just tracks local state. */ - async markToolComplete(status: number, message?: any, data?: any): Promise { - // Prevent double-marking - if (this.isMarkedComplete) { - baseToolLogger.warn('markToolComplete called but tool already marked complete', { - toolCallId: this.toolCallId, - toolName: this.name, - existingState: this.state, - attemptedStatus: status, - }) - return true - } - + async markToolComplete(_status: number, _message?: unknown, _data?: unknown): Promise { this.isMarkedComplete = true - - try { - baseToolLogger.info('markToolComplete called', { - toolCallId: this.toolCallId, - toolName: this.name, - state: this.state, - status, - hasMessage: message !== undefined, - hasData: data !== undefined, - }) - } catch {} - - try { - const res = await fetch('/api/copilot/tools/mark-complete', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - id: this.toolCallId, - name: this.name, - status, - message, - data, - }), - }) - - if (!res.ok) { - // Try to surface server error - let errorText = `Failed to mark tool complete (status ${res.status})` - try { - const { error } = await res.json() - if (error) errorText = String(error) - } catch {} - throw new Error(errorText) - } - - const json = (await res.json()) as { success?: boolean } - return json?.success === true - } catch (e) { - 
// Default failure path - but tool is still marked complete locally - baseToolLogger.error('Failed to mark tool complete on server', { - toolCallId: this.toolCallId, - error: e instanceof Error ? e.message : String(e), - }) - return false - } + return true } // Accept (continue) for interrupt flows: move pending -> executing diff --git a/apps/sim/lib/copilot/tools/client/blocks/get-block-config.ts b/apps/sim/lib/copilot/tools/client/blocks/get-block-config.ts index a76971df07..88e6963976 100644 --- a/apps/sim/lib/copilot/tools/client/blocks/get-block-config.ts +++ b/apps/sim/lib/copilot/tools/client/blocks/get-block-config.ts @@ -1,23 +1,11 @@ -import { createLogger } from '@sim/logger' import { FileCode, Loader2, MinusCircle, XCircle } from 'lucide-react' import { BaseClientTool, type BaseClientToolMetadata, ClientToolCallState, } from '@/lib/copilot/tools/client/base-tool' -import { - ExecuteResponseSuccessSchema, - GetBlockConfigInput, - GetBlockConfigResult, -} from '@/lib/copilot/tools/shared/schemas' import { getLatestBlock } from '@/blocks/registry' -interface GetBlockConfigArgs { - blockType: string - operation?: string - trigger?: boolean -} - export class GetBlockConfigClientTool extends BaseClientTool { static readonly id = 'get_block_config' @@ -63,38 +51,9 @@ export class GetBlockConfigClientTool extends BaseClientTool { }, } - async execute(args?: GetBlockConfigArgs): Promise { - const logger = createLogger('GetBlockConfigClientTool') - try { - this.setState(ClientToolCallState.executing) - - const { blockType, operation, trigger } = GetBlockConfigInput.parse(args || {}) - - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - toolName: 'get_block_config', - payload: { blockType, operation, trigger }, - }), - }) - if (!res.ok) { - const errorText = await res.text().catch(() => '') - throw new Error(errorText || `Server error 
(${res.status})`) - } - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - const result = GetBlockConfigResult.parse(parsed.result) - - const inputCount = Object.keys(result.inputs).length - const outputCount = Object.keys(result.outputs).length - await this.markToolComplete(200, { inputs: inputCount, outputs: outputCount }, result) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const message = error instanceof Error ? error.message : String(error) - logger.error('Execute failed', { message }) - await this.markToolComplete(500, message) - this.setState(ClientToolCallState.error) - } + async execute(): Promise { + // Tool execution is handled server-side by the orchestrator. + // Client tool classes are retained for UI display configuration only. + this.setState(ClientToolCallState.success) } } diff --git a/apps/sim/lib/copilot/tools/client/blocks/get-block-options.ts b/apps/sim/lib/copilot/tools/client/blocks/get-block-options.ts index 06efb6ffc1..993773f0e7 100644 --- a/apps/sim/lib/copilot/tools/client/blocks/get-block-options.ts +++ b/apps/sim/lib/copilot/tools/client/blocks/get-block-options.ts @@ -1,21 +1,11 @@ -import { createLogger } from '@sim/logger' import { ListFilter, Loader2, MinusCircle, XCircle } from 'lucide-react' import { BaseClientTool, type BaseClientToolMetadata, ClientToolCallState, } from '@/lib/copilot/tools/client/base-tool' -import { - ExecuteResponseSuccessSchema, - GetBlockOptionsInput, - GetBlockOptionsResult, -} from '@/lib/copilot/tools/shared/schemas' import { getLatestBlock } from '@/blocks/registry' -interface GetBlockOptionsArgs { - blockId: string -} - export class GetBlockOptionsClientTool extends BaseClientTool { static readonly id = 'get_block_options' @@ -65,46 +55,9 @@ export class GetBlockOptionsClientTool extends BaseClientTool { }, } - async execute(args?: GetBlockOptionsArgs): Promise { - const logger = createLogger('GetBlockOptionsClientTool') - try { - 
this.setState(ClientToolCallState.executing) - - // Handle both camelCase and snake_case parameter names, plus blockType as an alias - const normalizedArgs = args - ? { - blockId: - args.blockId || - (args as any).block_id || - (args as any).blockType || - (args as any).block_type, - } - : {} - - logger.info('execute called', { originalArgs: args, normalizedArgs }) - - const { blockId } = GetBlockOptionsInput.parse(normalizedArgs) - - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'get_block_options', payload: { blockId } }), - }) - if (!res.ok) { - const errorText = await res.text().catch(() => '') - throw new Error(errorText || `Server error (${res.status})`) - } - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - const result = GetBlockOptionsResult.parse(parsed.result) - - await this.markToolComplete(200, { operations: result.operations.length }, result) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const message = error instanceof Error ? error.message : String(error) - logger.error('Execute failed', { message }) - await this.markToolComplete(500, message) - this.setState(ClientToolCallState.error) - } + async execute(): Promise { + // Tool execution is handled server-side by the orchestrator. + // Client tool classes are retained for UI display configuration only. 
+ this.setState(ClientToolCallState.success) } } diff --git a/apps/sim/lib/copilot/tools/client/blocks/get-blocks-and-tools.ts b/apps/sim/lib/copilot/tools/client/blocks/get-blocks-and-tools.ts index 7532ca6c49..17108c6db0 100644 --- a/apps/sim/lib/copilot/tools/client/blocks/get-blocks-and-tools.ts +++ b/apps/sim/lib/copilot/tools/client/blocks/get-blocks-and-tools.ts @@ -1,14 +1,9 @@ -import { createLogger } from '@sim/logger' import { Blocks, Loader2, MinusCircle, XCircle } from 'lucide-react' import { BaseClientTool, type BaseClientToolMetadata, ClientToolCallState, } from '@/lib/copilot/tools/client/base-tool' -import { - ExecuteResponseSuccessSchema, - GetBlocksAndToolsResult, -} from '@/lib/copilot/tools/shared/schemas' export class GetBlocksAndToolsClientTool extends BaseClientTool { static readonly id = 'get_blocks_and_tools' @@ -31,30 +26,8 @@ export class GetBlocksAndToolsClientTool extends BaseClientTool { } async execute(): Promise { - const logger = createLogger('GetBlocksAndToolsClientTool') - try { - this.setState(ClientToolCallState.executing) - - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'get_blocks_and_tools', payload: {} }), - }) - if (!res.ok) { - const errorText = await res.text().catch(() => '') - throw new Error(errorText || `Server error (${res.status})`) - } - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - const result = GetBlocksAndToolsResult.parse(parsed.result) - - // TODO: Temporarily sending empty data to test 403 issue - await this.markToolComplete(200, 'Successfully retrieved blocks and tools', {}) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const message = error instanceof Error ? 
error.message : String(error) - await this.markToolComplete(500, message) - this.setState(ClientToolCallState.error) - } + // Tool execution is handled server-side by the orchestrator. + // Client tool classes are retained for UI display configuration only. + this.setState(ClientToolCallState.success) } } diff --git a/apps/sim/lib/copilot/tools/client/blocks/get-blocks-metadata.ts b/apps/sim/lib/copilot/tools/client/blocks/get-blocks-metadata.ts index 8fd88b1a3a..fd547fa0c0 100644 --- a/apps/sim/lib/copilot/tools/client/blocks/get-blocks-metadata.ts +++ b/apps/sim/lib/copilot/tools/client/blocks/get-blocks-metadata.ts @@ -1,19 +1,9 @@ -import { createLogger } from '@sim/logger' import { ListFilter, Loader2, MinusCircle, XCircle } from 'lucide-react' import { BaseClientTool, type BaseClientToolMetadata, ClientToolCallState, } from '@/lib/copilot/tools/client/base-tool' -import { - ExecuteResponseSuccessSchema, - GetBlocksMetadataInput, - GetBlocksMetadataResult, -} from '@/lib/copilot/tools/shared/schemas' - -interface GetBlocksMetadataArgs { - blockIds: string[] -} export class GetBlocksMetadataClientTool extends BaseClientTool { static readonly id = 'get_blocks_metadata' @@ -63,33 +53,9 @@ export class GetBlocksMetadataClientTool extends BaseClientTool { }, } - async execute(args?: GetBlocksMetadataArgs): Promise { - const logger = createLogger('GetBlocksMetadataClientTool') - try { - this.setState(ClientToolCallState.executing) - - const { blockIds } = GetBlocksMetadataInput.parse(args || {}) - - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'get_blocks_metadata', payload: { blockIds } }), - }) - if (!res.ok) { - const errorText = await res.text().catch(() => '') - throw new Error(errorText || `Server error (${res.status})`) - } - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - const result = 
GetBlocksMetadataResult.parse(parsed.result) - - await this.markToolComplete(200, { retrieved: Object.keys(result.metadata).length }, result) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const message = error instanceof Error ? error.message : String(error) - logger.error('Execute failed', { message }) - await this.markToolComplete(500, message) - this.setState(ClientToolCallState.error) - } + async execute(): Promise { + // Tool execution is handled server-side by the orchestrator. + // Client tool classes are retained for UI display configuration only. + this.setState(ClientToolCallState.success) } } diff --git a/apps/sim/lib/copilot/tools/client/blocks/get-trigger-blocks.ts b/apps/sim/lib/copilot/tools/client/blocks/get-trigger-blocks.ts index c9fa0f78a2..2d8bda809b 100644 --- a/apps/sim/lib/copilot/tools/client/blocks/get-trigger-blocks.ts +++ b/apps/sim/lib/copilot/tools/client/blocks/get-trigger-blocks.ts @@ -1,14 +1,9 @@ -import { createLogger } from '@sim/logger' import { ListFilter, Loader2, MinusCircle, XCircle } from 'lucide-react' import { BaseClientTool, type BaseClientToolMetadata, ClientToolCallState, } from '@/lib/copilot/tools/client/base-tool' -import { - ExecuteResponseSuccessSchema, - GetTriggerBlocksResult, -} from '@/lib/copilot/tools/shared/schemas' export class GetTriggerBlocksClientTool extends BaseClientTool { static readonly id = 'get_trigger_blocks' @@ -31,34 +26,8 @@ export class GetTriggerBlocksClientTool extends BaseClientTool { } async execute(): Promise { - const logger = createLogger('GetTriggerBlocksClientTool') - try { - this.setState(ClientToolCallState.executing) - - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'get_trigger_blocks', payload: {} }), - }) - if (!res.ok) { - const errorText = await res.text().catch(() => '') - try { - const errorJson = JSON.parse(errorText) - 
throw new Error(errorJson.error || errorText || `Server error (${res.status})`) - } catch { - throw new Error(errorText || `Server error (${res.status})`) - } - } - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - const result = GetTriggerBlocksResult.parse(parsed.result) - - await this.markToolComplete(200, 'Successfully retrieved trigger blocks', result) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const message = error instanceof Error ? error.message : String(error) - await this.markToolComplete(500, message) - this.setState(ClientToolCallState.error) - } + // Tool execution is handled server-side by the orchestrator. + // Client tool classes are retained for UI display configuration only. + this.setState(ClientToolCallState.success) } } diff --git a/apps/sim/lib/copilot/tools/client/knowledge/knowledge-base.ts b/apps/sim/lib/copilot/tools/client/knowledge/knowledge-base.ts index 89f60b1550..611caa68f8 100644 --- a/apps/sim/lib/copilot/tools/client/knowledge/knowledge-base.ts +++ b/apps/sim/lib/copilot/tools/client/knowledge/knowledge-base.ts @@ -1,16 +1,11 @@ -import { createLogger } from '@sim/logger' import { Database, Loader2, MinusCircle, PlusCircle, XCircle } from 'lucide-react' import { BaseClientTool, type BaseClientToolMetadata, ClientToolCallState, } from '@/lib/copilot/tools/client/base-tool' -import { - ExecuteResponseSuccessSchema, - type KnowledgeBaseArgs, -} from '@/lib/copilot/tools/shared/schemas' +import { type KnowledgeBaseArgs } from '@/lib/copilot/tools/shared/schemas' import { useCopilotStore } from '@/stores/panel/copilot/store' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' /** * Client tool for knowledge base operations @@ -99,45 +94,9 @@ export class KnowledgeBaseClientTool extends BaseClientTool { await this.execute(args) } - async execute(args?: KnowledgeBaseArgs): Promise { - const logger = createLogger('KnowledgeBaseClientTool') - try { - 
this.setState(ClientToolCallState.executing) - - // Get the workspace ID from the workflow registry hydration state - const { hydration } = useWorkflowRegistry.getState() - const workspaceId = hydration.workspaceId - - // Build payload with workspace ID included in args - const payload: KnowledgeBaseArgs = { - ...(args || { operation: 'list' }), - args: { - ...(args?.args || {}), - workspaceId: workspaceId || undefined, - }, - } - - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'knowledge_base', payload }), - }) - - if (!res.ok) { - const txt = await res.text().catch(() => '') - throw new Error(txt || `Server error (${res.status})`) - } - - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, 'Knowledge base operation completed', parsed.result) - this.setState(ClientToolCallState.success) - } catch (e: any) { - logger.error('execute failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to access knowledge base') - } + async execute(): Promise { + // Tool execution is handled server-side by the orchestrator. + // Client tool classes are retained for UI display configuration only. 
+ this.setState(ClientToolCallState.success) } } diff --git a/apps/sim/lib/copilot/tools/client/other/make-api-request.ts b/apps/sim/lib/copilot/tools/client/other/make-api-request.ts index 051622c05b..37d78b17c2 100644 --- a/apps/sim/lib/copilot/tools/client/other/make-api-request.ts +++ b/apps/sim/lib/copilot/tools/client/other/make-api-request.ts @@ -93,33 +93,15 @@ export class MakeApiRequestClientTool extends BaseClientTool { this.setState(ClientToolCallState.rejected) } - async handleAccept(args?: MakeApiRequestArgs): Promise { - const logger = createLogger('MakeApiRequestClientTool') - try { - this.setState(ClientToolCallState.executing) - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'make_api_request', payload: args || {} }), - }) - if (!res.ok) { - const txt = await res.text().catch(() => '') - throw new Error(txt || `Server error (${res.status})`) - } - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, 'API request executed', parsed.result) - this.setState(ClientToolCallState.success) - } catch (e: any) { - logger.error('execute failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'API request failed') - } + async handleAccept(_args?: MakeApiRequestArgs): Promise { + // Tool execution is handled server-side by the orchestrator. + this.setState(ClientToolCallState.executing) } - async execute(args?: MakeApiRequestArgs): Promise { - await this.handleAccept(args) + async execute(): Promise { + // Tool execution is handled server-side by the orchestrator. + // Client tool classes are retained for UI display configuration only. 
+ this.setState(ClientToolCallState.success) } } diff --git a/apps/sim/lib/copilot/tools/client/other/search-documentation.ts b/apps/sim/lib/copilot/tools/client/other/search-documentation.ts index cf784d3f2d..07fa971bbf 100644 --- a/apps/sim/lib/copilot/tools/client/other/search-documentation.ts +++ b/apps/sim/lib/copilot/tools/client/other/search-documentation.ts @@ -1,17 +1,9 @@ -import { createLogger } from '@sim/logger' import { BookOpen, Loader2, MinusCircle, XCircle } from 'lucide-react' import { BaseClientTool, type BaseClientToolMetadata, ClientToolCallState, } from '@/lib/copilot/tools/client/base-tool' -import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas' - -interface SearchDocumentationArgs { - query: string - topK?: number - threshold?: number -} export class SearchDocumentationClientTool extends BaseClientTool { static readonly id = 'search_documentation' @@ -53,28 +45,9 @@ export class SearchDocumentationClientTool extends BaseClientTool { }, } - async execute(args?: SearchDocumentationArgs): Promise { - const logger = createLogger('SearchDocumentationClientTool') - try { - this.setState(ClientToolCallState.executing) - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'search_documentation', payload: args || {} }), - }) - if (!res.ok) { - const txt = await res.text().catch(() => '') - throw new Error(txt || `Server error (${res.status})`) - } - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, 'Documentation search complete', parsed.result) - this.setState(ClientToolCallState.success) - } catch (e: any) { - logger.error('execute failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Documentation search failed') 
- } + async execute(): Promise { + // Tool execution is handled server-side by the orchestrator. + // Client tool classes are retained for UI display configuration only. + this.setState(ClientToolCallState.success) } } diff --git a/apps/sim/lib/copilot/tools/client/user/get-credentials.ts b/apps/sim/lib/copilot/tools/client/user/get-credentials.ts index 8ad821b140..0623693c47 100644 --- a/apps/sim/lib/copilot/tools/client/user/get-credentials.ts +++ b/apps/sim/lib/copilot/tools/client/user/get-credentials.ts @@ -1,17 +1,9 @@ -import { createLogger } from '@sim/logger' import { Key, Loader2, MinusCircle, XCircle } from 'lucide-react' import { BaseClientTool, type BaseClientToolMetadata, ClientToolCallState, } from '@/lib/copilot/tools/client/base-tool' -import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface GetCredentialsArgs { - userId?: string - workflowId?: string -} export class GetCredentialsClientTool extends BaseClientTool { static readonly id = 'get_credentials' @@ -41,33 +33,9 @@ export class GetCredentialsClientTool extends BaseClientTool { }, } - async execute(args?: GetCredentialsArgs): Promise { - const logger = createLogger('GetCredentialsClientTool') - try { - this.setState(ClientToolCallState.executing) - const payload: GetCredentialsArgs = { ...(args || {}) } - if (!payload.workflowId && !payload.userId) { - const { activeWorkflowId } = useWorkflowRegistry.getState() - if (activeWorkflowId) payload.workflowId = activeWorkflowId - } - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'get_credentials', payload }), - }) - if (!res.ok) { - const txt = await res.text().catch(() => '') - throw new Error(txt || `Server error (${res.status})`) - } - const json = await res.json() - const parsed = 
ExecuteResponseSuccessSchema.parse(json) - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, 'Connected integrations fetched', parsed.result) - this.setState(ClientToolCallState.success) - } catch (e: any) { - logger.error('execute failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to fetch connected integrations') - } + async execute(): Promise { + // Tool execution is handled server-side by the orchestrator. + // Client tool classes are retained for UI display configuration only. + this.setState(ClientToolCallState.success) } } diff --git a/apps/sim/lib/copilot/tools/client/user/set-environment-variables.ts b/apps/sim/lib/copilot/tools/client/user/set-environment-variables.ts index e4033ca85d..415987c8e1 100644 --- a/apps/sim/lib/copilot/tools/client/user/set-environment-variables.ts +++ b/apps/sim/lib/copilot/tools/client/user/set-environment-variables.ts @@ -107,46 +107,15 @@ export class SetEnvironmentVariablesClientTool extends BaseClientTool { this.setState(ClientToolCallState.rejected) } - async handleAccept(args?: SetEnvArgs): Promise { - const logger = createLogger('SetEnvironmentVariablesClientTool') - try { - this.setState(ClientToolCallState.executing) - const payload: SetEnvArgs = { ...(args || { variables: {} }) } - if (!payload.workflowId) { - const { activeWorkflowId } = useWorkflowRegistry.getState() - if (activeWorkflowId) payload.workflowId = activeWorkflowId - } - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'set_environment_variables', payload }), - }) - if (!res.ok) { - const txt = await res.text().catch(() => '') - throw new Error(txt || `Server error (${res.status})`) - } - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - this.setState(ClientToolCallState.success) 
- await this.markToolComplete(200, 'Environment variables updated', parsed.result) - this.setState(ClientToolCallState.success) - - // Refresh the environment store so the UI reflects the new variables - try { - await useEnvironmentStore.getState().loadEnvironmentVariables() - logger.info('Environment store refreshed after setting variables') - } catch (error) { - logger.warn('Failed to refresh environment store:', error) - } - } catch (e: any) { - logger.error('execute failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to set environment variables') - } + async handleAccept(_args?: SetEnvArgs): Promise { + // Tool execution is handled server-side by the orchestrator. + this.setState(ClientToolCallState.executing) } - async execute(args?: SetEnvArgs): Promise { - await this.handleAccept(args) + async execute(): Promise { + // Tool execution is handled server-side by the orchestrator. + // Client tool classes are retained for UI display configuration only. 
+ this.setState(ClientToolCallState.success) } } diff --git a/apps/sim/lib/copilot/tools/client/workflow/get-workflow-console.ts b/apps/sim/lib/copilot/tools/client/workflow/get-workflow-console.ts index 328ae5aad9..24f27713bc 100644 --- a/apps/sim/lib/copilot/tools/client/workflow/get-workflow-console.ts +++ b/apps/sim/lib/copilot/tools/client/workflow/get-workflow-console.ts @@ -1,18 +1,9 @@ -import { createLogger } from '@sim/logger' import { Loader2, MinusCircle, TerminalSquare, XCircle } from 'lucide-react' import { BaseClientTool, type BaseClientToolMetadata, ClientToolCallState, } from '@/lib/copilot/tools/client/base-tool' -import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface GetWorkflowConsoleArgs { - workflowId?: string - limit?: number - includeDetails?: boolean -} export class GetWorkflowConsoleClientTool extends BaseClientTool { static readonly id = 'get_workflow_console' @@ -61,52 +52,9 @@ export class GetWorkflowConsoleClientTool extends BaseClientTool { }, } - async execute(args?: GetWorkflowConsoleArgs): Promise { - const logger = createLogger('GetWorkflowConsoleClientTool') - try { - this.setState(ClientToolCallState.executing) - - const params = args || {} - let workflowId = params.workflowId - if (!workflowId) { - const { activeWorkflowId } = useWorkflowRegistry.getState() - workflowId = activeWorkflowId || undefined - } - if (!workflowId) { - logger.error('No active workflow found for console fetch') - this.setState(ClientToolCallState.error) - await this.markToolComplete(400, 'No active workflow found') - return - } - - const payload = { - workflowId, - limit: params.limit ?? 3, - includeDetails: params.includeDetails ?? 
true, - } - - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'get_workflow_console', payload }), - }) - if (!res.ok) { - const text = await res.text().catch(() => '') - throw new Error(text || `Server error (${res.status})`) - } - - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - - // Mark success and include result data for UI rendering - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, 'Workflow console fetched', parsed.result) - this.setState(ClientToolCallState.success) - } catch (e: any) { - const message = e instanceof Error ? e.message : String(e) - createLogger('GetWorkflowConsoleClientTool').error('execute failed', { message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, message) - } + async execute(): Promise { + // Tool execution is handled server-side by the orchestrator. + // Client tool classes are retained for UI display configuration only. 
+ this.setState(ClientToolCallState.success) } } diff --git a/apps/sim/stores/panel/copilot/store.ts b/apps/sim/stores/panel/copilot/store.ts index a17044afb8..be32d1c723 100644 --- a/apps/sim/stores/panel/copilot/store.ts +++ b/apps/sim/stores/panel/copilot/store.ts @@ -4169,10 +4169,8 @@ export const useCopilotStore = create()( // Credential masking loadSensitiveCredentialIds: async () => { try { - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'get_credentials', payload: {} }), + const res = await fetch('/api/copilot/credentials', { + credentials: 'include', }) if (!res.ok) { logger.warn('[loadSensitiveCredentialIds] Failed to fetch credentials', { From 4faa939e178da6ac483181e9bc087031c519877d Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Thu, 5 Feb 2026 12:09:21 -0800 Subject: [PATCH 22/72] Initial test shows diff store still working --- .../execute-copilot-server-tool/route.ts | 60 --- .../tools/client/knowledge/knowledge-base.ts | 4 +- .../tools/client/workflow/edit-workflow.ts | 391 +----------------- 3 files changed, 8 insertions(+), 447 deletions(-) delete mode 100644 apps/sim/app/api/copilot/execute-copilot-server-tool/route.ts diff --git a/apps/sim/app/api/copilot/execute-copilot-server-tool/route.ts b/apps/sim/app/api/copilot/execute-copilot-server-tool/route.ts deleted file mode 100644 index 3d6ab2e3a9..0000000000 --- a/apps/sim/app/api/copilot/execute-copilot-server-tool/route.ts +++ /dev/null @@ -1,60 +0,0 @@ -import { createLogger } from '@sim/logger' -import { type NextRequest, NextResponse } from 'next/server' -import { z } from 'zod' -import { - authenticateCopilotRequestSessionOnly, - createBadRequestResponse, - createInternalServerErrorResponse, - createRequestTracker, - createUnauthorizedResponse, -} from '@/lib/copilot/request-helpers' -import { routeExecution } from '@/lib/copilot/tools/server/router' - 
-const logger = createLogger('ExecuteCopilotServerToolAPI') - -const ExecuteSchema = z.object({ - toolName: z.string(), - payload: z.unknown().optional(), -}) - -/** - * @deprecated Transitional route used by the legacy client-side tool execution path - * (Zustand store → client tool classes → this route). Will be removed once the - * interactive browser path is fully migrated to server-side orchestration. - * New server-side code should use lib/copilot/orchestrator/tool-executor directly. - */ -export async function POST(req: NextRequest) { - const tracker = createRequestTracker() - try { - const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly() - if (!isAuthenticated || !userId) { - return createUnauthorizedResponse() - } - - const body = await req.json() - try { - const preview = JSON.stringify(body).slice(0, 300) - logger.debug(`[${tracker.requestId}] Incoming request body preview`, { preview }) - } catch {} - - const { toolName, payload } = ExecuteSchema.parse(body) - - logger.info(`[${tracker.requestId}] Executing server tool`, { toolName }) - const result = await routeExecution(toolName, payload, { userId }) - - try { - const resultPreview = JSON.stringify(result).slice(0, 300) - logger.debug(`[${tracker.requestId}] Server tool result preview`, { toolName, resultPreview }) - } catch {} - - return NextResponse.json({ success: true, result }) - } catch (error) { - if (error instanceof z.ZodError) { - logger.debug(`[${tracker.requestId}] Zod validation error`, { issues: error.issues }) - return createBadRequestResponse('Invalid request body for execute-copilot-server-tool') - } - logger.error(`[${tracker.requestId}] Failed to execute server tool:`, error) - const errorMessage = error instanceof Error ? 
error.message : 'Failed to execute server tool' - return createInternalServerErrorResponse(errorMessage) - } -} diff --git a/apps/sim/lib/copilot/tools/client/knowledge/knowledge-base.ts b/apps/sim/lib/copilot/tools/client/knowledge/knowledge-base.ts index 611caa68f8..0245c7deed 100644 --- a/apps/sim/lib/copilot/tools/client/knowledge/knowledge-base.ts +++ b/apps/sim/lib/copilot/tools/client/knowledge/knowledge-base.ts @@ -90,8 +90,8 @@ export class KnowledgeBaseClientTool extends BaseClientTool { this.setState(ClientToolCallState.rejected) } - async handleAccept(args?: KnowledgeBaseArgs): Promise { - await this.execute(args) + async handleAccept(): Promise { + await this.execute() } async execute(): Promise { diff --git a/apps/sim/lib/copilot/tools/client/workflow/edit-workflow.ts b/apps/sim/lib/copilot/tools/client/workflow/edit-workflow.ts index 55ffdaa930..6c56dc1408 100644 --- a/apps/sim/lib/copilot/tools/client/workflow/edit-workflow.ts +++ b/apps/sim/lib/copilot/tools/client/workflow/edit-workflow.ts @@ -1,4 +1,3 @@ -import { createLogger } from '@sim/logger' import { Grid2x2, Grid2x2Check, Grid2x2X, Loader2, MinusCircle, XCircle } from 'lucide-react' import { BaseClientTool, @@ -6,126 +5,14 @@ import { ClientToolCallState, } from '@/lib/copilot/tools/client/base-tool' import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' -import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas' -import { stripWorkflowDiffMarkers } from '@/lib/workflows/diff' -import { sanitizeForCopilot } from '@/lib/workflows/sanitization/json-sanitizer' -import { useWorkflowDiffStore } from '@/stores/workflow-diff/store' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' -import { mergeSubblockState } from '@/stores/workflows/utils' -import { useWorkflowStore } from '@/stores/workflows/workflow/store' -import type { WorkflowState } from '@/stores/workflows/workflow/types' - -interface EditWorkflowOperation { - 
operation_type: 'add' | 'edit' | 'delete' - block_id: string - params?: Record -} - -interface EditWorkflowArgs { - operations: EditWorkflowOperation[] - workflowId: string - currentUserWorkflow?: string -} export class EditWorkflowClientTool extends BaseClientTool { static readonly id = 'edit_workflow' - private lastResult: any | undefined - private hasExecuted = false - private hasAppliedDiff = false - private workflowId: string | undefined constructor(toolCallId: string) { super(toolCallId, EditWorkflowClientTool.id, EditWorkflowClientTool.metadata) } - async markToolComplete(status: number, message?: any, data?: any): Promise { - const logger = createLogger('EditWorkflowClientTool') - logger.info('markToolComplete payload', { - toolCallId: this.toolCallId, - toolName: this.name, - status, - message, - data, - }) - return super.markToolComplete(status, message, data) - } - - /** - * Get sanitized workflow JSON from a workflow state, merge subblocks, and sanitize for copilot - * This matches what get_user_workflow returns - */ - private getSanitizedWorkflowJson(workflowState: any): string | undefined { - const logger = createLogger('EditWorkflowClientTool') - - if (!this.workflowId) { - logger.warn('No workflowId available for getting sanitized workflow JSON') - return undefined - } - - if (!workflowState) { - logger.warn('No workflow state provided') - return undefined - } - - try { - // Normalize required properties - if (!workflowState.loops) workflowState.loops = {} - if (!workflowState.parallels) workflowState.parallels = {} - if (!workflowState.edges) workflowState.edges = [] - if (!workflowState.blocks) workflowState.blocks = {} - - // Merge latest subblock values so edits are reflected - let mergedState = workflowState - if (workflowState.blocks) { - mergedState = { - ...workflowState, - blocks: mergeSubblockState(workflowState.blocks, this.workflowId as any), - } - logger.info('Merged subblock values into workflow state', { - workflowId: this.workflowId, 
- blockCount: Object.keys(mergedState.blocks || {}).length, - }) - } - - // Sanitize workflow state for copilot (remove UI-specific data) - const sanitizedState = sanitizeForCopilot(mergedState) - - // Convert to JSON string for transport - const workflowJson = JSON.stringify(sanitizedState, null, 2) - logger.info('Successfully created sanitized workflow JSON', { - workflowId: this.workflowId, - jsonLength: workflowJson.length, - }) - - return workflowJson - } catch (error) { - logger.error('Failed to get sanitized workflow JSON', { - error: error instanceof Error ? error.message : String(error), - }) - return undefined - } - } - - /** - * Safely get the current workflow JSON sanitized for copilot without throwing. - * Used to ensure we always include workflow state in markComplete. - */ - private getCurrentWorkflowJsonSafe(logger: ReturnType): string | undefined { - try { - const currentState = useWorkflowStore.getState().getWorkflowState() - if (!currentState) { - logger.warn('No current workflow state available') - return undefined - } - return this.getSanitizedWorkflowJson(currentState) - } catch (error) { - logger.warn('Failed to get current workflow JSON safely', { - error: error instanceof Error ? 
error.message : String(error), - }) - return undefined - } - } - static readonly metadata: BaseClientToolMetadata = { displayNames: { [ClientToolCallState.generating]: { text: 'Editing your workflow', icon: Loader2 }, @@ -141,284 +28,18 @@ export class EditWorkflowClientTool extends BaseClientTool { isSpecial: true, customRenderer: 'edit_summary', }, - getDynamicText: (params, state) => { - const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId - if (workflowId) { - const workflowName = useWorkflowRegistry.getState().workflows[workflowId]?.name - if (workflowName) { - switch (state) { - case ClientToolCallState.success: - return `Edited ${workflowName}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Editing ${workflowName}` - case ClientToolCallState.error: - return `Failed to edit ${workflowName}` - case ClientToolCallState.review: - return `Review changes to ${workflowName}` - case ClientToolCallState.rejected: - return `Rejected changes to ${workflowName}` - case ClientToolCallState.aborted: - return `Aborted editing ${workflowName}` - } - } - } - return undefined - }, } async handleAccept(): Promise { - const logger = createLogger('EditWorkflowClientTool') - logger.info('handleAccept called', { toolCallId: this.toolCallId, state: this.getState() }) - // Tool was already marked complete in execute() - this is just for UI state + // Diff store calls this after review acceptance. 
this.setState(ClientToolCallState.success) } - async handleReject(): Promise { - const logger = createLogger('EditWorkflowClientTool') - logger.info('handleReject called', { toolCallId: this.toolCallId, state: this.getState() }) - // Tool was already marked complete in execute() - this is just for UI state - this.setState(ClientToolCallState.rejected) - } - - async execute(args?: EditWorkflowArgs): Promise { - const logger = createLogger('EditWorkflowClientTool') - - if (this.hasExecuted) { - logger.info('execute skipped (already executed)', { toolCallId: this.toolCallId }) - return - } - - // Use timeout protection to ensure tool always completes - await this.executeWithTimeout(async () => { - this.hasExecuted = true - logger.info('execute called', { toolCallId: this.toolCallId, argsProvided: !!args }) - this.setState(ClientToolCallState.executing) - - // Resolve workflowId - let workflowId = args?.workflowId - if (!workflowId) { - const { activeWorkflowId } = useWorkflowRegistry.getState() - workflowId = activeWorkflowId as any - } - if (!workflowId) { - this.setState(ClientToolCallState.error) - await this.markToolComplete(400, 'No active workflow found') - return - } - - // Store workflowId for later use - this.workflowId = workflowId - - // Validate operations - const operations = args?.operations || [] - if (!operations.length) { - this.setState(ClientToolCallState.error) - const currentWorkflowJson = this.getCurrentWorkflowJsonSafe(logger) - await this.markToolComplete( - 400, - 'No operations provided for edit_workflow', - currentWorkflowJson ? 
{ userWorkflow: currentWorkflowJson } : undefined - ) - return - } - - // Prepare currentUserWorkflow JSON from stores to preserve block IDs - let currentUserWorkflow = args?.currentUserWorkflow - - if (!currentUserWorkflow) { - try { - const workflowStore = useWorkflowStore.getState() - const fullState = workflowStore.getWorkflowState() - const mergedBlocks = mergeSubblockState(fullState.blocks, workflowId as any) - const payloadState = stripWorkflowDiffMarkers({ - ...fullState, - blocks: mergedBlocks, - edges: fullState.edges || [], - loops: fullState.loops || {}, - parallels: fullState.parallels || {}, - }) - currentUserWorkflow = JSON.stringify(payloadState) - } catch (error) { - logger.warn('Failed to build currentUserWorkflow from stores; proceeding without it', { - error, - }) - } - } - - // Fetch with AbortController for timeout support - const controller = new AbortController() - const fetchTimeout = setTimeout(() => controller.abort(), 60000) // 60s fetch timeout - - try { - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - toolName: 'edit_workflow', - payload: { - operations, - workflowId, - ...(currentUserWorkflow ? { currentUserWorkflow } : {}), - }, - }), - signal: controller.signal, - }) - - clearTimeout(fetchTimeout) - - if (!res.ok) { - const errorText = await res.text().catch(() => '') - let errorMessage: string - try { - const errorJson = JSON.parse(errorText) - errorMessage = errorJson.error || errorText || `Server error (${res.status})` - } catch { - errorMessage = errorText || `Server error (${res.status})` - } - // Mark complete with error but include current workflow state - this.setState(ClientToolCallState.error) - const currentWorkflowJson = this.getCurrentWorkflowJsonSafe(logger) - await this.markToolComplete( - res.status, - errorMessage, - currentWorkflowJson ? 
{ userWorkflow: currentWorkflowJson } : undefined - ) - return - } - - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - const result = parsed.result as any - this.lastResult = result - logger.info('server result parsed', { - hasWorkflowState: !!result?.workflowState, - blocksCount: result?.workflowState - ? Object.keys(result.workflowState.blocks || {}).length - : 0, - hasSkippedItems: !!result?.skippedItems, - skippedItemsCount: result?.skippedItems?.length || 0, - hasInputValidationErrors: !!result?.inputValidationErrors, - inputValidationErrorsCount: result?.inputValidationErrors?.length || 0, - }) - - // Log skipped items and validation errors for visibility - if (result?.skippedItems?.length > 0) { - logger.warn('Some operations were skipped during edit_workflow', { - skippedItems: result.skippedItems, - }) - } - if (result?.inputValidationErrors?.length > 0) { - logger.warn('Some inputs were rejected during edit_workflow', { - inputValidationErrors: result.inputValidationErrors, - }) - } - - // Update diff directly with workflow state - no YAML conversion needed! - if (!result.workflowState) { - this.setState(ClientToolCallState.error) - const currentWorkflowJson = this.getCurrentWorkflowJsonSafe(logger) - await this.markToolComplete( - 500, - 'No workflow state returned from server', - currentWorkflowJson ? 
{ userWorkflow: currentWorkflowJson } : undefined - ) - return - } - - let actualDiffWorkflow: WorkflowState | null = null - - if (!this.hasAppliedDiff) { - const diffStore = useWorkflowDiffStore.getState() - // setProposedChanges applies the state optimistically to the workflow store - await diffStore.setProposedChanges(result.workflowState) - logger.info('diff proposed changes set for edit_workflow with direct workflow state') - this.hasAppliedDiff = true - } - - // Read back the applied state from the workflow store - const workflowStore = useWorkflowStore.getState() - actualDiffWorkflow = workflowStore.getWorkflowState() - - if (!actualDiffWorkflow) { - this.setState(ClientToolCallState.error) - const currentWorkflowJson = this.getCurrentWorkflowJsonSafe(logger) - await this.markToolComplete( - 500, - 'Failed to retrieve workflow state after applying changes', - currentWorkflowJson ? { userWorkflow: currentWorkflowJson } : undefined - ) - return - } - - // Get the workflow state that was just applied, merge subblocks, and sanitize - // This matches what get_user_workflow would return (the true state after edits were applied) - let workflowJson = this.getSanitizedWorkflowJson(actualDiffWorkflow) - - // Fallback: try to get current workflow state if sanitization failed - if (!workflowJson) { - workflowJson = this.getCurrentWorkflowJsonSafe(logger) - } - - // userWorkflow must always be present on success - log error if missing - if (!workflowJson) { - logger.error('Failed to get workflow JSON on success path - this should not happen', { - toolCallId: this.toolCallId, - workflowId: this.workflowId, - }) - } - - // Build sanitized data including workflow JSON and any skipped/validation info - // Always include userWorkflow on success paths - const sanitizedData: Record = { - userWorkflow: workflowJson ?? 
'{}', // Fallback to empty object JSON if all else fails - } - - // Include skipped items and validation errors in the response for LLM feedback - if (result?.skippedItems?.length > 0) { - sanitizedData.skippedItems = result.skippedItems - sanitizedData.skippedItemsMessage = result.skippedItemsMessage - } - if (result?.inputValidationErrors?.length > 0) { - sanitizedData.inputValidationErrors = result.inputValidationErrors - sanitizedData.inputValidationMessage = result.inputValidationMessage - } - - // Build a message that includes info about skipped items - let completeMessage = 'Workflow diff ready for review' - if (result?.skippedItems?.length > 0 || result?.inputValidationErrors?.length > 0) { - const parts: string[] = [] - if (result?.skippedItems?.length > 0) { - parts.push(`${result.skippedItems.length} operation(s) skipped`) - } - if (result?.inputValidationErrors?.length > 0) { - parts.push(`${result.inputValidationErrors.length} input(s) rejected`) - } - completeMessage = `Workflow diff ready for review. Note: ${parts.join(', ')}.` - } - - // Mark complete early to unblock LLM stream - sanitizedData always has userWorkflow - await this.markToolComplete(200, completeMessage, sanitizedData) - - // Move into review state - this.setState(ClientToolCallState.review, { result }) - } catch (fetchError: any) { - clearTimeout(fetchTimeout) - // Handle error with current workflow state - this.setState(ClientToolCallState.error) - const currentWorkflowJson = this.getCurrentWorkflowJsonSafe(logger) - const errorMessage = - fetchError.name === 'AbortError' - ? 'Server request timed out' - : fetchError.message || String(fetchError) - await this.markToolComplete( - 500, - errorMessage, - currentWorkflowJson ? { userWorkflow: currentWorkflowJson } : undefined - ) - } - }) + async execute(): Promise { + // Tool execution is handled server-side by the orchestrator. 
+ // The store's tool_result SSE handler applies the diff preview + // via diffStore.setProposedChanges() when the result arrives. + this.setState(ClientToolCallState.success) } } From 71834480ce34836158f77b897b35ffb168079471 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Thu, 5 Feb 2026 13:11:12 -0800 Subject: [PATCH 23/72] Tool refactor --- .../components/tool-call/tool-call.tsx | 74 +- .../[workspaceId]/w/[workflowId]/workflow.tsx | 2 +- .../tools/client/base-subagent-tool.ts | 120 - .../sim/lib/copilot/tools/client/base-tool.ts | 207 +- .../tools/client/blocks/get-block-config.ts | 59 - .../tools/client/blocks/get-block-options.ts | 63 - .../client/blocks/get-blocks-and-tools.ts | 33 - .../client/blocks/get-blocks-metadata.ts | 61 - .../tools/client/blocks/get-trigger-blocks.ts | 33 - .../tools/client/examples/get-examples-rag.ts | 52 - .../examples/get-operations-examples.ts | 58 - .../client/examples/get-trigger-examples.ts | 31 - .../tools/client/examples/summarize.ts | 37 - .../copilot/tools/client/init-tool-configs.ts | 36 - .../tools/client/knowledge/knowledge-base.ts | 102 - apps/sim/lib/copilot/tools/client/manager.ts | 24 - .../tools/client/navigation/navigate-ui.ts | 241 -- .../lib/copilot/tools/client/other/auth.ts | 56 - .../tools/client/other/checkoff-todo.ts | 61 - .../tools/client/other/crawl-website.ts | 52 - .../copilot/tools/client/other/custom-tool.ts | 56 - .../lib/copilot/tools/client/other/debug.ts | 60 - .../lib/copilot/tools/client/other/deploy.ts | 56 - .../lib/copilot/tools/client/other/edit.ts | 61 - .../copilot/tools/client/other/evaluate.ts | 56 - .../tools/client/other/get-page-contents.ts | 53 - .../lib/copilot/tools/client/other/info.ts | 56 - .../copilot/tools/client/other/knowledge.ts | 56 - .../tools/client/other/make-api-request.ts | 109 - .../client/other/mark-todo-in-progress.ts | 64 - .../client/other/oauth-request-access.ts | 174 -- .../lib/copilot/tools/client/other/plan.ts | 59 - 
.../tools/client/other/remember-debug.ts | 76 - .../copilot/tools/client/other/research.ts | 56 - .../copilot/tools/client/other/scrape-page.ts | 52 - .../client/other/search-documentation.ts | 53 - .../tools/client/other/search-errors.ts | 52 - .../tools/client/other/search-library-docs.ts | 50 - .../tools/client/other/search-online.ts | 52 - .../tools/client/other/search-patterns.ts | 52 - .../lib/copilot/tools/client/other/sleep.ts | 157 -- .../copilot/tools/client/other/superagent.ts | 56 - .../lib/copilot/tools/client/other/test.ts | 56 - .../lib/copilot/tools/client/other/tour.ts | 56 - .../copilot/tools/client/other/workflow.ts | 56 - apps/sim/lib/copilot/tools/client/registry.ts | 34 - .../tools/client/tool-display-registry.ts | 2240 +++++++++++++++++ apps/sim/lib/copilot/tools/client/types.ts | 33 - .../sim/lib/copilot/tools/client/ui-config.ts | 238 -- .../tools/client/user/get-credentials.ts | 41 - .../client/user/set-environment-variables.ts | 126 - .../client/workflow/block-output-utils.ts | 142 -- .../workflow/check-deployment-status.ts | 215 -- .../workflow/create-workspace-mcp-server.ts | 155 -- .../tools/client/workflow/deploy-api.ts | 286 --- .../tools/client/workflow/deploy-chat.ts | 381 --- .../tools/client/workflow/deploy-mcp.ts | 250 -- .../tools/client/workflow/edit-workflow.ts | 47 - .../client/workflow/get-block-outputs.ts | 144 -- .../workflow/get-block-upstream-references.ts | 231 -- .../client/workflow/get-user-workflow.ts | 187 -- .../client/workflow/get-workflow-console.ts | 60 - .../client/workflow/get-workflow-data.ts | 269 -- .../client/workflow/get-workflow-from-name.ts | 117 - .../client/workflow/list-user-workflows.ts | 59 - .../workflow/list-workspace-mcp-servers.ts | 112 - .../client/workflow/manage-custom-tool.ts | 408 --- .../tools/client/workflow/manage-mcp-tool.ts | 360 --- .../copilot/tools/client/workflow/redeploy.ts | 71 - .../tools/client/workflow/run-workflow.ts | 231 -- .../workflow/set-global-workflow-variables.ts | 
278 -- apps/sim/stores/panel/copilot/store.ts | 412 +-- apps/sim/stores/workflow-diff/store.ts | 21 +- 73 files changed, 2334 insertions(+), 7900 deletions(-) delete mode 100644 apps/sim/lib/copilot/tools/client/base-subagent-tool.ts delete mode 100644 apps/sim/lib/copilot/tools/client/blocks/get-block-config.ts delete mode 100644 apps/sim/lib/copilot/tools/client/blocks/get-block-options.ts delete mode 100644 apps/sim/lib/copilot/tools/client/blocks/get-blocks-and-tools.ts delete mode 100644 apps/sim/lib/copilot/tools/client/blocks/get-blocks-metadata.ts delete mode 100644 apps/sim/lib/copilot/tools/client/blocks/get-trigger-blocks.ts delete mode 100644 apps/sim/lib/copilot/tools/client/examples/get-examples-rag.ts delete mode 100644 apps/sim/lib/copilot/tools/client/examples/get-operations-examples.ts delete mode 100644 apps/sim/lib/copilot/tools/client/examples/get-trigger-examples.ts delete mode 100644 apps/sim/lib/copilot/tools/client/examples/summarize.ts delete mode 100644 apps/sim/lib/copilot/tools/client/init-tool-configs.ts delete mode 100644 apps/sim/lib/copilot/tools/client/knowledge/knowledge-base.ts delete mode 100644 apps/sim/lib/copilot/tools/client/manager.ts delete mode 100644 apps/sim/lib/copilot/tools/client/navigation/navigate-ui.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/auth.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/checkoff-todo.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/crawl-website.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/custom-tool.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/debug.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/deploy.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/edit.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/evaluate.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/get-page-contents.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/info.ts delete 
mode 100644 apps/sim/lib/copilot/tools/client/other/knowledge.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/make-api-request.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/mark-todo-in-progress.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/oauth-request-access.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/plan.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/remember-debug.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/research.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/scrape-page.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/search-documentation.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/search-errors.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/search-library-docs.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/search-online.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/search-patterns.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/sleep.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/superagent.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/test.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/tour.ts delete mode 100644 apps/sim/lib/copilot/tools/client/other/workflow.ts delete mode 100644 apps/sim/lib/copilot/tools/client/registry.ts create mode 100644 apps/sim/lib/copilot/tools/client/tool-display-registry.ts delete mode 100644 apps/sim/lib/copilot/tools/client/types.ts delete mode 100644 apps/sim/lib/copilot/tools/client/ui-config.ts delete mode 100644 apps/sim/lib/copilot/tools/client/user/get-credentials.ts delete mode 100644 apps/sim/lib/copilot/tools/client/user/set-environment-variables.ts delete mode 100644 apps/sim/lib/copilot/tools/client/workflow/block-output-utils.ts delete mode 100644 apps/sim/lib/copilot/tools/client/workflow/check-deployment-status.ts delete mode 100644 
apps/sim/lib/copilot/tools/client/workflow/create-workspace-mcp-server.ts delete mode 100644 apps/sim/lib/copilot/tools/client/workflow/deploy-api.ts delete mode 100644 apps/sim/lib/copilot/tools/client/workflow/deploy-chat.ts delete mode 100644 apps/sim/lib/copilot/tools/client/workflow/deploy-mcp.ts delete mode 100644 apps/sim/lib/copilot/tools/client/workflow/edit-workflow.ts delete mode 100644 apps/sim/lib/copilot/tools/client/workflow/get-block-outputs.ts delete mode 100644 apps/sim/lib/copilot/tools/client/workflow/get-block-upstream-references.ts delete mode 100644 apps/sim/lib/copilot/tools/client/workflow/get-user-workflow.ts delete mode 100644 apps/sim/lib/copilot/tools/client/workflow/get-workflow-console.ts delete mode 100644 apps/sim/lib/copilot/tools/client/workflow/get-workflow-data.ts delete mode 100644 apps/sim/lib/copilot/tools/client/workflow/get-workflow-from-name.ts delete mode 100644 apps/sim/lib/copilot/tools/client/workflow/list-user-workflows.ts delete mode 100644 apps/sim/lib/copilot/tools/client/workflow/list-workspace-mcp-servers.ts delete mode 100644 apps/sim/lib/copilot/tools/client/workflow/manage-custom-tool.ts delete mode 100644 apps/sim/lib/copilot/tools/client/workflow/manage-mcp-tool.ts delete mode 100644 apps/sim/lib/copilot/tools/client/workflow/redeploy.ts delete mode 100644 apps/sim/lib/copilot/tools/client/workflow/run-workflow.ts delete mode 100644 apps/sim/lib/copilot/tools/client/workflow/set-global-workflow-variables.ts diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx index 8d0e59eff3..2d644c91e6 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx +++ 
b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx @@ -6,16 +6,10 @@ import clsx from 'clsx' import { ChevronUp, LayoutList } from 'lucide-react' import Editor from 'react-simple-code-editor' import { Button, Code, getCodeEditorProps, highlight, languages } from '@/components/emcn' -import { ClientToolCallState } from '@/lib/copilot/tools/client/base-tool' -import { getClientTool } from '@/lib/copilot/tools/client/manager' -import { getRegisteredTools } from '@/lib/copilot/tools/client/registry' -import '@/lib/copilot/tools/client/init-tool-configs' import { - getSubagentLabels as getSubagentLabelsFromConfig, - getToolUIConfig, - hasInterrupt as hasInterruptFromConfig, - isSpecialTool as isSpecialToolFromConfig, -} from '@/lib/copilot/tools/client/ui-config' + ClientToolCallState, + TOOL_DISPLAY_REGISTRY, +} from '@/lib/copilot/tools/client/tool-display-registry' import { formatDuration } from '@/lib/core/utils/formatting' import { CopilotMarkdownRenderer } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/components/markdown-renderer' import { SmoothStreamingText } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/components/smooth-streaming' @@ -26,7 +20,6 @@ import { getDisplayValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/co import { getBlock } from '@/blocks/registry' import type { CopilotToolCall } from '@/stores/panel' import { useCopilotStore } from '@/stores/panel' -import { CLASS_TOOL_METADATA } from '@/stores/panel/copilot/store' import type { SubAgentContentBlock } from '@/stores/panel/copilot/types' import { useWorkflowStore } from '@/stores/workflows/workflow/store' @@ -711,8 +704,8 @@ const ShimmerOverlayText = memo(function ShimmerOverlayText({ * @returns The completion label from UI config, defaults to 'Thought' */ function 
getSubagentCompletionLabel(toolName: string): string { - const labels = getSubagentLabelsFromConfig(toolName, false) - return labels?.completed ?? 'Thought' + const labels = TOOL_DISPLAY_REGISTRY[toolName]?.uiConfig?.subagentLabels + return labels?.completed || 'Thought' } /** @@ -944,7 +937,7 @@ const SubagentContentRenderer = memo(function SubagentContentRenderer({ * Determines if a tool call should display with special gradient styling. */ function isSpecialToolCall(toolCall: CopilotToolCall): boolean { - return isSpecialToolFromConfig(toolCall.name) + return TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.isSpecial === true } /** @@ -1224,28 +1217,11 @@ const WorkflowEditSummary = memo(function WorkflowEditSummary({ /** Checks if a tool is server-side executed (not a client tool) */ function isIntegrationTool(toolName: string): boolean { - return !CLASS_TOOL_METADATA[toolName] + return !TOOL_DISPLAY_REGISTRY[toolName] } function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean { - if (hasInterruptFromConfig(toolCall.name) && toolCall.state === 'pending') { - return true - } - - const instance = getClientTool(toolCall.id) - let hasInterrupt = !!instance?.getInterruptDisplays?.() - if (!hasInterrupt) { - try { - const def = getRegisteredTools()[toolCall.name] - if (def) { - hasInterrupt = - typeof def.hasInterrupt === 'function' - ? 
!!def.hasInterrupt(toolCall.params || {}) - : !!def.hasInterrupt - } - } catch {} - } - + const hasInterrupt = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.interrupt === true if (hasInterrupt && toolCall.state === 'pending') { return true } @@ -1299,11 +1275,9 @@ async function handleSkip(toolCall: CopilotToolCall, setToolCallState: any, onSt function getDisplayName(toolCall: CopilotToolCall): string { const fromStore = (toolCall as any).display?.text if (fromStore) return fromStore - try { - const def = getRegisteredTools()[toolCall.name] as any - const byState = def?.metadata?.displayNames?.[toolCall.state] - if (byState?.text) return byState.text - } catch {} + const registryEntry = TOOL_DISPLAY_REGISTRY[toolCall.name] + const byState = registryEntry?.displayNames?.[toolCall.state as ClientToolCallState] + if (byState?.text) return byState.text const stateVerb = getStateVerb(toolCall.state) const formattedName = formatToolName(toolCall.name) @@ -1481,23 +1455,7 @@ export function ToolCall({ return null // Special rendering for subagent tools - show as thinking text with tool calls at top level - const SUBAGENT_TOOLS = [ - 'plan', - 'edit', - 'debug', - 'test', - 'deploy', - 'evaluate', - 'auth', - 'research', - 'knowledge', - 'custom_tool', - 'tour', - 'info', - 'workflow', - 'superagent', - ] - const isSubagentTool = SUBAGENT_TOOLS.includes(toolCall.name) + const isSubagentTool = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.subagent === true // For ALL subagent tools, don't show anything until we have blocks with content if (isSubagentTool) { @@ -1537,17 +1495,18 @@ export function ToolCall({ stateStr === 'aborted' // Allow rendering if: - // 1. Tool is in CLASS_TOOL_METADATA (client tools), OR + // 1. Tool is in TOOL_DISPLAY_REGISTRY (client tools), OR // 2. We're in build mode (integration tools are executed server-side), OR // 3. 
Tool call is already completed (historical - should always render) - const isClientTool = !!CLASS_TOOL_METADATA[toolCall.name] + const isClientTool = !!TOOL_DISPLAY_REGISTRY[toolCall.name] const isIntegrationToolInBuildMode = mode === 'build' && !isClientTool if (!isClientTool && !isIntegrationToolInBuildMode && !isCompletedToolCall) { return null } + const toolUIConfig = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig // Check if tool has params table config (meaning it's expandable) - const hasParamsTable = !!getToolUIConfig(toolCall.name)?.paramsTable + const hasParamsTable = !!toolUIConfig?.paramsTable const isRunWorkflow = toolCall.name === 'run_workflow' const isExpandableTool = hasParamsTable || @@ -1557,7 +1516,6 @@ export function ToolCall({ const showButtons = isCurrentMessage && shouldShowRunSkipButtons(toolCall) // Check UI config for secondary action - only show for current message tool calls - const toolUIConfig = getToolUIConfig(toolCall.name) const secondaryAction = toolUIConfig?.secondaryAction const showSecondaryAction = secondaryAction?.showInStates.includes( toolCall.state as ClientToolCallState diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/workflow.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/workflow.tsx index 82d05a5872..11e3942e76 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/workflow.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/workflow.tsx @@ -18,7 +18,7 @@ import 'reactflow/dist/style.css' import { createLogger } from '@sim/logger' import { useShallow } from 'zustand/react/shallow' import { useSession } from '@/lib/auth/auth-client' -import type { OAuthConnectEventDetail } from '@/lib/copilot/tools/client/other/oauth-request-access' +import type { OAuthConnectEventDetail } from '@/lib/copilot/tools/client/base-tool' import type { OAuthProvider } from '@/lib/oauth' import { BLOCK_DIMENSIONS, CONTAINER_DIMENSIONS } from '@/lib/workflows/blocks/block-dimensions' import { 
TriggerUtils } from '@/lib/workflows/triggers/triggers' diff --git a/apps/sim/lib/copilot/tools/client/base-subagent-tool.ts b/apps/sim/lib/copilot/tools/client/base-subagent-tool.ts deleted file mode 100644 index 7a843dd882..0000000000 --- a/apps/sim/lib/copilot/tools/client/base-subagent-tool.ts +++ /dev/null @@ -1,120 +0,0 @@ -/** - * Base class for subagent tools. - * - * Subagent tools spawn a server-side subagent that does the actual work. - * The tool auto-executes and the subagent's output is streamed back - * as nested content under the tool call. - * - * Examples: edit, plan, debug, evaluate, research, etc. - */ -import type { LucideIcon } from 'lucide-react' -import { BaseClientTool, type BaseClientToolMetadata, ClientToolCallState } from './base-tool' -import type { SubagentConfig, ToolUIConfig } from './ui-config' -import { registerToolUIConfig } from './ui-config' - -/** - * Configuration for creating a subagent tool - */ -export interface SubagentToolConfig { - /** Unique tool ID */ - id: string - /** Display names per state */ - displayNames: { - streaming: { text: string; icon: LucideIcon } - success: { text: string; icon: LucideIcon } - error: { text: string; icon: LucideIcon } - } - /** Subagent UI configuration */ - subagent: SubagentConfig - /** - * Optional: Whether this is a "special" tool (gets gradient styling). - * Default: false - */ - isSpecial?: boolean -} - -/** - * Create metadata for a subagent tool from config - */ -function createSubagentMetadata(config: SubagentToolConfig): BaseClientToolMetadata { - const { displayNames, subagent, isSpecial } = config - const { streaming, success, error } = displayNames - - const uiConfig: ToolUIConfig = { - isSpecial: isSpecial ?? 
false, - subagent, - } - - return { - displayNames: { - [ClientToolCallState.generating]: streaming, - [ClientToolCallState.pending]: streaming, - [ClientToolCallState.executing]: streaming, - [ClientToolCallState.success]: success, - [ClientToolCallState.error]: error, - [ClientToolCallState.rejected]: { - text: `${config.id.charAt(0).toUpperCase() + config.id.slice(1)} skipped`, - icon: error.icon, - }, - [ClientToolCallState.aborted]: { - text: `${config.id.charAt(0).toUpperCase() + config.id.slice(1)} aborted`, - icon: error.icon, - }, - }, - uiConfig, - } -} - -/** - * Base class for subagent tools. - * Extends BaseClientTool with subagent-specific behavior. - */ -export abstract class BaseSubagentTool extends BaseClientTool { - /** - * Subagent configuration. - * Override in subclasses to customize behavior. - */ - static readonly subagentConfig: SubagentToolConfig - - constructor(toolCallId: string, config: SubagentToolConfig) { - super(toolCallId, config.id, createSubagentMetadata(config)) - // Register UI config for this tool - registerToolUIConfig(config.id, this.metadata.uiConfig!) - } - - /** - * Execute the subagent tool. - * Immediately transitions to executing state - the actual work - * is done server-side by the subagent. - */ - async execute(_args?: Record): Promise { - this.setState(ClientToolCallState.executing) - // The tool result will come from the server via tool_result event - // when the subagent completes its work - } -} - -/** - * Factory function to create a subagent tool class. - * Use this for simple subagent tools that don't need custom behavior. - */ -export function createSubagentToolClass(config: SubagentToolConfig) { - // Register UI config at class creation time - const uiConfig: ToolUIConfig = { - isSpecial: config.isSpecial ?? 
false, - subagent: config.subagent, - } - registerToolUIConfig(config.id, uiConfig) - - return class extends BaseClientTool { - static readonly id = config.id - - constructor(toolCallId: string) { - super(toolCallId, config.id, createSubagentMetadata(config)) - } - - async execute(_args?: Record): Promise { - this.setState(ClientToolCallState.executing) - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/base-tool.ts b/apps/sim/lib/copilot/tools/client/base-tool.ts index d3640bea0d..73a562aa15 100644 --- a/apps/sim/lib/copilot/tools/client/base-tool.ts +++ b/apps/sim/lib/copilot/tools/client/base-tool.ts @@ -1,15 +1,5 @@ -// Lazy require in setState to avoid circular init issues -import { createLogger } from '@sim/logger' import type { LucideIcon } from 'lucide-react' -import type { ToolUIConfig } from './ui-config' -const baseToolLogger = createLogger('BaseClientTool') - -const DEFAULT_TOOL_TIMEOUT_MS = 5 * 60 * 1000 - -export const WORKFLOW_EXECUTION_TIMEOUT_MS = 10 * 60 * 1000 - -// Client tool call states used by the new runtime export enum ClientToolCallState { generating = 'generating', pending = 'pending', @@ -22,198 +12,29 @@ export enum ClientToolCallState { background = 'background', } -// Display configuration for a given state export interface ClientToolDisplay { text: string icon: LucideIcon } -/** - * Function to generate dynamic display text based on tool parameters and state - * @param params - The tool call parameters - * @param state - The current tool call state - * @returns The dynamic text to display, or undefined to use the default text - */ -export type DynamicTextFormatter = ( - params: Record, - state: ClientToolCallState -) => string | undefined - export interface BaseClientToolMetadata { displayNames: Partial> - interrupt?: { - accept: ClientToolDisplay - reject: ClientToolDisplay - } - /** - * Optional function to generate dynamic display text based on parameters - * If provided, this will override the default text in displayNames 
- */ - getDynamicText?: DynamicTextFormatter - /** - * UI configuration for how this tool renders in the tool-call component. - * This replaces hardcoded logic in tool-call.tsx with declarative config. - */ - uiConfig?: ToolUIConfig + uiConfig?: Record + getDynamicText?: (params: Record, state: ClientToolCallState) => string | undefined } -export class BaseClientTool { - readonly toolCallId: string - readonly name: string - protected state: ClientToolCallState - protected metadata: BaseClientToolMetadata - protected isMarkedComplete = false - protected timeoutMs: number = DEFAULT_TOOL_TIMEOUT_MS - - constructor(toolCallId: string, name: string, metadata: BaseClientToolMetadata) { - this.toolCallId = toolCallId - this.name = name - this.metadata = metadata - this.state = ClientToolCallState.generating - } - - /** - * Set a custom timeout for this tool (in milliseconds) - */ - setTimeoutMs(ms: number): void { - this.timeoutMs = ms - } - - /** - * Check if this tool has been marked complete - */ - hasBeenMarkedComplete(): boolean { - return this.isMarkedComplete - } - - /** - * Ensure the tool is marked complete. If not already marked, marks it with error. - * This should be called in finally blocks to prevent leaked tool calls. - */ - async ensureMarkedComplete( - fallbackMessage = 'Tool execution did not complete properly' - ): Promise { - if (!this.isMarkedComplete) { - baseToolLogger.warn('Tool was not marked complete, marking with error', { - toolCallId: this.toolCallId, - toolName: this.name, - state: this.state, - }) - await this.markToolComplete(500, fallbackMessage) - this.setState(ClientToolCallState.error) - } - } - - /** - * Execute with timeout protection. Wraps the execution in a timeout and ensures - * markToolComplete is always called. - */ - async executeWithTimeout(executeFn: () => Promise, timeoutMs?: number): Promise { - const timeout = timeoutMs ?? 
this.timeoutMs - let timeoutId: NodeJS.Timeout | null = null - - try { - await Promise.race([ - executeFn(), - new Promise((_, reject) => { - timeoutId = setTimeout(() => { - reject(new Error(`Tool execution timed out after ${timeout / 1000} seconds`)) - }, timeout) - }), - ]) - } catch (error) { - const message = error instanceof Error ? error.message : String(error) - baseToolLogger.error('Tool execution failed or timed out', { - toolCallId: this.toolCallId, - toolName: this.name, - error: message, - }) - // Only mark complete if not already marked - if (!this.isMarkedComplete) { - await this.markToolComplete(500, message) - this.setState(ClientToolCallState.error) - } - } finally { - if (timeoutId) clearTimeout(timeoutId) - // Ensure tool is always marked complete - await this.ensureMarkedComplete() - } - } - - // Intentionally left empty - specific tools can override - // eslint-disable-next-line @typescript-eslint/no-unused-vars - async execute(_args?: Record): Promise { - return - } - - /** - * Mark a tool as complete. Tool completion is now handled server-side by the - * orchestrator (which calls the Go backend directly). Client tools are retained - * for UI display only — this method just tracks local state. 
- */ - async markToolComplete(_status: number, _message?: unknown, _data?: unknown): Promise { - this.isMarkedComplete = true - return true - } - - // Accept (continue) for interrupt flows: move pending -> executing - async handleAccept(): Promise { - this.setState(ClientToolCallState.executing) - } - - // Reject (skip) for interrupt flows: mark complete with a standard skip message - async handleReject(): Promise { - await this.markToolComplete(200, 'Tool execution was skipped by the user') - this.setState(ClientToolCallState.rejected) - } - - // Return the display configuration for the current state - getDisplayState(): ClientToolDisplay | undefined { - return this.metadata.displayNames[this.state] - } - - // Return interrupt display config (labels/icons) if defined - getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined { - return this.metadata.interrupt - } - - // Transition to a new state (also sync to Copilot store) - setState(next: ClientToolCallState, options?: { result?: any }): void { - const prev = this.state - this.state = next - - // Notify store via manager to avoid import cycles - try { - const { syncToolState } = require('@/lib/copilot/tools/client/manager') - syncToolState(this.toolCallId, next, options) - } catch {} - - // Log transition after syncing - try { - baseToolLogger.info('setState transition', { - toolCallId: this.toolCallId, - toolName: this.name, - prev, - next, - hasResult: options?.result !== undefined, - }) - } catch {} - } - - // Expose current state - getState(): ClientToolCallState { - return this.state - } +export type DynamicTextFormatter = ( + params: Record, + state: ClientToolCallState +) => string | undefined - hasInterrupt(): boolean { - return !!this.metadata.interrupt - } +export const WORKFLOW_EXECUTION_TIMEOUT_MS = 10 * 60 * 1000 - /** - * Get UI configuration for this tool. - * Used by tool-call component to determine rendering behavior. 
- */ - getUIConfig(): ToolUIConfig | undefined { - return this.metadata.uiConfig - } +/** Event detail for OAuth connect events dispatched by the copilot. */ +export interface OAuthConnectEventDetail { + providerName: string + serviceId: string + providerId: string + requiredScopes: string[] + newScopes?: string[] } diff --git a/apps/sim/lib/copilot/tools/client/blocks/get-block-config.ts b/apps/sim/lib/copilot/tools/client/blocks/get-block-config.ts deleted file mode 100644 index 88e6963976..0000000000 --- a/apps/sim/lib/copilot/tools/client/blocks/get-block-config.ts +++ /dev/null @@ -1,59 +0,0 @@ -import { FileCode, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { getLatestBlock } from '@/blocks/registry' - -export class GetBlockConfigClientTool extends BaseClientTool { - static readonly id = 'get_block_config' - - constructor(toolCallId: string) { - super(toolCallId, GetBlockConfigClientTool.id, GetBlockConfigClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Getting block config', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Getting block config', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Getting block config', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Retrieved block config', icon: FileCode }, - [ClientToolCallState.error]: { text: 'Failed to get block config', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted getting block config', icon: XCircle }, - [ClientToolCallState.rejected]: { - text: 'Skipped getting block config', - icon: MinusCircle, - }, - }, - getDynamicText: (params, state) => { - if (params?.blockType && typeof params.blockType === 'string') { - const blockConfig = getLatestBlock(params.blockType) - const blockName = (blockConfig?.name ?? 
params.blockType.replace(/_/g, ' ')).toLowerCase() - const opSuffix = params.operation ? ` (${params.operation})` : '' - - switch (state) { - case ClientToolCallState.success: - return `Retrieved ${blockName}${opSuffix} config` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Retrieving ${blockName}${opSuffix} config` - case ClientToolCallState.error: - return `Failed to retrieve ${blockName}${opSuffix} config` - case ClientToolCallState.aborted: - return `Aborted retrieving ${blockName}${opSuffix} config` - case ClientToolCallState.rejected: - return `Skipped retrieving ${blockName}${opSuffix} config` - } - } - return undefined - }, - } - - async execute(): Promise { - // Tool execution is handled server-side by the orchestrator. - // Client tool classes are retained for UI display configuration only. - this.setState(ClientToolCallState.success) - } -} diff --git a/apps/sim/lib/copilot/tools/client/blocks/get-block-options.ts b/apps/sim/lib/copilot/tools/client/blocks/get-block-options.ts deleted file mode 100644 index 993773f0e7..0000000000 --- a/apps/sim/lib/copilot/tools/client/blocks/get-block-options.ts +++ /dev/null @@ -1,63 +0,0 @@ -import { ListFilter, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { getLatestBlock } from '@/blocks/registry' - -export class GetBlockOptionsClientTool extends BaseClientTool { - static readonly id = 'get_block_options' - - constructor(toolCallId: string) { - super(toolCallId, GetBlockOptionsClientTool.id, GetBlockOptionsClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Getting block operations', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Getting block operations', icon: Loader2 }, - 
[ClientToolCallState.executing]: { text: 'Getting block operations', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Retrieved block operations', icon: ListFilter }, - [ClientToolCallState.error]: { text: 'Failed to get block operations', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted getting block operations', icon: XCircle }, - [ClientToolCallState.rejected]: { - text: 'Skipped getting block operations', - icon: MinusCircle, - }, - }, - getDynamicText: (params, state) => { - const blockId = - (params as any)?.blockId || - (params as any)?.blockType || - (params as any)?.block_id || - (params as any)?.block_type - if (typeof blockId === 'string') { - const blockConfig = getLatestBlock(blockId) - const blockName = (blockConfig?.name ?? blockId.replace(/_/g, ' ')).toLowerCase() - - switch (state) { - case ClientToolCallState.success: - return `Retrieved ${blockName} operations` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Retrieving ${blockName} operations` - case ClientToolCallState.error: - return `Failed to retrieve ${blockName} operations` - case ClientToolCallState.aborted: - return `Aborted retrieving ${blockName} operations` - case ClientToolCallState.rejected: - return `Skipped retrieving ${blockName} operations` - } - } - return undefined - }, - } - - async execute(): Promise { - // Tool execution is handled server-side by the orchestrator. - // Client tool classes are retained for UI display configuration only. 
- this.setState(ClientToolCallState.success) - } -} diff --git a/apps/sim/lib/copilot/tools/client/blocks/get-blocks-and-tools.ts b/apps/sim/lib/copilot/tools/client/blocks/get-blocks-and-tools.ts deleted file mode 100644 index 17108c6db0..0000000000 --- a/apps/sim/lib/copilot/tools/client/blocks/get-blocks-and-tools.ts +++ /dev/null @@ -1,33 +0,0 @@ -import { Blocks, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class GetBlocksAndToolsClientTool extends BaseClientTool { - static readonly id = 'get_blocks_and_tools' - - constructor(toolCallId: string) { - super(toolCallId, GetBlocksAndToolsClientTool.id, GetBlocksAndToolsClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Exploring available options', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Exploring available options', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Exploring available options', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Explored available options', icon: Blocks }, - [ClientToolCallState.error]: { text: 'Failed to explore options', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted exploring options', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped exploring options', icon: MinusCircle }, - }, - interrupt: undefined, - } - - async execute(): Promise { - // Tool execution is handled server-side by the orchestrator. - // Client tool classes are retained for UI display configuration only. 
- this.setState(ClientToolCallState.success) - } -} diff --git a/apps/sim/lib/copilot/tools/client/blocks/get-blocks-metadata.ts b/apps/sim/lib/copilot/tools/client/blocks/get-blocks-metadata.ts deleted file mode 100644 index fd547fa0c0..0000000000 --- a/apps/sim/lib/copilot/tools/client/blocks/get-blocks-metadata.ts +++ /dev/null @@ -1,61 +0,0 @@ -import { ListFilter, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class GetBlocksMetadataClientTool extends BaseClientTool { - static readonly id = 'get_blocks_metadata' - - constructor(toolCallId: string) { - super(toolCallId, GetBlocksMetadataClientTool.id, GetBlocksMetadataClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Searching block choices', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Searching block choices', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Searching block choices', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Searched block choices', icon: ListFilter }, - [ClientToolCallState.error]: { text: 'Failed to search block choices', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted searching block choices', icon: XCircle }, - [ClientToolCallState.rejected]: { - text: 'Skipped searching block choices', - icon: MinusCircle, - }, - }, - getDynamicText: (params, state) => { - if (params?.blockIds && Array.isArray(params.blockIds) && params.blockIds.length > 0) { - const blockList = params.blockIds - .slice(0, 3) - .map((blockId) => blockId.replace(/_/g, ' ')) - .join(', ') - const more = params.blockIds.length > 3 ? '...' 
: '' - const blocks = `${blockList}${more}` - - switch (state) { - case ClientToolCallState.success: - return `Searched ${blocks}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Searching ${blocks}` - case ClientToolCallState.error: - return `Failed to search ${blocks}` - case ClientToolCallState.aborted: - return `Aborted searching ${blocks}` - case ClientToolCallState.rejected: - return `Skipped searching ${blocks}` - } - } - return undefined - }, - } - - async execute(): Promise { - // Tool execution is handled server-side by the orchestrator. - // Client tool classes are retained for UI display configuration only. - this.setState(ClientToolCallState.success) - } -} diff --git a/apps/sim/lib/copilot/tools/client/blocks/get-trigger-blocks.ts b/apps/sim/lib/copilot/tools/client/blocks/get-trigger-blocks.ts deleted file mode 100644 index 2d8bda809b..0000000000 --- a/apps/sim/lib/copilot/tools/client/blocks/get-trigger-blocks.ts +++ /dev/null @@ -1,33 +0,0 @@ -import { ListFilter, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class GetTriggerBlocksClientTool extends BaseClientTool { - static readonly id = 'get_trigger_blocks' - - constructor(toolCallId: string) { - super(toolCallId, GetTriggerBlocksClientTool.id, GetTriggerBlocksClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Finding trigger blocks', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Finding trigger blocks', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Finding trigger blocks', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Found trigger blocks', icon: ListFilter }, - [ClientToolCallState.error]: { text: 'Failed to find trigger blocks', icon: XCircle }, 
- [ClientToolCallState.aborted]: { text: 'Aborted finding trigger blocks', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped finding trigger blocks', icon: MinusCircle }, - }, - interrupt: undefined, - } - - async execute(): Promise { - // Tool execution is handled server-side by the orchestrator. - // Client tool classes are retained for UI display configuration only. - this.setState(ClientToolCallState.success) - } -} diff --git a/apps/sim/lib/copilot/tools/client/examples/get-examples-rag.ts b/apps/sim/lib/copilot/tools/client/examples/get-examples-rag.ts deleted file mode 100644 index 258330e0e9..0000000000 --- a/apps/sim/lib/copilot/tools/client/examples/get-examples-rag.ts +++ /dev/null @@ -1,52 +0,0 @@ -import { Loader2, MinusCircle, Search, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class GetExamplesRagClientTool extends BaseClientTool { - static readonly id = 'get_examples_rag' - - constructor(toolCallId: string) { - super(toolCallId, GetExamplesRagClientTool.id, GetExamplesRagClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Fetching examples', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Fetching examples', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Fetching examples', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Fetched examples', icon: Search }, - [ClientToolCallState.error]: { text: 'Failed to fetch examples', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted getting examples', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped getting examples', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.query && typeof params.query === 'string') { - const query = params.query - - switch 
(state) { - case ClientToolCallState.success: - return `Found examples for ${query}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Searching examples for ${query}` - case ClientToolCallState.error: - return `Failed to find examples for ${query}` - case ClientToolCallState.aborted: - return `Aborted searching examples for ${query}` - case ClientToolCallState.rejected: - return `Skipped searching examples for ${query}` - } - } - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/examples/get-operations-examples.ts b/apps/sim/lib/copilot/tools/client/examples/get-operations-examples.ts deleted file mode 100644 index 4a14b71ef8..0000000000 --- a/apps/sim/lib/copilot/tools/client/examples/get-operations-examples.ts +++ /dev/null @@ -1,58 +0,0 @@ -import { Loader2, MinusCircle, XCircle, Zap } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class GetOperationsExamplesClientTool extends BaseClientTool { - static readonly id = 'get_operations_examples' - - constructor(toolCallId: string) { - super(toolCallId, GetOperationsExamplesClientTool.id, GetOperationsExamplesClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Designing workflow component', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Designing workflow component', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Designing workflow component', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Designed workflow component', icon: Zap }, - [ClientToolCallState.error]: { text: 'Failed to design workflow component', icon: XCircle }, - [ClientToolCallState.aborted]: { - text: 'Aborted designing workflow component', - icon: MinusCircle, - }, - 
[ClientToolCallState.rejected]: { - text: 'Skipped designing workflow component', - icon: MinusCircle, - }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.query && typeof params.query === 'string') { - const query = params.query - - switch (state) { - case ClientToolCallState.success: - return `Designed ${query}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Designing ${query}` - case ClientToolCallState.error: - return `Failed to design ${query}` - case ClientToolCallState.aborted: - return `Aborted designing ${query}` - case ClientToolCallState.rejected: - return `Skipped designing ${query}` - } - } - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/examples/get-trigger-examples.ts b/apps/sim/lib/copilot/tools/client/examples/get-trigger-examples.ts deleted file mode 100644 index f24ea48017..0000000000 --- a/apps/sim/lib/copilot/tools/client/examples/get-trigger-examples.ts +++ /dev/null @@ -1,31 +0,0 @@ -import { Loader2, MinusCircle, XCircle, Zap } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class GetTriggerExamplesClientTool extends BaseClientTool { - static readonly id = 'get_trigger_examples' - - constructor(toolCallId: string) { - super(toolCallId, GetTriggerExamplesClientTool.id, GetTriggerExamplesClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Selecting a trigger', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Selecting a trigger', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Selecting a trigger', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Selected a trigger', icon: Zap }, - [ClientToolCallState.error]: { text: 'Failed to 
select a trigger', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted selecting a trigger', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped selecting a trigger', icon: MinusCircle }, - }, - interrupt: undefined, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/examples/summarize.ts b/apps/sim/lib/copilot/tools/client/examples/summarize.ts deleted file mode 100644 index 240be300b5..0000000000 --- a/apps/sim/lib/copilot/tools/client/examples/summarize.ts +++ /dev/null @@ -1,37 +0,0 @@ -import { Loader2, MinusCircle, PencilLine, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class SummarizeClientTool extends BaseClientTool { - static readonly id = 'summarize_conversation' - - constructor(toolCallId: string) { - super(toolCallId, SummarizeClientTool.id, SummarizeClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Summarizing conversation', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Summarizing conversation', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Summarizing conversation', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Summarized conversation', icon: PencilLine }, - [ClientToolCallState.error]: { text: 'Failed to summarize conversation', icon: XCircle }, - [ClientToolCallState.aborted]: { - text: 'Aborted summarizing conversation', - icon: MinusCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped summarizing conversation', - icon: MinusCircle, - }, - }, - interrupt: undefined, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/init-tool-configs.ts b/apps/sim/lib/copilot/tools/client/init-tool-configs.ts deleted file mode 100644 index 336fdbb0c5..0000000000 --- 
a/apps/sim/lib/copilot/tools/client/init-tool-configs.ts +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Initialize all tool UI configurations. - * - * This module imports all client tools to trigger their UI config registration. - * Import this module early in the app to ensure all tool configs are available. - */ - -// Other tools (subagents) -import './other/auth' -import './other/custom-tool' -import './other/debug' -import './other/deploy' -import './other/edit' -import './other/evaluate' -import './other/info' -import './other/knowledge' -import './other/make-api-request' -import './other/plan' -import './other/research' -import './other/sleep' -import './other/superagent' -import './other/test' -import './other/tour' -import './other/workflow' - -// Workflow tools -import './workflow/deploy-api' -import './workflow/deploy-chat' -import './workflow/deploy-mcp' -import './workflow/edit-workflow' -import './workflow/redeploy' -import './workflow/run-workflow' -import './workflow/set-global-workflow-variables' - -// User tools -import './user/set-environment-variables' diff --git a/apps/sim/lib/copilot/tools/client/knowledge/knowledge-base.ts b/apps/sim/lib/copilot/tools/client/knowledge/knowledge-base.ts deleted file mode 100644 index 0245c7deed..0000000000 --- a/apps/sim/lib/copilot/tools/client/knowledge/knowledge-base.ts +++ /dev/null @@ -1,102 +0,0 @@ -import { Database, Loader2, MinusCircle, PlusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { type KnowledgeBaseArgs } from '@/lib/copilot/tools/shared/schemas' -import { useCopilotStore } from '@/stores/panel/copilot/store' - -/** - * Client tool for knowledge base operations - */ -export class KnowledgeBaseClientTool extends BaseClientTool { - static readonly id = 'knowledge_base' - - constructor(toolCallId: string) { - super(toolCallId, KnowledgeBaseClientTool.id, 
KnowledgeBaseClientTool.metadata) - } - - /** - * Only show interrupt for create operation - */ - getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined { - const toolCallsById = useCopilotStore.getState().toolCallsById - const toolCall = toolCallsById[this.toolCallId] - const params = toolCall?.params as KnowledgeBaseArgs | undefined - - // Only require confirmation for create operation - if (params?.operation === 'create') { - const name = params?.args?.name || 'new knowledge base' - return { - accept: { text: `Create "${name}"`, icon: PlusCircle }, - reject: { text: 'Skip', icon: XCircle }, - } - } - - // No interrupt for list, get, query - auto-execute - return undefined - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Accessing knowledge base', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Accessing knowledge base', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Accessing knowledge base', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Accessed knowledge base', icon: Database }, - [ClientToolCallState.error]: { text: 'Failed to access knowledge base', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted knowledge base access', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped knowledge base access', icon: MinusCircle }, - }, - getDynamicText: (params: Record, state: ClientToolCallState) => { - const operation = params?.operation as string | undefined - const name = params?.args?.name as string | undefined - - const opVerbs: Record = { - create: { - active: 'Creating knowledge base', - past: 'Created knowledge base', - pending: name ? 
`Create knowledge base "${name}"?` : 'Create knowledge base?', - }, - list: { active: 'Listing knowledge bases', past: 'Listed knowledge bases' }, - get: { active: 'Getting knowledge base', past: 'Retrieved knowledge base' }, - query: { active: 'Querying knowledge base', past: 'Queried knowledge base' }, - } - const defaultVerb: { active: string; past: string; pending?: string } = { - active: 'Accessing knowledge base', - past: 'Accessed knowledge base', - } - const verb = operation ? opVerbs[operation] || defaultVerb : defaultVerb - - if (state === ClientToolCallState.success) { - return verb.past - } - if (state === ClientToolCallState.pending && verb.pending) { - return verb.pending - } - if ( - state === ClientToolCallState.generating || - state === ClientToolCallState.pending || - state === ClientToolCallState.executing - ) { - return verb.active - } - return undefined - }, - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(): Promise { - await this.execute() - } - - async execute(): Promise { - // Tool execution is handled server-side by the orchestrator. - // Client tool classes are retained for UI display configuration only. 
- this.setState(ClientToolCallState.success) - } -} diff --git a/apps/sim/lib/copilot/tools/client/manager.ts b/apps/sim/lib/copilot/tools/client/manager.ts deleted file mode 100644 index bb83771d1b..0000000000 --- a/apps/sim/lib/copilot/tools/client/manager.ts +++ /dev/null @@ -1,24 +0,0 @@ -const instances: Record = {} - -let syncStateFn: ((toolCallId: string, nextState: any, options?: { result?: any }) => void) | null = - null - -export function registerClientTool(toolCallId: string, instance: any) { - instances[toolCallId] = instance -} - -export function getClientTool(toolCallId: string): any | undefined { - return instances[toolCallId] -} - -export function registerToolStateSync( - fn: (toolCallId: string, nextState: any, options?: { result?: any }) => void -) { - syncStateFn = fn -} - -export function syncToolState(toolCallId: string, nextState: any, options?: { result?: any }) { - try { - syncStateFn?.(toolCallId, nextState, options) - } catch {} -} diff --git a/apps/sim/lib/copilot/tools/client/navigation/navigate-ui.ts b/apps/sim/lib/copilot/tools/client/navigation/navigate-ui.ts deleted file mode 100644 index 5b9d30c067..0000000000 --- a/apps/sim/lib/copilot/tools/client/navigation/navigate-ui.ts +++ /dev/null @@ -1,241 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Navigation, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { useCopilotStore } from '@/stores/panel/copilot/store' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -type NavigationDestination = 'workflow' | 'logs' | 'templates' | 'vector_db' | 'settings' - -interface NavigateUIArgs { - destination: NavigationDestination - workflowName?: string -} - -export class NavigateUIClientTool extends BaseClientTool { - static readonly id = 'navigate_ui' - - constructor(toolCallId: string) { - super(toolCallId, 
NavigateUIClientTool.id, NavigateUIClientTool.metadata) - } - - /** - * Override to provide dynamic button text based on destination - */ - getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined { - const toolCallsById = useCopilotStore.getState().toolCallsById - const toolCall = toolCallsById[this.toolCallId] - const params = toolCall?.params as NavigateUIArgs | undefined - - const destination = params?.destination - const workflowName = params?.workflowName - - let buttonText = 'Navigate' - - if (destination === 'workflow' && workflowName) { - buttonText = 'Open workflow' - } else if (destination === 'logs') { - buttonText = 'Open logs' - } else if (destination === 'templates') { - buttonText = 'Open templates' - } else if (destination === 'vector_db') { - buttonText = 'Open vector DB' - } else if (destination === 'settings') { - buttonText = 'Open settings' - } - - return { - accept: { text: buttonText, icon: Navigation }, - reject: { text: 'Skip', icon: XCircle }, - } - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to open', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Open?', icon: Navigation }, - [ClientToolCallState.executing]: { text: 'Opening', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Opened', icon: Navigation }, - [ClientToolCallState.error]: { text: 'Failed to open', icon: X }, - [ClientToolCallState.aborted]: { - text: 'Aborted opening', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped opening', - icon: XCircle, - }, - }, - interrupt: { - accept: { text: 'Open', icon: Navigation }, - reject: { text: 'Skip', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const destination = params?.destination as NavigationDestination | undefined - const workflowName = params?.workflowName - - const action = 'open' - const actionCapitalized = 'Open' - const actionPast = 'opened' - const 
actionIng = 'opening' - let target = '' - - if (destination === 'workflow' && workflowName) { - target = ` workflow "${workflowName}"` - } else if (destination === 'workflow') { - target = ' workflows' - } else if (destination === 'logs') { - target = ' logs' - } else if (destination === 'templates') { - target = ' templates' - } else if (destination === 'vector_db') { - target = ' vector database' - } else if (destination === 'settings') { - target = ' settings' - } - - const fullAction = `${action}${target}` - const fullActionCapitalized = `${actionCapitalized}${target}` - const fullActionPast = `${actionPast}${target}` - const fullActionIng = `${actionIng}${target}` - - switch (state) { - case ClientToolCallState.success: - return fullActionPast.charAt(0).toUpperCase() + fullActionPast.slice(1) - case ClientToolCallState.executing: - return fullActionIng.charAt(0).toUpperCase() + fullActionIng.slice(1) - case ClientToolCallState.generating: - return `Preparing to ${fullAction}` - case ClientToolCallState.pending: - return `${fullActionCapitalized}?` - case ClientToolCallState.error: - return `Failed to ${fullAction}` - case ClientToolCallState.aborted: - return `Aborted ${fullAction}` - case ClientToolCallState.rejected: - return `Skipped ${fullAction}` - } - return undefined - }, - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: NavigateUIArgs): Promise { - const logger = createLogger('NavigateUIClientTool') - try { - this.setState(ClientToolCallState.executing) - - // Get params from copilot store if not provided directly - let destination = args?.destination - let workflowName = args?.workflowName - - if (!destination) { - const toolCallsById = useCopilotStore.getState().toolCallsById - const toolCall = toolCallsById[this.toolCallId] - const params = toolCall?.params as NavigateUIArgs | undefined - destination = params?.destination - workflowName = 
params?.workflowName - } - - if (!destination) { - throw new Error('No destination provided') - } - - let navigationUrl = '' - let successMessage = '' - - // Get current workspace ID from URL - const workspaceId = window.location.pathname.split('/')[2] - - switch (destination) { - case 'workflow': - if (workflowName) { - // Find workflow by name - const { workflows } = useWorkflowRegistry.getState() - const workflow = Object.values(workflows).find( - (w) => w.name.toLowerCase() === workflowName.toLowerCase() - ) - - if (!workflow) { - throw new Error(`Workflow "${workflowName}" not found`) - } - - navigationUrl = `/workspace/${workspaceId}/w/${workflow.id}` - successMessage = `Navigated to workflow "${workflowName}"` - } else { - navigationUrl = `/workspace/${workspaceId}/w` - successMessage = 'Navigated to workflows' - } - break - - case 'logs': - navigationUrl = `/workspace/${workspaceId}/logs` - successMessage = 'Navigated to logs' - break - - case 'templates': - navigationUrl = `/workspace/${workspaceId}/templates` - successMessage = 'Navigated to templates' - break - - case 'vector_db': - navigationUrl = `/workspace/${workspaceId}/vector-db` - successMessage = 'Navigated to vector database' - break - - case 'settings': - window.dispatchEvent(new CustomEvent('open-settings', { detail: { tab: 'general' } })) - successMessage = 'Opened settings' - break - - default: - throw new Error(`Unknown destination: ${destination}`) - } - - // Navigate if URL was set - if (navigationUrl) { - window.location.href = navigationUrl - } - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, successMessage, { - destination, - workflowName, - navigated: true, - }) - } catch (e: any) { - logger.error('Navigation failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - - // Get destination info for better error message - const toolCallsById = useCopilotStore.getState().toolCallsById - const toolCall = toolCallsById[this.toolCallId] - 
const params = toolCall?.params as NavigateUIArgs | undefined - const dest = params?.destination - const wfName = params?.workflowName - - let errorMessage = e?.message || 'Failed to navigate' - if (dest === 'workflow' && wfName) { - errorMessage = `Failed to navigate to workflow "${wfName}": ${e?.message || 'Unknown error'}` - } else if (dest) { - errorMessage = `Failed to navigate to ${dest}: ${e?.message || 'Unknown error'}` - } - - await this.markToolComplete(500, errorMessage) - } - } - - async execute(args?: NavigateUIArgs): Promise { - await this.handleAccept(args) - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/auth.ts b/apps/sim/lib/copilot/tools/client/other/auth.ts deleted file mode 100644 index b73a3f0036..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/auth.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { KeyRound, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface AuthArgs { - instruction: string -} - -/** - * Auth tool that spawns a subagent to handle authentication setup. - * This tool auto-executes and the actual work is done by the auth subagent. - * The subagent's output is streamed as nested content under this tool call. 
- */ -export class AuthClientTool extends BaseClientTool { - static readonly id = 'auth' - - constructor(toolCallId: string) { - super(toolCallId, AuthClientTool.id, AuthClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Authenticating', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Authenticating', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Authenticating', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Authenticated', icon: KeyRound }, - [ClientToolCallState.error]: { text: 'Failed to authenticate', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped auth', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted auth', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Authenticating', - completedLabel: 'Authenticated', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the auth tool. - * This just marks the tool as executing - the actual auth work is done server-side - * by the auth subagent, and its output is streamed as subagent events. - */ - async execute(_args?: AuthArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(AuthClientTool.id, AuthClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/other/checkoff-todo.ts b/apps/sim/lib/copilot/tools/client/other/checkoff-todo.ts deleted file mode 100644 index 2a925d82dd..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/checkoff-todo.ts +++ /dev/null @@ -1,61 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Check, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -interface CheckoffTodoArgs { - id?: string - todoId?: string -} - -export class CheckoffTodoClientTool extends BaseClientTool { - static readonly id = 'checkoff_todo' - - constructor(toolCallId: string) { - super(toolCallId, CheckoffTodoClientTool.id, CheckoffTodoClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Marking todo', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Marking todo', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Marked todo complete', icon: Check }, - [ClientToolCallState.error]: { text: 'Failed to mark todo', icon: XCircle }, - }, - } - - async execute(args?: CheckoffTodoArgs): Promise { - const logger = createLogger('CheckoffTodoClientTool') - try { - this.setState(ClientToolCallState.executing) - - const todoId = args?.id || args?.todoId - if (!todoId) { - this.setState(ClientToolCallState.error) - await this.markToolComplete(400, 'Missing todo id') - return - } - - try { - const { useCopilotStore } = await import('@/stores/panel/copilot/store') - const store = useCopilotStore.getState() - if (store.updatePlanTodoStatus) { - store.updatePlanTodoStatus(todoId, 'completed') - } - } catch (e) { - logger.warn('Failed to update todo status in store', { message: (e as any)?.message }) - } - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, 'Todo checked off', { todoId }) - 
this.setState(ClientToolCallState.success) - } catch (e: any) { - logger.error('execute failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to check off todo') - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/crawl-website.ts b/apps/sim/lib/copilot/tools/client/other/crawl-website.ts deleted file mode 100644 index 37c220d36d..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/crawl-website.ts +++ /dev/null @@ -1,52 +0,0 @@ -import { Globe, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class CrawlWebsiteClientTool extends BaseClientTool { - static readonly id = 'crawl_website' - - constructor(toolCallId: string) { - super(toolCallId, CrawlWebsiteClientTool.id, CrawlWebsiteClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Crawling website', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Crawling website', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Crawling website', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Crawled website', icon: Globe }, - [ClientToolCallState.error]: { text: 'Failed to crawl website', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted crawling website', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped crawling website', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.url && typeof params.url === 'string') { - const url = params.url - - switch (state) { - case ClientToolCallState.success: - return `Crawled ${url}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Crawling ${url}` - case 
ClientToolCallState.error: - return `Failed to crawl ${url}` - case ClientToolCallState.aborted: - return `Aborted crawling ${url}` - case ClientToolCallState.rejected: - return `Skipped crawling ${url}` - } - } - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/custom-tool.ts b/apps/sim/lib/copilot/tools/client/other/custom-tool.ts deleted file mode 100644 index eab2818a80..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/custom-tool.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { Loader2, Wrench, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface CustomToolArgs { - instruction: string -} - -/** - * Custom tool that spawns a subagent to manage custom tools. - * This tool auto-executes and the actual work is done by the custom_tool subagent. - * The subagent's output is streamed as nested content under this tool call. 
- */ -export class CustomToolClientTool extends BaseClientTool { - static readonly id = 'custom_tool' - - constructor(toolCallId: string) { - super(toolCallId, CustomToolClientTool.id, CustomToolClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Managing custom tool', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Managing custom tool', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Managing custom tool', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Managed custom tool', icon: Wrench }, - [ClientToolCallState.error]: { text: 'Failed custom tool', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped custom tool', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted custom tool', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Managing custom tool', - completedLabel: 'Custom tool managed', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the custom_tool tool. - * This just marks the tool as executing - the actual custom tool work is done server-side - * by the custom_tool subagent, and its output is streamed as subagent events. - */ - async execute(_args?: CustomToolArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(CustomToolClientTool.id, CustomToolClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/other/debug.ts b/apps/sim/lib/copilot/tools/client/other/debug.ts deleted file mode 100644 index 6be16d8864..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/debug.ts +++ /dev/null @@ -1,60 +0,0 @@ -import { Bug, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface DebugArgs { - error_description: string - context?: string -} - -/** - * Debug tool that spawns a subagent to diagnose workflow issues. - * This tool auto-executes and the actual work is done by the debug subagent. - * The subagent's output is streamed as nested content under this tool call. - */ -export class DebugClientTool extends BaseClientTool { - static readonly id = 'debug' - - constructor(toolCallId: string) { - super(toolCallId, DebugClientTool.id, DebugClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Debugging', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Debugging', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Debugging', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Debugged', icon: Bug }, - [ClientToolCallState.error]: { text: 'Failed to debug', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped debug', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted debug', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Debugging', - completedLabel: 'Debugged', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the debug tool. - * This just marks the tool as executing - the actual debug work is done server-side - * by the debug subagent, and its output is streamed as subagent events. 
- */ - async execute(_args?: DebugArgs): Promise { - // Immediately transition to executing state - no user confirmation needed - this.setState(ClientToolCallState.executing) - // The tool result will come from the server via tool_result event - // when the debug subagent completes its work - } -} - -// Register UI config at module load -registerToolUIConfig(DebugClientTool.id, DebugClientTool.metadata.uiConfig!) diff --git a/apps/sim/lib/copilot/tools/client/other/deploy.ts b/apps/sim/lib/copilot/tools/client/other/deploy.ts deleted file mode 100644 index 80e8f8bc63..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/deploy.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { Loader2, Rocket, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface DeployArgs { - instruction: string -} - -/** - * Deploy tool that spawns a subagent to handle deployment. - * This tool auto-executes and the actual work is done by the deploy subagent. - * The subagent's output is streamed as nested content under this tool call. 
- */ -export class DeployClientTool extends BaseClientTool { - static readonly id = 'deploy' - - constructor(toolCallId: string) { - super(toolCallId, DeployClientTool.id, DeployClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Deploying', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Deploying', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Deploying', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Deployed', icon: Rocket }, - [ClientToolCallState.error]: { text: 'Failed to deploy', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped deploy', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted deploy', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Deploying', - completedLabel: 'Deployed', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the deploy tool. - * This just marks the tool as executing - the actual deploy work is done server-side - * by the deploy subagent, and its output is streamed as subagent events. - */ - async execute(_args?: DeployArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(DeployClientTool.id, DeployClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/other/edit.ts b/apps/sim/lib/copilot/tools/client/other/edit.ts deleted file mode 100644 index 85e67a927e..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/edit.ts +++ /dev/null @@ -1,61 +0,0 @@ -import { Loader2, Pencil, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface EditArgs { - instruction: string -} - -/** - * Edit tool that spawns a subagent to apply code/workflow edits. - * This tool auto-executes and the actual work is done by the edit subagent. - * The subagent's output is streamed as nested content under this tool call. - */ -export class EditClientTool extends BaseClientTool { - static readonly id = 'edit' - - constructor(toolCallId: string) { - super(toolCallId, EditClientTool.id, EditClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Editing', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Editing', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Editing', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Edited', icon: Pencil }, - [ClientToolCallState.error]: { text: 'Failed to apply edit', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped edit', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted edit', icon: XCircle }, - }, - uiConfig: { - isSpecial: true, - subagent: { - streamingLabel: 'Editing', - completedLabel: 'Edited', - shouldCollapse: false, // Edit subagent stays expanded - outputArtifacts: ['edit_summary'], - hideThinkingText: true, // We show WorkflowEditSummary instead - }, - }, - } - - /** - * Execute the edit tool. 
- * This just marks the tool as executing - the actual edit work is done server-side - * by the edit subagent, and its output is streamed as subagent events. - */ - async execute(_args?: EditArgs): Promise { - // Immediately transition to executing state - no user confirmation needed - this.setState(ClientToolCallState.executing) - // The tool result will come from the server via tool_result event - // when the edit subagent completes its work - } -} - -// Register UI config at module load -registerToolUIConfig(EditClientTool.id, EditClientTool.metadata.uiConfig!) diff --git a/apps/sim/lib/copilot/tools/client/other/evaluate.ts b/apps/sim/lib/copilot/tools/client/other/evaluate.ts deleted file mode 100644 index eaf7f542a2..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/evaluate.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { ClipboardCheck, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface EvaluateArgs { - instruction: string -} - -/** - * Evaluate tool that spawns a subagent to evaluate workflows or outputs. - * This tool auto-executes and the actual work is done by the evaluate subagent. - * The subagent's output is streamed as nested content under this tool call. 
- */ -export class EvaluateClientTool extends BaseClientTool { - static readonly id = 'evaluate' - - constructor(toolCallId: string) { - super(toolCallId, EvaluateClientTool.id, EvaluateClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Evaluating', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Evaluating', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Evaluating', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Evaluated', icon: ClipboardCheck }, - [ClientToolCallState.error]: { text: 'Failed to evaluate', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped evaluation', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted evaluation', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Evaluating', - completedLabel: 'Evaluated', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the evaluate tool. - * This just marks the tool as executing - the actual evaluation work is done server-side - * by the evaluate subagent, and its output is streamed as subagent events. - */ - async execute(_args?: EvaluateArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(EvaluateClientTool.id, EvaluateClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/other/get-page-contents.ts b/apps/sim/lib/copilot/tools/client/other/get-page-contents.ts deleted file mode 100644 index 5b30c9111e..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/get-page-contents.ts +++ /dev/null @@ -1,53 +0,0 @@ -import { FileText, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class GetPageContentsClientTool extends BaseClientTool { - static readonly id = 'get_page_contents' - - constructor(toolCallId: string) { - super(toolCallId, GetPageContentsClientTool.id, GetPageContentsClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Getting page contents', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Getting page contents', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Getting page contents', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Retrieved page contents', icon: FileText }, - [ClientToolCallState.error]: { text: 'Failed to get page contents', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted getting page contents', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped getting page contents', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.urls && Array.isArray(params.urls) && params.urls.length > 0) { - const firstUrl = String(params.urls[0]) - const count = params.urls.length - - switch (state) { - case ClientToolCallState.success: - return count > 1 ? `Retrieved ${count} pages` : `Retrieved ${firstUrl}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return count > 1 ? 
`Getting ${count} pages` : `Getting ${firstUrl}` - case ClientToolCallState.error: - return count > 1 ? `Failed to get ${count} pages` : `Failed to get ${firstUrl}` - case ClientToolCallState.aborted: - return count > 1 ? `Aborted getting ${count} pages` : `Aborted getting ${firstUrl}` - case ClientToolCallState.rejected: - return count > 1 ? `Skipped getting ${count} pages` : `Skipped getting ${firstUrl}` - } - } - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/info.ts b/apps/sim/lib/copilot/tools/client/other/info.ts deleted file mode 100644 index e4253a22c6..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/info.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { Info, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface InfoArgs { - instruction: string -} - -/** - * Info tool that spawns a subagent to retrieve information. - * This tool auto-executes and the actual work is done by the info subagent. - * The subagent's output is streamed as nested content under this tool call. 
- */ -export class InfoClientTool extends BaseClientTool { - static readonly id = 'info' - - constructor(toolCallId: string) { - super(toolCallId, InfoClientTool.id, InfoClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Getting info', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Getting info', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Getting info', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Retrieved info', icon: Info }, - [ClientToolCallState.error]: { text: 'Failed to get info', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped info', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted info', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Getting info', - completedLabel: 'Info retrieved', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the info tool. - * This just marks the tool as executing - the actual info work is done server-side - * by the info subagent, and its output is streamed as subagent events. - */ - async execute(_args?: InfoArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(InfoClientTool.id, InfoClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/other/knowledge.ts b/apps/sim/lib/copilot/tools/client/other/knowledge.ts deleted file mode 100644 index 25c853c71e..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/knowledge.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { BookOpen, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface KnowledgeArgs { - instruction: string -} - -/** - * Knowledge tool that spawns a subagent to manage knowledge bases. - * This tool auto-executes and the actual work is done by the knowledge subagent. - * The subagent's output is streamed as nested content under this tool call. - */ -export class KnowledgeClientTool extends BaseClientTool { - static readonly id = 'knowledge' - - constructor(toolCallId: string) { - super(toolCallId, KnowledgeClientTool.id, KnowledgeClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Managing knowledge', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Managing knowledge', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Managing knowledge', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Managed knowledge', icon: BookOpen }, - [ClientToolCallState.error]: { text: 'Failed to manage knowledge', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped knowledge', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted knowledge', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Managing knowledge', - completedLabel: 'Knowledge managed', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the knowledge tool. 
- * This just marks the tool as executing - the actual knowledge search work is done server-side - * by the knowledge subagent, and its output is streamed as subagent events. - */ - async execute(_args?: KnowledgeArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(KnowledgeClientTool.id, KnowledgeClientTool.metadata.uiConfig!) diff --git a/apps/sim/lib/copilot/tools/client/other/make-api-request.ts b/apps/sim/lib/copilot/tools/client/other/make-api-request.ts deleted file mode 100644 index 37d78b17c2..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/make-api-request.ts +++ /dev/null @@ -1,109 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Globe2, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' -import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas' - -interface MakeApiRequestArgs { - url: string - method: 'GET' | 'POST' | 'PUT' - queryParams?: Record - headers?: Record - body?: any -} - -export class MakeApiRequestClientTool extends BaseClientTool { - static readonly id = 'make_api_request' - - constructor(toolCallId: string) { - super(toolCallId, MakeApiRequestClientTool.id, MakeApiRequestClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Preparing API request', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Review API request', icon: Globe2 }, - [ClientToolCallState.executing]: { text: 'Executing API request', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Completed API request', icon: Globe2 }, - [ClientToolCallState.error]: { text: 'Failed to execute API request', icon: XCircle }, - 
[ClientToolCallState.rejected]: { text: 'Skipped API request', icon: MinusCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted API request', icon: XCircle }, - }, - interrupt: { - accept: { text: 'Execute', icon: Globe2 }, - reject: { text: 'Skip', icon: MinusCircle }, - }, - uiConfig: { - interrupt: { - accept: { text: 'Execute', icon: Globe2 }, - reject: { text: 'Skip', icon: MinusCircle }, - showAllowOnce: true, - showAllowAlways: true, - }, - paramsTable: { - columns: [ - { key: 'method', label: 'Method', width: '26%', editable: true, mono: true }, - { key: 'url', label: 'Endpoint', width: '74%', editable: true, mono: true }, - ], - extractRows: (params) => { - return [['request', (params.method || 'GET').toUpperCase(), params.url || '']] - }, - }, - }, - getDynamicText: (params, state) => { - if (params?.url && typeof params.url === 'string') { - const method = params.method || 'GET' - let url = params.url - - // Extract domain from URL for cleaner display - try { - const urlObj = new URL(url) - url = urlObj.hostname + urlObj.pathname - } catch { - // Use URL as-is if parsing fails - } - - switch (state) { - case ClientToolCallState.success: - return `${method} ${url} complete` - case ClientToolCallState.executing: - return `${method} ${url}` - case ClientToolCallState.generating: - return `Preparing ${method} ${url}` - case ClientToolCallState.pending: - return `Review ${method} ${url}` - case ClientToolCallState.error: - return `Failed ${method} ${url}` - case ClientToolCallState.rejected: - return `Skipped ${method} ${url}` - case ClientToolCallState.aborted: - return `Aborted ${method} ${url}` - } - } - return undefined - }, - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(_args?: MakeApiRequestArgs): Promise { - // Tool execution is handled server-side by the orchestrator. 
- this.setState(ClientToolCallState.executing) - } - - async execute(): Promise { - // Tool execution is handled server-side by the orchestrator. - // Client tool classes are retained for UI display configuration only. - this.setState(ClientToolCallState.success) - } -} - -// Register UI config at module load -registerToolUIConfig(MakeApiRequestClientTool.id, MakeApiRequestClientTool.metadata.uiConfig!) diff --git a/apps/sim/lib/copilot/tools/client/other/mark-todo-in-progress.ts b/apps/sim/lib/copilot/tools/client/other/mark-todo-in-progress.ts deleted file mode 100644 index fbed86ea82..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/mark-todo-in-progress.ts +++ /dev/null @@ -1,64 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -interface MarkTodoInProgressArgs { - id?: string - todoId?: string -} - -export class MarkTodoInProgressClientTool extends BaseClientTool { - static readonly id = 'mark_todo_in_progress' - - constructor(toolCallId: string) { - super(toolCallId, MarkTodoInProgressClientTool.id, MarkTodoInProgressClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Marking todo in progress', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Marking todo in progress', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Marking todo in progress', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Marked todo in progress', icon: Loader2 }, - [ClientToolCallState.error]: { text: 'Failed to mark in progress', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted marking in progress', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped marking in progress', icon: MinusCircle }, - }, - } - - async execute(args?: 
MarkTodoInProgressArgs): Promise { - const logger = createLogger('MarkTodoInProgressClientTool') - try { - this.setState(ClientToolCallState.executing) - - const todoId = args?.id || args?.todoId - if (!todoId) { - this.setState(ClientToolCallState.error) - await this.markToolComplete(400, 'Missing todo id') - return - } - - try { - const { useCopilotStore } = await import('@/stores/panel/copilot/store') - const store = useCopilotStore.getState() - if (store.updatePlanTodoStatus) { - store.updatePlanTodoStatus(todoId, 'executing') - } - } catch (e) { - logger.warn('Failed to update todo status in store', { message: (e as any)?.message }) - } - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, 'Todo marked in progress', { todoId }) - this.setState(ClientToolCallState.success) - } catch (e: any) { - logger.error('execute failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to mark todo in progress') - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/oauth-request-access.ts b/apps/sim/lib/copilot/tools/client/other/oauth-request-access.ts deleted file mode 100644 index 725f73bc72..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/oauth-request-access.ts +++ /dev/null @@ -1,174 +0,0 @@ -import { createLogger } from '@sim/logger' -import { CheckCircle, Loader2, MinusCircle, PlugZap, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { OAUTH_PROVIDERS, type OAuthServiceConfig } from '@/lib/oauth' - -const logger = createLogger('OAuthRequestAccessClientTool') - -interface OAuthRequestAccessArgs { - providerName?: string -} - -interface ResolvedServiceInfo { - serviceId: string - providerId: string - service: OAuthServiceConfig -} - -/** - * Finds the service configuration from a provider name. 
- * The providerName should match the exact `name` field returned by get_credentials tool's notConnected services. - */ -function findServiceByName(providerName: string): ResolvedServiceInfo | null { - const normalizedName = providerName.toLowerCase().trim() - - // First pass: exact match (case-insensitive) - for (const [, providerConfig] of Object.entries(OAUTH_PROVIDERS)) { - for (const [serviceId, service] of Object.entries(providerConfig.services)) { - if (service.name.toLowerCase() === normalizedName) { - return { serviceId, providerId: service.providerId, service } - } - } - } - - // Second pass: partial match as fallback for flexibility - for (const [, providerConfig] of Object.entries(OAUTH_PROVIDERS)) { - for (const [serviceId, service] of Object.entries(providerConfig.services)) { - if ( - service.name.toLowerCase().includes(normalizedName) || - normalizedName.includes(service.name.toLowerCase()) - ) { - return { serviceId, providerId: service.providerId, service } - } - } - } - - return null -} - -export interface OAuthConnectEventDetail { - providerName: string - serviceId: string - providerId: string - requiredScopes: string[] - newScopes?: string[] -} - -export class OAuthRequestAccessClientTool extends BaseClientTool { - static readonly id = 'oauth_request_access' - - private providerName?: string - - constructor(toolCallId: string) { - super(toolCallId, OAuthRequestAccessClientTool.id, OAuthRequestAccessClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Requesting integration access', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Requesting integration access', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Requesting integration access', icon: Loader2 }, - [ClientToolCallState.rejected]: { text: 'Skipped integration access', icon: MinusCircle }, - [ClientToolCallState.success]: { text: 'Requested integration access', icon: 
CheckCircle }, - [ClientToolCallState.error]: { text: 'Failed to request integration access', icon: X }, - [ClientToolCallState.aborted]: { text: 'Aborted integration access request', icon: XCircle }, - }, - interrupt: { - accept: { text: 'Connect', icon: PlugZap }, - reject: { text: 'Skip', icon: MinusCircle }, - }, - getDynamicText: (params, state) => { - if (params.providerName) { - const name = params.providerName - switch (state) { - case ClientToolCallState.generating: - case ClientToolCallState.pending: - case ClientToolCallState.executing: - return `Requesting ${name} access` - case ClientToolCallState.rejected: - return `Skipped ${name} access` - case ClientToolCallState.success: - return `Requested ${name} access` - case ClientToolCallState.error: - return `Failed to request ${name} access` - case ClientToolCallState.aborted: - return `Aborted ${name} access request` - } - } - return undefined - }, - } - - async handleAccept(args?: OAuthRequestAccessArgs): Promise { - try { - if (args?.providerName) { - this.providerName = args.providerName - } - - if (!this.providerName) { - logger.error('No provider name provided') - this.setState(ClientToolCallState.error) - await this.markToolComplete(400, 'No provider name specified') - return - } - - // Find the service by name - const serviceInfo = findServiceByName(this.providerName) - if (!serviceInfo) { - logger.error('Could not find OAuth service for provider', { - providerName: this.providerName, - }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(400, `Unknown provider: ${this.providerName}`) - return - } - - const { serviceId, providerId, service } = serviceInfo - - logger.info('Opening OAuth connect modal', { - providerName: this.providerName, - serviceId, - providerId, - }) - - // Move to executing state - this.setState(ClientToolCallState.executing) - - // Dispatch event to open the OAuth modal (same pattern as open-settings) - window.dispatchEvent( - new 
CustomEvent('open-oauth-connect', { - detail: { - providerName: this.providerName, - serviceId, - providerId, - requiredScopes: service.scopes || [], - }, - }) - ) - - // Mark as success - the user opened the prompt, but connection is not guaranteed - this.setState(ClientToolCallState.success) - await this.markToolComplete( - 200, - `The user opened the ${this.providerName} connection prompt and may have connected. Check the connected integrations to verify the connection status.` - ) - } catch (e) { - logger.error('Failed to open OAuth connect modal', { error: e }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, 'Failed to open OAuth connection dialog') - } - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async execute(args?: OAuthRequestAccessArgs): Promise { - await this.handleAccept(args) - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/plan.ts b/apps/sim/lib/copilot/tools/client/other/plan.ts deleted file mode 100644 index 63eaad7b4e..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/plan.ts +++ /dev/null @@ -1,59 +0,0 @@ -import { ListTodo, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface PlanArgs { - request: string -} - -/** - * Plan tool that spawns a subagent to plan an approach. - * This tool auto-executes and the actual work is done by the plan subagent. - * The subagent's output is streamed as nested content under this tool call. 
- */ -export class PlanClientTool extends BaseClientTool { - static readonly id = 'plan' - - constructor(toolCallId: string) { - super(toolCallId, PlanClientTool.id, PlanClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Planning', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Planning', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Planning', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Planned', icon: ListTodo }, - [ClientToolCallState.error]: { text: 'Failed to plan', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped plan', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted plan', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Planning', - completedLabel: 'Planned', - shouldCollapse: true, - outputArtifacts: ['plan'], - }, - }, - } - - /** - * Execute the plan tool. - * This just marks the tool as executing - the actual planning work is done server-side - * by the plan subagent, and its output is streamed as subagent events. - */ - async execute(_args?: PlanArgs): Promise { - // Immediately transition to executing state - no user confirmation needed - this.setState(ClientToolCallState.executing) - // The tool result will come from the server via tool_result event - // when the plan subagent completes its work - } -} - -// Register UI config at module load -registerToolUIConfig(PlanClientTool.id, PlanClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/other/remember-debug.ts b/apps/sim/lib/copilot/tools/client/other/remember-debug.ts deleted file mode 100644 index 822ceda07b..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/remember-debug.ts +++ /dev/null @@ -1,76 +0,0 @@ -import { CheckCircle2, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class RememberDebugClientTool extends BaseClientTool { - static readonly id = 'remember_debug' - - constructor(toolCallId: string) { - super(toolCallId, RememberDebugClientTool.id, RememberDebugClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Validating fix', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Validating fix', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Validating fix', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Validated fix', icon: CheckCircle2 }, - [ClientToolCallState.error]: { text: 'Failed to validate', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted validation', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped validation', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - const operation = params?.operation - - if (operation === 'add' || operation === 'edit') { - // For add/edit, show from problem or solution - const text = params?.problem || params?.solution - if (text && typeof text === 'string') { - switch (state) { - case ClientToolCallState.success: - return `Validated fix ${text}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Validating fix ${text}` - case ClientToolCallState.error: - return `Failed to validate fix ${text}` - case ClientToolCallState.aborted: 
- return `Aborted validating fix ${text}` - case ClientToolCallState.rejected: - return `Skipped validating fix ${text}` - } - } - } else if (operation === 'delete') { - // For delete, show from problem or solution (or id as fallback) - const text = params?.problem || params?.solution || params?.id - if (text && typeof text === 'string') { - switch (state) { - case ClientToolCallState.success: - return `Adjusted fix ${text}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Adjusting fix ${text}` - case ClientToolCallState.error: - return `Failed to adjust fix ${text}` - case ClientToolCallState.aborted: - return `Aborted adjusting fix ${text}` - case ClientToolCallState.rejected: - return `Skipped adjusting fix ${text}` - } - } - } - - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/research.ts b/apps/sim/lib/copilot/tools/client/other/research.ts deleted file mode 100644 index 0a10e89899..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/research.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { Loader2, Search, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface ResearchArgs { - instruction: string -} - -/** - * Research tool that spawns a subagent to research information. - * This tool auto-executes and the actual work is done by the research subagent. - * The subagent's output is streamed as nested content under this tool call. 
- */ -export class ResearchClientTool extends BaseClientTool { - static readonly id = 'research' - - constructor(toolCallId: string) { - super(toolCallId, ResearchClientTool.id, ResearchClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Researching', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Researching', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Researching', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Researched', icon: Search }, - [ClientToolCallState.error]: { text: 'Failed to research', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped research', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted research', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Researching', - completedLabel: 'Researched', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the research tool. - * This just marks the tool as executing - the actual research work is done server-side - * by the research subagent, and its output is streamed as subagent events. - */ - async execute(_args?: ResearchArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(ResearchClientTool.id, ResearchClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/other/scrape-page.ts b/apps/sim/lib/copilot/tools/client/other/scrape-page.ts deleted file mode 100644 index 5979c9f0c3..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/scrape-page.ts +++ /dev/null @@ -1,52 +0,0 @@ -import { Globe, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class ScrapePageClientTool extends BaseClientTool { - static readonly id = 'scrape_page' - - constructor(toolCallId: string) { - super(toolCallId, ScrapePageClientTool.id, ScrapePageClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Scraping page', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Scraping page', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Scraping page', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Scraped page', icon: Globe }, - [ClientToolCallState.error]: { text: 'Failed to scrape page', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted scraping page', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped scraping page', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.url && typeof params.url === 'string') { - const url = params.url - - switch (state) { - case ClientToolCallState.success: - return `Scraped ${url}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Scraping ${url}` - case ClientToolCallState.error: - return `Failed to scrape ${url}` - case ClientToolCallState.aborted: - return `Aborted scraping ${url}` - case ClientToolCallState.rejected: - return `Skipped scraping ${url}` - } - } - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git 
a/apps/sim/lib/copilot/tools/client/other/search-documentation.ts b/apps/sim/lib/copilot/tools/client/other/search-documentation.ts deleted file mode 100644 index 07fa971bbf..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/search-documentation.ts +++ /dev/null @@ -1,53 +0,0 @@ -import { BookOpen, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class SearchDocumentationClientTool extends BaseClientTool { - static readonly id = 'search_documentation' - - constructor(toolCallId: string) { - super(toolCallId, SearchDocumentationClientTool.id, SearchDocumentationClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Searching documentation', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Searching documentation', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Searching documentation', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Completed documentation search', icon: BookOpen }, - [ClientToolCallState.error]: { text: 'Failed to search docs', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted documentation search', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped documentation search', icon: MinusCircle }, - }, - getDynamicText: (params, state) => { - if (params?.query && typeof params.query === 'string') { - const query = params.query - - switch (state) { - case ClientToolCallState.success: - return `Searched docs for ${query}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Searching docs for ${query}` - case ClientToolCallState.error: - return `Failed to search docs for ${query}` - case ClientToolCallState.aborted: - return `Aborted searching docs for ${query}` - case 
ClientToolCallState.rejected: - return `Skipped searching docs for ${query}` - } - } - return undefined - }, - } - - async execute(): Promise { - // Tool execution is handled server-side by the orchestrator. - // Client tool classes are retained for UI display configuration only. - this.setState(ClientToolCallState.success) - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/search-errors.ts b/apps/sim/lib/copilot/tools/client/other/search-errors.ts deleted file mode 100644 index d0eb6cc35b..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/search-errors.ts +++ /dev/null @@ -1,52 +0,0 @@ -import { Bug, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class SearchErrorsClientTool extends BaseClientTool { - static readonly id = 'search_errors' - - constructor(toolCallId: string) { - super(toolCallId, SearchErrorsClientTool.id, SearchErrorsClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Debugging', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Debugging', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Debugging', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Debugged', icon: Bug }, - [ClientToolCallState.error]: { text: 'Failed to debug', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted debugging', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped debugging', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.query && typeof params.query === 'string') { - const query = params.query - - switch (state) { - case ClientToolCallState.success: - return `Debugged ${query}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return 
`Debugging ${query}` - case ClientToolCallState.error: - return `Failed to debug ${query}` - case ClientToolCallState.aborted: - return `Aborted debugging ${query}` - case ClientToolCallState.rejected: - return `Skipped debugging ${query}` - } - } - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/search-library-docs.ts b/apps/sim/lib/copilot/tools/client/other/search-library-docs.ts deleted file mode 100644 index 7dcff295b2..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/search-library-docs.ts +++ /dev/null @@ -1,50 +0,0 @@ -import { BookOpen, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class SearchLibraryDocsClientTool extends BaseClientTool { - static readonly id = 'search_library_docs' - - constructor(toolCallId: string) { - super(toolCallId, SearchLibraryDocsClientTool.id, SearchLibraryDocsClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Reading docs', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Reading docs', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Reading docs', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Read docs', icon: BookOpen }, - [ClientToolCallState.error]: { text: 'Failed to read docs', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted reading docs', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped reading docs', icon: MinusCircle }, - }, - getDynamicText: (params, state) => { - const libraryName = params?.library_name - if (libraryName && typeof libraryName === 'string') { - switch (state) { - case ClientToolCallState.success: - return `Read ${libraryName} docs` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - 
case ClientToolCallState.pending: - return `Reading ${libraryName} docs` - case ClientToolCallState.error: - return `Failed to read ${libraryName} docs` - case ClientToolCallState.aborted: - return `Aborted reading ${libraryName} docs` - case ClientToolCallState.rejected: - return `Skipped reading ${libraryName} docs` - } - } - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/search-online.ts b/apps/sim/lib/copilot/tools/client/other/search-online.ts deleted file mode 100644 index 083658468c..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/search-online.ts +++ /dev/null @@ -1,52 +0,0 @@ -import { Globe, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class SearchOnlineClientTool extends BaseClientTool { - static readonly id = 'search_online' - - constructor(toolCallId: string) { - super(toolCallId, SearchOnlineClientTool.id, SearchOnlineClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Searching online', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Searching online', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Searching online', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Completed online search', icon: Globe }, - [ClientToolCallState.error]: { text: 'Failed to search online', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped online search', icon: MinusCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted online search', icon: XCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.query && typeof params.query === 'string') { - const query = params.query - - switch (state) { - case ClientToolCallState.success: - return `Searched online for 
${query}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Searching online for ${query}` - case ClientToolCallState.error: - return `Failed to search online for ${query}` - case ClientToolCallState.aborted: - return `Aborted searching online for ${query}` - case ClientToolCallState.rejected: - return `Skipped searching online for ${query}` - } - } - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/search-patterns.ts b/apps/sim/lib/copilot/tools/client/other/search-patterns.ts deleted file mode 100644 index e16785a708..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/search-patterns.ts +++ /dev/null @@ -1,52 +0,0 @@ -import { Loader2, MinusCircle, Search, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class SearchPatternsClientTool extends BaseClientTool { - static readonly id = 'search_patterns' - - constructor(toolCallId: string) { - super(toolCallId, SearchPatternsClientTool.id, SearchPatternsClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Searching workflow patterns', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Searching workflow patterns', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Searching workflow patterns', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Found workflow patterns', icon: Search }, - [ClientToolCallState.error]: { text: 'Failed to search patterns', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted pattern search', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped pattern search', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.queries 
&& Array.isArray(params.queries) && params.queries.length > 0) { - const firstQuery = String(params.queries[0]) - - switch (state) { - case ClientToolCallState.success: - return `Searched ${firstQuery}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Searching ${firstQuery}` - case ClientToolCallState.error: - return `Failed to search ${firstQuery}` - case ClientToolCallState.aborted: - return `Aborted searching ${firstQuery}` - case ClientToolCallState.rejected: - return `Skipped searching ${firstQuery}` - } - } - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/sleep.ts b/apps/sim/lib/copilot/tools/client/other/sleep.ts deleted file mode 100644 index 91949ea81a..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/sleep.ts +++ /dev/null @@ -1,157 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, MinusCircle, Moon, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -/** Maximum sleep duration in seconds (3 minutes) */ -const MAX_SLEEP_SECONDS = 180 - -/** Track sleep start times for calculating elapsed time on wake */ -const sleepStartTimes: Record = {} - -interface SleepArgs { - seconds?: number -} - -/** - * Format seconds into a human-readable duration string - */ -function formatDuration(seconds: number): string { - if (seconds >= 60) { - return `${Math.round(seconds / 60)} minute${seconds >= 120 ? 's' : ''}` - } - return `${seconds} second${seconds !== 1 ? 
's' : ''}` -} - -export class SleepClientTool extends BaseClientTool { - static readonly id = 'sleep' - - constructor(toolCallId: string) { - super(toolCallId, SleepClientTool.id, SleepClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Preparing to sleep', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Sleeping', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Sleeping', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Finished sleeping', icon: Moon }, - [ClientToolCallState.error]: { text: 'Interrupted sleep', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped sleep', icon: MinusCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted sleep', icon: MinusCircle }, - [ClientToolCallState.background]: { text: 'Resumed', icon: Moon }, - }, - uiConfig: { - secondaryAction: { - text: 'Wake', - title: 'Wake', - variant: 'tertiary', - showInStates: [ClientToolCallState.executing], - targetState: ClientToolCallState.background, - }, - }, - // No interrupt - auto-execute immediately - getDynamicText: (params, state) => { - const seconds = params?.seconds - if (typeof seconds === 'number' && seconds > 0) { - const displayTime = formatDuration(seconds) - switch (state) { - case ClientToolCallState.success: - return `Slept for ${displayTime}` - case ClientToolCallState.executing: - case ClientToolCallState.pending: - return `Sleeping for ${displayTime}` - case ClientToolCallState.generating: - return `Preparing to sleep for ${displayTime}` - case ClientToolCallState.error: - return `Failed to sleep for ${displayTime}` - case ClientToolCallState.rejected: - return `Skipped sleeping for ${displayTime}` - case ClientToolCallState.aborted: - return `Aborted sleeping for ${displayTime}` - case ClientToolCallState.background: { - // Calculate elapsed time from when sleep started - const elapsedSeconds = params?._elapsedSeconds - if 
(typeof elapsedSeconds === 'number' && elapsedSeconds > 0) { - return `Resumed after ${formatDuration(Math.round(elapsedSeconds))}` - } - return 'Resumed early' - } - } - } - return undefined - }, - } - - /** - * Get elapsed seconds since sleep started - */ - getElapsedSeconds(): number { - const startTime = sleepStartTimes[this.toolCallId] - if (!startTime) return 0 - return (Date.now() - startTime) / 1000 - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: SleepArgs): Promise { - const logger = createLogger('SleepClientTool') - - // Use a timeout slightly longer than max sleep (3 minutes + buffer) - const timeoutMs = (MAX_SLEEP_SECONDS + 30) * 1000 - - await this.executeWithTimeout(async () => { - const params = args || {} - logger.debug('handleAccept() called', { - toolCallId: this.toolCallId, - state: this.getState(), - hasArgs: !!args, - seconds: params.seconds, - }) - - // Validate and clamp seconds - let seconds = typeof params.seconds === 'number' ? params.seconds : 0 - if (seconds < 0) seconds = 0 - if (seconds > MAX_SLEEP_SECONDS) seconds = MAX_SLEEP_SECONDS - - logger.debug('Starting sleep', { seconds }) - - // Track start time for elapsed calculation - sleepStartTimes[this.toolCallId] = Date.now() - - this.setState(ClientToolCallState.executing) - - try { - // Sleep for the specified duration - await new Promise((resolve) => setTimeout(resolve, seconds * 1000)) - - logger.debug('Sleep completed successfully') - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, `Slept for ${seconds} seconds`) - } catch (error) { - const message = error instanceof Error ? 
error.message : String(error) - logger.error('Sleep failed', { error: message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, message) - } finally { - // Clean up start time tracking - delete sleepStartTimes[this.toolCallId] - } - }, timeoutMs) - } - - async execute(args?: SleepArgs): Promise { - // Auto-execute without confirmation - go straight to executing - await this.handleAccept(args) - } -} - -// Register UI config at module load -registerToolUIConfig(SleepClientTool.id, SleepClientTool.metadata.uiConfig!) diff --git a/apps/sim/lib/copilot/tools/client/other/superagent.ts b/apps/sim/lib/copilot/tools/client/other/superagent.ts deleted file mode 100644 index 99ec1fbfe1..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/superagent.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { Loader2, Sparkles, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface SuperagentArgs { - instruction: string -} - -/** - * Superagent tool that spawns a powerful subagent for complex tasks. - * This tool auto-executes and the actual work is done by the superagent. - * The subagent's output is streamed as nested content under this tool call. 
- */ -export class SuperagentClientTool extends BaseClientTool { - static readonly id = 'superagent' - - constructor(toolCallId: string) { - super(toolCallId, SuperagentClientTool.id, SuperagentClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Superagent working', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Superagent working', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Superagent working', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Superagent completed', icon: Sparkles }, - [ClientToolCallState.error]: { text: 'Superagent failed', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Superagent skipped', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Superagent aborted', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Superagent working', - completedLabel: 'Superagent completed', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the superagent tool. - * This just marks the tool as executing - the actual work is done server-side - * by the superagent, and its output is streamed as subagent events. - */ - async execute(_args?: SuperagentArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(SuperagentClientTool.id, SuperagentClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/other/test.ts b/apps/sim/lib/copilot/tools/client/other/test.ts deleted file mode 100644 index 3aa698aad4..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/test.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { FlaskConical, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface TestArgs { - instruction: string -} - -/** - * Test tool that spawns a subagent to run tests. - * This tool auto-executes and the actual work is done by the test subagent. - * The subagent's output is streamed as nested content under this tool call. - */ -export class TestClientTool extends BaseClientTool { - static readonly id = 'test' - - constructor(toolCallId: string) { - super(toolCallId, TestClientTool.id, TestClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Testing', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Testing', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Testing', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Tested', icon: FlaskConical }, - [ClientToolCallState.error]: { text: 'Failed to test', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped test', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted test', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Testing', - completedLabel: 'Tested', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the test tool. - * This just marks the tool as executing - the actual test work is done server-side - * by the test subagent, and its output is streamed as subagent events. 
- */ - async execute(_args?: TestArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(TestClientTool.id, TestClientTool.metadata.uiConfig!) diff --git a/apps/sim/lib/copilot/tools/client/other/tour.ts b/apps/sim/lib/copilot/tools/client/other/tour.ts deleted file mode 100644 index 8faca55877..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/tour.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { Compass, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface TourArgs { - instruction: string -} - -/** - * Tour tool that spawns a subagent to guide the user. - * This tool auto-executes and the actual work is done by the tour subagent. - * The subagent's output is streamed as nested content under this tool call. - */ -export class TourClientTool extends BaseClientTool { - static readonly id = 'tour' - - constructor(toolCallId: string) { - super(toolCallId, TourClientTool.id, TourClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Touring', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Touring', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Touring', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Completed tour', icon: Compass }, - [ClientToolCallState.error]: { text: 'Failed tour', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped tour', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted tour', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Touring', - completedLabel: 'Tour complete', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the tour tool. 
- * This just marks the tool as executing - the actual tour work is done server-side - * by the tour subagent, and its output is streamed as subagent events. - */ - async execute(_args?: TourArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(TourClientTool.id, TourClientTool.metadata.uiConfig!) diff --git a/apps/sim/lib/copilot/tools/client/other/workflow.ts b/apps/sim/lib/copilot/tools/client/other/workflow.ts deleted file mode 100644 index 5b99e73e94..0000000000 --- a/apps/sim/lib/copilot/tools/client/other/workflow.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { GitBranch, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface WorkflowArgs { - instruction: string -} - -/** - * Workflow tool that spawns a subagent to manage workflows. - * This tool auto-executes and the actual work is done by the workflow subagent. - * The subagent's output is streamed as nested content under this tool call. 
- */ -export class WorkflowClientTool extends BaseClientTool { - static readonly id = 'workflow' - - constructor(toolCallId: string) { - super(toolCallId, WorkflowClientTool.id, WorkflowClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Managing workflow', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Managing workflow', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Managing workflow', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Managed workflow', icon: GitBranch }, - [ClientToolCallState.error]: { text: 'Failed to manage workflow', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped workflow', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted workflow', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Managing workflow', - completedLabel: 'Workflow managed', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the workflow tool. - * This just marks the tool as executing - the actual workflow work is done server-side - * by the workflow subagent, and its output is streamed as subagent events. - */ - async execute(_args?: WorkflowArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(WorkflowClientTool.id, WorkflowClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/registry.ts b/apps/sim/lib/copilot/tools/client/registry.ts deleted file mode 100644 index 7dfb757aa9..0000000000 --- a/apps/sim/lib/copilot/tools/client/registry.ts +++ /dev/null @@ -1,34 +0,0 @@ -import { createLogger } from '@sim/logger' -import type { ClientToolDefinition, ToolExecutionContext } from '@/lib/copilot/tools/client/types' - -const logger = createLogger('ClientToolRegistry') - -const tools: Record> = {} - -export function registerTool(def: ClientToolDefinition) { - tools[def.name] = def -} - -export function getTool(name: string): ClientToolDefinition | undefined { - return tools[name] -} - -export function createExecutionContext(params: { - toolCallId: string - toolName: string -}): ToolExecutionContext { - const { toolCallId, toolName } = params - return { - toolCallId, - toolName, - log: (level, message, extra) => { - try { - logger[level](message, { toolCallId, toolName, ...(extra || {}) }) - } catch {} - }, - } -} - -export function getRegisteredTools(): Record> { - return { ...tools } -} diff --git a/apps/sim/lib/copilot/tools/client/tool-display-registry.ts b/apps/sim/lib/copilot/tools/client/tool-display-registry.ts new file mode 100644 index 0000000000..f7242d12c1 --- /dev/null +++ b/apps/sim/lib/copilot/tools/client/tool-display-registry.ts @@ -0,0 +1,2240 @@ +import type { LucideIcon } from 'lucide-react' +import { Blocks, BookOpen, Bug, Check, CheckCircle, CheckCircle2, ClipboardCheck, Compass, Database, FileCode, FileText, FlaskConical, GitBranch, Globe, Globe2, Grid2x2, Grid2x2Check, Grid2x2X, Info, Key, KeyRound, ListChecks, ListFilter, ListTodo, Loader2, MessageSquare, MinusCircle, Moon, Navigation, Pencil, PencilLine, Play, PlugZap, Plus, Rocket, Search, Server, Settings2, Sparkles, Tag, TerminalSquare, WorkflowIcon, Wrench, X, XCircle, Zap } from 'lucide-react' +import { getLatestBlock } from '@/blocks/registry' +import { getCustomTool } from '@/hooks/queries/custom-tools' +import { 
useWorkflowRegistry } from '@/stores/workflows/registry/store' + +export enum ClientToolCallState { + generating = 'generating', + pending = 'pending', + executing = 'executing', + aborted = 'aborted', + rejected = 'rejected', + success = 'success', + error = 'error', + review = 'review', + background = 'background', +} + +export interface ClientToolDisplay { + text: string + icon: LucideIcon +} + +export type DynamicTextFormatter = ( + params: Record, + state: ClientToolCallState +) => string | undefined + +export interface ToolUIConfig { + isSpecial?: boolean + subagent?: boolean + interrupt?: boolean + customRenderer?: string + paramsTable?: any + dynamicText?: DynamicTextFormatter + secondaryAction?: any + alwaysExpanded?: boolean + subagentLabels?: { + streaming: string + completed: string + } +} + +interface ToolMetadata { + displayNames: Partial> + interrupt?: { + accept: ClientToolDisplay + reject: ClientToolDisplay + } + getDynamicText?: DynamicTextFormatter + uiConfig?: { + isSpecial?: boolean + subagent?: { + streamingLabel?: string + completedLabel?: string + } + interrupt?: any + customRenderer?: string + paramsTable?: any + secondaryAction?: any + alwaysExpanded?: boolean + } +} + +interface ToolDisplayEntry { + displayNames: Partial> + uiConfig?: ToolUIConfig +} + +function toUiConfig(metadata?: ToolMetadata): ToolUIConfig | undefined { + const legacy = metadata?.uiConfig + const subagent = legacy?.subagent + const dynamicText = metadata?.getDynamicText + if (!legacy && !dynamicText) return undefined + + const config: ToolUIConfig = { + isSpecial: legacy?.isSpecial === true, + subagent: !!legacy?.subagent, + interrupt: !!legacy?.interrupt, + customRenderer: legacy?.customRenderer, + paramsTable: legacy?.paramsTable, + dynamicText, + secondaryAction: legacy?.secondaryAction, + alwaysExpanded: legacy?.alwaysExpanded, + } + + if (subagent?.streamingLabel || subagent?.completedLabel) { + config.subagentLabels = { + streaming: subagent.streamingLabel || 
'', + completed: subagent.completedLabel || '', + } + } + + return config +} + +function toToolDisplayEntry(metadata?: ToolMetadata): ToolDisplayEntry { + return { + displayNames: metadata?.displayNames || {}, + uiConfig: toUiConfig(metadata), + } +} + +const META_auth: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Authenticating', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Authenticating', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Authenticating', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Authenticated', icon: KeyRound }, + [ClientToolCallState.error]: { text: 'Failed to authenticate', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped auth', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted auth', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Authenticating', + completedLabel: 'Authenticated', + shouldCollapse: true, + outputArtifacts: [], + }, + }, + } + +const META_check_deployment_status: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Checking deployment status', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Checking deployment status', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Checking deployment status', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Checked deployment status', icon: Rocket }, + [ClientToolCallState.error]: { text: 'Failed to check deployment status', icon: X }, + [ClientToolCallState.aborted]: { + text: 'Aborted checking deployment status', + icon: XCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped checking deployment status', + icon: XCircle, + }, + }, + interrupt: undefined, + } + +const META_checkoff_todo: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Marking todo', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Marking todo', icon: Loader2 }, + 
[ClientToolCallState.success]: { text: 'Marked todo complete', icon: Check }, + [ClientToolCallState.error]: { text: 'Failed to mark todo', icon: XCircle }, + }, + } + +const META_crawl_website: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Crawling website', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Crawling website', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Crawling website', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Crawled website', icon: Globe }, + [ClientToolCallState.error]: { text: 'Failed to crawl website', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted crawling website', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped crawling website', icon: MinusCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.url && typeof params.url === 'string') { + const url = params.url + + switch (state) { + case ClientToolCallState.success: + return `Crawled ${url}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Crawling ${url}` + case ClientToolCallState.error: + return `Failed to crawl ${url}` + case ClientToolCallState.aborted: + return `Aborted crawling ${url}` + case ClientToolCallState.rejected: + return `Skipped crawling ${url}` + } + } + return undefined + }, + } + +const META_create_workspace_mcp_server: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Preparing to create MCP server', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Create MCP server?', icon: Server }, + [ClientToolCallState.executing]: { text: 'Creating MCP server', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Created MCP server', icon: Server }, + [ClientToolCallState.error]: { text: 'Failed to create MCP server', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted creating MCP 
server', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped creating MCP server', icon: XCircle }, + }, + interrupt: { + accept: { text: 'Create', icon: Plus }, + reject: { text: 'Skip', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const name = params?.name || 'MCP server' + switch (state) { + case ClientToolCallState.success: + return `Created MCP server "${name}"` + case ClientToolCallState.executing: + return `Creating MCP server "${name}"` + case ClientToolCallState.generating: + return `Preparing to create "${name}"` + case ClientToolCallState.pending: + return `Create MCP server "${name}"?` + case ClientToolCallState.error: + return `Failed to create "${name}"` + } + return undefined + }, + } + +const META_custom_tool: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Managing custom tool', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Managing custom tool', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Managing custom tool', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Managed custom tool', icon: Wrench }, + [ClientToolCallState.error]: { text: 'Failed custom tool', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped custom tool', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted custom tool', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Managing custom tool', + completedLabel: 'Custom tool managed', + shouldCollapse: true, + outputArtifacts: [], + }, + }, + } + +const META_debug: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Debugging', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Debugging', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Debugging', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Debugged', icon: Bug }, + [ClientToolCallState.error]: { text: 'Failed to debug', icon: XCircle }, + 
    // (continuation of META_debug.displayNames)
    [ClientToolCallState.rejected]: { text: 'Skipped debug', icon: XCircle },
    [ClientToolCallState.aborted]: { text: 'Aborted debug', icon: XCircle },
  },
  uiConfig: {
    subagent: {
      streamingLabel: 'Debugging',
      completedLabel: 'Debugged',
      shouldCollapse: true,
      outputArtifacts: [],
    },
  },
}

// Generic deploy subagent; collapses in the UI once complete.
const META_deploy: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Deploying', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Deploying', icon: Loader2 },
    [ClientToolCallState.executing]: { text: 'Deploying', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Deployed', icon: Rocket },
    [ClientToolCallState.error]: { text: 'Failed to deploy', icon: XCircle },
    [ClientToolCallState.rejected]: { text: 'Skipped deploy', icon: XCircle },
    [ClientToolCallState.aborted]: { text: 'Aborted deploy', icon: XCircle },
  },
  uiConfig: {
    subagent: {
      streamingLabel: 'Deploying',
      completedLabel: 'Deployed',
      shouldCollapse: true,
      outputArtifacts: [],
    },
  },
}

// Deploy-workflow-as-API tool. Confirmation required (interrupt + allow
// once/always). getDynamicText distinguishes deploy/undeploy/redeploy by
// consulting the workflow registry's current deployment status.
const META_deploy_api: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: {
      text: 'Preparing to deploy API',
      icon: Loader2,
    },
    [ClientToolCallState.pending]: { text: 'Deploy as API?', icon: Rocket },
    [ClientToolCallState.executing]: { text: 'Deploying API', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Deployed API', icon: Rocket },
    [ClientToolCallState.error]: { text: 'Failed to deploy API', icon: XCircle },
    [ClientToolCallState.aborted]: {
      text: 'Aborted deploying API',
      icon: XCircle,
    },
    [ClientToolCallState.rejected]: {
      text: 'Skipped deploying API',
      icon: XCircle,
    },
  },
  interrupt: {
    accept: { text: 'Deploy', icon: Rocket },
    reject: { text: 'Skip', icon: XCircle },
  },
  uiConfig: {
    isSpecial: true,
    interrupt: {
      accept: { text: 'Deploy', icon: Rocket },
      reject: { text: 'Skip', icon: XCircle },
      showAllowOnce: true,
      showAllowAlways: true,
    },
  },
  getDynamicText: (params, state) => {
    const action = params?.action === 'undeploy' ? 'undeploy' : 'deploy'

    // Falls back to the active workflow when no explicit workflowId is given.
    const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId
    const isAlreadyDeployed = workflowId
      ? useWorkflowRegistry.getState().getWorkflowDeploymentStatus(workflowId)?.isDeployed
      : false

    let actionText = action
    let actionTextIng = action === 'undeploy' ? 'undeploying' : 'deploying'
    const actionTextPast = action === 'undeploy' ? 'undeployed' : 'deployed'

    // Re-deploying an already-deployed workflow is worded as "redeploy".
    if (action === 'deploy' && isAlreadyDeployed) {
      actionText = 'redeploy'
      actionTextIng = 'redeploying'
    }

    const actionCapitalized = actionText.charAt(0).toUpperCase() + actionText.slice(1)

    switch (state) {
      case ClientToolCallState.success:
        return `API ${actionTextPast}`
      case ClientToolCallState.executing:
        return `${actionCapitalized}ing API`
      case ClientToolCallState.generating:
        return `Preparing to ${actionText} API`
      case ClientToolCallState.pending:
        return `${actionCapitalized} API?`
      case ClientToolCallState.error:
        return `Failed to ${actionText} API`
      case ClientToolCallState.aborted:
        return `Aborted ${actionTextIng} API`
      case ClientToolCallState.rejected:
        return `Skipped ${actionTextIng} API`
    }
    return undefined
  },
}

// Deploy-workflow-as-chat tool. Confirmation required; labels switch between
// deploy and undeploy wording based on params.action.
const META_deploy_chat: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: {
      text: 'Preparing to deploy chat',
      icon: Loader2,
    },
    [ClientToolCallState.pending]: { text: 'Deploy as chat?', icon: MessageSquare },
    [ClientToolCallState.executing]: { text: 'Deploying chat', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Deployed chat', icon: MessageSquare },
    [ClientToolCallState.error]: { text: 'Failed to deploy chat', icon: XCircle },
    [ClientToolCallState.aborted]: {
      text: 'Aborted deploying chat',
      icon: XCircle,
    },
    [ClientToolCallState.rejected]: {
      text: 'Skipped deploying chat',
      icon: XCircle,
    },
  },
  interrupt: {
    accept: { text: 'Deploy Chat', icon: MessageSquare },
    reject: { text: 'Skip', icon: XCircle },
  },
  uiConfig: {
    isSpecial: true,
    interrupt: {
      accept: { text: 'Deploy Chat', icon: MessageSquare },
      reject: { text: 'Skip', icon: XCircle },
      showAllowOnce: true,
      showAllowAlways: true,
    },
  },
  getDynamicText: (params, state) => {
    const action = params?.action === 'undeploy' ? 'undeploy' : 'deploy'

    switch (state) {
      case ClientToolCallState.success:
        return action === 'undeploy' ? 'Chat undeployed' : 'Chat deployed'
      case ClientToolCallState.executing:
        return action === 'undeploy' ? 'Undeploying chat' : 'Deploying chat'
      case ClientToolCallState.generating:
        return `Preparing to ${action} chat`
      case ClientToolCallState.pending:
        return action === 'undeploy' ? 'Undeploy chat?' : 'Deploy as chat?'
      case ClientToolCallState.error:
        return `Failed to ${action} chat`
      case ClientToolCallState.aborted:
        return action === 'undeploy' ? 'Aborted undeploying chat' : 'Aborted deploying chat'
      case ClientToolCallState.rejected:
        return action === 'undeploy' ? 'Skipped undeploying chat' : 'Skipped deploying chat'
    }
    return undefined
  },
}

// Deploy-workflow-to-MCP tool. Confirmation required; labels name the tool
// being deployed when params.toolName is present.
const META_deploy_mcp: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: {
      text: 'Preparing to deploy to MCP',
      icon: Loader2,
    },
    [ClientToolCallState.pending]: { text: 'Deploy to MCP server?', icon: Server },
    [ClientToolCallState.executing]: { text: 'Deploying to MCP', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Deployed to MCP', icon: Server },
    [ClientToolCallState.error]: { text: 'Failed to deploy to MCP', icon: XCircle },
    [ClientToolCallState.aborted]: { text: 'Aborted MCP deployment', icon: XCircle },
    [ClientToolCallState.rejected]: { text: 'Skipped MCP deployment', icon: XCircle },
  },
  interrupt: {
    accept: { text: 'Deploy', icon: Server },
    reject: { text: 'Skip', icon: XCircle },
  },
  uiConfig: {
    isSpecial: true,
    interrupt: {
      accept: { text: 'Deploy', icon: Server },
      reject: { text: 'Skip', icon: XCircle },
      showAllowOnce: true,
      showAllowAlways: true,
    },
  },
  getDynamicText: (params, state) => {
    const toolName = params?.toolName || 'workflow'
    switch (state) {
      case ClientToolCallState.success:
        return `Deployed "${toolName}" to MCP`
      case ClientToolCallState.executing:
        return `Deploying "${toolName}" to MCP`
      case ClientToolCallState.generating:
        return `Preparing to deploy to MCP`
      case ClientToolCallState.pending:
        return `Deploy "${toolName}" to MCP?`
      case ClientToolCallState.error:
        return `Failed to deploy to MCP`
    }
    return undefined
  },
}

// Edit subagent metadata (continues on the next span of this file).
const META_edit: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Editing', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Editing', icon: Loader2 },
    [ClientToolCallState.executing]: { text: 'Editing', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Edited', icon: Pencil },
    [ClientToolCallState.error]: { text: 'Failed to apply edit', icon: XCircle },
    // (continuation of META_edit.displayNames)
    [ClientToolCallState.rejected]: { text: 'Skipped edit', icon: XCircle },
    [ClientToolCallState.aborted]: { text: 'Aborted edit', icon: XCircle },
  },
  uiConfig: {
    isSpecial: true,
    subagent: {
      streamingLabel: 'Editing',
      completedLabel: 'Edited',
      shouldCollapse: false, // Edit subagent stays expanded
      outputArtifacts: ['edit_summary'],
      hideThinkingText: true, // We show WorkflowEditSummary instead
    },
  },
}

// Direct workflow-edit tool; rendered via the dedicated edit_summary renderer
// and supports a distinct "review" state for pending changes.
const META_edit_workflow: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Editing your workflow', icon: Loader2 },
    [ClientToolCallState.executing]: { text: 'Editing your workflow', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Edited your workflow', icon: Grid2x2Check },
    [ClientToolCallState.error]: { text: 'Failed to edit your workflow', icon: XCircle },
    [ClientToolCallState.review]: { text: 'Review your workflow changes', icon: Grid2x2 },
    [ClientToolCallState.rejected]: { text: 'Rejected workflow changes', icon: Grid2x2X },
    [ClientToolCallState.aborted]: { text: 'Aborted editing your workflow', icon: MinusCircle },
    [ClientToolCallState.pending]: { text: 'Editing your workflow', icon: Loader2 },
  },
  uiConfig: {
    isSpecial: true,
    customRenderer: 'edit_summary',
  },
}

// Evaluation subagent; collapses in the UI once complete.
const META_evaluate: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Evaluating', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Evaluating', icon: Loader2 },
    [ClientToolCallState.executing]: { text: 'Evaluating', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Evaluated', icon: ClipboardCheck },
    [ClientToolCallState.error]: { text: 'Failed to evaluate', icon: XCircle },
    [ClientToolCallState.rejected]: { text: 'Skipped evaluation', icon: XCircle },
    [ClientToolCallState.aborted]: { text: 'Aborted evaluation', icon: XCircle },
  },
  uiConfig: {
    subagent: {
      streamingLabel: 'Evaluating',
      completedLabel: 'Evaluated',
      shouldCollapse: true,
      outputArtifacts: [],
    },
  },
}

// Reads a single block's configuration. Labels resolve the human-readable
// block name via getLatestBlock, falling back to the raw type with underscores
// replaced, optionally suffixed with the requested operation.
const META_get_block_config: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Getting block config', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Getting block config', icon: Loader2 },
    [ClientToolCallState.executing]: { text: 'Getting block config', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Retrieved block config', icon: FileCode },
    [ClientToolCallState.error]: { text: 'Failed to get block config', icon: XCircle },
    [ClientToolCallState.aborted]: { text: 'Aborted getting block config', icon: XCircle },
    [ClientToolCallState.rejected]: {
      text: 'Skipped getting block config',
      icon: MinusCircle,
    },
  },
  getDynamicText: (params, state) => {
    if (params?.blockType && typeof params.blockType === 'string') {
      const blockConfig = getLatestBlock(params.blockType)
      const blockName = (blockConfig?.name ?? params.blockType.replace(/_/g, ' ')).toLowerCase()
      const opSuffix = params.operation ? ` (${params.operation})` : ''

      switch (state) {
        case ClientToolCallState.success:
          return `Retrieved ${blockName}${opSuffix} config`
        case ClientToolCallState.executing:
        case ClientToolCallState.generating:
        case ClientToolCallState.pending:
          return `Retrieving ${blockName}${opSuffix} config`
        case ClientToolCallState.error:
          return `Failed to retrieve ${blockName}${opSuffix} config`
        case ClientToolCallState.aborted:
          return `Aborted retrieving ${blockName}${opSuffix} config`
        case ClientToolCallState.rejected:
          return `Skipped retrieving ${blockName}${opSuffix} config`
      }
    }
    return undefined
  },
}

// Lists a block's available operations. Accepts several param spellings
// (blockId/blockType/block_id/block_type) — hence the `as any` casts.
const META_get_block_options: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Getting block operations', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Getting block operations', icon: Loader2 },
    [ClientToolCallState.executing]: { text: 'Getting block operations', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Retrieved block operations', icon: ListFilter },
    [ClientToolCallState.error]: { text: 'Failed to get block operations', icon: XCircle },
    [ClientToolCallState.aborted]: { text: 'Aborted getting block operations', icon: XCircle },
    [ClientToolCallState.rejected]: {
      text: 'Skipped getting block operations',
      icon: MinusCircle,
    },
  },
  getDynamicText: (params, state) => {
    const blockId =
      (params as any)?.blockId ||
      (params as any)?.blockType ||
      (params as any)?.block_id ||
      (params as any)?.block_type
    if (typeof blockId === 'string') {
      const blockConfig = getLatestBlock(blockId)
      const blockName = (blockConfig?.name ?? blockId.replace(/_/g, ' ')).toLowerCase()

      switch (state) {
        case ClientToolCallState.success:
          return `Retrieved ${blockName} operations`
        case ClientToolCallState.executing:
        case ClientToolCallState.generating:
        case ClientToolCallState.pending:
          return `Retrieving ${blockName} operations`
        case ClientToolCallState.error:
          return `Failed to retrieve ${blockName} operations`
        case ClientToolCallState.aborted:
          return `Aborted retrieving ${blockName} operations`
        case ClientToolCallState.rejected:
          return `Skipped retrieving ${blockName} operations`
      }
    }
    return undefined
  },
}

// Reads output schemas for one or more blocks; labels pluralize on count.
const META_get_block_outputs: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Getting block outputs', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Getting block outputs', icon: Tag },
    [ClientToolCallState.executing]: { text: 'Getting block outputs', icon: Loader2 },
    [ClientToolCallState.aborted]: { text: 'Aborted getting outputs', icon: XCircle },
    [ClientToolCallState.success]: { text: 'Retrieved block outputs', icon: Tag },
    [ClientToolCallState.error]: { text: 'Failed to get outputs', icon: X },
    [ClientToolCallState.rejected]: { text: 'Skipped getting outputs', icon: XCircle },
  },
  getDynamicText: (params, state) => {
    const blockIds = params?.blockIds
    if (blockIds && Array.isArray(blockIds) && blockIds.length > 0) {
      const count = blockIds.length
      switch (state) {
        case ClientToolCallState.success:
          return `Retrieved outputs for ${count} block${count > 1 ? 's' : ''}`
        case ClientToolCallState.executing:
        case ClientToolCallState.generating:
        case ClientToolCallState.pending:
          return `Getting outputs for ${count} block${count > 1 ? 's' : ''}`
        case ClientToolCallState.error:
          return `Failed to get outputs for ${count} block${count > 1 ? 's' : ''}`
      }
    }
    return undefined
  },
}

// Reads upstream references for one or more blocks; labels pluralize on count.
const META_get_block_upstream_references: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Getting upstream references', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Getting upstream references', icon: GitBranch },
    [ClientToolCallState.executing]: { text: 'Getting upstream references', icon: Loader2 },
    [ClientToolCallState.aborted]: { text: 'Aborted getting references', icon: XCircle },
    [ClientToolCallState.success]: { text: 'Retrieved upstream references', icon: GitBranch },
    [ClientToolCallState.error]: { text: 'Failed to get references', icon: X },
    [ClientToolCallState.rejected]: { text: 'Skipped getting references', icon: XCircle },
  },
  getDynamicText: (params, state) => {
    const blockIds = params?.blockIds
    if (blockIds && Array.isArray(blockIds) && blockIds.length > 0) {
      const count = blockIds.length
      switch (state) {
        case ClientToolCallState.success:
          return `Retrieved references for ${count} block${count > 1 ? 's' : ''}`
        case ClientToolCallState.executing:
        case ClientToolCallState.generating:
        case ClientToolCallState.pending:
          return `Getting references for ${count} block${count > 1 ? 's' : ''}`
        case ClientToolCallState.error:
          return `Failed to get references for ${count} block${count > 1 ? 's' : ''}`
      }
    }
    return undefined
  },
}

// Discovery tool for available blocks/tools; static labels only.
const META_get_blocks_and_tools: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Exploring available options', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Exploring available options', icon: Loader2 },
    [ClientToolCallState.executing]: { text: 'Exploring available options', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Explored available options', icon: Blocks },
    [ClientToolCallState.error]: { text: 'Failed to explore options', icon: XCircle },
    [ClientToolCallState.aborted]: { text: 'Aborted exploring options', icon: MinusCircle },
    [ClientToolCallState.rejected]: { text: 'Skipped exploring options', icon: MinusCircle },
  },
  interrupt: undefined,
}

// Fetches metadata for specific blocks; the dynamic label lists up to three
// block names, with an ellipsis for any remainder.
const META_get_blocks_metadata: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Searching block choices', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Searching block choices', icon: Loader2 },
    [ClientToolCallState.executing]: { text: 'Searching block choices', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Searched block choices', icon: ListFilter },
    [ClientToolCallState.error]: { text: 'Failed to search block choices', icon: XCircle },
    [ClientToolCallState.aborted]: { text: 'Aborted searching block choices', icon: XCircle },
    [ClientToolCallState.rejected]: {
      text: 'Skipped searching block choices',
      icon: MinusCircle,
    },
  },
  getDynamicText: (params, state) => {
    if (params?.blockIds && Array.isArray(params.blockIds) && params.blockIds.length > 0) {
      const blockList = params.blockIds
        .slice(0, 3)
        .map((blockId) => blockId.replace(/_/g, ' '))
        .join(', ')
      const more = params.blockIds.length > 3 ? '...' : ''
      const blocks = `${blockList}${more}`

      switch (state) {
        case ClientToolCallState.success:
          return `Searched ${blocks}`
        case ClientToolCallState.executing:
        case ClientToolCallState.generating:
        case ClientToolCallState.pending:
          return `Searching ${blocks}`
        case ClientToolCallState.error:
          return `Failed to search ${blocks}`
        case ClientToolCallState.aborted:
          return `Aborted searching ${blocks}`
        case ClientToolCallState.rejected:
          return `Skipped searching ${blocks}`
      }
    }
    return undefined
  },
}

// Lists the user's connected integrations (credentials); static labels only.
const META_get_credentials: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Fetching connected integrations', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Fetching connected integrations', icon: Loader2 },
    [ClientToolCallState.executing]: { text: 'Fetching connected integrations', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Fetched connected integrations', icon: Key },
    [ClientToolCallState.error]: {
      text: 'Failed to fetch connected integrations',
      icon: XCircle,
    },
    [ClientToolCallState.aborted]: {
      text: 'Aborted fetching connected integrations',
      icon: MinusCircle,
    },
    [ClientToolCallState.rejected]: {
      text: 'Skipped fetching connected integrations',
      icon: MinusCircle,
    },
  },
}

// RAG example lookup (continues on the next span of this file).
const META_get_examples_rag: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Fetching examples', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Fetching examples', icon: Loader2 },
    [ClientToolCallState.executing]: { text: 'Fetching examples', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Fetched examples', icon: Search },
    [ClientToolCallState.error]: { text: 'Failed to fetch examples', icon: XCircle },
    [ClientToolCallState.aborted]: { text: 'Aborted getting examples', icon: MinusCircle },
    [ClientToolCallState.rejected]: { text: 'Skipped getting examples', icon: MinusCircle },
  },
  interrupt: undefined,
  // (continuation of META_get_examples_rag) — labels include the search query.
  getDynamicText: (params, state) => {
    if (params?.query && typeof params.query === 'string') {
      const query = params.query

      switch (state) {
        case ClientToolCallState.success:
          return `Found examples for ${query}`
        case ClientToolCallState.executing:
        case ClientToolCallState.generating:
        case ClientToolCallState.pending:
          return `Searching examples for ${query}`
        case ClientToolCallState.error:
          return `Failed to find examples for ${query}`
        case ClientToolCallState.aborted:
          return `Aborted searching examples for ${query}`
        case ClientToolCallState.rejected:
          return `Skipped searching examples for ${query}`
      }
    }
    return undefined
  },
}

// Looks up operation examples while designing a workflow component; labels
// include the design query.
const META_get_operations_examples: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Designing workflow component', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Designing workflow component', icon: Loader2 },
    [ClientToolCallState.executing]: { text: 'Designing workflow component', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Designed workflow component', icon: Zap },
    [ClientToolCallState.error]: { text: 'Failed to design workflow component', icon: XCircle },
    [ClientToolCallState.aborted]: {
      text: 'Aborted designing workflow component',
      icon: MinusCircle,
    },
    [ClientToolCallState.rejected]: {
      text: 'Skipped designing workflow component',
      icon: MinusCircle,
    },
  },
  interrupt: undefined,
  getDynamicText: (params, state) => {
    if (params?.query && typeof params.query === 'string') {
      const query = params.query

      switch (state) {
        case ClientToolCallState.success:
          return `Designed ${query}`
        case ClientToolCallState.executing:
        case ClientToolCallState.generating:
        case ClientToolCallState.pending:
          return `Designing ${query}`
        case ClientToolCallState.error:
          return `Failed to design ${query}`
        case ClientToolCallState.aborted:
          return `Aborted designing ${query}`
        case ClientToolCallState.rejected:
          return `Skipped designing ${query}`
      }
    }
    return undefined
  },
}

// Fetches web page contents. Labels show the single URL, or a count when
// multiple URLs were requested.
const META_get_page_contents: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Getting page contents', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Getting page contents', icon: Loader2 },
    [ClientToolCallState.executing]: { text: 'Getting page contents', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Retrieved page contents', icon: FileText },
    [ClientToolCallState.error]: { text: 'Failed to get page contents', icon: XCircle },
    [ClientToolCallState.aborted]: { text: 'Aborted getting page contents', icon: MinusCircle },
    [ClientToolCallState.rejected]: { text: 'Skipped getting page contents', icon: MinusCircle },
  },
  interrupt: undefined,
  getDynamicText: (params, state) => {
    if (params?.urls && Array.isArray(params.urls) && params.urls.length > 0) {
      const firstUrl = String(params.urls[0])
      const count = params.urls.length

      switch (state) {
        case ClientToolCallState.success:
          return count > 1 ? `Retrieved ${count} pages` : `Retrieved ${firstUrl}`
        case ClientToolCallState.executing:
        case ClientToolCallState.generating:
        case ClientToolCallState.pending:
          return count > 1 ? `Getting ${count} pages` : `Getting ${firstUrl}`
        case ClientToolCallState.error:
          return count > 1 ? `Failed to get ${count} pages` : `Failed to get ${firstUrl}`
        case ClientToolCallState.aborted:
          return count > 1 ? `Aborted getting ${count} pages` : `Aborted getting ${firstUrl}`
        case ClientToolCallState.rejected:
          return count > 1 ? `Skipped getting ${count} pages` : `Skipped getting ${firstUrl}`
      }
    }
    return undefined
  },
}

// Lists trigger-capable blocks; static labels only.
const META_get_trigger_blocks: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Finding trigger blocks', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Finding trigger blocks', icon: Loader2 },
    [ClientToolCallState.executing]: { text: 'Finding trigger blocks', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Found trigger blocks', icon: ListFilter },
    [ClientToolCallState.error]: { text: 'Failed to find trigger blocks', icon: XCircle },
    [ClientToolCallState.aborted]: { text: 'Aborted finding trigger blocks', icon: MinusCircle },
    [ClientToolCallState.rejected]: { text: 'Skipped finding trigger blocks', icon: MinusCircle },
  },
  interrupt: undefined,
}

// Fetches trigger usage examples; static labels only.
const META_get_trigger_examples: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Selecting a trigger', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Selecting a trigger', icon: Loader2 },
    [ClientToolCallState.executing]: { text: 'Selecting a trigger', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Selected a trigger', icon: Zap },
    [ClientToolCallState.error]: { text: 'Failed to select a trigger', icon: XCircle },
    [ClientToolCallState.aborted]: { text: 'Aborted selecting a trigger', icon: MinusCircle },
    [ClientToolCallState.rejected]: { text: 'Skipped selecting a trigger', icon: MinusCircle },
  },
  interrupt: undefined,
}

// Reads the user's current workflow. The dynamic label resolves the workflow
// name from the registry (explicit workflowId param, else active workflow).
const META_get_user_workflow: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Reading your workflow', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Reading your workflow', icon: WorkflowIcon },
    [ClientToolCallState.executing]: { text: 'Reading your workflow', icon: Loader2 },
    [ClientToolCallState.aborted]: { text: 'Aborted reading your workflow', icon: XCircle },
    [ClientToolCallState.success]: { text: 'Read your workflow', icon: WorkflowIcon },
    [ClientToolCallState.error]: { text: 'Failed to read your workflow', icon: X },
    [ClientToolCallState.rejected]: { text: 'Skipped reading your workflow', icon: XCircle },
  },
  getDynamicText: (params, state) => {
    const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId
    if (workflowId) {
      const workflowName = useWorkflowRegistry.getState().workflows[workflowId]?.name
      if (workflowName) {
        switch (state) {
          case ClientToolCallState.success:
            return `Read ${workflowName}`
          case ClientToolCallState.executing:
          case ClientToolCallState.generating:
          case ClientToolCallState.pending:
            return `Reading ${workflowName}`
          case ClientToolCallState.error:
            return `Failed to read ${workflowName}`
          case ClientToolCallState.aborted:
            return `Aborted reading ${workflowName}`
          case ClientToolCallState.rejected:
            return `Skipped reading ${workflowName}`
        }
      }
    }
    return undefined
  },
}

// Fetches recent workflow execution logs; labels include the requested limit.
const META_get_workflow_console: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Fetching execution logs', icon: Loader2 },
    [ClientToolCallState.executing]: { text: 'Fetching execution logs', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Fetched execution logs', icon: TerminalSquare },
    [ClientToolCallState.error]: { text: 'Failed to fetch execution logs', icon: XCircle },
    [ClientToolCallState.rejected]: {
      text: 'Skipped fetching execution logs',
      icon: MinusCircle,
    },
    [ClientToolCallState.aborted]: {
      text: 'Aborted fetching execution logs',
      icon: MinusCircle,
    },
    [ClientToolCallState.pending]: { text: 'Fetching execution logs', icon: Loader2 },
  },
  getDynamicText: (params, state) => {
    const limit = params?.limit
    if (limit && typeof limit === 'number') {
      const logText = limit === 1 ? 'execution log' : 'execution logs'

      switch (state) {
        case ClientToolCallState.success:
          return `Fetched last ${limit} ${logText}`
        case ClientToolCallState.executing:
        case ClientToolCallState.generating:
        case ClientToolCallState.pending:
          return `Fetching last ${limit} ${logText}`
        case ClientToolCallState.error:
          return `Failed to fetch last ${limit} ${logText}`
        case ClientToolCallState.rejected:
          return `Skipped fetching last ${limit} ${logText}`
        case ClientToolCallState.aborted:
          return `Aborted fetching last ${limit} ${logText}`
      }
    }
    return undefined
  },
}

// Fetches auxiliary workflow data (variables, custom tools, MCP tools, files);
// labels are derived from params.data_type.
const META_get_workflow_data: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Fetching workflow data', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Fetching workflow data', icon: Database },
    [ClientToolCallState.executing]: { text: 'Fetching workflow data', icon: Loader2 },
    [ClientToolCallState.aborted]: { text: 'Aborted fetching data', icon: XCircle },
    [ClientToolCallState.success]: { text: 'Retrieved workflow data', icon: Database },
    [ClientToolCallState.error]: { text: 'Failed to fetch data', icon: X },
    [ClientToolCallState.rejected]: { text: 'Skipped fetching data', icon: XCircle },
  },
  getDynamicText: (params, state) => {
    const dataType = params?.data_type as WorkflowDataType | undefined
    if (!dataType) return undefined

    // NOTE(review): `Record` appears without type arguments in the pasted
    // source — likely Record<WorkflowDataType, string> lost to angle-bracket
    // stripping; confirm against the original file.
    const typeLabels: Record = {
      global_variables: 'variables',
      custom_tools: 'custom tools',
      mcp_tools: 'MCP tools',
      files: 'files',
    }

    const label = typeLabels[dataType] || dataType

    switch (state) {
      case ClientToolCallState.success:
        return `Retrieved ${label}`
      case ClientToolCallState.executing:
      case ClientToolCallState.generating:
        return `Fetching ${label}`
      case ClientToolCallState.pending:
        return `Fetch ${label}?`
      case ClientToolCallState.error:
        return `Failed to fetch ${label}`
      case ClientToolCallState.aborted:
        return `Aborted fetching ${label}`
      case ClientToolCallState.rejected:
        return `Skipped fetching ${label}`
    }
    return undefined
  },
}

// Looks up a workflow by name; labels include the requested workflow_name.
const META_get_workflow_from_name: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Reading workflow', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Reading workflow', icon: FileText },
    [ClientToolCallState.executing]: { text: 'Reading workflow', icon: Loader2 },
    [ClientToolCallState.aborted]: { text: 'Aborted reading workflow', icon: XCircle },
    [ClientToolCallState.success]: { text: 'Read workflow', icon: FileText },
    [ClientToolCallState.error]: { text: 'Failed to read workflow', icon: X },
    [ClientToolCallState.rejected]: { text: 'Skipped reading workflow', icon: XCircle },
  },
  getDynamicText: (params, state) => {
    if (params?.workflow_name && typeof params.workflow_name === 'string') {
      const workflowName = params.workflow_name

      switch (state) {
        case ClientToolCallState.success:
          return `Read ${workflowName}`
        case ClientToolCallState.executing:
        case ClientToolCallState.generating:
        case ClientToolCallState.pending:
          return `Reading ${workflowName}`
        case ClientToolCallState.error:
          return `Failed to read ${workflowName}`
        case ClientToolCallState.aborted:
          return `Aborted reading ${workflowName}`
        case ClientToolCallState.rejected:
          return `Skipped reading ${workflowName}`
      }
    }
    return undefined
  },
}

// Info subagent metadata (continues on the next span of this file).
const META_info: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Getting info', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Getting info', icon: Loader2 },
    [ClientToolCallState.executing]: { text: 'Getting info', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Retrieved info', icon: Info },
    [ClientToolCallState.error]: { text: 'Failed to get info', icon: XCircle },
    [ClientToolCallState.rejected]: { text: 'Skipped info', icon: XCircle },
    [ClientToolCallState.aborted]: { text: 'Aborted info', icon: XCircle },
  },
  // (continuation of META_info)
  uiConfig: {
    subagent: {
      streamingLabel: 'Getting info',
      completedLabel: 'Info retrieved',
      shouldCollapse: true,
      outputArtifacts: [],
    },
  },
}

// Knowledge-management subagent; collapses in the UI once complete.
const META_knowledge: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Managing knowledge', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Managing knowledge', icon: Loader2 },
    [ClientToolCallState.executing]: { text: 'Managing knowledge', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Managed knowledge', icon: BookOpen },
    [ClientToolCallState.error]: { text: 'Failed to manage knowledge', icon: XCircle },
    [ClientToolCallState.rejected]: { text: 'Skipped knowledge', icon: XCircle },
    [ClientToolCallState.aborted]: { text: 'Aborted knowledge', icon: XCircle },
  },
  uiConfig: {
    subagent: {
      streamingLabel: 'Managing knowledge',
      completedLabel: 'Knowledge managed',
      shouldCollapse: true,
      outputArtifacts: [],
    },
  },
}

// Knowledge-base tool (create/list/get/query). The dynamic label picks a verb
// set per operation, with a generic fallback for unknown operations.
const META_knowledge_base: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Accessing knowledge base', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Accessing knowledge base', icon: Loader2 },
    [ClientToolCallState.executing]: { text: 'Accessing knowledge base', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Accessed knowledge base', icon: Database },
    [ClientToolCallState.error]: { text: 'Failed to access knowledge base', icon: XCircle },
    [ClientToolCallState.aborted]: { text: 'Aborted knowledge base access', icon: MinusCircle },
    [ClientToolCallState.rejected]: { text: 'Skipped knowledge base access', icon: MinusCircle },
  },
  // NOTE(review): both `Record` annotations below appear without type
  // arguments in the pasted source — generic parameters were likely stripped
  // in transit (params is presumably Record<string, any>; opVerbs presumably
  // Record<string, { active: string; past: string; pending?: string }>, to
  // match defaultVerb). Confirm against the original file.
  getDynamicText: (params: Record, state: ClientToolCallState) => {
    const operation = params?.operation as string | undefined
    const name = params?.args?.name as string | undefined

    const opVerbs: Record = {
      create: {
        active: 'Creating knowledge base',
        past: 'Created knowledge base',
        pending: name ? `Create knowledge base "${name}"?` : 'Create knowledge base?',
      },
      list: { active: 'Listing knowledge bases', past: 'Listed knowledge bases' },
      get: { active: 'Getting knowledge base', past: 'Retrieved knowledge base' },
      query: { active: 'Querying knowledge base', past: 'Queried knowledge base' },
    }
    const defaultVerb: { active: string; past: string; pending?: string } = {
      active: 'Accessing knowledge base',
      past: 'Accessed knowledge base',
    }
    const verb = operation ? opVerbs[operation] || defaultVerb : defaultVerb

    if (state === ClientToolCallState.success) {
      return verb.past
    }
    // Only "create" defines a pending question; others use the active label.
    if (state === ClientToolCallState.pending && verb.pending) {
      return verb.pending
    }
    if (
      state === ClientToolCallState.generating ||
      state === ClientToolCallState.pending ||
      state === ClientToolCallState.executing
    ) {
      return verb.active
    }
    return undefined
  },
}

// Lists the user's workflows; static labels only.
const META_list_user_workflows: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Listing your workflows', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Listing your workflows', icon: ListChecks },
    [ClientToolCallState.executing]: { text: 'Listing your workflows', icon: Loader2 },
    [ClientToolCallState.aborted]: { text: 'Aborted listing workflows', icon: XCircle },
    [ClientToolCallState.success]: { text: 'Listed your workflows', icon: ListChecks },
    [ClientToolCallState.error]: { text: 'Failed to list workflows', icon: X },
    [ClientToolCallState.rejected]: { text: 'Skipped listing workflows', icon: XCircle },
  },
}

// Lists workspace MCP servers; static labels only, no confirmation.
const META_list_workspace_mcp_servers: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: {
      text: 'Getting MCP servers',
      icon: Loader2,
    },
    [ClientToolCallState.pending]: { text: 'Getting MCP servers', icon: Loader2 },
    [ClientToolCallState.executing]: { text: 'Getting MCP servers', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Retrieved MCP servers', icon: Server },
    [ClientToolCallState.error]: { text: 'Failed to get MCP servers', icon: XCircle },
    [ClientToolCallState.aborted]: { text: 'Aborted getting MCP servers', icon: XCircle },
    [ClientToolCallState.rejected]: { text: 'Skipped getting MCP servers', icon: XCircle },
  },
  interrupt: undefined,
}

// Arbitrary HTTP request tool. Confirmation required; shows an editable
// method/endpoint params table. Definition is truncated at the end of this
// chunk — getDynamicText continues beyond the visible source.
const META_make_api_request: ToolMetadata = {
  displayNames: {
    [ClientToolCallState.generating]: { text: 'Preparing API request', icon: Loader2 },
    [ClientToolCallState.pending]: { text: 'Review API request', icon: Globe2 },
    [ClientToolCallState.executing]: { text: 'Executing API request', icon: Loader2 },
    [ClientToolCallState.success]: { text: 'Completed API request', icon: Globe2 },
    [ClientToolCallState.error]: { text: 'Failed to execute API request', icon: XCircle },
    [ClientToolCallState.rejected]: { text: 'Skipped API request', icon: MinusCircle },
    [ClientToolCallState.aborted]: { text: 'Aborted API request', icon: XCircle },
  },
  interrupt: {
    accept: { text: 'Execute', icon: Globe2 },
    reject: { text: 'Skip', icon: MinusCircle },
  },
  uiConfig: {
    interrupt: {
      accept: { text: 'Execute', icon: Globe2 },
      reject: { text: 'Skip', icon: MinusCircle },
      showAllowOnce: true,
      showAllowAlways: true,
    },
    paramsTable: {
      columns: [
        { key: 'method', label: 'Method', width: '26%', editable: true, mono: true },
        { key: 'url', label: 'Endpoint', width: '74%', editable: true, mono: true },
      ],
      extractRows: (params) => {
        return [['request', (params.method || 'GET').toUpperCase(), params.url || '']]
      },
    },
  },
  getDynamicText: (params, state) => {
    if (params?.url && typeof params.url === 'string') {
      const method = params.method || 'GET'
      let url = params.url

      // Extract domain from URL for cleaner display
      try {
        const urlObj = new URL(url)
        url = urlObj.hostname + urlObj.pathname
      } catch {
        // Use URL as-is if parsing fails
      }

      switch (state) {
        case ClientToolCallState.success:
          return `${method} ${url} complete`
case ClientToolCallState.executing: + return `${method} ${url}` + case ClientToolCallState.generating: + return `Preparing ${method} ${url}` + case ClientToolCallState.pending: + return `Review ${method} ${url}` + case ClientToolCallState.error: + return `Failed ${method} ${url}` + case ClientToolCallState.rejected: + return `Skipped ${method} ${url}` + case ClientToolCallState.aborted: + return `Aborted ${method} ${url}` + } + } + return undefined + }, + } + +const META_manage_custom_tool: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Managing custom tool', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Manage custom tool?', icon: Plus }, + [ClientToolCallState.executing]: { text: 'Managing custom tool', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Managed custom tool', icon: Check }, + [ClientToolCallState.error]: { text: 'Failed to manage custom tool', icon: X }, + [ClientToolCallState.aborted]: { + text: 'Aborted managing custom tool', + icon: XCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped managing custom tool', + icon: XCircle, + }, + }, + interrupt: { + accept: { text: 'Allow', icon: Check }, + reject: { text: 'Skip', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const operation = params?.operation as 'add' | 'edit' | 'delete' | 'list' | undefined + + if (!operation) return undefined + + let toolName = params?.schema?.function?.name + if (!toolName && params?.toolId) { + try { + const tool = getCustomTool(params.toolId) + toolName = tool?.schema?.function?.name + } catch { + // Ignore errors accessing cache + } + } + + const getActionText = (verb: 'present' | 'past' | 'gerund') => { + switch (operation) { + case 'add': + return verb === 'present' ? 'Create' : verb === 'past' ? 'Created' : 'Creating' + case 'edit': + return verb === 'present' ? 'Edit' : verb === 'past' ? 'Edited' : 'Editing' + case 'delete': + return verb === 'present' ? 
'Delete' : verb === 'past' ? 'Deleted' : 'Deleting' + case 'list': + return verb === 'present' ? 'List' : verb === 'past' ? 'Listed' : 'Listing' + default: + return verb === 'present' ? 'Manage' : verb === 'past' ? 'Managed' : 'Managing' + } + } + + // For add: only show tool name in past tense (success) + // For edit/delete: always show tool name + // For list: never show individual tool name, use plural + const shouldShowToolName = (currentState: ClientToolCallState) => { + if (operation === 'list') return false + if (operation === 'add') { + return currentState === ClientToolCallState.success + } + return true // edit and delete always show tool name + } + + const nameText = + operation === 'list' + ? ' custom tools' + : shouldShowToolName(state) && toolName + ? ` ${toolName}` + : ' custom tool' + + switch (state) { + case ClientToolCallState.success: + return `${getActionText('past')}${nameText}` + case ClientToolCallState.executing: + return `${getActionText('gerund')}${nameText}` + case ClientToolCallState.generating: + return `${getActionText('gerund')}${nameText}` + case ClientToolCallState.pending: + return `${getActionText('present')}${nameText}?` + case ClientToolCallState.error: + return `Failed to ${getActionText('present')?.toLowerCase()}${nameText}` + case ClientToolCallState.aborted: + return `Aborted ${getActionText('gerund')?.toLowerCase()}${nameText}` + case ClientToolCallState.rejected: + return `Skipped ${getActionText('gerund')?.toLowerCase()}${nameText}` + } + return undefined + }, + } + +const META_manage_mcp_tool: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Managing MCP tool', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Manage MCP tool?', icon: Server }, + [ClientToolCallState.executing]: { text: 'Managing MCP tool', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Managed MCP tool', icon: Check }, + [ClientToolCallState.error]: { text: 'Failed to manage MCP tool', icon: X 
}, + [ClientToolCallState.aborted]: { + text: 'Aborted managing MCP tool', + icon: XCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped managing MCP tool', + icon: XCircle, + }, + }, + interrupt: { + accept: { text: 'Allow', icon: Check }, + reject: { text: 'Skip', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const operation = params?.operation as 'add' | 'edit' | 'delete' | undefined + + if (!operation) return undefined + + const serverName = params?.config?.name || params?.serverName + + const getActionText = (verb: 'present' | 'past' | 'gerund') => { + switch (operation) { + case 'add': + return verb === 'present' ? 'Add' : verb === 'past' ? 'Added' : 'Adding' + case 'edit': + return verb === 'present' ? 'Edit' : verb === 'past' ? 'Edited' : 'Editing' + case 'delete': + return verb === 'present' ? 'Delete' : verb === 'past' ? 'Deleted' : 'Deleting' + } + } + + const shouldShowServerName = (currentState: ClientToolCallState) => { + if (operation === 'add') { + return currentState === ClientToolCallState.success + } + return true + } + + const nameText = shouldShowServerName(state) && serverName ? 
` ${serverName}` : ' MCP tool' + + switch (state) { + case ClientToolCallState.success: + return `${getActionText('past')}${nameText}` + case ClientToolCallState.executing: + return `${getActionText('gerund')}${nameText}` + case ClientToolCallState.generating: + return `${getActionText('gerund')}${nameText}` + case ClientToolCallState.pending: + return `${getActionText('present')}${nameText}?` + case ClientToolCallState.error: + return `Failed to ${getActionText('present')?.toLowerCase()}${nameText}` + case ClientToolCallState.aborted: + return `Aborted ${getActionText('gerund')?.toLowerCase()}${nameText}` + case ClientToolCallState.rejected: + return `Skipped ${getActionText('gerund')?.toLowerCase()}${nameText}` + } + return undefined + }, + } + +const META_mark_todo_in_progress: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Marking todo in progress', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Marking todo in progress', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Marking todo in progress', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Marked todo in progress', icon: Loader2 }, + [ClientToolCallState.error]: { text: 'Failed to mark in progress', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted marking in progress', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped marking in progress', icon: MinusCircle }, + }, + } + +const META_navigate_ui: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Preparing to open', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Open?', icon: Navigation }, + [ClientToolCallState.executing]: { text: 'Opening', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Opened', icon: Navigation }, + [ClientToolCallState.error]: { text: 'Failed to open', icon: X }, + [ClientToolCallState.aborted]: { + text: 'Aborted opening', + icon: XCircle, + }, + 
[ClientToolCallState.rejected]: { + text: 'Skipped opening', + icon: XCircle, + }, + }, + interrupt: { + accept: { text: 'Open', icon: Navigation }, + reject: { text: 'Skip', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const destination = params?.destination as NavigationDestination | undefined + const workflowName = params?.workflowName + + const action = 'open' + const actionCapitalized = 'Open' + const actionPast = 'opened' + const actionIng = 'opening' + let target = '' + + if (destination === 'workflow' && workflowName) { + target = ` workflow "${workflowName}"` + } else if (destination === 'workflow') { + target = ' workflows' + } else if (destination === 'logs') { + target = ' logs' + } else if (destination === 'templates') { + target = ' templates' + } else if (destination === 'vector_db') { + target = ' vector database' + } else if (destination === 'settings') { + target = ' settings' + } + + const fullAction = `${action}${target}` + const fullActionCapitalized = `${actionCapitalized}${target}` + const fullActionPast = `${actionPast}${target}` + const fullActionIng = `${actionIng}${target}` + + switch (state) { + case ClientToolCallState.success: + return fullActionPast.charAt(0).toUpperCase() + fullActionPast.slice(1) + case ClientToolCallState.executing: + return fullActionIng.charAt(0).toUpperCase() + fullActionIng.slice(1) + case ClientToolCallState.generating: + return `Preparing to ${fullAction}` + case ClientToolCallState.pending: + return `${fullActionCapitalized}?` + case ClientToolCallState.error: + return `Failed to ${fullAction}` + case ClientToolCallState.aborted: + return `Aborted ${fullAction}` + case ClientToolCallState.rejected: + return `Skipped ${fullAction}` + } + return undefined + }, + } + +const META_oauth_request_access: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Requesting integration access', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Requesting integration 
access', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Requesting integration access', icon: Loader2 }, + [ClientToolCallState.rejected]: { text: 'Skipped integration access', icon: MinusCircle }, + [ClientToolCallState.success]: { text: 'Requested integration access', icon: CheckCircle }, + [ClientToolCallState.error]: { text: 'Failed to request integration access', icon: X }, + [ClientToolCallState.aborted]: { text: 'Aborted integration access request', icon: XCircle }, + }, + interrupt: { + accept: { text: 'Connect', icon: PlugZap }, + reject: { text: 'Skip', icon: MinusCircle }, + }, + getDynamicText: (params, state) => { + if (params.providerName) { + const name = params.providerName + switch (state) { + case ClientToolCallState.generating: + case ClientToolCallState.pending: + case ClientToolCallState.executing: + return `Requesting ${name} access` + case ClientToolCallState.rejected: + return `Skipped ${name} access` + case ClientToolCallState.success: + return `Requested ${name} access` + case ClientToolCallState.error: + return `Failed to request ${name} access` + case ClientToolCallState.aborted: + return `Aborted ${name} access request` + } + } + return undefined + }, + } + +const META_plan: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Planning', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Planning', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Planning', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Planned', icon: ListTodo }, + [ClientToolCallState.error]: { text: 'Failed to plan', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped plan', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted plan', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Planning', + completedLabel: 'Planned', + shouldCollapse: true, + outputArtifacts: ['plan'], + }, + }, + } + +const META_redeploy: ToolMetadata = { + displayNames: { + 
[ClientToolCallState.generating]: { text: 'Redeploying workflow', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Redeploy workflow', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Redeploying workflow', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Redeployed workflow', icon: Rocket }, + [ClientToolCallState.error]: { text: 'Failed to redeploy workflow', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted redeploy', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped redeploy', icon: XCircle }, + }, + interrupt: undefined, + } + +const META_remember_debug: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Validating fix', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Validating fix', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Validating fix', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Validated fix', icon: CheckCircle2 }, + [ClientToolCallState.error]: { text: 'Failed to validate', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted validation', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped validation', icon: MinusCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + const operation = params?.operation + + if (operation === 'add' || operation === 'edit') { + // For add/edit, show from problem or solution + const text = params?.problem || params?.solution + if (text && typeof text === 'string') { + switch (state) { + case ClientToolCallState.success: + return `Validated fix ${text}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Validating fix ${text}` + case ClientToolCallState.error: + return `Failed to validate fix ${text}` + case ClientToolCallState.aborted: + return `Aborted validating fix ${text}` + case ClientToolCallState.rejected: + return `Skipped validating fix ${text}` 
+ } + } + } else if (operation === 'delete') { + // For delete, show from problem or solution (or id as fallback) + const text = params?.problem || params?.solution || params?.id + if (text && typeof text === 'string') { + switch (state) { + case ClientToolCallState.success: + return `Adjusted fix ${text}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Adjusting fix ${text}` + case ClientToolCallState.error: + return `Failed to adjust fix ${text}` + case ClientToolCallState.aborted: + return `Aborted adjusting fix ${text}` + case ClientToolCallState.rejected: + return `Skipped adjusting fix ${text}` + } + } + } + + return undefined + }, + } + +const META_research: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Researching', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Researching', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Researching', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Researched', icon: Search }, + [ClientToolCallState.error]: { text: 'Failed to research', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped research', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted research', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Researching', + completedLabel: 'Researched', + shouldCollapse: true, + outputArtifacts: [], + }, + }, + } + +const META_run_workflow: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Preparing to run your workflow', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Run this workflow?', icon: Play }, + [ClientToolCallState.executing]: { text: 'Running your workflow', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Executed workflow', icon: Play }, + [ClientToolCallState.error]: { text: 'Errored running workflow', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 
'Skipped workflow execution', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted workflow execution', icon: MinusCircle }, + [ClientToolCallState.background]: { text: 'Running in background', icon: Play }, + }, + interrupt: { + accept: { text: 'Run', icon: Play }, + reject: { text: 'Skip', icon: MinusCircle }, + }, + uiConfig: { + isSpecial: true, + interrupt: { + accept: { text: 'Run', icon: Play }, + reject: { text: 'Skip', icon: MinusCircle }, + showAllowOnce: true, + showAllowAlways: true, + }, + secondaryAction: { + text: 'Move to Background', + title: 'Move to Background', + variant: 'tertiary', + showInStates: [ClientToolCallState.executing], + completionMessage: + 'The user has chosen to move the workflow execution to the background. Check back with them later to know when the workflow execution is complete', + targetState: ClientToolCallState.background, + }, + paramsTable: { + columns: [ + { key: 'input', label: 'Input', width: '36%' }, + { key: 'value', label: 'Value', width: '64%', editable: true, mono: true }, + ], + extractRows: (params) => { + let inputs = params.input || params.inputs || params.workflow_input + if (typeof inputs === 'string') { + try { + inputs = JSON.parse(inputs) + } catch { + inputs = {} + } + } + if (params.workflow_input && typeof params.workflow_input === 'object') { + inputs = params.workflow_input + } + if (!inputs || typeof inputs !== 'object') { + const { workflowId, workflow_input, ...rest } = params + inputs = rest + } + const safeInputs = inputs && typeof inputs === 'object' ? 
inputs : {} + return Object.entries(safeInputs).map(([key, value]) => [key, key, String(value)]) + }, + }, + }, + getDynamicText: (params, state) => { + const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId + if (workflowId) { + const workflowName = useWorkflowRegistry.getState().workflows[workflowId]?.name + if (workflowName) { + switch (state) { + case ClientToolCallState.success: + return `Ran ${workflowName}` + case ClientToolCallState.executing: + return `Running ${workflowName}` + case ClientToolCallState.generating: + return `Preparing to run ${workflowName}` + case ClientToolCallState.pending: + return `Run ${workflowName}?` + case ClientToolCallState.error: + return `Failed to run ${workflowName}` + case ClientToolCallState.rejected: + return `Skipped running ${workflowName}` + case ClientToolCallState.aborted: + return `Aborted running ${workflowName}` + case ClientToolCallState.background: + return `Running ${workflowName} in background` + } + } + } + return undefined + }, + } + +const META_scrape_page: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Scraping page', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Scraping page', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Scraping page', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Scraped page', icon: Globe }, + [ClientToolCallState.error]: { text: 'Failed to scrape page', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted scraping page', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped scraping page', icon: MinusCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.url && typeof params.url === 'string') { + const url = params.url + + switch (state) { + case ClientToolCallState.success: + return `Scraped ${url}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case 
ClientToolCallState.pending: + return `Scraping ${url}` + case ClientToolCallState.error: + return `Failed to scrape ${url}` + case ClientToolCallState.aborted: + return `Aborted scraping ${url}` + case ClientToolCallState.rejected: + return `Skipped scraping ${url}` + } + } + return undefined + }, + } + +const META_search_documentation: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Searching documentation', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Searching documentation', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Searching documentation', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Completed documentation search', icon: BookOpen }, + [ClientToolCallState.error]: { text: 'Failed to search docs', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted documentation search', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped documentation search', icon: MinusCircle }, + }, + getDynamicText: (params, state) => { + if (params?.query && typeof params.query === 'string') { + const query = params.query + + switch (state) { + case ClientToolCallState.success: + return `Searched docs for ${query}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Searching docs for ${query}` + case ClientToolCallState.error: + return `Failed to search docs for ${query}` + case ClientToolCallState.aborted: + return `Aborted searching docs for ${query}` + case ClientToolCallState.rejected: + return `Skipped searching docs for ${query}` + } + } + return undefined + }, + } + +const META_search_errors: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Debugging', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Debugging', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Debugging', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Debugged', icon: 
Bug }, + [ClientToolCallState.error]: { text: 'Failed to debug', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted debugging', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped debugging', icon: MinusCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.query && typeof params.query === 'string') { + const query = params.query + + switch (state) { + case ClientToolCallState.success: + return `Debugged ${query}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Debugging ${query}` + case ClientToolCallState.error: + return `Failed to debug ${query}` + case ClientToolCallState.aborted: + return `Aborted debugging ${query}` + case ClientToolCallState.rejected: + return `Skipped debugging ${query}` + } + } + return undefined + }, + } + +const META_search_library_docs: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Reading docs', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Reading docs', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Reading docs', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Read docs', icon: BookOpen }, + [ClientToolCallState.error]: { text: 'Failed to read docs', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted reading docs', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped reading docs', icon: MinusCircle }, + }, + getDynamicText: (params, state) => { + const libraryName = params?.library_name + if (libraryName && typeof libraryName === 'string') { + switch (state) { + case ClientToolCallState.success: + return `Read ${libraryName} docs` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Reading ${libraryName} docs` + case ClientToolCallState.error: + return `Failed to read ${libraryName} docs` + case 
ClientToolCallState.aborted: + return `Aborted reading ${libraryName} docs` + case ClientToolCallState.rejected: + return `Skipped reading ${libraryName} docs` + } + } + return undefined + }, + } + +const META_search_online: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Searching online', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Searching online', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Searching online', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Completed online search', icon: Globe }, + [ClientToolCallState.error]: { text: 'Failed to search online', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped online search', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted online search', icon: XCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.query && typeof params.query === 'string') { + const query = params.query + + switch (state) { + case ClientToolCallState.success: + return `Searched online for ${query}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Searching online for ${query}` + case ClientToolCallState.error: + return `Failed to search online for ${query}` + case ClientToolCallState.aborted: + return `Aborted searching online for ${query}` + case ClientToolCallState.rejected: + return `Skipped searching online for ${query}` + } + } + return undefined + }, + } + +const META_search_patterns: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Searching workflow patterns', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Searching workflow patterns', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Searching workflow patterns', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Found workflow patterns', icon: Search }, + [ClientToolCallState.error]: { text: 
'Failed to search patterns', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted pattern search', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped pattern search', icon: MinusCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.queries && Array.isArray(params.queries) && params.queries.length > 0) { + const firstQuery = String(params.queries[0]) + + switch (state) { + case ClientToolCallState.success: + return `Searched ${firstQuery}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Searching ${firstQuery}` + case ClientToolCallState.error: + return `Failed to search ${firstQuery}` + case ClientToolCallState.aborted: + return `Aborted searching ${firstQuery}` + case ClientToolCallState.rejected: + return `Skipped searching ${firstQuery}` + } + } + return undefined + }, + } + +const META_set_environment_variables: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Preparing to set environment variables', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Set environment variables?', icon: Settings2 }, + [ClientToolCallState.executing]: { text: 'Setting environment variables', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Set environment variables', icon: Settings2 }, + [ClientToolCallState.error]: { text: 'Failed to set environment variables', icon: X }, + [ClientToolCallState.aborted]: { + text: 'Aborted setting environment variables', + icon: XCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped setting environment variables', + icon: XCircle, + }, + }, + interrupt: { + accept: { text: 'Apply', icon: Settings2 }, + reject: { text: 'Skip', icon: XCircle }, + }, + uiConfig: { + alwaysExpanded: true, + interrupt: { + accept: { text: 'Apply', icon: Settings2 }, + reject: { text: 'Skip', icon: XCircle }, + showAllowOnce: true, + showAllowAlways: true, 
+ }, + paramsTable: { + columns: [ + { key: 'name', label: 'Variable', width: '36%', editable: true }, + { key: 'value', label: 'Value', width: '64%', editable: true, mono: true }, + ], + extractRows: (params) => { + const variables = params.variables || {} + const entries = Array.isArray(variables) + ? variables.map((v: any, i: number) => [String(i), v.name || `var_${i}`, v.value || '']) + : Object.entries(variables).map(([key, val]) => { + if (typeof val === 'object' && val !== null && 'value' in (val as any)) { + return [key, key, (val as any).value] + } + return [key, key, val] + }) + return entries as Array<[string, ...any[]]> + }, + }, + }, + getDynamicText: (params, state) => { + if (params?.variables && typeof params.variables === 'object') { + const count = Object.keys(params.variables).length + const varText = count === 1 ? 'variable' : 'variables' + + switch (state) { + case ClientToolCallState.success: + return `Set ${count} ${varText}` + case ClientToolCallState.executing: + return `Setting ${count} ${varText}` + case ClientToolCallState.generating: + return `Preparing to set ${count} ${varText}` + case ClientToolCallState.pending: + return `Set ${count} ${varText}?` + case ClientToolCallState.error: + return `Failed to set ${count} ${varText}` + case ClientToolCallState.aborted: + return `Aborted setting ${count} ${varText}` + case ClientToolCallState.rejected: + return `Skipped setting ${count} ${varText}` + } + } + return undefined + }, + } + +const META_set_global_workflow_variables: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Preparing to set workflow variables', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Set workflow variables?', icon: Settings2 }, + [ClientToolCallState.executing]: { text: 'Setting workflow variables', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Updated workflow variables', icon: Settings2 }, + [ClientToolCallState.error]: { text: 'Failed to set workflow 
variables', icon: X }, + [ClientToolCallState.aborted]: { text: 'Aborted setting variables', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped setting variables', icon: XCircle }, + }, + interrupt: { + accept: { text: 'Apply', icon: Settings2 }, + reject: { text: 'Skip', icon: XCircle }, + }, + uiConfig: { + interrupt: { + accept: { text: 'Apply', icon: Settings2 }, + reject: { text: 'Skip', icon: XCircle }, + showAllowOnce: true, + showAllowAlways: true, + }, + paramsTable: { + columns: [ + { key: 'name', label: 'Name', width: '40%', editable: true, mono: true }, + { key: 'value', label: 'Value', width: '60%', editable: true, mono: true }, + ], + extractRows: (params) => { + const operations = params.operations || [] + return operations.map((op: any, idx: number) => [ + String(idx), + op.name || '', + String(op.value ?? ''), + ]) + }, + }, + }, + getDynamicText: (params, state) => { + if (params?.operations && Array.isArray(params.operations)) { + const varNames = params.operations + .slice(0, 2) + .map((op: any) => op.name) + .filter(Boolean) + + if (varNames.length > 0) { + const varList = varNames.join(', ') + const more = params.operations.length > 2 ? '...' 
: '' + const displayText = `${varList}${more}` + + switch (state) { + case ClientToolCallState.success: + return `Set ${displayText}` + case ClientToolCallState.executing: + return `Setting ${displayText}` + case ClientToolCallState.generating: + return `Preparing to set ${displayText}` + case ClientToolCallState.pending: + return `Set ${displayText}?` + case ClientToolCallState.error: + return `Failed to set ${displayText}` + case ClientToolCallState.aborted: + return `Aborted setting ${displayText}` + case ClientToolCallState.rejected: + return `Skipped setting ${displayText}` + } + } + } + return undefined + }, + } + +const META_sleep: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Preparing to sleep', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Sleeping', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Sleeping', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Finished sleeping', icon: Moon }, + [ClientToolCallState.error]: { text: 'Interrupted sleep', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped sleep', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted sleep', icon: MinusCircle }, + [ClientToolCallState.background]: { text: 'Resumed', icon: Moon }, + }, + uiConfig: { + secondaryAction: { + text: 'Wake', + title: 'Wake', + variant: 'tertiary', + showInStates: [ClientToolCallState.executing], + targetState: ClientToolCallState.background, + }, + }, + // No interrupt - auto-execute immediately + getDynamicText: (params, state) => { + const seconds = params?.seconds + if (typeof seconds === 'number' && seconds > 0) { + const displayTime = formatDuration(seconds) + switch (state) { + case ClientToolCallState.success: + return `Slept for ${displayTime}` + case ClientToolCallState.executing: + case ClientToolCallState.pending: + return `Sleeping for ${displayTime}` + case ClientToolCallState.generating: + return `Preparing to sleep for ${displayTime}` + 
case ClientToolCallState.error: + return `Failed to sleep for ${displayTime}` + case ClientToolCallState.rejected: + return `Skipped sleeping for ${displayTime}` + case ClientToolCallState.aborted: + return `Aborted sleeping for ${displayTime}` + case ClientToolCallState.background: { + // Calculate elapsed time from when sleep started + const elapsedSeconds = params?._elapsedSeconds + if (typeof elapsedSeconds === 'number' && elapsedSeconds > 0) { + return `Resumed after ${formatDuration(Math.round(elapsedSeconds))}` + } + return 'Resumed early' + } + } + } + return undefined + }, + } + +const META_summarize_conversation: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Summarizing conversation', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Summarizing conversation', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Summarizing conversation', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Summarized conversation', icon: PencilLine }, + [ClientToolCallState.error]: { text: 'Failed to summarize conversation', icon: XCircle }, + [ClientToolCallState.aborted]: { + text: 'Aborted summarizing conversation', + icon: MinusCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped summarizing conversation', + icon: MinusCircle, + }, + }, + interrupt: undefined, + } + +const META_superagent: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Superagent working', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Superagent working', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Superagent working', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Superagent completed', icon: Sparkles }, + [ClientToolCallState.error]: { text: 'Superagent failed', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Superagent skipped', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Superagent aborted', icon: XCircle }, + }, + uiConfig: { 
+ subagent: { + streamingLabel: 'Superagent working', + completedLabel: 'Superagent completed', + shouldCollapse: true, + outputArtifacts: [], + }, + }, + } + +const META_test: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Testing', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Testing', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Testing', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Tested', icon: FlaskConical }, + [ClientToolCallState.error]: { text: 'Failed to test', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped test', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted test', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Testing', + completedLabel: 'Tested', + shouldCollapse: true, + outputArtifacts: [], + }, + }, + } + +const META_tour: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Touring', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Touring', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Touring', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Completed tour', icon: Compass }, + [ClientToolCallState.error]: { text: 'Failed tour', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped tour', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted tour', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Touring', + completedLabel: 'Tour complete', + shouldCollapse: true, + outputArtifacts: [], + }, + }, + } + +const META_workflow: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Managing workflow', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Managing workflow', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Managing workflow', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Managed workflow', icon: GitBranch }, + 
[ClientToolCallState.error]: { text: 'Failed to manage workflow', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped workflow', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted workflow', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Managing workflow', + completedLabel: 'Workflow managed', + shouldCollapse: true, + outputArtifacts: [], + }, + }, + } + +const TOOL_METADATA_BY_ID: Record = { + 'auth': META_auth, + 'check_deployment_status': META_check_deployment_status, + 'checkoff_todo': META_checkoff_todo, + 'crawl_website': META_crawl_website, + 'create_workspace_mcp_server': META_create_workspace_mcp_server, + 'custom_tool': META_custom_tool, + 'debug': META_debug, + 'deploy': META_deploy, + 'deploy_api': META_deploy_api, + 'deploy_chat': META_deploy_chat, + 'deploy_mcp': META_deploy_mcp, + 'edit': META_edit, + 'edit_workflow': META_edit_workflow, + 'evaluate': META_evaluate, + 'get_block_config': META_get_block_config, + 'get_block_options': META_get_block_options, + 'get_block_outputs': META_get_block_outputs, + 'get_block_upstream_references': META_get_block_upstream_references, + 'get_blocks_and_tools': META_get_blocks_and_tools, + 'get_blocks_metadata': META_get_blocks_metadata, + 'get_credentials': META_get_credentials, + 'get_examples_rag': META_get_examples_rag, + 'get_operations_examples': META_get_operations_examples, + 'get_page_contents': META_get_page_contents, + 'get_trigger_blocks': META_get_trigger_blocks, + 'get_trigger_examples': META_get_trigger_examples, + 'get_user_workflow': META_get_user_workflow, + 'get_workflow_console': META_get_workflow_console, + 'get_workflow_data': META_get_workflow_data, + 'get_workflow_from_name': META_get_workflow_from_name, + 'info': META_info, + 'knowledge': META_knowledge, + 'knowledge_base': META_knowledge_base, + 'list_user_workflows': META_list_user_workflows, + 'list_workspace_mcp_servers': META_list_workspace_mcp_servers, + 'make_api_request': 
META_make_api_request, + 'manage_custom_tool': META_manage_custom_tool, + 'manage_mcp_tool': META_manage_mcp_tool, + 'mark_todo_in_progress': META_mark_todo_in_progress, + 'navigate_ui': META_navigate_ui, + 'oauth_request_access': META_oauth_request_access, + 'plan': META_plan, + 'redeploy': META_redeploy, + 'remember_debug': META_remember_debug, + 'research': META_research, + 'run_workflow': META_run_workflow, + 'scrape_page': META_scrape_page, + 'search_documentation': META_search_documentation, + 'search_errors': META_search_errors, + 'search_library_docs': META_search_library_docs, + 'search_online': META_search_online, + 'search_patterns': META_search_patterns, + 'set_environment_variables': META_set_environment_variables, + 'set_global_workflow_variables': META_set_global_workflow_variables, + 'sleep': META_sleep, + 'summarize_conversation': META_summarize_conversation, + 'superagent': META_superagent, + 'test': META_test, + 'tour': META_tour, + 'workflow': META_workflow, +} + +export const TOOL_DISPLAY_REGISTRY: Record = Object.fromEntries( + Object.entries(TOOL_METADATA_BY_ID).map(([toolName, metadata]) => [ + toolName, + toToolDisplayEntry(metadata), + ]) +) diff --git a/apps/sim/lib/copilot/tools/client/types.ts b/apps/sim/lib/copilot/tools/client/types.ts deleted file mode 100644 index 0f8ded86df..0000000000 --- a/apps/sim/lib/copilot/tools/client/types.ts +++ /dev/null @@ -1,33 +0,0 @@ -import type { BaseClientToolMetadata } from '@/lib/copilot/tools/client/base-tool' -import { ClientToolCallState } from '@/lib/copilot/tools/client/base-tool' - -export interface ToolExecutionContext { - toolCallId: string - toolName: string - // Logging only; tools must not mutate store state directly - log: ( - level: 'debug' | 'info' | 'warn' | 'error', - message: string, - extra?: Record - ) => void -} - -export interface ToolRunResult { - status: number - message?: any - data?: any -} - -export interface ClientToolDefinition { - name: string - metadata?: 
BaseClientToolMetadata - // Return true if this tool requires user confirmation before execution - hasInterrupt?: boolean | ((args?: Args) => boolean) - // Main execution entry point. Returns a result for the store to handle. - execute: (ctx: ToolExecutionContext, args?: Args) => Promise - // Optional accept/reject handlers for interrupt flows - accept?: (ctx: ToolExecutionContext, args?: Args) => Promise - reject?: (ctx: ToolExecutionContext, args?: Args) => Promise -} - -export { ClientToolCallState } diff --git a/apps/sim/lib/copilot/tools/client/ui-config.ts b/apps/sim/lib/copilot/tools/client/ui-config.ts deleted file mode 100644 index 6fac1645c7..0000000000 --- a/apps/sim/lib/copilot/tools/client/ui-config.ts +++ /dev/null @@ -1,238 +0,0 @@ -/** - * UI Configuration Types for Copilot Tools - * - * This module defines the configuration interfaces that control how tools - * are rendered in the tool-call component. All UI behavior should be defined - * here rather than hardcoded in the rendering component. - */ -import type { LucideIcon } from 'lucide-react' -import type { ClientToolCallState } from './base-tool' - -/** - * Configuration for a params table column - */ -export interface ParamsTableColumn { - /** Key to extract from params */ - key: string - /** Display label for the column header */ - label: string - /** Width as percentage or CSS value */ - width?: string - /** Whether values in this column are editable */ - editable?: boolean - /** Whether to use monospace font */ - mono?: boolean - /** Whether to mask the value (for passwords) */ - masked?: boolean -} - -/** - * Configuration for params table rendering - */ -export interface ParamsTableConfig { - /** Column definitions */ - columns: ParamsTableColumn[] - /** - * Extract rows from tool params. - * Returns array of [key, ...cellValues] for each row. - */ - extractRows: (params: Record) => Array<[string, ...any[]]> - /** - * Optional: Update params when a cell is edited. 
- * Returns the updated params object. - */ - updateCell?: ( - params: Record, - rowKey: string, - columnKey: string, - newValue: any - ) => Record -} - -/** - * Configuration for secondary action button (like "Move to Background") - */ -export interface SecondaryActionConfig { - /** Button text */ - text: string - /** Button title/tooltip */ - title?: string - /** Button variant */ - variant?: 'tertiary' | 'default' | 'outline' - /** States in which to show this button */ - showInStates: ClientToolCallState[] - /** - * Message to send when the action is triggered. - * Used by markToolComplete. - */ - completionMessage?: string - /** - * Target state after action. - * If not provided, defaults to 'background'. - */ - targetState?: ClientToolCallState -} - -/** - * Configuration for subagent tools (tools that spawn subagents) - */ -export interface SubagentConfig { - /** Label shown while streaming (e.g., "Planning", "Editing") */ - streamingLabel: string - /** Label shown when complete (e.g., "Planned", "Edited") */ - completedLabel: string - /** - * Whether the content should collapse when streaming ends. - * Default: true - */ - shouldCollapse?: boolean - /** - * Output artifacts that should NOT be collapsed. - * These are rendered outside the collapsible content. - * Examples: 'plan' for PlanSteps, 'options' for OptionsSelector - */ - outputArtifacts?: Array<'plan' | 'options' | 'edit_summary'> - /** - * Whether this subagent renders its own specialized content - * and the thinking text should be minimal or hidden. - * Used for tools like 'edit' where we show WorkflowEditSummary instead. 
- */ - hideThinkingText?: boolean -} - -/** - * Interrupt button configuration - */ -export interface InterruptButtonConfig { - text: string - icon: LucideIcon -} - -/** - * Configuration for interrupt behavior (Run/Skip buttons) - */ -export interface InterruptConfig { - /** Accept button config */ - accept: InterruptButtonConfig - /** Reject button config */ - reject: InterruptButtonConfig - /** - * Whether to show "Allow Once" button (default accept behavior). - * Default: true - */ - showAllowOnce?: boolean - /** - * Whether to show "Allow Always" button (auto-approve this tool in future). - * Default: true for most tools - */ - showAllowAlways?: boolean -} - -/** - * Complete UI configuration for a tool - */ -export interface ToolUIConfig { - /** - * Whether this is a "special" tool that gets gradient styling. - * Used for workflow operation tools like edit_workflow, build_workflow, etc. - */ - isSpecial?: boolean - - /** - * Interrupt configuration for tools that require user confirmation. - * If not provided, tool auto-executes. - */ - interrupt?: InterruptConfig - - /** - * Secondary action button (like "Move to Background" for run_workflow) - */ - secondaryAction?: SecondaryActionConfig - - /** - * Configuration for rendering params as a table. - * If provided, tool will show an expandable/inline table. - */ - paramsTable?: ParamsTableConfig - - /** - * Subagent configuration for tools that spawn subagents. - * If provided, tool is treated as a subagent tool. - */ - subagent?: SubagentConfig - - /** - * Whether this tool should always show params expanded (not collapsible). - * Used for tools like set_environment_variables that always show their table. - */ - alwaysExpanded?: boolean - - /** - * Custom component type for special rendering. - * The tool-call component will use this to render specialized content. - */ - customRenderer?: 'code' | 'edit_summary' | 'none' -} - -/** - * Registry of tool UI configurations. 
- * Tools can register their UI config here for the tool-call component to use. - */ -const toolUIConfigs: Record = {} - -/** - * Register a tool's UI configuration - */ -export function registerToolUIConfig(toolName: string, config: ToolUIConfig): void { - toolUIConfigs[toolName] = config -} - -/** - * Get a tool's UI configuration - */ -export function getToolUIConfig(toolName: string): ToolUIConfig | undefined { - return toolUIConfigs[toolName] -} - -/** - * Check if a tool is a subagent tool - */ -export function isSubagentTool(toolName: string): boolean { - return !!toolUIConfigs[toolName]?.subagent -} - -/** - * Check if a tool is a "special" tool (gets gradient styling) - */ -export function isSpecialTool(toolName: string): boolean { - return !!toolUIConfigs[toolName]?.isSpecial -} - -/** - * Check if a tool has interrupt (requires user confirmation) - */ -export function hasInterrupt(toolName: string): boolean { - return !!toolUIConfigs[toolName]?.interrupt -} - -/** - * Get subagent labels for a tool - */ -export function getSubagentLabels( - toolName: string, - isStreaming: boolean -): { streaming: string; completed: string } | undefined { - const config = toolUIConfigs[toolName]?.subagent - if (!config) return undefined - return { - streaming: config.streamingLabel, - completed: config.completedLabel, - } -} - -/** - * Get all registered tool UI configs (for debugging) - */ -export function getAllToolUIConfigs(): Record { - return { ...toolUIConfigs } -} diff --git a/apps/sim/lib/copilot/tools/client/user/get-credentials.ts b/apps/sim/lib/copilot/tools/client/user/get-credentials.ts deleted file mode 100644 index 0623693c47..0000000000 --- a/apps/sim/lib/copilot/tools/client/user/get-credentials.ts +++ /dev/null @@ -1,41 +0,0 @@ -import { Key, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class 
GetCredentialsClientTool extends BaseClientTool { - static readonly id = 'get_credentials' - - constructor(toolCallId: string) { - super(toolCallId, GetCredentialsClientTool.id, GetCredentialsClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Fetching connected integrations', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Fetching connected integrations', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Fetching connected integrations', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Fetched connected integrations', icon: Key }, - [ClientToolCallState.error]: { - text: 'Failed to fetch connected integrations', - icon: XCircle, - }, - [ClientToolCallState.aborted]: { - text: 'Aborted fetching connected integrations', - icon: MinusCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped fetching connected integrations', - icon: MinusCircle, - }, - }, - } - - async execute(): Promise { - // Tool execution is handled server-side by the orchestrator. - // Client tool classes are retained for UI display configuration only. 
- this.setState(ClientToolCallState.success) - } -} diff --git a/apps/sim/lib/copilot/tools/client/user/set-environment-variables.ts b/apps/sim/lib/copilot/tools/client/user/set-environment-variables.ts deleted file mode 100644 index 415987c8e1..0000000000 --- a/apps/sim/lib/copilot/tools/client/user/set-environment-variables.ts +++ /dev/null @@ -1,126 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Settings2, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' -import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas' -import { useEnvironmentStore } from '@/stores/settings/environment' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface SetEnvArgs { - variables: Record - workflowId?: string -} - -export class SetEnvironmentVariablesClientTool extends BaseClientTool { - static readonly id = 'set_environment_variables' - - constructor(toolCallId: string) { - super( - toolCallId, - SetEnvironmentVariablesClientTool.id, - SetEnvironmentVariablesClientTool.metadata - ) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to set environment variables', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Set environment variables?', icon: Settings2 }, - [ClientToolCallState.executing]: { text: 'Setting environment variables', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Set environment variables', icon: Settings2 }, - [ClientToolCallState.error]: { text: 'Failed to set environment variables', icon: X }, - [ClientToolCallState.aborted]: { - text: 'Aborted setting environment variables', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped setting environment variables', - icon: 
XCircle, - }, - }, - interrupt: { - accept: { text: 'Apply', icon: Settings2 }, - reject: { text: 'Skip', icon: XCircle }, - }, - uiConfig: { - alwaysExpanded: true, - interrupt: { - accept: { text: 'Apply', icon: Settings2 }, - reject: { text: 'Skip', icon: XCircle }, - showAllowOnce: true, - showAllowAlways: true, - }, - paramsTable: { - columns: [ - { key: 'name', label: 'Variable', width: '36%', editable: true }, - { key: 'value', label: 'Value', width: '64%', editable: true, mono: true }, - ], - extractRows: (params) => { - const variables = params.variables || {} - const entries = Array.isArray(variables) - ? variables.map((v: any, i: number) => [String(i), v.name || `var_${i}`, v.value || '']) - : Object.entries(variables).map(([key, val]) => { - if (typeof val === 'object' && val !== null && 'value' in (val as any)) { - return [key, key, (val as any).value] - } - return [key, key, val] - }) - return entries as Array<[string, ...any[]]> - }, - }, - }, - getDynamicText: (params, state) => { - if (params?.variables && typeof params.variables === 'object') { - const count = Object.keys(params.variables).length - const varText = count === 1 ? 
'variable' : 'variables' - - switch (state) { - case ClientToolCallState.success: - return `Set ${count} ${varText}` - case ClientToolCallState.executing: - return `Setting ${count} ${varText}` - case ClientToolCallState.generating: - return `Preparing to set ${count} ${varText}` - case ClientToolCallState.pending: - return `Set ${count} ${varText}?` - case ClientToolCallState.error: - return `Failed to set ${count} ${varText}` - case ClientToolCallState.aborted: - return `Aborted setting ${count} ${varText}` - case ClientToolCallState.rejected: - return `Skipped setting ${count} ${varText}` - } - } - return undefined - }, - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(_args?: SetEnvArgs): Promise { - // Tool execution is handled server-side by the orchestrator. - this.setState(ClientToolCallState.executing) - } - - async execute(): Promise { - // Tool execution is handled server-side by the orchestrator. - // Client tool classes are retained for UI display configuration only. - this.setState(ClientToolCallState.success) - } -} - -// Register UI config at module load -registerToolUIConfig( - SetEnvironmentVariablesClientTool.id, - SetEnvironmentVariablesClientTool.metadata.uiConfig! 
-) diff --git a/apps/sim/lib/copilot/tools/client/workflow/block-output-utils.ts b/apps/sim/lib/copilot/tools/client/workflow/block-output-utils.ts deleted file mode 100644 index 4916cb7700..0000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/block-output-utils.ts +++ /dev/null @@ -1,142 +0,0 @@ -import { - extractFieldsFromSchema, - parseResponseFormatSafely, -} from '@/lib/core/utils/response-format' -import { getBlockOutputPaths } from '@/lib/workflows/blocks/block-outputs' -import { getBlock } from '@/blocks' -import { normalizeName } from '@/executor/constants' -import { useVariablesStore } from '@/stores/panel/variables/store' -import type { Variable } from '@/stores/panel/variables/types' -import { useSubBlockStore } from '@/stores/workflows/subblock/store' -import type { BlockState, Loop, Parallel } from '@/stores/workflows/workflow/types' - -export interface WorkflowContext { - workflowId: string - blocks: Record - loops: Record - parallels: Record - subBlockValues: Record> -} - -export interface VariableOutput { - id: string - name: string - type: string - tag: string -} - -export function getWorkflowSubBlockValues(workflowId: string): Record> { - const subBlockStore = useSubBlockStore.getState() - return subBlockStore.workflowValues[workflowId] ?? 
{} -} - -export function getMergedSubBlocks( - blocks: Record, - subBlockValues: Record>, - targetBlockId: string -): Record { - const base = blocks[targetBlockId]?.subBlocks || {} - const live = subBlockValues?.[targetBlockId] || {} - const merged: Record = { ...base } - for (const [subId, liveVal] of Object.entries(live)) { - merged[subId] = { ...(base[subId] || {}), value: liveVal } - } - return merged -} - -export function getSubBlockValue( - blocks: Record, - subBlockValues: Record>, - targetBlockId: string, - subBlockId: string -): any { - const live = subBlockValues?.[targetBlockId]?.[subBlockId] - if (live !== undefined) return live - return blocks[targetBlockId]?.subBlocks?.[subBlockId]?.value -} - -export function getWorkflowVariables(workflowId: string): VariableOutput[] { - const getVariablesByWorkflowId = useVariablesStore.getState().getVariablesByWorkflowId - const workflowVariables = getVariablesByWorkflowId(workflowId) - const validVariables = workflowVariables.filter( - (variable: Variable) => variable.name.trim() !== '' - ) - return validVariables.map((variable: Variable) => ({ - id: variable.id, - name: variable.name, - type: variable.type, - tag: `variable.${normalizeName(variable.name)}`, - })) -} - -export function getSubflowInsidePaths( - blockType: 'loop' | 'parallel', - blockId: string, - loops: Record, - parallels: Record -): string[] { - const paths = ['index'] - if (blockType === 'loop') { - const loopType = loops[blockId]?.loopType || 'for' - if (loopType === 'forEach') { - paths.push('currentItem', 'items') - } - } else { - const parallelType = parallels[blockId]?.parallelType || 'count' - if (parallelType === 'collection') { - paths.push('currentItem', 'items') - } - } - return paths -} - -export function computeBlockOutputPaths(block: BlockState, ctx: WorkflowContext): string[] { - const { blocks, loops, parallels, subBlockValues } = ctx - const blockConfig = getBlock(block.type) - const mergedSubBlocks = getMergedSubBlocks(blocks, 
subBlockValues, block.id) - - if (block.type === 'loop' || block.type === 'parallel') { - const insidePaths = getSubflowInsidePaths(block.type, block.id, loops, parallels) - return ['results', ...insidePaths] - } - - if (block.type === 'evaluator') { - const metricsValue = getSubBlockValue(blocks, subBlockValues, block.id, 'metrics') - if (metricsValue && Array.isArray(metricsValue) && metricsValue.length > 0) { - const validMetrics = metricsValue.filter((metric: { name?: string }) => metric?.name) - return validMetrics.map((metric: { name: string }) => metric.name.toLowerCase()) - } - return getBlockOutputPaths(block.type, mergedSubBlocks) - } - - if (block.type === 'variables') { - const variablesValue = getSubBlockValue(blocks, subBlockValues, block.id, 'variables') - if (variablesValue && Array.isArray(variablesValue) && variablesValue.length > 0) { - const validAssignments = variablesValue.filter((assignment: { variableName?: string }) => - assignment?.variableName?.trim() - ) - return validAssignments.map((assignment: { variableName: string }) => - assignment.variableName.trim() - ) - } - return [] - } - - if (blockConfig) { - const responseFormatValue = mergedSubBlocks?.responseFormat?.value - const responseFormat = parseResponseFormatSafely(responseFormatValue, block.id) - if (responseFormat) { - const schemaFields = extractFieldsFromSchema(responseFormat) - if (schemaFields.length > 0) { - return schemaFields.map((field) => field.name) - } - } - } - - return getBlockOutputPaths(block.type, mergedSubBlocks, block.triggerMode) -} - -export function formatOutputsWithPrefix(paths: string[], blockName: string): string[] { - const normalizedName = normalizeName(blockName) - return paths.map((path) => `${normalizedName}.${path}`) -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/check-deployment-status.ts b/apps/sim/lib/copilot/tools/client/workflow/check-deployment-status.ts deleted file mode 100644 index a0d3de72e4..0000000000 --- 
a/apps/sim/lib/copilot/tools/client/workflow/check-deployment-status.ts +++ /dev/null @@ -1,215 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Rocket, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface CheckDeploymentStatusArgs { - workflowId?: string -} - -interface ApiDeploymentDetails { - isDeployed: boolean - deployedAt: string | null - endpoint: string | null - apiKey: string | null - needsRedeployment: boolean -} - -interface ChatDeploymentDetails { - isDeployed: boolean - chatId: string | null - identifier: string | null - chatUrl: string | null - title: string | null - description: string | null - authType: string | null - allowedEmails: string[] | null - outputConfigs: Array<{ blockId: string; path: string }> | null - welcomeMessage: string | null - primaryColor: string | null - hasPassword: boolean -} - -interface McpDeploymentDetails { - isDeployed: boolean - servers: Array<{ - serverId: string - serverName: string - toolName: string - toolDescription: string | null - parameterSchema?: Record | null - toolId?: string | null - }> -} - -export class CheckDeploymentStatusClientTool extends BaseClientTool { - static readonly id = 'check_deployment_status' - - constructor(toolCallId: string) { - super(toolCallId, CheckDeploymentStatusClientTool.id, CheckDeploymentStatusClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Checking deployment status', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Checking deployment status', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Checking deployment status', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Checked deployment status', icon: Rocket }, - 
[ClientToolCallState.error]: { text: 'Failed to check deployment status', icon: X }, - [ClientToolCallState.aborted]: { - text: 'Aborted checking deployment status', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped checking deployment status', - icon: XCircle, - }, - }, - interrupt: undefined, - } - - async execute(args?: CheckDeploymentStatusArgs): Promise { - const logger = createLogger('CheckDeploymentStatusClientTool') - try { - this.setState(ClientToolCallState.executing) - - const { activeWorkflowId, workflows } = useWorkflowRegistry.getState() - const workflowId = args?.workflowId || activeWorkflowId - - if (!workflowId) { - throw new Error('No workflow ID provided') - } - - const workflow = workflows[workflowId] - const workspaceId = workflow?.workspaceId - - // Fetch deployment status from all sources - const [apiDeployRes, chatDeployRes, mcpServersRes] = await Promise.all([ - fetch(`/api/workflows/${workflowId}/deploy`), - fetch(`/api/workflows/${workflowId}/chat/status`), - workspaceId ? fetch(`/api/mcp/workflow-servers?workspaceId=${workspaceId}`) : null, - ]) - - const apiDeploy = apiDeployRes.ok ? await apiDeployRes.json() : null - const chatDeploy = chatDeployRes.ok ? await chatDeployRes.json() : null - const mcpServers = mcpServersRes?.ok ? await mcpServersRes.json() : null - - // API deployment details - const isApiDeployed = apiDeploy?.isDeployed || false - const appUrl = typeof window !== 'undefined' ? window.location.origin : '' - const apiDetails: ApiDeploymentDetails = { - isDeployed: isApiDeployed, - deployedAt: apiDeploy?.deployedAt || null, - endpoint: isApiDeployed ? 
`${appUrl}/api/workflows/${workflowId}/execute` : null, - apiKey: apiDeploy?.apiKey || null, - needsRedeployment: apiDeploy?.needsRedeployment === true, - } - - // Chat deployment details - const isChatDeployed = !!(chatDeploy?.isDeployed && chatDeploy?.deployment) - const chatDetails: ChatDeploymentDetails = { - isDeployed: isChatDeployed, - chatId: chatDeploy?.deployment?.id || null, - identifier: chatDeploy?.deployment?.identifier || null, - chatUrl: isChatDeployed ? `${appUrl}/chat/${chatDeploy?.deployment?.identifier}` : null, - title: chatDeploy?.deployment?.title || null, - description: chatDeploy?.deployment?.description || null, - authType: chatDeploy?.deployment?.authType || null, - allowedEmails: Array.isArray(chatDeploy?.deployment?.allowedEmails) - ? chatDeploy?.deployment?.allowedEmails - : null, - outputConfigs: Array.isArray(chatDeploy?.deployment?.outputConfigs) - ? chatDeploy?.deployment?.outputConfigs - : null, - welcomeMessage: chatDeploy?.deployment?.customizations?.welcomeMessage || null, - primaryColor: chatDeploy?.deployment?.customizations?.primaryColor || null, - hasPassword: chatDeploy?.deployment?.hasPassword === true, - } - - // MCP deployment details - find servers that have this workflow as a tool - const mcpServerList = mcpServers?.data?.servers || [] - const mcpToolDeployments: McpDeploymentDetails['servers'] = [] - - for (const server of mcpServerList) { - // Check if this workflow is deployed as a tool on this server - if (server.toolNames && Array.isArray(server.toolNames)) { - // We need to fetch the actual tools to check if this workflow is there - try { - const toolsRes = await fetch( - `/api/mcp/workflow-servers/${server.id}/tools?workspaceId=${workspaceId}` - ) - if (toolsRes.ok) { - const toolsData = await toolsRes.json() - const tools = toolsData.data?.tools || [] - for (const tool of tools) { - if (tool.workflowId === workflowId) { - mcpToolDeployments.push({ - serverId: server.id, - serverName: server.name, - toolName: 
tool.toolName, - toolDescription: tool.toolDescription, - parameterSchema: tool.parameterSchema ?? null, - toolId: tool.id ?? null, - }) - } - } - } - } catch { - // Skip this server if we can't fetch tools - } - } - } - - const isMcpDeployed = mcpToolDeployments.length > 0 - const mcpDetails: McpDeploymentDetails = { - isDeployed: isMcpDeployed, - servers: mcpToolDeployments, - } - - // Build deployment types list - const deploymentTypes: string[] = [] - if (isApiDeployed) deploymentTypes.push('api') - if (isChatDeployed) deploymentTypes.push('chat') - if (isMcpDeployed) deploymentTypes.push('mcp') - - const isDeployed = isApiDeployed || isChatDeployed || isMcpDeployed - - // Build summary message - let message = '' - if (!isDeployed) { - message = 'Workflow is not deployed' - } else { - const parts: string[] = [] - if (isApiDeployed) parts.push('API') - if (isChatDeployed) parts.push(`Chat (${chatDetails.identifier})`) - if (isMcpDeployed) { - const serverNames = mcpToolDeployments.map((d) => d.serverName).join(', ') - parts.push(`MCP (${serverNames})`) - } - message = `Workflow is deployed as: ${parts.join(', ')}` - } - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, message, { - isDeployed, - deploymentTypes, - api: apiDetails, - chat: chatDetails, - mcp: mcpDetails, - }) - - logger.info('Checked deployment status', { isDeployed, deploymentTypes }) - } catch (e: any) { - logger.error('Check deployment status failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to check deployment status') - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/create-workspace-mcp-server.ts b/apps/sim/lib/copilot/tools/client/workflow/create-workspace-mcp-server.ts deleted file mode 100644 index f50832184f..0000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/create-workspace-mcp-server.ts +++ /dev/null @@ -1,155 +0,0 @@ -import { createLogger } 
from '@sim/logger' -import { Loader2, Plus, Server, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { useCopilotStore } from '@/stores/panel/copilot/store' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -export interface CreateWorkspaceMcpServerArgs { - /** Name of the MCP server */ - name: string - /** Optional description */ - description?: string - workspaceId?: string -} - -/** - * Create workspace MCP server tool. - * Creates a new MCP server in the workspace that workflows can be deployed to as tools. - */ -export class CreateWorkspaceMcpServerClientTool extends BaseClientTool { - static readonly id = 'create_workspace_mcp_server' - - constructor(toolCallId: string) { - super( - toolCallId, - CreateWorkspaceMcpServerClientTool.id, - CreateWorkspaceMcpServerClientTool.metadata - ) - } - - getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined { - const toolCallsById = useCopilotStore.getState().toolCallsById - const toolCall = toolCallsById[this.toolCallId] - const params = toolCall?.params as CreateWorkspaceMcpServerArgs | undefined - - const serverName = params?.name || 'MCP Server' - - return { - accept: { text: `Create "${serverName}"`, icon: Plus }, - reject: { text: 'Skip', icon: XCircle }, - } - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to create MCP server', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Create MCP server?', icon: Server }, - [ClientToolCallState.executing]: { text: 'Creating MCP server', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Created MCP server', icon: Server }, - [ClientToolCallState.error]: { text: 'Failed to create MCP server', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted creating MCP server', icon: XCircle }, - 
[ClientToolCallState.rejected]: { text: 'Skipped creating MCP server', icon: XCircle }, - }, - interrupt: { - accept: { text: 'Create', icon: Plus }, - reject: { text: 'Skip', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const name = params?.name || 'MCP server' - switch (state) { - case ClientToolCallState.success: - return `Created MCP server "${name}"` - case ClientToolCallState.executing: - return `Creating MCP server "${name}"` - case ClientToolCallState.generating: - return `Preparing to create "${name}"` - case ClientToolCallState.pending: - return `Create MCP server "${name}"?` - case ClientToolCallState.error: - return `Failed to create "${name}"` - } - return undefined - }, - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: CreateWorkspaceMcpServerArgs): Promise { - const logger = createLogger('CreateWorkspaceMcpServerClientTool') - try { - if (!args?.name) { - throw new Error('Server name is required') - } - - // Get workspace ID from active workflow if not provided - const { activeWorkflowId, workflows } = useWorkflowRegistry.getState() - let workspaceId = args?.workspaceId - - if (!workspaceId && activeWorkflowId) { - workspaceId = workflows[activeWorkflowId]?.workspaceId - } - - if (!workspaceId) { - throw new Error('No workspace ID available') - } - - this.setState(ClientToolCallState.executing) - - const res = await fetch('/api/mcp/workflow-servers', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - workspaceId, - name: args.name.trim(), - description: args.description?.trim() || null, - }), - }) - - const data = await res.json() - - if (!res.ok) { - throw new Error(data.error || `Failed to create MCP server (${res.status})`) - } - - const server = data.data?.server - if (!server) { - throw new Error('Server creation response missing server data') - } - - 
this.setState(ClientToolCallState.success) - await this.markToolComplete( - 200, - `MCP server "${args.name}" created successfully. You can now deploy workflows to it using deploy_mcp.`, - { - success: true, - serverId: server.id, - serverName: server.name, - description: server.description, - } - ) - - logger.info(`Created MCP server: ${server.name} (${server.id})`) - } catch (e: any) { - logger.error('Failed to create MCP server', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to create MCP server', { - success: false, - error: e?.message, - }) - } - } - - async execute(args?: CreateWorkspaceMcpServerArgs): Promise { - await this.handleAccept(args) - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/deploy-api.ts b/apps/sim/lib/copilot/tools/client/workflow/deploy-api.ts deleted file mode 100644 index c850dd4933..0000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/deploy-api.ts +++ /dev/null @@ -1,286 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Rocket, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' -import { getBaseUrl } from '@/lib/core/utils/urls' -import { getInputFormatExample } from '@/lib/workflows/operations/deployment-utils' -import { useCopilotStore } from '@/stores/panel/copilot/store' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface DeployApiArgs { - action: 'deploy' | 'undeploy' - workflowId?: string -} - -/** - * Deploy API tool for deploying workflows as REST APIs. - * This tool handles both deploying and undeploying workflows via the API endpoint. 
- */ -export class DeployApiClientTool extends BaseClientTool { - static readonly id = 'deploy_api' - - constructor(toolCallId: string) { - super(toolCallId, DeployApiClientTool.id, DeployApiClientTool.metadata) - } - - /** - * Override to provide dynamic button text based on action - */ - getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined { - const toolCallsById = useCopilotStore.getState().toolCallsById - const toolCall = toolCallsById[this.toolCallId] - const params = toolCall?.params as DeployApiArgs | undefined - - const action = params?.action || 'deploy' - - const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId - const isAlreadyDeployed = workflowId - ? useWorkflowRegistry.getState().getWorkflowDeploymentStatus(workflowId)?.isDeployed - : false - - let buttonText = action === 'undeploy' ? 'Undeploy' : 'Deploy' - - if (action === 'deploy' && isAlreadyDeployed) { - buttonText = 'Redeploy' - } - - return { - accept: { text: buttonText, icon: Rocket }, - reject: { text: 'Skip', icon: XCircle }, - } - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to deploy API', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Deploy as API?', icon: Rocket }, - [ClientToolCallState.executing]: { text: 'Deploying API', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Deployed API', icon: Rocket }, - [ClientToolCallState.error]: { text: 'Failed to deploy API', icon: XCircle }, - [ClientToolCallState.aborted]: { - text: 'Aborted deploying API', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped deploying API', - icon: XCircle, - }, - }, - interrupt: { - accept: { text: 'Deploy', icon: Rocket }, - reject: { text: 'Skip', icon: XCircle }, - }, - uiConfig: { - isSpecial: true, - interrupt: { - accept: { text: 'Deploy', icon: Rocket }, - reject: { text: 'Skip', icon: XCircle }, - showAllowOnce: 
true, - showAllowAlways: true, - }, - }, - getDynamicText: (params, state) => { - const action = params?.action === 'undeploy' ? 'undeploy' : 'deploy' - - const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId - const isAlreadyDeployed = workflowId - ? useWorkflowRegistry.getState().getWorkflowDeploymentStatus(workflowId)?.isDeployed - : false - - let actionText = action - let actionTextIng = action === 'undeploy' ? 'undeploying' : 'deploying' - const actionTextPast = action === 'undeploy' ? 'undeployed' : 'deployed' - - if (action === 'deploy' && isAlreadyDeployed) { - actionText = 'redeploy' - actionTextIng = 'redeploying' - } - - const actionCapitalized = actionText.charAt(0).toUpperCase() + actionText.slice(1) - - switch (state) { - case ClientToolCallState.success: - return `API ${actionTextPast}` - case ClientToolCallState.executing: - return `${actionCapitalized}ing API` - case ClientToolCallState.generating: - return `Preparing to ${actionText} API` - case ClientToolCallState.pending: - return `${actionCapitalized} API?` - case ClientToolCallState.error: - return `Failed to ${actionText} API` - case ClientToolCallState.aborted: - return `Aborted ${actionTextIng} API` - case ClientToolCallState.rejected: - return `Skipped ${actionTextIng} API` - } - return undefined - }, - } - - /** - * Checks if the user has any API keys (workspace or personal) - */ - private async hasApiKeys(workspaceId: string): Promise { - try { - const [workspaceRes, personalRes] = await Promise.all([ - fetch(`/api/workspaces/${workspaceId}/api-keys`), - fetch('/api/users/me/api-keys'), - ]) - - if (!workspaceRes.ok || !personalRes.ok) { - return false - } - - const workspaceData = await workspaceRes.json() - const personalData = await personalRes.json() - - const workspaceKeys = (workspaceData?.keys || []) as Array - const personalKeys = (personalData?.keys || []) as Array - - return workspaceKeys.length > 0 || personalKeys.length > 0 - } catch (error) 
{ - const logger = createLogger('DeployApiClientTool') - logger.warn('Failed to check API keys:', error) - return false - } - } - - /** - * Opens the settings modal to the API keys tab - */ - private openApiKeysModal(): void { - window.dispatchEvent(new CustomEvent('open-settings', { detail: { tab: 'apikeys' } })) - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: DeployApiArgs): Promise { - const logger = createLogger('DeployApiClientTool') - try { - const action = args?.action || 'deploy' - const { activeWorkflowId, workflows } = useWorkflowRegistry.getState() - const workflowId = args?.workflowId || activeWorkflowId - - if (!workflowId) { - throw new Error('No workflow ID provided') - } - - const workflow = workflows[workflowId] - const workspaceId = workflow?.workspaceId - - // For deploy action, check if user has API keys first - if (action === 'deploy') { - if (!workspaceId) { - throw new Error('Workflow workspace not found') - } - - const hasKeys = await this.hasApiKeys(workspaceId) - - if (!hasKeys) { - this.setState(ClientToolCallState.rejected) - this.openApiKeysModal() - - await this.markToolComplete( - 200, - 'Cannot deploy without an API key. Opened API key settings so you can create one. Once you have an API key, try deploying again.', - { - needsApiKey: true, - message: - 'You need to create an API key before you can deploy your workflow. The API key settings have been opened for you. After creating an API key, you can deploy your workflow.', - } - ) - return - } - } - - this.setState(ClientToolCallState.executing) - - const endpoint = `/api/workflows/${workflowId}/deploy` - const method = action === 'deploy' ? 'POST' : 'DELETE' - - const res = await fetch(endpoint, { - method, - headers: { 'Content-Type': 'application/json' }, - body: action === 'deploy' ? 
JSON.stringify({ deployChatEnabled: false }) : undefined, - }) - - if (!res.ok) { - const txt = await res.text().catch(() => '') - throw new Error(txt || `Server error (${res.status})`) - } - - const json = await res.json() - - let successMessage = '' - let resultData: any = { - action, - isDeployed: action === 'deploy', - deployedAt: json.deployedAt, - } - - if (action === 'deploy') { - const appUrl = getBaseUrl() - const apiEndpoint = `${appUrl}/api/workflows/${workflowId}/execute` - const apiKeyPlaceholder = '$SIM_API_KEY' - - const inputExample = getInputFormatExample(false) - const curlCommand = `curl -X POST -H "X-API-Key: ${apiKeyPlaceholder}" -H "Content-Type: application/json"${inputExample} ${apiEndpoint}` - - successMessage = 'Workflow deployed successfully as API. You can now call it via REST.' - - resultData = { - ...resultData, - endpoint: apiEndpoint, - curlCommand, - apiKeyPlaceholder, - } - } else { - successMessage = 'Workflow undeployed successfully.' - } - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, successMessage, resultData) - - // Refresh the workflow registry to update deployment status - try { - const setDeploymentStatus = useWorkflowRegistry.getState().setDeploymentStatus - if (action === 'deploy') { - setDeploymentStatus( - workflowId, - true, - json.deployedAt ? new Date(json.deployedAt) : undefined, - json.apiKey || '' - ) - } else { - setDeploymentStatus(workflowId, false, undefined, '') - } - const actionPast = action === 'undeploy' ? 
'undeployed' : 'deployed' - logger.info(`Workflow ${actionPast} as API and registry updated`) - } catch (error) { - logger.warn('Failed to update workflow registry:', error) - } - } catch (e: any) { - logger.error('Deploy API failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to deploy API') - } - } - - async execute(args?: DeployApiArgs): Promise { - await this.handleAccept(args) - } -} - -// Register UI config at module load -registerToolUIConfig(DeployApiClientTool.id, DeployApiClientTool.metadata.uiConfig!) diff --git a/apps/sim/lib/copilot/tools/client/workflow/deploy-chat.ts b/apps/sim/lib/copilot/tools/client/workflow/deploy-chat.ts deleted file mode 100644 index 24ad19a53b..0000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/deploy-chat.ts +++ /dev/null @@ -1,381 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, MessageSquare, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' -import { useCopilotStore } from '@/stores/panel/copilot/store' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -export type ChatAuthType = 'public' | 'password' | 'email' | 'sso' - -export interface OutputConfig { - blockId: string - path: string -} - -export interface DeployChatArgs { - action: 'deploy' | 'undeploy' - workflowId?: string - /** URL slug for the chat (lowercase letters, numbers, hyphens only) */ - identifier?: string - /** Display title for the chat interface */ - title?: string - /** Optional description */ - description?: string - /** Authentication type: public, password, email, or sso */ - authType?: ChatAuthType - /** Password for password-protected chats */ - password?: string - /** List of allowed emails/domains for email or SSO auth */ - 
allowedEmails?: string[] - /** Welcome message shown to users */ - welcomeMessage?: string - /** Output configurations specifying which block outputs to display in chat */ - outputConfigs?: OutputConfig[] -} - -/** - * Deploy Chat tool for deploying workflows as chat interfaces. - * This tool handles deploying workflows with chat-specific configuration - * including authentication, customization, and output selection. - */ -export class DeployChatClientTool extends BaseClientTool { - static readonly id = 'deploy_chat' - - constructor(toolCallId: string) { - super(toolCallId, DeployChatClientTool.id, DeployChatClientTool.metadata) - } - - getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined { - const toolCallsById = useCopilotStore.getState().toolCallsById - const toolCall = toolCallsById[this.toolCallId] - const params = toolCall?.params as DeployChatArgs | undefined - - const action = params?.action || 'deploy' - const buttonText = action === 'undeploy' ? 'Undeploy' : 'Deploy Chat' - - return { - accept: { text: buttonText, icon: MessageSquare }, - reject: { text: 'Skip', icon: XCircle }, - } - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to deploy chat', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Deploy as chat?', icon: MessageSquare }, - [ClientToolCallState.executing]: { text: 'Deploying chat', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Deployed chat', icon: MessageSquare }, - [ClientToolCallState.error]: { text: 'Failed to deploy chat', icon: XCircle }, - [ClientToolCallState.aborted]: { - text: 'Aborted deploying chat', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped deploying chat', - icon: XCircle, - }, - }, - interrupt: { - accept: { text: 'Deploy Chat', icon: MessageSquare }, - reject: { text: 'Skip', icon: XCircle }, - }, - uiConfig: { - isSpecial: true, - interrupt: { - accept: { 
text: 'Deploy Chat', icon: MessageSquare }, - reject: { text: 'Skip', icon: XCircle }, - showAllowOnce: true, - showAllowAlways: true, - }, - }, - getDynamicText: (params, state) => { - const action = params?.action === 'undeploy' ? 'undeploy' : 'deploy' - - switch (state) { - case ClientToolCallState.success: - return action === 'undeploy' ? 'Chat undeployed' : 'Chat deployed' - case ClientToolCallState.executing: - return action === 'undeploy' ? 'Undeploying chat' : 'Deploying chat' - case ClientToolCallState.generating: - return `Preparing to ${action} chat` - case ClientToolCallState.pending: - return action === 'undeploy' ? 'Undeploy chat?' : 'Deploy as chat?' - case ClientToolCallState.error: - return `Failed to ${action} chat` - case ClientToolCallState.aborted: - return action === 'undeploy' ? 'Aborted undeploying chat' : 'Aborted deploying chat' - case ClientToolCallState.rejected: - return action === 'undeploy' ? 'Skipped undeploying chat' : 'Skipped deploying chat' - } - return undefined - }, - } - - /** - * Generates a default identifier from the workflow name - */ - private generateIdentifier(workflowName: string): string { - return workflowName - .toLowerCase() - .replace(/[^a-z0-9]+/g, '-') - .replace(/^-|-$/g, '') - .substring(0, 50) - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: DeployChatArgs): Promise { - const logger = createLogger('DeployChatClientTool') - try { - const action = args?.action || 'deploy' - const { activeWorkflowId, workflows } = useWorkflowRegistry.getState() - const workflowId = args?.workflowId || activeWorkflowId - - if (!workflowId) { - throw new Error('No workflow ID provided') - } - - const workflow = workflows[workflowId] - - // Handle undeploy action - if (action === 'undeploy') { - this.setState(ClientToolCallState.executing) - - // First get the chat deployment ID - const statusRes = await 
fetch(`/api/workflows/${workflowId}/chat/status`) - if (!statusRes.ok) { - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, 'Failed to check chat deployment status', { - success: false, - action: 'undeploy', - isDeployed: false, - error: 'Failed to check chat deployment status', - errorCode: 'SERVER_ERROR', - }) - return - } - - const statusJson = await statusRes.json() - if (!statusJson.isDeployed || !statusJson.deployment?.id) { - this.setState(ClientToolCallState.error) - await this.markToolComplete(400, 'No active chat deployment found for this workflow', { - success: false, - action: 'undeploy', - isDeployed: false, - error: 'No active chat deployment found for this workflow', - errorCode: 'VALIDATION_ERROR', - }) - return - } - - const chatId = statusJson.deployment.id - - // Delete the chat deployment - const res = await fetch(`/api/chat/manage/${chatId}`, { - method: 'DELETE', - headers: { 'Content-Type': 'application/json' }, - }) - - if (!res.ok) { - const txt = await res.text().catch(() => '') - this.setState(ClientToolCallState.error) - await this.markToolComplete(res.status, txt || `Server error (${res.status})`, { - success: false, - action: 'undeploy', - isDeployed: true, - error: txt || 'Failed to undeploy chat', - errorCode: 'SERVER_ERROR', - }) - return - } - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, 'Chat deployment removed successfully.', { - success: true, - action: 'undeploy', - isDeployed: false, - }) - return - } - - this.setState(ClientToolCallState.executing) - - const statusRes = await fetch(`/api/workflows/${workflowId}/chat/status`) - const statusJson = statusRes.ok ? 
await statusRes.json() : null - const existingDeployment = statusJson?.deployment || null - - const baseIdentifier = - existingDeployment?.identifier || this.generateIdentifier(workflow?.name || 'chat') - const baseTitle = existingDeployment?.title || workflow?.name || 'Chat' - const baseDescription = existingDeployment?.description || '' - const baseAuthType = existingDeployment?.authType || 'public' - const baseWelcomeMessage = - existingDeployment?.customizations?.welcomeMessage || 'Hi there! How can I help you today?' - const basePrimaryColor = - existingDeployment?.customizations?.primaryColor || 'var(--brand-primary-hover-hex)' - const baseAllowedEmails = Array.isArray(existingDeployment?.allowedEmails) - ? existingDeployment.allowedEmails - : [] - const baseOutputConfigs = Array.isArray(existingDeployment?.outputConfigs) - ? existingDeployment.outputConfigs - : [] - - const identifier = args?.identifier || baseIdentifier - const title = args?.title || baseTitle - const description = args?.description ?? 
baseDescription - const authType = args?.authType || baseAuthType - const welcomeMessage = args?.welcomeMessage || baseWelcomeMessage - const outputConfigs = args?.outputConfigs || baseOutputConfigs - const allowedEmails = args?.allowedEmails || baseAllowedEmails - const primaryColor = basePrimaryColor - - if (!identifier || !title) { - throw new Error('Chat identifier and title are required') - } - - if (authType === 'password' && !args?.password && !existingDeployment?.hasPassword) { - throw new Error('Password is required when using password protection') - } - - if ((authType === 'email' || authType === 'sso') && allowedEmails.length === 0) { - throw new Error(`At least one email or domain is required when using ${authType} access`) - } - - const payload = { - workflowId, - identifier: identifier.trim(), - title: title.trim(), - description: description.trim(), - customizations: { - primaryColor, - welcomeMessage: welcomeMessage.trim(), - }, - authType, - password: authType === 'password' ? args?.password : undefined, - allowedEmails: authType === 'email' || authType === 'sso' ? allowedEmails : [], - outputConfigs, - } - - const isUpdating = Boolean(existingDeployment?.id) - const endpoint = isUpdating ? `/api/chat/manage/${existingDeployment.id}` : '/api/chat' - const method = isUpdating ? 'PATCH' : 'POST' - - const res = await fetch(endpoint, { - method, - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify(payload), - }) - - const json = await res.json() - - if (!res.ok) { - if (json.error === 'Identifier already in use') { - this.setState(ClientToolCallState.error) - await this.markToolComplete( - 400, - `The identifier "${identifier}" is already in use. 
Please choose a different one.`, - { - success: false, - action: 'deploy', - isDeployed: false, - identifier, - error: `Identifier "${identifier}" is already taken`, - errorCode: 'IDENTIFIER_TAKEN', - } - ) - return - } - - // Handle validation errors - if (json.code === 'VALIDATION_ERROR') { - this.setState(ClientToolCallState.error) - await this.markToolComplete(400, json.error || 'Validation error', { - success: false, - action: 'deploy', - isDeployed: false, - error: json.error, - errorCode: 'VALIDATION_ERROR', - }) - return - } - - this.setState(ClientToolCallState.error) - await this.markToolComplete(res.status, json.error || 'Failed to deploy chat', { - success: false, - action: 'deploy', - isDeployed: false, - error: json.error || 'Server error', - errorCode: 'SERVER_ERROR', - }) - return - } - - if (!json.chatUrl) { - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, 'Response missing chat URL', { - success: false, - action: 'deploy', - isDeployed: false, - error: 'Response missing chat URL', - errorCode: 'SERVER_ERROR', - }) - return - } - - this.setState(ClientToolCallState.success) - await this.markToolComplete( - 200, - `Chat deployed successfully! 
Available at: ${json.chatUrl}`, - { - success: true, - action: 'deploy', - isDeployed: true, - chatId: json.id, - chatUrl: json.chatUrl, - identifier, - title, - authType, - } - ) - - // Update the workflow registry to reflect deployment status - // Chat deployment also deploys the API, so we update the registry - try { - const setDeploymentStatus = useWorkflowRegistry.getState().setDeploymentStatus - setDeploymentStatus(workflowId, true, new Date(), '') - logger.info('Workflow deployment status updated in registry') - } catch (error) { - logger.warn('Failed to update workflow registry:', error) - } - - logger.info('Chat deployed successfully:', json.chatUrl) - } catch (e: any) { - logger.error('Deploy chat failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to deploy chat', { - success: false, - action: 'deploy', - isDeployed: false, - error: e?.message || 'Failed to deploy chat', - errorCode: 'SERVER_ERROR', - }) - } - } - - async execute(args?: DeployChatArgs): Promise { - await this.handleAccept(args) - } -} - -// Register UI config at module load -registerToolUIConfig(DeployChatClientTool.id, DeployChatClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/workflow/deploy-mcp.ts b/apps/sim/lib/copilot/tools/client/workflow/deploy-mcp.ts deleted file mode 100644 index bcd87fc252..0000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/deploy-mcp.ts +++ /dev/null @@ -1,250 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Server, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -export interface ParameterDescription { - name: string - description: string -} - -export interface DeployMcpArgs { - /** The MCP server ID to deploy to (get from list_workspace_mcp_servers) */ - serverId: string - /** Optional workflow ID (defaults to active workflow) */ - workflowId?: string - /** Custom tool name (defaults to workflow name) */ - toolName?: string - /** Custom tool description */ - toolDescription?: string - /** Parameter descriptions to include in the schema */ - parameterDescriptions?: ParameterDescription[] -} - -/** - * Deploy MCP tool. - * Deploys the workflow as an MCP tool to a workspace MCP server. 
- */ -export class DeployMcpClientTool extends BaseClientTool { - static readonly id = 'deploy_mcp' - - constructor(toolCallId: string) { - super(toolCallId, DeployMcpClientTool.id, DeployMcpClientTool.metadata) - } - - getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined { - return { - accept: { text: 'Deploy to MCP', icon: Server }, - reject: { text: 'Skip', icon: XCircle }, - } - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to deploy to MCP', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Deploy to MCP server?', icon: Server }, - [ClientToolCallState.executing]: { text: 'Deploying to MCP', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Deployed to MCP', icon: Server }, - [ClientToolCallState.error]: { text: 'Failed to deploy to MCP', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted MCP deployment', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped MCP deployment', icon: XCircle }, - }, - interrupt: { - accept: { text: 'Deploy', icon: Server }, - reject: { text: 'Skip', icon: XCircle }, - }, - uiConfig: { - isSpecial: true, - interrupt: { - accept: { text: 'Deploy', icon: Server }, - reject: { text: 'Skip', icon: XCircle }, - showAllowOnce: true, - showAllowAlways: true, - }, - }, - getDynamicText: (params, state) => { - const toolName = params?.toolName || 'workflow' - switch (state) { - case ClientToolCallState.success: - return `Deployed "${toolName}" to MCP` - case ClientToolCallState.executing: - return `Deploying "${toolName}" to MCP` - case ClientToolCallState.generating: - return `Preparing to deploy to MCP` - case ClientToolCallState.pending: - return `Deploy "${toolName}" to MCP?` - case ClientToolCallState.error: - return `Failed to deploy to MCP` - } - return undefined - }, - } - - async handleReject(): Promise { - await super.handleReject() - 
this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: DeployMcpArgs): Promise { - const logger = createLogger('DeployMcpClientTool') - try { - if (!args?.serverId) { - throw new Error( - 'Server ID is required. Use list_workspace_mcp_servers to get available servers.' - ) - } - - const { activeWorkflowId, workflows } = useWorkflowRegistry.getState() - const workflowId = args?.workflowId || activeWorkflowId - - if (!workflowId) { - throw new Error('No workflow ID available') - } - - const workflow = workflows[workflowId] - const workspaceId = workflow?.workspaceId - - if (!workspaceId) { - throw new Error('Workflow workspace not found') - } - - // Check if workflow is deployed - const deploymentStatus = useWorkflowRegistry - .getState() - .getWorkflowDeploymentStatus(workflowId) - if (!deploymentStatus?.isDeployed) { - throw new Error( - 'Workflow must be deployed before adding as an MCP tool. Use deploy_api first.' - ) - } - - this.setState(ClientToolCallState.executing) - - let parameterSchema: Record | undefined - if (args?.parameterDescriptions && args.parameterDescriptions.length > 0) { - const properties: Record = {} - for (const param of args.parameterDescriptions) { - properties[param.name] = { description: param.description } - } - parameterSchema = { properties } - } - - const res = await fetch( - `/api/mcp/workflow-servers/${args.serverId}/tools?workspaceId=${workspaceId}`, - { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - workflowId, - toolName: args.toolName?.trim(), - toolDescription: args.toolDescription?.trim(), - parameterSchema, - }), - } - ) - - const data = await res.json() - - if (!res.ok) { - if (data.error?.includes('already added')) { - const toolsRes = await fetch( - `/api/mcp/workflow-servers/${args.serverId}/tools?workspaceId=${workspaceId}` - ) - const toolsJson = toolsRes.ok ? 
await toolsRes.json() : null - const tools = toolsJson?.data?.tools || [] - const existingTool = tools.find((tool: any) => tool.workflowId === workflowId) - if (!existingTool?.id) { - throw new Error('This workflow is already deployed to this MCP server') - } - const patchRes = await fetch( - `/api/mcp/workflow-servers/${args.serverId}/tools/${existingTool.id}?workspaceId=${workspaceId}`, - { - method: 'PATCH', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - toolName: args.toolName?.trim(), - toolDescription: args.toolDescription?.trim(), - parameterSchema, - }), - } - ) - const patchJson = patchRes.ok ? await patchRes.json() : null - if (!patchRes.ok) { - const patchError = patchJson?.error || `Failed to update MCP tool (${patchRes.status})` - throw new Error(patchError) - } - const updatedTool = patchJson?.data?.tool - this.setState(ClientToolCallState.success) - await this.markToolComplete( - 200, - `Workflow MCP tool updated to "${updatedTool?.toolName || existingTool.toolName}".`, - { - success: true, - toolId: updatedTool?.id || existingTool.id, - toolName: updatedTool?.toolName || existingTool.toolName, - toolDescription: updatedTool?.toolDescription || existingTool.toolDescription, - serverId: args.serverId, - updated: true, - } - ) - logger.info('Updated workflow MCP tool', { toolId: existingTool.id }) - return - } - if (data.error?.includes('not deployed')) { - throw new Error('Workflow must be deployed before adding as an MCP tool') - } - if (data.error?.includes('Start block')) { - throw new Error('Workflow must have a Start block to be used as an MCP tool') - } - if (data.error?.includes('Server not found')) { - throw new Error( - 'MCP server not found. Use list_workspace_mcp_servers to see available servers.' 
- ) - } - throw new Error(data.error || `Failed to deploy to MCP (${res.status})`) - } - - const tool = data.data?.tool - if (!tool) { - throw new Error('Response missing tool data') - } - - this.setState(ClientToolCallState.success) - await this.markToolComplete( - 200, - `Workflow deployed as MCP tool "${tool.toolName}" to server.`, - { - success: true, - toolId: tool.id, - toolName: tool.toolName, - toolDescription: tool.toolDescription, - serverId: args.serverId, - } - ) - - logger.info(`Deployed workflow as MCP tool: ${tool.toolName}`) - } catch (e: any) { - logger.error('Failed to deploy to MCP', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to deploy to MCP', { - success: false, - error: e?.message, - }) - } - } - - async execute(args?: DeployMcpArgs): Promise { - await this.handleAccept(args) - } -} - -// Register UI config at module load -registerToolUIConfig(DeployMcpClientTool.id, DeployMcpClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/workflow/edit-workflow.ts b/apps/sim/lib/copilot/tools/client/workflow/edit-workflow.ts deleted file mode 100644 index 6c56dc1408..0000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/edit-workflow.ts +++ /dev/null @@ -1,47 +0,0 @@ -import { Grid2x2, Grid2x2Check, Grid2x2X, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -export class EditWorkflowClientTool extends BaseClientTool { - static readonly id = 'edit_workflow' - - constructor(toolCallId: string) { - super(toolCallId, EditWorkflowClientTool.id, EditWorkflowClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Editing your workflow', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Editing your workflow', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Edited your workflow', icon: Grid2x2Check }, - [ClientToolCallState.error]: { text: 'Failed to edit your workflow', icon: XCircle }, - [ClientToolCallState.review]: { text: 'Review your workflow changes', icon: Grid2x2 }, - [ClientToolCallState.rejected]: { text: 'Rejected workflow changes', icon: Grid2x2X }, - [ClientToolCallState.aborted]: { text: 'Aborted editing your workflow', icon: MinusCircle }, - [ClientToolCallState.pending]: { text: 'Editing your workflow', icon: Loader2 }, - }, - uiConfig: { - isSpecial: true, - customRenderer: 'edit_summary', - }, - } - - async handleAccept(): Promise { - // Diff store calls this after review acceptance. - this.setState(ClientToolCallState.success) - } - - async execute(): Promise { - // Tool execution is handled server-side by the orchestrator. 
- // The store's tool_result SSE handler applies the diff preview - // via diffStore.setProposedChanges() when the result arrives. - this.setState(ClientToolCallState.success) - } -} - -// Register UI config at module load -registerToolUIConfig(EditWorkflowClientTool.id, EditWorkflowClientTool.metadata.uiConfig!) diff --git a/apps/sim/lib/copilot/tools/client/workflow/get-block-outputs.ts b/apps/sim/lib/copilot/tools/client/workflow/get-block-outputs.ts deleted file mode 100644 index d835678d3e..0000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/get-block-outputs.ts +++ /dev/null @@ -1,144 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Tag, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { - computeBlockOutputPaths, - formatOutputsWithPrefix, - getSubflowInsidePaths, - getWorkflowSubBlockValues, - getWorkflowVariables, -} from '@/lib/copilot/tools/client/workflow/block-output-utils' -import { - GetBlockOutputsResult, - type GetBlockOutputsResultType, -} from '@/lib/copilot/tools/shared/schemas' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' -import { useWorkflowStore } from '@/stores/workflows/workflow/store' - -const logger = createLogger('GetBlockOutputsClientTool') - -interface GetBlockOutputsArgs { - blockIds?: string[] -} - -export class GetBlockOutputsClientTool extends BaseClientTool { - static readonly id = 'get_block_outputs' - - constructor(toolCallId: string) { - super(toolCallId, GetBlockOutputsClientTool.id, GetBlockOutputsClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Getting block outputs', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Getting block outputs', icon: Tag }, - [ClientToolCallState.executing]: { text: 'Getting block outputs', icon: Loader2 }, - 
[ClientToolCallState.aborted]: { text: 'Aborted getting outputs', icon: XCircle }, - [ClientToolCallState.success]: { text: 'Retrieved block outputs', icon: Tag }, - [ClientToolCallState.error]: { text: 'Failed to get outputs', icon: X }, - [ClientToolCallState.rejected]: { text: 'Skipped getting outputs', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const blockIds = params?.blockIds - if (blockIds && Array.isArray(blockIds) && blockIds.length > 0) { - const count = blockIds.length - switch (state) { - case ClientToolCallState.success: - return `Retrieved outputs for ${count} block${count > 1 ? 's' : ''}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Getting outputs for ${count} block${count > 1 ? 's' : ''}` - case ClientToolCallState.error: - return `Failed to get outputs for ${count} block${count > 1 ? 's' : ''}` - } - } - return undefined - }, - } - - async execute(args?: GetBlockOutputsArgs): Promise { - try { - this.setState(ClientToolCallState.executing) - - const { activeWorkflowId } = useWorkflowRegistry.getState() - if (!activeWorkflowId) { - await this.markToolComplete(400, 'No active workflow found') - this.setState(ClientToolCallState.error) - return - } - - const workflowStore = useWorkflowStore.getState() - const blocks = workflowStore.blocks || {} - const loops = workflowStore.loops || {} - const parallels = workflowStore.parallels || {} - const subBlockValues = getWorkflowSubBlockValues(activeWorkflowId) - - const ctx = { workflowId: activeWorkflowId, blocks, loops, parallels, subBlockValues } - const targetBlockIds = - args?.blockIds && args.blockIds.length > 0 ? 
args.blockIds : Object.keys(blocks) - - const blockOutputs: GetBlockOutputsResultType['blocks'] = [] - - for (const blockId of targetBlockIds) { - const block = blocks[blockId] - if (!block?.type) continue - - const blockName = block.name || block.type - - const blockOutput: GetBlockOutputsResultType['blocks'][0] = { - blockId, - blockName, - blockType: block.type, - outputs: [], - } - - // Include triggerMode if the block is in trigger mode - if (block.triggerMode) { - blockOutput.triggerMode = true - } - - if (block.type === 'loop' || block.type === 'parallel') { - const insidePaths = getSubflowInsidePaths(block.type, blockId, loops, parallels) - blockOutput.insideSubflowOutputs = formatOutputsWithPrefix(insidePaths, blockName) - blockOutput.outsideSubflowOutputs = formatOutputsWithPrefix(['results'], blockName) - } else { - const outputPaths = computeBlockOutputPaths(block, ctx) - blockOutput.outputs = formatOutputsWithPrefix(outputPaths, blockName) - } - - blockOutputs.push(blockOutput) - } - - const includeVariables = !args?.blockIds || args.blockIds.length === 0 - const resultData: { - blocks: typeof blockOutputs - variables?: ReturnType - } = { - blocks: blockOutputs, - } - if (includeVariables) { - resultData.variables = getWorkflowVariables(activeWorkflowId) - } - - const result = GetBlockOutputsResult.parse(resultData) - - logger.info('Retrieved block outputs', { - blockCount: blockOutputs.length, - variableCount: resultData.variables?.length ?? 0, - }) - - await this.markToolComplete(200, 'Retrieved block outputs', result) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const message = error instanceof Error ? 
error.message : String(error) - logger.error('Error in tool execution', { toolCallId: this.toolCallId, error, message }) - await this.markToolComplete(500, message || 'Failed to get block outputs') - this.setState(ClientToolCallState.error) - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/get-block-upstream-references.ts b/apps/sim/lib/copilot/tools/client/workflow/get-block-upstream-references.ts deleted file mode 100644 index f02c9958c6..0000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/get-block-upstream-references.ts +++ /dev/null @@ -1,231 +0,0 @@ -import { createLogger } from '@sim/logger' -import { GitBranch, Loader2, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { - computeBlockOutputPaths, - formatOutputsWithPrefix, - getSubflowInsidePaths, - getWorkflowSubBlockValues, - getWorkflowVariables, -} from '@/lib/copilot/tools/client/workflow/block-output-utils' -import { - GetBlockUpstreamReferencesResult, - type GetBlockUpstreamReferencesResultType, -} from '@/lib/copilot/tools/shared/schemas' -import { BlockPathCalculator } from '@/lib/workflows/blocks/block-path-calculator' -import { isInputDefinitionTrigger } from '@/lib/workflows/triggers/input-definition-triggers' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' -import { useWorkflowStore } from '@/stores/workflows/workflow/store' -import type { Loop, Parallel } from '@/stores/workflows/workflow/types' - -const logger = createLogger('GetBlockUpstreamReferencesClientTool') - -interface GetBlockUpstreamReferencesArgs { - blockIds: string[] -} - -export class GetBlockUpstreamReferencesClientTool extends BaseClientTool { - static readonly id = 'get_block_upstream_references' - - constructor(toolCallId: string) { - super( - toolCallId, - GetBlockUpstreamReferencesClientTool.id, - GetBlockUpstreamReferencesClientTool.metadata - ) - } - - 
static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Getting upstream references', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Getting upstream references', icon: GitBranch }, - [ClientToolCallState.executing]: { text: 'Getting upstream references', icon: Loader2 }, - [ClientToolCallState.aborted]: { text: 'Aborted getting references', icon: XCircle }, - [ClientToolCallState.success]: { text: 'Retrieved upstream references', icon: GitBranch }, - [ClientToolCallState.error]: { text: 'Failed to get references', icon: X }, - [ClientToolCallState.rejected]: { text: 'Skipped getting references', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const blockIds = params?.blockIds - if (blockIds && Array.isArray(blockIds) && blockIds.length > 0) { - const count = blockIds.length - switch (state) { - case ClientToolCallState.success: - return `Retrieved references for ${count} block${count > 1 ? 's' : ''}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Getting references for ${count} block${count > 1 ? 's' : ''}` - case ClientToolCallState.error: - return `Failed to get references for ${count} block${count > 1 ? 
's' : ''}` - } - } - return undefined - }, - } - - async execute(args?: GetBlockUpstreamReferencesArgs): Promise { - try { - this.setState(ClientToolCallState.executing) - - if (!args?.blockIds || args.blockIds.length === 0) { - await this.markToolComplete(400, 'blockIds array is required') - this.setState(ClientToolCallState.error) - return - } - - const { activeWorkflowId } = useWorkflowRegistry.getState() - if (!activeWorkflowId) { - await this.markToolComplete(400, 'No active workflow found') - this.setState(ClientToolCallState.error) - return - } - - const workflowStore = useWorkflowStore.getState() - const blocks = workflowStore.blocks || {} - const edges = workflowStore.edges || [] - const loops = workflowStore.loops || {} - const parallels = workflowStore.parallels || {} - const subBlockValues = getWorkflowSubBlockValues(activeWorkflowId) - - const ctx = { workflowId: activeWorkflowId, blocks, loops, parallels, subBlockValues } - const variableOutputs = getWorkflowVariables(activeWorkflowId) - const graphEdges = edges.map((edge) => ({ source: edge.source, target: edge.target })) - - const results: GetBlockUpstreamReferencesResultType['results'] = [] - - for (const blockId of args.blockIds) { - const targetBlock = blocks[blockId] - if (!targetBlock) { - logger.warn(`Block ${blockId} not found`) - continue - } - - const insideSubflows: { blockId: string; blockName: string; blockType: string }[] = [] - const containingLoopIds = new Set() - const containingParallelIds = new Set() - - Object.values(loops as Record).forEach((loop) => { - if (loop?.nodes?.includes(blockId)) { - containingLoopIds.add(loop.id) - const loopBlock = blocks[loop.id] - if (loopBlock) { - insideSubflows.push({ - blockId: loop.id, - blockName: loopBlock.name || loopBlock.type, - blockType: 'loop', - }) - } - } - }) - - Object.values(parallels as Record).forEach((parallel) => { - if (parallel?.nodes?.includes(blockId)) { - containingParallelIds.add(parallel.id) - const parallelBlock = 
blocks[parallel.id] - if (parallelBlock) { - insideSubflows.push({ - blockId: parallel.id, - blockName: parallelBlock.name || parallelBlock.type, - blockType: 'parallel', - }) - } - } - }) - - const ancestorIds = BlockPathCalculator.findAllPathNodes(graphEdges, blockId) - const accessibleIds = new Set(ancestorIds) - accessibleIds.add(blockId) - - const starterBlock = Object.values(blocks).find((b) => isInputDefinitionTrigger(b.type)) - if (starterBlock && ancestorIds.includes(starterBlock.id)) { - accessibleIds.add(starterBlock.id) - } - - containingLoopIds.forEach((loopId) => { - accessibleIds.add(loopId) - loops[loopId]?.nodes?.forEach((nodeId) => accessibleIds.add(nodeId)) - }) - - containingParallelIds.forEach((parallelId) => { - accessibleIds.add(parallelId) - parallels[parallelId]?.nodes?.forEach((nodeId) => accessibleIds.add(nodeId)) - }) - - const accessibleBlocks: GetBlockUpstreamReferencesResultType['results'][0]['accessibleBlocks'] = - [] - - for (const accessibleBlockId of accessibleIds) { - const block = blocks[accessibleBlockId] - if (!block?.type) continue - - const canSelfReference = block.type === 'approval' || block.type === 'human_in_the_loop' - if (accessibleBlockId === blockId && !canSelfReference) continue - - const blockName = block.name || block.type - let accessContext: 'inside' | 'outside' | undefined - let outputPaths: string[] - - if (block.type === 'loop' || block.type === 'parallel') { - const isInside = - (block.type === 'loop' && containingLoopIds.has(accessibleBlockId)) || - (block.type === 'parallel' && containingParallelIds.has(accessibleBlockId)) - - accessContext = isInside ? 'inside' : 'outside' - outputPaths = isInside - ? 
getSubflowInsidePaths(block.type, accessibleBlockId, loops, parallels) - : ['results'] - } else { - outputPaths = computeBlockOutputPaths(block, ctx) - } - - const formattedOutputs = formatOutputsWithPrefix(outputPaths, blockName) - - const entry: GetBlockUpstreamReferencesResultType['results'][0]['accessibleBlocks'][0] = { - blockId: accessibleBlockId, - blockName, - blockType: block.type, - outputs: formattedOutputs, - } - - // Include triggerMode if the block is in trigger mode - if (block.triggerMode) { - entry.triggerMode = true - } - - if (accessContext) entry.accessContext = accessContext - accessibleBlocks.push(entry) - } - - const resultEntry: GetBlockUpstreamReferencesResultType['results'][0] = { - blockId, - blockName: targetBlock.name || targetBlock.type, - accessibleBlocks, - variables: variableOutputs, - } - - if (insideSubflows.length > 0) resultEntry.insideSubflows = insideSubflows - results.push(resultEntry) - } - - const result = GetBlockUpstreamReferencesResult.parse({ results }) - - logger.info('Retrieved upstream references', { - blockIds: args.blockIds, - resultCount: results.length, - }) - - await this.markToolComplete(200, 'Retrieved upstream references', result) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const message = error instanceof Error ? 
error.message : String(error) - logger.error('Error in tool execution', { toolCallId: this.toolCallId, error, message }) - await this.markToolComplete(500, message || 'Failed to get upstream references') - this.setState(ClientToolCallState.error) - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/get-user-workflow.ts b/apps/sim/lib/copilot/tools/client/workflow/get-user-workflow.ts deleted file mode 100644 index c67f92a9e2..0000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/get-user-workflow.ts +++ /dev/null @@ -1,187 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Workflow as WorkflowIcon, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { stripWorkflowDiffMarkers } from '@/lib/workflows/diff' -import { sanitizeForCopilot } from '@/lib/workflows/sanitization/json-sanitizer' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' -import { mergeSubblockState } from '@/stores/workflows/utils' -import { useWorkflowStore } from '@/stores/workflows/workflow/store' - -interface GetUserWorkflowArgs { - workflowId?: string - includeMetadata?: boolean -} - -const logger = createLogger('GetUserWorkflowClientTool') - -export class GetUserWorkflowClientTool extends BaseClientTool { - static readonly id = 'get_user_workflow' - - constructor(toolCallId: string) { - super(toolCallId, GetUserWorkflowClientTool.id, GetUserWorkflowClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Reading your workflow', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Reading your workflow', icon: WorkflowIcon }, - [ClientToolCallState.executing]: { text: 'Reading your workflow', icon: Loader2 }, - [ClientToolCallState.aborted]: { text: 'Aborted reading your workflow', icon: XCircle }, - 
[ClientToolCallState.success]: { text: 'Read your workflow', icon: WorkflowIcon }, - [ClientToolCallState.error]: { text: 'Failed to read your workflow', icon: X }, - [ClientToolCallState.rejected]: { text: 'Skipped reading your workflow', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId - if (workflowId) { - const workflowName = useWorkflowRegistry.getState().workflows[workflowId]?.name - if (workflowName) { - switch (state) { - case ClientToolCallState.success: - return `Read ${workflowName}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Reading ${workflowName}` - case ClientToolCallState.error: - return `Failed to read ${workflowName}` - case ClientToolCallState.aborted: - return `Aborted reading ${workflowName}` - case ClientToolCallState.rejected: - return `Skipped reading ${workflowName}` - } - } - } - return undefined - }, - } - - async execute(args?: GetUserWorkflowArgs): Promise { - try { - this.setState(ClientToolCallState.executing) - - // Determine workflow ID (explicit or active) - let workflowId = args?.workflowId - if (!workflowId) { - const { activeWorkflowId } = useWorkflowRegistry.getState() - if (!activeWorkflowId) { - await this.markToolComplete(400, 'No active workflow found') - this.setState(ClientToolCallState.error) - return - } - workflowId = activeWorkflowId as any - } - - logger.info('Fetching user workflow from stores', { - workflowId, - includeMetadata: args?.includeMetadata, - }) - - // Always use main workflow store as the source of truth - const workflowStore = useWorkflowStore.getState() - const fullWorkflowState = workflowStore.getWorkflowState() - - let workflowState: any = null - - if (!fullWorkflowState || !fullWorkflowState.blocks) { - const workflowRegistry = useWorkflowRegistry.getState() - const wfKey = String(workflowId) - const workflow = 
(workflowRegistry as any).workflows?.[wfKey] - - if (!workflow) { - await this.markToolComplete(404, `Workflow ${workflowId} not found in any store`) - this.setState(ClientToolCallState.error) - return - } - - logger.warn('No workflow state found, using workflow metadata only', { workflowId }) - workflowState = workflow - } else { - workflowState = stripWorkflowDiffMarkers(fullWorkflowState) - logger.info('Using workflow state from workflow store', { - workflowId, - blockCount: Object.keys(fullWorkflowState.blocks || {}).length, - }) - } - - // Normalize required properties - if (workflowState) { - if (!workflowState.loops) workflowState.loops = {} - if (!workflowState.parallels) workflowState.parallels = {} - if (!workflowState.edges) workflowState.edges = [] - if (!workflowState.blocks) workflowState.blocks = {} - } - - // Merge latest subblock values so edits are reflected - try { - if (workflowState?.blocks) { - workflowState = { - ...workflowState, - blocks: mergeSubblockState(workflowState.blocks, workflowId as any), - } - logger.info('Merged subblock values into workflow state', { - workflowId, - blockCount: Object.keys(workflowState.blocks || {}).length, - }) - } - } catch (mergeError) { - logger.warn('Failed to merge subblock values; proceeding with raw workflow state', { - workflowId, - error: mergeError instanceof Error ? 
mergeError.message : String(mergeError), - }) - } - - logger.info('Validating workflow state', { - workflowId, - hasWorkflowState: !!workflowState, - hasBlocks: !!workflowState?.blocks, - workflowStateType: typeof workflowState, - }) - - if (!workflowState || !workflowState.blocks) { - await this.markToolComplete(422, 'Workflow state is empty or invalid') - this.setState(ClientToolCallState.error) - return - } - - // Sanitize workflow state for copilot (remove UI-specific data) - const sanitizedState = sanitizeForCopilot(workflowState) - - // Convert to JSON string for transport - let workflowJson = '' - try { - workflowJson = JSON.stringify(sanitizedState, null, 2) - logger.info('Successfully stringified sanitized workflow state', { - workflowId, - jsonLength: workflowJson.length, - }) - } catch (stringifyError) { - await this.markToolComplete( - 500, - `Failed to convert workflow to JSON: ${ - stringifyError instanceof Error ? stringifyError.message : 'Unknown error' - }` - ) - this.setState(ClientToolCallState.error) - return - } - - // Mark complete with data; keep state success for store render - await this.markToolComplete(200, 'Workflow analyzed', { userWorkflow: workflowJson }) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const message = error instanceof Error ? 
error.message : String(error) - logger.error('Error in tool execution', { - toolCallId: this.toolCallId, - error, - message, - }) - await this.markToolComplete(500, message || 'Failed to fetch workflow') - this.setState(ClientToolCallState.error) - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/get-workflow-console.ts b/apps/sim/lib/copilot/tools/client/workflow/get-workflow-console.ts deleted file mode 100644 index 24f27713bc..0000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/get-workflow-console.ts +++ /dev/null @@ -1,60 +0,0 @@ -import { Loader2, MinusCircle, TerminalSquare, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class GetWorkflowConsoleClientTool extends BaseClientTool { - static readonly id = 'get_workflow_console' - - constructor(toolCallId: string) { - super(toolCallId, GetWorkflowConsoleClientTool.id, GetWorkflowConsoleClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Fetching execution logs', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Fetching execution logs', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Fetched execution logs', icon: TerminalSquare }, - [ClientToolCallState.error]: { text: 'Failed to fetch execution logs', icon: XCircle }, - [ClientToolCallState.rejected]: { - text: 'Skipped fetching execution logs', - icon: MinusCircle, - }, - [ClientToolCallState.aborted]: { - text: 'Aborted fetching execution logs', - icon: MinusCircle, - }, - [ClientToolCallState.pending]: { text: 'Fetching execution logs', icon: Loader2 }, - }, - getDynamicText: (params, state) => { - const limit = params?.limit - if (limit && typeof limit === 'number') { - const logText = limit === 1 ? 
'execution log' : 'execution logs' - - switch (state) { - case ClientToolCallState.success: - return `Fetched last ${limit} ${logText}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Fetching last ${limit} ${logText}` - case ClientToolCallState.error: - return `Failed to fetch last ${limit} ${logText}` - case ClientToolCallState.rejected: - return `Skipped fetching last ${limit} ${logText}` - case ClientToolCallState.aborted: - return `Aborted fetching last ${limit} ${logText}` - } - } - return undefined - }, - } - - async execute(): Promise { - // Tool execution is handled server-side by the orchestrator. - // Client tool classes are retained for UI display configuration only. - this.setState(ClientToolCallState.success) - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/get-workflow-data.ts b/apps/sim/lib/copilot/tools/client/workflow/get-workflow-data.ts deleted file mode 100644 index 657daa0a05..0000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/get-workflow-data.ts +++ /dev/null @@ -1,269 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Database, Loader2, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -const logger = createLogger('GetWorkflowDataClientTool') - -/** Data type enum for the get_workflow_data tool */ -export type WorkflowDataType = 'global_variables' | 'custom_tools' | 'mcp_tools' | 'files' - -interface GetWorkflowDataArgs { - data_type: WorkflowDataType -} - -export class GetWorkflowDataClientTool extends BaseClientTool { - static readonly id = 'get_workflow_data' - - constructor(toolCallId: string) { - super(toolCallId, GetWorkflowDataClientTool.id, GetWorkflowDataClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - 
displayNames: { - [ClientToolCallState.generating]: { text: 'Fetching workflow data', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Fetching workflow data', icon: Database }, - [ClientToolCallState.executing]: { text: 'Fetching workflow data', icon: Loader2 }, - [ClientToolCallState.aborted]: { text: 'Aborted fetching data', icon: XCircle }, - [ClientToolCallState.success]: { text: 'Retrieved workflow data', icon: Database }, - [ClientToolCallState.error]: { text: 'Failed to fetch data', icon: X }, - [ClientToolCallState.rejected]: { text: 'Skipped fetching data', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const dataType = params?.data_type as WorkflowDataType | undefined - if (!dataType) return undefined - - const typeLabels: Record = { - global_variables: 'variables', - custom_tools: 'custom tools', - mcp_tools: 'MCP tools', - files: 'files', - } - - const label = typeLabels[dataType] || dataType - - switch (state) { - case ClientToolCallState.success: - return `Retrieved ${label}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - return `Fetching ${label}` - case ClientToolCallState.pending: - return `Fetch ${label}?` - case ClientToolCallState.error: - return `Failed to fetch ${label}` - case ClientToolCallState.aborted: - return `Aborted fetching ${label}` - case ClientToolCallState.rejected: - return `Skipped fetching ${label}` - } - return undefined - }, - } - - async execute(args?: GetWorkflowDataArgs): Promise { - try { - this.setState(ClientToolCallState.executing) - - const dataType = args?.data_type - if (!dataType) { - await this.markToolComplete(400, 'Missing data_type parameter') - this.setState(ClientToolCallState.error) - return - } - - const { activeWorkflowId, hydration } = useWorkflowRegistry.getState() - const activeWorkspaceId = hydration.workspaceId - - switch (dataType) { - case 'global_variables': - await this.fetchGlobalVariables(activeWorkflowId) - break - case 'custom_tools': 
- await this.fetchCustomTools(activeWorkspaceId) - break - case 'mcp_tools': - await this.fetchMcpTools(activeWorkspaceId) - break - case 'files': - await this.fetchFiles(activeWorkspaceId) - break - default: - await this.markToolComplete(400, `Unknown data_type: ${dataType}`) - this.setState(ClientToolCallState.error) - return - } - } catch (error: unknown) { - const message = error instanceof Error ? error.message : String(error) - await this.markToolComplete(500, message || 'Failed to fetch workflow data') - this.setState(ClientToolCallState.error) - } - } - - /** - * Fetch global workflow variables - */ - private async fetchGlobalVariables(workflowId: string | null): Promise { - if (!workflowId) { - await this.markToolComplete(400, 'No active workflow found') - this.setState(ClientToolCallState.error) - return - } - - const res = await fetch(`/api/workflows/${workflowId}/variables`, { method: 'GET' }) - if (!res.ok) { - const text = await res.text().catch(() => '') - await this.markToolComplete(res.status, text || 'Failed to fetch workflow variables') - this.setState(ClientToolCallState.error) - return - } - - const json = await res.json() - const varsRecord = (json?.data as Record) || {} - const variables = Object.values(varsRecord).map((v: unknown) => { - const variable = v as { id?: string; name?: string; value?: unknown } - return { - id: String(variable?.id || ''), - name: String(variable?.name || ''), - value: variable?.value, - } - }) - - logger.info('Fetched workflow variables', { count: variables.length }) - await this.markToolComplete(200, `Found ${variables.length} variable(s)`, { variables }) - this.setState(ClientToolCallState.success) - } - - /** - * Fetch custom tools for the workspace - */ - private async fetchCustomTools(workspaceId: string | null): Promise { - if (!workspaceId) { - await this.markToolComplete(400, 'No active workspace found') - this.setState(ClientToolCallState.error) - return - } - - const res = await 
fetch(`/api/tools/custom?workspaceId=${workspaceId}`, { method: 'GET' }) - if (!res.ok) { - const text = await res.text().catch(() => '') - await this.markToolComplete(res.status, text || 'Failed to fetch custom tools') - this.setState(ClientToolCallState.error) - return - } - - const json = await res.json() - const toolsData = (json?.data as unknown[]) || [] - const customTools = toolsData.map((tool: unknown) => { - const t = tool as { - id?: string - title?: string - schema?: { function?: { name?: string; description?: string; parameters?: unknown } } - code?: string - } - return { - id: String(t?.id || ''), - title: String(t?.title || ''), - functionName: String(t?.schema?.function?.name || ''), - description: String(t?.schema?.function?.description || ''), - parameters: t?.schema?.function?.parameters, - } - }) - - logger.info('Fetched custom tools', { count: customTools.length }) - await this.markToolComplete(200, `Found ${customTools.length} custom tool(s)`, { customTools }) - this.setState(ClientToolCallState.success) - } - - /** - * Fetch MCP tools for the workspace - */ - private async fetchMcpTools(workspaceId: string | null): Promise { - if (!workspaceId) { - await this.markToolComplete(400, 'No active workspace found') - this.setState(ClientToolCallState.error) - return - } - - const res = await fetch(`/api/mcp/tools/discover?workspaceId=${workspaceId}`, { method: 'GET' }) - if (!res.ok) { - const text = await res.text().catch(() => '') - await this.markToolComplete(res.status, text || 'Failed to fetch MCP tools') - this.setState(ClientToolCallState.error) - return - } - - const json = await res.json() - const toolsData = (json?.data?.tools as unknown[]) || [] - const mcpTools = toolsData.map((tool: unknown) => { - const t = tool as { - name?: string - serverId?: string - serverName?: string - description?: string - inputSchema?: unknown - } - return { - name: String(t?.name || ''), - serverId: String(t?.serverId || ''), - serverName: 
String(t?.serverName || ''), - description: String(t?.description || ''), - inputSchema: t?.inputSchema, - } - }) - - logger.info('Fetched MCP tools', { count: mcpTools.length }) - await this.markToolComplete(200, `Found ${mcpTools.length} MCP tool(s)`, { mcpTools }) - this.setState(ClientToolCallState.success) - } - - /** - * Fetch workspace files metadata - */ - private async fetchFiles(workspaceId: string | null): Promise { - if (!workspaceId) { - await this.markToolComplete(400, 'No active workspace found') - this.setState(ClientToolCallState.error) - return - } - - const res = await fetch(`/api/workspaces/${workspaceId}/files`, { method: 'GET' }) - if (!res.ok) { - const text = await res.text().catch(() => '') - await this.markToolComplete(res.status, text || 'Failed to fetch files') - this.setState(ClientToolCallState.error) - return - } - - const json = await res.json() - const filesData = (json?.files as unknown[]) || [] - const files = filesData.map((file: unknown) => { - const f = file as { - id?: string - name?: string - key?: string - path?: string - size?: number - type?: string - uploadedAt?: string - } - return { - id: String(f?.id || ''), - name: String(f?.name || ''), - key: String(f?.key || ''), - path: String(f?.path || ''), - size: Number(f?.size || 0), - type: String(f?.type || ''), - uploadedAt: String(f?.uploadedAt || ''), - } - }) - - logger.info('Fetched workspace files', { count: files.length }) - await this.markToolComplete(200, `Found ${files.length} file(s)`, { files }) - this.setState(ClientToolCallState.success) - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/get-workflow-from-name.ts b/apps/sim/lib/copilot/tools/client/workflow/get-workflow-from-name.ts deleted file mode 100644 index cb001a57c9..0000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/get-workflow-from-name.ts +++ /dev/null @@ -1,117 +0,0 @@ -import { createLogger } from '@sim/logger' -import { FileText, Loader2, X, XCircle } from 'lucide-react' 
-import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { - formatWorkflowStateForCopilot, - normalizeWorkflowName, -} from '@/lib/copilot/tools/shared/workflow-utils' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -const logger = createLogger('GetWorkflowFromNameClientTool') - -interface GetWorkflowFromNameArgs { - workflow_name: string -} - -export class GetWorkflowFromNameClientTool extends BaseClientTool { - static readonly id = 'get_workflow_from_name' - - constructor(toolCallId: string) { - super(toolCallId, GetWorkflowFromNameClientTool.id, GetWorkflowFromNameClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Reading workflow', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Reading workflow', icon: FileText }, - [ClientToolCallState.executing]: { text: 'Reading workflow', icon: Loader2 }, - [ClientToolCallState.aborted]: { text: 'Aborted reading workflow', icon: XCircle }, - [ClientToolCallState.success]: { text: 'Read workflow', icon: FileText }, - [ClientToolCallState.error]: { text: 'Failed to read workflow', icon: X }, - [ClientToolCallState.rejected]: { text: 'Skipped reading workflow', icon: XCircle }, - }, - getDynamicText: (params, state) => { - if (params?.workflow_name && typeof params.workflow_name === 'string') { - const workflowName = params.workflow_name - - switch (state) { - case ClientToolCallState.success: - return `Read ${workflowName}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Reading ${workflowName}` - case ClientToolCallState.error: - return `Failed to read ${workflowName}` - case ClientToolCallState.aborted: - return `Aborted reading ${workflowName}` - case ClientToolCallState.rejected: - return `Skipped reading ${workflowName}` - } - } - return 
undefined - }, - } - - async execute(args?: GetWorkflowFromNameArgs): Promise { - try { - this.setState(ClientToolCallState.executing) - - const workflowName = args?.workflow_name?.trim() - if (!workflowName) { - await this.markToolComplete(400, 'workflow_name is required') - this.setState(ClientToolCallState.error) - return - } - - // Try to find by name from registry first to get ID - const registry = useWorkflowRegistry.getState() - const targetName = normalizeWorkflowName(workflowName) - const match = Object.values((registry as any).workflows || {}).find( - (w: any) => normalizeWorkflowName(w?.name) === targetName - ) as any - - if (!match?.id) { - await this.markToolComplete(404, `Workflow not found: ${workflowName}`) - this.setState(ClientToolCallState.error) - return - } - - // Fetch full workflow from API route (normalized tables) - const res = await fetch(`/api/workflows/${encodeURIComponent(match.id)}`, { method: 'GET' }) - if (!res.ok) { - const text = await res.text().catch(() => '') - await this.markToolComplete(res.status, text || 'Failed to fetch workflow by name') - this.setState(ClientToolCallState.error) - return - } - - const json = await res.json() - const wf = json?.data - if (!wf?.state?.blocks) { - await this.markToolComplete(422, 'Workflow state is empty or invalid') - this.setState(ClientToolCallState.error) - return - } - - // Convert state to the same string format as get_user_workflow - const userWorkflow = formatWorkflowStateForCopilot({ - blocks: wf.state.blocks || {}, - edges: wf.state.edges || [], - loops: wf.state.loops || {}, - parallels: wf.state.parallels || {}, - }) - - await this.markToolComplete(200, `Retrieved workflow ${workflowName}`, { userWorkflow }) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const message = error instanceof Error ? 
error.message : String(error) - await this.markToolComplete(500, message || 'Failed to retrieve workflow by name') - this.setState(ClientToolCallState.error) - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/list-user-workflows.ts b/apps/sim/lib/copilot/tools/client/workflow/list-user-workflows.ts deleted file mode 100644 index 33a9df881d..0000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/list-user-workflows.ts +++ /dev/null @@ -1,59 +0,0 @@ -import { createLogger } from '@sim/logger' -import { ListChecks, Loader2, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { extractWorkflowNames } from '@/lib/copilot/tools/shared/workflow-utils' - -const logger = createLogger('ListUserWorkflowsClientTool') - -export class ListUserWorkflowsClientTool extends BaseClientTool { - static readonly id = 'list_user_workflows' - - constructor(toolCallId: string) { - super(toolCallId, ListUserWorkflowsClientTool.id, ListUserWorkflowsClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Listing your workflows', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Listing your workflows', icon: ListChecks }, - [ClientToolCallState.executing]: { text: 'Listing your workflows', icon: Loader2 }, - [ClientToolCallState.aborted]: { text: 'Aborted listing workflows', icon: XCircle }, - [ClientToolCallState.success]: { text: 'Listed your workflows', icon: ListChecks }, - [ClientToolCallState.error]: { text: 'Failed to list workflows', icon: X }, - [ClientToolCallState.rejected]: { text: 'Skipped listing workflows', icon: XCircle }, - }, - } - - async execute(): Promise { - try { - this.setState(ClientToolCallState.executing) - - const res = await fetch('/api/workflows', { method: 'GET' }) - if (!res.ok) { - const text = await res.text().catch(() => '') 
- await this.markToolComplete(res.status, text || 'Failed to fetch workflows') - this.setState(ClientToolCallState.error) - return - } - - const json = await res.json() - const workflows = Array.isArray(json?.data) ? json.data : [] - const names = extractWorkflowNames(workflows) - - logger.info('Found workflows', { count: names.length }) - - await this.markToolComplete(200, `Found ${names.length} workflow(s)`, { - workflow_names: names, - }) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const message = error instanceof Error ? error.message : String(error) - await this.markToolComplete(500, message || 'Failed to list workflows') - this.setState(ClientToolCallState.error) - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/list-workspace-mcp-servers.ts b/apps/sim/lib/copilot/tools/client/workflow/list-workspace-mcp-servers.ts deleted file mode 100644 index 1dad9fbf7c..0000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/list-workspace-mcp-servers.ts +++ /dev/null @@ -1,112 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Server, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface ListWorkspaceMcpServersArgs { - workspaceId?: string -} - -export interface WorkspaceMcpServer { - id: string - name: string - description: string | null - toolCount: number - toolNames: string[] -} - -/** - * List workspace MCP servers tool. - * Returns a list of MCP servers available in the workspace that workflows can be deployed to. 
- */ -export class ListWorkspaceMcpServersClientTool extends BaseClientTool { - static readonly id = 'list_workspace_mcp_servers' - - constructor(toolCallId: string) { - super( - toolCallId, - ListWorkspaceMcpServersClientTool.id, - ListWorkspaceMcpServersClientTool.metadata - ) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Getting MCP servers', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Getting MCP servers', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Getting MCP servers', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Retrieved MCP servers', icon: Server }, - [ClientToolCallState.error]: { text: 'Failed to get MCP servers', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted getting MCP servers', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped getting MCP servers', icon: XCircle }, - }, - interrupt: undefined, - } - - async execute(args?: ListWorkspaceMcpServersArgs): Promise { - const logger = createLogger('ListWorkspaceMcpServersClientTool') - try { - this.setState(ClientToolCallState.executing) - - // Get workspace ID from active workflow if not provided - const { activeWorkflowId, workflows } = useWorkflowRegistry.getState() - let workspaceId = args?.workspaceId - - if (!workspaceId && activeWorkflowId) { - workspaceId = workflows[activeWorkflowId]?.workspaceId - } - - if (!workspaceId) { - throw new Error('No workspace ID available') - } - - const res = await fetch(`/api/mcp/workflow-servers?workspaceId=${workspaceId}`) - - if (!res.ok) { - const data = await res.json().catch(() => ({})) - throw new Error(data.error || `Failed to fetch MCP servers (${res.status})`) - } - - const data = await res.json() - const servers: WorkspaceMcpServer[] = (data.data?.servers || []).map((s: any) => ({ - id: s.id, - name: s.name, - description: s.description, - toolCount: s.toolCount || 0, - toolNames: 
s.toolNames || [], - })) - - this.setState(ClientToolCallState.success) - - if (servers.length === 0) { - await this.markToolComplete( - 200, - 'No MCP servers found in this workspace. Use create_workspace_mcp_server to create one.', - { servers: [], count: 0 } - ) - } else { - await this.markToolComplete( - 200, - `Found ${servers.length} MCP server(s) in the workspace.`, - { - servers, - count: servers.length, - } - ) - } - - logger.info(`Listed ${servers.length} MCP servers`) - } catch (e: any) { - logger.error('Failed to list MCP servers', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to list MCP servers') - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/manage-custom-tool.ts b/apps/sim/lib/copilot/tools/client/workflow/manage-custom-tool.ts deleted file mode 100644 index 58a8236376..0000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/manage-custom-tool.ts +++ /dev/null @@ -1,408 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Check, Loader2, Plus, X, XCircle } from 'lucide-react' -import { client } from '@/lib/auth/auth-client' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { getCustomTool } from '@/hooks/queries/custom-tools' -import { useCopilotStore } from '@/stores/panel/copilot/store' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface CustomToolSchema { - type: 'function' - function: { - name: string - description?: string - parameters: { - type: string - properties: Record - required?: string[] - } - } -} - -interface ManageCustomToolArgs { - operation: 'add' | 'edit' | 'delete' | 'list' - toolId?: string - schema?: CustomToolSchema - code?: string -} - -const API_ENDPOINT = '/api/tools/custom' - -async function checkCustomToolsPermission(): Promise { - const activeOrgResponse = await 
client.organization.getFullOrganization() - const organizationId = activeOrgResponse.data?.id - if (!organizationId) return - - const response = await fetch(`/api/permission-groups/user?organizationId=${organizationId}`) - if (!response.ok) return - - const data = await response.json() - if (data?.config?.disableCustomTools) { - throw new Error('Custom tools are not allowed based on your permission group settings') - } -} - -/** - * Client tool for creating, editing, and deleting custom tools via the copilot. - */ -export class ManageCustomToolClientTool extends BaseClientTool { - static readonly id = 'manage_custom_tool' - private currentArgs?: ManageCustomToolArgs - - constructor(toolCallId: string) { - super(toolCallId, ManageCustomToolClientTool.id, ManageCustomToolClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Managing custom tool', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Manage custom tool?', icon: Plus }, - [ClientToolCallState.executing]: { text: 'Managing custom tool', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Managed custom tool', icon: Check }, - [ClientToolCallState.error]: { text: 'Failed to manage custom tool', icon: X }, - [ClientToolCallState.aborted]: { - text: 'Aborted managing custom tool', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped managing custom tool', - icon: XCircle, - }, - }, - interrupt: { - accept: { text: 'Allow', icon: Check }, - reject: { text: 'Skip', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const operation = params?.operation as 'add' | 'edit' | 'delete' | 'list' | undefined - - if (!operation) return undefined - - let toolName = params?.schema?.function?.name - if (!toolName && params?.toolId) { - try { - const tool = getCustomTool(params.toolId) - toolName = tool?.schema?.function?.name - } catch { - // Ignore errors accessing cache - } - } 
- - const getActionText = (verb: 'present' | 'past' | 'gerund') => { - switch (operation) { - case 'add': - return verb === 'present' ? 'Create' : verb === 'past' ? 'Created' : 'Creating' - case 'edit': - return verb === 'present' ? 'Edit' : verb === 'past' ? 'Edited' : 'Editing' - case 'delete': - return verb === 'present' ? 'Delete' : verb === 'past' ? 'Deleted' : 'Deleting' - case 'list': - return verb === 'present' ? 'List' : verb === 'past' ? 'Listed' : 'Listing' - default: - return verb === 'present' ? 'Manage' : verb === 'past' ? 'Managed' : 'Managing' - } - } - - // For add: only show tool name in past tense (success) - // For edit/delete: always show tool name - // For list: never show individual tool name, use plural - const shouldShowToolName = (currentState: ClientToolCallState) => { - if (operation === 'list') return false - if (operation === 'add') { - return currentState === ClientToolCallState.success - } - return true // edit and delete always show tool name - } - - const nameText = - operation === 'list' - ? ' custom tools' - : shouldShowToolName(state) && toolName - ? 
` ${toolName}` - : ' custom tool' - - switch (state) { - case ClientToolCallState.success: - return `${getActionText('past')}${nameText}` - case ClientToolCallState.executing: - return `${getActionText('gerund')}${nameText}` - case ClientToolCallState.generating: - return `${getActionText('gerund')}${nameText}` - case ClientToolCallState.pending: - return `${getActionText('present')}${nameText}?` - case ClientToolCallState.error: - return `Failed to ${getActionText('present')?.toLowerCase()}${nameText}` - case ClientToolCallState.aborted: - return `Aborted ${getActionText('gerund')?.toLowerCase()}${nameText}` - case ClientToolCallState.rejected: - return `Skipped ${getActionText('gerund')?.toLowerCase()}${nameText}` - } - return undefined - }, - } - - /** - * Gets the tool call args from the copilot store (needed before execute() is called) - */ - private getArgsFromStore(): ManageCustomToolArgs | undefined { - try { - const { toolCallsById } = useCopilotStore.getState() - const toolCall = toolCallsById[this.toolCallId] - return (toolCall as any)?.params as ManageCustomToolArgs | undefined - } catch { - return undefined - } - } - - /** - * Override getInterruptDisplays to only show confirmation for edit and delete operations. - * Add operations execute directly without confirmation. 
- */ - getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined { - const args = this.currentArgs || this.getArgsFromStore() - const operation = args?.operation - if (operation === 'edit' || operation === 'delete') { - return this.metadata.interrupt - } - return undefined - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: ManageCustomToolArgs): Promise { - const logger = createLogger('ManageCustomToolClientTool') - try { - this.setState(ClientToolCallState.executing) - await this.executeOperation(args, logger) - } catch (e: any) { - logger.error('execute failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to manage custom tool', { - success: false, - error: e?.message || 'Failed to manage custom tool', - }) - } - } - - async execute(args?: ManageCustomToolArgs): Promise { - this.currentArgs = args - if (args?.operation === 'add' || args?.operation === 'list') { - await this.handleAccept(args) - } - } - - /** - * Executes the custom tool operation (add, edit, delete, or list) - */ - private async executeOperation( - args: ManageCustomToolArgs | undefined, - logger: ReturnType - ): Promise { - if (!args?.operation) { - throw new Error('Operation is required') - } - - await checkCustomToolsPermission() - - const { operation, toolId, schema, code } = args - - const { hydration } = useWorkflowRegistry.getState() - const workspaceId = hydration.workspaceId - if (!workspaceId) { - throw new Error('No active workspace found') - } - - logger.info(`Executing custom tool operation: ${operation}`, { - operation, - toolId, - functionName: schema?.function?.name, - workspaceId, - }) - - switch (operation) { - case 'add': - await this.addCustomTool({ schema, code, workspaceId }, logger) - break - case 'edit': - await this.editCustomTool({ toolId, schema, code, workspaceId }, logger) - 
break - case 'delete': - await this.deleteCustomTool({ toolId, workspaceId }, logger) - break - case 'list': - await this.markToolComplete(200, 'Listed custom tools') - break - default: - throw new Error(`Unknown operation: ${operation}`) - } - } - - /** - * Creates a new custom tool - */ - private async addCustomTool( - params: { - schema?: CustomToolSchema - code?: string - workspaceId: string - }, - logger: ReturnType - ): Promise { - const { schema, code, workspaceId } = params - - if (!schema) { - throw new Error('Schema is required for adding a custom tool') - } - if (!code) { - throw new Error('Code is required for adding a custom tool') - } - - const functionName = schema.function.name - - const response = await fetch(API_ENDPOINT, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - tools: [{ title: functionName, schema, code }], - workspaceId, - }), - }) - - const data = await response.json() - - if (!response.ok) { - throw new Error(data.error || 'Failed to create custom tool') - } - - if (!data.data || !Array.isArray(data.data) || data.data.length === 0) { - throw new Error('Invalid API response: missing tool data') - } - - const createdTool = data.data[0] - logger.info(`Created custom tool: ${functionName}`, { toolId: createdTool.id }) - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, `Created custom tool "${functionName}"`, { - success: true, - operation: 'add', - toolId: createdTool.id, - functionName, - }) - } - - /** - * Updates an existing custom tool - */ - private async editCustomTool( - params: { - toolId?: string - schema?: CustomToolSchema - code?: string - workspaceId: string - }, - logger: ReturnType - ): Promise { - const { toolId, schema, code, workspaceId } = params - - if (!toolId) { - throw new Error('Tool ID is required for editing a custom tool') - } - - if (!schema && !code) { - throw new Error('At least one of schema or code must be provided for editing') 
- } - - const existingResponse = await fetch(`${API_ENDPOINT}?workspaceId=${workspaceId}`) - const existingData = await existingResponse.json() - - if (!existingResponse.ok) { - throw new Error(existingData.error || 'Failed to fetch existing tools') - } - - const existingTool = existingData.data?.find((t: any) => t.id === toolId) - if (!existingTool) { - throw new Error(`Tool with ID ${toolId} not found`) - } - - const mergedSchema = schema ?? existingTool.schema - const updatedTool = { - id: toolId, - title: mergedSchema.function.name, - schema: mergedSchema, - code: code ?? existingTool.code, - } - - const response = await fetch(API_ENDPOINT, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - tools: [updatedTool], - workspaceId, - }), - }) - - const data = await response.json() - - if (!response.ok) { - throw new Error(data.error || 'Failed to update custom tool') - } - - const functionName = updatedTool.schema.function.name - logger.info(`Updated custom tool: ${functionName}`, { toolId }) - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, `Updated custom tool "${functionName}"`, { - success: true, - operation: 'edit', - toolId, - functionName, - }) - } - - /** - * Deletes a custom tool - */ - private async deleteCustomTool( - params: { - toolId?: string - workspaceId: string - }, - logger: ReturnType - ): Promise { - const { toolId, workspaceId } = params - - if (!toolId) { - throw new Error('Tool ID is required for deleting a custom tool') - } - - const url = `${API_ENDPOINT}?id=${toolId}&workspaceId=${workspaceId}` - const response = await fetch(url, { - method: 'DELETE', - }) - - const data = await response.json() - - if (!response.ok) { - throw new Error(data.error || 'Failed to delete custom tool') - } - - logger.info(`Deleted custom tool: ${toolId}`) - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, `Deleted custom tool`, { - success: true, - 
operation: 'delete', - toolId, - }) - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/manage-mcp-tool.ts b/apps/sim/lib/copilot/tools/client/workflow/manage-mcp-tool.ts deleted file mode 100644 index 796574dc1b..0000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/manage-mcp-tool.ts +++ /dev/null @@ -1,360 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Check, Loader2, Server, X, XCircle } from 'lucide-react' -import { client } from '@/lib/auth/auth-client' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { useCopilotStore } from '@/stores/panel/copilot/store' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface McpServerConfig { - name: string - transport: 'streamable-http' - url?: string - headers?: Record - timeout?: number - enabled?: boolean -} - -interface ManageMcpToolArgs { - operation: 'add' | 'edit' | 'delete' - serverId?: string - config?: McpServerConfig -} - -const API_ENDPOINT = '/api/mcp/servers' - -async function checkMcpToolsPermission(): Promise { - const activeOrgResponse = await client.organization.getFullOrganization() - const organizationId = activeOrgResponse.data?.id - if (!organizationId) return - - const response = await fetch(`/api/permission-groups/user?organizationId=${organizationId}`) - if (!response.ok) return - - const data = await response.json() - if (data?.config?.disableMcpTools) { - throw new Error('MCP tools are not allowed based on your permission group settings') - } -} - -/** - * Client tool for creating, editing, and deleting MCP tool servers via the copilot. 
- */ -export class ManageMcpToolClientTool extends BaseClientTool { - static readonly id = 'manage_mcp_tool' - private currentArgs?: ManageMcpToolArgs - - constructor(toolCallId: string) { - super(toolCallId, ManageMcpToolClientTool.id, ManageMcpToolClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Managing MCP tool', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Manage MCP tool?', icon: Server }, - [ClientToolCallState.executing]: { text: 'Managing MCP tool', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Managed MCP tool', icon: Check }, - [ClientToolCallState.error]: { text: 'Failed to manage MCP tool', icon: X }, - [ClientToolCallState.aborted]: { - text: 'Aborted managing MCP tool', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped managing MCP tool', - icon: XCircle, - }, - }, - interrupt: { - accept: { text: 'Allow', icon: Check }, - reject: { text: 'Skip', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const operation = params?.operation as 'add' | 'edit' | 'delete' | undefined - - if (!operation) return undefined - - const serverName = params?.config?.name || params?.serverName - - const getActionText = (verb: 'present' | 'past' | 'gerund') => { - switch (operation) { - case 'add': - return verb === 'present' ? 'Add' : verb === 'past' ? 'Added' : 'Adding' - case 'edit': - return verb === 'present' ? 'Edit' : verb === 'past' ? 'Edited' : 'Editing' - case 'delete': - return verb === 'present' ? 'Delete' : verb === 'past' ? 'Deleted' : 'Deleting' - } - } - - const shouldShowServerName = (currentState: ClientToolCallState) => { - if (operation === 'add') { - return currentState === ClientToolCallState.success - } - return true - } - - const nameText = shouldShowServerName(state) && serverName ? 
` ${serverName}` : ' MCP tool' - - switch (state) { - case ClientToolCallState.success: - return `${getActionText('past')}${nameText}` - case ClientToolCallState.executing: - return `${getActionText('gerund')}${nameText}` - case ClientToolCallState.generating: - return `${getActionText('gerund')}${nameText}` - case ClientToolCallState.pending: - return `${getActionText('present')}${nameText}?` - case ClientToolCallState.error: - return `Failed to ${getActionText('present')?.toLowerCase()}${nameText}` - case ClientToolCallState.aborted: - return `Aborted ${getActionText('gerund')?.toLowerCase()}${nameText}` - case ClientToolCallState.rejected: - return `Skipped ${getActionText('gerund')?.toLowerCase()}${nameText}` - } - return undefined - }, - } - - /** - * Gets the tool call args from the copilot store (needed before execute() is called) - */ - private getArgsFromStore(): ManageMcpToolArgs | undefined { - try { - const { toolCallsById } = useCopilotStore.getState() - const toolCall = toolCallsById[this.toolCallId] - return (toolCall as any)?.params as ManageMcpToolArgs | undefined - } catch { - return undefined - } - } - - /** - * Override getInterruptDisplays to only show confirmation for edit and delete operations. - * Add operations execute directly without confirmation. 
- */ - getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined { - const args = this.currentArgs || this.getArgsFromStore() - const operation = args?.operation - if (operation === 'edit' || operation === 'delete') { - return this.metadata.interrupt - } - return undefined - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: ManageMcpToolArgs): Promise { - const logger = createLogger('ManageMcpToolClientTool') - try { - this.setState(ClientToolCallState.executing) - await this.executeOperation(args, logger) - } catch (e: any) { - logger.error('execute failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to manage MCP tool', { - success: false, - error: e?.message || 'Failed to manage MCP tool', - }) - } - } - - async execute(args?: ManageMcpToolArgs): Promise { - this.currentArgs = args - if (args?.operation === 'add') { - await this.handleAccept(args) - } - } - - /** - * Executes the MCP tool operation (add, edit, or delete) - */ - private async executeOperation( - args: ManageMcpToolArgs | undefined, - logger: ReturnType - ): Promise { - if (!args?.operation) { - throw new Error('Operation is required') - } - - await checkMcpToolsPermission() - - const { operation, serverId, config } = args - - const { hydration } = useWorkflowRegistry.getState() - const workspaceId = hydration.workspaceId - if (!workspaceId) { - throw new Error('No active workspace found') - } - - logger.info(`Executing MCP tool operation: ${operation}`, { - operation, - serverId, - serverName: config?.name, - workspaceId, - }) - - switch (operation) { - case 'add': - await this.addMcpServer({ config, workspaceId }, logger) - break - case 'edit': - await this.editMcpServer({ serverId, config, workspaceId }, logger) - break - case 'delete': - await this.deleteMcpServer({ serverId, workspaceId }, logger) - 
break - default: - throw new Error(`Unknown operation: ${operation}`) - } - } - - /** - * Creates a new MCP server - */ - private async addMcpServer( - params: { - config?: McpServerConfig - workspaceId: string - }, - logger: ReturnType - ): Promise { - const { config, workspaceId } = params - - if (!config) { - throw new Error('Config is required for adding an MCP tool') - } - if (!config.name) { - throw new Error('Server name is required') - } - if (!config.url) { - throw new Error('Server URL is required for streamable-http transport') - } - - const serverData = { - ...config, - workspaceId, - transport: config.transport || 'streamable-http', - timeout: config.timeout || 30000, - enabled: config.enabled !== false, - } - - const response = await fetch(API_ENDPOINT, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify(serverData), - }) - - const data = await response.json() - - if (!response.ok) { - throw new Error(data.error || 'Failed to create MCP tool') - } - - const serverId = data.data?.serverId - logger.info(`Created MCP tool: ${config.name}`, { serverId }) - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, `Created MCP tool "${config.name}"`, { - success: true, - operation: 'add', - serverId, - serverName: config.name, - }) - } - - /** - * Updates an existing MCP server - */ - private async editMcpServer( - params: { - serverId?: string - config?: McpServerConfig - workspaceId: string - }, - logger: ReturnType - ): Promise { - const { serverId, config, workspaceId } = params - - if (!serverId) { - throw new Error('Server ID is required for editing an MCP tool') - } - - if (!config) { - throw new Error('Config is required for editing an MCP tool') - } - - const updateData = { - ...config, - workspaceId, - } - - const response = await fetch(`${API_ENDPOINT}/${serverId}?workspaceId=${workspaceId}`, { - method: 'PATCH', - headers: { 'Content-Type': 'application/json' }, - body: 
JSON.stringify(updateData), - }) - - const data = await response.json() - - if (!response.ok) { - throw new Error(data.error || 'Failed to update MCP tool') - } - - const serverName = config.name || data.data?.server?.name || serverId - logger.info(`Updated MCP tool: ${serverName}`, { serverId }) - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, `Updated MCP tool "${serverName}"`, { - success: true, - operation: 'edit', - serverId, - serverName, - }) - } - - /** - * Deletes an MCP server - */ - private async deleteMcpServer( - params: { - serverId?: string - workspaceId: string - }, - logger: ReturnType - ): Promise { - const { serverId, workspaceId } = params - - if (!serverId) { - throw new Error('Server ID is required for deleting an MCP tool') - } - - const url = `${API_ENDPOINT}?serverId=${serverId}&workspaceId=${workspaceId}` - const response = await fetch(url, { - method: 'DELETE', - }) - - const data = await response.json() - - if (!response.ok) { - throw new Error(data.error || 'Failed to delete MCP tool') - } - - logger.info(`Deleted MCP tool: ${serverId}`) - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, `Deleted MCP tool`, { - success: true, - operation: 'delete', - serverId, - }) - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/redeploy.ts b/apps/sim/lib/copilot/tools/client/workflow/redeploy.ts deleted file mode 100644 index 2fef023fb7..0000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/redeploy.ts +++ /dev/null @@ -1,71 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Rocket, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -export class RedeployClientTool extends BaseClientTool { - static readonly id = 'redeploy' - private hasExecuted = false - - 
constructor(toolCallId: string) { - super(toolCallId, RedeployClientTool.id, RedeployClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Redeploying workflow', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Redeploy workflow', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Redeploying workflow', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Redeployed workflow', icon: Rocket }, - [ClientToolCallState.error]: { text: 'Failed to redeploy workflow', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted redeploy', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped redeploy', icon: XCircle }, - }, - interrupt: undefined, - } - - async execute(): Promise { - const logger = createLogger('RedeployClientTool') - try { - if (this.hasExecuted) { - logger.info('execute skipped (already executed)', { toolCallId: this.toolCallId }) - return - } - this.hasExecuted = true - - this.setState(ClientToolCallState.executing) - - const { activeWorkflowId } = useWorkflowRegistry.getState() - if (!activeWorkflowId) { - throw new Error('No workflow ID provided') - } - - const res = await fetch(`/api/workflows/${activeWorkflowId}/deploy`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ deployChatEnabled: false }), - }) - - const json = await res.json().catch(() => ({})) - if (!res.ok) { - const errorText = json?.error || `Server error (${res.status})` - throw new Error(errorText) - } - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, 'Workflow redeployed', { - workflowId: activeWorkflowId, - deployedAt: json?.deployedAt || null, - schedule: json?.schedule, - }) - } catch (error: any) { - logger.error('Redeploy failed', { message: error?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, error?.message || 'Failed to 
redeploy workflow') - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/run-workflow.ts b/apps/sim/lib/copilot/tools/client/workflow/run-workflow.ts deleted file mode 100644 index 3b2c89df65..0000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/run-workflow.ts +++ /dev/null @@ -1,231 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, MinusCircle, Play, XCircle } from 'lucide-react' -import { v4 as uuidv4 } from 'uuid' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, - WORKFLOW_EXECUTION_TIMEOUT_MS, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' -import { executeWorkflowWithFullLogging } from '@/app/workspace/[workspaceId]/w/[workflowId]/utils' -import { useExecutionStore } from '@/stores/execution' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface RunWorkflowArgs { - workflowId?: string - description?: string - workflow_input?: Record -} - -export class RunWorkflowClientTool extends BaseClientTool { - static readonly id = 'run_workflow' - - constructor(toolCallId: string) { - super(toolCallId, RunWorkflowClientTool.id, RunWorkflowClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Preparing to run your workflow', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Run this workflow?', icon: Play }, - [ClientToolCallState.executing]: { text: 'Running your workflow', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Executed workflow', icon: Play }, - [ClientToolCallState.error]: { text: 'Errored running workflow', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped workflow execution', icon: MinusCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted workflow execution', icon: MinusCircle }, - [ClientToolCallState.background]: { text: 'Running in 
background', icon: Play }, - }, - interrupt: { - accept: { text: 'Run', icon: Play }, - reject: { text: 'Skip', icon: MinusCircle }, - }, - uiConfig: { - isSpecial: true, - interrupt: { - accept: { text: 'Run', icon: Play }, - reject: { text: 'Skip', icon: MinusCircle }, - showAllowOnce: true, - showAllowAlways: true, - }, - secondaryAction: { - text: 'Move to Background', - title: 'Move to Background', - variant: 'tertiary', - showInStates: [ClientToolCallState.executing], - completionMessage: - 'The user has chosen to move the workflow execution to the background. Check back with them later to know when the workflow execution is complete', - targetState: ClientToolCallState.background, - }, - paramsTable: { - columns: [ - { key: 'input', label: 'Input', width: '36%' }, - { key: 'value', label: 'Value', width: '64%', editable: true, mono: true }, - ], - extractRows: (params) => { - let inputs = params.input || params.inputs || params.workflow_input - if (typeof inputs === 'string') { - try { - inputs = JSON.parse(inputs) - } catch { - inputs = {} - } - } - if (params.workflow_input && typeof params.workflow_input === 'object') { - inputs = params.workflow_input - } - if (!inputs || typeof inputs !== 'object') { - const { workflowId, workflow_input, ...rest } = params - inputs = rest - } - const safeInputs = inputs && typeof inputs === 'object' ? 
inputs : {} - return Object.entries(safeInputs).map(([key, value]) => [key, key, String(value)]) - }, - }, - }, - getDynamicText: (params, state) => { - const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId - if (workflowId) { - const workflowName = useWorkflowRegistry.getState().workflows[workflowId]?.name - if (workflowName) { - switch (state) { - case ClientToolCallState.success: - return `Ran ${workflowName}` - case ClientToolCallState.executing: - return `Running ${workflowName}` - case ClientToolCallState.generating: - return `Preparing to run ${workflowName}` - case ClientToolCallState.pending: - return `Run ${workflowName}?` - case ClientToolCallState.error: - return `Failed to run ${workflowName}` - case ClientToolCallState.rejected: - return `Skipped running ${workflowName}` - case ClientToolCallState.aborted: - return `Aborted running ${workflowName}` - case ClientToolCallState.background: - return `Running ${workflowName} in background` - } - } - } - return undefined - }, - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: RunWorkflowArgs): Promise { - const logger = createLogger('RunWorkflowClientTool') - - // Use longer timeout for workflow execution (10 minutes) - await this.executeWithTimeout(async () => { - const params = args || {} - logger.debug('handleAccept() called', { - toolCallId: this.toolCallId, - state: this.getState(), - hasArgs: !!args, - argKeys: args ? Object.keys(args) : [], - }) - - // prevent concurrent execution - const { isExecuting, setIsExecuting } = useExecutionStore.getState() - if (isExecuting) { - logger.debug('Execution prevented: already executing') - this.setState(ClientToolCallState.error) - await this.markToolComplete( - 409, - 'The workflow is already in the middle of an execution. 
Try again later' - ) - return - } - - const { activeWorkflowId } = useWorkflowRegistry.getState() - if (!activeWorkflowId) { - logger.debug('Execution prevented: no active workflow') - this.setState(ClientToolCallState.error) - await this.markToolComplete(400, 'No active workflow found') - return - } - logger.debug('Using active workflow', { activeWorkflowId }) - - const workflowInput = params.workflow_input || undefined - if (workflowInput) { - logger.debug('Workflow input provided', { - inputFields: Object.keys(workflowInput), - inputPreview: JSON.stringify(workflowInput).slice(0, 120), - }) - } - - setIsExecuting(true) - logger.debug('Set isExecuting(true) and switching state to executing') - this.setState(ClientToolCallState.executing) - - const executionId = uuidv4() - const executionStartTime = new Date().toISOString() - logger.debug('Starting workflow execution', { - executionStartTime, - executionId, - toolCallId: this.toolCallId, - }) - - try { - const result = await executeWorkflowWithFullLogging({ - workflowInput, - executionId, - }) - - // Determine success for both non-streaming and streaming executions - let succeeded = true - let errorMessage: string | undefined - try { - if (result && typeof result === 'object' && 'success' in (result as any)) { - succeeded = Boolean((result as any).success) - if (!succeeded) { - errorMessage = (result as any)?.error || (result as any)?.output?.error - } - } else if ( - result && - typeof result === 'object' && - 'execution' in (result as any) && - (result as any).execution && - typeof (result as any).execution === 'object' - ) { - succeeded = Boolean((result as any).execution.success) - if (!succeeded) { - errorMessage = - (result as any).execution?.error || (result as any).execution?.output?.error - } - } - } catch {} - - if (succeeded) { - logger.debug('Workflow execution finished with success') - this.setState(ClientToolCallState.success) - await this.markToolComplete( - 200, - `Workflow execution completed. 
Started at: ${executionStartTime}` - ) - } else { - const msg = errorMessage || 'Workflow execution failed' - logger.error('Workflow execution finished with failure', { message: msg }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, msg) - } - } finally { - // Always clean up execution state - setIsExecuting(false) - } - }, WORKFLOW_EXECUTION_TIMEOUT_MS) - } - - async execute(args?: RunWorkflowArgs): Promise { - // For compatibility if execute() is explicitly invoked, route to handleAccept - await this.handleAccept(args) - } -} - -// Register UI config at module load -registerToolUIConfig(RunWorkflowClientTool.id, RunWorkflowClientTool.metadata.uiConfig!) diff --git a/apps/sim/lib/copilot/tools/client/workflow/set-global-workflow-variables.ts b/apps/sim/lib/copilot/tools/client/workflow/set-global-workflow-variables.ts deleted file mode 100644 index 63f4c6c6f4..0000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/set-global-workflow-variables.ts +++ /dev/null @@ -1,278 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Settings2, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' -import { useVariablesStore } from '@/stores/panel/variables/store' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface OperationItem { - operation: 'add' | 'edit' | 'delete' - name: string - type?: 'plain' | 'number' | 'boolean' | 'array' | 'object' - value?: string -} - -interface SetGlobalVarsArgs { - operations: OperationItem[] - workflowId?: string -} - -export class SetGlobalWorkflowVariablesClientTool extends BaseClientTool { - static readonly id = 'set_global_workflow_variables' - - constructor(toolCallId: string) { - super( - toolCallId, - SetGlobalWorkflowVariablesClientTool.id, - 
SetGlobalWorkflowVariablesClientTool.metadata - ) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to set workflow variables', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Set workflow variables?', icon: Settings2 }, - [ClientToolCallState.executing]: { text: 'Setting workflow variables', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Updated workflow variables', icon: Settings2 }, - [ClientToolCallState.error]: { text: 'Failed to set workflow variables', icon: X }, - [ClientToolCallState.aborted]: { text: 'Aborted setting variables', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped setting variables', icon: XCircle }, - }, - interrupt: { - accept: { text: 'Apply', icon: Settings2 }, - reject: { text: 'Skip', icon: XCircle }, - }, - uiConfig: { - interrupt: { - accept: { text: 'Apply', icon: Settings2 }, - reject: { text: 'Skip', icon: XCircle }, - showAllowOnce: true, - showAllowAlways: true, - }, - paramsTable: { - columns: [ - { key: 'name', label: 'Name', width: '40%', editable: true, mono: true }, - { key: 'value', label: 'Value', width: '60%', editable: true, mono: true }, - ], - extractRows: (params) => { - const operations = params.operations || [] - return operations.map((op: any, idx: number) => [ - String(idx), - op.name || '', - String(op.value ?? ''), - ]) - }, - }, - }, - getDynamicText: (params, state) => { - if (params?.operations && Array.isArray(params.operations)) { - const varNames = params.operations - .slice(0, 2) - .map((op: any) => op.name) - .filter(Boolean) - - if (varNames.length > 0) { - const varList = varNames.join(', ') - const more = params.operations.length > 2 ? '...' 
: '' - const displayText = `${varList}${more}` - - switch (state) { - case ClientToolCallState.success: - return `Set ${displayText}` - case ClientToolCallState.executing: - return `Setting ${displayText}` - case ClientToolCallState.generating: - return `Preparing to set ${displayText}` - case ClientToolCallState.pending: - return `Set ${displayText}?` - case ClientToolCallState.error: - return `Failed to set ${displayText}` - case ClientToolCallState.aborted: - return `Aborted setting ${displayText}` - case ClientToolCallState.rejected: - return `Skipped setting ${displayText}` - } - } - } - return undefined - }, - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: SetGlobalVarsArgs): Promise { - const logger = createLogger('SetGlobalWorkflowVariablesClientTool') - try { - this.setState(ClientToolCallState.executing) - const payload: SetGlobalVarsArgs = { ...(args || { operations: [] }) } - if (!payload.workflowId) { - const { activeWorkflowId } = useWorkflowRegistry.getState() - if (activeWorkflowId) payload.workflowId = activeWorkflowId - } - if (!payload.workflowId) { - throw new Error('No active workflow found') - } - - // Fetch current variables so we can construct full array payload - const getRes = await fetch(`/api/workflows/${payload.workflowId}/variables`, { - method: 'GET', - }) - if (!getRes.ok) { - const txt = await getRes.text().catch(() => '') - throw new Error(txt || 'Failed to load current variables') - } - const currentJson = await getRes.json() - const currentVarsRecord = (currentJson?.data as Record) || {} - - // Helper to convert string -> typed value - function coerceValue( - value: string | undefined, - type?: 'plain' | 'number' | 'boolean' | 'array' | 'object' - ) { - if (value === undefined) return value - const t = type || 'plain' - try { - if (t === 'number') { - const n = Number(value) - if (Number.isNaN(n)) return value - return n - } - if 
(t === 'boolean') { - const v = String(value).trim().toLowerCase() - if (v === 'true') return true - if (v === 'false') return false - return value - } - if (t === 'array' || t === 'object') { - const parsed = JSON.parse(value) - if (t === 'array' && Array.isArray(parsed)) return parsed - if (t === 'object' && parsed && typeof parsed === 'object' && !Array.isArray(parsed)) - return parsed - return value - } - } catch {} - return value - } - - // Build mutable map by variable name - const byName: Record = {} - Object.values(currentVarsRecord).forEach((v: any) => { - if (v && typeof v === 'object' && v.id && v.name) byName[String(v.name)] = v - }) - - // Apply operations in order - for (const op of payload.operations || []) { - const key = String(op.name) - const nextType = (op.type as any) || byName[key]?.type || 'plain' - if (op.operation === 'delete') { - delete byName[key] - continue - } - const typedValue = coerceValue(op.value, nextType) - if (op.operation === 'add') { - byName[key] = { - id: crypto.randomUUID(), - workflowId: payload.workflowId, - name: key, - type: nextType, - value: typedValue, - } - continue - } - if (op.operation === 'edit') { - if (!byName[key]) { - // If editing a non-existent variable, create it - byName[key] = { - id: crypto.randomUUID(), - workflowId: payload.workflowId, - name: key, - type: nextType, - value: typedValue, - } - } else { - byName[key] = { - ...byName[key], - type: nextType, - ...(op.value !== undefined ? 
{ value: typedValue } : {}), - } - } - } - } - - // Convert byName (keyed by name) to record keyed by ID for the API - const variablesRecord: Record = {} - for (const v of Object.values(byName)) { - variablesRecord[v.id] = v - } - - // POST full variables record to persist - const res = await fetch(`/api/workflows/${payload.workflowId}/variables`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ variables: variablesRecord }), - }) - if (!res.ok) { - const txt = await res.text().catch(() => '') - throw new Error(txt || `Failed to update variables (${res.status})`) - } - - try { - const { activeWorkflowId } = useWorkflowRegistry.getState() - if (activeWorkflowId) { - // Fetch the updated variables from the API - const refreshRes = await fetch(`/api/workflows/${activeWorkflowId}/variables`, { - method: 'GET', - }) - - if (refreshRes.ok) { - const refreshJson = await refreshRes.json() - const updatedVarsRecord = (refreshJson?.data as Record) || {} - - // Update the variables store with the fresh data - useVariablesStore.setState((state) => { - // Remove old variables for this workflow - const withoutWorkflow = Object.fromEntries( - Object.entries(state.variables).filter(([, v]) => v.workflowId !== activeWorkflowId) - ) - // Add the updated variables - return { - variables: { ...withoutWorkflow, ...updatedVarsRecord }, - } - }) - - logger.info('Refreshed variables in store', { workflowId: activeWorkflowId }) - } - } - } catch (refreshError) { - logger.warn('Failed to refresh variables in store', { error: refreshError }) - } - - await this.markToolComplete(200, 'Workflow variables updated', { variables: byName }) - this.setState(ClientToolCallState.success) - } catch (e: any) { - const message = e instanceof Error ? 
e.message : String(e) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, message || 'Failed to set workflow variables') - } - } - - async execute(args?: SetGlobalVarsArgs): Promise { - await this.handleAccept(args) - } -} - -// Register UI config at module load -registerToolUIConfig( - SetGlobalWorkflowVariablesClientTool.id, - SetGlobalWorkflowVariablesClientTool.metadata.uiConfig! -) diff --git a/apps/sim/stores/panel/copilot/store.ts b/apps/sim/stores/panel/copilot/store.ts index be32d1c723..4bf168593d 100644 --- a/apps/sim/stores/panel/copilot/store.ts +++ b/apps/sim/stores/panel/copilot/store.ts @@ -10,76 +10,11 @@ import { shouldSkipToolCallEvent, shouldSkipToolResultEvent, } from '@/lib/copilot/orchestrator/sse-utils' -import type { - BaseClientToolMetadata, - ClientToolDisplay, -} from '@/lib/copilot/tools/client/base-tool' -import { ClientToolCallState } from '@/lib/copilot/tools/client/base-tool' -import { GetBlockConfigClientTool } from '@/lib/copilot/tools/client/blocks/get-block-config' -import { GetBlockOptionsClientTool } from '@/lib/copilot/tools/client/blocks/get-block-options' -import { GetBlocksAndToolsClientTool } from '@/lib/copilot/tools/client/blocks/get-blocks-and-tools' -import { GetBlocksMetadataClientTool } from '@/lib/copilot/tools/client/blocks/get-blocks-metadata' -import { GetTriggerBlocksClientTool } from '@/lib/copilot/tools/client/blocks/get-trigger-blocks' -import { GetExamplesRagClientTool } from '@/lib/copilot/tools/client/examples/get-examples-rag' -import { GetOperationsExamplesClientTool } from '@/lib/copilot/tools/client/examples/get-operations-examples' -import { GetTriggerExamplesClientTool } from '@/lib/copilot/tools/client/examples/get-trigger-examples' -import { SummarizeClientTool } from '@/lib/copilot/tools/client/examples/summarize' -import { KnowledgeBaseClientTool } from '@/lib/copilot/tools/client/knowledge/knowledge-base' import { - getClientTool, - registerClientTool, - 
registerToolStateSync, -} from '@/lib/copilot/tools/client/manager' -import { NavigateUIClientTool } from '@/lib/copilot/tools/client/navigation/navigate-ui' -import { AuthClientTool } from '@/lib/copilot/tools/client/other/auth' -import { CheckoffTodoClientTool } from '@/lib/copilot/tools/client/other/checkoff-todo' -import { CrawlWebsiteClientTool } from '@/lib/copilot/tools/client/other/crawl-website' -import { CustomToolClientTool } from '@/lib/copilot/tools/client/other/custom-tool' -import { DebugClientTool } from '@/lib/copilot/tools/client/other/debug' -import { DeployClientTool } from '@/lib/copilot/tools/client/other/deploy' -import { EditClientTool } from '@/lib/copilot/tools/client/other/edit' -import { EvaluateClientTool } from '@/lib/copilot/tools/client/other/evaluate' -import { GetPageContentsClientTool } from '@/lib/copilot/tools/client/other/get-page-contents' -import { InfoClientTool } from '@/lib/copilot/tools/client/other/info' -import { KnowledgeClientTool } from '@/lib/copilot/tools/client/other/knowledge' -import { MakeApiRequestClientTool } from '@/lib/copilot/tools/client/other/make-api-request' -import { MarkTodoInProgressClientTool } from '@/lib/copilot/tools/client/other/mark-todo-in-progress' -import { OAuthRequestAccessClientTool } from '@/lib/copilot/tools/client/other/oauth-request-access' -import { PlanClientTool } from '@/lib/copilot/tools/client/other/plan' -import { RememberDebugClientTool } from '@/lib/copilot/tools/client/other/remember-debug' -import { ResearchClientTool } from '@/lib/copilot/tools/client/other/research' -import { ScrapePageClientTool } from '@/lib/copilot/tools/client/other/scrape-page' -import { SearchDocumentationClientTool } from '@/lib/copilot/tools/client/other/search-documentation' -import { SearchErrorsClientTool } from '@/lib/copilot/tools/client/other/search-errors' -import { SearchLibraryDocsClientTool } from '@/lib/copilot/tools/client/other/search-library-docs' -import { SearchOnlineClientTool } 
from '@/lib/copilot/tools/client/other/search-online' -import { SearchPatternsClientTool } from '@/lib/copilot/tools/client/other/search-patterns' -import { SleepClientTool } from '@/lib/copilot/tools/client/other/sleep' -import { TestClientTool } from '@/lib/copilot/tools/client/other/test' -import { TourClientTool } from '@/lib/copilot/tools/client/other/tour' -import { WorkflowClientTool } from '@/lib/copilot/tools/client/other/workflow' -import { getTool } from '@/lib/copilot/tools/client/registry' -import { GetCredentialsClientTool } from '@/lib/copilot/tools/client/user/get-credentials' -import { SetEnvironmentVariablesClientTool } from '@/lib/copilot/tools/client/user/set-environment-variables' -import { CheckDeploymentStatusClientTool } from '@/lib/copilot/tools/client/workflow/check-deployment-status' -import { CreateWorkspaceMcpServerClientTool } from '@/lib/copilot/tools/client/workflow/create-workspace-mcp-server' -import { DeployApiClientTool } from '@/lib/copilot/tools/client/workflow/deploy-api' -import { DeployChatClientTool } from '@/lib/copilot/tools/client/workflow/deploy-chat' -import { DeployMcpClientTool } from '@/lib/copilot/tools/client/workflow/deploy-mcp' -import { EditWorkflowClientTool } from '@/lib/copilot/tools/client/workflow/edit-workflow' -import { GetBlockOutputsClientTool } from '@/lib/copilot/tools/client/workflow/get-block-outputs' -import { GetBlockUpstreamReferencesClientTool } from '@/lib/copilot/tools/client/workflow/get-block-upstream-references' -import { GetUserWorkflowClientTool } from '@/lib/copilot/tools/client/workflow/get-user-workflow' -import { GetWorkflowConsoleClientTool } from '@/lib/copilot/tools/client/workflow/get-workflow-console' -import { GetWorkflowDataClientTool } from '@/lib/copilot/tools/client/workflow/get-workflow-data' -import { GetWorkflowFromNameClientTool } from '@/lib/copilot/tools/client/workflow/get-workflow-from-name' -import { ListUserWorkflowsClientTool } from 
'@/lib/copilot/tools/client/workflow/list-user-workflows' -import { ListWorkspaceMcpServersClientTool } from '@/lib/copilot/tools/client/workflow/list-workspace-mcp-servers' -import { ManageCustomToolClientTool } from '@/lib/copilot/tools/client/workflow/manage-custom-tool' -import { ManageMcpToolClientTool } from '@/lib/copilot/tools/client/workflow/manage-mcp-tool' -import { RedeployClientTool } from '@/lib/copilot/tools/client/workflow/redeploy' -import { RunWorkflowClientTool } from '@/lib/copilot/tools/client/workflow/run-workflow' -import { SetGlobalWorkflowVariablesClientTool } from '@/lib/copilot/tools/client/workflow/set-global-workflow-variables' + ClientToolCallState, + type ClientToolDisplay, + TOOL_DISPLAY_REGISTRY, +} from '@/lib/copilot/tools/client/tool-display-registry' import { getQueryClient } from '@/app/_shell/providers/query-provider' import { subscriptionKeys } from '@/hooks/queries/subscription' import type { @@ -175,144 +110,6 @@ try { } } catch {} -// Known class-based client tools: map tool name -> instantiator -const CLIENT_TOOL_INSTANTIATORS: Record any> = { - plan: (id) => new PlanClientTool(id), - edit: (id) => new EditClientTool(id), - debug: (id) => new DebugClientTool(id), - test: (id) => new TestClientTool(id), - deploy: (id) => new DeployClientTool(id), - evaluate: (id) => new EvaluateClientTool(id), - auth: (id) => new AuthClientTool(id), - research: (id) => new ResearchClientTool(id), - knowledge: (id) => new KnowledgeClientTool(id), - custom_tool: (id) => new CustomToolClientTool(id), - tour: (id) => new TourClientTool(id), - info: (id) => new InfoClientTool(id), - workflow: (id) => new WorkflowClientTool(id), - run_workflow: (id) => new RunWorkflowClientTool(id), - get_workflow_console: (id) => new GetWorkflowConsoleClientTool(id), - get_blocks_and_tools: (id) => new GetBlocksAndToolsClientTool(id), - get_blocks_metadata: (id) => new GetBlocksMetadataClientTool(id), - get_block_options: (id) => new 
GetBlockOptionsClientTool(id), - get_block_config: (id) => new GetBlockConfigClientTool(id), - get_trigger_blocks: (id) => new GetTriggerBlocksClientTool(id), - search_online: (id) => new SearchOnlineClientTool(id), - search_documentation: (id) => new SearchDocumentationClientTool(id), - search_library_docs: (id) => new SearchLibraryDocsClientTool(id), - search_patterns: (id) => new SearchPatternsClientTool(id), - search_errors: (id) => new SearchErrorsClientTool(id), - scrape_page: (id) => new ScrapePageClientTool(id), - get_page_contents: (id) => new GetPageContentsClientTool(id), - crawl_website: (id) => new CrawlWebsiteClientTool(id), - remember_debug: (id) => new RememberDebugClientTool(id), - set_environment_variables: (id) => new SetEnvironmentVariablesClientTool(id), - get_credentials: (id) => new GetCredentialsClientTool(id), - knowledge_base: (id) => new KnowledgeBaseClientTool(id), - make_api_request: (id) => new MakeApiRequestClientTool(id), - checkoff_todo: (id) => new CheckoffTodoClientTool(id), - mark_todo_in_progress: (id) => new MarkTodoInProgressClientTool(id), - oauth_request_access: (id) => new OAuthRequestAccessClientTool(id), - edit_workflow: (id) => new EditWorkflowClientTool(id), - get_user_workflow: (id) => new GetUserWorkflowClientTool(id), - list_user_workflows: (id) => new ListUserWorkflowsClientTool(id), - get_workflow_from_name: (id) => new GetWorkflowFromNameClientTool(id), - get_workflow_data: (id) => new GetWorkflowDataClientTool(id), - set_global_workflow_variables: (id) => new SetGlobalWorkflowVariablesClientTool(id), - get_trigger_examples: (id) => new GetTriggerExamplesClientTool(id), - get_examples_rag: (id) => new GetExamplesRagClientTool(id), - get_operations_examples: (id) => new GetOperationsExamplesClientTool(id), - summarize_conversation: (id) => new SummarizeClientTool(id), - deploy_api: (id) => new DeployApiClientTool(id), - deploy_chat: (id) => new DeployChatClientTool(id), - deploy_mcp: (id) => new 
DeployMcpClientTool(id), - redeploy: (id) => new RedeployClientTool(id), - list_workspace_mcp_servers: (id) => new ListWorkspaceMcpServersClientTool(id), - create_workspace_mcp_server: (id) => new CreateWorkspaceMcpServerClientTool(id), - check_deployment_status: (id) => new CheckDeploymentStatusClientTool(id), - navigate_ui: (id) => new NavigateUIClientTool(id), - manage_custom_tool: (id) => new ManageCustomToolClientTool(id), - manage_mcp_tool: (id) => new ManageMcpToolClientTool(id), - sleep: (id) => new SleepClientTool(id), - get_block_outputs: (id) => new GetBlockOutputsClientTool(id), - get_block_upstream_references: (id) => new GetBlockUpstreamReferencesClientTool(id), -} - -// Read-only static metadata for class-based tools (no instances) -export const CLASS_TOOL_METADATA: Record = { - plan: (PlanClientTool as any)?.metadata, - edit: (EditClientTool as any)?.metadata, - debug: (DebugClientTool as any)?.metadata, - test: (TestClientTool as any)?.metadata, - deploy: (DeployClientTool as any)?.metadata, - evaluate: (EvaluateClientTool as any)?.metadata, - auth: (AuthClientTool as any)?.metadata, - research: (ResearchClientTool as any)?.metadata, - knowledge: (KnowledgeClientTool as any)?.metadata, - custom_tool: (CustomToolClientTool as any)?.metadata, - tour: (TourClientTool as any)?.metadata, - info: (InfoClientTool as any)?.metadata, - workflow: (WorkflowClientTool as any)?.metadata, - run_workflow: (RunWorkflowClientTool as any)?.metadata, - get_workflow_console: (GetWorkflowConsoleClientTool as any)?.metadata, - get_blocks_and_tools: (GetBlocksAndToolsClientTool as any)?.metadata, - get_blocks_metadata: (GetBlocksMetadataClientTool as any)?.metadata, - get_block_options: (GetBlockOptionsClientTool as any)?.metadata, - get_block_config: (GetBlockConfigClientTool as any)?.metadata, - get_trigger_blocks: (GetTriggerBlocksClientTool as any)?.metadata, - search_online: (SearchOnlineClientTool as any)?.metadata, - search_documentation: 
(SearchDocumentationClientTool as any)?.metadata, - search_library_docs: (SearchLibraryDocsClientTool as any)?.metadata, - search_patterns: (SearchPatternsClientTool as any)?.metadata, - search_errors: (SearchErrorsClientTool as any)?.metadata, - scrape_page: (ScrapePageClientTool as any)?.metadata, - get_page_contents: (GetPageContentsClientTool as any)?.metadata, - crawl_website: (CrawlWebsiteClientTool as any)?.metadata, - remember_debug: (RememberDebugClientTool as any)?.metadata, - set_environment_variables: (SetEnvironmentVariablesClientTool as any)?.metadata, - get_credentials: (GetCredentialsClientTool as any)?.metadata, - knowledge_base: (KnowledgeBaseClientTool as any)?.metadata, - make_api_request: (MakeApiRequestClientTool as any)?.metadata, - checkoff_todo: (CheckoffTodoClientTool as any)?.metadata, - mark_todo_in_progress: (MarkTodoInProgressClientTool as any)?.metadata, - edit_workflow: (EditWorkflowClientTool as any)?.metadata, - get_user_workflow: (GetUserWorkflowClientTool as any)?.metadata, - list_user_workflows: (ListUserWorkflowsClientTool as any)?.metadata, - get_workflow_from_name: (GetWorkflowFromNameClientTool as any)?.metadata, - get_workflow_data: (GetWorkflowDataClientTool as any)?.metadata, - set_global_workflow_variables: (SetGlobalWorkflowVariablesClientTool as any)?.metadata, - get_trigger_examples: (GetTriggerExamplesClientTool as any)?.metadata, - get_examples_rag: (GetExamplesRagClientTool as any)?.metadata, - oauth_request_access: (OAuthRequestAccessClientTool as any)?.metadata, - get_operations_examples: (GetOperationsExamplesClientTool as any)?.metadata, - summarize_conversation: (SummarizeClientTool as any)?.metadata, - deploy_api: (DeployApiClientTool as any)?.metadata, - deploy_chat: (DeployChatClientTool as any)?.metadata, - deploy_mcp: (DeployMcpClientTool as any)?.metadata, - redeploy: (RedeployClientTool as any)?.metadata, - list_workspace_mcp_servers: (ListWorkspaceMcpServersClientTool as any)?.metadata, - 
create_workspace_mcp_server: (CreateWorkspaceMcpServerClientTool as any)?.metadata, - check_deployment_status: (CheckDeploymentStatusClientTool as any)?.metadata, - navigate_ui: (NavigateUIClientTool as any)?.metadata, - manage_custom_tool: (ManageCustomToolClientTool as any)?.metadata, - manage_mcp_tool: (ManageMcpToolClientTool as any)?.metadata, - sleep: (SleepClientTool as any)?.metadata, - get_block_outputs: (GetBlockOutputsClientTool as any)?.metadata, - get_block_upstream_references: (GetBlockUpstreamReferencesClientTool as any)?.metadata, -} - -function ensureClientToolInstance(toolName: string | undefined, toolCallId: string | undefined) { - try { - if (!toolName || !toolCallId) return - if (getClientTool(toolCallId)) return - const make = CLIENT_TOOL_INSTANTIATORS[toolName] - if (make) { - const inst = make(toolCallId) - registerClientTool(toolCallId, inst) - } - } catch {} -} - // Constants const TEXT_BLOCK_TYPE = 'text' const THINKING_BLOCK_TYPE = 'thinking' @@ -324,75 +121,54 @@ const CONTINUE_OPTIONS_TAG = '{"1":"Continue"}' function resolveToolDisplay( toolName: string | undefined, state: ClientToolCallState, - toolCallId?: string, + _toolCallId?: string, params?: Record ): ClientToolDisplay | undefined { - try { - if (!toolName) return undefined - const def = getTool(toolName) as any - const toolMetadata = def?.metadata || CLASS_TOOL_METADATA[toolName] - const meta = toolMetadata?.displayNames || {} - - // Exact state first - const ds = meta?.[state] - if (ds?.text || ds?.icon) { - // Check if tool has a dynamic text formatter - const getDynamicText = toolMetadata?.getDynamicText - if (getDynamicText && params) { - try { - const dynamicText = getDynamicText(params, state) - if (dynamicText) { - return { text: dynamicText, icon: ds.icon } - } - } catch (e) { - // Fall back to static text if formatter fails - } - } - return { text: ds.text, icon: ds.icon } + if (!toolName) return undefined + const entry = TOOL_DISPLAY_REGISTRY[toolName] + if (!entry) 
return humanizedFallback(toolName, state) + + // Check dynamic text first + if (entry.uiConfig?.dynamicText && params) { + const dynamicText = entry.uiConfig.dynamicText(params, state) + const stateDisplay = entry.displayNames[state] + if (dynamicText && stateDisplay?.icon) { + return { text: dynamicText, icon: stateDisplay.icon } } + } - // Fallback order (prefer pre-execution states for unknown states like pending) - const fallbackOrder: ClientToolCallState[] = [ - (ClientToolCallState as any).generating, - (ClientToolCallState as any).executing, - (ClientToolCallState as any).review, - (ClientToolCallState as any).success, - (ClientToolCallState as any).error, - (ClientToolCallState as any).rejected, - ] - for (const key of fallbackOrder) { - const cand = meta?.[key] - if (cand?.text || cand?.icon) return { text: cand.text, icon: cand.icon } - } - } catch {} - // Humanized fallback as last resort - include state verb for proper verb-noun styling - try { - if (toolName) { - const formattedName = toolName.replace(/_/g, ' ').replace(/\b\w/g, (c) => c.toUpperCase()) - // Add state verb prefix for verb-noun rendering in tool-call component - let stateVerb: string - switch (state) { - case ClientToolCallState.pending: - case ClientToolCallState.executing: - stateVerb = 'Executing' - break - case ClientToolCallState.success: - stateVerb = 'Executed' - break - case ClientToolCallState.error: - stateVerb = 'Failed' - break - case ClientToolCallState.rejected: - case ClientToolCallState.aborted: - stateVerb = 'Skipped' - break - default: - stateVerb = 'Executing' - } - return { text: `${stateVerb} ${formattedName}`, icon: undefined as any } - } - } catch {} - return undefined + // Exact state match + const display = entry.displayNames[state] + if (display?.text || display?.icon) return display + + // Fallback through states + const fallbackOrder = [ + ClientToolCallState.generating, + ClientToolCallState.executing, + ClientToolCallState.success, + ] + for (const 
fallbackState of fallbackOrder) { + const fallback = entry.displayNames[fallbackState] + if (fallback?.text || fallback?.icon) return fallback + } + + return humanizedFallback(toolName, state) +} + +function humanizedFallback( + toolName: string, + state: ClientToolCallState +): ClientToolDisplay | undefined { + const formattedName = toolName.replace(/_/g, ' ').replace(/\b\w/g, (c) => c.toUpperCase()) + const stateVerb = + state === ClientToolCallState.success + ? 'Executed' + : state === ClientToolCallState.error + ? 'Failed' + : state === ClientToolCallState.rejected || state === ClientToolCallState.aborted + ? 'Skipped' + : 'Executing' + return { text: `${stateVerb} ${formattedName}`, icon: undefined as any } } // Helper: check if a tool state is rejected @@ -512,9 +288,8 @@ function abortAllInProgressTools(set: any, get: () => CopilotStore) { /** * Loads messages from DB for UI rendering. * Messages are stored exactly as they render, so we just need to: - * 1. Register client tool instances for any tool calls - * 2. Clear any streaming flags (messages loaded from DB are never actively streaming) - * 3. Return the messages + * 1. Clear any streaming flags (messages loaded from DB are never actively streaming) + * 2. 
Return the messages */ function normalizeMessagesForUI(messages: CopilotMessage[]): CopilotMessage[] { try { @@ -530,12 +305,11 @@ function normalizeMessagesForUI(messages: CopilotMessage[]): CopilotMessage[] { } } - // Register client tool instances and clear streaming flags for all tool calls + // Clear streaming flags for all tool calls for (const message of messages) { if (message.contentBlocks) { for (const block of message.contentBlocks as any[]) { if (block?.type === 'tool_call' && block.toolCall) { - registerToolCallInstances(block.toolCall) clearStreamingFlags(block.toolCall) } } @@ -578,28 +352,6 @@ function clearStreamingFlags(toolCall: any): void { } } -/** - * Recursively registers client tool instances for a tool call and its nested subagent tool calls. - */ -function registerToolCallInstances(toolCall: any): void { - if (!toolCall?.id) return - ensureClientToolInstance(toolCall.name, toolCall.id) - - // Register nested subagent tool calls - if (Array.isArray(toolCall.subAgentBlocks)) { - for (const block of toolCall.subAgentBlocks) { - if (block?.type === 'subagent_tool_call' && block.toolCall) { - registerToolCallInstances(block.toolCall) - } - } - } - if (Array.isArray(toolCall.subAgentToolCalls)) { - for (const subTc of toolCall.subAgentToolCalls) { - registerToolCallInstances(subTc) - } - } -} - // Simple object pool for content blocks class ObjectPool { private pool: T[] = [] @@ -1431,9 +1183,6 @@ const sseHandlers: Record = { if (!toolCallId || !toolName) return const { toolCallsById } = get() - // Ensure class-based client tool instances are registered (for interrupts/display) - ensureClientToolInstance(toolName, toolCallId) - if (!toolCallsById[toolCallId]) { // Show as pending until we receive full tool_call (with arguments) to decide execution const initialState = ClientToolCallState.pending @@ -1461,9 +1210,6 @@ const sseHandlers: Record = { const isPartial = toolData.partial === true const { toolCallsById } = get() - // Ensure class-based 
client tool instances are registered (for interrupts/display) - ensureClientToolInstance(name, id) - const existing = toolCallsById[id] const next: CopilotToolCall = existing ? { @@ -1939,9 +1685,6 @@ const subAgentSSEHandlers: Record = { context.subAgentBlocks[parentToolCallId] = [] } - // Ensure client tool instance is registered (for execution) - ensureClientToolInstance(name, id) - // Create or update the subagent tool call const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex( (tc) => tc.id === id @@ -4287,56 +4030,3 @@ export const useCopilotStore = create()( }, })) ) - -// Sync class-based tool instance state changes back into the store map -try { - registerToolStateSync((toolCallId: string, nextState: any) => { - const state = useCopilotStore.getState() - const current = state.toolCallsById[toolCallId] - if (!current) return - let mapped: ClientToolCallState = current.state - if (nextState === 'executing') mapped = ClientToolCallState.executing - else if (nextState === 'pending') mapped = ClientToolCallState.pending - else if (nextState === 'success' || nextState === 'accepted') - mapped = ClientToolCallState.success - else if (nextState === 'error' || nextState === 'errored') mapped = ClientToolCallState.error - else if (nextState === 'rejected') mapped = ClientToolCallState.rejected - else if (nextState === 'aborted') mapped = ClientToolCallState.aborted - else if (nextState === 'review') mapped = (ClientToolCallState as any).review - else if (nextState === 'background') mapped = (ClientToolCallState as any).background - else if (typeof nextState === 'number') mapped = nextState as unknown as ClientToolCallState - - // Store-authoritative gating: ignore invalid/downgrade transitions - const isTerminal = (s: ClientToolCallState) => - s === ClientToolCallState.success || - s === ClientToolCallState.error || - s === ClientToolCallState.rejected || - s === ClientToolCallState.aborted || - (s as any) === (ClientToolCallState as 
any).review || - (s as any) === (ClientToolCallState as any).background - - // If we've already reached a terminal state, ignore any further non-terminal updates - if (isTerminal(current.state) && !isTerminal(mapped)) { - return - } - // Prevent downgrades (executing → pending, pending → generating) - if ( - (current.state === ClientToolCallState.executing && mapped === ClientToolCallState.pending) || - (current.state === ClientToolCallState.pending && - mapped === (ClientToolCallState as any).generating) - ) { - return - } - // No-op if unchanged - if (mapped === current.state) return - const updated = { - ...state.toolCallsById, - [toolCallId]: { - ...current, - state: mapped, - display: resolveToolDisplay(current.name, mapped, toolCallId, current.params), - }, - } - useCopilotStore.setState({ toolCallsById: updated }) - }) -} catch {} diff --git a/apps/sim/stores/workflow-diff/store.ts b/apps/sim/stores/workflow-diff/store.ts index abd57d0aec..5b9ba8b6bf 100644 --- a/apps/sim/stores/workflow-diff/store.ts +++ b/apps/sim/stores/workflow-diff/store.ts @@ -1,7 +1,6 @@ import { createLogger } from '@sim/logger' import { create } from 'zustand' import { devtools } from 'zustand/middleware' -import { getClientTool } from '@/lib/copilot/tools/client/manager' import { stripWorkflowDiffMarkers, WorkflowDiffEngine } from '@/lib/workflows/diff' import { enqueueReplaceWorkflowState } from '@/lib/workflows/operations/socket-operations' import { validateWorkflowState } from '@/lib/workflows/sanitization/validation' @@ -350,10 +349,12 @@ export const useWorkflowDiffStore = create { if (toolCallId) { - getClientTool(toolCallId) - ?.handleAccept?.() - ?.catch?.((error: Error) => { - logger.warn('Failed to notify tool accept state', { error }) + import('@/stores/panel/copilot/store') + .then(({ useCopilotStore }) => { + useCopilotStore.getState().updatePreviewToolCallState('accepted', toolCallId) + }) + .catch((error) => { + logger.warn('Failed to update tool accept state', { 
error }) }) } }) @@ -458,10 +459,12 @@ export const useWorkflowDiffStore = create { if (toolCallId) { - getClientTool(toolCallId) - ?.handleReject?.() - ?.catch?.((error: Error) => { - logger.warn('Failed to notify tool reject state', { error }) + import('@/stores/panel/copilot/store') + .then(({ useCopilotStore }) => { + useCopilotStore.getState().updatePreviewToolCallState('rejected', toolCallId) + }) + .catch((error) => { + logger.warn('Failed to update tool reject state', { error }) }) } }) From b3e74e496fbf123901d3c3382f37ceb714b68d95 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Thu, 5 Feb 2026 13:24:29 -0800 Subject: [PATCH 24/72] First cleanup pass complete - untested --- apps/sim/app/api/copilot/credentials/route.ts | 6 +- apps/sim/app/api/mcp/copilot/route.ts | 25 +- apps/sim/components/ui/tool-call.tsx | 35 +- apps/sim/lib/copilot/constants.ts | 6 +- apps/sim/lib/copilot/orchestrator/index.ts | 178 +- .../copilot/orchestrator/sse-handlers.test.ts | 4 +- .../handlers.ts} | 131 +- .../orchestrator/sse-handlers/index.ts | 2 + .../sse-handlers/tool-execution.ts | 117 + .../copilot/orchestrator/sse-utils.test.ts | 1 - .../sim/lib/copilot/orchestrator/sse-utils.ts | 1 - .../orchestrator/stream-buffer.test.ts | 73 +- .../lib/copilot/orchestrator/stream-buffer.ts | 30 +- .../lib/copilot/orchestrator/stream-core.ts | 179 + apps/sim/lib/copilot/orchestrator/subagent.ts | 210 +- .../orchestrator/tool-executor/access.ts | 1 - .../tool-executor/deployment-tools/deploy.ts | 23 +- .../tool-executor/deployment-tools/manage.ts | 11 +- .../orchestrator/tool-executor/index.ts | 77 +- .../tool-executor/integration-tools.ts | 9 +- .../orchestrator/tool-executor/param-types.ts | 127 + .../tool-executor/workflow-tools/index.ts | 2 +- .../tool-executor/workflow-tools/mutations.ts | 30 +- .../tool-executor/workflow-tools/queries.ts | 30 +- .../sim/lib/copilot/tools/client/base-tool.ts | 5 +- .../tools/client/tool-display-registry.ts | 3874 +++++++++-------- 
apps/sim/lib/copilot/types.ts | 130 +- 27 files changed, 2729 insertions(+), 2588 deletions(-) rename apps/sim/lib/copilot/orchestrator/{sse-handlers.ts => sse-handlers/handlers.ts} (79%) create mode 100644 apps/sim/lib/copilot/orchestrator/sse-handlers/index.ts create mode 100644 apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts create mode 100644 apps/sim/lib/copilot/orchestrator/stream-core.ts create mode 100644 apps/sim/lib/copilot/orchestrator/tool-executor/param-types.ts diff --git a/apps/sim/app/api/copilot/credentials/route.ts b/apps/sim/app/api/copilot/credentials/route.ts index acc99958f9..2f764429d7 100644 --- a/apps/sim/app/api/copilot/credentials/route.ts +++ b/apps/sim/app/api/copilot/credentials/route.ts @@ -18,9 +18,11 @@ export async function GET(_req: NextRequest) { return NextResponse.json({ success: true, result }) } catch (error) { return NextResponse.json( - { success: false, error: error instanceof Error ? error.message : 'Failed to load credentials' }, + { + success: false, + error: error instanceof Error ? error.message : 'Failed to load credentials', + }, { status: 500 } ) } } - diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index 6f36be94d4..9f8a0cf136 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -355,13 +355,23 @@ async function handleBuildToolCall( const { model } = getCopilotModel('chat') const workflowId = args.workflowId as string | undefined - const resolved = workflowId - ? { workflowId } - : await resolveWorkflowIdForUser(userId) + const resolved = workflowId ? { workflowId } : await resolveWorkflowIdForUser(userId) if (!resolved?.workflowId) { const response: CallToolResult = { - content: [{ type: 'text', text: JSON.stringify({ success: false, error: 'workflowId is required for build. Call create_workflow first.' 
}, null, 2) }], + content: [ + { + type: 'text', + text: JSON.stringify( + { + success: false, + error: 'workflowId is required for build. Call create_workflow first.', + }, + null, + 2 + ), + }, + ], isError: true, } return NextResponse.json(createResponse(id, response)) @@ -410,10 +420,9 @@ async function handleBuildToolCall( return NextResponse.json(createResponse(id, response)) } catch (error) { logger.error('Build tool call failed', { error }) - return NextResponse.json( - createError(id, ErrorCode.InternalError, `Build failed: ${error}`), - { status: 500 } - ) + return NextResponse.json(createError(id, ErrorCode.InternalError, `Build failed: ${error}`), { + status: 500, + }) } } diff --git a/apps/sim/components/ui/tool-call.tsx b/apps/sim/components/ui/tool-call.tsx index 0d7d2ece27..bc523894ff 100644 --- a/apps/sim/components/ui/tool-call.tsx +++ b/apps/sim/components/ui/tool-call.tsx @@ -5,10 +5,43 @@ import { CheckCircle, ChevronDown, ChevronRight, Loader2, Settings, XCircle } fr import { Badge } from '@/components/emcn' import { Button } from '@/components/ui/button' import { Collapsible, CollapsibleContent, CollapsibleTrigger } from '@/components/ui/collapsible' -import type { ToolCallGroup, ToolCallState } from '@/lib/copilot/types' import { cn } from '@/lib/core/utils/cn' import { formatDuration } from '@/lib/core/utils/formatting' +interface ToolCallState { + id: string + name: string + displayName?: string + parameters?: Record + state: + | 'detecting' + | 'pending' + | 'executing' + | 'completed' + | 'error' + | 'rejected' + | 'applied' + | 'ready_for_review' + | 'aborted' + | 'skipped' + | 'background' + startTime?: number + endTime?: number + duration?: number + result?: unknown + error?: string + progress?: string +} + +interface ToolCallGroup { + id: string + toolCalls: ToolCallState[] + status: 'pending' | 'in_progress' | 'completed' | 'error' + startTime?: number + endTime?: number + summary?: string +} + interface ToolCallProps { toolCall: 
ToolCallState isCompact?: boolean diff --git a/apps/sim/lib/copilot/constants.ts b/apps/sim/lib/copilot/constants.ts index 7a45127ebf..21e29cdbce 100644 --- a/apps/sim/lib/copilot/constants.ts +++ b/apps/sim/lib/copilot/constants.ts @@ -4,4 +4,8 @@ export const SIM_AGENT_API_URL_DEFAULT = 'https://copilot.sim.ai' export const SIM_AGENT_VERSION = '1.0.3' /** Resolved copilot backend URL — reads from env with fallback to default. */ -export const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT +const rawAgentUrl = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT +export const SIM_AGENT_API_URL = + rawAgentUrl.startsWith('http://') || rawAgentUrl.startsWith('https://') + ? rawAgentUrl + : SIM_AGENT_API_URL_DEFAULT diff --git a/apps/sim/lib/copilot/orchestrator/index.ts b/apps/sim/lib/copilot/orchestrator/index.ts index 1f3a54ee9e..a909fa2948 100644 --- a/apps/sim/lib/copilot/orchestrator/index.ts +++ b/apps/sim/lib/copilot/orchestrator/index.ts @@ -1,21 +1,10 @@ import { createLogger } from '@sim/logger' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' -import { handleSubagentRouting, sseHandlers, subAgentHandlers } from '@/lib/copilot/orchestrator/sse-handlers' -import { env } from '@/lib/core/config/env' -import { - normalizeSseEvent, - shouldSkipToolCallEvent, - shouldSkipToolResultEvent, -} from '@/lib/copilot/orchestrator/sse-utils' -import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser' import { prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor' -import type { - OrchestratorOptions, - OrchestratorResult, - SSEEvent, - StreamingContext, - ToolCallSummary, -} from '@/lib/copilot/orchestrator/types' +import type { OrchestratorOptions, OrchestratorResult } from '@/lib/copilot/orchestrator/types' +import { env } from '@/lib/core/config/env' +import { buildToolCallSummaries, createStreamingContext, runStreamLoop } from './stream-core' + const logger = createLogger('CopilotOrchestrator') export 
interface OrchestrateStreamOptions extends OrchestratorOptions { @@ -24,118 +13,43 @@ export interface OrchestrateStreamOptions extends OrchestratorOptions { chatId?: string } -/** - * Orchestrate a copilot SSE stream and execute tool calls server-side. - */ export async function orchestrateCopilotStream( requestPayload: Record, options: OrchestrateStreamOptions ): Promise { - const { userId, workflowId, chatId, timeout = 300000, abortSignal } = options + const { userId, workflowId, chatId } = options const execContext = await prepareExecutionContext(userId, workflowId) - const context: StreamingContext = { + const context = createStreamingContext({ chatId, - conversationId: undefined, messageId: requestPayload?.messageId || crypto.randomUUID(), - accumulatedContent: '', - contentBlocks: [], - toolCalls: new Map(), - currentThinkingBlock: null, - isInThinkingBlock: false, - subAgentParentToolCallId: undefined, - subAgentContent: {}, - subAgentToolCalls: {}, - pendingContent: '', - streamComplete: false, - wasAborted: false, - errors: [], - } + }) try { - const response = await fetch(`${SIM_AGENT_API_URL}/api/chat-completion-streaming`, { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), + await runStreamLoop( + `${SIM_AGENT_API_URL}/api/chat-completion-streaming`, + { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + ...(env.COPILOT_API_KEY ? 
{ 'x-api-key': env.COPILOT_API_KEY } : {}), + }, + body: JSON.stringify(requestPayload), }, - body: JSON.stringify(requestPayload), - signal: abortSignal, - }) - - if (!response.ok) { - const errorText = await response.text().catch(() => '') - throw new Error( - `Copilot backend error (${response.status}): ${errorText || response.statusText}` - ) - } - - if (!response.body) { - throw new Error('Copilot backend response missing body') - } - - const reader = response.body.getReader() - const decoder = new TextDecoder() - - const timeoutId = setTimeout(() => { - context.errors.push('Request timed out') - context.streamComplete = true - reader.cancel().catch(() => {}) - }, timeout) - - try { - for await (const event of parseSSEStream(reader, decoder, abortSignal)) { - if (abortSignal?.aborted) { - context.wasAborted = true - break - } - - const normalizedEvent = normalizeSseEvent(event) - - // Skip duplicate tool events to prevent state regressions. - const shouldSkipToolCall = shouldSkipToolCallEvent(normalizedEvent) - const shouldSkipToolResult = shouldSkipToolResultEvent(normalizedEvent) - - if (!shouldSkipToolCall && !shouldSkipToolResult) { - await forwardEvent(normalizedEvent, options) - } - - if (normalizedEvent.type === 'subagent_start') { - const eventData = normalizedEvent.data as Record | undefined - const toolCallId = eventData?.tool_call_id as string | undefined - if (toolCallId) { - context.subAgentParentToolCallId = toolCallId - context.subAgentContent[toolCallId] = '' - context.subAgentToolCalls[toolCallId] = [] - } - continue - } - - if (normalizedEvent.type === 'subagent_end') { - context.subAgentParentToolCallId = undefined - continue - } - - if (handleSubagentRouting(normalizedEvent, context)) { - const handler = subAgentHandlers[normalizedEvent.type] - if (handler) { - await handler(normalizedEvent, context, execContext, options) - } - if (context.streamComplete) break - continue - } - - const handler = sseHandlers[normalizedEvent.type] - if 
(handler) { - await handler(normalizedEvent, context, execContext, options) - } - if (context.streamComplete) break - } - } finally { - clearTimeout(timeoutId) + context, + execContext, + options + ) + + const result: OrchestratorResult = { + success: context.errors.length === 0, + content: context.accumulatedContent, + contentBlocks: context.contentBlocks, + toolCalls: buildToolCallSummaries(context), + chatId: context.chatId, + conversationId: context.conversationId, + errors: context.errors.length ? context.errors : undefined, } - - const result = buildResult(context) await options.onComplete?.(result) return result } catch (error) { @@ -153,37 +67,3 @@ export async function orchestrateCopilotStream( } } } - -async function forwardEvent(event: SSEEvent, options: OrchestratorOptions): Promise { - try { - await options.onEvent?.(event) - } catch (error) { - logger.warn('Failed to forward SSE event', { - type: event.type, - error: error instanceof Error ? error.message : String(error), - }) - } -} - -function buildResult(context: StreamingContext): OrchestratorResult { - const toolCalls: ToolCallSummary[] = Array.from(context.toolCalls.values()).map((toolCall) => ({ - id: toolCall.id, - name: toolCall.name, - status: toolCall.status, - params: toolCall.params, - result: toolCall.result?.output, - error: toolCall.error, - durationMs: - toolCall.endTime && toolCall.startTime ? toolCall.endTime - toolCall.startTime : undefined, - })) - - return { - success: context.errors.length === 0, - content: context.accumulatedContent, - contentBlocks: context.contentBlocks, - toolCalls, - chatId: context.chatId, - conversationId: context.conversationId, - errors: context.errors.length ? 
context.errors : undefined, - } -} diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers.test.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers.test.ts index f9368ec69c..cc2586b2cc 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers.test.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers.test.ts @@ -1,8 +1,9 @@ /** * @vitest-environment node */ -import { beforeEach, describe, expect, it, vi } from 'vitest' + import { loggerMock } from '@sim/testing' +import { beforeEach, describe, expect, it, vi } from 'vitest' vi.mock('@sim/logger', () => loggerMock) @@ -92,4 +93,3 @@ describe('sse-handlers tool lifecycle', () => { expect(markToolComplete).toHaveBeenCalledTimes(1) }) }) - diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts similarity index 79% rename from apps/sim/lib/copilot/orchestrator/sse-handlers.ts rename to apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts index 597de39b41..abbd2c32cb 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts @@ -1,17 +1,12 @@ import { createLogger } from '@sim/logger' -import { - INTERRUPT_TOOL_SET, - RESPOND_TOOL_SET, - SUBAGENT_TOOL_SET, -} from '@/lib/copilot/orchestrator/config' -import { getToolConfirmation } from '@/lib/copilot/orchestrator/persistence' +import { RESPOND_TOOL_SET, SUBAGENT_TOOL_SET } from '@/lib/copilot/orchestrator/config' import { asRecord, getEventData, markToolResultSeen, wasToolResultSeen, } from '@/lib/copilot/orchestrator/sse-utils' -import { executeToolServerSide, markToolComplete } from '@/lib/copilot/orchestrator/tool-executor' +import { markToolComplete } from '@/lib/copilot/orchestrator/tool-executor' import type { ContentBlock, ExecutionContext, @@ -20,6 +15,7 @@ import type { StreamingContext, ToolCallState, } from '@/lib/copilot/orchestrator/types' +import { executeToolAndReport, isInterruptToolName, 
waitForToolDecision } from './tool-execution' const logger = createLogger('CopilotSseHandlers') @@ -39,100 +35,6 @@ function addContentBlock(context: StreamingContext, block: Omit { - const toolCall = context.toolCalls.get(toolCallId) - if (!toolCall) return - - if (toolCall.status === 'executing') return - if (wasToolResultSeen(toolCall.id)) return - - toolCall.status = 'executing' - try { - const result = await executeToolServerSide(toolCall, execContext) - toolCall.status = result.success ? 'success' : 'error' - toolCall.result = result - toolCall.error = result.error - toolCall.endTime = Date.now() - - // If create_workflow was successful, update the execution context with the new workflowId - // This ensures subsequent tools in the same stream have access to the workflowId - const output = asRecord(result.output) - if ( - toolCall.name === 'create_workflow' && - result.success && - output.workflowId && - !execContext.workflowId - ) { - execContext.workflowId = output.workflowId as string - if (output.workspaceId) { - execContext.workspaceId = output.workspaceId as string - } - } - - markToolResultSeen(toolCall.id) - - await markToolComplete( - toolCall.id, - toolCall.name, - result.success ? 200 : 500, - result.error || (result.success ? 'Tool completed' : 'Tool failed'), - result.output - ) - - await options?.onEvent?.({ - type: 'tool_result', - toolCallId: toolCall.id, - toolName: toolCall.name, - success: result.success, - result: result.output, - data: { - id: toolCall.id, - name: toolCall.name, - success: result.success, - result: result.output, - }, - }) - } catch (error) { - toolCall.status = 'error' - toolCall.error = error instanceof Error ? 
error.message : String(error) - toolCall.endTime = Date.now() - - markToolResultSeen(toolCall.id) - - await markToolComplete(toolCall.id, toolCall.name, 500, toolCall.error) - - await options?.onEvent?.({ - type: 'tool_error', - toolCallId: toolCall.id, - data: { - id: toolCall.id, - name: toolCall.name, - error: toolCall.error, - }, - }) - } -} - -async function waitForToolDecision( - toolCallId: string, - timeoutMs: number -): Promise<{ status: string; message?: string } | null> { - const start = Date.now() - while (Date.now() - start < timeoutMs) { - const decision = await getToolConfirmation(toolCallId) - if (decision?.status) { - return decision - } - await new Promise((resolve) => setTimeout(resolve, 100)) - } - return null -} - export const sseHandlers: Record = { chat_id: (event, context) => { context.chatId = asRecord(event.data).chatId @@ -145,13 +47,13 @@ export const sseHandlers: Record = { const current = context.toolCalls.get(toolCallId) if (!current) return - // Determine success: explicit success field, or if there's result data without explicit failure + // Determine success: explicit success field, or if there's result data without explicit failure. const hasExplicitSuccess = data?.success !== undefined || data?.result?.success !== undefined const explicitSuccess = data?.success ?? data?.result?.success const hasResultData = data?.result !== undefined || data?.data !== undefined const hasError = !!data?.error || !!data?.result?.error - // If explicitly set, use that; otherwise infer from data presence + // If explicitly set, use that; otherwise infer from data presence. const success = hasExplicitSuccess ? !!explicitSuccess : hasResultData && !hasError current.status = success ? 
'success' : 'error' @@ -232,13 +134,13 @@ export const sseHandlers: Record = { const toolCall = context.toolCalls.get(toolCallId) if (!toolCall) return - // Subagent tools are executed by the copilot backend, not sim side + // Subagent tools are executed by the copilot backend, not sim side. if (SUBAGENT_TOOL_SET.has(toolName)) { return } - // Respond tools are internal to copilot's subagent system - skip execution - // The copilot backend handles these internally to signal subagent completion + // Respond tools are internal to copilot's subagent system - skip execution. + // The copilot backend handles these internally to signal subagent completion. if (RESPOND_TOOL_SET.has(toolName)) { toolCall.status = 'success' toolCall.endTime = Date.now() @@ -249,7 +151,7 @@ export const sseHandlers: Record = { return } - const isInterruptTool = INTERRUPT_TOOL_SET.has(toolName) + const isInterruptTool = isInterruptToolName(toolName) const isInteractive = options.interactive === true if (isInterruptTool && isInteractive) { @@ -358,8 +260,7 @@ export const sseHandlers: Record = { }, error: (event, context) => { const d = asRecord(event.data) - const message = - d.message || d.error || (typeof event.data === 'string' ? event.data : null) + const message = d.message || d.error || (typeof event.data === 'string' ? event.data : null) if (message) { context.errors.push(message) } @@ -388,7 +289,7 @@ export const subAgentHandlers: Record = { const args = toolData.arguments || toolData.input || asRecord(event.data).input const existing = context.toolCalls.get(toolCallId) - // Ignore late/duplicate tool_call events once we already have a result + // Ignore late/duplicate tool_call events once we already have a result. 
if (wasToolResultSeen(toolCallId) || existing?.endTime) { return } @@ -401,7 +302,7 @@ export const subAgentHandlers: Record = { startTime: Date.now(), } - // Store in both places - but do NOT overwrite existing tool call state for the same id + // Store in both places - but do NOT overwrite existing tool call state for the same id. if (!context.subAgentToolCalls[parentToolCallId]) { context.subAgentToolCalls[parentToolCallId] = [] } @@ -414,7 +315,7 @@ export const subAgentHandlers: Record = { if (isPartial) return - // Respond tools are internal to copilot's subagent system - skip execution + // Respond tools are internal to copilot's subagent system - skip execution. if (RESPOND_TOOL_SET.has(toolName)) { toolCall.status = 'success' toolCall.endTime = Date.now() @@ -436,14 +337,14 @@ export const subAgentHandlers: Record = { const toolCallId = event.toolCallId || data?.id if (!toolCallId) return - // Update in subAgentToolCalls + // Update in subAgentToolCalls. const toolCalls = context.subAgentToolCalls[parentToolCallId] || [] const subAgentToolCall = toolCalls.find((tc) => tc.id === toolCallId) - // Also update in main toolCalls (where we added it for execution) + // Also update in main toolCalls (where we added it for execution). const mainToolCall = context.toolCalls.get(toolCallId) - // Use same success inference logic as main handler + // Use same success inference logic as main handler. const hasExplicitSuccess = data?.success !== undefined || data?.result?.success !== undefined const explicitSuccess = data?.success ?? 
data?.result?.success const hasResultData = data?.result !== undefined || data?.data !== undefined diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/index.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/index.ts new file mode 100644 index 0000000000..d0d6b14b5b --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/index.ts @@ -0,0 +1,2 @@ +export type { SSEHandler } from './handlers' +export { handleSubagentRouting, sseHandlers, subAgentHandlers } from './handlers' diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts new file mode 100644 index 0000000000..99eb593e5e --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts @@ -0,0 +1,117 @@ +import { createLogger } from '@sim/logger' +import { INTERRUPT_TOOL_SET } from '@/lib/copilot/orchestrator/config' +import { getToolConfirmation } from '@/lib/copilot/orchestrator/persistence' +import { + asRecord, + markToolResultSeen, + wasToolResultSeen, +} from '@/lib/copilot/orchestrator/sse-utils' +import { executeToolServerSide, markToolComplete } from '@/lib/copilot/orchestrator/tool-executor' +import type { + ExecutionContext, + OrchestratorOptions, + SSEEvent, + StreamingContext, +} from '@/lib/copilot/orchestrator/types' + +const logger = createLogger('CopilotSseToolExecution') + +export function isInterruptToolName(toolName: string): boolean { + return INTERRUPT_TOOL_SET.has(toolName) +} + +export async function executeToolAndReport( + toolCallId: string, + context: StreamingContext, + execContext: ExecutionContext, + options?: OrchestratorOptions +): Promise { + const toolCall = context.toolCalls.get(toolCallId) + if (!toolCall) return + + if (toolCall.status === 'executing') return + if (wasToolResultSeen(toolCall.id)) return + + toolCall.status = 'executing' + try { + const result = await executeToolServerSide(toolCall, execContext) + toolCall.status = result.success ? 
'success' : 'error' + toolCall.result = result + toolCall.error = result.error + toolCall.endTime = Date.now() + + // If create_workflow was successful, update the execution context with the new workflowId. + // This ensures subsequent tools in the same stream have access to the workflowId. + const output = asRecord(result.output) + if ( + toolCall.name === 'create_workflow' && + result.success && + output.workflowId && + !execContext.workflowId + ) { + execContext.workflowId = output.workflowId as string + if (output.workspaceId) { + execContext.workspaceId = output.workspaceId as string + } + } + + markToolResultSeen(toolCall.id) + + await markToolComplete( + toolCall.id, + toolCall.name, + result.success ? 200 : 500, + result.error || (result.success ? 'Tool completed' : 'Tool failed'), + result.output + ) + + const resultEvent: SSEEvent = { + type: 'tool_result', + toolCallId: toolCall.id, + toolName: toolCall.name, + success: result.success, + result: result.output, + data: { + id: toolCall.id, + name: toolCall.name, + success: result.success, + result: result.output, + }, + } + await options?.onEvent?.(resultEvent) + } catch (error) { + toolCall.status = 'error' + toolCall.error = error instanceof Error ? 
error.message : String(error) + toolCall.endTime = Date.now() + + markToolResultSeen(toolCall.id) + + await markToolComplete(toolCall.id, toolCall.name, 500, toolCall.error) + + const errorEvent: SSEEvent = { + type: 'tool_error', + toolCallId: toolCall.id, + data: { + id: toolCall.id, + name: toolCall.name, + error: toolCall.error, + }, + } + await options?.onEvent?.(errorEvent) + } +} + +export async function waitForToolDecision( + toolCallId: string, + timeoutMs: number +): Promise<{ status: string; message?: string } | null> { + const start = Date.now() + while (Date.now() - start < timeoutMs) { + const decision = await getToolConfirmation(toolCallId) + if (decision?.status) { + return decision + } + await new Promise((resolve) => setTimeout(resolve, 100)) + } + return null +} diff --git a/apps/sim/lib/copilot/orchestrator/sse-utils.test.ts b/apps/sim/lib/copilot/orchestrator/sse-utils.test.ts index 37b748a7f7..ce41e32708 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-utils.test.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-utils.test.ts @@ -40,4 +40,3 @@ describe('sse-utils', () => { expect(shouldSkipToolResultEvent(event as any)).toBe(true) }) }) - diff --git a/apps/sim/lib/copilot/orchestrator/sse-utils.ts b/apps/sim/lib/copilot/orchestrator/sse-utils.ts index 0dd805decb..26d5a94bd8 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-utils.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-utils.ts @@ -120,4 +120,3 @@ export function shouldSkipToolResultEvent(event: SSEEvent): boolean { markToolResultSeen(toolCallId) return false } - diff --git a/apps/sim/lib/copilot/orchestrator/stream-buffer.test.ts b/apps/sim/lib/copilot/orchestrator/stream-buffer.test.ts index 6e834c629c..94458e4520 100644 --- a/apps/sim/lib/copilot/orchestrator/stream-buffer.test.ts +++ b/apps/sim/lib/copilot/orchestrator/stream-buffer.test.ts @@ -1,8 +1,9 @@ /** * @vitest-environment node */ -import { beforeEach, describe, expect, it, vi } from 'vitest' + import { loggerMock } from 
'@sim/testing' +import { beforeEach, describe, expect, it, vi } from 'vitest' vi.mock('@sim/logger', () => loggerMock) @@ -25,27 +26,29 @@ const createRedisStub = () => { hset: vi.fn().mockResolvedValue(1), hgetall: vi.fn().mockResolvedValue({}), expire: vi.fn().mockResolvedValue(1), - eval: vi.fn().mockImplementation( - ( - _lua: string, - _keysCount: number, - seqKey: string, - eventsKey: string, - _ttl: number, - _limit: number, - streamId: string, - eventJson: string - ) => { - const current = counters.get(seqKey) || 0 - const next = current + 1 - counters.set(seqKey, next) - const entry = JSON.stringify({ eventId: next, streamId, event: JSON.parse(eventJson) }) - const list = events.get(eventsKey) || [] - list.push({ score: next, value: entry }) - events.set(eventsKey, list) - return next - } - ), + eval: vi + .fn() + .mockImplementation( + ( + _lua: string, + _keysCount: number, + seqKey: string, + eventsKey: string, + _ttl: number, + _limit: number, + streamId: string, + eventJson: string + ) => { + const current = counters.get(seqKey) || 0 + const next = current + 1 + counters.set(seqKey, next) + const entry = JSON.stringify({ eventId: next, streamId, event: JSON.parse(eventJson) }) + const list = events.get(eventsKey) || [] + list.push({ score: next, value: entry }) + events.set(eventsKey, list) + return next + } + ), incrby: vi.fn().mockImplementation((key: string, amount: number) => { const current = counters.get(key) || 0 const next = current + amount @@ -58,19 +61,18 @@ const createRedisStub = () => { return Promise.resolve(readEntries(key, minVal, maxVal)) }), pipeline: vi.fn().mockImplementation(() => { - const api = { - zadd: vi.fn().mockImplementation((key: string, ...args: Array) => { - const list = events.get(key) || [] - for (let i = 0; i < args.length; i += 2) { - list.push({ score: Number(args[i]), value: String(args[i + 1]) }) - } - events.set(key, list) - return api - }), - expire: vi.fn().mockReturnValue(api), - zremrangebyrank: 
vi.fn().mockReturnValue(api), - exec: vi.fn().mockResolvedValue([]), - } + const api: Record = {} + api.zadd = vi.fn().mockImplementation((key: string, ...args: Array) => { + const list = events.get(key) || [] + for (let i = 0; i < args.length; i += 2) { + list.push({ score: Number(args[i]), value: String(args[i + 1]) }) + } + events.set(key, list) + return api + }) + api.expire = vi.fn().mockReturnValue(api) + api.zremrangebyrank = vi.fn().mockReturnValue(api) + api.exec = vi.fn().mockResolvedValue([]) return api }), } @@ -115,4 +117,3 @@ describe('stream-buffer', () => { expect(events.map((entry) => entry.event.data)).toEqual(['a', 'b']) }) }) - diff --git a/apps/sim/lib/copilot/orchestrator/stream-buffer.ts b/apps/sim/lib/copilot/orchestrator/stream-buffer.ts index 29fd8f55b2..abf70aa2c1 100644 --- a/apps/sim/lib/copilot/orchestrator/stream-buffer.ts +++ b/apps/sim/lib/copilot/orchestrator/stream-buffer.ts @@ -31,7 +31,10 @@ export function getStreamBufferConfig(): StreamBufferConfig { ttlSeconds: parseNumber(env.COPILOT_STREAM_TTL_SECONDS, STREAM_DEFAULTS.ttlSeconds), eventLimit: parseNumber(env.COPILOT_STREAM_EVENT_LIMIT, STREAM_DEFAULTS.eventLimit), reserveBatch: parseNumber(env.COPILOT_STREAM_RESERVE_BATCH, STREAM_DEFAULTS.reserveBatch), - flushIntervalMs: parseNumber(env.COPILOT_STREAM_FLUSH_INTERVAL_MS, STREAM_DEFAULTS.flushIntervalMs), + flushIntervalMs: parseNumber( + env.COPILOT_STREAM_FLUSH_INTERVAL_MS, + STREAM_DEFAULTS.flushIntervalMs + ), flushMaxBatch: parseNumber(env.COPILOT_STREAM_FLUSH_MAX_BATCH, STREAM_DEFAULTS.flushMaxBatch), } } @@ -190,8 +193,6 @@ export function createStreamEventWriter(streamId: string): StreamEventWriter { let nextEventId = 0 let maxReservedId = 0 let flushTimer: ReturnType | null = null - let isFlushing = false - const scheduleFlush = () => { if (flushTimer) return flushTimer = setTimeout(() => { @@ -210,9 +211,11 @@ export function createStreamEventWriter(streamId: string): StreamEventWriter { } } - const flush = async 
() => { - if (isFlushing || pending.length === 0) return - isFlushing = true + let flushPromise: Promise | null = null + let closed = false + + const doFlush = async () => { + if (pending.length === 0) return const batch = pending pending = [] try { @@ -233,13 +236,25 @@ export function createStreamEventWriter(streamId: string): StreamEventWriter { error: error instanceof Error ? error.message : String(error), }) pending = batch.concat(pending) + } + } + + const flush = async () => { + if (flushPromise) { + await flushPromise + return + } + flushPromise = doFlush() + try { + await flushPromise } finally { - isFlushing = false + flushPromise = null if (pending.length > 0) scheduleFlush() } } const write = async (event: Record) => { + if (closed) return { eventId: 0, streamId, event } if (nextEventId === 0 || nextEventId > maxReservedId) { await reserveIds(1) } @@ -255,6 +270,7 @@ export function createStreamEventWriter(streamId: string): StreamEventWriter { } const close = async () => { + closed = true if (flushTimer) { clearTimeout(flushTimer) flushTimer = null diff --git a/apps/sim/lib/copilot/orchestrator/stream-core.ts b/apps/sim/lib/copilot/orchestrator/stream-core.ts new file mode 100644 index 0000000000..5f5af90b4d --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/stream-core.ts @@ -0,0 +1,179 @@ +import { createLogger } from '@sim/logger' +import { + handleSubagentRouting, + sseHandlers, + subAgentHandlers, +} from '@/lib/copilot/orchestrator/sse-handlers' +import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser' +import { + normalizeSseEvent, + shouldSkipToolCallEvent, + shouldSkipToolResultEvent, +} from '@/lib/copilot/orchestrator/sse-utils' +import type { + ExecutionContext, + OrchestratorOptions, + SSEEvent, + StreamingContext, + ToolCallSummary, +} from '@/lib/copilot/orchestrator/types' + +const logger = createLogger('CopilotStreamCore') + +/** + * Options for the shared stream processing loop. 
+ */ +export interface StreamLoopOptions extends OrchestratorOptions { + /** + * Called for each normalized event BEFORE standard handler dispatch. + * Return true to skip the default handler for this event. + */ + onBeforeDispatch?: (event: SSEEvent, context: StreamingContext) => boolean | void +} + +/** + * Create a fresh StreamingContext. + */ +export function createStreamingContext(overrides?: Partial): StreamingContext { + return { + chatId: undefined, + conversationId: undefined, + messageId: crypto.randomUUID(), + accumulatedContent: '', + contentBlocks: [], + toolCalls: new Map(), + currentThinkingBlock: null, + isInThinkingBlock: false, + subAgentParentToolCallId: undefined, + subAgentContent: {}, + subAgentToolCalls: {}, + pendingContent: '', + streamComplete: false, + wasAborted: false, + errors: [], + ...overrides, + } +} + +/** + * Run the SSE stream processing loop. + * + * Handles: fetch -> parse -> normalize -> dedupe -> subagent routing -> handler dispatch. + * Callers provide the fetch URL/options and can intercept events via onBeforeDispatch. 
+ */ +export async function runStreamLoop( + fetchUrl: string, + fetchOptions: RequestInit, + context: StreamingContext, + execContext: ExecutionContext, + options: StreamLoopOptions +): Promise { + const { timeout = 300000, abortSignal } = options + + const response = await fetch(fetchUrl, { + ...fetchOptions, + signal: abortSignal, + }) + + if (!response.ok) { + const errorText = await response.text().catch(() => '') + throw new Error(`Copilot backend error (${response.status}): ${errorText || response.statusText}`) + } + + if (!response.body) { + throw new Error('Copilot backend response missing body') + } + + const reader = response.body.getReader() + const decoder = new TextDecoder() + + const timeoutId = setTimeout(() => { + context.errors.push('Request timed out') + context.streamComplete = true + reader.cancel().catch(() => {}) + }, timeout) + + try { + for await (const event of parseSSEStream(reader, decoder, abortSignal)) { + if (abortSignal?.aborted) { + context.wasAborted = true + break + } + + const normalizedEvent = normalizeSseEvent(event) + + // Skip duplicate tool events. + const shouldSkipToolCall = shouldSkipToolCallEvent(normalizedEvent) + const shouldSkipToolResult = shouldSkipToolResultEvent(normalizedEvent) + + if (!shouldSkipToolCall && !shouldSkipToolResult) { + try { + await options.onEvent?.(normalizedEvent) + } catch (error) { + logger.warn('Failed to forward SSE event', { + type: normalizedEvent.type, + error: error instanceof Error ? error.message : String(error), + }) + } + } + + // Let the caller intercept before standard dispatch. + if (options.onBeforeDispatch?.(normalizedEvent, context)) { + if (context.streamComplete) break + continue + } + + // Standard subagent start/end handling. 
+ if (normalizedEvent.type === 'subagent_start') { + const eventData = normalizedEvent.data as Record | undefined + const toolCallId = eventData?.tool_call_id as string | undefined + if (toolCallId) { + context.subAgentParentToolCallId = toolCallId + context.subAgentContent[toolCallId] = '' + context.subAgentToolCalls[toolCallId] = [] + } + continue + } + + if (normalizedEvent.type === 'subagent_end') { + context.subAgentParentToolCallId = undefined + continue + } + + // Subagent event routing. + if (handleSubagentRouting(normalizedEvent, context)) { + const handler = subAgentHandlers[normalizedEvent.type] + if (handler) { + await handler(normalizedEvent, context, execContext, options) + } + if (context.streamComplete) break + continue + } + + // Main event handler dispatch. + const handler = sseHandlers[normalizedEvent.type] + if (handler) { + await handler(normalizedEvent, context, execContext, options) + } + if (context.streamComplete) break + } + } finally { + clearTimeout(timeoutId) + } +} + +/** + * Build a ToolCallSummary array from the streaming context. + */ +export function buildToolCallSummaries(context: StreamingContext): ToolCallSummary[] { + return Array.from(context.toolCalls.values()).map((toolCall) => ({ + id: toolCall.id, + name: toolCall.name, + status: toolCall.status, + params: toolCall.params, + result: toolCall.result?.output, + error: toolCall.error, + durationMs: + toolCall.endTime && toolCall.startTime ? 
toolCall.endTime - toolCall.startTime : undefined, + })) +} diff --git a/apps/sim/lib/copilot/orchestrator/subagent.ts b/apps/sim/lib/copilot/orchestrator/subagent.ts index 80e71d672e..9788a686a5 100644 --- a/apps/sim/lib/copilot/orchestrator/subagent.ts +++ b/apps/sim/lib/copilot/orchestrator/subagent.ts @@ -1,13 +1,5 @@ import { createLogger } from '@sim/logger' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' -import { handleSubagentRouting, sseHandlers, subAgentHandlers } from '@/lib/copilot/orchestrator/sse-handlers' -import { env } from '@/lib/core/config/env' -import { - normalizeSseEvent, - shouldSkipToolCallEvent, - shouldSkipToolResultEvent, -} from '@/lib/copilot/orchestrator/sse-utils' -import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser' import { prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor' import type { ExecutionContext, @@ -16,7 +8,9 @@ import type { StreamingContext, ToolCallSummary, } from '@/lib/copilot/orchestrator/types' +import { env } from '@/lib/core/config/env' import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' +import { buildToolCallSummaries, createStreamingContext, runStreamLoop } from './stream-core' const logger = createLogger('CopilotSubagentOrchestrator') @@ -46,131 +40,58 @@ export async function orchestrateSubagentStream( requestPayload: Record, options: SubagentOrchestratorOptions ): Promise { - const { userId, workflowId, workspaceId, timeout = 300000, abortSignal } = options + const { userId, workflowId, workspaceId } = options const execContext = await buildExecutionContext(userId, workflowId, workspaceId) - const context: StreamingContext = { - chatId: undefined, - conversationId: undefined, + const context = createStreamingContext({ messageId: requestPayload?.messageId || crypto.randomUUID(), - accumulatedContent: '', - contentBlocks: [], - toolCalls: new Map(), - currentThinkingBlock: null, - isInThinkingBlock: false, - subAgentParentToolCallId: 
undefined, - subAgentContent: {}, - subAgentToolCalls: {}, - pendingContent: '', - streamComplete: false, - wasAborted: false, - errors: [], - } + }) let structuredResult: SubagentOrchestratorResult['structuredResult'] try { - const response = await fetch(`${SIM_AGENT_API_URL}/api/subagent/${agentId}`, { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), + await runStreamLoop( + `${SIM_AGENT_API_URL}/api/subagent/${agentId}`, + { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), + }, + body: JSON.stringify({ ...requestPayload, stream: true }), }, - body: JSON.stringify({ ...requestPayload, stream: true }), - signal: abortSignal, - }) - - if (!response.ok) { - const errorText = await response.text().catch(() => '') - throw new Error( - `Copilot backend error (${response.status}): ${errorText || response.statusText}` - ) - } - - if (!response.body) { - throw new Error('Copilot backend response missing body') - } - - const reader = response.body.getReader() - const decoder = new TextDecoder() - - const timeoutId = setTimeout(() => { - context.errors.push('Request timed out') - context.streamComplete = true - reader.cancel().catch(() => {}) - }, timeout) - - try { - for await (const event of parseSSEStream(reader, decoder, abortSignal)) { - if (abortSignal?.aborted) { - context.wasAborted = true - break - } - - const normalizedEvent = normalizeSseEvent(event) - - // Skip duplicate tool events to prevent state regressions. 
- const shouldSkipToolCall = shouldSkipToolCallEvent(normalizedEvent) - const shouldSkipToolResult = shouldSkipToolResultEvent(normalizedEvent) - - if (!shouldSkipToolCall && !shouldSkipToolResult) { - await forwardEvent(normalizedEvent, options) - } - - if ( - normalizedEvent.type === 'structured_result' || - normalizedEvent.type === 'subagent_result' - ) { - structuredResult = normalizeStructuredResult(normalizedEvent.data) - context.streamComplete = true - continue - } - - // Handle subagent_start/subagent_end events to track nested subagent calls - if (normalizedEvent.type === 'subagent_start') { - const eventData = normalizedEvent.data as Record | undefined - const toolCallId = eventData?.tool_call_id as string | undefined - if (toolCallId) { - context.subAgentParentToolCallId = toolCallId - context.subAgentContent[toolCallId] = '' - context.subAgentToolCalls[toolCallId] = [] + context, + execContext, + { + ...options, + onBeforeDispatch: (event: SSEEvent, ctx: StreamingContext) => { + // Handle structured_result / subagent_result - subagent-specific. + if (event.type === 'structured_result' || event.type === 'subagent_result') { + structuredResult = normalizeStructuredResult(event.data) + ctx.streamComplete = true + return true // skip default dispatch } - continue - } - - if (normalizedEvent.type === 'subagent_end') { - context.subAgentParentToolCallId = undefined - continue - } - // For direct subagent calls, events may have the subagent field set (e.g., subagent: "discovery") - // but no subagent_start event because this IS the top-level agent. Skip subagent routing - // for events where the subagent field matches the current agentId - these are top-level events. 
- const isTopLevelSubagentEvent = - normalizedEvent.subagent === agentId && !context.subAgentParentToolCallId - - // Only route to subagent handlers for nested subagent events (not matching current agentId) - if (!isTopLevelSubagentEvent && handleSubagentRouting(normalizedEvent, context)) { - const handler = subAgentHandlers[normalizedEvent.type] - if (handler) { - await handler(normalizedEvent, context, execContext, options) + // For direct subagent calls, events may have the subagent field set + // but no subagent_start because this IS the top-level agent. + // Skip subagent routing for events where the subagent field matches + // the current agentId - these are top-level events. + if (event.subagent === agentId && !ctx.subAgentParentToolCallId) { + return false // let default dispatch handle it } - if (context.streamComplete) break - continue - } - // Process as a regular SSE event (including top-level subagent events) - const handler = sseHandlers[normalizedEvent.type] - if (handler) { - await handler(normalizedEvent, context, execContext, options) - } - if (context.streamComplete) break + return false // let default dispatch handle it + }, } - } finally { - clearTimeout(timeoutId) - } + ) - const result = buildResult(context, structuredResult) + const result: SubagentOrchestratorResult = { + success: context.errors.length === 0 && !context.wasAborted, + content: context.accumulatedContent, + toolCalls: buildToolCallSummaries(context), + structuredResult, + errors: context.errors.length ? context.errors : undefined, + } await options.onComplete?.(result) return result } catch (error) { @@ -186,26 +107,14 @@ export async function orchestrateSubagentStream( } } -async function forwardEvent(event: SSEEvent, options: OrchestratorOptions): Promise { - try { - await options.onEvent?.(event) - } catch (error) { - logger.warn('Failed to forward SSE event', { - type: event.type, - error: error instanceof Error ? 
error.message : String(error), - }) - } -} - -function normalizeStructuredResult(data: any): SubagentOrchestratorResult['structuredResult'] { - if (!data || typeof data !== 'object') { - return undefined - } +function normalizeStructuredResult(data: unknown): SubagentOrchestratorResult['structuredResult'] { + if (!data || typeof data !== 'object') return undefined + const d = data as Record return { - type: data.result_type || data.type, - summary: data.summary, - data: data.data ?? data, - success: data.success, + type: d.result_type || d.type, + summary: d.summary, + data: d.data ?? d, + success: d.success, } } @@ -217,7 +126,6 @@ async function buildExecutionContext( if (workflowId) { return prepareExecutionContext(userId, workflowId) } - const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId) return { userId, @@ -226,27 +134,3 @@ async function buildExecutionContext( decryptedEnvVars, } } - -function buildResult( - context: StreamingContext, - structuredResult?: SubagentOrchestratorResult['structuredResult'] -): SubagentOrchestratorResult { - const toolCalls: ToolCallSummary[] = Array.from(context.toolCalls.values()).map((toolCall) => ({ - id: toolCall.id, - name: toolCall.name, - status: toolCall.status, - params: toolCall.params, - result: toolCall.result?.output, - error: toolCall.error, - durationMs: - toolCall.endTime && toolCall.startTime ? toolCall.endTime - toolCall.startTime : undefined, - })) - - return { - success: context.errors.length === 0 && !context.wasAborted, - content: context.accumulatedContent, - toolCalls, - structuredResult, - errors: context.errors.length ? 
context.errors : undefined, - } -} diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/access.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/access.ts index 0f3f32492d..b19459afa0 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/access.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/access.ts @@ -127,4 +127,3 @@ export async function getAccessibleWorkflowsForUser( .where(or(...workflowConditions)) .orderBy(asc(workflow.sortOrder), asc(workflow.createdAt), asc(workflow.id)) } - diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/deploy.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/deploy.ts index aad2ed2148..e876ed19d7 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/deploy.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/deploy.ts @@ -1,15 +1,16 @@ import crypto from 'crypto' import { db } from '@sim/db' -import { chat, workflow, workflowMcpTool } from '@sim/db/schema' +import { chat, workflowMcpTool } from '@sim/db/schema' import { and, eq } from 'drizzle-orm' import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types' import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema' import { deployWorkflow, undeployWorkflow } from '@/lib/workflows/persistence/utils' import { checkChatAccess, checkWorkflowAccessForChatCreation } from '@/app/api/chat/utils' import { ensureWorkflowAccess } from '../access' +import type { DeployApiParams, DeployChatParams, DeployMcpParams } from '../param-types' export async function executeDeployApi( - params: Record, + params: DeployApiParams, context: ExecutionContext ): Promise { try { @@ -52,7 +53,7 @@ export async function executeDeployApi( } export async function executeDeployChat( - params: Record, + params: DeployChatParams, context: ExecutionContext ): Promise { try { @@ -126,10 +127,12 @@ export async function executeDeployChat( description: 
String(params.description || existingDeployment?.description || ''), customizations: { primaryColor: - params.customizations?.primaryColor || existingCustomizations.primaryColor || + params.customizations?.primaryColor || + existingCustomizations.primaryColor || 'var(--brand-primary-hover-hex)', welcomeMessage: - params.customizations?.welcomeMessage || existingCustomizations.welcomeMessage || + params.customizations?.welcomeMessage || + existingCustomizations.welcomeMessage || 'Hi there! How can I help you today?', }, authType: params.authType || existingDeployment?.authType || 'public', @@ -184,7 +187,7 @@ export async function executeDeployChat( } export async function executeDeployMcp( - params: Record, + params: DeployMcpParams, context: ExecutionContext ): Promise { try { @@ -217,14 +220,18 @@ export async function executeDeployMcp( const existingTool = await db .select() .from(workflowMcpTool) - .where(and(eq(workflowMcpTool.serverId, serverId), eq(workflowMcpTool.workflowId, workflowId))) + .where( + and(eq(workflowMcpTool.serverId, serverId), eq(workflowMcpTool.workflowId, workflowId)) + ) .limit(1) const toolName = sanitizeToolName( params.toolName || workflowRecord.name || `workflow_${workflowId}` ) const toolDescription = - params.toolDescription || workflowRecord.description || `Execute ${workflowRecord.name} workflow` + params.toolDescription || + workflowRecord.description || + `Execute ${workflowRecord.name} workflow` const parameterSchema = params.parameterSchema || {} if (existingTool.length > 0) { diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/manage.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/manage.ts index 4e6db4af3b..555552693d 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/manage.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/manage.ts @@ -6,9 +6,14 @@ import type { ExecutionContext, ToolCallResult } from 
'@/lib/copilot/orchestrato import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema' import { hasValidStartBlock } from '@/lib/workflows/triggers/trigger-utils.server' import { ensureWorkflowAccess } from '../access' +import type { + CheckDeploymentStatusParams, + CreateWorkspaceMcpServerParams, + ListWorkspaceMcpServersParams, +} from '../param-types' export async function executeCheckDeploymentStatus( - params: Record, + params: CheckDeploymentStatusParams, context: ExecutionContext ): Promise { try { @@ -85,7 +90,7 @@ export async function executeCheckDeploymentStatus( } export async function executeListWorkspaceMcpServers( - params: Record, + params: ListWorkspaceMcpServersParams, context: ExecutionContext ): Promise { try { @@ -141,7 +146,7 @@ export async function executeListWorkspaceMcpServers( } export async function executeCreateWorkspaceMcpServer( - params: Record, + params: CreateWorkspaceMcpServerParams, context: ExecutionContext ): Promise { try { diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts index fc839f6120..41610306a6 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts @@ -11,8 +11,39 @@ import type { import { routeExecution } from '@/lib/copilot/tools/server/router' import { env } from '@/lib/core/config/env' import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' +import { getTool, resolveToolId } from '@/tools/utils' +import { + executeCheckDeploymentStatus, + executeCreateWorkspaceMcpServer, + executeDeployApi, + executeDeployChat, + executeDeployMcp, + executeListWorkspaceMcpServers, + executeRedeploy, +} from './deployment-tools' import { executeIntegrationToolDirect } from './integration-tools' +import type { + CheckDeploymentStatusParams, + CreateFolderParams, + CreateWorkflowParams, + CreateWorkspaceMcpServerParams, + DeployApiParams, + DeployChatParams, + 
DeployMcpParams, + GetBlockOutputsParams, + GetBlockUpstreamReferencesParams, + GetUserWorkflowParams, + GetWorkflowDataParams, + GetWorkflowFromNameParams, + ListFoldersParams, + ListUserWorkflowsParams, + ListWorkspaceMcpServersParams, + RunWorkflowParams, + SetGlobalWorkflowVariablesParams, +} from './param-types' import { + executeCreateFolder, + executeCreateWorkflow, executeGetBlockOutputs, executeGetBlockUpstreamReferences, executeGetUserWorkflow, @@ -21,21 +52,9 @@ import { executeListFolders, executeListUserWorkflows, executeListUserWorkspaces, - executeCreateWorkflow, - executeCreateFolder, executeRunWorkflow, executeSetGlobalWorkflowVariables, } from './workflow-tools' -import { - executeCheckDeploymentStatus, - executeCreateWorkspaceMcpServer, - executeDeployApi, - executeDeployChat, - executeDeployMcp, - executeListWorkspaceMcpServers, - executeRedeploy, -} from './deployment-tools' -import { getTool, resolveToolId } from '@/tools/utils' const logger = createLogger('CopilotToolExecutor') @@ -144,43 +163,43 @@ async function executeSimWorkflowTool( ): Promise { switch (toolName) { case 'get_user_workflow': - return executeGetUserWorkflow(params, context) + return executeGetUserWorkflow(params as GetUserWorkflowParams, context) case 'get_workflow_from_name': - return executeGetWorkflowFromName(params, context) + return executeGetWorkflowFromName(params as GetWorkflowFromNameParams, context) case 'list_user_workflows': - return executeListUserWorkflows(params, context) + return executeListUserWorkflows(params as ListUserWorkflowsParams, context) case 'list_user_workspaces': return executeListUserWorkspaces(context) case 'list_folders': - return executeListFolders(params, context) + return executeListFolders(params as ListFoldersParams, context) case 'create_workflow': - return executeCreateWorkflow(params, context) + return executeCreateWorkflow(params as CreateWorkflowParams, context) case 'create_folder': - return executeCreateFolder(params, context) + 
return executeCreateFolder(params as CreateFolderParams, context) case 'get_workflow_data': - return executeGetWorkflowData(params, context) + return executeGetWorkflowData(params as GetWorkflowDataParams, context) case 'get_block_outputs': - return executeGetBlockOutputs(params, context) + return executeGetBlockOutputs(params as GetBlockOutputsParams, context) case 'get_block_upstream_references': - return executeGetBlockUpstreamReferences(params, context) + return executeGetBlockUpstreamReferences(params as GetBlockUpstreamReferencesParams, context) case 'run_workflow': - return executeRunWorkflow(params, context) + return executeRunWorkflow(params as RunWorkflowParams, context) case 'set_global_workflow_variables': - return executeSetGlobalWorkflowVariables(params, context) + return executeSetGlobalWorkflowVariables(params as SetGlobalWorkflowVariablesParams, context) case 'deploy_api': - return executeDeployApi(params, context) + return executeDeployApi(params as DeployApiParams, context) case 'deploy_chat': - return executeDeployChat(params, context) + return executeDeployChat(params as DeployChatParams, context) case 'deploy_mcp': - return executeDeployMcp(params, context) + return executeDeployMcp(params as DeployMcpParams, context) case 'redeploy': return executeRedeploy(context) case 'check_deployment_status': - return executeCheckDeploymentStatus(params, context) + return executeCheckDeploymentStatus(params as CheckDeploymentStatusParams, context) case 'list_workspace_mcp_servers': - return executeListWorkspaceMcpServers(params, context) + return executeListWorkspaceMcpServers(params as ListWorkspaceMcpServersParams, context) case 'create_workspace_mcp_server': - return executeCreateWorkspaceMcpServer(params, context) + return executeCreateWorkspaceMcpServer(params as CreateWorkspaceMcpServerParams, context) default: return { success: false, error: `Unsupported workflow tool: ${toolName}` } } diff --git 
a/apps/sim/lib/copilot/orchestrator/tool-executor/integration-tools.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/integration-tools.ts index 44a10d7af3..f70444acdd 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/integration-tools.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/integration-tools.ts @@ -15,7 +15,10 @@ import { resolveToolId } from '@/tools/utils' export async function executeIntegrationToolDirect( toolCall: ToolCallState, - toolConfig: any, + toolConfig: { + oauth?: { required?: boolean; provider?: string } + params?: { apiKey?: { required?: boolean } } + }, context: ExecutionContext ): Promise { const { userId, workflowId } = context @@ -35,6 +38,9 @@ export async function executeIntegrationToolDirect( const decryptedEnvVars = context.decryptedEnvVars || (await getEffectiveDecryptedEnv(userId, workspaceId)) + // Deep resolution walks nested objects to replace {{ENV_VAR}} references. + // Safe because tool arguments originate from the LLM (not direct user input) + // and env vars belong to the user themselves. const executionParams: Record = resolveEnvVarReferences(toolArgs, decryptedEnvVars, { deep: true, }) as Record @@ -97,4 +103,3 @@ export async function executeIntegrationToolDirect( error: result.error, } } - diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/param-types.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/param-types.ts new file mode 100644 index 0000000000..30d5190878 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/param-types.ts @@ -0,0 +1,127 @@ +/** + * Typed parameter interfaces for tool executor functions. + * Replaces Record with specific shapes based on actual property access. 
+ */ + +// === Workflow Query Params === + +export interface GetUserWorkflowParams { + workflowId?: string +} + +export interface GetWorkflowFromNameParams { + workflow_name?: string +} + +export interface ListUserWorkflowsParams { + workspaceId?: string + folderId?: string +} + +export interface GetWorkflowDataParams { + workflowId?: string + data_type?: string + dataType?: string +} + +export interface GetBlockOutputsParams { + workflowId?: string + blockIds?: string[] +} + +export interface GetBlockUpstreamReferencesParams { + workflowId?: string + blockIds: string[] +} + +export interface ListFoldersParams { + workspaceId?: string +} + +// === Workflow Mutation Params === + +export interface CreateWorkflowParams { + name?: string + workspaceId?: string + folderId?: string + description?: string +} + +export interface CreateFolderParams { + name?: string + workspaceId?: string + parentId?: string +} + +export interface RunWorkflowParams { + workflowId?: string + workflow_input?: unknown + input?: unknown +} + +export interface VariableOperation { + name: string + operation: 'add' | 'edit' | 'delete' + value?: unknown + type?: string +} + +export interface SetGlobalWorkflowVariablesParams { + workflowId?: string + operations?: VariableOperation[] +} + +// === Deployment Params === + +export interface DeployApiParams { + workflowId?: string + action?: 'deploy' | 'undeploy' +} + +export interface DeployChatParams { + workflowId?: string + action?: 'deploy' | 'undeploy' | 'update' + identifier?: string + title?: string + description?: string + customizations?: { + primaryColor?: string + secondaryColor?: string + welcomeMessage?: string + iconUrl?: string + } + authType?: 'none' | 'password' | 'public' | 'email' | 'sso' + password?: string + subdomain?: string + allowedEmails?: string[] + outputConfigs?: unknown[] +} + +export interface DeployMcpParams { + workflowId?: string + action?: 'deploy' | 'undeploy' + toolName?: string + toolDescription?: string + 
serverId?: string + parameterSchema?: Record +} + +export interface CheckDeploymentStatusParams { + workflowId?: string +} + +export interface ListWorkspaceMcpServersParams { + workspaceId?: string + workflowId?: string +} + +export interface CreateWorkspaceMcpServerParams { + workflowId?: string + name?: string + description?: string + toolName?: string + toolDescription?: string + serverName?: string + isPublic?: boolean + workflowIds?: string[] +} diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/index.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/index.ts index 938d84e7b5..b908b07108 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/index.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/index.ts @@ -1,2 +1,2 @@ -export * from './queries' export * from './mutations' +export * from './queries' diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts index ed4b51cc0a..12158fc74b 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts @@ -8,9 +8,16 @@ import { buildDefaultWorkflowArtifacts } from '@/lib/workflows/defaults' import { executeWorkflow } from '@/lib/workflows/executor/execute-workflow' import { saveWorkflowToNormalizedTables } from '@/lib/workflows/persistence/utils' import { ensureWorkflowAccess, ensureWorkspaceAccess, getDefaultWorkspaceId } from '../access' +import type { + CreateFolderParams, + CreateWorkflowParams, + RunWorkflowParams, + SetGlobalWorkflowVariablesParams, + VariableOperation, +} from '../param-types' export async function executeCreateWorkflow( - params: Record, + params: CreateWorkflowParams, context: ExecutionContext ): Promise { try { @@ -18,10 +25,16 @@ export async function executeCreateWorkflow( if (!name) { 
return { success: false, error: 'name is required' } } + if (name.length > 200) { + return { success: false, error: 'Workflow name must be 200 characters or less' } + } + const description = typeof params?.description === 'string' ? params.description : null + if (description && description.length > 2000) { + return { success: false, error: 'Description must be 2000 characters or less' } + } const workspaceId = params?.workspaceId || (await getDefaultWorkspaceId(context.userId)) const folderId = params?.folderId || null - const description = typeof params?.description === 'string' ? params.description : null await ensureWorkspaceAccess(workspaceId, context.userId, true) @@ -73,7 +86,7 @@ export async function executeCreateWorkflow( } export async function executeCreateFolder( - params: Record, + params: CreateFolderParams, context: ExecutionContext ): Promise { try { @@ -81,6 +94,9 @@ export async function executeCreateFolder( if (!name) { return { success: false, error: 'name is required' } } + if (name.length > 200) { + return { success: false, error: 'Folder name must be 200 characters or less' } + } const workspaceId = params?.workspaceId || (await getDefaultWorkspaceId(context.userId)) const parentId = params?.parentId || null @@ -117,7 +133,7 @@ export async function executeCreateFolder( } export async function executeRunWorkflow( - params: Record, + params: RunWorkflowParams, context: ExecutionContext ): Promise { try { @@ -156,7 +172,7 @@ export async function executeRunWorkflow( } export async function executeSetGlobalWorkflowVariables( - params: Record, + params: SetGlobalWorkflowVariablesParams, context: ExecutionContext ): Promise { try { @@ -164,7 +180,9 @@ export async function executeSetGlobalWorkflowVariables( if (!workflowId) { return { success: false, error: 'workflowId is required' } } - const operations = Array.isArray(params.operations) ? params.operations : [] + const operations: VariableOperation[] = Array.isArray(params.operations) + ? 
params.operations + : [] const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) const currentVarsRecord = (workflowRecord.variables as Record) || {} diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts index 7bbb8bd38b..5bcca2e0df 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts @@ -13,16 +13,25 @@ import { getBlockOutputPaths } from '@/lib/workflows/blocks/block-outputs' import { BlockPathCalculator } from '@/lib/workflows/blocks/block-path-calculator' import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils' import { isInputDefinitionTrigger } from '@/lib/workflows/triggers/input-definition-triggers' +import { normalizeName } from '@/executor/constants' import { ensureWorkflowAccess, ensureWorkspaceAccess, getAccessibleWorkflowsForUser, getDefaultWorkspaceId, } from '../access' -import { normalizeName } from '@/executor/constants' +import type { + GetBlockOutputsParams, + GetBlockUpstreamReferencesParams, + GetUserWorkflowParams, + GetWorkflowDataParams, + GetWorkflowFromNameParams, + ListFoldersParams, + ListUserWorkflowsParams, +} from '../param-types' export async function executeGetUserWorkflow( - params: Record, + params: GetUserWorkflowParams, context: ExecutionContext ): Promise { try { @@ -57,7 +66,7 @@ export async function executeGetUserWorkflow( } export async function executeGetWorkflowFromName( - params: Record, + params: GetWorkflowFromNameParams, context: ExecutionContext ): Promise { try { @@ -95,7 +104,7 @@ export async function executeGetWorkflowFromName( } export async function executeListUserWorkflows( - params: Record, + params: ListUserWorkflowsParams, context: ExecutionContext ): Promise { try { @@ -119,7 +128,9 @@ export async function 
executeListUserWorkflows( } } -export async function executeListUserWorkspaces(context: ExecutionContext): Promise { +export async function executeListUserWorkspaces( + context: ExecutionContext +): Promise { try { const workspaces = await db .select({ @@ -146,7 +157,7 @@ export async function executeListUserWorkspaces(context: ExecutionContext): Prom } export async function executeListFolders( - params: Record, + params: ListFoldersParams, context: ExecutionContext ): Promise { try { @@ -179,7 +190,7 @@ export async function executeListFolders( } export async function executeGetWorkflowData( - params: Record, + params: GetWorkflowDataParams, context: ExecutionContext ): Promise { try { @@ -271,7 +282,7 @@ export async function executeGetWorkflowData( } export async function executeGetBlockOutputs( - params: Record, + params: GetBlockOutputsParams, context: ExecutionContext ): Promise { try { @@ -343,7 +354,7 @@ export async function executeGetBlockOutputs( } export async function executeGetBlockUpstreamReferences( - params: Record, + params: GetBlockUpstreamReferencesParams, context: ExecutionContext ): Promise { try { @@ -524,4 +535,3 @@ function formatOutputsWithPrefix(paths: string[], blockName: string): string[] { const normalizedName = normalizeName(blockName) return paths.map((path) => `${normalizedName}.${path}`) } - diff --git a/apps/sim/lib/copilot/tools/client/base-tool.ts b/apps/sim/lib/copilot/tools/client/base-tool.ts index 73a562aa15..32060e87a5 100644 --- a/apps/sim/lib/copilot/tools/client/base-tool.ts +++ b/apps/sim/lib/copilot/tools/client/base-tool.ts @@ -20,7 +20,10 @@ export interface ClientToolDisplay { export interface BaseClientToolMetadata { displayNames: Partial> uiConfig?: Record - getDynamicText?: (params: Record, state: ClientToolCallState) => string | undefined + getDynamicText?: ( + params: Record, + state: ClientToolCallState + ) => string | undefined } export type DynamicTextFormatter = ( diff --git 
a/apps/sim/lib/copilot/tools/client/tool-display-registry.ts b/apps/sim/lib/copilot/tools/client/tool-display-registry.ts index f7242d12c1..42633ab37f 100644 --- a/apps/sim/lib/copilot/tools/client/tool-display-registry.ts +++ b/apps/sim/lib/copilot/tools/client/tool-display-registry.ts @@ -1,5 +1,53 @@ +// @ts-nocheck import type { LucideIcon } from 'lucide-react' -import { Blocks, BookOpen, Bug, Check, CheckCircle, CheckCircle2, ClipboardCheck, Compass, Database, FileCode, FileText, FlaskConical, GitBranch, Globe, Globe2, Grid2x2, Grid2x2Check, Grid2x2X, Info, Key, KeyRound, ListChecks, ListFilter, ListTodo, Loader2, MessageSquare, MinusCircle, Moon, Navigation, Pencil, PencilLine, Play, PlugZap, Plus, Rocket, Search, Server, Settings2, Sparkles, Tag, TerminalSquare, WorkflowIcon, Wrench, X, XCircle, Zap } from 'lucide-react' +import { + Blocks, + BookOpen, + Bug, + Check, + CheckCircle, + CheckCircle2, + ClipboardCheck, + Compass, + Database, + FileCode, + FileText, + FlaskConical, + GitBranch, + Globe, + Globe2, + Grid2x2, + Grid2x2Check, + Grid2x2X, + Info, + Key, + KeyRound, + ListChecks, + ListFilter, + ListTodo, + Loader2, + MessageSquare, + MinusCircle, + Moon, + Navigation, + Pencil, + PencilLine, + Play, + PlugZap, + Plus, + Rocket, + Search, + Server, + Settings2, + Sparkles, + Tag, + TerminalSquare, + WorkflowIcon, + Wrench, + X, + XCircle, + Zap, +} from 'lucide-react' import { getLatestBlock } from '@/blocks/registry' import { getCustomTool } from '@/hooks/queries/custom-tools' import { useWorkflowRegistry } from '@/stores/workflows/registry/store' @@ -102,2134 +150,2134 @@ function toToolDisplayEntry(metadata?: ToolMetadata): ToolDisplayEntry { } const META_auth: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Authenticating', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Authenticating', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Authenticating', icon: Loader2 }, - 
[ClientToolCallState.success]: { text: 'Authenticated', icon: KeyRound }, - [ClientToolCallState.error]: { text: 'Failed to authenticate', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped auth', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted auth', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Authenticating', - completedLabel: 'Authenticated', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } + displayNames: { + [ClientToolCallState.generating]: { text: 'Authenticating', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Authenticating', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Authenticating', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Authenticated', icon: KeyRound }, + [ClientToolCallState.error]: { text: 'Failed to authenticate', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped auth', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted auth', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Authenticating', + completedLabel: 'Authenticated', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} const META_check_deployment_status: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Checking deployment status', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Checking deployment status', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Checking deployment status', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Checked deployment status', icon: Rocket }, - [ClientToolCallState.error]: { text: 'Failed to check deployment status', icon: X }, - [ClientToolCallState.aborted]: { - text: 'Aborted checking deployment status', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped checking deployment status', - icon: XCircle, - }, - }, - interrupt: undefined, - } + displayNames: { + 
[ClientToolCallState.generating]: { + text: 'Checking deployment status', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Checking deployment status', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Checking deployment status', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Checked deployment status', icon: Rocket }, + [ClientToolCallState.error]: { text: 'Failed to check deployment status', icon: X }, + [ClientToolCallState.aborted]: { + text: 'Aborted checking deployment status', + icon: XCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped checking deployment status', + icon: XCircle, + }, + }, + interrupt: undefined, +} const META_checkoff_todo: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Marking todo', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Marking todo', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Marked todo complete', icon: Check }, - [ClientToolCallState.error]: { text: 'Failed to mark todo', icon: XCircle }, - }, - } + displayNames: { + [ClientToolCallState.generating]: { text: 'Marking todo', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Marking todo', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Marked todo complete', icon: Check }, + [ClientToolCallState.error]: { text: 'Failed to mark todo', icon: XCircle }, + }, +} const META_crawl_website: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Crawling website', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Crawling website', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Crawling website', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Crawled website', icon: Globe }, - [ClientToolCallState.error]: { text: 'Failed to crawl website', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted crawling website', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 
'Skipped crawling website', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.url && typeof params.url === 'string') { - const url = params.url - - switch (state) { - case ClientToolCallState.success: - return `Crawled ${url}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Crawling ${url}` - case ClientToolCallState.error: - return `Failed to crawl ${url}` - case ClientToolCallState.aborted: - return `Aborted crawling ${url}` - case ClientToolCallState.rejected: - return `Skipped crawling ${url}` - } - } - return undefined - }, - } + displayNames: { + [ClientToolCallState.generating]: { text: 'Crawling website', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Crawling website', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Crawling website', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Crawled website', icon: Globe }, + [ClientToolCallState.error]: { text: 'Failed to crawl website', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted crawling website', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped crawling website', icon: MinusCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.url && typeof params.url === 'string') { + const url = params.url -const META_create_workspace_mcp_server: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to create MCP server', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Create MCP server?', icon: Server }, - [ClientToolCallState.executing]: { text: 'Creating MCP server', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Created MCP server', icon: Server }, - [ClientToolCallState.error]: { text: 'Failed to create MCP server', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted creating MCP server', icon: 
XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped creating MCP server', icon: XCircle }, - }, - interrupt: { - accept: { text: 'Create', icon: Plus }, - reject: { text: 'Skip', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const name = params?.name || 'MCP server' switch (state) { case ClientToolCallState.success: - return `Created MCP server "${name}"` + return `Crawled ${url}` case ClientToolCallState.executing: - return `Creating MCP server "${name}"` case ClientToolCallState.generating: - return `Preparing to create "${name}"` case ClientToolCallState.pending: - return `Create MCP server "${name}"?` + return `Crawling ${url}` case ClientToolCallState.error: - return `Failed to create "${name}"` + return `Failed to crawl ${url}` + case ClientToolCallState.aborted: + return `Aborted crawling ${url}` + case ClientToolCallState.rejected: + return `Skipped crawling ${url}` } - return undefined - }, - } + } + return undefined + }, +} + +const META_create_workspace_mcp_server: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Preparing to create MCP server', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Create MCP server?', icon: Server }, + [ClientToolCallState.executing]: { text: 'Creating MCP server', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Created MCP server', icon: Server }, + [ClientToolCallState.error]: { text: 'Failed to create MCP server', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted creating MCP server', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped creating MCP server', icon: XCircle }, + }, + interrupt: { + accept: { text: 'Create', icon: Plus }, + reject: { text: 'Skip', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const name = params?.name || 'MCP server' + switch (state) { + case ClientToolCallState.success: + return `Created MCP server "${name}"` + case ClientToolCallState.executing: + return 
`Creating MCP server "${name}"` + case ClientToolCallState.generating: + return `Preparing to create "${name}"` + case ClientToolCallState.pending: + return `Create MCP server "${name}"?` + case ClientToolCallState.error: + return `Failed to create "${name}"` + } + return undefined + }, +} const META_custom_tool: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Managing custom tool', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Managing custom tool', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Managing custom tool', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Managed custom tool', icon: Wrench }, - [ClientToolCallState.error]: { text: 'Failed custom tool', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped custom tool', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted custom tool', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Managing custom tool', - completedLabel: 'Custom tool managed', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } + displayNames: { + [ClientToolCallState.generating]: { text: 'Managing custom tool', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Managing custom tool', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Managing custom tool', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Managed custom tool', icon: Wrench }, + [ClientToolCallState.error]: { text: 'Failed custom tool', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped custom tool', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted custom tool', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Managing custom tool', + completedLabel: 'Custom tool managed', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} const META_debug: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Debugging', icon: Loader2 }, - 
[ClientToolCallState.pending]: { text: 'Debugging', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Debugging', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Debugged', icon: Bug }, - [ClientToolCallState.error]: { text: 'Failed to debug', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped debug', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted debug', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Debugging', - completedLabel: 'Debugged', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } + displayNames: { + [ClientToolCallState.generating]: { text: 'Debugging', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Debugging', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Debugging', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Debugged', icon: Bug }, + [ClientToolCallState.error]: { text: 'Failed to debug', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped debug', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted debug', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Debugging', + completedLabel: 'Debugged', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} const META_deploy: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Deploying', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Deploying', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Deploying', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Deployed', icon: Rocket }, - [ClientToolCallState.error]: { text: 'Failed to deploy', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped deploy', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted deploy', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Deploying', - completedLabel: 'Deployed', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } + 
displayNames: { + [ClientToolCallState.generating]: { text: 'Deploying', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Deploying', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Deploying', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Deployed', icon: Rocket }, + [ClientToolCallState.error]: { text: 'Failed to deploy', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped deploy', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted deploy', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Deploying', + completedLabel: 'Deployed', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} const META_deploy_api: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to deploy API', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Deploy as API?', icon: Rocket }, - [ClientToolCallState.executing]: { text: 'Deploying API', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Deployed API', icon: Rocket }, - [ClientToolCallState.error]: { text: 'Failed to deploy API', icon: XCircle }, - [ClientToolCallState.aborted]: { - text: 'Aborted deploying API', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped deploying API', - icon: XCircle, - }, - }, + displayNames: { + [ClientToolCallState.generating]: { + text: 'Preparing to deploy API', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Deploy as API?', icon: Rocket }, + [ClientToolCallState.executing]: { text: 'Deploying API', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Deployed API', icon: Rocket }, + [ClientToolCallState.error]: { text: 'Failed to deploy API', icon: XCircle }, + [ClientToolCallState.aborted]: { + text: 'Aborted deploying API', + icon: XCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped deploying API', + icon: XCircle, + }, + }, + interrupt: { + accept: { text: 'Deploy', icon: Rocket }, + 
reject: { text: 'Skip', icon: XCircle }, + }, + uiConfig: { + isSpecial: true, interrupt: { accept: { text: 'Deploy', icon: Rocket }, reject: { text: 'Skip', icon: XCircle }, + showAllowOnce: true, + showAllowAlways: true, }, - uiConfig: { - isSpecial: true, - interrupt: { - accept: { text: 'Deploy', icon: Rocket }, - reject: { text: 'Skip', icon: XCircle }, - showAllowOnce: true, - showAllowAlways: true, - }, - }, - getDynamicText: (params, state) => { - const action = params?.action === 'undeploy' ? 'undeploy' : 'deploy' + }, + getDynamicText: (params, state) => { + const action = params?.action === 'undeploy' ? 'undeploy' : 'deploy' - const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId - const isAlreadyDeployed = workflowId - ? useWorkflowRegistry.getState().getWorkflowDeploymentStatus(workflowId)?.isDeployed - : false + const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId + const isAlreadyDeployed = workflowId + ? useWorkflowRegistry.getState().getWorkflowDeploymentStatus(workflowId)?.isDeployed + : false - let actionText = action - let actionTextIng = action === 'undeploy' ? 'undeploying' : 'deploying' - const actionTextPast = action === 'undeploy' ? 'undeployed' : 'deployed' + let actionText = action + let actionTextIng = action === 'undeploy' ? 'undeploying' : 'deploying' + const actionTextPast = action === 'undeploy' ? 
'undeployed' : 'deployed' - if (action === 'deploy' && isAlreadyDeployed) { - actionText = 'redeploy' - actionTextIng = 'redeploying' - } + if (action === 'deploy' && isAlreadyDeployed) { + actionText = 'redeploy' + actionTextIng = 'redeploying' + } + + const actionCapitalized = actionText.charAt(0).toUpperCase() + actionText.slice(1) + + switch (state) { + case ClientToolCallState.success: + return `API ${actionTextPast}` + case ClientToolCallState.executing: + return `${actionCapitalized}ing API` + case ClientToolCallState.generating: + return `Preparing to ${actionText} API` + case ClientToolCallState.pending: + return `${actionCapitalized} API?` + case ClientToolCallState.error: + return `Failed to ${actionText} API` + case ClientToolCallState.aborted: + return `Aborted ${actionTextIng} API` + case ClientToolCallState.rejected: + return `Skipped ${actionTextIng} API` + } + return undefined + }, +} + +const META_deploy_chat: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Preparing to deploy chat', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Deploy as chat?', icon: MessageSquare }, + [ClientToolCallState.executing]: { text: 'Deploying chat', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Deployed chat', icon: MessageSquare }, + [ClientToolCallState.error]: { text: 'Failed to deploy chat', icon: XCircle }, + [ClientToolCallState.aborted]: { + text: 'Aborted deploying chat', + icon: XCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped deploying chat', + icon: XCircle, + }, + }, + interrupt: { + accept: { text: 'Deploy Chat', icon: MessageSquare }, + reject: { text: 'Skip', icon: XCircle }, + }, + uiConfig: { + isSpecial: true, + interrupt: { + accept: { text: 'Deploy Chat', icon: MessageSquare }, + reject: { text: 'Skip', icon: XCircle }, + showAllowOnce: true, + showAllowAlways: true, + }, + }, + getDynamicText: (params, state) => { + const action = params?.action === 'undeploy' ? 
'undeploy' : 'deploy' + + switch (state) { + case ClientToolCallState.success: + return action === 'undeploy' ? 'Chat undeployed' : 'Chat deployed' + case ClientToolCallState.executing: + return action === 'undeploy' ? 'Undeploying chat' : 'Deploying chat' + case ClientToolCallState.generating: + return `Preparing to ${action} chat` + case ClientToolCallState.pending: + return action === 'undeploy' ? 'Undeploy chat?' : 'Deploy as chat?' + case ClientToolCallState.error: + return `Failed to ${action} chat` + case ClientToolCallState.aborted: + return action === 'undeploy' ? 'Aborted undeploying chat' : 'Aborted deploying chat' + case ClientToolCallState.rejected: + return action === 'undeploy' ? 'Skipped undeploying chat' : 'Skipped deploying chat' + } + return undefined + }, +} + +const META_deploy_mcp: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Preparing to deploy to MCP', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Deploy to MCP server?', icon: Server }, + [ClientToolCallState.executing]: { text: 'Deploying to MCP', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Deployed to MCP', icon: Server }, + [ClientToolCallState.error]: { text: 'Failed to deploy to MCP', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted MCP deployment', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped MCP deployment', icon: XCircle }, + }, + interrupt: { + accept: { text: 'Deploy', icon: Server }, + reject: { text: 'Skip', icon: XCircle }, + }, + uiConfig: { + isSpecial: true, + interrupt: { + accept: { text: 'Deploy', icon: Server }, + reject: { text: 'Skip', icon: XCircle }, + showAllowOnce: true, + showAllowAlways: true, + }, + }, + getDynamicText: (params, state) => { + const toolName = params?.toolName || 'workflow' + switch (state) { + case ClientToolCallState.success: + return `Deployed "${toolName}" to MCP` + case ClientToolCallState.executing: + return `Deploying "${toolName}" 
to MCP` + case ClientToolCallState.generating: + return `Preparing to deploy to MCP` + case ClientToolCallState.pending: + return `Deploy "${toolName}" to MCP?` + case ClientToolCallState.error: + return `Failed to deploy to MCP` + } + return undefined + }, +} + +const META_edit: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Editing', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Editing', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Editing', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Edited', icon: Pencil }, + [ClientToolCallState.error]: { text: 'Failed to apply edit', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped edit', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted edit', icon: XCircle }, + }, + uiConfig: { + isSpecial: true, + subagent: { + streamingLabel: 'Editing', + completedLabel: 'Edited', + shouldCollapse: false, // Edit subagent stays expanded + outputArtifacts: ['edit_summary'], + hideThinkingText: true, // We show WorkflowEditSummary instead + }, + }, +} + +const META_edit_workflow: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Editing your workflow', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Editing your workflow', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Edited your workflow', icon: Grid2x2Check }, + [ClientToolCallState.error]: { text: 'Failed to edit your workflow', icon: XCircle }, + [ClientToolCallState.review]: { text: 'Review your workflow changes', icon: Grid2x2 }, + [ClientToolCallState.rejected]: { text: 'Rejected workflow changes', icon: Grid2x2X }, + [ClientToolCallState.aborted]: { text: 'Aborted editing your workflow', icon: MinusCircle }, + [ClientToolCallState.pending]: { text: 'Editing your workflow', icon: Loader2 }, + }, + uiConfig: { + isSpecial: true, + customRenderer: 'edit_summary', + }, +} + +const META_evaluate: ToolMetadata = { + 
displayNames: { + [ClientToolCallState.generating]: { text: 'Evaluating', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Evaluating', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Evaluating', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Evaluated', icon: ClipboardCheck }, + [ClientToolCallState.error]: { text: 'Failed to evaluate', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped evaluation', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted evaluation', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Evaluating', + completedLabel: 'Evaluated', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} - const actionCapitalized = actionText.charAt(0).toUpperCase() + actionText.slice(1) +const META_get_block_config: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Getting block config', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Getting block config', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Getting block config', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Retrieved block config', icon: FileCode }, + [ClientToolCallState.error]: { text: 'Failed to get block config', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted getting block config', icon: XCircle }, + [ClientToolCallState.rejected]: { + text: 'Skipped getting block config', + icon: MinusCircle, + }, + }, + getDynamicText: (params, state) => { + if (params?.blockType && typeof params.blockType === 'string') { + const blockConfig = getLatestBlock(params.blockType) + const blockName = (blockConfig?.name ?? params.blockType.replace(/_/g, ' ')).toLowerCase() + const opSuffix = params.operation ? 
` (${params.operation})` : '' switch (state) { case ClientToolCallState.success: - return `API ${actionTextPast}` + return `Retrieved ${blockName}${opSuffix} config` case ClientToolCallState.executing: - return `${actionCapitalized}ing API` case ClientToolCallState.generating: - return `Preparing to ${actionText} API` case ClientToolCallState.pending: - return `${actionCapitalized} API?` + return `Retrieving ${blockName}${opSuffix} config` case ClientToolCallState.error: - return `Failed to ${actionText} API` + return `Failed to retrieve ${blockName}${opSuffix} config` case ClientToolCallState.aborted: - return `Aborted ${actionTextIng} API` + return `Aborted retrieving ${blockName}${opSuffix} config` case ClientToolCallState.rejected: - return `Skipped ${actionTextIng} API` + return `Skipped retrieving ${blockName}${opSuffix} config` } - return undefined - }, - } + } + return undefined + }, +} -const META_deploy_chat: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to deploy chat', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Deploy as chat?', icon: MessageSquare }, - [ClientToolCallState.executing]: { text: 'Deploying chat', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Deployed chat', icon: MessageSquare }, - [ClientToolCallState.error]: { text: 'Failed to deploy chat', icon: XCircle }, - [ClientToolCallState.aborted]: { - text: 'Aborted deploying chat', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped deploying chat', - icon: XCircle, - }, - }, - interrupt: { - accept: { text: 'Deploy Chat', icon: MessageSquare }, - reject: { text: 'Skip', icon: XCircle }, - }, - uiConfig: { - isSpecial: true, - interrupt: { - accept: { text: 'Deploy Chat', icon: MessageSquare }, - reject: { text: 'Skip', icon: XCircle }, - showAllowOnce: true, - showAllowAlways: true, - }, - }, - getDynamicText: (params, state) => { - const action = params?.action === 'undeploy' ? 
'undeploy' : 'deploy' +const META_get_block_options: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Getting block operations', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Getting block operations', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Getting block operations', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Retrieved block operations', icon: ListFilter }, + [ClientToolCallState.error]: { text: 'Failed to get block operations', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted getting block operations', icon: XCircle }, + [ClientToolCallState.rejected]: { + text: 'Skipped getting block operations', + icon: MinusCircle, + }, + }, + getDynamicText: (params, state) => { + const blockId = + (params as any)?.blockId || + (params as any)?.blockType || + (params as any)?.block_id || + (params as any)?.block_type + if (typeof blockId === 'string') { + const blockConfig = getLatestBlock(blockId) + const blockName = (blockConfig?.name ?? blockId.replace(/_/g, ' ')).toLowerCase() switch (state) { case ClientToolCallState.success: - return action === 'undeploy' ? 'Chat undeployed' : 'Chat deployed' + return `Retrieved ${blockName} operations` case ClientToolCallState.executing: - return action === 'undeploy' ? 'Undeploying chat' : 'Deploying chat' case ClientToolCallState.generating: - return `Preparing to ${action} chat` case ClientToolCallState.pending: - return action === 'undeploy' ? 'Undeploy chat?' : 'Deploy as chat?' + return `Retrieving ${blockName} operations` case ClientToolCallState.error: - return `Failed to ${action} chat` + return `Failed to retrieve ${blockName} operations` case ClientToolCallState.aborted: - return action === 'undeploy' ? 'Aborted undeploying chat' : 'Aborted deploying chat' + return `Aborted retrieving ${blockName} operations` case ClientToolCallState.rejected: - return action === 'undeploy' ? 
'Skipped undeploying chat' : 'Skipped deploying chat' + return `Skipped retrieving ${blockName} operations` } - return undefined - }, - } + } + return undefined + }, +} -const META_deploy_mcp: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to deploy to MCP', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Deploy to MCP server?', icon: Server }, - [ClientToolCallState.executing]: { text: 'Deploying to MCP', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Deployed to MCP', icon: Server }, - [ClientToolCallState.error]: { text: 'Failed to deploy to MCP', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted MCP deployment', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped MCP deployment', icon: XCircle }, - }, - interrupt: { - accept: { text: 'Deploy', icon: Server }, - reject: { text: 'Skip', icon: XCircle }, - }, - uiConfig: { - isSpecial: true, - interrupt: { - accept: { text: 'Deploy', icon: Server }, - reject: { text: 'Skip', icon: XCircle }, - showAllowOnce: true, - showAllowAlways: true, - }, - }, - getDynamicText: (params, state) => { - const toolName = params?.toolName || 'workflow' +const META_get_block_outputs: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Getting block outputs', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Getting block outputs', icon: Tag }, + [ClientToolCallState.executing]: { text: 'Getting block outputs', icon: Loader2 }, + [ClientToolCallState.aborted]: { text: 'Aborted getting outputs', icon: XCircle }, + [ClientToolCallState.success]: { text: 'Retrieved block outputs', icon: Tag }, + [ClientToolCallState.error]: { text: 'Failed to get outputs', icon: X }, + [ClientToolCallState.rejected]: { text: 'Skipped getting outputs', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const blockIds = params?.blockIds + if (blockIds && Array.isArray(blockIds) && blockIds.length > 0) 
{ + const count = blockIds.length switch (state) { case ClientToolCallState.success: - return `Deployed "${toolName}" to MCP` + return `Retrieved outputs for ${count} block${count > 1 ? 's' : ''}` case ClientToolCallState.executing: - return `Deploying "${toolName}" to MCP` case ClientToolCallState.generating: - return `Preparing to deploy to MCP` case ClientToolCallState.pending: - return `Deploy "${toolName}" to MCP?` + return `Getting outputs for ${count} block${count > 1 ? 's' : ''}` case ClientToolCallState.error: - return `Failed to deploy to MCP` + return `Failed to get outputs for ${count} block${count > 1 ? 's' : ''}` } - return undefined - }, - } + } + return undefined + }, +} -const META_edit: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Editing', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Editing', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Editing', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Edited', icon: Pencil }, - [ClientToolCallState.error]: { text: 'Failed to apply edit', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped edit', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted edit', icon: XCircle }, - }, - uiConfig: { - isSpecial: true, - subagent: { - streamingLabel: 'Editing', - completedLabel: 'Edited', - shouldCollapse: false, // Edit subagent stays expanded - outputArtifacts: ['edit_summary'], - hideThinkingText: true, // We show WorkflowEditSummary instead - }, - }, - } +const META_get_block_upstream_references: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Getting upstream references', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Getting upstream references', icon: GitBranch }, + [ClientToolCallState.executing]: { text: 'Getting upstream references', icon: Loader2 }, + [ClientToolCallState.aborted]: { text: 'Aborted getting references', icon: XCircle }, + 
[ClientToolCallState.success]: { text: 'Retrieved upstream references', icon: GitBranch }, + [ClientToolCallState.error]: { text: 'Failed to get references', icon: X }, + [ClientToolCallState.rejected]: { text: 'Skipped getting references', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const blockIds = params?.blockIds + if (blockIds && Array.isArray(blockIds) && blockIds.length > 0) { + const count = blockIds.length + switch (state) { + case ClientToolCallState.success: + return `Retrieved references for ${count} block${count > 1 ? 's' : ''}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Getting references for ${count} block${count > 1 ? 's' : ''}` + case ClientToolCallState.error: + return `Failed to get references for ${count} block${count > 1 ? 's' : ''}` + } + } + return undefined + }, +} -const META_edit_workflow: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Editing your workflow', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Editing your workflow', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Edited your workflow', icon: Grid2x2Check }, - [ClientToolCallState.error]: { text: 'Failed to edit your workflow', icon: XCircle }, - [ClientToolCallState.review]: { text: 'Review your workflow changes', icon: Grid2x2 }, - [ClientToolCallState.rejected]: { text: 'Rejected workflow changes', icon: Grid2x2X }, - [ClientToolCallState.aborted]: { text: 'Aborted editing your workflow', icon: MinusCircle }, - [ClientToolCallState.pending]: { text: 'Editing your workflow', icon: Loader2 }, - }, - uiConfig: { - isSpecial: true, - customRenderer: 'edit_summary', - }, - } +const META_get_blocks_and_tools: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Exploring available options', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Exploring available options', icon: Loader2 }, + 
[ClientToolCallState.executing]: { text: 'Exploring available options', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Explored available options', icon: Blocks }, + [ClientToolCallState.error]: { text: 'Failed to explore options', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted exploring options', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped exploring options', icon: MinusCircle }, + }, + interrupt: undefined, +} -const META_evaluate: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Evaluating', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Evaluating', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Evaluating', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Evaluated', icon: ClipboardCheck }, - [ClientToolCallState.error]: { text: 'Failed to evaluate', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped evaluation', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted evaluation', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Evaluating', - completedLabel: 'Evaluated', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } +const META_get_blocks_metadata: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Searching block choices', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Searching block choices', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Searching block choices', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Searched block choices', icon: ListFilter }, + [ClientToolCallState.error]: { text: 'Failed to search block choices', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted searching block choices', icon: XCircle }, + [ClientToolCallState.rejected]: { + text: 'Skipped searching block choices', + icon: MinusCircle, + }, + }, + getDynamicText: (params, state) => { + if (params?.blockIds 
&& Array.isArray(params.blockIds) && params.blockIds.length > 0) { + const blockList = params.blockIds + .slice(0, 3) + .map((blockId) => blockId.replace(/_/g, ' ')) + .join(', ') + const more = params.blockIds.length > 3 ? '...' : '' + const blocks = `${blockList}${more}` -const META_get_block_config: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Getting block config', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Getting block config', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Getting block config', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Retrieved block config', icon: FileCode }, - [ClientToolCallState.error]: { text: 'Failed to get block config', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted getting block config', icon: XCircle }, - [ClientToolCallState.rejected]: { - text: 'Skipped getting block config', - icon: MinusCircle, - }, - }, - getDynamicText: (params, state) => { - if (params?.blockType && typeof params.blockType === 'string') { - const blockConfig = getLatestBlock(params.blockType) - const blockName = (blockConfig?.name ?? params.blockType.replace(/_/g, ' ')).toLowerCase() - const opSuffix = params.operation ? 
` (${params.operation})` : '' + switch (state) { + case ClientToolCallState.success: + return `Searched ${blocks}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Searching ${blocks}` + case ClientToolCallState.error: + return `Failed to search ${blocks}` + case ClientToolCallState.aborted: + return `Aborted searching ${blocks}` + case ClientToolCallState.rejected: + return `Skipped searching ${blocks}` + } + } + return undefined + }, +} - switch (state) { - case ClientToolCallState.success: - return `Retrieved ${blockName}${opSuffix} config` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Retrieving ${blockName}${opSuffix} config` - case ClientToolCallState.error: - return `Failed to retrieve ${blockName}${opSuffix} config` - case ClientToolCallState.aborted: - return `Aborted retrieving ${blockName}${opSuffix} config` - case ClientToolCallState.rejected: - return `Skipped retrieving ${blockName}${opSuffix} config` - } +const META_get_credentials: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Fetching connected integrations', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Fetching connected integrations', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Fetching connected integrations', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Fetched connected integrations', icon: Key }, + [ClientToolCallState.error]: { + text: 'Failed to fetch connected integrations', + icon: XCircle, + }, + [ClientToolCallState.aborted]: { + text: 'Aborted fetching connected integrations', + icon: MinusCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped fetching connected integrations', + icon: MinusCircle, + }, + }, +} + +const META_get_examples_rag: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Fetching examples', icon: 
Loader2 }, + [ClientToolCallState.pending]: { text: 'Fetching examples', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Fetching examples', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Fetched examples', icon: Search }, + [ClientToolCallState.error]: { text: 'Failed to fetch examples', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted getting examples', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped getting examples', icon: MinusCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.query && typeof params.query === 'string') { + const query = params.query + + switch (state) { + case ClientToolCallState.success: + return `Found examples for ${query}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Searching examples for ${query}` + case ClientToolCallState.error: + return `Failed to find examples for ${query}` + case ClientToolCallState.aborted: + return `Aborted searching examples for ${query}` + case ClientToolCallState.rejected: + return `Skipped searching examples for ${query}` } - return undefined - }, - } + } + return undefined + }, +} -const META_get_block_options: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Getting block operations', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Getting block operations', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Getting block operations', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Retrieved block operations', icon: ListFilter }, - [ClientToolCallState.error]: { text: 'Failed to get block operations', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted getting block operations', icon: XCircle }, - [ClientToolCallState.rejected]: { - text: 'Skipped getting block operations', - icon: MinusCircle, - }, - }, - getDynamicText: (params, state) => { - 
const blockId = - (params as any)?.blockId || - (params as any)?.blockType || - (params as any)?.block_id || - (params as any)?.block_type - if (typeof blockId === 'string') { - const blockConfig = getLatestBlock(blockId) - const blockName = (blockConfig?.name ?? blockId.replace(/_/g, ' ')).toLowerCase() +const META_get_operations_examples: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Designing workflow component', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Designing workflow component', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Designing workflow component', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Designed workflow component', icon: Zap }, + [ClientToolCallState.error]: { text: 'Failed to design workflow component', icon: XCircle }, + [ClientToolCallState.aborted]: { + text: 'Aborted designing workflow component', + icon: MinusCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped designing workflow component', + icon: MinusCircle, + }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.query && typeof params.query === 'string') { + const query = params.query - switch (state) { - case ClientToolCallState.success: - return `Retrieved ${blockName} operations` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Retrieving ${blockName} operations` - case ClientToolCallState.error: - return `Failed to retrieve ${blockName} operations` - case ClientToolCallState.aborted: - return `Aborted retrieving ${blockName} operations` - case ClientToolCallState.rejected: - return `Skipped retrieving ${blockName} operations` - } + switch (state) { + case ClientToolCallState.success: + return `Designed ${query}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Designing ${query}` + case 
ClientToolCallState.error: + return `Failed to design ${query}` + case ClientToolCallState.aborted: + return `Aborted designing ${query}` + case ClientToolCallState.rejected: + return `Skipped designing ${query}` } - return undefined - }, - } + } + return undefined + }, +} -const META_get_block_outputs: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Getting block outputs', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Getting block outputs', icon: Tag }, - [ClientToolCallState.executing]: { text: 'Getting block outputs', icon: Loader2 }, - [ClientToolCallState.aborted]: { text: 'Aborted getting outputs', icon: XCircle }, - [ClientToolCallState.success]: { text: 'Retrieved block outputs', icon: Tag }, - [ClientToolCallState.error]: { text: 'Failed to get outputs', icon: X }, - [ClientToolCallState.rejected]: { text: 'Skipped getting outputs', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const blockIds = params?.blockIds - if (blockIds && Array.isArray(blockIds) && blockIds.length > 0) { - const count = blockIds.length - switch (state) { - case ClientToolCallState.success: - return `Retrieved outputs for ${count} block${count > 1 ? 's' : ''}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Getting outputs for ${count} block${count > 1 ? 's' : ''}` - case ClientToolCallState.error: - return `Failed to get outputs for ${count} block${count > 1 ? 
's' : ''}` - } +const META_get_page_contents: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Getting page contents', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Getting page contents', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Getting page contents', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Retrieved page contents', icon: FileText }, + [ClientToolCallState.error]: { text: 'Failed to get page contents', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted getting page contents', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped getting page contents', icon: MinusCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.urls && Array.isArray(params.urls) && params.urls.length > 0) { + const firstUrl = String(params.urls[0]) + const count = params.urls.length + + switch (state) { + case ClientToolCallState.success: + return count > 1 ? `Retrieved ${count} pages` : `Retrieved ${firstUrl}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return count > 1 ? `Getting ${count} pages` : `Getting ${firstUrl}` + case ClientToolCallState.error: + return count > 1 ? `Failed to get ${count} pages` : `Failed to get ${firstUrl}` + case ClientToolCallState.aborted: + return count > 1 ? `Aborted getting ${count} pages` : `Aborted getting ${firstUrl}` + case ClientToolCallState.rejected: + return count > 1 ? 
`Skipped getting ${count} pages` : `Skipped getting ${firstUrl}` } - return undefined - }, - } + } + return undefined + }, +} -const META_get_block_upstream_references: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Getting upstream references', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Getting upstream references', icon: GitBranch }, - [ClientToolCallState.executing]: { text: 'Getting upstream references', icon: Loader2 }, - [ClientToolCallState.aborted]: { text: 'Aborted getting references', icon: XCircle }, - [ClientToolCallState.success]: { text: 'Retrieved upstream references', icon: GitBranch }, - [ClientToolCallState.error]: { text: 'Failed to get references', icon: X }, - [ClientToolCallState.rejected]: { text: 'Skipped getting references', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const blockIds = params?.blockIds - if (blockIds && Array.isArray(blockIds) && blockIds.length > 0) { - const count = blockIds.length +const META_get_trigger_blocks: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Finding trigger blocks', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Finding trigger blocks', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Finding trigger blocks', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Found trigger blocks', icon: ListFilter }, + [ClientToolCallState.error]: { text: 'Failed to find trigger blocks', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted finding trigger blocks', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped finding trigger blocks', icon: MinusCircle }, + }, + interrupt: undefined, +} + +const META_get_trigger_examples: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Selecting a trigger', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Selecting a trigger', icon: Loader2 }, + [ClientToolCallState.executing]: 
{ text: 'Selecting a trigger', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Selected a trigger', icon: Zap }, + [ClientToolCallState.error]: { text: 'Failed to select a trigger', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted selecting a trigger', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped selecting a trigger', icon: MinusCircle }, + }, + interrupt: undefined, +} + +const META_get_user_workflow: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Reading your workflow', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Reading your workflow', icon: WorkflowIcon }, + [ClientToolCallState.executing]: { text: 'Reading your workflow', icon: Loader2 }, + [ClientToolCallState.aborted]: { text: 'Aborted reading your workflow', icon: XCircle }, + [ClientToolCallState.success]: { text: 'Read your workflow', icon: WorkflowIcon }, + [ClientToolCallState.error]: { text: 'Failed to read your workflow', icon: X }, + [ClientToolCallState.rejected]: { text: 'Skipped reading your workflow', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId + if (workflowId) { + const workflowName = useWorkflowRegistry.getState().workflows[workflowId]?.name + if (workflowName) { switch (state) { case ClientToolCallState.success: - return `Retrieved references for ${count} block${count > 1 ? 's' : ''}` + return `Read ${workflowName}` case ClientToolCallState.executing: case ClientToolCallState.generating: case ClientToolCallState.pending: - return `Getting references for ${count} block${count > 1 ? 's' : ''}` + return `Reading ${workflowName}` case ClientToolCallState.error: - return `Failed to get references for ${count} block${count > 1 ? 
's' : ''}` + return `Failed to read ${workflowName}` + case ClientToolCallState.aborted: + return `Aborted reading ${workflowName}` + case ClientToolCallState.rejected: + return `Skipped reading ${workflowName}` } } - return undefined - }, - } + } + return undefined + }, +} -const META_get_blocks_and_tools: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Exploring available options', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Exploring available options', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Exploring available options', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Explored available options', icon: Blocks }, - [ClientToolCallState.error]: { text: 'Failed to explore options', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted exploring options', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped exploring options', icon: MinusCircle }, - }, - interrupt: undefined, - } +const META_get_workflow_console: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Fetching execution logs', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Fetching execution logs', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Fetched execution logs', icon: TerminalSquare }, + [ClientToolCallState.error]: { text: 'Failed to fetch execution logs', icon: XCircle }, + [ClientToolCallState.rejected]: { + text: 'Skipped fetching execution logs', + icon: MinusCircle, + }, + [ClientToolCallState.aborted]: { + text: 'Aborted fetching execution logs', + icon: MinusCircle, + }, + [ClientToolCallState.pending]: { text: 'Fetching execution logs', icon: Loader2 }, + }, + getDynamicText: (params, state) => { + const limit = params?.limit + if (limit && typeof limit === 'number') { + const logText = limit === 1 ? 
'execution log' : 'execution logs' -const META_get_blocks_metadata: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Searching block choices', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Searching block choices', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Searching block choices', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Searched block choices', icon: ListFilter }, - [ClientToolCallState.error]: { text: 'Failed to search block choices', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted searching block choices', icon: XCircle }, - [ClientToolCallState.rejected]: { - text: 'Skipped searching block choices', - icon: MinusCircle, - }, - }, - getDynamicText: (params, state) => { - if (params?.blockIds && Array.isArray(params.blockIds) && params.blockIds.length > 0) { - const blockList = params.blockIds - .slice(0, 3) - .map((blockId) => blockId.replace(/_/g, ' ')) - .join(', ') - const more = params.blockIds.length > 3 ? '...' 
: '' - const blocks = `${blockList}${more}` - - switch (state) { - case ClientToolCallState.success: - return `Searched ${blocks}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Searching ${blocks}` - case ClientToolCallState.error: - return `Failed to search ${blocks}` - case ClientToolCallState.aborted: - return `Aborted searching ${blocks}` - case ClientToolCallState.rejected: - return `Skipped searching ${blocks}` - } - } - return undefined - }, - } - -const META_get_credentials: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Fetching connected integrations', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Fetching connected integrations', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Fetching connected integrations', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Fetched connected integrations', icon: Key }, - [ClientToolCallState.error]: { - text: 'Failed to fetch connected integrations', - icon: XCircle, - }, - [ClientToolCallState.aborted]: { - text: 'Aborted fetching connected integrations', - icon: MinusCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped fetching connected integrations', - icon: MinusCircle, - }, - }, - } - -const META_get_examples_rag: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Fetching examples', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Fetching examples', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Fetching examples', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Fetched examples', icon: Search }, - [ClientToolCallState.error]: { text: 'Failed to fetch examples', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted getting examples', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped getting examples', icon: MinusCircle }, - }, - interrupt: undefined, - 
getDynamicText: (params, state) => { - if (params?.query && typeof params.query === 'string') { - const query = params.query - - switch (state) { - case ClientToolCallState.success: - return `Found examples for ${query}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Searching examples for ${query}` - case ClientToolCallState.error: - return `Failed to find examples for ${query}` - case ClientToolCallState.aborted: - return `Aborted searching examples for ${query}` - case ClientToolCallState.rejected: - return `Skipped searching examples for ${query}` - } - } - return undefined - }, - } - -const META_get_operations_examples: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Designing workflow component', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Designing workflow component', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Designing workflow component', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Designed workflow component', icon: Zap }, - [ClientToolCallState.error]: { text: 'Failed to design workflow component', icon: XCircle }, - [ClientToolCallState.aborted]: { - text: 'Aborted designing workflow component', - icon: MinusCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped designing workflow component', - icon: MinusCircle, - }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.query && typeof params.query === 'string') { - const query = params.query - - switch (state) { - case ClientToolCallState.success: - return `Designed ${query}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Designing ${query}` - case ClientToolCallState.error: - return `Failed to design ${query}` - case ClientToolCallState.aborted: - return `Aborted designing ${query}` - case ClientToolCallState.rejected: - return 
`Skipped designing ${query}` - } - } - return undefined - }, - } - -const META_get_page_contents: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Getting page contents', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Getting page contents', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Getting page contents', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Retrieved page contents', icon: FileText }, - [ClientToolCallState.error]: { text: 'Failed to get page contents', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted getting page contents', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped getting page contents', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.urls && Array.isArray(params.urls) && params.urls.length > 0) { - const firstUrl = String(params.urls[0]) - const count = params.urls.length - - switch (state) { - case ClientToolCallState.success: - return count > 1 ? `Retrieved ${count} pages` : `Retrieved ${firstUrl}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return count > 1 ? `Getting ${count} pages` : `Getting ${firstUrl}` - case ClientToolCallState.error: - return count > 1 ? `Failed to get ${count} pages` : `Failed to get ${firstUrl}` - case ClientToolCallState.aborted: - return count > 1 ? `Aborted getting ${count} pages` : `Aborted getting ${firstUrl}` - case ClientToolCallState.rejected: - return count > 1 ? 
`Skipped getting ${count} pages` : `Skipped getting ${firstUrl}` - } - } - return undefined - }, - } - -const META_get_trigger_blocks: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Finding trigger blocks', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Finding trigger blocks', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Finding trigger blocks', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Found trigger blocks', icon: ListFilter }, - [ClientToolCallState.error]: { text: 'Failed to find trigger blocks', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted finding trigger blocks', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped finding trigger blocks', icon: MinusCircle }, - }, - interrupt: undefined, - } - -const META_get_trigger_examples: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Selecting a trigger', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Selecting a trigger', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Selecting a trigger', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Selected a trigger', icon: Zap }, - [ClientToolCallState.error]: { text: 'Failed to select a trigger', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted selecting a trigger', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped selecting a trigger', icon: MinusCircle }, - }, - interrupt: undefined, - } - -const META_get_user_workflow: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Reading your workflow', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Reading your workflow', icon: WorkflowIcon }, - [ClientToolCallState.executing]: { text: 'Reading your workflow', icon: Loader2 }, - [ClientToolCallState.aborted]: { text: 'Aborted reading your workflow', icon: XCircle }, - [ClientToolCallState.success]: { text: 'Read your 
workflow', icon: WorkflowIcon }, - [ClientToolCallState.error]: { text: 'Failed to read your workflow', icon: X }, - [ClientToolCallState.rejected]: { text: 'Skipped reading your workflow', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId - if (workflowId) { - const workflowName = useWorkflowRegistry.getState().workflows[workflowId]?.name - if (workflowName) { - switch (state) { - case ClientToolCallState.success: - return `Read ${workflowName}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Reading ${workflowName}` - case ClientToolCallState.error: - return `Failed to read ${workflowName}` - case ClientToolCallState.aborted: - return `Aborted reading ${workflowName}` - case ClientToolCallState.rejected: - return `Skipped reading ${workflowName}` - } - } - } - return undefined - }, - } - -const META_get_workflow_console: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Fetching execution logs', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Fetching execution logs', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Fetched execution logs', icon: TerminalSquare }, - [ClientToolCallState.error]: { text: 'Failed to fetch execution logs', icon: XCircle }, - [ClientToolCallState.rejected]: { - text: 'Skipped fetching execution logs', - icon: MinusCircle, - }, - [ClientToolCallState.aborted]: { - text: 'Aborted fetching execution logs', - icon: MinusCircle, - }, - [ClientToolCallState.pending]: { text: 'Fetching execution logs', icon: Loader2 }, - }, - getDynamicText: (params, state) => { - const limit = params?.limit - if (limit && typeof limit === 'number') { - const logText = limit === 1 ? 
'execution log' : 'execution logs' - - switch (state) { - case ClientToolCallState.success: - return `Fetched last ${limit} ${logText}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Fetching last ${limit} ${logText}` - case ClientToolCallState.error: - return `Failed to fetch last ${limit} ${logText}` - case ClientToolCallState.rejected: - return `Skipped fetching last ${limit} ${logText}` - case ClientToolCallState.aborted: - return `Aborted fetching last ${limit} ${logText}` - } + switch (state) { + case ClientToolCallState.success: + return `Fetched last ${limit} ${logText}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Fetching last ${limit} ${logText}` + case ClientToolCallState.error: + return `Failed to fetch last ${limit} ${logText}` + case ClientToolCallState.rejected: + return `Skipped fetching last ${limit} ${logText}` + case ClientToolCallState.aborted: + return `Aborted fetching last ${limit} ${logText}` } - return undefined - }, - } + } + return undefined + }, +} const META_get_workflow_data: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Fetching workflow data', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Fetching workflow data', icon: Database }, - [ClientToolCallState.executing]: { text: 'Fetching workflow data', icon: Loader2 }, - [ClientToolCallState.aborted]: { text: 'Aborted fetching data', icon: XCircle }, - [ClientToolCallState.success]: { text: 'Retrieved workflow data', icon: Database }, - [ClientToolCallState.error]: { text: 'Failed to fetch data', icon: X }, - [ClientToolCallState.rejected]: { text: 'Skipped fetching data', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const dataType = params?.data_type as WorkflowDataType | undefined - if (!dataType) return undefined - - const typeLabels: Record = { - global_variables: 
'variables', - custom_tools: 'custom tools', - mcp_tools: 'MCP tools', - files: 'files', - } + displayNames: { + [ClientToolCallState.generating]: { text: 'Fetching workflow data', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Fetching workflow data', icon: Database }, + [ClientToolCallState.executing]: { text: 'Fetching workflow data', icon: Loader2 }, + [ClientToolCallState.aborted]: { text: 'Aborted fetching data', icon: XCircle }, + [ClientToolCallState.success]: { text: 'Retrieved workflow data', icon: Database }, + [ClientToolCallState.error]: { text: 'Failed to fetch data', icon: X }, + [ClientToolCallState.rejected]: { text: 'Skipped fetching data', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const dataType = params?.data_type as WorkflowDataType | undefined + if (!dataType) return undefined + + const typeLabels: Record = { + global_variables: 'variables', + custom_tools: 'custom tools', + mcp_tools: 'MCP tools', + files: 'files', + } - const label = typeLabels[dataType] || dataType + const label = typeLabels[dataType] || dataType + + switch (state) { + case ClientToolCallState.success: + return `Retrieved ${label}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + return `Fetching ${label}` + case ClientToolCallState.pending: + return `Fetch ${label}?` + case ClientToolCallState.error: + return `Failed to fetch ${label}` + case ClientToolCallState.aborted: + return `Aborted fetching ${label}` + case ClientToolCallState.rejected: + return `Skipped fetching ${label}` + } + return undefined + }, +} + +const META_get_workflow_from_name: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Reading workflow', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Reading workflow', icon: FileText }, + [ClientToolCallState.executing]: { text: 'Reading workflow', icon: Loader2 }, + [ClientToolCallState.aborted]: { text: 'Aborted reading workflow', icon: XCircle }, + 
[ClientToolCallState.success]: { text: 'Read workflow', icon: FileText }, + [ClientToolCallState.error]: { text: 'Failed to read workflow', icon: X }, + [ClientToolCallState.rejected]: { text: 'Skipped reading workflow', icon: XCircle }, + }, + getDynamicText: (params, state) => { + if (params?.workflow_name && typeof params.workflow_name === 'string') { + const workflowName = params.workflow_name switch (state) { case ClientToolCallState.success: - return `Retrieved ${label}` + return `Read ${workflowName}` case ClientToolCallState.executing: case ClientToolCallState.generating: - return `Fetching ${label}` case ClientToolCallState.pending: - return `Fetch ${label}?` + return `Reading ${workflowName}` case ClientToolCallState.error: - return `Failed to fetch ${label}` + return `Failed to read ${workflowName}` case ClientToolCallState.aborted: - return `Aborted fetching ${label}` + return `Aborted reading ${workflowName}` case ClientToolCallState.rejected: - return `Skipped fetching ${label}` + return `Skipped reading ${workflowName}` } - return undefined - }, - } - -const META_get_workflow_from_name: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Reading workflow', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Reading workflow', icon: FileText }, - [ClientToolCallState.executing]: { text: 'Reading workflow', icon: Loader2 }, - [ClientToolCallState.aborted]: { text: 'Aborted reading workflow', icon: XCircle }, - [ClientToolCallState.success]: { text: 'Read workflow', icon: FileText }, - [ClientToolCallState.error]: { text: 'Failed to read workflow', icon: X }, - [ClientToolCallState.rejected]: { text: 'Skipped reading workflow', icon: XCircle }, - }, - getDynamicText: (params, state) => { - if (params?.workflow_name && typeof params.workflow_name === 'string') { - const workflowName = params.workflow_name - - switch (state) { - case ClientToolCallState.success: - return `Read ${workflowName}` - case 
ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Reading ${workflowName}` - case ClientToolCallState.error: - return `Failed to read ${workflowName}` - case ClientToolCallState.aborted: - return `Aborted reading ${workflowName}` - case ClientToolCallState.rejected: - return `Skipped reading ${workflowName}` - } - } - return undefined - }, - } + } + return undefined + }, +} const META_info: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Getting info', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Getting info', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Getting info', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Retrieved info', icon: Info }, - [ClientToolCallState.error]: { text: 'Failed to get info', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped info', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted info', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Getting info', - completedLabel: 'Info retrieved', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } + displayNames: { + [ClientToolCallState.generating]: { text: 'Getting info', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Getting info', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Getting info', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Retrieved info', icon: Info }, + [ClientToolCallState.error]: { text: 'Failed to get info', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped info', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted info', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Getting info', + completedLabel: 'Info retrieved', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} const META_knowledge: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 
'Managing knowledge', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Managing knowledge', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Managing knowledge', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Managed knowledge', icon: BookOpen }, - [ClientToolCallState.error]: { text: 'Failed to manage knowledge', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped knowledge', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted knowledge', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Managing knowledge', - completedLabel: 'Knowledge managed', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } + displayNames: { + [ClientToolCallState.generating]: { text: 'Managing knowledge', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Managing knowledge', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Managing knowledge', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Managed knowledge', icon: BookOpen }, + [ClientToolCallState.error]: { text: 'Failed to manage knowledge', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped knowledge', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted knowledge', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Managing knowledge', + completedLabel: 'Knowledge managed', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} const META_knowledge_base: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Accessing knowledge base', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Accessing knowledge base', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Accessing knowledge base', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Accessed knowledge base', icon: Database }, - [ClientToolCallState.error]: { text: 'Failed to access knowledge base', icon: XCircle }, - [ClientToolCallState.aborted]: { 
text: 'Aborted knowledge base access', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped knowledge base access', icon: MinusCircle }, - }, - getDynamicText: (params: Record, state: ClientToolCallState) => { - const operation = params?.operation as string | undefined - const name = params?.args?.name as string | undefined - - const opVerbs: Record = { - create: { - active: 'Creating knowledge base', - past: 'Created knowledge base', - pending: name ? `Create knowledge base "${name}"?` : 'Create knowledge base?', - }, - list: { active: 'Listing knowledge bases', past: 'Listed knowledge bases' }, - get: { active: 'Getting knowledge base', past: 'Retrieved knowledge base' }, - query: { active: 'Querying knowledge base', past: 'Queried knowledge base' }, - } - const defaultVerb: { active: string; past: string; pending?: string } = { - active: 'Accessing knowledge base', - past: 'Accessed knowledge base', - } - const verb = operation ? opVerbs[operation] || defaultVerb : defaultVerb + displayNames: { + [ClientToolCallState.generating]: { text: 'Accessing knowledge base', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Accessing knowledge base', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Accessing knowledge base', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Accessed knowledge base', icon: Database }, + [ClientToolCallState.error]: { text: 'Failed to access knowledge base', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted knowledge base access', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped knowledge base access', icon: MinusCircle }, + }, + getDynamicText: (params: Record, state: ClientToolCallState) => { + const operation = params?.operation as string | undefined + const name = params?.args?.name as string | undefined + + const opVerbs: Record = { + create: { + active: 'Creating knowledge base', + past: 'Created knowledge base', + pending: name ? 
`Create knowledge base "${name}"?` : 'Create knowledge base?', + }, + list: { active: 'Listing knowledge bases', past: 'Listed knowledge bases' }, + get: { active: 'Getting knowledge base', past: 'Retrieved knowledge base' }, + query: { active: 'Querying knowledge base', past: 'Queried knowledge base' }, + } + const defaultVerb: { active: string; past: string; pending?: string } = { + active: 'Accessing knowledge base', + past: 'Accessed knowledge base', + } + const verb = operation ? opVerbs[operation] || defaultVerb : defaultVerb - if (state === ClientToolCallState.success) { - return verb.past - } - if (state === ClientToolCallState.pending && verb.pending) { - return verb.pending - } - if ( - state === ClientToolCallState.generating || - state === ClientToolCallState.pending || - state === ClientToolCallState.executing - ) { - return verb.active - } - return undefined - }, - } + if (state === ClientToolCallState.success) { + return verb.past + } + if (state === ClientToolCallState.pending && verb.pending) { + return verb.pending + } + if ( + state === ClientToolCallState.generating || + state === ClientToolCallState.pending || + state === ClientToolCallState.executing + ) { + return verb.active + } + return undefined + }, +} const META_list_user_workflows: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Listing your workflows', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Listing your workflows', icon: ListChecks }, - [ClientToolCallState.executing]: { text: 'Listing your workflows', icon: Loader2 }, - [ClientToolCallState.aborted]: { text: 'Aborted listing workflows', icon: XCircle }, - [ClientToolCallState.success]: { text: 'Listed your workflows', icon: ListChecks }, - [ClientToolCallState.error]: { text: 'Failed to list workflows', icon: X }, - [ClientToolCallState.rejected]: { text: 'Skipped listing workflows', icon: XCircle }, - }, - } + displayNames: { + [ClientToolCallState.generating]: { text: 'Listing your 
workflows', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Listing your workflows', icon: ListChecks }, + [ClientToolCallState.executing]: { text: 'Listing your workflows', icon: Loader2 }, + [ClientToolCallState.aborted]: { text: 'Aborted listing workflows', icon: XCircle }, + [ClientToolCallState.success]: { text: 'Listed your workflows', icon: ListChecks }, + [ClientToolCallState.error]: { text: 'Failed to list workflows', icon: X }, + [ClientToolCallState.rejected]: { text: 'Skipped listing workflows', icon: XCircle }, + }, +} const META_list_workspace_mcp_servers: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Getting MCP servers', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Getting MCP servers', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Getting MCP servers', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Retrieved MCP servers', icon: Server }, - [ClientToolCallState.error]: { text: 'Failed to get MCP servers', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted getting MCP servers', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped getting MCP servers', icon: XCircle }, - }, - interrupt: undefined, - } + displayNames: { + [ClientToolCallState.generating]: { + text: 'Getting MCP servers', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Getting MCP servers', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Getting MCP servers', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Retrieved MCP servers', icon: Server }, + [ClientToolCallState.error]: { text: 'Failed to get MCP servers', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted getting MCP servers', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped getting MCP servers', icon: XCircle }, + }, + interrupt: undefined, +} const META_make_api_request: ToolMetadata = { - displayNames: { - 
[ClientToolCallState.generating]: { text: 'Preparing API request', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Review API request', icon: Globe2 }, - [ClientToolCallState.executing]: { text: 'Executing API request', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Completed API request', icon: Globe2 }, - [ClientToolCallState.error]: { text: 'Failed to execute API request', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped API request', icon: MinusCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted API request', icon: XCircle }, - }, + displayNames: { + [ClientToolCallState.generating]: { text: 'Preparing API request', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Review API request', icon: Globe2 }, + [ClientToolCallState.executing]: { text: 'Executing API request', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Completed API request', icon: Globe2 }, + [ClientToolCallState.error]: { text: 'Failed to execute API request', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped API request', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted API request', icon: XCircle }, + }, + interrupt: { + accept: { text: 'Execute', icon: Globe2 }, + reject: { text: 'Skip', icon: MinusCircle }, + }, + uiConfig: { interrupt: { accept: { text: 'Execute', icon: Globe2 }, reject: { text: 'Skip', icon: MinusCircle }, - }, - uiConfig: { - interrupt: { - accept: { text: 'Execute', icon: Globe2 }, - reject: { text: 'Skip', icon: MinusCircle }, - showAllowOnce: true, - showAllowAlways: true, - }, - paramsTable: { - columns: [ - { key: 'method', label: 'Method', width: '26%', editable: true, mono: true }, - { key: 'url', label: 'Endpoint', width: '74%', editable: true, mono: true }, - ], - extractRows: (params) => { - return [['request', (params.method || 'GET').toUpperCase(), params.url || '']] - }, - }, - }, - getDynamicText: (params, state) => { - if (params?.url && typeof 
params.url === 'string') { - const method = params.method || 'GET' - let url = params.url - - // Extract domain from URL for cleaner display - try { - const urlObj = new URL(url) - url = urlObj.hostname + urlObj.pathname - } catch { - // Use URL as-is if parsing fails - } - - switch (state) { - case ClientToolCallState.success: - return `${method} ${url} complete` - case ClientToolCallState.executing: - return `${method} ${url}` - case ClientToolCallState.generating: - return `Preparing ${method} ${url}` - case ClientToolCallState.pending: - return `Review ${method} ${url}` - case ClientToolCallState.error: - return `Failed ${method} ${url}` - case ClientToolCallState.rejected: - return `Skipped ${method} ${url}` - case ClientToolCallState.aborted: - return `Aborted ${method} ${url}` - } - } - return undefined - }, - } - -const META_manage_custom_tool: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Managing custom tool', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Manage custom tool?', icon: Plus }, - [ClientToolCallState.executing]: { text: 'Managing custom tool', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Managed custom tool', icon: Check }, - [ClientToolCallState.error]: { text: 'Failed to manage custom tool', icon: X }, - [ClientToolCallState.aborted]: { - text: 'Aborted managing custom tool', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped managing custom tool', - icon: XCircle, - }, - }, - interrupt: { - accept: { text: 'Allow', icon: Check }, - reject: { text: 'Skip', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const operation = params?.operation as 'add' | 'edit' | 'delete' | 'list' | undefined - - if (!operation) return undefined - - let toolName = params?.schema?.function?.name - if (!toolName && params?.toolId) { - try { - const tool = getCustomTool(params.toolId) - toolName = tool?.schema?.function?.name - } catch { - // Ignore errors 
accessing cache - } - } - - const getActionText = (verb: 'present' | 'past' | 'gerund') => { - switch (operation) { - case 'add': - return verb === 'present' ? 'Create' : verb === 'past' ? 'Created' : 'Creating' - case 'edit': - return verb === 'present' ? 'Edit' : verb === 'past' ? 'Edited' : 'Editing' - case 'delete': - return verb === 'present' ? 'Delete' : verb === 'past' ? 'Deleted' : 'Deleting' - case 'list': - return verb === 'present' ? 'List' : verb === 'past' ? 'Listed' : 'Listing' - default: - return verb === 'present' ? 'Manage' : verb === 'past' ? 'Managed' : 'Managing' - } - } - - // For add: only show tool name in past tense (success) - // For edit/delete: always show tool name - // For list: never show individual tool name, use plural - const shouldShowToolName = (currentState: ClientToolCallState) => { - if (operation === 'list') return false - if (operation === 'add') { - return currentState === ClientToolCallState.success - } - return true // edit and delete always show tool name + showAllowOnce: true, + showAllowAlways: true, + }, + paramsTable: { + columns: [ + { key: 'method', label: 'Method', width: '26%', editable: true, mono: true }, + { key: 'url', label: 'Endpoint', width: '74%', editable: true, mono: true }, + ], + extractRows: (params) => { + return [['request', (params.method || 'GET').toUpperCase(), params.url || '']] + }, + }, + }, + getDynamicText: (params, state) => { + if (params?.url && typeof params.url === 'string') { + const method = params.method || 'GET' + let url = params.url + + // Extract domain from URL for cleaner display + try { + const urlObj = new URL(url) + url = urlObj.hostname + urlObj.pathname + } catch { + // Use URL as-is if parsing fails } - const nameText = - operation === 'list' - ? ' custom tools' - : shouldShowToolName(state) && toolName - ? 
` ${toolName}` - : ' custom tool' - switch (state) { case ClientToolCallState.success: - return `${getActionText('past')}${nameText}` + return `${method} ${url} complete` case ClientToolCallState.executing: - return `${getActionText('gerund')}${nameText}` + return `${method} ${url}` case ClientToolCallState.generating: - return `${getActionText('gerund')}${nameText}` + return `Preparing ${method} ${url}` case ClientToolCallState.pending: - return `${getActionText('present')}${nameText}?` + return `Review ${method} ${url}` case ClientToolCallState.error: - return `Failed to ${getActionText('present')?.toLowerCase()}${nameText}` - case ClientToolCallState.aborted: - return `Aborted ${getActionText('gerund')?.toLowerCase()}${nameText}` + return `Failed ${method} ${url}` case ClientToolCallState.rejected: - return `Skipped ${getActionText('gerund')?.toLowerCase()}${nameText}` + return `Skipped ${method} ${url}` + case ClientToolCallState.aborted: + return `Aborted ${method} ${url}` } - return undefined - }, - } - -const META_manage_mcp_tool: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Managing MCP tool', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Manage MCP tool?', icon: Server }, - [ClientToolCallState.executing]: { text: 'Managing MCP tool', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Managed MCP tool', icon: Check }, - [ClientToolCallState.error]: { text: 'Failed to manage MCP tool', icon: X }, - [ClientToolCallState.aborted]: { - text: 'Aborted managing MCP tool', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped managing MCP tool', - icon: XCircle, - }, - }, - interrupt: { - accept: { text: 'Allow', icon: Check }, - reject: { text: 'Skip', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const operation = params?.operation as 'add' | 'edit' | 'delete' | undefined - - if (!operation) return undefined - - const serverName = params?.config?.name || 
params?.serverName + } + return undefined + }, +} - const getActionText = (verb: 'present' | 'past' | 'gerund') => { - switch (operation) { - case 'add': - return verb === 'present' ? 'Add' : verb === 'past' ? 'Added' : 'Adding' - case 'edit': - return verb === 'present' ? 'Edit' : verb === 'past' ? 'Edited' : 'Editing' - case 'delete': - return verb === 'present' ? 'Delete' : verb === 'past' ? 'Deleted' : 'Deleting' - } +const META_manage_custom_tool: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Managing custom tool', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Manage custom tool?', icon: Plus }, + [ClientToolCallState.executing]: { text: 'Managing custom tool', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Managed custom tool', icon: Check }, + [ClientToolCallState.error]: { text: 'Failed to manage custom tool', icon: X }, + [ClientToolCallState.aborted]: { + text: 'Aborted managing custom tool', + icon: XCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped managing custom tool', + icon: XCircle, + }, + }, + interrupt: { + accept: { text: 'Allow', icon: Check }, + reject: { text: 'Skip', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const operation = params?.operation as 'add' | 'edit' | 'delete' | 'list' | undefined + + if (!operation) return undefined + + let toolName = params?.schema?.function?.name + if (!toolName && params?.toolId) { + try { + const tool = getCustomTool(params.toolId) + toolName = tool?.schema?.function?.name + } catch { + // Ignore errors accessing cache } + } - const shouldShowServerName = (currentState: ClientToolCallState) => { - if (operation === 'add') { - return currentState === ClientToolCallState.success - } - return true + const getActionText = (verb: 'present' | 'past' | 'gerund') => { + switch (operation) { + case 'add': + return verb === 'present' ? 'Create' : verb === 'past' ? 
'Created' : 'Creating' + case 'edit': + return verb === 'present' ? 'Edit' : verb === 'past' ? 'Edited' : 'Editing' + case 'delete': + return verb === 'present' ? 'Delete' : verb === 'past' ? 'Deleted' : 'Deleting' + case 'list': + return verb === 'present' ? 'List' : verb === 'past' ? 'Listed' : 'Listing' + default: + return verb === 'present' ? 'Manage' : verb === 'past' ? 'Managed' : 'Managing' } + } - const nameText = shouldShowServerName(state) && serverName ? ` ${serverName}` : ' MCP tool' - - switch (state) { - case ClientToolCallState.success: - return `${getActionText('past')}${nameText}` - case ClientToolCallState.executing: - return `${getActionText('gerund')}${nameText}` - case ClientToolCallState.generating: - return `${getActionText('gerund')}${nameText}` - case ClientToolCallState.pending: - return `${getActionText('present')}${nameText}?` - case ClientToolCallState.error: - return `Failed to ${getActionText('present')?.toLowerCase()}${nameText}` - case ClientToolCallState.aborted: - return `Aborted ${getActionText('gerund')?.toLowerCase()}${nameText}` - case ClientToolCallState.rejected: - return `Skipped ${getActionText('gerund')?.toLowerCase()}${nameText}` - } - return undefined - }, - } - -const META_mark_todo_in_progress: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Marking todo in progress', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Marking todo in progress', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Marking todo in progress', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Marked todo in progress', icon: Loader2 }, - [ClientToolCallState.error]: { text: 'Failed to mark in progress', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted marking in progress', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped marking in progress', icon: MinusCircle }, - }, - } - -const META_navigate_ui: ToolMetadata = { - displayNames: { - 
[ClientToolCallState.generating]: { - text: 'Preparing to open', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Open?', icon: Navigation }, - [ClientToolCallState.executing]: { text: 'Opening', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Opened', icon: Navigation }, - [ClientToolCallState.error]: { text: 'Failed to open', icon: X }, - [ClientToolCallState.aborted]: { - text: 'Aborted opening', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped opening', - icon: XCircle, - }, - }, - interrupt: { - accept: { text: 'Open', icon: Navigation }, - reject: { text: 'Skip', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const destination = params?.destination as NavigationDestination | undefined - const workflowName = params?.workflowName - - const action = 'open' - const actionCapitalized = 'Open' - const actionPast = 'opened' - const actionIng = 'opening' - let target = '' - - if (destination === 'workflow' && workflowName) { - target = ` workflow "${workflowName}"` - } else if (destination === 'workflow') { - target = ' workflows' - } else if (destination === 'logs') { - target = ' logs' - } else if (destination === 'templates') { - target = ' templates' - } else if (destination === 'vector_db') { - target = ' vector database' - } else if (destination === 'settings') { - target = ' settings' + // For add: only show tool name in past tense (success) + // For edit/delete: always show tool name + // For list: never show individual tool name, use plural + const shouldShowToolName = (currentState: ClientToolCallState) => { + if (operation === 'list') return false + if (operation === 'add') { + return currentState === ClientToolCallState.success } + return true // edit and delete always show tool name + } - const fullAction = `${action}${target}` - const fullActionCapitalized = `${actionCapitalized}${target}` - const fullActionPast = `${actionPast}${target}` - const fullActionIng = `${actionIng}${target}` 
+ const nameText = + operation === 'list' + ? ' custom tools' + : shouldShowToolName(state) && toolName + ? ` ${toolName}` + : ' custom tool' + + switch (state) { + case ClientToolCallState.success: + return `${getActionText('past')}${nameText}` + case ClientToolCallState.executing: + return `${getActionText('gerund')}${nameText}` + case ClientToolCallState.generating: + return `${getActionText('gerund')}${nameText}` + case ClientToolCallState.pending: + return `${getActionText('present')}${nameText}?` + case ClientToolCallState.error: + return `Failed to ${getActionText('present')?.toLowerCase()}${nameText}` + case ClientToolCallState.aborted: + return `Aborted ${getActionText('gerund')?.toLowerCase()}${nameText}` + case ClientToolCallState.rejected: + return `Skipped ${getActionText('gerund')?.toLowerCase()}${nameText}` + } + return undefined + }, +} - switch (state) { - case ClientToolCallState.success: - return fullActionPast.charAt(0).toUpperCase() + fullActionPast.slice(1) - case ClientToolCallState.executing: - return fullActionIng.charAt(0).toUpperCase() + fullActionIng.slice(1) - case ClientToolCallState.generating: - return `Preparing to ${fullAction}` - case ClientToolCallState.pending: - return `${fullActionCapitalized}?` - case ClientToolCallState.error: - return `Failed to ${fullAction}` - case ClientToolCallState.aborted: - return `Aborted ${fullAction}` - case ClientToolCallState.rejected: - return `Skipped ${fullAction}` +const META_manage_mcp_tool: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Managing MCP tool', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Manage MCP tool?', icon: Server }, + [ClientToolCallState.executing]: { text: 'Managing MCP tool', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Managed MCP tool', icon: Check }, + [ClientToolCallState.error]: { text: 'Failed to manage MCP tool', icon: X }, + [ClientToolCallState.aborted]: { + text: 'Aborted managing MCP tool', 
+ icon: XCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped managing MCP tool', + icon: XCircle, + }, + }, + interrupt: { + accept: { text: 'Allow', icon: Check }, + reject: { text: 'Skip', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const operation = params?.operation as 'add' | 'edit' | 'delete' | undefined + + if (!operation) return undefined + + const serverName = params?.config?.name || params?.serverName + + const getActionText = (verb: 'present' | 'past' | 'gerund') => { + switch (operation) { + case 'add': + return verb === 'present' ? 'Add' : verb === 'past' ? 'Added' : 'Adding' + case 'edit': + return verb === 'present' ? 'Edit' : verb === 'past' ? 'Edited' : 'Editing' + case 'delete': + return verb === 'present' ? 'Delete' : verb === 'past' ? 'Deleted' : 'Deleting' } - return undefined - }, - } + } -const META_oauth_request_access: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Requesting integration access', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Requesting integration access', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Requesting integration access', icon: Loader2 }, - [ClientToolCallState.rejected]: { text: 'Skipped integration access', icon: MinusCircle }, - [ClientToolCallState.success]: { text: 'Requested integration access', icon: CheckCircle }, - [ClientToolCallState.error]: { text: 'Failed to request integration access', icon: X }, - [ClientToolCallState.aborted]: { text: 'Aborted integration access request', icon: XCircle }, - }, - interrupt: { - accept: { text: 'Connect', icon: PlugZap }, - reject: { text: 'Skip', icon: MinusCircle }, - }, - getDynamicText: (params, state) => { - if (params.providerName) { - const name = params.providerName - switch (state) { - case ClientToolCallState.generating: - case ClientToolCallState.pending: - case ClientToolCallState.executing: - return `Requesting ${name} access` - case 
ClientToolCallState.rejected: - return `Skipped ${name} access` - case ClientToolCallState.success: - return `Requested ${name} access` - case ClientToolCallState.error: - return `Failed to request ${name} access` - case ClientToolCallState.aborted: - return `Aborted ${name} access request` - } + const shouldShowServerName = (currentState: ClientToolCallState) => { + if (operation === 'add') { + return currentState === ClientToolCallState.success } - return undefined - }, - } - -const META_plan: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Planning', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Planning', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Planning', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Planned', icon: ListTodo }, - [ClientToolCallState.error]: { text: 'Failed to plan', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped plan', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted plan', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Planning', - completedLabel: 'Planned', - shouldCollapse: true, - outputArtifacts: ['plan'], - }, - }, - } + return true + } -const META_redeploy: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Redeploying workflow', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Redeploy workflow', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Redeploying workflow', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Redeployed workflow', icon: Rocket }, - [ClientToolCallState.error]: { text: 'Failed to redeploy workflow', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted redeploy', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped redeploy', icon: XCircle }, - }, - interrupt: undefined, - } + const nameText = shouldShowServerName(state) && serverName ? 
` ${serverName}` : ' MCP tool' + + switch (state) { + case ClientToolCallState.success: + return `${getActionText('past')}${nameText}` + case ClientToolCallState.executing: + return `${getActionText('gerund')}${nameText}` + case ClientToolCallState.generating: + return `${getActionText('gerund')}${nameText}` + case ClientToolCallState.pending: + return `${getActionText('present')}${nameText}?` + case ClientToolCallState.error: + return `Failed to ${getActionText('present')?.toLowerCase()}${nameText}` + case ClientToolCallState.aborted: + return `Aborted ${getActionText('gerund')?.toLowerCase()}${nameText}` + case ClientToolCallState.rejected: + return `Skipped ${getActionText('gerund')?.toLowerCase()}${nameText}` + } + return undefined + }, +} -const META_remember_debug: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Validating fix', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Validating fix', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Validating fix', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Validated fix', icon: CheckCircle2 }, - [ClientToolCallState.error]: { text: 'Failed to validate', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted validation', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped validation', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - const operation = params?.operation - - if (operation === 'add' || operation === 'edit') { - // For add/edit, show from problem or solution - const text = params?.problem || params?.solution - if (text && typeof text === 'string') { - switch (state) { - case ClientToolCallState.success: - return `Validated fix ${text}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Validating fix ${text}` - case ClientToolCallState.error: - return `Failed to validate fix ${text}` - case 
ClientToolCallState.aborted: - return `Aborted validating fix ${text}` - case ClientToolCallState.rejected: - return `Skipped validating fix ${text}` - } - } - } else if (operation === 'delete') { - // For delete, show from problem or solution (or id as fallback) - const text = params?.problem || params?.solution || params?.id - if (text && typeof text === 'string') { - switch (state) { - case ClientToolCallState.success: - return `Adjusted fix ${text}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Adjusting fix ${text}` - case ClientToolCallState.error: - return `Failed to adjust fix ${text}` - case ClientToolCallState.aborted: - return `Aborted adjusting fix ${text}` - case ClientToolCallState.rejected: - return `Skipped adjusting fix ${text}` - } - } - } +const META_mark_todo_in_progress: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Marking todo in progress', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Marking todo in progress', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Marking todo in progress', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Marked todo in progress', icon: Loader2 }, + [ClientToolCallState.error]: { text: 'Failed to mark in progress', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted marking in progress', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped marking in progress', icon: MinusCircle }, + }, +} - return undefined - }, - } +const META_navigate_ui: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Preparing to open', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Open?', icon: Navigation }, + [ClientToolCallState.executing]: { text: 'Opening', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Opened', icon: Navigation }, + [ClientToolCallState.error]: { text: 'Failed to open', icon: X 
}, + [ClientToolCallState.aborted]: { + text: 'Aborted opening', + icon: XCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped opening', + icon: XCircle, + }, + }, + interrupt: { + accept: { text: 'Open', icon: Navigation }, + reject: { text: 'Skip', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const destination = params?.destination as NavigationDestination | undefined + const workflowName = params?.workflowName + + const action = 'open' + const actionCapitalized = 'Open' + const actionPast = 'opened' + const actionIng = 'opening' + let target = '' + + if (destination === 'workflow' && workflowName) { + target = ` workflow "${workflowName}"` + } else if (destination === 'workflow') { + target = ' workflows' + } else if (destination === 'logs') { + target = ' logs' + } else if (destination === 'templates') { + target = ' templates' + } else if (destination === 'vector_db') { + target = ' vector database' + } else if (destination === 'settings') { + target = ' settings' + } -const META_research: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Researching', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Researching', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Researching', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Researched', icon: Search }, - [ClientToolCallState.error]: { text: 'Failed to research', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped research', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted research', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Researching', - completedLabel: 'Researched', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } + const fullAction = `${action}${target}` + const fullActionCapitalized = `${actionCapitalized}${target}` + const fullActionPast = `${actionPast}${target}` + const fullActionIng = `${actionIng}${target}` + + switch (state) { + case 
ClientToolCallState.success: + return fullActionPast.charAt(0).toUpperCase() + fullActionPast.slice(1) + case ClientToolCallState.executing: + return fullActionIng.charAt(0).toUpperCase() + fullActionIng.slice(1) + case ClientToolCallState.generating: + return `Preparing to ${fullAction}` + case ClientToolCallState.pending: + return `${fullActionCapitalized}?` + case ClientToolCallState.error: + return `Failed to ${fullAction}` + case ClientToolCallState.aborted: + return `Aborted ${fullAction}` + case ClientToolCallState.rejected: + return `Skipped ${fullAction}` + } + return undefined + }, +} -const META_run_workflow: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Preparing to run your workflow', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Run this workflow?', icon: Play }, - [ClientToolCallState.executing]: { text: 'Running your workflow', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Executed workflow', icon: Play }, - [ClientToolCallState.error]: { text: 'Errored running workflow', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped workflow execution', icon: MinusCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted workflow execution', icon: MinusCircle }, - [ClientToolCallState.background]: { text: 'Running in background', icon: Play }, - }, - interrupt: { - accept: { text: 'Run', icon: Play }, - reject: { text: 'Skip', icon: MinusCircle }, - }, - uiConfig: { - isSpecial: true, - interrupt: { - accept: { text: 'Run', icon: Play }, - reject: { text: 'Skip', icon: MinusCircle }, - showAllowOnce: true, - showAllowAlways: true, - }, - secondaryAction: { - text: 'Move to Background', - title: 'Move to Background', - variant: 'tertiary', - showInStates: [ClientToolCallState.executing], - completionMessage: - 'The user has chosen to move the workflow execution to the background. 
Check back with them later to know when the workflow execution is complete', - targetState: ClientToolCallState.background, - }, - paramsTable: { - columns: [ - { key: 'input', label: 'Input', width: '36%' }, - { key: 'value', label: 'Value', width: '64%', editable: true, mono: true }, - ], - extractRows: (params) => { - let inputs = params.input || params.inputs || params.workflow_input - if (typeof inputs === 'string') { - try { - inputs = JSON.parse(inputs) - } catch { - inputs = {} - } - } - if (params.workflow_input && typeof params.workflow_input === 'object') { - inputs = params.workflow_input - } - if (!inputs || typeof inputs !== 'object') { - const { workflowId, workflow_input, ...rest } = params - inputs = rest - } - const safeInputs = inputs && typeof inputs === 'object' ? inputs : {} - return Object.entries(safeInputs).map(([key, value]) => [key, key, String(value)]) - }, - }, - }, - getDynamicText: (params, state) => { - const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId - if (workflowId) { - const workflowName = useWorkflowRegistry.getState().workflows[workflowId]?.name - if (workflowName) { - switch (state) { - case ClientToolCallState.success: - return `Ran ${workflowName}` - case ClientToolCallState.executing: - return `Running ${workflowName}` - case ClientToolCallState.generating: - return `Preparing to run ${workflowName}` - case ClientToolCallState.pending: - return `Run ${workflowName}?` - case ClientToolCallState.error: - return `Failed to run ${workflowName}` - case ClientToolCallState.rejected: - return `Skipped running ${workflowName}` - case ClientToolCallState.aborted: - return `Aborted running ${workflowName}` - case ClientToolCallState.background: - return `Running ${workflowName} in background` - } - } +const META_oauth_request_access: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Requesting integration access', icon: Loader2 }, + [ClientToolCallState.pending]: { 
text: 'Requesting integration access', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Requesting integration access', icon: Loader2 }, + [ClientToolCallState.rejected]: { text: 'Skipped integration access', icon: MinusCircle }, + [ClientToolCallState.success]: { text: 'Requested integration access', icon: CheckCircle }, + [ClientToolCallState.error]: { text: 'Failed to request integration access', icon: X }, + [ClientToolCallState.aborted]: { text: 'Aborted integration access request', icon: XCircle }, + }, + interrupt: { + accept: { text: 'Connect', icon: PlugZap }, + reject: { text: 'Skip', icon: MinusCircle }, + }, + getDynamicText: (params, state) => { + if (params.providerName) { + const name = params.providerName + switch (state) { + case ClientToolCallState.generating: + case ClientToolCallState.pending: + case ClientToolCallState.executing: + return `Requesting ${name} access` + case ClientToolCallState.rejected: + return `Skipped ${name} access` + case ClientToolCallState.success: + return `Requested ${name} access` + case ClientToolCallState.error: + return `Failed to request ${name} access` + case ClientToolCallState.aborted: + return `Aborted ${name} access request` } - return undefined - }, - } + } + return undefined + }, +} -const META_scrape_page: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Scraping page', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Scraping page', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Scraping page', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Scraped page', icon: Globe }, - [ClientToolCallState.error]: { text: 'Failed to scrape page', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted scraping page', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped scraping page', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.url && typeof params.url 
=== 'string') { - const url = params.url +const META_plan: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Planning', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Planning', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Planning', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Planned', icon: ListTodo }, + [ClientToolCallState.error]: { text: 'Failed to plan', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped plan', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted plan', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Planning', + completedLabel: 'Planned', + shouldCollapse: true, + outputArtifacts: ['plan'], + }, + }, +} +const META_redeploy: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Redeploying workflow', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Redeploy workflow', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Redeploying workflow', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Redeployed workflow', icon: Rocket }, + [ClientToolCallState.error]: { text: 'Failed to redeploy workflow', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted redeploy', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped redeploy', icon: XCircle }, + }, + interrupt: undefined, +} + +const META_remember_debug: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Validating fix', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Validating fix', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Validating fix', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Validated fix', icon: CheckCircle2 }, + [ClientToolCallState.error]: { text: 'Failed to validate', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted validation', icon: MinusCircle }, + [ClientToolCallState.rejected]: { 
text: 'Skipped validation', icon: MinusCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + const operation = params?.operation + + if (operation === 'add' || operation === 'edit') { + // For add/edit, show from problem or solution + const text = params?.problem || params?.solution + if (text && typeof text === 'string') { switch (state) { case ClientToolCallState.success: - return `Scraped ${url}` + return `Validated fix ${text}` case ClientToolCallState.executing: case ClientToolCallState.generating: case ClientToolCallState.pending: - return `Scraping ${url}` + return `Validating fix ${text}` case ClientToolCallState.error: - return `Failed to scrape ${url}` + return `Failed to validate fix ${text}` case ClientToolCallState.aborted: - return `Aborted scraping ${url}` + return `Aborted validating fix ${text}` case ClientToolCallState.rejected: - return `Skipped scraping ${url}` + return `Skipped validating fix ${text}` } } - return undefined - }, - } - -const META_search_documentation: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Searching documentation', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Searching documentation', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Searching documentation', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Completed documentation search', icon: BookOpen }, - [ClientToolCallState.error]: { text: 'Failed to search docs', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted documentation search', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped documentation search', icon: MinusCircle }, - }, - getDynamicText: (params, state) => { - if (params?.query && typeof params.query === 'string') { - const query = params.query - + } else if (operation === 'delete') { + // For delete, show from problem or solution (or id as fallback) + const text = params?.problem || params?.solution || params?.id + if (text 
&& typeof text === 'string') { switch (state) { case ClientToolCallState.success: - return `Searched docs for ${query}` + return `Adjusted fix ${text}` case ClientToolCallState.executing: case ClientToolCallState.generating: case ClientToolCallState.pending: - return `Searching docs for ${query}` + return `Adjusting fix ${text}` case ClientToolCallState.error: - return `Failed to search docs for ${query}` + return `Failed to adjust fix ${text}` case ClientToolCallState.aborted: - return `Aborted searching docs for ${query}` + return `Aborted adjusting fix ${text}` case ClientToolCallState.rejected: - return `Skipped searching docs for ${query}` + return `Skipped adjusting fix ${text}` } } - return undefined - }, - } + } -const META_search_errors: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Debugging', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Debugging', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Debugging', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Debugged', icon: Bug }, - [ClientToolCallState.error]: { text: 'Failed to debug', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted debugging', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped debugging', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.query && typeof params.query === 'string') { - const query = params.query + return undefined + }, +} + +const META_research: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Researching', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Researching', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Researching', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Researched', icon: Search }, + [ClientToolCallState.error]: { text: 'Failed to research', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped research', 
icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted research', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Researching', + completedLabel: 'Researched', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} +const META_run_workflow: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Preparing to run your workflow', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Run this workflow?', icon: Play }, + [ClientToolCallState.executing]: { text: 'Running your workflow', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Executed workflow', icon: Play }, + [ClientToolCallState.error]: { text: 'Errored running workflow', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped workflow execution', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted workflow execution', icon: MinusCircle }, + [ClientToolCallState.background]: { text: 'Running in background', icon: Play }, + }, + interrupt: { + accept: { text: 'Run', icon: Play }, + reject: { text: 'Skip', icon: MinusCircle }, + }, + uiConfig: { + isSpecial: true, + interrupt: { + accept: { text: 'Run', icon: Play }, + reject: { text: 'Skip', icon: MinusCircle }, + showAllowOnce: true, + showAllowAlways: true, + }, + secondaryAction: { + text: 'Move to Background', + title: 'Move to Background', + variant: 'tertiary', + showInStates: [ClientToolCallState.executing], + completionMessage: + 'The user has chosen to move the workflow execution to the background. 
Check back with them later to know when the workflow execution is complete', + targetState: ClientToolCallState.background, + }, + paramsTable: { + columns: [ + { key: 'input', label: 'Input', width: '36%' }, + { key: 'value', label: 'Value', width: '64%', editable: true, mono: true }, + ], + extractRows: (params) => { + let inputs = params.input || params.inputs || params.workflow_input + if (typeof inputs === 'string') { + try { + inputs = JSON.parse(inputs) + } catch { + inputs = {} + } + } + if (params.workflow_input && typeof params.workflow_input === 'object') { + inputs = params.workflow_input + } + if (!inputs || typeof inputs !== 'object') { + const { workflowId, workflow_input, ...rest } = params + inputs = rest + } + const safeInputs = inputs && typeof inputs === 'object' ? inputs : {} + return Object.entries(safeInputs).map(([key, value]) => [key, key, String(value)]) + }, + }, + }, + getDynamicText: (params, state) => { + const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId + if (workflowId) { + const workflowName = useWorkflowRegistry.getState().workflows[workflowId]?.name + if (workflowName) { switch (state) { case ClientToolCallState.success: - return `Debugged ${query}` + return `Ran ${workflowName}` case ClientToolCallState.executing: + return `Running ${workflowName}` case ClientToolCallState.generating: + return `Preparing to run ${workflowName}` case ClientToolCallState.pending: - return `Debugging ${query}` + return `Run ${workflowName}?` case ClientToolCallState.error: - return `Failed to debug ${query}` - case ClientToolCallState.aborted: - return `Aborted debugging ${query}` + return `Failed to run ${workflowName}` case ClientToolCallState.rejected: - return `Skipped debugging ${query}` + return `Skipped running ${workflowName}` + case ClientToolCallState.aborted: + return `Aborted running ${workflowName}` + case ClientToolCallState.background: + return `Running ${workflowName} in background` } } - return 
undefined - }, - } + } + return undefined + }, +} + +const META_scrape_page: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Scraping page', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Scraping page', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Scraping page', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Scraped page', icon: Globe }, + [ClientToolCallState.error]: { text: 'Failed to scrape page', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted scraping page', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped scraping page', icon: MinusCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.url && typeof params.url === 'string') { + const url = params.url + + switch (state) { + case ClientToolCallState.success: + return `Scraped ${url}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Scraping ${url}` + case ClientToolCallState.error: + return `Failed to scrape ${url}` + case ClientToolCallState.aborted: + return `Aborted scraping ${url}` + case ClientToolCallState.rejected: + return `Skipped scraping ${url}` + } + } + return undefined + }, +} + +const META_search_documentation: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Searching documentation', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Searching documentation', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Searching documentation', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Completed documentation search', icon: BookOpen }, + [ClientToolCallState.error]: { text: 'Failed to search docs', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted documentation search', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped documentation search', icon: MinusCircle }, + }, + 
getDynamicText: (params, state) => { + if (params?.query && typeof params.query === 'string') { + const query = params.query + + switch (state) { + case ClientToolCallState.success: + return `Searched docs for ${query}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Searching docs for ${query}` + case ClientToolCallState.error: + return `Failed to search docs for ${query}` + case ClientToolCallState.aborted: + return `Aborted searching docs for ${query}` + case ClientToolCallState.rejected: + return `Skipped searching docs for ${query}` + } + } + return undefined + }, +} + +const META_search_errors: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Debugging', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Debugging', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Debugging', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Debugged', icon: Bug }, + [ClientToolCallState.error]: { text: 'Failed to debug', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted debugging', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped debugging', icon: MinusCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.query && typeof params.query === 'string') { + const query = params.query + + switch (state) { + case ClientToolCallState.success: + return `Debugged ${query}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Debugging ${query}` + case ClientToolCallState.error: + return `Failed to debug ${query}` + case ClientToolCallState.aborted: + return `Aborted debugging ${query}` + case ClientToolCallState.rejected: + return `Skipped debugging ${query}` + } + } + return undefined + }, +} const META_search_library_docs: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 
'Reading docs', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Reading docs', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Reading docs', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Read docs', icon: BookOpen }, - [ClientToolCallState.error]: { text: 'Failed to read docs', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted reading docs', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped reading docs', icon: MinusCircle }, - }, - getDynamicText: (params, state) => { - const libraryName = params?.library_name - if (libraryName && typeof libraryName === 'string') { - switch (state) { - case ClientToolCallState.success: - return `Read ${libraryName} docs` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Reading ${libraryName} docs` - case ClientToolCallState.error: - return `Failed to read ${libraryName} docs` - case ClientToolCallState.aborted: - return `Aborted reading ${libraryName} docs` - case ClientToolCallState.rejected: - return `Skipped reading ${libraryName} docs` - } + displayNames: { + [ClientToolCallState.generating]: { text: 'Reading docs', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Reading docs', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Reading docs', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Read docs', icon: BookOpen }, + [ClientToolCallState.error]: { text: 'Failed to read docs', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted reading docs', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped reading docs', icon: MinusCircle }, + }, + getDynamicText: (params, state) => { + const libraryName = params?.library_name + if (libraryName && typeof libraryName === 'string') { + switch (state) { + case ClientToolCallState.success: + return `Read ${libraryName} docs` + case ClientToolCallState.executing: + case 
ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Reading ${libraryName} docs` + case ClientToolCallState.error: + return `Failed to read ${libraryName} docs` + case ClientToolCallState.aborted: + return `Aborted reading ${libraryName} docs` + case ClientToolCallState.rejected: + return `Skipped reading ${libraryName} docs` } - return undefined - }, - } + } + return undefined + }, +} const META_search_online: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Searching online', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Searching online', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Searching online', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Completed online search', icon: Globe }, - [ClientToolCallState.error]: { text: 'Failed to search online', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped online search', icon: MinusCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted online search', icon: XCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.query && typeof params.query === 'string') { - const query = params.query + displayNames: { + [ClientToolCallState.generating]: { text: 'Searching online', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Searching online', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Searching online', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Completed online search', icon: Globe }, + [ClientToolCallState.error]: { text: 'Failed to search online', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped online search', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted online search', icon: XCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.query && typeof params.query === 'string') { + const query = params.query - switch (state) { - case 
ClientToolCallState.success: - return `Searched online for ${query}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Searching online for ${query}` - case ClientToolCallState.error: - return `Failed to search online for ${query}` - case ClientToolCallState.aborted: - return `Aborted searching online for ${query}` - case ClientToolCallState.rejected: - return `Skipped searching online for ${query}` - } + switch (state) { + case ClientToolCallState.success: + return `Searched online for ${query}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Searching online for ${query}` + case ClientToolCallState.error: + return `Failed to search online for ${query}` + case ClientToolCallState.aborted: + return `Aborted searching online for ${query}` + case ClientToolCallState.rejected: + return `Skipped searching online for ${query}` } - return undefined - }, - } + } + return undefined + }, +} const META_search_patterns: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Searching workflow patterns', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Searching workflow patterns', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Searching workflow patterns', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Found workflow patterns', icon: Search }, - [ClientToolCallState.error]: { text: 'Failed to search patterns', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted pattern search', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped pattern search', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.queries && Array.isArray(params.queries) && params.queries.length > 0) { - const firstQuery = String(params.queries[0]) + displayNames: { + [ClientToolCallState.generating]: { text: 'Searching 
workflow patterns', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Searching workflow patterns', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Searching workflow patterns', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Found workflow patterns', icon: Search }, + [ClientToolCallState.error]: { text: 'Failed to search patterns', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted pattern search', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped pattern search', icon: MinusCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.queries && Array.isArray(params.queries) && params.queries.length > 0) { + const firstQuery = String(params.queries[0]) - switch (state) { - case ClientToolCallState.success: - return `Searched ${firstQuery}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Searching ${firstQuery}` - case ClientToolCallState.error: - return `Failed to search ${firstQuery}` - case ClientToolCallState.aborted: - return `Aborted searching ${firstQuery}` - case ClientToolCallState.rejected: - return `Skipped searching ${firstQuery}` - } + switch (state) { + case ClientToolCallState.success: + return `Searched ${firstQuery}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Searching ${firstQuery}` + case ClientToolCallState.error: + return `Failed to search ${firstQuery}` + case ClientToolCallState.aborted: + return `Aborted searching ${firstQuery}` + case ClientToolCallState.rejected: + return `Skipped searching ${firstQuery}` } - return undefined - }, - } + } + return undefined + }, +} const META_set_environment_variables: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to set environment variables', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 
'Set environment variables?', icon: Settings2 }, - [ClientToolCallState.executing]: { text: 'Setting environment variables', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Set environment variables', icon: Settings2 }, - [ClientToolCallState.error]: { text: 'Failed to set environment variables', icon: X }, - [ClientToolCallState.aborted]: { - text: 'Aborted setting environment variables', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped setting environment variables', - icon: XCircle, - }, - }, + displayNames: { + [ClientToolCallState.generating]: { + text: 'Preparing to set environment variables', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Set environment variables?', icon: Settings2 }, + [ClientToolCallState.executing]: { text: 'Setting environment variables', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Set environment variables', icon: Settings2 }, + [ClientToolCallState.error]: { text: 'Failed to set environment variables', icon: X }, + [ClientToolCallState.aborted]: { + text: 'Aborted setting environment variables', + icon: XCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped setting environment variables', + icon: XCircle, + }, + }, + interrupt: { + accept: { text: 'Apply', icon: Settings2 }, + reject: { text: 'Skip', icon: XCircle }, + }, + uiConfig: { + alwaysExpanded: true, interrupt: { accept: { text: 'Apply', icon: Settings2 }, reject: { text: 'Skip', icon: XCircle }, - }, - uiConfig: { - alwaysExpanded: true, - interrupt: { - accept: { text: 'Apply', icon: Settings2 }, - reject: { text: 'Skip', icon: XCircle }, - showAllowOnce: true, - showAllowAlways: true, - }, - paramsTable: { - columns: [ - { key: 'name', label: 'Variable', width: '36%', editable: true }, - { key: 'value', label: 'Value', width: '64%', editable: true, mono: true }, - ], - extractRows: (params) => { - const variables = params.variables || {} - const entries = Array.isArray(variables) - ? 
variables.map((v: any, i: number) => [String(i), v.name || `var_${i}`, v.value || '']) - : Object.entries(variables).map(([key, val]) => { - if (typeof val === 'object' && val !== null && 'value' in (val as any)) { - return [key, key, (val as any).value] - } - return [key, key, val] - }) - return entries as Array<[string, ...any[]]> - }, - }, - }, - getDynamicText: (params, state) => { - if (params?.variables && typeof params.variables === 'object') { - const count = Object.keys(params.variables).length - const varText = count === 1 ? 'variable' : 'variables' + showAllowOnce: true, + showAllowAlways: true, + }, + paramsTable: { + columns: [ + { key: 'name', label: 'Variable', width: '36%', editable: true }, + { key: 'value', label: 'Value', width: '64%', editable: true, mono: true }, + ], + extractRows: (params) => { + const variables = params.variables || {} + const entries = Array.isArray(variables) + ? variables.map((v: any, i: number) => [String(i), v.name || `var_${i}`, v.value || '']) + : Object.entries(variables).map(([key, val]) => { + if (typeof val === 'object' && val !== null && 'value' in (val as any)) { + return [key, key, (val as any).value] + } + return [key, key, val] + }) + return entries as Array<[string, ...any[]]> + }, + }, + }, + getDynamicText: (params, state) => { + if (params?.variables && typeof params.variables === 'object') { + const count = Object.keys(params.variables).length + const varText = count === 1 ? 
'variable' : 'variables' - switch (state) { - case ClientToolCallState.success: - return `Set ${count} ${varText}` - case ClientToolCallState.executing: - return `Setting ${count} ${varText}` - case ClientToolCallState.generating: - return `Preparing to set ${count} ${varText}` - case ClientToolCallState.pending: - return `Set ${count} ${varText}?` - case ClientToolCallState.error: - return `Failed to set ${count} ${varText}` - case ClientToolCallState.aborted: - return `Aborted setting ${count} ${varText}` - case ClientToolCallState.rejected: - return `Skipped setting ${count} ${varText}` - } + switch (state) { + case ClientToolCallState.success: + return `Set ${count} ${varText}` + case ClientToolCallState.executing: + return `Setting ${count} ${varText}` + case ClientToolCallState.generating: + return `Preparing to set ${count} ${varText}` + case ClientToolCallState.pending: + return `Set ${count} ${varText}?` + case ClientToolCallState.error: + return `Failed to set ${count} ${varText}` + case ClientToolCallState.aborted: + return `Aborted setting ${count} ${varText}` + case ClientToolCallState.rejected: + return `Skipped setting ${count} ${varText}` } - return undefined - }, - } + } + return undefined + }, +} const META_set_global_workflow_variables: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to set workflow variables', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Set workflow variables?', icon: Settings2 }, - [ClientToolCallState.executing]: { text: 'Setting workflow variables', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Updated workflow variables', icon: Settings2 }, - [ClientToolCallState.error]: { text: 'Failed to set workflow variables', icon: X }, - [ClientToolCallState.aborted]: { text: 'Aborted setting variables', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped setting variables', icon: XCircle }, - }, + displayNames: { + 
[ClientToolCallState.generating]: { + text: 'Preparing to set workflow variables', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Set workflow variables?', icon: Settings2 }, + [ClientToolCallState.executing]: { text: 'Setting workflow variables', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Updated workflow variables', icon: Settings2 }, + [ClientToolCallState.error]: { text: 'Failed to set workflow variables', icon: X }, + [ClientToolCallState.aborted]: { text: 'Aborted setting variables', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped setting variables', icon: XCircle }, + }, + interrupt: { + accept: { text: 'Apply', icon: Settings2 }, + reject: { text: 'Skip', icon: XCircle }, + }, + uiConfig: { interrupt: { accept: { text: 'Apply', icon: Settings2 }, reject: { text: 'Skip', icon: XCircle }, - }, - uiConfig: { - interrupt: { - accept: { text: 'Apply', icon: Settings2 }, - reject: { text: 'Skip', icon: XCircle }, - showAllowOnce: true, - showAllowAlways: true, - }, - paramsTable: { - columns: [ - { key: 'name', label: 'Name', width: '40%', editable: true, mono: true }, - { key: 'value', label: 'Value', width: '60%', editable: true, mono: true }, - ], - extractRows: (params) => { - const operations = params.operations || [] - return operations.map((op: any, idx: number) => [ - String(idx), - op.name || '', - String(op.value ?? ''), - ]) - }, - }, - }, - getDynamicText: (params, state) => { - if (params?.operations && Array.isArray(params.operations)) { - const varNames = params.operations - .slice(0, 2) - .map((op: any) => op.name) - .filter(Boolean) - - if (varNames.length > 0) { - const varList = varNames.join(', ') - const more = params.operations.length > 2 ? '...' 
: '' - const displayText = `${varList}${more}` - - switch (state) { - case ClientToolCallState.success: - return `Set ${displayText}` - case ClientToolCallState.executing: - return `Setting ${displayText}` - case ClientToolCallState.generating: - return `Preparing to set ${displayText}` - case ClientToolCallState.pending: - return `Set ${displayText}?` - case ClientToolCallState.error: - return `Failed to set ${displayText}` - case ClientToolCallState.aborted: - return `Aborted setting ${displayText}` - case ClientToolCallState.rejected: - return `Skipped setting ${displayText}` - } - } - } - return undefined - }, - } + showAllowOnce: true, + showAllowAlways: true, + }, + paramsTable: { + columns: [ + { key: 'name', label: 'Name', width: '40%', editable: true, mono: true }, + { key: 'value', label: 'Value', width: '60%', editable: true, mono: true }, + ], + extractRows: (params) => { + const operations = params.operations || [] + return operations.map((op: any, idx: number) => [ + String(idx), + op.name || '', + String(op.value ?? ''), + ]) + }, + }, + }, + getDynamicText: (params, state) => { + if (params?.operations && Array.isArray(params.operations)) { + const varNames = params.operations + .slice(0, 2) + .map((op: any) => op.name) + .filter(Boolean) + + if (varNames.length > 0) { + const varList = varNames.join(', ') + const more = params.operations.length > 2 ? '...' 
: '' + const displayText = `${varList}${more}` -const META_sleep: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Preparing to sleep', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Sleeping', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Sleeping', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Finished sleeping', icon: Moon }, - [ClientToolCallState.error]: { text: 'Interrupted sleep', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped sleep', icon: MinusCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted sleep', icon: MinusCircle }, - [ClientToolCallState.background]: { text: 'Resumed', icon: Moon }, - }, - uiConfig: { - secondaryAction: { - text: 'Wake', - title: 'Wake', - variant: 'tertiary', - showInStates: [ClientToolCallState.executing], - targetState: ClientToolCallState.background, - }, - }, - // No interrupt - auto-execute immediately - getDynamicText: (params, state) => { - const seconds = params?.seconds - if (typeof seconds === 'number' && seconds > 0) { - const displayTime = formatDuration(seconds) switch (state) { case ClientToolCallState.success: - return `Slept for ${displayTime}` + return `Set ${displayText}` case ClientToolCallState.executing: - case ClientToolCallState.pending: - return `Sleeping for ${displayTime}` + return `Setting ${displayText}` case ClientToolCallState.generating: - return `Preparing to sleep for ${displayTime}` + return `Preparing to set ${displayText}` + case ClientToolCallState.pending: + return `Set ${displayText}?` case ClientToolCallState.error: - return `Failed to sleep for ${displayTime}` - case ClientToolCallState.rejected: - return `Skipped sleeping for ${displayTime}` + return `Failed to set ${displayText}` case ClientToolCallState.aborted: - return `Aborted sleeping for ${displayTime}` - case ClientToolCallState.background: { - // Calculate elapsed time from when sleep started - const elapsedSeconds = 
params?._elapsedSeconds - if (typeof elapsedSeconds === 'number' && elapsedSeconds > 0) { - return `Resumed after ${formatDuration(Math.round(elapsedSeconds))}` - } - return 'Resumed early' + return `Aborted setting ${displayText}` + case ClientToolCallState.rejected: + return `Skipped setting ${displayText}` + } + } + } + return undefined + }, +} + +const META_sleep: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Preparing to sleep', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Sleeping', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Sleeping', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Finished sleeping', icon: Moon }, + [ClientToolCallState.error]: { text: 'Interrupted sleep', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped sleep', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted sleep', icon: MinusCircle }, + [ClientToolCallState.background]: { text: 'Resumed', icon: Moon }, + }, + uiConfig: { + secondaryAction: { + text: 'Wake', + title: 'Wake', + variant: 'tertiary', + showInStates: [ClientToolCallState.executing], + targetState: ClientToolCallState.background, + }, + }, + // No interrupt - auto-execute immediately + getDynamicText: (params, state) => { + const seconds = params?.seconds + if (typeof seconds === 'number' && seconds > 0) { + const displayTime = formatDuration(seconds) + switch (state) { + case ClientToolCallState.success: + return `Slept for ${displayTime}` + case ClientToolCallState.executing: + case ClientToolCallState.pending: + return `Sleeping for ${displayTime}` + case ClientToolCallState.generating: + return `Preparing to sleep for ${displayTime}` + case ClientToolCallState.error: + return `Failed to sleep for ${displayTime}` + case ClientToolCallState.rejected: + return `Skipped sleeping for ${displayTime}` + case ClientToolCallState.aborted: + return `Aborted sleeping for ${displayTime}` + case 
ClientToolCallState.background: { + // Calculate elapsed time from when sleep started + const elapsedSeconds = params?._elapsedSeconds + if (typeof elapsedSeconds === 'number' && elapsedSeconds > 0) { + return `Resumed after ${formatDuration(Math.round(elapsedSeconds))}` } + return 'Resumed early' } } - return undefined - }, - } + } + return undefined + }, +} const META_summarize_conversation: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Summarizing conversation', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Summarizing conversation', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Summarizing conversation', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Summarized conversation', icon: PencilLine }, - [ClientToolCallState.error]: { text: 'Failed to summarize conversation', icon: XCircle }, - [ClientToolCallState.aborted]: { - text: 'Aborted summarizing conversation', - icon: MinusCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped summarizing conversation', - icon: MinusCircle, - }, - }, - interrupt: undefined, - } + displayNames: { + [ClientToolCallState.generating]: { text: 'Summarizing conversation', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Summarizing conversation', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Summarizing conversation', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Summarized conversation', icon: PencilLine }, + [ClientToolCallState.error]: { text: 'Failed to summarize conversation', icon: XCircle }, + [ClientToolCallState.aborted]: { + text: 'Aborted summarizing conversation', + icon: MinusCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped summarizing conversation', + icon: MinusCircle, + }, + }, + interrupt: undefined, +} const META_superagent: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Superagent working', icon: Loader2 }, - [ClientToolCallState.pending]: { 
text: 'Superagent working', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Superagent working', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Superagent completed', icon: Sparkles }, - [ClientToolCallState.error]: { text: 'Superagent failed', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Superagent skipped', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Superagent aborted', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Superagent working', - completedLabel: 'Superagent completed', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } + displayNames: { + [ClientToolCallState.generating]: { text: 'Superagent working', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Superagent working', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Superagent working', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Superagent completed', icon: Sparkles }, + [ClientToolCallState.error]: { text: 'Superagent failed', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Superagent skipped', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Superagent aborted', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Superagent working', + completedLabel: 'Superagent completed', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} const META_test: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Testing', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Testing', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Testing', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Tested', icon: FlaskConical }, - [ClientToolCallState.error]: { text: 'Failed to test', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped test', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted test', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 
'Testing', - completedLabel: 'Tested', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } + displayNames: { + [ClientToolCallState.generating]: { text: 'Testing', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Testing', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Testing', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Tested', icon: FlaskConical }, + [ClientToolCallState.error]: { text: 'Failed to test', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped test', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted test', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Testing', + completedLabel: 'Tested', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} const META_tour: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Touring', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Touring', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Touring', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Completed tour', icon: Compass }, - [ClientToolCallState.error]: { text: 'Failed tour', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped tour', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted tour', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Touring', - completedLabel: 'Tour complete', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } + displayNames: { + [ClientToolCallState.generating]: { text: 'Touring', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Touring', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Touring', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Completed tour', icon: Compass }, + [ClientToolCallState.error]: { text: 'Failed tour', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped tour', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted 
tour', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Touring', + completedLabel: 'Tour complete', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} const META_workflow: ToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Managing workflow', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Managing workflow', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Managing workflow', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Managed workflow', icon: GitBranch }, - [ClientToolCallState.error]: { text: 'Failed to manage workflow', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped workflow', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted workflow', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Managing workflow', - completedLabel: 'Workflow managed', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } + displayNames: { + [ClientToolCallState.generating]: { text: 'Managing workflow', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Managing workflow', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Managing workflow', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Managed workflow', icon: GitBranch }, + [ClientToolCallState.error]: { text: 'Failed to manage workflow', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped workflow', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted workflow', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Managing workflow', + completedLabel: 'Workflow managed', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} const TOOL_METADATA_BY_ID: Record = { - 'auth': META_auth, - 'check_deployment_status': META_check_deployment_status, - 'checkoff_todo': META_checkoff_todo, - 'crawl_website': META_crawl_website, - 'create_workspace_mcp_server': META_create_workspace_mcp_server, - 
'custom_tool': META_custom_tool, - 'debug': META_debug, - 'deploy': META_deploy, - 'deploy_api': META_deploy_api, - 'deploy_chat': META_deploy_chat, - 'deploy_mcp': META_deploy_mcp, - 'edit': META_edit, - 'edit_workflow': META_edit_workflow, - 'evaluate': META_evaluate, - 'get_block_config': META_get_block_config, - 'get_block_options': META_get_block_options, - 'get_block_outputs': META_get_block_outputs, - 'get_block_upstream_references': META_get_block_upstream_references, - 'get_blocks_and_tools': META_get_blocks_and_tools, - 'get_blocks_metadata': META_get_blocks_metadata, - 'get_credentials': META_get_credentials, - 'get_examples_rag': META_get_examples_rag, - 'get_operations_examples': META_get_operations_examples, - 'get_page_contents': META_get_page_contents, - 'get_trigger_blocks': META_get_trigger_blocks, - 'get_trigger_examples': META_get_trigger_examples, - 'get_user_workflow': META_get_user_workflow, - 'get_workflow_console': META_get_workflow_console, - 'get_workflow_data': META_get_workflow_data, - 'get_workflow_from_name': META_get_workflow_from_name, - 'info': META_info, - 'knowledge': META_knowledge, - 'knowledge_base': META_knowledge_base, - 'list_user_workflows': META_list_user_workflows, - 'list_workspace_mcp_servers': META_list_workspace_mcp_servers, - 'make_api_request': META_make_api_request, - 'manage_custom_tool': META_manage_custom_tool, - 'manage_mcp_tool': META_manage_mcp_tool, - 'mark_todo_in_progress': META_mark_todo_in_progress, - 'navigate_ui': META_navigate_ui, - 'oauth_request_access': META_oauth_request_access, - 'plan': META_plan, - 'redeploy': META_redeploy, - 'remember_debug': META_remember_debug, - 'research': META_research, - 'run_workflow': META_run_workflow, - 'scrape_page': META_scrape_page, - 'search_documentation': META_search_documentation, - 'search_errors': META_search_errors, - 'search_library_docs': META_search_library_docs, - 'search_online': META_search_online, - 'search_patterns': META_search_patterns, - 
'set_environment_variables': META_set_environment_variables, - 'set_global_workflow_variables': META_set_global_workflow_variables, - 'sleep': META_sleep, - 'summarize_conversation': META_summarize_conversation, - 'superagent': META_superagent, - 'test': META_test, - 'tour': META_tour, - 'workflow': META_workflow, + auth: META_auth, + check_deployment_status: META_check_deployment_status, + checkoff_todo: META_checkoff_todo, + crawl_website: META_crawl_website, + create_workspace_mcp_server: META_create_workspace_mcp_server, + custom_tool: META_custom_tool, + debug: META_debug, + deploy: META_deploy, + deploy_api: META_deploy_api, + deploy_chat: META_deploy_chat, + deploy_mcp: META_deploy_mcp, + edit: META_edit, + edit_workflow: META_edit_workflow, + evaluate: META_evaluate, + get_block_config: META_get_block_config, + get_block_options: META_get_block_options, + get_block_outputs: META_get_block_outputs, + get_block_upstream_references: META_get_block_upstream_references, + get_blocks_and_tools: META_get_blocks_and_tools, + get_blocks_metadata: META_get_blocks_metadata, + get_credentials: META_get_credentials, + get_examples_rag: META_get_examples_rag, + get_operations_examples: META_get_operations_examples, + get_page_contents: META_get_page_contents, + get_trigger_blocks: META_get_trigger_blocks, + get_trigger_examples: META_get_trigger_examples, + get_user_workflow: META_get_user_workflow, + get_workflow_console: META_get_workflow_console, + get_workflow_data: META_get_workflow_data, + get_workflow_from_name: META_get_workflow_from_name, + info: META_info, + knowledge: META_knowledge, + knowledge_base: META_knowledge_base, + list_user_workflows: META_list_user_workflows, + list_workspace_mcp_servers: META_list_workspace_mcp_servers, + make_api_request: META_make_api_request, + manage_custom_tool: META_manage_custom_tool, + manage_mcp_tool: META_manage_mcp_tool, + mark_todo_in_progress: META_mark_todo_in_progress, + navigate_ui: META_navigate_ui, + 
oauth_request_access: META_oauth_request_access, + plan: META_plan, + redeploy: META_redeploy, + remember_debug: META_remember_debug, + research: META_research, + run_workflow: META_run_workflow, + scrape_page: META_scrape_page, + search_documentation: META_search_documentation, + search_errors: META_search_errors, + search_library_docs: META_search_library_docs, + search_online: META_search_online, + search_patterns: META_search_patterns, + set_environment_variables: META_set_environment_variables, + set_global_workflow_variables: META_set_global_workflow_variables, + sleep: META_sleep, + summarize_conversation: META_summarize_conversation, + superagent: META_superagent, + test: META_test, + tour: META_tour, + workflow: META_workflow, } export const TOOL_DISPLAY_REGISTRY: Record = Object.fromEntries( diff --git a/apps/sim/lib/copilot/types.ts b/apps/sim/lib/copilot/types.ts index 68e0970397..b9742f335b 100644 --- a/apps/sim/lib/copilot/types.ts +++ b/apps/sim/lib/copilot/types.ts @@ -1,58 +1,4 @@ -/** - * Copilot Types - Consolidated from various locations - * This file contains all copilot-related type definitions - */ - -// Tool call state types (from apps/sim/types/tool-call.ts) -export interface ToolCallState { - id: string - name: string - displayName?: string - parameters?: Record - state: - | 'detecting' - | 'pending' - | 'executing' - | 'completed' - | 'error' - | 'rejected' - | 'applied' - | 'ready_for_review' - | 'aborted' - | 'skipped' - | 'background' - startTime?: number - endTime?: number - duration?: number - result?: any - error?: string - progress?: string -} - -export interface ToolCallGroup { - id: string - toolCalls: ToolCallState[] - status: 'pending' | 'in_progress' | 'completed' | 'error' - startTime?: number - endTime?: number - summary?: string -} - -export interface InlineContent { - type: 'text' | 'tool_call' - content: string - toolCall?: ToolCallState -} - -export interface ParsedMessageContent { - textContent: string - toolCalls: 
ToolCallState[] - toolGroups: ToolCallGroup[] - inlineContent?: InlineContent[] -} - import type { ProviderId } from '@/providers/types' -// Copilot Tools Type Definitions (from workspace copilot lib) import type { CopilotToolCall, ToolState } from '@/stores/panel' export type NotificationStatus = @@ -63,82 +9,10 @@ export type NotificationStatus = | 'rejected' | 'background' -// Export the consolidated types export type { CopilotToolCall, ToolState } -// Display configuration for different states -export interface StateDisplayConfig { - displayName: string - icon?: string - className?: string -} - -// Complete display configuration for a tool -export interface ToolDisplayConfig { - states: { - [K in ToolState]?: StateDisplayConfig - } - getDynamicDisplayName?: (state: ToolState, params: Record) => string | null -} - -// Schema for tool parameters (OpenAI function calling format) -export interface ToolSchema { - name: string - description: string - parameters?: { - type: 'object' - properties: Record - required?: string[] - } -} - -// Tool metadata - all the static configuration -export interface ToolMetadata { - id: string - displayConfig: ToolDisplayConfig - schema: ToolSchema - requiresInterrupt: boolean - allowBackgroundExecution?: boolean - stateMessages?: Partial> -} - -// Result from executing a tool -export interface ToolExecuteResult { - success: boolean - data?: any - error?: string -} - -// Response from the confirmation API -export interface ToolConfirmResponse { - success: boolean - message?: string -} - -// Options for tool execution -export interface ToolExecutionOptions { - onStateChange?: (state: ToolState) => void - beforeExecute?: () => Promise - afterExecute?: (result: ToolExecuteResult) => Promise - context?: Record -} - -// The main tool interface that all tools must implement -export interface Tool { - metadata: ToolMetadata - execute(toolCall: CopilotToolCall, options?: ToolExecutionOptions): Promise - getDisplayName(toolCall: 
CopilotToolCall): string - getIcon(toolCall: CopilotToolCall): string - handleUserAction( - toolCall: CopilotToolCall, - action: 'run' | 'skip' | 'background', - options?: ToolExecutionOptions - ): Promise - requiresConfirmation(toolCall: CopilotToolCall): boolean -} - -// Provider configuration for Sim Agent requests -// This type is only for the `provider` field in requests sent to the Sim Agent +// Provider configuration for Sim Agent requests. +// This type is only for the `provider` field in requests sent to the Sim Agent. export type CopilotProviderConfig = | { provider: 'azure-openai' From 6b40d4f5a39d4dc652f9424c1143a9e6aca4ea6c Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Thu, 5 Feb 2026 15:26:24 -0800 Subject: [PATCH 25/72] Continued cleanup --- .../panel/components/copilot/copilot.tsx | 2 - .../copilot/hooks/use-chat-history.ts | 8 +- .../lib/copilot/client-sse/content-blocks.ts | 149 ++ apps/sim/lib/copilot/client-sse/handlers.ts | 720 ++++++ apps/sim/lib/copilot/client-sse/index.ts | 3 + .../copilot/client-sse/subagent-handlers.ts | 360 +++ apps/sim/lib/copilot/client-sse/types.ts | 23 + apps/sim/lib/copilot/messages/checkpoints.ts | 128 + .../copilot/messages/credential-masking.ts | 28 + apps/sim/lib/copilot/messages/index.ts | 3 + .../sim/lib/copilot/messages/serialization.ts | 169 ++ .../orchestrator/sse-handlers/handlers.ts | 29 +- .../orchestrator/tool-executor/index.ts | 98 +- apps/sim/lib/copilot/store-utils.ts | 162 ++ .../tools/client/tool-display-registry.ts | 29 +- apps/sim/stores/panel/copilot/store.ts | 2224 ++--------------- apps/sim/stores/panel/copilot/types.ts | 7 - apps/sim/stores/workflow-diff/store.ts | 71 +- 18 files changed, 1995 insertions(+), 2218 deletions(-) create mode 100644 apps/sim/lib/copilot/client-sse/content-blocks.ts create mode 100644 apps/sim/lib/copilot/client-sse/handlers.ts create mode 100644 apps/sim/lib/copilot/client-sse/index.ts create mode 100644 apps/sim/lib/copilot/client-sse/subagent-handlers.ts 
create mode 100644 apps/sim/lib/copilot/client-sse/types.ts create mode 100644 apps/sim/lib/copilot/messages/checkpoints.ts create mode 100644 apps/sim/lib/copilot/messages/credential-masking.ts create mode 100644 apps/sim/lib/copilot/messages/index.ts create mode 100644 apps/sim/lib/copilot/messages/serialization.ts create mode 100644 apps/sim/lib/copilot/store-utils.ts diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/copilot.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/copilot.tsx index e52898e15d..39e2a0095a 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/copilot.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/copilot.tsx @@ -107,7 +107,6 @@ export const Copilot = forwardRef(({ panelWidth }, ref currentChat, selectChat, deleteChat, - areChatsFresh, workflowId: copilotWorkflowId, setPlanTodos, closePlanTodos, @@ -142,7 +141,6 @@ export const Copilot = forwardRef(({ panelWidth }, ref activeWorkflowId, copilotWorkflowId, loadChats, - areChatsFresh, isSendingMessage, } ) diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/hooks/use-chat-history.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/hooks/use-chat-history.ts index 04f1cb033c..0978c8335e 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/hooks/use-chat-history.ts +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/hooks/use-chat-history.ts @@ -10,7 +10,6 @@ interface UseChatHistoryProps { activeWorkflowId: string | null copilotWorkflowId: string | null loadChats: (forceRefresh: boolean) => Promise - areChatsFresh: (workflowId: string) => boolean isSendingMessage: boolean } @@ -21,8 +20,7 @@ interface UseChatHistoryProps { * 
@returns Chat history utilities */ export function useChatHistory(props: UseChatHistoryProps) { - const { chats, activeWorkflowId, copilotWorkflowId, loadChats, areChatsFresh, isSendingMessage } = - props + const { chats, activeWorkflowId, copilotWorkflowId, loadChats, isSendingMessage } = props /** Groups chats by time period (Today, Yesterday, This Week, etc.) */ const groupedChats = useMemo(() => { @@ -80,7 +78,7 @@ export function useChatHistory(props: UseChatHistoryProps) { /** Handles history dropdown opening and loads chats if needed (non-blocking) */ const handleHistoryDropdownOpen = useCallback( (open: boolean) => { - if (open && activeWorkflowId && !isSendingMessage && !areChatsFresh(activeWorkflowId)) { + if (open && activeWorkflowId && !isSendingMessage) { loadChats(false).catch((error) => { logger.error('Failed to load chat history:', error) }) @@ -90,7 +88,7 @@ export function useChatHistory(props: UseChatHistoryProps) { logger.info('Chat history opened during stream - showing cached data only') } }, - [activeWorkflowId, areChatsFresh, isSendingMessage, loadChats] + [activeWorkflowId, isSendingMessage, loadChats] ) return { diff --git a/apps/sim/lib/copilot/client-sse/content-blocks.ts b/apps/sim/lib/copilot/client-sse/content-blocks.ts new file mode 100644 index 0000000000..c2ee72458b --- /dev/null +++ b/apps/sim/lib/copilot/client-sse/content-blocks.ts @@ -0,0 +1,149 @@ +import type { + ChatContext, + CopilotMessage, + MessageFileAttachment, +} from '@/stores/panel/copilot/types' +import type { StreamingContext } from './types' + +const TEXT_BLOCK_TYPE = 'text' +const THINKING_BLOCK_TYPE = 'thinking' +const CONTINUE_OPTIONS_TAG = '{"1":"Continue"}' + +export function createUserMessage( + content: string, + fileAttachments?: MessageFileAttachment[], + contexts?: ChatContext[], + messageId?: string +): CopilotMessage { + return { + id: messageId || crypto.randomUUID(), + role: 'user', + content, + timestamp: new Date().toISOString(), + 
...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }), + ...(contexts && contexts.length > 0 && { contexts }), + ...(contexts && + contexts.length > 0 && { + contentBlocks: [ + { type: 'contexts', contexts: contexts as any, timestamp: Date.now() }, + ] as any, + }), + } +} + +export function createStreamingMessage(): CopilotMessage { + return { + id: crypto.randomUUID(), + role: 'assistant', + content: '', + timestamp: new Date().toISOString(), + } +} + +export function createErrorMessage( + messageId: string, + content: string, + errorType?: 'usage_limit' | 'unauthorized' | 'forbidden' | 'rate_limit' | 'upgrade_required' +): CopilotMessage { + return { + id: messageId, + role: 'assistant', + content, + timestamp: new Date().toISOString(), + contentBlocks: [ + { + type: 'text', + content, + timestamp: Date.now(), + }, + ], + errorType, + } +} + +export function appendTextBlock(context: StreamingContext, text: string) { + if (!text) return + context.accumulatedContent += text + if (context.currentTextBlock && context.contentBlocks.length > 0) { + const lastBlock = context.contentBlocks[context.contentBlocks.length - 1] + if (lastBlock.type === TEXT_BLOCK_TYPE && lastBlock === context.currentTextBlock) { + lastBlock.content += text + return + } + } + context.currentTextBlock = { type: '', content: '', timestamp: 0, toolCall: null } + context.currentTextBlock.type = TEXT_BLOCK_TYPE + context.currentTextBlock.content = text + context.currentTextBlock.timestamp = Date.now() + context.contentBlocks.push(context.currentTextBlock) +} + +export function appendContinueOption(content: string): string { + if (//i.test(content)) return content + const suffix = content.trim().length > 0 ? 
'\n\n' : '' + return `${content}${suffix}${CONTINUE_OPTIONS_TAG}` +} + +export function appendContinueOptionBlock(blocks: any[]): any[] { + if (!Array.isArray(blocks)) return blocks + const hasOptions = blocks.some( + (block) => + block?.type === TEXT_BLOCK_TYPE && + typeof block.content === 'string' && + //i.test(block.content) + ) + if (hasOptions) return blocks + return [ + ...blocks, + { + type: TEXT_BLOCK_TYPE, + content: CONTINUE_OPTIONS_TAG, + timestamp: Date.now(), + }, + ] +} + +export function stripContinueOption(content: string): string { + if (!content || !content.includes(CONTINUE_OPTIONS_TAG)) return content + const next = content.replace(CONTINUE_OPTIONS_TAG, '') + return next.replace(/\n{2,}\s*$/g, '\n').trimEnd() +} + +export function stripContinueOptionFromBlocks(blocks: any[]): any[] { + if (!Array.isArray(blocks)) return blocks + return blocks.flatMap((block) => { + if ( + block?.type === TEXT_BLOCK_TYPE && + typeof block.content === 'string' && + block.content.includes(CONTINUE_OPTIONS_TAG) + ) { + const nextContent = stripContinueOption(block.content) + if (!nextContent.trim()) return [] + return [{ ...block, content: nextContent }] + } + return [block] + }) +} + +export function beginThinkingBlock(context: StreamingContext) { + if (!context.currentThinkingBlock) { + context.currentThinkingBlock = { type: '', content: '', timestamp: 0, toolCall: null } + context.currentThinkingBlock.type = THINKING_BLOCK_TYPE + context.currentThinkingBlock.content = '' + context.currentThinkingBlock.timestamp = Date.now() + ;(context.currentThinkingBlock as any).startTime = Date.now() + context.contentBlocks.push(context.currentThinkingBlock) + } + context.isInThinkingBlock = true + context.currentTextBlock = null +} + +export function finalizeThinkingBlock(context: StreamingContext) { + if (context.currentThinkingBlock) { + context.currentThinkingBlock.duration = + Date.now() - (context.currentThinkingBlock.startTime || Date.now()) + } + 
context.isInThinkingBlock = false + context.currentThinkingBlock = null + context.currentTextBlock = null +} diff --git a/apps/sim/lib/copilot/client-sse/handlers.ts b/apps/sim/lib/copilot/client-sse/handlers.ts new file mode 100644 index 0000000000..169917578b --- /dev/null +++ b/apps/sim/lib/copilot/client-sse/handlers.ts @@ -0,0 +1,720 @@ +import { createLogger } from '@sim/logger' +import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' +import { useWorkflowDiffStore } from '@/stores/workflow-diff/store' +import type { CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types' +import { + appendTextBlock, + beginThinkingBlock, + finalizeThinkingBlock, +} from './content-blocks' +import type { StreamingContext } from './types' +import { + isBackgroundState, + isRejectedState, + isReviewState, + resolveToolDisplay, +} from '@/lib/copilot/store-utils' + +const logger = createLogger('CopilotClientSseHandlers') +const STREAM_STORAGE_KEY = 'copilot_active_stream' +const TEXT_BLOCK_TYPE = 'text' +const MAX_BATCH_INTERVAL = 50 +const MIN_BATCH_INTERVAL = 16 +const MAX_QUEUE_SIZE = 5 + +function writeActiveStreamToStorage(info: any): void { + if (typeof window === 'undefined') return + try { + if (!info) { + window.sessionStorage.removeItem(STREAM_STORAGE_KEY) + return + } + window.sessionStorage.setItem(STREAM_STORAGE_KEY, JSON.stringify(info)) + } catch {} +} + +export type SSEHandler = ( + data: any, + context: StreamingContext, + get: () => CopilotStore, + set: any +) => Promise | void + +const streamingUpdateQueue = new Map() +let streamingUpdateRAF: number | null = null +let lastBatchTime = 0 + +export function stopStreamingUpdates() { + if (streamingUpdateRAF !== null) { + cancelAnimationFrame(streamingUpdateRAF) + streamingUpdateRAF = null + } + streamingUpdateQueue.clear() +} + +function createOptimizedContentBlocks(contentBlocks: any[]): any[] { + const result: any[] = new Array(contentBlocks.length) + for (let i = 0; i 
< contentBlocks.length; i++) { + const block = contentBlocks[i] + result[i] = { ...block } + } + return result +} + +export function flushStreamingUpdates(set: any) { + if (streamingUpdateRAF !== null) { + cancelAnimationFrame(streamingUpdateRAF) + streamingUpdateRAF = null + } + if (streamingUpdateQueue.size === 0) return + + const updates = new Map(streamingUpdateQueue) + streamingUpdateQueue.clear() + + set((state: CopilotStore) => { + if (updates.size === 0) return state + return { + messages: state.messages.map((msg) => { + const update = updates.get(msg.id) + if (update) { + return { + ...msg, + content: '', + contentBlocks: + update.contentBlocks.length > 0 ? createOptimizedContentBlocks(update.contentBlocks) : [], + } + } + return msg + }), + } + }) +} + +export function updateStreamingMessage(set: any, context: StreamingContext) { + if (context.suppressStreamingUpdates) return + const now = performance.now() + streamingUpdateQueue.set(context.messageId, context) + const timeSinceLastBatch = now - lastBatchTime + const shouldFlushImmediately = + streamingUpdateQueue.size >= MAX_QUEUE_SIZE || timeSinceLastBatch > MAX_BATCH_INTERVAL + + if (streamingUpdateRAF === null) { + const scheduleUpdate = () => { + streamingUpdateRAF = requestAnimationFrame(() => { + const updates = new Map(streamingUpdateQueue) + streamingUpdateQueue.clear() + streamingUpdateRAF = null + lastBatchTime = performance.now() + set((state: CopilotStore) => { + if (updates.size === 0) return state + const messages = state.messages + const lastMessage = messages[messages.length - 1] + const lastMessageUpdate = lastMessage ? updates.get(lastMessage.id) : null + if (updates.size === 1 && lastMessageUpdate) { + const newMessages = [...messages] + newMessages[messages.length - 1] = { + ...lastMessage, + content: '', + contentBlocks: + lastMessageUpdate.contentBlocks.length > 0 + ? 
createOptimizedContentBlocks(lastMessageUpdate.contentBlocks) + : [], + } + return { messages: newMessages } + } + return { + messages: messages.map((msg) => { + const update = updates.get(msg.id) + if (update) { + return { + ...msg, + content: '', + contentBlocks: + update.contentBlocks.length > 0 + ? createOptimizedContentBlocks(update.contentBlocks) + : [], + } + } + return msg + }), + } + }) + }) + } + if (shouldFlushImmediately) scheduleUpdate() + else setTimeout(scheduleUpdate, Math.max(0, MIN_BATCH_INTERVAL - timeSinceLastBatch)) + } +} + +export function upsertToolCallBlock(context: StreamingContext, toolCall: CopilotToolCall) { + let found = false + for (let i = 0; i < context.contentBlocks.length; i++) { + const b = context.contentBlocks[i] as any + if (b.type === 'tool_call' && b.toolCall?.id === toolCall.id) { + context.contentBlocks[i] = { ...b, toolCall } + found = true + break + } + } + if (!found) { + context.contentBlocks.push({ type: 'tool_call', toolCall, timestamp: Date.now() }) + } +} + +function stripThinkingTags(text: string): string { + return text.replace(/<\/?thinking[^>]*>/gi, '').replace(/<\/?thinking[^&]*>/gi, '') +} + +function appendThinkingContent(context: StreamingContext, text: string) { + if (!text) return + const cleanedText = stripThinkingTags(text) + if (!cleanedText) return + if (context.currentThinkingBlock) { + context.currentThinkingBlock.content += cleanedText + } else { + context.currentThinkingBlock = { type: '', content: '', timestamp: 0, toolCall: null } + context.currentThinkingBlock.type = 'thinking' + context.currentThinkingBlock.content = cleanedText + context.currentThinkingBlock.timestamp = Date.now() + context.currentThinkingBlock.startTime = Date.now() + context.contentBlocks.push(context.currentThinkingBlock) + } + context.isInThinkingBlock = true + context.currentTextBlock = null +} + +export const sseHandlers: Record = { + chat_id: async (data, context, get, set) => { + context.newChatId = data.chatId + 
const { currentChat, activeStream } = get() + if (!currentChat && context.newChatId) { + await get().handleNewChatCreation(context.newChatId) + } + if (activeStream && context.newChatId && !activeStream.chatId) { + const updatedStream = { ...activeStream, chatId: context.newChatId } + set({ activeStream: updatedStream }) + writeActiveStreamToStorage(updatedStream) + } + }, + title_updated: (_data, _context, get, set) => { + const title = _data.title + if (!title) return + const { currentChat, chats } = get() + if (currentChat) { + set({ + currentChat: { ...currentChat, title }, + chats: chats.map((c) => (c.id === currentChat.id ? { ...c, title } : c)), + }) + } + }, + tool_result: (data, context, get, set) => { + try { + const toolCallId: string | undefined = data?.toolCallId || data?.data?.id + const success: boolean | undefined = data?.success + const failedDependency: boolean = data?.failedDependency === true + const skipped: boolean = data?.result?.skipped === true + if (!toolCallId) return + const { toolCallsById } = get() + const current = toolCallsById[toolCallId] + if (current) { + if ( + isRejectedState(current.state) || + isReviewState(current.state) || + isBackgroundState(current.state) + ) { + return + } + const targetState = success + ? ClientToolCallState.success + : failedDependency || skipped + ? ClientToolCallState.rejected + : ClientToolCallState.error + const updatedMap = { ...toolCallsById } + updatedMap[toolCallId] = { + ...current, + state: targetState, + display: resolveToolDisplay( + current.name, + targetState, + current.id, + (current as any).params + ), + } + set({ toolCallsById: updatedMap }) + + if (targetState === ClientToolCallState.success && current.name === 'checkoff_todo') { + try { + const result = (data?.result || data?.data?.result) ?? {} + const input = ((current as any).params || (current as any).input) ?? 
{} + const todoId = input.id || input.todoId || result.id || result.todoId + if (todoId) { + get().updatePlanTodoStatus(todoId, 'completed') + } + } catch {} + } + + if ( + targetState === ClientToolCallState.success && + current.name === 'mark_todo_in_progress' + ) { + try { + const result = (data?.result || data?.data?.result) ?? {} + const input = ((current as any).params || (current as any).input) ?? {} + const todoId = input.id || input.todoId || result.id || result.todoId + if (todoId) { + get().updatePlanTodoStatus(todoId, 'executing') + } + } catch {} + } + + if (current.name === 'edit_workflow') { + try { + const resultPayload = + (data?.result || data?.data?.result || data?.data?.data || data?.data) ?? {} + const workflowState = resultPayload?.workflowState + logger.info('[SSE] edit_workflow result received', { + hasWorkflowState: !!workflowState, + blockCount: workflowState ? Object.keys(workflowState.blocks ?? {}).length : 0, + edgeCount: workflowState?.edges?.length ?? 0, + }) + if (workflowState) { + const diffStore = useWorkflowDiffStore.getState() + diffStore.setProposedChanges(workflowState).catch((err) => { + logger.error('[SSE] Failed to apply edit_workflow diff', { + error: err instanceof Error ? err.message : String(err), + }) + }) + } + } catch (err) { + logger.error('[SSE] edit_workflow result handling failed', { + error: err instanceof Error ? err.message : String(err), + }) + } + } + } + + for (let i = 0; i < context.contentBlocks.length; i++) { + const b = context.contentBlocks[i] as any + if (b?.type === 'tool_call' && b?.toolCall?.id === toolCallId) { + if ( + isRejectedState(b.toolCall?.state) || + isReviewState(b.toolCall?.state) || + isBackgroundState(b.toolCall?.state) + ) + break + const targetState = success + ? ClientToolCallState.success + : failedDependency || skipped + ? 
ClientToolCallState.rejected + : ClientToolCallState.error + context.contentBlocks[i] = { + ...b, + toolCall: { + ...b.toolCall, + state: targetState, + display: resolveToolDisplay( + b.toolCall?.name, + targetState, + toolCallId, + b.toolCall?.params + ), + }, + } + break + } + } + updateStreamingMessage(set, context) + } catch {} + }, + tool_error: (data, context, get, set) => { + try { + const toolCallId: string | undefined = data?.toolCallId || data?.data?.id + const failedDependency: boolean = data?.failedDependency === true + if (!toolCallId) return + const { toolCallsById } = get() + const current = toolCallsById[toolCallId] + if (current) { + if ( + isRejectedState(current.state) || + isReviewState(current.state) || + isBackgroundState(current.state) + ) { + return + } + const targetState = failedDependency + ? ClientToolCallState.rejected + : ClientToolCallState.error + const updatedMap = { ...toolCallsById } + updatedMap[toolCallId] = { + ...current, + state: targetState, + display: resolveToolDisplay( + current.name, + targetState, + current.id, + (current as any).params + ), + } + set({ toolCallsById: updatedMap }) + } + for (let i = 0; i < context.contentBlocks.length; i++) { + const b = context.contentBlocks[i] as any + if (b?.type === 'tool_call' && b?.toolCall?.id === toolCallId) { + if ( + isRejectedState(b.toolCall?.state) || + isReviewState(b.toolCall?.state) || + isBackgroundState(b.toolCall?.state) + ) + break + const targetState = failedDependency + ? 
ClientToolCallState.rejected + : ClientToolCallState.error + context.contentBlocks[i] = { + ...b, + toolCall: { + ...b.toolCall, + state: targetState, + display: resolveToolDisplay( + b.toolCall?.name, + targetState, + toolCallId, + b.toolCall?.params + ), + }, + } + break + } + } + updateStreamingMessage(set, context) + } catch {} + }, + tool_generating: (data, context, get, set) => { + const { toolCallId, toolName } = data + if (!toolCallId || !toolName) return + const { toolCallsById } = get() + + if (!toolCallsById[toolCallId]) { + const initialState = ClientToolCallState.pending + const tc: CopilotToolCall = { + id: toolCallId, + name: toolName, + state: initialState, + display: resolveToolDisplay(toolName, initialState, toolCallId), + } + const updated = { ...toolCallsById, [toolCallId]: tc } + set({ toolCallsById: updated }) + logger.info('[toolCallsById] map updated', updated) + + upsertToolCallBlock(context, tc) + updateStreamingMessage(set, context) + } + }, + tool_call: (data, context, get, set) => { + const toolData = data?.data ?? {} + const id: string | undefined = toolData.id || data?.toolCallId + const name: string | undefined = toolData.name || data?.toolName + if (!id) return + const args = toolData.arguments + const isPartial = toolData.partial === true + const { toolCallsById } = get() + + const existing = toolCallsById[id] + const next: CopilotToolCall = existing + ? { + ...existing, + state: ClientToolCallState.pending, + ...(args ? { params: args } : {}), + display: resolveToolDisplay(name, ClientToolCallState.pending, id, args), + } + : { + id, + name: name || 'unknown_tool', + state: ClientToolCallState.pending, + ...(args ? 
{ params: args } : {}), + display: resolveToolDisplay(name, ClientToolCallState.pending, id, args), + } + const updated = { ...toolCallsById, [id]: next } + set({ toolCallsById: updated }) + logger.info('[toolCallsById] → pending', { id, name, params: args }) + + upsertToolCallBlock(context, next) + updateStreamingMessage(set, context) + + if (isPartial) { + return + } + + return + }, + reasoning: (data, context, _get, set) => { + const phase = (data && (data.phase || data?.data?.phase)) as string | undefined + if (phase === 'start') { + beginThinkingBlock(context) + updateStreamingMessage(set, context) + return + } + if (phase === 'end') { + finalizeThinkingBlock(context) + updateStreamingMessage(set, context) + return + } + const chunk: string = typeof data?.data === 'string' ? data.data : data?.content || '' + if (!chunk) return + appendThinkingContent(context, chunk) + updateStreamingMessage(set, context) + }, + content: (data, context, get, set) => { + if (!data.data) return + context.pendingContent += data.data + + let contentToProcess = context.pendingContent + let hasProcessedContent = false + + const thinkingStartRegex = // + const thinkingEndRegex = /<\/thinking>/ + const designWorkflowStartRegex = // + const designWorkflowEndRegex = /<\/design_workflow>/ + + const splitTrailingPartialTag = ( + text: string, + tags: string[] + ): { text: string; remaining: string } => { + const partialIndex = text.lastIndexOf('<') + if (partialIndex < 0) { + return { text, remaining: '' } + } + const possibleTag = text.substring(partialIndex) + const matchesTagStart = tags.some((tag) => tag.startsWith(possibleTag)) + if (!matchesTagStart) { + return { text, remaining: '' } + } + return { + text: text.substring(0, partialIndex), + remaining: possibleTag, + } + } + + while (contentToProcess.length > 0) { + if (context.isInDesignWorkflowBlock) { + const endMatch = designWorkflowEndRegex.exec(contentToProcess) + if (endMatch) { + const designContent = 
contentToProcess.substring(0, endMatch.index) + context.designWorkflowContent += designContent + context.isInDesignWorkflowBlock = false + + logger.info('[design_workflow] Tag complete, setting plan content', { + contentLength: context.designWorkflowContent.length, + }) + set({ streamingPlanContent: context.designWorkflowContent }) + + contentToProcess = contentToProcess.substring(endMatch.index + endMatch[0].length) + hasProcessedContent = true + } else { + const { text, remaining } = splitTrailingPartialTag(contentToProcess, [ + '', + ]) + context.designWorkflowContent += text + + set({ streamingPlanContent: context.designWorkflowContent }) + + contentToProcess = remaining + hasProcessedContent = true + if (remaining) { + break + } + } + continue + } + + if (!context.isInThinkingBlock && !context.isInDesignWorkflowBlock) { + const designStartMatch = designWorkflowStartRegex.exec(contentToProcess) + if (designStartMatch) { + const textBeforeDesign = contentToProcess.substring(0, designStartMatch.index) + if (textBeforeDesign) { + appendTextBlock(context, textBeforeDesign) + hasProcessedContent = true + } + context.isInDesignWorkflowBlock = true + context.designWorkflowContent = '' + contentToProcess = contentToProcess.substring( + designStartMatch.index + designStartMatch[0].length + ) + hasProcessedContent = true + continue + } + + const nextMarkIndex = contentToProcess.indexOf('') + const nextCheckIndex = contentToProcess.indexOf('') + const hasMark = nextMarkIndex >= 0 + const hasCheck = nextCheckIndex >= 0 + + const nextTagIndex = + hasMark && hasCheck + ? Math.min(nextMarkIndex, nextCheckIndex) + : hasMark + ? nextMarkIndex + : hasCheck + ? nextCheckIndex + : -1 + + if (nextTagIndex >= 0) { + const isMarkTodo = hasMark && nextMarkIndex === nextTagIndex + const tagStart = isMarkTodo ? '' : '' + const tagEnd = isMarkTodo ? 
'' : '' + const closingIndex = contentToProcess.indexOf(tagEnd, nextTagIndex + tagStart.length) + + if (closingIndex === -1) { + break + } + + const todoId = contentToProcess + .substring(nextTagIndex + tagStart.length, closingIndex) + .trim() + logger.info( + isMarkTodo ? '[TODO] Detected marktodo tag' : '[TODO] Detected checkofftodo tag', + { todoId } + ) + + if (todoId) { + try { + get().updatePlanTodoStatus(todoId, isMarkTodo ? 'executing' : 'completed') + logger.info( + isMarkTodo + ? '[TODO] Successfully marked todo in progress' + : '[TODO] Successfully checked off todo', + { todoId } + ) + } catch (e) { + logger.error( + isMarkTodo + ? '[TODO] Failed to mark todo in progress' + : '[TODO] Failed to checkoff todo', + { todoId, error: e } + ) + } + } else { + logger.warn('[TODO] Empty todoId extracted from todo tag', { tagType: tagStart }) + } + + let beforeTag = contentToProcess.substring(0, nextTagIndex) + let afterTag = contentToProcess.substring(closingIndex + tagEnd.length) + + const hadNewlineBefore = /(\r?\n)+$/.test(beforeTag) + const hadNewlineAfter = /^(\r?\n)+/.test(afterTag) + + beforeTag = beforeTag.replace(/(\r?\n)+$/, '') + afterTag = afterTag.replace(/^(\r?\n)+/, '') + + contentToProcess = + beforeTag + (hadNewlineBefore && hadNewlineAfter ? 
'\n' : '') + afterTag + context.currentTextBlock = null + hasProcessedContent = true + continue + } + } + + if (context.isInThinkingBlock) { + const endMatch = thinkingEndRegex.exec(contentToProcess) + if (endMatch) { + const thinkingContent = contentToProcess.substring(0, endMatch.index) + appendThinkingContent(context, thinkingContent) + finalizeThinkingBlock(context) + contentToProcess = contentToProcess.substring(endMatch.index + endMatch[0].length) + hasProcessedContent = true + } else { + const { text, remaining } = splitTrailingPartialTag(contentToProcess, ['']) + if (text) { + appendThinkingContent(context, text) + hasProcessedContent = true + } + contentToProcess = remaining + if (remaining) { + break + } + } + } else { + const startMatch = thinkingStartRegex.exec(contentToProcess) + if (startMatch) { + const textBeforeThinking = contentToProcess.substring(0, startMatch.index) + if (textBeforeThinking) { + appendTextBlock(context, textBeforeThinking) + hasProcessedContent = true + } + context.isInThinkingBlock = true + context.currentTextBlock = null + contentToProcess = contentToProcess.substring(startMatch.index + startMatch[0].length) + hasProcessedContent = true + } else { + let partialTagIndex = contentToProcess.lastIndexOf('<') + + const partialMarkTodo = contentToProcess.lastIndexOf(' partialTagIndex) { + partialTagIndex = partialMarkTodo + } + if (partialCheckoffTodo > partialTagIndex) { + partialTagIndex = partialCheckoffTodo + } + + let textToAdd = contentToProcess + let remaining = '' + if (partialTagIndex >= 0 && partialTagIndex > contentToProcess.length - 50) { + textToAdd = contentToProcess.substring(0, partialTagIndex) + remaining = contentToProcess.substring(partialTagIndex) + } + if (textToAdd) { + appendTextBlock(context, textToAdd) + hasProcessedContent = true + } + contentToProcess = remaining + break + } + } + } + + context.pendingContent = contentToProcess + if (hasProcessedContent) { + updateStreamingMessage(set, context) + } + }, + 
done: (_data, context) => { + logger.info('[SSE] DONE EVENT RECEIVED', { + doneEventCount: context.doneEventCount, + data: _data, + }) + context.doneEventCount++ + if (context.doneEventCount >= 1) { + logger.info('[SSE] Setting streamComplete = true, stream will terminate') + context.streamComplete = true + } + }, + error: (data, context, _get, set) => { + logger.error('Stream error:', data.error) + set((state: CopilotStore) => ({ + messages: state.messages.map((msg) => + msg.id === context.messageId + ? { + ...msg, + content: context.accumulatedContent || 'An error occurred.', + error: data.error, + } + : msg + ), + })) + context.streamComplete = true + }, + stream_end: (_data, context, _get, set) => { + if (context.pendingContent) { + if (context.isInThinkingBlock && context.currentThinkingBlock) { + appendThinkingContent(context, context.pendingContent) + } else if (context.pendingContent.trim()) { + appendTextBlock(context, context.pendingContent) + } + context.pendingContent = '' + } + finalizeThinkingBlock(context) + updateStreamingMessage(set, context) + }, + default: () => {}, +} diff --git a/apps/sim/lib/copilot/client-sse/index.ts b/apps/sim/lib/copilot/client-sse/index.ts new file mode 100644 index 0000000000..a08f89593e --- /dev/null +++ b/apps/sim/lib/copilot/client-sse/index.ts @@ -0,0 +1,3 @@ +export { sseHandlers } from './handlers' +export { subAgentSSEHandlers, applySseEvent } from './subagent-handlers' +export type { SSEHandler } from './handlers' diff --git a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts new file mode 100644 index 0000000000..fa2fc2e1c6 --- /dev/null +++ b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts @@ -0,0 +1,360 @@ +import { createLogger } from '@sim/logger' +import { + normalizeSseEvent, + shouldSkipToolCallEvent, + shouldSkipToolResultEvent, +} from '@/lib/copilot/orchestrator/sse-utils' +import { ClientToolCallState } from 
'@/lib/copilot/tools/client/tool-display-registry' +import { resolveToolDisplay } from '@/lib/copilot/store-utils' +import type { CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types' +import type { StreamingContext } from './types' +import { sseHandlers, type SSEHandler, updateStreamingMessage } from './handlers' + +const logger = createLogger('CopilotClientSubagentHandlers') + +export function appendSubAgentContent( + context: StreamingContext, + parentToolCallId: string, + text: string +) { + if (!context.subAgentContent[parentToolCallId]) { + context.subAgentContent[parentToolCallId] = '' + } + if (!context.subAgentBlocks[parentToolCallId]) { + context.subAgentBlocks[parentToolCallId] = [] + } + context.subAgentContent[parentToolCallId] += text + const blocks = context.subAgentBlocks[parentToolCallId] + const lastBlock = blocks[blocks.length - 1] + if (lastBlock && lastBlock.type === 'subagent_text') { + lastBlock.content = (lastBlock.content || '') + text + } else { + blocks.push({ + type: 'subagent_text', + content: text, + timestamp: Date.now(), + }) + } +} + +export function updateToolCallWithSubAgentData( + context: StreamingContext, + get: () => CopilotStore, + set: any, + parentToolCallId: string +) { + const { toolCallsById } = get() + const parentToolCall = toolCallsById[parentToolCallId] + if (!parentToolCall) { + logger.warn('[SubAgent] updateToolCallWithSubAgentData: parent tool call not found', { + parentToolCallId, + availableToolCallIds: Object.keys(toolCallsById), + }) + return + } + + const blocks = context.subAgentBlocks[parentToolCallId] ?? [] + + const updatedToolCall: CopilotToolCall = { + ...parentToolCall, + subAgentContent: context.subAgentContent[parentToolCallId] || '', + subAgentToolCalls: context.subAgentToolCalls[parentToolCallId] ?? 
[], + subAgentBlocks: blocks, + subAgentStreaming: true, + } + + logger.info('[SubAgent] Updating tool call with subagent data', { + parentToolCallId, + parentToolName: parentToolCall.name, + subAgentContentLength: updatedToolCall.subAgentContent?.length, + subAgentBlocksCount: updatedToolCall.subAgentBlocks?.length, + subAgentToolCallsCount: updatedToolCall.subAgentToolCalls?.length, + }) + + const updatedMap = { ...toolCallsById, [parentToolCallId]: updatedToolCall } + set({ toolCallsById: updatedMap }) + + let foundInContentBlocks = false + for (let i = 0; i < context.contentBlocks.length; i++) { + const b = context.contentBlocks[i] as any + if (b.type === 'tool_call' && b.toolCall?.id === parentToolCallId) { + context.contentBlocks[i] = { ...b, toolCall: updatedToolCall } + foundInContentBlocks = true + break + } + } + + if (!foundInContentBlocks) { + logger.warn('[SubAgent] Parent tool call not found in contentBlocks', { + parentToolCallId, + contentBlocksCount: context.contentBlocks.length, + toolCallBlockIds: context.contentBlocks + .filter((b: any) => b.type === 'tool_call') + .map((b: any) => b.toolCall?.id), + }) + } + + updateStreamingMessage(set, context) +} + +export const subAgentSSEHandlers: Record = { + start: () => { + // Subagent start event - no action needed, parent is already tracked from subagent_start + }, + + content: (data, context, get, set) => { + const parentToolCallId = context.subAgentParentToolCallId + logger.info('[SubAgent] content event', { + parentToolCallId, + hasData: !!data.data, + dataPreview: typeof data.data === 'string' ? 
data.data.substring(0, 50) : null, + }) + if (!parentToolCallId || !data.data) { + logger.warn('[SubAgent] content missing parentToolCallId or data', { + parentToolCallId, + hasData: !!data.data, + }) + return + } + + appendSubAgentContent(context, parentToolCallId, data.data) + + updateToolCallWithSubAgentData(context, get, set, parentToolCallId) + }, + + reasoning: (data, context, get, set) => { + const parentToolCallId = context.subAgentParentToolCallId + const phase = data?.phase || data?.data?.phase + if (!parentToolCallId) return + + if (phase === 'start' || phase === 'end') return + + const chunk = typeof data?.data === 'string' ? data.data : data?.content || '' + if (!chunk) return + + appendSubAgentContent(context, parentToolCallId, chunk) + + updateToolCallWithSubAgentData(context, get, set, parentToolCallId) + }, + + tool_generating: () => { + // Tool generating event - no action needed, we'll handle the actual tool_call + }, + + tool_call: async (data, context, get, set) => { + const parentToolCallId = context.subAgentParentToolCallId + if (!parentToolCallId) return + + const toolData = data?.data ?? {} + const id: string | undefined = toolData.id || data?.toolCallId + const name: string | undefined = toolData.name || data?.toolName + if (!id || !name) return + const isPartial = toolData.partial === true + + let args = toolData.arguments || toolData.input || data?.arguments || data?.input + + if (typeof args === 'string') { + try { + args = JSON.parse(args) + } catch { + logger.warn('[SubAgent] Failed to parse arguments string', { args }) + } + } + + logger.info('[SubAgent] tool_call received', { + id, + name, + hasArgs: !!args, + argsKeys: args ? Object.keys(args) : [], + toolDataKeys: Object.keys(toolData), + dataKeys: Object.keys(data ?? 
{}), + }) + + if (!context.subAgentToolCalls[parentToolCallId]) { + context.subAgentToolCalls[parentToolCallId] = [] + } + if (!context.subAgentBlocks[parentToolCallId]) { + context.subAgentBlocks[parentToolCallId] = [] + } + + const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex((tc) => tc.id === id) + const subAgentToolCall: CopilotToolCall = { + id, + name, + state: ClientToolCallState.pending, + ...(args ? { params: args } : {}), + display: resolveToolDisplay(name, ClientToolCallState.pending, id, args), + } + + if (existingIndex >= 0) { + context.subAgentToolCalls[parentToolCallId][existingIndex] = subAgentToolCall + } else { + context.subAgentToolCalls[parentToolCallId].push(subAgentToolCall) + + context.subAgentBlocks[parentToolCallId].push({ + type: 'subagent_tool_call', + toolCall: subAgentToolCall, + timestamp: Date.now(), + }) + } + + const { toolCallsById } = get() + const updated = { ...toolCallsById, [id]: subAgentToolCall } + set({ toolCallsById: updated }) + + updateToolCallWithSubAgentData(context, get, set, parentToolCallId) + + if (isPartial) { + return + } + }, + + tool_result: (data, context, get, set) => { + const parentToolCallId = context.subAgentParentToolCallId + if (!parentToolCallId) return + + const toolCallId: string | undefined = data?.toolCallId || data?.data?.id + const success: boolean | undefined = data?.success !== false + if (!toolCallId) return + + if (!context.subAgentToolCalls[parentToolCallId]) return + if (!context.subAgentBlocks[parentToolCallId]) return + + const targetState = success ? 
ClientToolCallState.success : ClientToolCallState.error + const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex( + (tc) => tc.id === toolCallId + ) + + if (existingIndex >= 0) { + const existing = context.subAgentToolCalls[parentToolCallId][existingIndex] + const updatedSubAgentToolCall = { + ...existing, + state: targetState, + display: resolveToolDisplay(existing.name, targetState, toolCallId, existing.params), + } + context.subAgentToolCalls[parentToolCallId][existingIndex] = updatedSubAgentToolCall + + for (const block of context.subAgentBlocks[parentToolCallId]) { + if (block.type === 'subagent_tool_call' && block.toolCall?.id === toolCallId) { + block.toolCall = updatedSubAgentToolCall + break + } + } + + const { toolCallsById } = get() + if (toolCallsById[toolCallId]) { + const updatedMap = { + ...toolCallsById, + [toolCallId]: updatedSubAgentToolCall, + } + set({ toolCallsById: updatedMap }) + logger.info('[SubAgent] Updated subagent tool call state in toolCallsById', { + toolCallId, + name: existing.name, + state: targetState, + }) + } + } + + updateToolCallWithSubAgentData(context, get, set, parentToolCallId) + }, + + done: (_data, context, get, set) => { + const parentToolCallId = context.subAgentParentToolCallId + if (!parentToolCallId) return + + updateToolCallWithSubAgentData(context, get, set, parentToolCallId) + }, +} + +export async function applySseEvent( + data: any, + context: StreamingContext, + get: () => CopilotStore, + set: (next: Partial | ((state: CopilotStore) => Partial)) => void +): Promise { + const normalizedEvent = normalizeSseEvent(data) + if (shouldSkipToolCallEvent(normalizedEvent) || shouldSkipToolResultEvent(normalizedEvent)) { + return true + } + data = normalizedEvent + + if (data.type === 'subagent_start') { + const toolCallId = data.data?.tool_call_id + if (toolCallId) { + context.subAgentParentToolCallId = toolCallId + const { toolCallsById } = get() + const parentToolCall = toolCallsById[toolCallId] + 
/**
 * Routes one normalized SSE event to the appropriate handler.
 *
 * Event flow:
 *  - `subagent_start` opens a subagent session by recording the parent tool
 *    call id on the context and flipping its `subAgentStreaming` flag on;
 *  - `subagent_end` flushes accumulated subagent content/tool-calls/blocks
 *    into the parent tool call and clears the session;
 *  - any event tagged `subagent` while a session is open is dispatched to
 *    `subAgentSSEHandlers`;
 *  - everything else goes to the regular `sseHandlers` table.
 *
 * Returns whether the caller should keep consuming the stream (false once
 * `context.streamComplete` is set). Duplicate tool_call/tool_result events
 * are filtered out up front via the shared skip helpers.
 */
// NOTE(review): generic arguments on `Partial`/`Promise` appear lost in
// extraction — presumably `Partial<CopilotStore>` and `Promise<boolean>`.
export async function applySseEvent(
  data: any,
  context: StreamingContext,
  get: () => CopilotStore,
  set: (next: Partial | ((state: CopilotStore) => Partial)) => void
): Promise {
  const normalizedEvent = normalizeSseEvent(data)
  if (shouldSkipToolCallEvent(normalizedEvent) || shouldSkipToolResultEvent(normalizedEvent)) {
    return true
  }
  data = normalizedEvent

  if (data.type === 'subagent_start') {
    const toolCallId = data.data?.tool_call_id
    if (toolCallId) {
      context.subAgentParentToolCallId = toolCallId
      const { toolCallsById } = get()
      const parentToolCall = toolCallsById[toolCallId]
      if (parentToolCall) {
        const updatedToolCall: CopilotToolCall = {
          ...parentToolCall,
          subAgentStreaming: true,
        }
        const updatedMap = { ...toolCallsById, [toolCallId]: updatedToolCall }
        set({ toolCallsById: updatedMap })
      }
      logger.info('[SSE] Subagent session started', {
        subagent: data.subagent,
        parentToolCallId: toolCallId,
      })
    }
    return true
  }

  if (data.type === 'subagent_end') {
    const parentToolCallId = context.subAgentParentToolCallId
    if (parentToolCallId) {
      const { toolCallsById } = get()
      const parentToolCall = toolCallsById[parentToolCallId]
      if (parentToolCall) {
        // Copy the accumulated subagent state onto the parent so it survives
        // after the per-stream context is discarded.
        const updatedToolCall: CopilotToolCall = {
          ...parentToolCall,
          subAgentContent: context.subAgentContent[parentToolCallId] || '',
          subAgentToolCalls: context.subAgentToolCalls[parentToolCallId] ?? [],
          subAgentBlocks: context.subAgentBlocks[parentToolCallId] ?? [],
          subAgentStreaming: false,
        }
        const updatedMap = { ...toolCallsById, [parentToolCallId]: updatedToolCall }
        set({ toolCallsById: updatedMap })
        logger.info('[SSE] Subagent session ended', {
          subagent: data.subagent,
          parentToolCallId,
          contentLength: context.subAgentContent[parentToolCallId]?.length || 0,
          toolCallCount: context.subAgentToolCalls[parentToolCallId]?.length || 0,
        })
      }
    }
    // Session is closed even if the parent tool call was never found.
    context.subAgentParentToolCallId = undefined
    return true
  }

  if (data.subagent) {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) {
      // Subagent event outside a session: log and drop rather than misroute.
      logger.warn('[SSE] Subagent event without parent tool call ID', {
        type: data.type,
        subagent: data.subagent,
      })
      return true
    }

    logger.info('[SSE] Processing subagent event', {
      type: data.type,
      subagent: data.subagent,
      parentToolCallId,
      hasHandler: !!subAgentSSEHandlers[data.type],
    })

    const subAgentHandler = subAgentSSEHandlers[data.type]
    if (subAgentHandler) {
      await subAgentHandler(data, context, get, set)
    } else {
      logger.warn('[SSE] No handler for subagent event type', { type: data.type })
    }
    return !context.streamComplete
  }

  // Default path: regular (non-subagent) event dispatch.
  const handler = sseHandlers[data.type] || sseHandlers.default
  await handler(data, context, get, set)
  return !context.streamComplete
}
filteredBlocks = Object.entries(blocksWithSubblockValues).reduce( + (acc, [blockId, block]) => { + if (block?.type && block?.name) { + acc[blockId] = { + ...block, + id: block.id || blockId, + enabled: block.enabled !== undefined ? block.enabled : true, + horizontalHandles: block.horizontalHandles !== undefined ? block.horizontalHandles : true, + height: block.height !== undefined ? block.height : 90, + subBlocks: block.subBlocks ?? {}, + outputs: block.outputs ?? {}, + data: block.data ?? {}, + position: block.position || { x: 0, y: 0 }, + } + } + return acc + }, + {} as WorkflowState['blocks'] + ) + + return { + blocks: filteredBlocks, + edges: rawState.edges ?? [], + loops: rawState.loops ?? {}, + parallels: rawState.parallels ?? {}, + lastSaved: rawState.lastSaved || Date.now(), + deploymentStatuses: rawState.deploymentStatuses ?? {}, + } +} + +export async function saveMessageCheckpoint( + messageId: string, + get: () => CopilotStore, + set: (partial: Partial | ((state: CopilotStore) => Partial)) => void +): Promise { + const { workflowId, currentChat, messageSnapshots, messageCheckpoints } = get() + if (!workflowId || !currentChat?.id) return false + + const snapshot = messageSnapshots[messageId] + if (!snapshot) return false + + const nextSnapshots = { ...messageSnapshots } + delete nextSnapshots[messageId] + set({ messageSnapshots: nextSnapshots }) + + try { + const response = await fetch('/api/copilot/checkpoints', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + workflowId, + chatId: currentChat.id, + messageId, + workflowState: JSON.stringify(snapshot), + }), + }) + + if (!response.ok) { + throw new Error(`Failed to create checkpoint: ${response.statusText}`) + } + + const result = await response.json() + const newCheckpoint = result.checkpoint + if (newCheckpoint) { + const existingCheckpoints = messageCheckpoints[messageId] ?? 
[] + const updatedCheckpoints = { + ...messageCheckpoints, + [messageId]: [newCheckpoint, ...existingCheckpoints], + } + set({ messageCheckpoints: updatedCheckpoints }) + } + + return true + } catch (error) { + logger.error('Failed to create checkpoint from snapshot:', error) + return false + } +} + +export function extractToolCallsRecursively( + toolCall: CopilotToolCall, + map: Record +): void { + if (!toolCall?.id) return + map[toolCall.id] = toolCall + + if (Array.isArray(toolCall.subAgentBlocks)) { + for (const block of toolCall.subAgentBlocks) { + if (block?.type === 'subagent_tool_call' && block.toolCall?.id) { + extractToolCallsRecursively(block.toolCall, map) + } + } + } + + if (Array.isArray(toolCall.subAgentToolCalls)) { + for (const subTc of toolCall.subAgentToolCalls) { + extractToolCallsRecursively(subTc, map) + } + } +} + +export function buildToolCallsById(messages: CopilotMessage[]): Record { + const toolCallsById: Record = {} + for (const msg of messages) { + if (msg.contentBlocks) { + for (const block of msg.contentBlocks as any[]) { + if (block?.type === 'tool_call' && block.toolCall?.id) { + extractToolCallsRecursively(block.toolCall, toolCallsById) + } + } + } + } + return toolCallsById +} diff --git a/apps/sim/lib/copilot/messages/credential-masking.ts b/apps/sim/lib/copilot/messages/credential-masking.ts new file mode 100644 index 0000000000..f0e64eef84 --- /dev/null +++ b/apps/sim/lib/copilot/messages/credential-masking.ts @@ -0,0 +1,28 @@ +export function maskCredentialIdsInValue(value: any, credentialIds: Set): any { + if (!value || credentialIds.size === 0) return value + + if (typeof value === 'string') { + let masked = value + const sortedIds = Array.from(credentialIds).sort((a, b) => b.length - a.length) + for (const id of sortedIds) { + if (id && masked.includes(id)) { + masked = masked.split(id).join('••••••••') + } + } + return masked + } + + if (Array.isArray(value)) { + return value.map((item) => maskCredentialIdsInValue(item, 
credentialIds)) + } + + if (typeof value === 'object') { + const masked: any = {} + for (const key of Object.keys(value)) { + masked[key] = maskCredentialIdsInValue(value[key], credentialIds) + } + return masked + } + + return value +} diff --git a/apps/sim/lib/copilot/messages/index.ts b/apps/sim/lib/copilot/messages/index.ts new file mode 100644 index 0000000000..4525fcdd87 --- /dev/null +++ b/apps/sim/lib/copilot/messages/index.ts @@ -0,0 +1,3 @@ +export * from './credential-masking' +export * from './serialization' +export * from './checkpoints' diff --git a/apps/sim/lib/copilot/messages/serialization.ts b/apps/sim/lib/copilot/messages/serialization.ts new file mode 100644 index 0000000000..e69bae218d --- /dev/null +++ b/apps/sim/lib/copilot/messages/serialization.ts @@ -0,0 +1,169 @@ +import { createLogger } from '@sim/logger' +import type { CopilotMessage } from '@/stores/panel/copilot/types' +import { maskCredentialIdsInValue } from './credential-masking' + +const logger = createLogger('CopilotMessageSerialization') + +export function clearStreamingFlags(toolCall: any): void { + if (!toolCall) return + + toolCall.subAgentStreaming = false + + if (Array.isArray(toolCall.subAgentBlocks)) { + for (const block of toolCall.subAgentBlocks) { + if (block?.type === 'subagent_tool_call' && block.toolCall) { + clearStreamingFlags(block.toolCall) + } + } + } + if (Array.isArray(toolCall.subAgentToolCalls)) { + for (const subTc of toolCall.subAgentToolCalls) { + clearStreamingFlags(subTc) + } + } +} + +export function normalizeMessagesForUI(messages: CopilotMessage[]): CopilotMessage[] { + try { + for (const message of messages) { + if (message.role === 'assistant') { + logger.info('[normalizeMessagesForUI] Loading assistant message', { + id: message.id, + hasContent: !!message.content?.trim(), + contentBlockCount: message.contentBlocks?.length || 0, + contentBlockTypes: (message.contentBlocks as any[])?.map((b) => b?.type) ?? 
[], + }) + } + } + + for (const message of messages) { + if (message.contentBlocks) { + for (const block of message.contentBlocks as any[]) { + if (block?.type === 'tool_call' && block.toolCall) { + clearStreamingFlags(block.toolCall) + } + } + } + if (message.toolCalls) { + for (const toolCall of message.toolCalls) { + clearStreamingFlags(toolCall) + } + } + } + return messages + } catch { + return messages + } +} + +export function deepClone(obj: T): T { + try { + const json = JSON.stringify(obj) + if (!json || json === 'undefined') { + logger.warn('[deepClone] JSON.stringify returned empty for object', { + type: typeof obj, + isArray: Array.isArray(obj), + length: Array.isArray(obj) ? obj.length : undefined, + }) + return obj + } + const parsed = JSON.parse(json) + if (Array.isArray(obj) && (!Array.isArray(parsed) || parsed.length !== obj.length)) { + logger.warn('[deepClone] Array clone mismatch', { + originalLength: obj.length, + clonedLength: Array.isArray(parsed) ? parsed.length : 'not array', + }) + } + return parsed + } catch (err) { + logger.error('[deepClone] Failed to clone object', { + error: String(err), + type: typeof obj, + isArray: Array.isArray(obj), + }) + return obj + } +} + +export function serializeMessagesForDB( + messages: CopilotMessage[], + credentialIds: Set +): any[] { + const result = messages + .map((msg) => { + let timestamp: string = msg.timestamp + if (typeof timestamp !== 'string') { + const ts = timestamp as any + timestamp = ts instanceof Date ? 
ts.toISOString() : new Date().toISOString() + } + + const serialized: any = { + id: msg.id, + role: msg.role, + content: msg.content || '', + timestamp, + } + + if (Array.isArray(msg.contentBlocks) && msg.contentBlocks.length > 0) { + serialized.contentBlocks = deepClone(msg.contentBlocks) + } + + if (Array.isArray((msg as any).toolCalls) && (msg as any).toolCalls.length > 0) { + serialized.toolCalls = deepClone((msg as any).toolCalls) + } + + if (Array.isArray(msg.fileAttachments) && msg.fileAttachments.length > 0) { + serialized.fileAttachments = deepClone(msg.fileAttachments) + } + + if (Array.isArray((msg as any).contexts) && (msg as any).contexts.length > 0) { + serialized.contexts = deepClone((msg as any).contexts) + } + + if (Array.isArray(msg.citations) && msg.citations.length > 0) { + serialized.citations = deepClone(msg.citations) + } + + if (msg.errorType) { + serialized.errorType = msg.errorType + } + + return maskCredentialIdsInValue(serialized, credentialIds) + }) + .filter((msg) => { + if (msg.role === 'assistant') { + const hasContent = typeof msg.content === 'string' && msg.content.trim().length > 0 + const hasTools = Array.isArray(msg.toolCalls) && msg.toolCalls.length > 0 + const hasBlocks = Array.isArray(msg.contentBlocks) && msg.contentBlocks.length > 0 + return hasContent || hasTools || hasBlocks + } + return true + }) + + for (const msg of messages) { + if (msg.role === 'assistant') { + logger.info('[serializeMessagesForDB] Input assistant message', { + id: msg.id, + hasContent: !!msg.content?.trim(), + contentBlockCount: msg.contentBlocks?.length || 0, + contentBlockTypes: (msg.contentBlocks as any[])?.map((b) => b?.type) ?? [], + }) + } + } + + logger.info('[serializeMessagesForDB] Serialized messages', { + inputCount: messages.length, + outputCount: result.length, + sample: + result.length > 0 + ? 
{ + role: result[result.length - 1].role, + hasContent: !!result[result.length - 1].content, + contentBlockCount: result[result.length - 1].contentBlocks?.length || 0, + toolCallCount: result[result.length - 1].toolCalls?.length || 0, + } + : null, + }) + + return result +} diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts index abbd2c32cb..d885e9876c 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts @@ -21,6 +21,19 @@ const logger = createLogger('CopilotSseHandlers') // Normalization + dedupe helpers live in sse-utils to keep server/client in sync. +function inferToolSuccess(data: Record | undefined): { + success: boolean + hasResultData: boolean + hasError: boolean +} { + const hasExplicitSuccess = data?.success !== undefined || data?.result?.success !== undefined + const explicitSuccess = data?.success ?? data?.result?.success + const hasResultData = data?.result !== undefined || data?.data !== undefined + const hasError = !!data?.error || !!data?.result?.error + const success = hasExplicitSuccess ? !!explicitSuccess : hasResultData && !hasError + return { success, hasResultData, hasError } +} + export type SSEHandler = ( event: SSEEvent, context: StreamingContext, @@ -47,14 +60,7 @@ export const sseHandlers: Record = { const current = context.toolCalls.get(toolCallId) if (!current) return - // Determine success: explicit success field, or if there's result data without explicit failure. - const hasExplicitSuccess = data?.success !== undefined || data?.result?.success !== undefined - const explicitSuccess = data?.success ?? data?.result?.success - const hasResultData = data?.result !== undefined || data?.data !== undefined - const hasError = !!data?.error || !!data?.result?.error - - // If explicitly set, use that; otherwise infer from data presence. - const success = hasExplicitSuccess ? 
!!explicitSuccess : hasResultData && !hasError + const { success, hasResultData, hasError } = inferToolSuccess(data) current.status = success ? 'success' : 'error' current.endTime = Date.now() @@ -344,12 +350,7 @@ export const subAgentHandlers: Record = { // Also update in main toolCalls (where we added it for execution). const mainToolCall = context.toolCalls.get(toolCallId) - // Use same success inference logic as main handler. - const hasExplicitSuccess = data?.success !== undefined || data?.result?.success !== undefined - const explicitSuccess = data?.success ?? data?.result?.success - const hasResultData = data?.result !== undefined || data?.data !== undefined - const hasError = !!data?.error || !!data?.result?.error - const success = hasExplicitSuccess ? !!explicitSuccess : hasResultData && !hasError + const { success, hasResultData, hasError } = inferToolSuccess(data) const status = success ? 'success' : 'error' const endTime = Date.now() diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts index 41610306a6..2882a8bbfb 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts @@ -74,27 +74,34 @@ const SERVER_TOOLS = new Set([ 'knowledge_base', ]) -const SIM_WORKFLOW_TOOLS = new Set([ - 'get_user_workflow', - 'get_workflow_from_name', - 'list_user_workflows', - 'list_user_workspaces', - 'list_folders', - 'create_workflow', - 'create_folder', - 'get_workflow_data', - 'get_block_outputs', - 'get_block_upstream_references', - 'run_workflow', - 'set_global_workflow_variables', - 'deploy_api', - 'deploy_chat', - 'deploy_mcp', - 'redeploy', - 'check_deployment_status', - 'list_workspace_mcp_servers', - 'create_workspace_mcp_server', -]) +const SIM_WORKFLOW_TOOL_HANDLERS: Record< + string, + (params: Record, context: ExecutionContext) => Promise +> = { + get_user_workflow: (p, c) => executeGetUserWorkflow(p as 
GetUserWorkflowParams, c), + get_workflow_from_name: (p, c) => executeGetWorkflowFromName(p as GetWorkflowFromNameParams, c), + list_user_workflows: (p, c) => executeListUserWorkflows(p as ListUserWorkflowsParams, c), + list_user_workspaces: (_p, c) => executeListUserWorkspaces(c), + list_folders: (p, c) => executeListFolders(p as ListFoldersParams, c), + create_workflow: (p, c) => executeCreateWorkflow(p as CreateWorkflowParams, c), + create_folder: (p, c) => executeCreateFolder(p as CreateFolderParams, c), + get_workflow_data: (p, c) => executeGetWorkflowData(p as GetWorkflowDataParams, c), + get_block_outputs: (p, c) => executeGetBlockOutputs(p as GetBlockOutputsParams, c), + get_block_upstream_references: (p, c) => + executeGetBlockUpstreamReferences(p as unknown as GetBlockUpstreamReferencesParams, c), + run_workflow: (p, c) => executeRunWorkflow(p as RunWorkflowParams, c), + set_global_workflow_variables: (p, c) => + executeSetGlobalWorkflowVariables(p as SetGlobalWorkflowVariablesParams, c), + deploy_api: (p, c) => executeDeployApi(p as DeployApiParams, c), + deploy_chat: (p, c) => executeDeployChat(p as DeployChatParams, c), + deploy_mcp: (p, c) => executeDeployMcp(p as DeployMcpParams, c), + redeploy: (_p, c) => executeRedeploy(c), + check_deployment_status: (p, c) => executeCheckDeploymentStatus(p as CheckDeploymentStatusParams, c), + list_workspace_mcp_servers: (p, c) => + executeListWorkspaceMcpServers(p as ListWorkspaceMcpServersParams, c), + create_workspace_mcp_server: (p, c) => + executeCreateWorkspaceMcpServer(p as CreateWorkspaceMcpServerParams, c), +} /** * Execute a tool server-side without calling internal routes. 
/**
 * Executes a sim-workflow tool by looking it up in
 * `SIM_WORKFLOW_TOOL_HANDLERS` and delegating to its executor.
 *
 * @param toolName - tool identifier (a key of the dispatch table)
 * @param params   - raw tool parameters; each handler narrows them itself
 * @param context  - server-side execution context (user/workspace scope)
 * @returns the handler's result, or a `{ success: false, error }` result for
 *          unknown tool names — the miss is reported in-band, never thrown.
 */
async function executeSimWorkflowTool(
  toolName: string,
  params: Record,
  context: ExecutionContext
): Promise {
  const handler = SIM_WORKFLOW_TOOL_HANDLERS[toolName]
  if (!handler) return { success: false, error: `Unsupported workflow tool: ${toolName}` }
  return handler(params, context)
}
ClientToolCallState.success, + ] + for (const fallbackState of fallbackOrder) { + const fallback = entry.displayNames[fallbackState] + if (fallback?.text || fallback?.icon) return fallback + } + + return humanizedFallback(toolName, state) +} + +export function humanizedFallback( + toolName: string, + state: ClientToolCallState +): ClientToolDisplay | undefined { + const formattedName = toolName.replace(/_/g, ' ').replace(/\b\w/g, (c) => c.toUpperCase()) + const stateVerb = + state === ClientToolCallState.success + ? 'Executed' + : state === ClientToolCallState.error + ? 'Failed' + : state === ClientToolCallState.rejected || state === ClientToolCallState.aborted + ? 'Skipped' + : 'Executing' + return { text: `${stateVerb} ${formattedName}`, icon: Loader2 } +} + +export function isRejectedState(state: string): boolean { + return state === 'rejected' +} + +export function isReviewState(state: string): boolean { + return state === 'review' +} + +export function isBackgroundState(state: string): boolean { + return state === 'background' +} + +export function isTerminalState(state: string): boolean { + return ( + state === ClientToolCallState.success || + state === ClientToolCallState.error || + state === ClientToolCallState.rejected || + state === ClientToolCallState.aborted || + isReviewState(state) || + isBackgroundState(state) + ) +} + +export function abortAllInProgressTools( + set: any, + get: () => CopilotStore +) { + try { + const { toolCallsById, messages } = get() + const updatedMap = { ...toolCallsById } + const abortedIds = new Set() + let hasUpdates = false + for (const [id, tc] of Object.entries(toolCallsById)) { + const st = tc.state as any + const isTerminal = + st === ClientToolCallState.success || + st === ClientToolCallState.error || + st === ClientToolCallState.rejected || + st === ClientToolCallState.aborted + if (!isTerminal || isReviewState(st)) { + abortedIds.add(id) + updatedMap[id] = { + ...tc, + state: ClientToolCallState.aborted, + 
subAgentStreaming: false, + display: resolveToolDisplay(tc.name, ClientToolCallState.aborted, id, (tc as any).params), + } + hasUpdates = true + } else if (tc.subAgentStreaming) { + updatedMap[id] = { + ...tc, + subAgentStreaming: false, + } + hasUpdates = true + } + } + if (abortedIds.size > 0 || hasUpdates) { + set({ toolCallsById: updatedMap }) + set((s: CopilotStore) => { + const msgs = [...s.messages] + for (let mi = msgs.length - 1; mi >= 0; mi--) { + const m = msgs[mi] as any + if (m.role !== 'assistant' || !Array.isArray(m.contentBlocks)) continue + let changed = false + const blocks = m.contentBlocks.map((b: any) => { + if (b?.type === 'tool_call' && b.toolCall?.id && abortedIds.has(b.toolCall.id)) { + changed = true + const prev = b.toolCall + return { + ...b, + toolCall: { + ...prev, + state: ClientToolCallState.aborted, + display: resolveToolDisplay( + prev?.name, + ClientToolCallState.aborted, + prev?.id, + prev?.params + ), + }, + } + } + return b + }) + if (changed) { + msgs[mi] = { ...m, contentBlocks: blocks } + break + } + } + return { messages: msgs } + }) + } + } catch {} +} + +export function stripTodoTags(text: string): string { + if (!text) return text + return text + .replace(/[\s\S]*?<\/marktodo>/g, '') + .replace(/[\s\S]*?<\/checkofftodo>/g, '') + .replace(/[\s\S]*?<\/design_workflow>/g, '') + .replace(/[ \t]+\n/g, '\n') + .replace(/\n{2,}/g, '\n') +} diff --git a/apps/sim/lib/copilot/tools/client/tool-display-registry.ts b/apps/sim/lib/copilot/tools/client/tool-display-registry.ts index 42633ab37f..45eb2f0f5c 100644 --- a/apps/sim/lib/copilot/tools/client/tool-display-registry.ts +++ b/apps/sim/lib/copilot/tools/client/tool-display-registry.ts @@ -1,4 +1,3 @@ -// @ts-nocheck import type { LucideIcon } from 'lucide-react' import { Blocks, @@ -70,7 +69,7 @@ export interface ClientToolDisplay { } export type DynamicTextFormatter = ( - params: Record, + params: Record, state: ClientToolCallState ) => string | undefined @@ -101,6 +100,9 @@ 
interface ToolMetadata { subagent?: { streamingLabel?: string completedLabel?: string + shouldCollapse?: boolean + outputArtifacts?: string[] + hideThinkingText?: boolean } interrupt?: any customRenderer?: string @@ -115,6 +117,21 @@ interface ToolDisplayEntry { uiConfig?: ToolUIConfig } +type WorkflowDataType = 'global_variables' | 'custom_tools' | 'mcp_tools' | 'files' + +type NavigationDestination = 'workflow' | 'logs' | 'templates' | 'vector_db' | 'settings' + +function formatDuration(seconds: number): string { + if (seconds < 60) return `${Math.round(seconds)}s` + const mins = Math.floor(seconds / 60) + const secs = Math.round(seconds % 60) + if (mins < 60) return secs > 0 ? `${mins}m ${secs}s` : `${mins}m` + const hours = Math.floor(mins / 60) + const remMins = mins % 60 + if (remMins > 0) return `${hours}h ${remMins}m` + return `${hours}h` +} + function toUiConfig(metadata?: ToolMetadata): ToolUIConfig | undefined { const legacy = metadata?.uiConfig const subagent = legacy?.subagent @@ -1197,7 +1214,7 @@ const META_make_api_request: ToolMetadata = { { key: 'method', label: 'Method', width: '26%', editable: true, mono: true }, { key: 'url', label: 'Endpoint', width: '74%', editable: true, mono: true }, ], - extractRows: (params) => { + extractRows: (params: Record): Array<[string, ...any[]]> => { return [['request', (params.method || 'GET').toUpperCase(), params.url || '']] }, }, @@ -1665,7 +1682,7 @@ const META_run_workflow: ToolMetadata = { { key: 'input', label: 'Input', width: '36%' }, { key: 'value', label: 'Value', width: '64%', editable: true, mono: true }, ], - extractRows: (params) => { + extractRows: (params: Record): Array<[string, ...any[]]> => { let inputs = params.input || params.inputs || params.workflow_input if (typeof inputs === 'string') { try { @@ -1952,7 +1969,7 @@ const META_set_environment_variables: ToolMetadata = { { key: 'name', label: 'Variable', width: '36%', editable: true }, { key: 'value', label: 'Value', width: '64%', editable: 
true, mono: true }, ], - extractRows: (params) => { + extractRows: (params: Record): Array<[string, ...any[]]> => { const variables = params.variables || {} const entries = Array.isArray(variables) ? variables.map((v: any, i: number) => [String(i), v.name || `var_${i}`, v.value || '']) @@ -2021,7 +2038,7 @@ const META_set_global_workflow_variables: ToolMetadata = { { key: 'name', label: 'Name', width: '40%', editable: true, mono: true }, { key: 'value', label: 'Value', width: '60%', editable: true, mono: true }, ], - extractRows: (params) => { + extractRows: (params: Record): Array<[string, ...any[]]> => { const operations = params.operations || [] return operations.map((op: any, idx: number) => [ String(idx), diff --git a/apps/sim/stores/panel/copilot/store.ts b/apps/sim/stores/panel/copilot/store.ts index 4bf168593d..694123c5f1 100644 --- a/apps/sim/stores/panel/copilot/store.ts +++ b/apps/sim/stores/panel/copilot/store.ts @@ -4,1928 +4,132 @@ import { createLogger } from '@sim/logger' import { create } from 'zustand' import { devtools } from 'zustand/middleware' import { type CopilotChat, sendStreamingMessage } from '@/lib/copilot/api' -import type { CopilotTransportMode } from '@/lib/copilot/models' -import { - normalizeSseEvent, - shouldSkipToolCallEvent, - shouldSkipToolResultEvent, -} from '@/lib/copilot/orchestrator/sse-utils' -import { - ClientToolCallState, - type ClientToolDisplay, - TOOL_DISPLAY_REGISTRY, -} from '@/lib/copilot/tools/client/tool-display-registry' -import { getQueryClient } from '@/app/_shell/providers/query-provider' -import { subscriptionKeys } from '@/hooks/queries/subscription' -import type { - ChatContext, - CopilotMessage, - CopilotStore, - CopilotStreamInfo, - CopilotToolCall, - MessageFileAttachment, -} from '@/stores/panel/copilot/types' -import { useWorkflowDiffStore } from '@/stores/workflow-diff/store' -import { useSubBlockStore } from '@/stores/workflows/subblock/store' -import { mergeSubblockState } from 
'@/stores/workflows/utils' -import { useWorkflowStore } from '@/stores/workflows/workflow/store' -import type { WorkflowState } from '@/stores/workflows/workflow/types' - -const logger = createLogger('CopilotStore') - -const STREAM_STORAGE_KEY = 'copilot_active_stream' - -/** - * Flag set on beforeunload to suppress continue option during page refresh/close. - * Aborts during unload should NOT show the continue button. - */ -let isPageUnloading = false -if (typeof window !== 'undefined') { - window.addEventListener('beforeunload', () => { - isPageUnloading = true - }) -} - -function readActiveStreamFromStorage(): CopilotStreamInfo | null { - if (typeof window === 'undefined') return null - try { - const raw = window.sessionStorage.getItem(STREAM_STORAGE_KEY) - logger.info('[Copilot] Reading stream from storage', { - hasRaw: !!raw, - rawPreview: raw ? raw.substring(0, 100) : null, - }) - if (!raw) return null - const parsed = JSON.parse(raw) as CopilotStreamInfo - return parsed?.streamId ? 
parsed : null - } catch (e) { - logger.warn('[Copilot] Failed to read stream from storage', { error: String(e) }) - return null - } -} - -function writeActiveStreamToStorage(info: CopilotStreamInfo | null): void { - if (typeof window === 'undefined') return - try { - if (!info) { - logger.info('[Copilot] Clearing stream from storage', { - isPageUnloading, - stack: new Error().stack?.split('\n').slice(1, 4).join(' <- '), - }) - window.sessionStorage.removeItem(STREAM_STORAGE_KEY) - return - } - const payload = JSON.stringify(info) - window.sessionStorage.setItem(STREAM_STORAGE_KEY, payload) - const verified = window.sessionStorage.getItem(STREAM_STORAGE_KEY) === payload - logger.info('[Copilot] Writing stream to storage', { - streamId: info.streamId, - lastEventId: info.lastEventId, - userMessageContent: info.userMessageContent?.slice(0, 30), - verified, - }) - } catch (e) { - logger.error('[Copilot] Failed to write stream to storage', { error: String(e) }) - } -} - -function updateActiveStreamEventId( - get: () => CopilotStore, - set: (next: Partial) => void, - streamId: string, - eventId: number -): void { - const current = get().activeStream - if (!current || current.streamId !== streamId) return - if (eventId <= (current.lastEventId || 0)) return - const next = { ...current, lastEventId: eventId } - set({ activeStream: next }) - writeActiveStreamToStorage(next) -} - -// On module load, clear any lingering diff preview (fresh page refresh) -try { - const diffStore = useWorkflowDiffStore.getState() - if (diffStore?.hasActiveDiff) { - diffStore.clearDiff() - } -} catch {} - -// Constants -const TEXT_BLOCK_TYPE = 'text' -const THINKING_BLOCK_TYPE = 'thinking' -const DATA_PREFIX = 'data: ' -const DATA_PREFIX_LENGTH = 6 -const CONTINUE_OPTIONS_TAG = '{"1":"Continue"}' - -// Resolve display text/icon for a tool based on its state -function resolveToolDisplay( - toolName: string | undefined, - state: ClientToolCallState, - _toolCallId?: string, - params?: Record -): 
ClientToolDisplay | undefined { - if (!toolName) return undefined - const entry = TOOL_DISPLAY_REGISTRY[toolName] - if (!entry) return humanizedFallback(toolName, state) - - // Check dynamic text first - if (entry.uiConfig?.dynamicText && params) { - const dynamicText = entry.uiConfig.dynamicText(params, state) - const stateDisplay = entry.displayNames[state] - if (dynamicText && stateDisplay?.icon) { - return { text: dynamicText, icon: stateDisplay.icon } - } - } - - // Exact state match - const display = entry.displayNames[state] - if (display?.text || display?.icon) return display - - // Fallback through states - const fallbackOrder = [ - ClientToolCallState.generating, - ClientToolCallState.executing, - ClientToolCallState.success, - ] - for (const fallbackState of fallbackOrder) { - const fallback = entry.displayNames[fallbackState] - if (fallback?.text || fallback?.icon) return fallback - } - - return humanizedFallback(toolName, state) -} - -function humanizedFallback( - toolName: string, - state: ClientToolCallState -): ClientToolDisplay | undefined { - const formattedName = toolName.replace(/_/g, ' ').replace(/\b\w/g, (c) => c.toUpperCase()) - const stateVerb = - state === ClientToolCallState.success - ? 'Executed' - : state === ClientToolCallState.error - ? 'Failed' - : state === ClientToolCallState.rejected || state === ClientToolCallState.aborted - ? 
'Skipped' - : 'Executing' - return { text: `${stateVerb} ${formattedName}`, icon: undefined as any } -} - -// Helper: check if a tool state is rejected -function isRejectedState(state: any): boolean { - try { - return state === 'rejected' || state === (ClientToolCallState as any).rejected - } catch { - return state === 'rejected' - } -} - -// Helper: check if a tool state is review (terminal for build/edit preview) -function isReviewState(state: any): boolean { - try { - return state === 'review' || state === (ClientToolCallState as any).review - } catch { - return state === 'review' - } -} - -// Helper: check if a tool state is background (terminal) -function isBackgroundState(state: any): boolean { - try { - return state === 'background' || state === (ClientToolCallState as any).background - } catch { - return state === 'background' - } -} - -/** - * Checks if a tool call state is terminal (success, error, rejected, aborted, review, or background) - */ -function isTerminalState(state: any): boolean { - return ( - state === ClientToolCallState.success || - state === ClientToolCallState.error || - state === ClientToolCallState.rejected || - state === ClientToolCallState.aborted || - isReviewState(state) || - isBackgroundState(state) - ) -} - -// Helper: abort all in-progress client tools and update inline blocks -function abortAllInProgressTools(set: any, get: () => CopilotStore) { - try { - const { toolCallsById, messages } = get() - const updatedMap = { ...toolCallsById } - const abortedIds = new Set() - let hasUpdates = false - for (const [id, tc] of Object.entries(toolCallsById)) { - const st = tc.state as any - // Abort anything not already terminal success/error/rejected/aborted - const isTerminal = - st === ClientToolCallState.success || - st === ClientToolCallState.error || - st === ClientToolCallState.rejected || - st === ClientToolCallState.aborted - if (!isTerminal || isReviewState(st)) { - abortedIds.add(id) - updatedMap[id] = { - ...tc, - state: 
ClientToolCallState.aborted, - subAgentStreaming: false, - display: resolveToolDisplay(tc.name, ClientToolCallState.aborted, id, (tc as any).params), - } - hasUpdates = true - } else if (tc.subAgentStreaming) { - updatedMap[id] = { - ...tc, - subAgentStreaming: false, - } - hasUpdates = true - } - } - if (abortedIds.size > 0 || hasUpdates) { - set({ toolCallsById: updatedMap }) - // Update inline blocks in-place for the latest assistant message only (most relevant) - set((s: CopilotStore) => { - const msgs = [...s.messages] - for (let mi = msgs.length - 1; mi >= 0; mi--) { - const m = msgs[mi] as any - if (m.role !== 'assistant' || !Array.isArray(m.contentBlocks)) continue - let changed = false - const blocks = m.contentBlocks.map((b: any) => { - if (b?.type === 'tool_call' && b.toolCall?.id && abortedIds.has(b.toolCall.id)) { - changed = true - const prev = b.toolCall - return { - ...b, - toolCall: { - ...prev, - state: ClientToolCallState.aborted, - display: resolveToolDisplay( - prev?.name, - ClientToolCallState.aborted, - prev?.id, - prev?.params - ), - }, - } - } - return b - }) - if (changed) { - msgs[mi] = { ...m, contentBlocks: blocks } - break - } - } - return { messages: msgs } - }) - } - } catch {} -} - -// Normalize loaded messages so assistant messages render correctly from DB -/** - * Loads messages from DB for UI rendering. - * Messages are stored exactly as they render, so we just need to: - * 1. Clear any streaming flags (messages loaded from DB are never actively streaming) - * 2. 
Return the messages - */ -function normalizeMessagesForUI(messages: CopilotMessage[]): CopilotMessage[] { - try { - // Log what we're loading - for (const message of messages) { - if (message.role === 'assistant') { - logger.info('[normalizeMessagesForUI] Loading assistant message', { - id: message.id, - hasContent: !!message.content?.trim(), - contentBlockCount: message.contentBlocks?.length || 0, - contentBlockTypes: (message.contentBlocks as any[])?.map((b) => b?.type) || [], - }) - } - } - - // Clear streaming flags for all tool calls - for (const message of messages) { - if (message.contentBlocks) { - for (const block of message.contentBlocks as any[]) { - if (block?.type === 'tool_call' && block.toolCall) { - clearStreamingFlags(block.toolCall) - } - } - } - // Also clear from toolCalls array (legacy format) - if (message.toolCalls) { - for (const toolCall of message.toolCalls) { - clearStreamingFlags(toolCall) - } - } - } - return messages - } catch { - return messages - } -} - -/** - * Recursively clears streaming flags from a tool call and its nested subagent tool calls. - * This ensures messages loaded from DB don't appear to be streaming. 
- */ -function clearStreamingFlags(toolCall: any): void { - if (!toolCall) return - - // Always set subAgentStreaming to false - messages loaded from DB are never streaming - toolCall.subAgentStreaming = false - - // Clear nested subagent tool calls - if (Array.isArray(toolCall.subAgentBlocks)) { - for (const block of toolCall.subAgentBlocks) { - if (block?.type === 'subagent_tool_call' && block.toolCall) { - clearStreamingFlags(block.toolCall) - } - } - } - if (Array.isArray(toolCall.subAgentToolCalls)) { - for (const subTc of toolCall.subAgentToolCalls) { - clearStreamingFlags(subTc) - } - } -} - -// Simple object pool for content blocks -class ObjectPool { - private pool: T[] = [] - private createFn: () => T - private resetFn: (obj: T) => void - - constructor(createFn: () => T, resetFn: (obj: T) => void, initialSize = 5) { - this.createFn = createFn - this.resetFn = resetFn - for (let i = 0; i < initialSize; i++) this.pool.push(createFn()) - } - get(): T { - const obj = this.pool.pop() - if (obj) { - this.resetFn(obj) - return obj - } - return this.createFn() - } - release(obj: T): void { - if (this.pool.length < 20) this.pool.push(obj) - } -} - -const contentBlockPool = new ObjectPool( - () => ({ type: '', content: '', timestamp: 0, toolCall: null as any }), - (obj) => { - obj.type = '' - obj.content = '' - obj.timestamp = 0 - ;(obj as any).toolCall = null - ;(obj as any).startTime = undefined - ;(obj as any).duration = undefined - } -) - -// Efficient string builder -class StringBuilder { - private parts: string[] = [] - private length = 0 - append(str: string): void { - this.parts.push(str) - this.length += str.length - } - toString(): string { - const result = this.parts.join('') - this.clear() - return result - } - clear(): void { - this.parts.length = 0 - this.length = 0 - } - get size(): number { - return this.length - } -} - -// Helpers -function createUserMessage( - content: string, - fileAttachments?: MessageFileAttachment[], - contexts?: 
ChatContext[], - messageId?: string -): CopilotMessage { - return { - id: messageId || crypto.randomUUID(), - role: 'user', - content, - timestamp: new Date().toISOString(), - ...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }), - ...(contexts && contexts.length > 0 && { contexts }), - ...(contexts && - contexts.length > 0 && { - contentBlocks: [ - { type: 'contexts', contexts: contexts as any, timestamp: Date.now() }, - ] as any, - }), - } -} - -function createStreamingMessage(): CopilotMessage { - return { - id: crypto.randomUUID(), - role: 'assistant', - content: '', - timestamp: new Date().toISOString(), - } -} - -function createErrorMessage( - messageId: string, - content: string, - errorType?: 'usage_limit' | 'unauthorized' | 'forbidden' | 'rate_limit' | 'upgrade_required' -): CopilotMessage { - return { - id: messageId, - role: 'assistant', - content, - timestamp: new Date().toISOString(), - contentBlocks: [ - { - type: 'text', - content, - timestamp: Date.now(), - }, - ], - errorType, - } -} - -/** - * Builds a workflow snapshot suitable for checkpoint persistence. - */ -function buildCheckpointWorkflowState(workflowId: string): WorkflowState | null { - const rawState = useWorkflowStore.getState().getWorkflowState() - if (!rawState) return null - - const blocksWithSubblockValues = mergeSubblockState(rawState.blocks, workflowId) - - const filteredBlocks = Object.entries(blocksWithSubblockValues).reduce( - (acc, [blockId, block]) => { - if (block?.type && block?.name) { - acc[blockId] = { - ...block, - id: block.id || blockId, - enabled: block.enabled !== undefined ? block.enabled : true, - horizontalHandles: block.horizontalHandles !== undefined ? block.horizontalHandles : true, - height: block.height !== undefined ? 
block.height : 90, - subBlocks: block.subBlocks || {}, - outputs: block.outputs || {}, - data: block.data || {}, - position: block.position || { x: 0, y: 0 }, - } - } - return acc - }, - {} as WorkflowState['blocks'] - ) - - return { - blocks: filteredBlocks, - edges: rawState.edges || [], - loops: rawState.loops || {}, - parallels: rawState.parallels || {}, - lastSaved: rawState.lastSaved || Date.now(), - deploymentStatuses: rawState.deploymentStatuses || {}, - } -} - -/** - * Persists a previously captured snapshot as a workflow checkpoint. - */ -async function saveMessageCheckpoint( - messageId: string, - get: () => CopilotStore, - set: (partial: Partial | ((state: CopilotStore) => Partial)) => void -): Promise { - const { workflowId, currentChat, messageSnapshots, messageCheckpoints } = get() - if (!workflowId || !currentChat?.id) return false - - const snapshot = messageSnapshots[messageId] - if (!snapshot) return false - - const nextSnapshots = { ...messageSnapshots } - delete nextSnapshots[messageId] - set({ messageSnapshots: nextSnapshots }) - - try { - const response = await fetch('/api/copilot/checkpoints', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - workflowId, - chatId: currentChat.id, - messageId, - workflowState: JSON.stringify(snapshot), - }), - }) - - if (!response.ok) { - throw new Error(`Failed to create checkpoint: ${response.statusText}`) - } - - const result = await response.json() - const newCheckpoint = result.checkpoint - if (newCheckpoint) { - const existingCheckpoints = messageCheckpoints[messageId] || [] - const updatedCheckpoints = { - ...messageCheckpoints, - [messageId]: [newCheckpoint, ...existingCheckpoints], - } - set({ messageCheckpoints: updatedCheckpoints }) - } - - return true - } catch (error) { - logger.error('Failed to create checkpoint from snapshot:', error) - return false - } -} - -function stripTodoTags(text: string): string { - if (!text) return text - return text - 
.replace(/[\s\S]*?<\/marktodo>/g, '') - .replace(/[\s\S]*?<\/checkofftodo>/g, '') - .replace(/[\s\S]*?<\/design_workflow>/g, '') - .replace(/[ \t]+\n/g, '\n') - .replace(/\n{2,}/g, '\n') -} - -/** - * Deep clones an object using JSON serialization. - * This ensures we strip any non-serializable data (functions, circular refs). - */ -function deepClone(obj: T): T { - try { - const json = JSON.stringify(obj) - if (!json || json === 'undefined') { - logger.warn('[deepClone] JSON.stringify returned empty for object', { - type: typeof obj, - isArray: Array.isArray(obj), - length: Array.isArray(obj) ? obj.length : undefined, - }) - return obj - } - const parsed = JSON.parse(json) - // Verify the clone worked - if (Array.isArray(obj) && (!Array.isArray(parsed) || parsed.length !== obj.length)) { - logger.warn('[deepClone] Array clone mismatch', { - originalLength: obj.length, - clonedLength: Array.isArray(parsed) ? parsed.length : 'not array', - }) - } - return parsed - } catch (err) { - logger.error('[deepClone] Failed to clone object', { - error: String(err), - type: typeof obj, - isArray: Array.isArray(obj), - }) - return obj - } -} - -/** - * Recursively masks credential IDs in any value (string, object, or array). - * Used during serialization to ensure sensitive IDs are never persisted. 
- */ -function maskCredentialIdsInValue(value: any, credentialIds: Set): any { - if (!value || credentialIds.size === 0) return value - - if (typeof value === 'string') { - let masked = value - // Sort by length descending to mask longer IDs first - const sortedIds = Array.from(credentialIds).sort((a, b) => b.length - a.length) - for (const id of sortedIds) { - if (id && masked.includes(id)) { - masked = masked.split(id).join('••••••••') - } - } - return masked - } - - if (Array.isArray(value)) { - return value.map((item) => maskCredentialIdsInValue(item, credentialIds)) - } - - if (typeof value === 'object') { - const masked: any = {} - for (const key of Object.keys(value)) { - masked[key] = maskCredentialIdsInValue(value[key], credentialIds) - } - return masked - } - - return value -} - -/** - * Serializes messages for database storage. - * Deep clones all fields to ensure proper JSON serialization. - * Masks sensitive credential IDs before persisting. - * This ensures they render identically when loaded back. - */ -function serializeMessagesForDB(messages: CopilotMessage[]): any[] { - // Get credential IDs to mask - const credentialIds = useCopilotStore.getState().sensitiveCredentialIds - - const result = messages - .map((msg) => { - // Deep clone the entire message to ensure all nested data is serializable - // Ensure timestamp is always a string (Zod schema requires it) - let timestamp: string = msg.timestamp - if (typeof timestamp !== 'string') { - const ts = timestamp as any - timestamp = ts instanceof Date ? 
ts.toISOString() : new Date().toISOString() - } - - const serialized: any = { - id: msg.id, - role: msg.role, - content: msg.content || '', - timestamp, - } - - // Deep clone contentBlocks (the main rendering data) - if (Array.isArray(msg.contentBlocks) && msg.contentBlocks.length > 0) { - serialized.contentBlocks = deepClone(msg.contentBlocks) - } - - // Deep clone toolCalls - if (Array.isArray((msg as any).toolCalls) && (msg as any).toolCalls.length > 0) { - serialized.toolCalls = deepClone((msg as any).toolCalls) - } - - // Deep clone file attachments - if (Array.isArray(msg.fileAttachments) && msg.fileAttachments.length > 0) { - serialized.fileAttachments = deepClone(msg.fileAttachments) - } - - // Deep clone contexts - if (Array.isArray((msg as any).contexts) && (msg as any).contexts.length > 0) { - serialized.contexts = deepClone((msg as any).contexts) - } - - // Deep clone citations - if (Array.isArray(msg.citations) && msg.citations.length > 0) { - serialized.citations = deepClone(msg.citations) - } - - // Copy error type - if (msg.errorType) { - serialized.errorType = msg.errorType - } - - // Mask credential IDs in the serialized message before persisting - return maskCredentialIdsInValue(serialized, credentialIds) - }) - .filter((msg) => { - // Filter out empty assistant messages - if (msg.role === 'assistant') { - const hasContent = typeof msg.content === 'string' && msg.content.trim().length > 0 - const hasTools = Array.isArray(msg.toolCalls) && msg.toolCalls.length > 0 - const hasBlocks = Array.isArray(msg.contentBlocks) && msg.contentBlocks.length > 0 - return hasContent || hasTools || hasBlocks - } - return true - }) - - // Log what we're serializing - for (const msg of messages) { - if (msg.role === 'assistant') { - logger.info('[serializeMessagesForDB] Input assistant message', { - id: msg.id, - hasContent: !!msg.content?.trim(), - contentBlockCount: msg.contentBlocks?.length || 0, - contentBlockTypes: (msg.contentBlocks as any[])?.map((b) => 
b?.type) || [], - }) - } - } - - logger.info('[serializeMessagesForDB] Serialized messages', { - inputCount: messages.length, - outputCount: result.length, - sample: - result.length > 0 - ? { - role: result[result.length - 1].role, - hasContent: !!result[result.length - 1].content, - contentBlockCount: result[result.length - 1].contentBlocks?.length || 0, - toolCallCount: result[result.length - 1].toolCalls?.length || 0, - } - : null, - }) - - return result -} - -/** - * @deprecated Use serializeMessagesForDB instead. - */ -function validateMessagesForLLM(messages: CopilotMessage[]): any[] { - return serializeMessagesForDB(messages) -} - -/** - * Extracts all tool calls from a toolCall object, including nested subAgentBlocks. - * Adds them to the provided map. - */ -function extractToolCallsRecursively( - toolCall: CopilotToolCall, - map: Record -): void { - if (!toolCall?.id) return - map[toolCall.id] = toolCall - - // Extract nested tool calls from subAgentBlocks - if (Array.isArray(toolCall.subAgentBlocks)) { - for (const block of toolCall.subAgentBlocks) { - if (block?.type === 'subagent_tool_call' && block.toolCall?.id) { - extractToolCallsRecursively(block.toolCall, map) - } - } - } - - // Extract from subAgentToolCalls as well - if (Array.isArray(toolCall.subAgentToolCalls)) { - for (const subTc of toolCall.subAgentToolCalls) { - extractToolCallsRecursively(subTc, map) - } - } -} - -/** - * Builds a complete toolCallsById map from normalized messages. - * Extracts all tool calls including nested subagent tool calls. 
- */ -function buildToolCallsById(messages: CopilotMessage[]): Record { - const toolCallsById: Record = {} - for (const msg of messages) { - if (msg.contentBlocks) { - for (const block of msg.contentBlocks as any[]) { - if (block?.type === 'tool_call' && block.toolCall?.id) { - extractToolCallsRecursively(block.toolCall, toolCallsById) - } - } - } - } - return toolCallsById -} - -// Streaming context and SSE parsing -interface StreamingContext { - messageId: string - accumulatedContent: StringBuilder - contentBlocks: any[] - currentTextBlock: any | null - isInThinkingBlock: boolean - currentThinkingBlock: any | null - isInDesignWorkflowBlock: boolean - designWorkflowContent: string - pendingContent: string - newChatId?: string - doneEventCount: number - streamComplete?: boolean - wasAborted?: boolean - suppressContinueOption?: boolean - /** Track active subagent sessions by parent tool call ID */ - subAgentParentToolCallId?: string - /** Track subagent content per parent tool call */ - subAgentContent: Record - /** Track subagent tool calls per parent tool call */ - subAgentToolCalls: Record - /** Track subagent streaming blocks per parent tool call */ - subAgentBlocks: Record - suppressStreamingUpdates?: boolean -} - -type SSEHandler = ( - data: any, - context: StreamingContext, - get: () => CopilotStore, - set: any -) => Promise | void - -function appendTextBlock(context: StreamingContext, text: string) { - if (!text) return - context.accumulatedContent.append(text) - if (context.currentTextBlock && context.contentBlocks.length > 0) { - const lastBlock = context.contentBlocks[context.contentBlocks.length - 1] - if (lastBlock.type === TEXT_BLOCK_TYPE && lastBlock === context.currentTextBlock) { - lastBlock.content += text - return - } - } - context.currentTextBlock = contentBlockPool.get() - context.currentTextBlock.type = TEXT_BLOCK_TYPE - context.currentTextBlock.content = text - context.currentTextBlock.timestamp = Date.now() - 
context.contentBlocks.push(context.currentTextBlock) -} - -function appendContinueOption(content: string): string { - if (//i.test(content)) return content - const suffix = content.trim().length > 0 ? '\n\n' : '' - return `${content}${suffix}${CONTINUE_OPTIONS_TAG}` -} - -function appendContinueOptionBlock(blocks: any[]): any[] { - if (!Array.isArray(blocks)) return blocks - const hasOptions = blocks.some( - (block) => - block?.type === TEXT_BLOCK_TYPE && - typeof block.content === 'string' && - //i.test(block.content) - ) - if (hasOptions) return blocks - return [ - ...blocks, - { - type: TEXT_BLOCK_TYPE, - content: CONTINUE_OPTIONS_TAG, - timestamp: Date.now(), - }, - ] -} - -function stripContinueOption(content: string): string { - if (!content || !content.includes(CONTINUE_OPTIONS_TAG)) return content - const next = content.replace(CONTINUE_OPTIONS_TAG, '') - return next.replace(/\n{2,}\s*$/g, '\n').trimEnd() -} - -function stripContinueOptionFromBlocks(blocks: any[]): any[] { - if (!Array.isArray(blocks)) return blocks - return blocks.flatMap((block) => { - if ( - block?.type === TEXT_BLOCK_TYPE && - typeof block.content === 'string' && - block.content.includes(CONTINUE_OPTIONS_TAG) - ) { - const nextContent = stripContinueOption(block.content) - if (!nextContent.trim()) return [] - return [{ ...block, content: nextContent }] - } - return [block] - }) -} - -function beginThinkingBlock(context: StreamingContext) { - if (!context.currentThinkingBlock) { - context.currentThinkingBlock = contentBlockPool.get() - context.currentThinkingBlock.type = THINKING_BLOCK_TYPE - context.currentThinkingBlock.content = '' - context.currentThinkingBlock.timestamp = Date.now() - ;(context.currentThinkingBlock as any).startTime = Date.now() - context.contentBlocks.push(context.currentThinkingBlock) - } - context.isInThinkingBlock = true - context.currentTextBlock = null -} - -/** - * Removes thinking tags (raw or escaped) from streamed content. 
- */ -function stripThinkingTags(text: string): string { - return text.replace(/<\/?thinking[^>]*>/gi, '').replace(/<\/?thinking[^&]*>/gi, '') -} - -function appendThinkingContent(context: StreamingContext, text: string) { - if (!text) return - const cleanedText = stripThinkingTags(text) - if (!cleanedText) return - if (context.currentThinkingBlock) { - context.currentThinkingBlock.content += cleanedText - } else { - context.currentThinkingBlock = contentBlockPool.get() - context.currentThinkingBlock.type = THINKING_BLOCK_TYPE - context.currentThinkingBlock.content = cleanedText - context.currentThinkingBlock.timestamp = Date.now() - context.currentThinkingBlock.startTime = Date.now() - context.contentBlocks.push(context.currentThinkingBlock) - } - context.isInThinkingBlock = true - context.currentTextBlock = null -} - -function finalizeThinkingBlock(context: StreamingContext) { - if (context.currentThinkingBlock) { - context.currentThinkingBlock.duration = - Date.now() - (context.currentThinkingBlock.startTime || Date.now()) - } - context.isInThinkingBlock = false - context.currentThinkingBlock = null - context.currentTextBlock = null -} - -function upsertToolCallBlock(context: StreamingContext, toolCall: CopilotToolCall) { - let found = false - for (let i = 0; i < context.contentBlocks.length; i++) { - const b = context.contentBlocks[i] as any - if (b.type === 'tool_call' && b.toolCall?.id === toolCall.id) { - context.contentBlocks[i] = { ...b, toolCall } - found = true - break - } - } - if (!found) { - context.contentBlocks.push({ type: 'tool_call', toolCall, timestamp: Date.now() }) - } -} - -function appendSubAgentText(context: StreamingContext, parentToolCallId: string, text: string) { - if (!context.subAgentContent[parentToolCallId]) { - context.subAgentContent[parentToolCallId] = '' - } - if (!context.subAgentBlocks[parentToolCallId]) { - context.subAgentBlocks[parentToolCallId] = [] - } - context.subAgentContent[parentToolCallId] += text - const blocks = 
context.subAgentBlocks[parentToolCallId] - const lastBlock = blocks[blocks.length - 1] - if (lastBlock && lastBlock.type === 'subagent_text') { - lastBlock.content = (lastBlock.content || '') + text - } else { - blocks.push({ - type: 'subagent_text', - content: text, - timestamp: Date.now(), - }) - } -} - -const sseHandlers: Record = { - chat_id: async (data, context, get, set) => { - context.newChatId = data.chatId - const { currentChat, activeStream } = get() - if (!currentChat && context.newChatId) { - await get().handleNewChatCreation(context.newChatId) - } - // Update activeStream with chatId for resume purposes - if (activeStream && context.newChatId && !activeStream.chatId) { - const updatedStream = { ...activeStream, chatId: context.newChatId } - set({ activeStream: updatedStream }) - writeActiveStreamToStorage(updatedStream) - } - }, - title_updated: (_data, _context, get, set) => { - const title = _data.title - if (!title) return - const { currentChat, chats } = get() - if (currentChat) { - set({ - currentChat: { ...currentChat, title }, - chats: chats.map((c) => (c.id === currentChat.id ? { ...c, title } : c)), - }) - } - }, - tool_result: (data, context, get, set) => { - try { - const toolCallId: string | undefined = data?.toolCallId || data?.data?.id - const success: boolean | undefined = data?.success - const failedDependency: boolean = data?.failedDependency === true - const skipped: boolean = data?.result?.skipped === true - if (!toolCallId) return - const { toolCallsById } = get() - const current = toolCallsById[toolCallId] - if (current) { - if ( - isRejectedState(current.state) || - isReviewState(current.state) || - isBackgroundState(current.state) - ) { - // Preserve terminal review/rejected state; do not override - return - } - const targetState = success - ? ClientToolCallState.success - : failedDependency || skipped - ? 
ClientToolCallState.rejected - : ClientToolCallState.error - const updatedMap = { ...toolCallsById } - updatedMap[toolCallId] = { - ...current, - state: targetState, - display: resolveToolDisplay( - current.name, - targetState, - current.id, - (current as any).params - ), - } - set({ toolCallsById: updatedMap }) - - // If checkoff_todo succeeded, mark todo as completed in planTodos - if (targetState === ClientToolCallState.success && current.name === 'checkoff_todo') { - try { - const result = data?.result || data?.data?.result || {} - const input = (current as any).params || (current as any).input || {} - const todoId = input.id || input.todoId || result.id || result.todoId - if (todoId) { - get().updatePlanTodoStatus(todoId, 'completed') - } - } catch {} - } - - // If mark_todo_in_progress succeeded, set todo executing in planTodos - if ( - targetState === ClientToolCallState.success && - current.name === 'mark_todo_in_progress' - ) { - try { - const result = data?.result || data?.data?.result || {} - const input = (current as any).params || (current as any).input || {} - const todoId = input.id || input.todoId || result.id || result.todoId - if (todoId) { - get().updatePlanTodoStatus(todoId, 'executing') - } - } catch {} - } - - if (current.name === 'edit_workflow') { - try { - const resultPayload = - data?.result || data?.data?.result || data?.data?.data || data?.data || {} - const workflowState = resultPayload?.workflowState - logger.info('[SSE] edit_workflow result received', { - hasWorkflowState: !!workflowState, - blockCount: workflowState ? Object.keys(workflowState.blocks || {}).length : 0, - edgeCount: workflowState?.edges?.length ?? 0, - }) - if (workflowState) { - const diffStore = useWorkflowDiffStore.getState() - // Await the diff application to catch any errors - diffStore.setProposedChanges(workflowState).catch((err) => { - logger.error('[SSE] Failed to apply edit_workflow diff', { - error: err instanceof Error ? 
err.message : String(err), - }) - }) - } - } catch (err) { - logger.error('[SSE] edit_workflow result handling failed', { - error: err instanceof Error ? err.message : String(err), - }) - } - } - } - - // Update inline content block state - for (let i = 0; i < context.contentBlocks.length; i++) { - const b = context.contentBlocks[i] as any - if (b?.type === 'tool_call' && b?.toolCall?.id === toolCallId) { - if ( - isRejectedState(b.toolCall?.state) || - isReviewState(b.toolCall?.state) || - isBackgroundState(b.toolCall?.state) - ) - break - const targetState = success - ? ClientToolCallState.success - : failedDependency || skipped - ? ClientToolCallState.rejected - : ClientToolCallState.error - context.contentBlocks[i] = { - ...b, - toolCall: { - ...b.toolCall, - state: targetState, - display: resolveToolDisplay( - b.toolCall?.name, - targetState, - toolCallId, - b.toolCall?.params - ), - }, - } - break - } - } - updateStreamingMessage(set, context) - } catch {} - }, - tool_error: (data, context, get, set) => { - try { - const toolCallId: string | undefined = data?.toolCallId || data?.data?.id - const failedDependency: boolean = data?.failedDependency === true - if (!toolCallId) return - const { toolCallsById } = get() - const current = toolCallsById[toolCallId] - if (current) { - if ( - isRejectedState(current.state) || - isReviewState(current.state) || - isBackgroundState(current.state) - ) { - return - } - const targetState = failedDependency - ? 
ClientToolCallState.rejected - : ClientToolCallState.error - const updatedMap = { ...toolCallsById } - updatedMap[toolCallId] = { - ...current, - state: targetState, - display: resolveToolDisplay( - current.name, - targetState, - current.id, - (current as any).params - ), - } - set({ toolCallsById: updatedMap }) - } - for (let i = 0; i < context.contentBlocks.length; i++) { - const b = context.contentBlocks[i] as any - if (b?.type === 'tool_call' && b?.toolCall?.id === toolCallId) { - if ( - isRejectedState(b.toolCall?.state) || - isReviewState(b.toolCall?.state) || - isBackgroundState(b.toolCall?.state) - ) - break - const targetState = failedDependency - ? ClientToolCallState.rejected - : ClientToolCallState.error - context.contentBlocks[i] = { - ...b, - toolCall: { - ...b.toolCall, - state: targetState, - display: resolveToolDisplay( - b.toolCall?.name, - targetState, - toolCallId, - b.toolCall?.params - ), - }, - } - break - } - } - updateStreamingMessage(set, context) - } catch {} - }, - tool_generating: (data, context, get, set) => { - const { toolCallId, toolName } = data - if (!toolCallId || !toolName) return - const { toolCallsById } = get() - - if (!toolCallsById[toolCallId]) { - // Show as pending until we receive full tool_call (with arguments) to decide execution - const initialState = ClientToolCallState.pending - const tc: CopilotToolCall = { - id: toolCallId, - name: toolName, - state: initialState, - display: resolveToolDisplay(toolName, initialState, toolCallId), - } - const updated = { ...toolCallsById, [toolCallId]: tc } - set({ toolCallsById: updated }) - logger.info('[toolCallsById] map updated', updated) - - // Add/refresh inline content block - upsertToolCallBlock(context, tc) - updateStreamingMessage(set, context) - } - }, - tool_call: (data, context, get, set) => { - const toolData = data?.data || {} - const id: string | undefined = toolData.id || data?.toolCallId - const name: string | undefined = toolData.name || data?.toolName - if 
(!id) return - const args = toolData.arguments - const isPartial = toolData.partial === true - const { toolCallsById } = get() - - const existing = toolCallsById[id] - const next: CopilotToolCall = existing - ? { - ...existing, - state: ClientToolCallState.pending, - ...(args ? { params: args } : {}), - display: resolveToolDisplay(name, ClientToolCallState.pending, id, args), - } - : { - id, - name: name || 'unknown_tool', - state: ClientToolCallState.pending, - ...(args ? { params: args } : {}), - display: resolveToolDisplay(name, ClientToolCallState.pending, id, args), - } - const updated = { ...toolCallsById, [id]: next } - set({ toolCallsById: updated }) - logger.info('[toolCallsById] → pending', { id, name, params: args }) - - // Ensure an inline content block exists/updated for this tool call - upsertToolCallBlock(context, next) - updateStreamingMessage(set, context) - - // Do not execute on partial tool_call frames - if (isPartial) { - return - } - - return - }, - reasoning: (data, context, _get, set) => { - const phase = (data && (data.phase || data?.data?.phase)) as string | undefined - if (phase === 'start') { - beginThinkingBlock(context) - updateStreamingMessage(set, context) - return - } - if (phase === 'end') { - finalizeThinkingBlock(context) - updateStreamingMessage(set, context) - return - } - const chunk: string = typeof data?.data === 'string' ? 
data.data : data?.content || '' - if (!chunk) return - appendThinkingContent(context, chunk) - updateStreamingMessage(set, context) - }, - content: (data, context, get, set) => { - if (!data.data) return - context.pendingContent += data.data - - let contentToProcess = context.pendingContent - let hasProcessedContent = false - - const thinkingStartRegex = // - const thinkingEndRegex = /<\/thinking>/ - const designWorkflowStartRegex = // - const designWorkflowEndRegex = /<\/design_workflow>/ - - const splitTrailingPartialTag = ( - text: string, - tags: string[] - ): { text: string; remaining: string } => { - const partialIndex = text.lastIndexOf('<') - if (partialIndex < 0) { - return { text, remaining: '' } - } - const possibleTag = text.substring(partialIndex) - const matchesTagStart = tags.some((tag) => tag.startsWith(possibleTag)) - if (!matchesTagStart) { - return { text, remaining: '' } - } - return { - text: text.substring(0, partialIndex), - remaining: possibleTag, - } - } - - while (contentToProcess.length > 0) { - // Handle design_workflow tags (takes priority over other content processing) - if (context.isInDesignWorkflowBlock) { - const endMatch = designWorkflowEndRegex.exec(contentToProcess) - if (endMatch) { - const designContent = contentToProcess.substring(0, endMatch.index) - context.designWorkflowContent += designContent - context.isInDesignWorkflowBlock = false - - // Update store with complete design workflow content (available in all modes) - logger.info('[design_workflow] Tag complete, setting plan content', { - contentLength: context.designWorkflowContent.length, - }) - set({ streamingPlanContent: context.designWorkflowContent }) - - contentToProcess = contentToProcess.substring(endMatch.index + endMatch[0].length) - hasProcessedContent = true - } else { - // Still in design_workflow block, accumulate content - const { text, remaining } = splitTrailingPartialTag(contentToProcess, [ - '', - ]) - context.designWorkflowContent += text - - // 
Update store with partial content for streaming effect (available in all modes) - set({ streamingPlanContent: context.designWorkflowContent }) - - contentToProcess = remaining - hasProcessedContent = true - if (remaining) { - break - } - } - continue - } - - if (!context.isInThinkingBlock && !context.isInDesignWorkflowBlock) { - // Check for design_workflow start tag first - const designStartMatch = designWorkflowStartRegex.exec(contentToProcess) - if (designStartMatch) { - const textBeforeDesign = contentToProcess.substring(0, designStartMatch.index) - if (textBeforeDesign) { - appendTextBlock(context, textBeforeDesign) - hasProcessedContent = true - } - context.isInDesignWorkflowBlock = true - context.designWorkflowContent = '' - contentToProcess = contentToProcess.substring( - designStartMatch.index + designStartMatch[0].length - ) - hasProcessedContent = true - continue - } - - const nextMarkIndex = contentToProcess.indexOf('') - const nextCheckIndex = contentToProcess.indexOf('') - const hasMark = nextMarkIndex >= 0 - const hasCheck = nextCheckIndex >= 0 - - const nextTagIndex = - hasMark && hasCheck - ? Math.min(nextMarkIndex, nextCheckIndex) - : hasMark - ? nextMarkIndex - : hasCheck - ? nextCheckIndex - : -1 - - if (nextTagIndex >= 0) { - const isMarkTodo = hasMark && nextMarkIndex === nextTagIndex - const tagStart = isMarkTodo ? '' : '' - const tagEnd = isMarkTodo ? '' : '' - const closingIndex = contentToProcess.indexOf(tagEnd, nextTagIndex + tagStart.length) - - if (closingIndex === -1) { - // Partial tag; wait for additional content - break - } - - const todoId = contentToProcess - .substring(nextTagIndex + tagStart.length, closingIndex) - .trim() - logger.info( - isMarkTodo ? '[TODO] Detected marktodo tag' : '[TODO] Detected checkofftodo tag', - { todoId } - ) - - if (todoId) { - try { - get().updatePlanTodoStatus(todoId, isMarkTodo ? 'executing' : 'completed') - logger.info( - isMarkTodo - ? 
'[TODO] Successfully marked todo in progress' - : '[TODO] Successfully checked off todo', - { todoId } - ) - } catch (e) { - logger.error( - isMarkTodo - ? '[TODO] Failed to mark todo in progress' - : '[TODO] Failed to checkoff todo', - { todoId, error: e } - ) - } - } else { - logger.warn('[TODO] Empty todoId extracted from todo tag', { tagType: tagStart }) - } - - // Remove the tag AND newlines around it, but preserve ONE newline if both sides had them - let beforeTag = contentToProcess.substring(0, nextTagIndex) - let afterTag = contentToProcess.substring(closingIndex + tagEnd.length) - - const hadNewlineBefore = /(\r?\n)+$/.test(beforeTag) - const hadNewlineAfter = /^(\r?\n)+/.test(afterTag) - - // Strip trailing newlines before the tag - beforeTag = beforeTag.replace(/(\r?\n)+$/, '') - // Strip leading newlines after the tag - afterTag = afterTag.replace(/^(\r?\n)+/, '') - - // If there were newlines on both sides, add back ONE to preserve paragraph breaks - contentToProcess = - beforeTag + (hadNewlineBefore && hadNewlineAfter ? 
'\n' : '') + afterTag - context.currentTextBlock = null - hasProcessedContent = true - continue - } - } - - if (context.isInThinkingBlock) { - const endMatch = thinkingEndRegex.exec(contentToProcess) - if (endMatch) { - const thinkingContent = contentToProcess.substring(0, endMatch.index) - appendThinkingContent(context, thinkingContent) - finalizeThinkingBlock(context) - contentToProcess = contentToProcess.substring(endMatch.index + endMatch[0].length) - hasProcessedContent = true - } else { - const { text, remaining } = splitTrailingPartialTag(contentToProcess, ['']) - if (text) { - appendThinkingContent(context, text) - hasProcessedContent = true - } - contentToProcess = remaining - if (remaining) { - break - } - } - } else { - const startMatch = thinkingStartRegex.exec(contentToProcess) - if (startMatch) { - const textBeforeThinking = contentToProcess.substring(0, startMatch.index) - if (textBeforeThinking) { - appendTextBlock(context, textBeforeThinking) - hasProcessedContent = true - } - context.isInThinkingBlock = true - context.currentTextBlock = null - contentToProcess = contentToProcess.substring(startMatch.index + startMatch[0].length) - hasProcessedContent = true - } else { - // Check if content might contain partial todo tags and hold them back - let partialTagIndex = contentToProcess.lastIndexOf('<') - - // Also check for partial marktodo or checkofftodo tags - const partialMarkTodo = contentToProcess.lastIndexOf(' partialTagIndex) { - partialTagIndex = partialMarkTodo - } - if (partialCheckoffTodo > partialTagIndex) { - partialTagIndex = partialCheckoffTodo - } +import { applySseEvent, sseHandlers } from '@/lib/copilot/client-sse' +import { + appendContinueOption, + appendContinueOptionBlock, + createErrorMessage, + createStreamingMessage, + createUserMessage, + finalizeThinkingBlock, + stripContinueOption, + stripContinueOptionFromBlocks, +} from '@/lib/copilot/client-sse/content-blocks' +import { flushStreamingUpdates, stopStreamingUpdates } from 
'@/lib/copilot/client-sse/handlers' +import type { StreamingContext } from '@/lib/copilot/client-sse/types' +import { + buildCheckpointWorkflowState, + buildToolCallsById, + normalizeMessagesForUI, + saveMessageCheckpoint, + serializeMessagesForDB, +} from '@/lib/copilot/messages' +import type { CopilotTransportMode } from '@/lib/copilot/models' +import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser' +import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' +import { + abortAllInProgressTools, + isRejectedState, + isTerminalState, + resolveToolDisplay, + stripTodoTags, +} from '@/lib/copilot/store-utils' +import { getQueryClient } from '@/app/_shell/providers/query-provider' +import { subscriptionKeys } from '@/hooks/queries/subscription' +import type { + ChatContext, + CopilotMessage, + CopilotStore, + CopilotStreamInfo, + CopilotToolCall, + MessageFileAttachment, +} from '@/stores/panel/copilot/types' +import { useWorkflowDiffStore } from '@/stores/workflow-diff/store' +import { useSubBlockStore } from '@/stores/workflows/subblock/store' +import { useWorkflowStore } from '@/stores/workflows/workflow/store' +import type { WorkflowState } from '@/stores/workflows/workflow/types' - let textToAdd = contentToProcess - let remaining = '' - if (partialTagIndex >= 0 && partialTagIndex > contentToProcess.length - 50) { - textToAdd = contentToProcess.substring(0, partialTagIndex) - remaining = contentToProcess.substring(partialTagIndex) - } - if (textToAdd) { - appendTextBlock(context, textToAdd) - hasProcessedContent = true - } - contentToProcess = remaining - break - } - } - } +const logger = createLogger('CopilotStore') - context.pendingContent = contentToProcess - if (hasProcessedContent) { - updateStreamingMessage(set, context) - } - }, - done: (_data, context) => { - logger.info('[SSE] DONE EVENT RECEIVED', { - doneEventCount: context.doneEventCount, - data: _data, - }) - context.doneEventCount++ - if 
(context.doneEventCount >= 1) { - logger.info('[SSE] Setting streamComplete = true, stream will terminate') - context.streamComplete = true - } - }, - error: (data, context, _get, set) => { - logger.error('Stream error:', data.error) - set((state: CopilotStore) => ({ - messages: state.messages.map((msg) => - msg.id === context.messageId - ? { - ...msg, - content: context.accumulatedContent || 'An error occurred.', - error: data.error, - } - : msg - ), - })) - context.streamComplete = true - }, - stream_end: (_data, context, _get, set) => { - if (context.pendingContent) { - if (context.isInThinkingBlock && context.currentThinkingBlock) { - appendThinkingContent(context, context.pendingContent) - } else if (context.pendingContent.trim()) { - appendTextBlock(context, context.pendingContent) - } - context.pendingContent = '' - } - finalizeThinkingBlock(context) - updateStreamingMessage(set, context) - }, - default: () => {}, -} +const STREAM_STORAGE_KEY = 'copilot_active_stream' /** - * Helper to update a tool call with subagent data in both toolCallsById and contentBlocks + * Flag set on beforeunload to suppress continue option during page refresh/close. + * Aborts during unload should NOT show the continue button. 
*/ -function updateToolCallWithSubAgentData( - context: StreamingContext, - get: () => CopilotStore, - set: any, - parentToolCallId: string -) { - const { toolCallsById } = get() - const parentToolCall = toolCallsById[parentToolCallId] - if (!parentToolCall) { - logger.warn('[SubAgent] updateToolCallWithSubAgentData: parent tool call not found', { - parentToolCallId, - availableToolCallIds: Object.keys(toolCallsById), - }) - return - } - - // Prepare subagent blocks array for ordered display - const blocks = context.subAgentBlocks[parentToolCallId] || [] - - const updatedToolCall: CopilotToolCall = { - ...parentToolCall, - subAgentContent: context.subAgentContent[parentToolCallId] || '', - subAgentToolCalls: context.subAgentToolCalls[parentToolCallId] || [], - subAgentBlocks: blocks, - subAgentStreaming: true, - } - - logger.info('[SubAgent] Updating tool call with subagent data', { - parentToolCallId, - parentToolName: parentToolCall.name, - subAgentContentLength: updatedToolCall.subAgentContent?.length, - subAgentBlocksCount: updatedToolCall.subAgentBlocks?.length, - subAgentToolCallsCount: updatedToolCall.subAgentToolCalls?.length, +let isPageUnloading = false +if (typeof window !== 'undefined') { + window.addEventListener('beforeunload', () => { + isPageUnloading = true }) +} - // Update in toolCallsById - const updatedMap = { ...toolCallsById, [parentToolCallId]: updatedToolCall } - set({ toolCallsById: updatedMap }) - - // Update in contentBlocks - let foundInContentBlocks = false - for (let i = 0; i < context.contentBlocks.length; i++) { - const b = context.contentBlocks[i] as any - if (b.type === 'tool_call' && b.toolCall?.id === parentToolCallId) { - context.contentBlocks[i] = { ...b, toolCall: updatedToolCall } - foundInContentBlocks = true - break - } - } - - if (!foundInContentBlocks) { - logger.warn('[SubAgent] Parent tool call not found in contentBlocks', { - parentToolCallId, - contentBlocksCount: context.contentBlocks.length, - toolCallBlockIds: 
context.contentBlocks - .filter((b: any) => b.type === 'tool_call') - .map((b: any) => b.toolCall?.id), +function readActiveStreamFromStorage(): CopilotStreamInfo | null { + if (typeof window === 'undefined') return null + try { + const raw = window.sessionStorage.getItem(STREAM_STORAGE_KEY) + logger.info('[Copilot] Reading stream from storage', { + hasRaw: !!raw, + rawPreview: raw ? raw.substring(0, 100) : null, }) + if (!raw) return null + const parsed = JSON.parse(raw) as CopilotStreamInfo + return parsed?.streamId ? parsed : null + } catch (e) { + logger.warn('[Copilot] Failed to read stream from storage', { error: String(e) }) + return null } - - updateStreamingMessage(set, context) } -/** - * SSE handlers for subagent events (events with subagent field set) - * These handle content and tool calls from subagents like debug - */ -const subAgentSSEHandlers: Record = { - // Handle subagent response start (ignore - just a marker) - start: () => { - // Subagent start event - no action needed, parent is already tracked from subagent_start - }, - - // Handle subagent text content (reasoning/thinking) - content: (data, context, get, set) => { - const parentToolCallId = context.subAgentParentToolCallId - logger.info('[SubAgent] content event', { - parentToolCallId, - hasData: !!data.data, - dataPreview: typeof data.data === 'string' ? 
data.data.substring(0, 50) : null, - }) - if (!parentToolCallId || !data.data) { - logger.warn('[SubAgent] content missing parentToolCallId or data', { - parentToolCallId, - hasData: !!data.data, - }) - return - } - - appendSubAgentText(context, parentToolCallId, data.data) - - updateToolCallWithSubAgentData(context, get, set, parentToolCallId) - }, - - // Handle subagent reasoning (same as content for subagent display purposes) - reasoning: (data, context, get, set) => { - const parentToolCallId = context.subAgentParentToolCallId - const phase = data?.phase || data?.data?.phase - if (!parentToolCallId) return - - // For reasoning, we just append the content (treating start/end as markers) - if (phase === 'start' || phase === 'end') return - - const chunk = typeof data?.data === 'string' ? data.data : data?.content || '' - if (!chunk) return - - appendSubAgentText(context, parentToolCallId, chunk) - - updateToolCallWithSubAgentData(context, get, set, parentToolCallId) - }, - - // Handle subagent tool_generating (tool is being generated) - tool_generating: () => { - // Tool generating event - no action needed, we'll handle the actual tool_call - }, - - // Handle subagent tool calls - also execute client tools - tool_call: async (data, context, get, set) => { - const parentToolCallId = context.subAgentParentToolCallId - if (!parentToolCallId) return - - const toolData = data?.data || {} - const id: string | undefined = toolData.id || data?.toolCallId - const name: string | undefined = toolData.name || data?.toolName - if (!id || !name) return - const isPartial = toolData.partial === true - - // Arguments can come in different locations depending on SSE format - // Check multiple possible locations - let args = toolData.arguments || toolData.input || data?.arguments || data?.input - - // If arguments is a string, try to parse it as JSON - if (typeof args === 'string') { - try { - args = JSON.parse(args) - } catch { - logger.warn('[SubAgent] Failed to parse arguments 
string', { args }) - } - } - - logger.info('[SubAgent] tool_call received', { - id, - name, - hasArgs: !!args, - argsKeys: args ? Object.keys(args) : [], - toolDataKeys: Object.keys(toolData), - dataKeys: Object.keys(data || {}), - }) - - // Initialize if needed - if (!context.subAgentToolCalls[parentToolCallId]) { - context.subAgentToolCalls[parentToolCallId] = [] - } - if (!context.subAgentBlocks[parentToolCallId]) { - context.subAgentBlocks[parentToolCallId] = [] - } - - // Create or update the subagent tool call - const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex( - (tc) => tc.id === id - ) - const subAgentToolCall: CopilotToolCall = { - id, - name, - state: ClientToolCallState.pending, - ...(args ? { params: args } : {}), - display: resolveToolDisplay(name, ClientToolCallState.pending, id, args), - } - - if (existingIndex >= 0) { - context.subAgentToolCalls[parentToolCallId][existingIndex] = subAgentToolCall - } else { - context.subAgentToolCalls[parentToolCallId].push(subAgentToolCall) - - // Also add to ordered blocks - context.subAgentBlocks[parentToolCallId].push({ - type: 'subagent_tool_call', - toolCall: subAgentToolCall, - timestamp: Date.now(), +function writeActiveStreamToStorage(info: CopilotStreamInfo | null): void { + if (typeof window === 'undefined') return + try { + if (!info) { + logger.info('[Copilot] Clearing stream from storage', { + isPageUnloading, + stack: new Error().stack?.split('\n').slice(1, 4).join(' <- '), }) - } - - // Also add to main toolCallsById for proper tool execution - const { toolCallsById } = get() - const updated = { ...toolCallsById, [id]: subAgentToolCall } - set({ toolCallsById: updated }) - - updateToolCallWithSubAgentData(context, get, set, parentToolCallId) - - if (isPartial) { + window.sessionStorage.removeItem(STREAM_STORAGE_KEY) return } - }, - - // Handle subagent tool results - tool_result: (data, context, get, set) => { - const parentToolCallId = context.subAgentParentToolCallId - if 
(!parentToolCallId) return - - const toolCallId: string | undefined = data?.toolCallId || data?.data?.id - const success: boolean | undefined = data?.success !== false // Default to true if not specified - if (!toolCallId) return - - // Initialize if needed - if (!context.subAgentToolCalls[parentToolCallId]) return - if (!context.subAgentBlocks[parentToolCallId]) return - - // Update the subagent tool call state - const targetState = success ? ClientToolCallState.success : ClientToolCallState.error - const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex( - (tc) => tc.id === toolCallId - ) - - if (existingIndex >= 0) { - const existing = context.subAgentToolCalls[parentToolCallId][existingIndex] - const updatedSubAgentToolCall = { - ...existing, - state: targetState, - display: resolveToolDisplay(existing.name, targetState, toolCallId, existing.params), - } - context.subAgentToolCalls[parentToolCallId][existingIndex] = updatedSubAgentToolCall - - // Also update in ordered blocks - for (const block of context.subAgentBlocks[parentToolCallId]) { - if (block.type === 'subagent_tool_call' && block.toolCall?.id === toolCallId) { - block.toolCall = updatedSubAgentToolCall - break - } - } - - // Update the individual tool call in toolCallsById so ToolCall component gets latest state - const { toolCallsById } = get() - if (toolCallsById[toolCallId]) { - const updatedMap = { - ...toolCallsById, - [toolCallId]: updatedSubAgentToolCall, - } - set({ toolCallsById: updatedMap }) - logger.info('[SubAgent] Updated subagent tool call state in toolCallsById', { - toolCallId, - name: existing.name, - state: targetState, - }) - } - } - - updateToolCallWithSubAgentData(context, get, set, parentToolCallId) - }, - - // Handle subagent stream done - just update the streaming state - done: (data, context, get, set) => { - const parentToolCallId = context.subAgentParentToolCallId - if (!parentToolCallId) return - - // Update the tool call with final content but keep 
streaming true until subagent_end - updateToolCallWithSubAgentData(context, get, set, parentToolCallId) - }, -} - -async function applySseEvent( - data: any, - context: StreamingContext, - get: () => CopilotStore, - set: (next: Partial | ((state: CopilotStore) => Partial)) => void -): Promise { - const normalizedEvent = normalizeSseEvent(data) - if (shouldSkipToolCallEvent(normalizedEvent) || shouldSkipToolResultEvent(normalizedEvent)) { - return true - } - data = normalizedEvent - - if (data.type === 'subagent_start') { - const toolCallId = data.data?.tool_call_id - if (toolCallId) { - context.subAgentParentToolCallId = toolCallId - const { toolCallsById } = get() - const parentToolCall = toolCallsById[toolCallId] - if (parentToolCall) { - const updatedToolCall: CopilotToolCall = { - ...parentToolCall, - subAgentStreaming: true, - } - const updatedMap = { ...toolCallsById, [toolCallId]: updatedToolCall } - set({ toolCallsById: updatedMap }) - } - logger.info('[SSE] Subagent session started', { - subagent: data.subagent, - parentToolCallId: toolCallId, - }) - } - return true - } - - if (data.type === 'subagent_end') { - const parentToolCallId = context.subAgentParentToolCallId - if (parentToolCallId) { - const { toolCallsById } = get() - const parentToolCall = toolCallsById[parentToolCallId] - if (parentToolCall) { - const updatedToolCall: CopilotToolCall = { - ...parentToolCall, - subAgentContent: context.subAgentContent[parentToolCallId] || '', - subAgentToolCalls: context.subAgentToolCalls[parentToolCallId] || [], - subAgentBlocks: context.subAgentBlocks[parentToolCallId] || [], - subAgentStreaming: false, - } - const updatedMap = { ...toolCallsById, [parentToolCallId]: updatedToolCall } - set({ toolCallsById: updatedMap }) - logger.info('[SSE] Subagent session ended', { - subagent: data.subagent, - parentToolCallId, - contentLength: context.subAgentContent[parentToolCallId]?.length || 0, - toolCallCount: context.subAgentToolCalls[parentToolCallId]?.length || 0, 
- }) - } - } - context.subAgentParentToolCallId = undefined - return true - } - - if (data.subagent) { - const parentToolCallId = context.subAgentParentToolCallId - if (!parentToolCallId) { - logger.warn('[SSE] Subagent event without parent tool call ID', { - type: data.type, - subagent: data.subagent, - }) - return true - } - - logger.info('[SSE] Processing subagent event', { - type: data.type, - subagent: data.subagent, - parentToolCallId, - hasHandler: !!subAgentSSEHandlers[data.type], + const payload = JSON.stringify(info) + window.sessionStorage.setItem(STREAM_STORAGE_KEY, payload) + const verified = window.sessionStorage.getItem(STREAM_STORAGE_KEY) === payload + logger.info('[Copilot] Writing stream to storage', { + streamId: info.streamId, + lastEventId: info.lastEventId, + userMessageContent: info.userMessageContent?.slice(0, 30), + verified, }) - - const subAgentHandler = subAgentSSEHandlers[data.type] - if (subAgentHandler) { - await subAgentHandler(data, context, get, set) - } else { - logger.warn('[SSE] No handler for subagent event type', { type: data.type }) - } - return !context.streamComplete + } catch (e) { + logger.error('[Copilot] Failed to write stream to storage', { error: String(e) }) } - - const handler = sseHandlers[data.type] || sseHandlers.default - await handler(data, context, get, set) - return !context.streamComplete } -// Debounced UI update queue for smoother streaming -const streamingUpdateQueue = new Map() -let streamingUpdateRAF: number | null = null -let lastBatchTime = 0 -const MIN_BATCH_INTERVAL = 16 -const MAX_BATCH_INTERVAL = 50 -const MAX_QUEUE_SIZE = 5 - -function stopStreamingUpdates() { - if (streamingUpdateRAF !== null) { - cancelAnimationFrame(streamingUpdateRAF) - streamingUpdateRAF = null - } - streamingUpdateQueue.clear() +function updateActiveStreamEventId( + get: () => CopilotStore, + set: (next: Partial) => void, + streamId: string, + eventId: number +): void { + const current = get().activeStream + if (!current || 
current.streamId !== streamId) return + if (eventId <= (current.lastEventId || 0)) return + const next = { ...current, lastEventId: eventId } + set({ activeStream: next }) + writeActiveStreamToStorage(next) } -/** Flush pending streaming updates immediately (apply them to state before clearing). */ -function flushStreamingUpdates(set: any) { - if (streamingUpdateRAF !== null) { - cancelAnimationFrame(streamingUpdateRAF) - streamingUpdateRAF = null +// On module load, clear any lingering diff preview (fresh page refresh) +try { + const diffStore = useWorkflowDiffStore.getState() + if (diffStore?.hasActiveDiff) { + diffStore.clearDiff() } - if (streamingUpdateQueue.size === 0) return - - const updates = new Map(streamingUpdateQueue) - streamingUpdateQueue.clear() +} catch {} - set((state: CopilotStore) => { - if (updates.size === 0) return state - return { - messages: state.messages.map((msg) => { - const update = updates.get(msg.id) - if (update) { - return { - ...msg, - content: '', - contentBlocks: - update.contentBlocks.length > 0 - ? createOptimizedContentBlocks(update.contentBlocks) - : [], - } - } - return msg - }), - } - }) -} +const TEXT_BLOCK_TYPE = 'text' +const CONTINUE_OPTIONS_TAG = '{"1":"Continue"}' function cloneContentBlocks(blocks: any[]): any[] { if (!Array.isArray(blocks)) return [] @@ -1963,7 +167,7 @@ function findLastTextBlock(blocks: any[]): any | null { function replaceTextBlocks(blocks: any[], text: string): any[] { const next: any[] = [] let inserted = false - for (const block of blocks || []) { + for (const block of blocks ?? 
[]) { if (block?.type === TEXT_BLOCK_TYPE) { if (!inserted && text) { next.push({ type: TEXT_BLOCK_TYPE, content: text, timestamp: Date.now() }) @@ -1982,7 +186,7 @@ function replaceTextBlocks(blocks: any[], text: string): any[] { function createStreamingContext(messageId: string): StreamingContext { return { messageId, - accumulatedContent: new StringBuilder(), + accumulatedContent: '', contentBlocks: [], currentTextBlock: null, isInThinkingBlock: false, @@ -1997,102 +201,6 @@ function createStreamingContext(messageId: string): StreamingContext { } } -function createOptimizedContentBlocks(contentBlocks: any[]): any[] { - const result: any[] = new Array(contentBlocks.length) - for (let i = 0; i < contentBlocks.length; i++) { - const block = contentBlocks[i] - result[i] = { ...block } - } - return result -} -;`` -function updateStreamingMessage(set: any, context: StreamingContext) { - if (context.suppressStreamingUpdates) return - const now = performance.now() - streamingUpdateQueue.set(context.messageId, context) - const timeSinceLastBatch = now - lastBatchTime - const shouldFlushImmediately = - streamingUpdateQueue.size >= MAX_QUEUE_SIZE || timeSinceLastBatch > MAX_BATCH_INTERVAL - - if (streamingUpdateRAF === null) { - const scheduleUpdate = () => { - streamingUpdateRAF = requestAnimationFrame(() => { - const updates = new Map(streamingUpdateQueue) - streamingUpdateQueue.clear() - streamingUpdateRAF = null - lastBatchTime = performance.now() - set((state: CopilotStore) => { - if (updates.size === 0) return state - const messages = state.messages - const lastMessage = messages[messages.length - 1] - const lastMessageUpdate = lastMessage ? updates.get(lastMessage.id) : null - if (updates.size === 1 && lastMessageUpdate) { - const newMessages = [...messages] - newMessages[messages.length - 1] = { - ...lastMessage, - content: '', - contentBlocks: - lastMessageUpdate.contentBlocks.length > 0 - ? 
createOptimizedContentBlocks(lastMessageUpdate.contentBlocks) - : [], - } - return { messages: newMessages } - } - return { - messages: messages.map((msg) => { - const update = updates.get(msg.id) - if (update) { - return { - ...msg, - content: '', - contentBlocks: - update.contentBlocks.length > 0 - ? createOptimizedContentBlocks(update.contentBlocks) - : [], - } - } - return msg - }), - } - }) - }) - } - if (shouldFlushImmediately) scheduleUpdate() - else setTimeout(scheduleUpdate, Math.max(0, MIN_BATCH_INTERVAL - timeSinceLastBatch)) - } -} - -async function* parseSSEStream( - reader: ReadableStreamDefaultReader, - decoder: TextDecoder -) { - let buffer = '' - while (true) { - const { done, value } = await reader.read() - if (done) break - const chunk = decoder.decode(value, { stream: true }) - buffer += chunk - const lastNewlineIndex = buffer.lastIndexOf('\n') - if (lastNewlineIndex !== -1) { - const linesToProcess = buffer.substring(0, lastNewlineIndex) - buffer = buffer.substring(lastNewlineIndex + 1) - const lines = linesToProcess.split('\n') - for (let i = 0; i < lines.length; i++) { - const line = lines[i] - if (line.length === 0) continue - if (line.charCodeAt(0) === 100 && line.startsWith(DATA_PREFIX)) { - try { - const jsonStr = line.substring(DATA_PREFIX_LENGTH) - yield JSON.parse(jsonStr) - } catch (error) { - logger.warn('Failed to parse SSE data:', error) - } - } - } - } - } -} - // Initial state (subset required for UI/streaming) const initialState = { mode: 'build' as const, @@ -2103,7 +211,6 @@ const initialState = { currentChat: null as CopilotChat | null, chats: [] as CopilotChat[], messages: [] as CopilotMessage[], - checkpoints: [] as any[], messageCheckpoints: {} as Record, messageSnapshots: {} as Record, isLoading: false, @@ -2193,7 +300,7 @@ export const useCopilotStore = create()( // Restore plan content and config (mode/model) from selected chat const planArtifact = chat.planArtifact || '' - const chatConfig = chat.config || {} + const 
chatConfig = chat.config ?? {} const chatMode = chatConfig.mode || get().mode const chatModel = chatConfig.model || get().selectedModel @@ -2211,7 +318,7 @@ export const useCopilotStore = create()( const previousModel = get().selectedModel // Optimistically set selected chat and normalize messages for UI - const normalizedMessages = normalizeMessagesForUI(chat.messages || []) + const normalizedMessages = normalizeMessagesForUI(chat.messages ?? []) const toolCallsById = buildToolCallsById(normalizedMessages) set({ @@ -2229,7 +336,7 @@ export const useCopilotStore = create()( // Background-save the previous chat's latest messages, plan artifact, and config before switching (optimistic) try { if (previousChat && previousChat.id !== chat.id) { - const dbMessages = validateMessagesForLLM(previousMessages) + const dbMessages = serializeMessagesForDB(previousMessages, get().sensitiveCredentialIds) const previousPlanArtifact = get().streamingPlanContent fetch('/api/copilot/chat/update-messages', { method: 'POST', @@ -2255,13 +362,13 @@ export const useCopilotStore = create()( if (data.success && Array.isArray(data.chats)) { const latestChat = data.chats.find((c: CopilotChat) => c.id === chat.id) if (latestChat) { - const normalizedMessages = normalizeMessagesForUI(latestChat.messages || []) + const normalizedMessages = normalizeMessagesForUI(latestChat.messages ?? []) const toolCallsById = buildToolCallsById(normalizedMessages) set({ currentChat: latestChat, messages: normalizedMessages, - chats: (get().chats || []).map((c: CopilotChat) => + chats: (get().chats ?? []).map((c: CopilotChat) => c.id === chat.id ? 
latestChat : c ), toolCallsById, @@ -2289,7 +396,7 @@ export const useCopilotStore = create()( const { currentChat, streamingPlanContent, mode, selectedModel } = get() if (currentChat) { const currentMessages = get().messages - const dbMessages = validateMessagesForLLM(currentMessages) + const dbMessages = serializeMessagesForDB(currentMessages, get().sensitiveCredentialIds) fetch('/api/copilot/chat/update-messages', { method: 'POST', headers: { 'Content-Type': 'application/json' }, @@ -2345,8 +452,6 @@ export const useCopilotStore = create()( } }, - areChatsFresh: (_workflowId: string) => false, - loadChats: async (_forceRefresh = false) => { const { workflowId } = get() @@ -2385,11 +490,11 @@ export const useCopilotStore = create()( if (isSendingMessage) { set({ currentChat: { ...updatedCurrentChat, messages: get().messages } }) } else { - const normalizedMessages = normalizeMessagesForUI(updatedCurrentChat.messages || []) + const normalizedMessages = normalizeMessagesForUI(updatedCurrentChat.messages ?? []) // Restore plan artifact and config from refreshed chat const refreshedPlanArtifact = updatedCurrentChat.planArtifact || '' - const refreshedConfig = updatedCurrentChat.config || {} + const refreshedConfig = updatedCurrentChat.config ?? {} const refreshedMode = refreshedConfig.mode || get().mode const refreshedModel = refreshedConfig.model || get().selectedModel const toolCallsById = buildToolCallsById(normalizedMessages) @@ -2408,11 +513,11 @@ export const useCopilotStore = create()( } catch {} } else if (!isSendingMessage && !suppressAutoSelect) { const mostRecentChat: CopilotChat = data.chats[0] - const normalizedMessages = normalizeMessagesForUI(mostRecentChat.messages || []) + const normalizedMessages = normalizeMessagesForUI(mostRecentChat.messages ?? 
[]) // Restore plan artifact and config from most recent chat const planArtifact = mostRecentChat.planArtifact || '' - const chatConfig = mostRecentChat.config || {} + const chatConfig = mostRecentChat.config ?? {} const chatMode = chatConfig.mode || get().mode const chatModel = chatConfig.model || get().selectedModel @@ -2762,7 +867,7 @@ export const useCopilotStore = create()( if (response.ok) { const data = await response.json() if (data.success && data.chat) { - const normalizedMessages = normalizeMessagesForUI(data.chat.messages || []) + const normalizedMessages = normalizeMessagesForUI(data.chat.messages ?? []) const toolCallsById = buildToolCallsById(normalizedMessages) set({ currentChat: data.chat, @@ -2809,7 +914,7 @@ export const useCopilotStore = create()( resumeFromEventId = entry.eventId } } - bufferedContent = replayContext.accumulatedContent.toString() + bufferedContent = replayContext.accumulatedContent replayBlocks = replayContext.contentBlocks logger.info('[Copilot] Loaded buffered content instantly', { eventCount: batchData.events.length, @@ -2855,7 +960,7 @@ export const useCopilotStore = create()( return { ...m, content: stripContinueOption(m.content || ''), - contentBlocks: stripContinueOptionFromBlocks(m.contentBlocks || []), + contentBlocks: stripContinueOptionFromBlocks(m.contentBlocks ?? []), } }) @@ -2905,7 +1010,7 @@ export const useCopilotStore = create()( return { ...m, content: bufferedContent, - contentBlocks: nextBlocks || [], + contentBlocks: nextBlocks ?? 
[], } }) } @@ -3040,7 +1145,7 @@ export const useCopilotStore = create()( if (currentChat) { try { const currentMessages = get().messages - const dbMessages = validateMessagesForLLM(currentMessages) + const dbMessages = serializeMessagesForDB(currentMessages, get().sensitiveCredentialIds) fetch('/api/copilot/chat/update-messages', { method: 'POST', headers: { 'Content-Type': 'application/json' }, @@ -3242,7 +1347,7 @@ export const useCopilotStore = create()( const blocks = m.contentBlocks.map((b: any) => { if (b.type === 'tool_call' && b.toolCall?.id === id) { changed = true - const prev = b.toolCall || {} + const prev = b.toolCall ?? {} return { ...b, toolCall: { @@ -3277,14 +1382,6 @@ export const useCopilotStore = create()( } catch {} }, - sendDocsMessage: async (query: string) => { - await get().sendMessage(query) - }, - - saveChatMessages: async (_chatId: string) => {}, - - loadCheckpoints: async (_chatId: string) => set({ checkpoints: [] }), - loadMessageCheckpoints: async (chatId: string) => { const { workflowId } = get() if (!workflowId) return @@ -3296,7 +1393,7 @@ export const useCopilotStore = create()( if (data.success && Array.isArray(data.checkpoints)) { const grouped = data.checkpoints.reduce((acc: Record, cp: any) => { const key = cp.messageId || '__no_message__' - acc[key] = acc[key] || [] + acc[key] = acc[key] ?? [] acc[key].push(cp) return acc }, {}) @@ -3320,7 +1417,7 @@ export const useCopilotStore = create()( try { const { messageCheckpoints } = get() const checkpointMessageId = Object.entries(messageCheckpoints).find(([, cps]) => - (cps || []).some((cp: any) => cp?.id === checkpointId) + (cps ?? 
[]).some((cp: any) => cp?.id === checkpointId) )?.[0] const response = await fetch('/api/copilot/checkpoints/revert', { method: 'POST', @@ -3341,19 +1438,19 @@ export const useCopilotStore = create()( // Apply to main workflow store useWorkflowStore.setState({ - blocks: reverted.blocks || {}, - edges: reverted.edges || [], - loops: reverted.loops || {}, - parallels: reverted.parallels || {}, + blocks: reverted.blocks ?? {}, + edges: reverted.edges ?? [], + loops: reverted.loops ?? {}, + parallels: reverted.parallels ?? {}, lastSaved: reverted.lastSaved || Date.now(), - deploymentStatuses: reverted.deploymentStatuses || {}, + deploymentStatuses: reverted.deploymentStatuses ?? {}, }) // Extract and apply subblock values const values: Record> = {} - Object.entries(reverted.blocks || {}).forEach(([blockId, block]: [string, any]) => { + Object.entries(reverted.blocks ?? {}).forEach(([blockId, block]: [string, any]) => { values[blockId] = {} - Object.entries((block as any).subBlocks || {}).forEach( + Object.entries((block as any).subBlocks ?? {}).forEach( ([subId, sub]: [string, any]) => { values[blockId][subId] = (sub as any)?.value } @@ -3383,7 +1480,7 @@ export const useCopilotStore = create()( }, getCheckpointsForMessage: (messageId: string) => { const { messageCheckpoints } = get() - return messageCheckpoints[messageId] || [] + return messageCheckpoints[messageId] ?? 
[] }, saveMessageCheckpoint: async (messageId: string) => { if (!messageId) return false @@ -3424,19 +1521,19 @@ export const useCopilotStore = create()( if (existingBlocks.length > 0) { const existingText = extractTextFromBlocks(existingBlocks) if (existingText) { - context.accumulatedContent.append(existingText) + context.accumulatedContent += existingText } const clonedBlocks = cloneContentBlocks(existingBlocks) context.contentBlocks = clonedBlocks context.currentTextBlock = findLastTextBlock(clonedBlocks) } else if (existingMessage.content) { - const textBlock = contentBlockPool.get() + const textBlock = { type: '', content: '', timestamp: 0, toolCall: null } textBlock.type = TEXT_BLOCK_TYPE textBlock.content = existingMessage.content textBlock.timestamp = Date.now() context.contentBlocks = [textBlock] context.currentTextBlock = textBlock - context.accumulatedContent.append(existingMessage.content) + context.accumulatedContent += existingMessage.content } } } @@ -3447,7 +1544,7 @@ export const useCopilotStore = create()( }, 600000) try { - for await (const data of parseSSEStream(reader, decoder)) { + for await (const data of parseSSEStream(reader, decoder, abortSignal)) { if (abortSignal?.aborted) { context.wasAborted = true const { suppressAbortContinueOption } = get() @@ -3462,8 +1559,9 @@ export const useCopilotStore = create()( break } - const eventId = typeof data?.eventId === 'number' ? data.eventId : undefined - const streamId = typeof data?.streamId === 'string' ? data.streamId : undefined + const eventMeta = data as { eventId?: unknown; streamId?: unknown } + const eventId = typeof eventMeta.eventId === 'number' ? eventMeta.eventId : undefined + const streamId = typeof eventMeta.streamId === 'string' ? 
eventMeta.streamId : undefined if (expectedStreamId && streamId && streamId !== expectedStreamId) { logger.warn('[SSE] Ignoring event for mismatched stream', { expectedStreamId, @@ -3495,15 +1593,11 @@ export const useCopilotStore = create()( sseHandlers.stream_end({}, context, get, set) } - if (streamingUpdateRAF !== null) { - cancelAnimationFrame(streamingUpdateRAF) - streamingUpdateRAF = null - } - streamingUpdateQueue.clear() + stopStreamingUpdates() let sanitizedContentBlocks: any[] = [] if (context.contentBlocks && context.contentBlocks.length > 0) { - const optimizedBlocks = createOptimizedContentBlocks(context.contentBlocks) + const optimizedBlocks = context.contentBlocks.map((block: any) => ({ ...block })) sanitizedContentBlocks = optimizedBlocks.map((block: any) => block.type === TEXT_BLOCK_TYPE && typeof block.content === 'string' ? { ...block, content: stripTodoTags(block.content) } @@ -3524,15 +1618,7 @@ export const useCopilotStore = create()( } } - if (context.contentBlocks) { - context.contentBlocks.forEach((block) => { - if (block.type === TEXT_BLOCK_TYPE || block.type === THINKING_BLOCK_TYPE) { - contentBlockPool.release(block) - } - }) - } - - const finalContent = stripTodoTags(context.accumulatedContent.toString()) + const finalContent = stripTodoTags(context.accumulatedContent) const finalContentStripped = isContinuation ? stripContinueOption(finalContent) : finalContent @@ -3616,10 +1702,10 @@ export const useCopilotStore = create()( contentLength: lastMsg.content?.length || 0, hasContentBlocks: !!lastMsg.contentBlocks, contentBlockCount: lastMsg.contentBlocks?.length || 0, - contentBlockTypes: (lastMsg.contentBlocks as any[])?.map((b) => b?.type) || [], + contentBlockTypes: (lastMsg.contentBlocks as any[])?.map((b) => b?.type) ?? 
[], }) } - const dbMessages = validateMessagesForLLM(currentMessages) + const dbMessages = serializeMessagesForDB(currentMessages, get().sensitiveCredentialIds) const config = { mode, model: selectedModel, @@ -3701,7 +1787,7 @@ export const useCopilotStore = create()( set({ currentChat: newChat, - chats: [newChat, ...(get().chats || [])], + chats: [newChat, ...(get().chats ?? [])], chatsLastLoadedAt: null, chatsLoadedForWorkflow: null, planTodos: [], @@ -3714,16 +1800,10 @@ export const useCopilotStore = create()( clearError: () => set({ error: null }), clearSaveError: () => set({ saveError: null }), clearCheckpointError: () => set({ checkpointError: null }), - retrySave: async (_chatId: string) => {}, - cleanup: () => { const { isSendingMessage } = get() if (isSendingMessage) get().abortMessage() - if (streamingUpdateRAF !== null) { - cancelAnimationFrame(streamingUpdateRAF) - streamingUpdateRAF = null - } - streamingUpdateQueue.clear() + stopStreamingUpdates() // Clear any diff on cleanup try { useWorkflowDiffStore.getState().clearDiff() @@ -3765,7 +1845,7 @@ export const useCopilotStore = create()( if (currentChat) { try { const currentMessages = get().messages - const dbMessages = validateMessagesForLLM(currentMessages) + const dbMessages = serializeMessagesForDB(currentMessages, get().sensitiveCredentialIds) const { mode, selectedModel } = get() await fetch('/api/copilot/chat/update-messages', { @@ -3807,7 +1887,7 @@ export const useCopilotStore = create()( if (currentChat) { try { const currentMessages = get().messages - const dbMessages = validateMessagesForLLM(currentMessages) + const dbMessages = serializeMessagesForDB(currentMessages, get().sensitiveCredentialIds) const { mode, selectedModel } = get() await fetch('/api/copilot/chat/update-messages', { @@ -3855,7 +1935,7 @@ export const useCopilotStore = create()( logger.info('[AutoAllowedTools] Load response', { status: res.status, ok: res.ok }) if (res.ok) { const data = await res.json() - const tools = 
data.autoAllowedTools || [] + const tools = data.autoAllowedTools ?? [] set({ autoAllowedTools: tools }) logger.info('[AutoAllowedTools] Loaded successfully', { count: tools.length, tools }) } else { @@ -3878,7 +1958,7 @@ export const useCopilotStore = create()( if (res.ok) { const data = await res.json() logger.info('[AutoAllowedTools] API returned', { toolId, tools: data.autoAllowedTools }) - set({ autoAllowedTools: data.autoAllowedTools || [] }) + set({ autoAllowedTools: data.autoAllowedTools ?? [] }) logger.info('[AutoAllowedTools] Added tool to store', { toolId }) } } catch (err) { @@ -3896,7 +1976,7 @@ export const useCopilotStore = create()( ) if (res.ok) { const data = await res.json() - set({ autoAllowedTools: data.autoAllowedTools || [] }) + set({ autoAllowedTools: data.autoAllowedTools ?? [] }) logger.info('[AutoAllowedTools] Removed tool', { toolId }) } } catch (err) { @@ -3923,7 +2003,7 @@ export const useCopilotStore = create()( } const json = await res.json() // Credentials are at result.oauth.connected.credentials - const credentials = json?.result?.oauth?.connected?.credentials || [] + const credentials = json?.result?.oauth?.connected?.credentials ?? 
[] logger.info('[loadSensitiveCredentialIds] Response', { hasResult: !!json?.result, credentialCount: credentials.length, diff --git a/apps/sim/stores/panel/copilot/types.ts b/apps/sim/stores/panel/copilot/types.ts index 9357ebd6bd..07e77ea604 100644 --- a/apps/sim/stores/panel/copilot/types.ts +++ b/apps/sim/stores/panel/copilot/types.ts @@ -122,7 +122,6 @@ export interface CopilotState { messages: CopilotMessage[] workflowId: string | null - checkpoints: any[] messageCheckpoints: Record messageSnapshots: Record @@ -187,7 +186,6 @@ export interface CopilotActions { setWorkflowId: (workflowId: string | null) => Promise validateCurrentChat: () => boolean loadChats: (forceRefresh?: boolean) => Promise - areChatsFresh: (workflowId: string) => boolean selectChat: (chat: CopilotChat) => Promise createNewChat: () => Promise deleteChat: (chatId: string) => Promise @@ -214,10 +212,6 @@ export interface CopilotActions { resumeActiveStream: () => Promise setToolCallState: (toolCall: any, newState: ClientToolCallState, options?: any) => void updateToolCallParams: (toolCallId: string, params: Record) => void - sendDocsMessage: (query: string, options?: { stream?: boolean; topK?: number }) => Promise - saveChatMessages: (chatId: string) => Promise - - loadCheckpoints: (chatId: string) => Promise loadMessageCheckpoints: (chatId: string) => Promise revertToCheckpoint: (checkpointId: string) => Promise getCheckpointsForMessage: (messageId: string) => any[] @@ -227,7 +221,6 @@ export interface CopilotActions { clearError: () => void clearSaveError: () => void clearCheckpointError: () => void - retrySave: (chatId: string) => Promise cleanup: () => void reset: () => void diff --git a/apps/sim/stores/workflow-diff/store.ts b/apps/sim/stores/workflow-diff/store.ts index 5b9ba8b6bf..116fa83d77 100644 --- a/apps/sim/stores/workflow-diff/store.ts +++ b/apps/sim/stores/workflow-diff/store.ts @@ -1,4 +1,10 @@ import { createLogger } from '@sim/logger' + +declare global { + interface Window 
{ + __skipDiffRecording?: boolean + } +} import { create } from 'zustand' import { devtools } from 'zustand/middleware' import { stripWorkflowDiffMarkers, WorkflowDiffEngine } from '@/lib/workflows/diff' @@ -21,6 +27,17 @@ import { const logger = createLogger('WorkflowDiffStore') const diffEngine = new WorkflowDiffEngine() +const RESET_DIFF_STATE = { + hasActiveDiff: false, + isShowingDiff: false, + isDiffReady: false, + baselineWorkflow: null, + baselineWorkflowId: null, + diffAnalysis: null, + diffMetadata: null, + diffError: null, + _triggerMessageId: null, +} /** * Detects when a diff contains no meaningful changes. @@ -104,17 +121,7 @@ export const useWorkflowDiffStore = create { @@ -301,17 +298,7 @@ export const useWorkflowDiffStore = create Date: Thu, 5 Feb 2026 16:19:52 -0800 Subject: [PATCH 26/72] Refactor --- ...ex-function-inventory-edit-workflow.ts.txt | 35 + ...-inventory-get-blocks-metadata-tool.ts.txt | 21 + ...function-inventory-process-contents.ts.txt | 13 + apps/sim/app/api/copilot/chat/route.ts | 349 +--- apps/sim/app/api/copilot/confirm/route.ts | 5 +- .../copilot-message/copilot-message.tsx | 6 +- apps/sim/hooks/use-undo-redo.ts | 30 +- apps/sim/lib/copilot/api.ts | 9 +- apps/sim/lib/copilot/chat-context.ts | 63 + apps/sim/lib/copilot/chat-lifecycle.ts | 69 + apps/sim/lib/copilot/chat-payload.ts | 252 +++ .../lib/copilot/client-sse/content-blocks.ts | 33 +- apps/sim/lib/copilot/client-sse/handlers.ts | 154 +- .../copilot/client-sse/subagent-handlers.ts | 65 +- apps/sim/lib/copilot/client-sse/types.ts | 28 +- apps/sim/lib/copilot/constants.ts | 104 ++ apps/sim/lib/copilot/messages/checkpoints.ts | 5 +- .../copilot/messages/credential-masking.ts | 19 +- apps/sim/lib/copilot/messages/index.ts | 1 + apps/sim/lib/copilot/messages/persist.ts | 43 + .../sim/lib/copilot/messages/serialization.ts | 35 +- .../lib/copilot/orchestrator/persistence.ts | 3 +- .../orchestrator/sse-handlers/handlers.ts | 58 +- .../sse-handlers/tool-execution.ts | 14 +- 
.../sim/lib/copilot/orchestrator/sse-utils.ts | 48 +- .../lib/copilot/orchestrator/stream-buffer.ts | 13 +- .../lib/copilot/orchestrator/stream-core.ts | 3 +- apps/sim/lib/copilot/orchestrator/subagent.ts | 15 +- .../tool-executor/deployment-tools/manage.ts | 12 +- .../orchestrator/tool-executor/index.ts | 6 +- .../tool-executor/integration-tools.ts | 6 +- .../tool-executor/workflow-tools/mutations.ts | 29 +- .../tool-executor/workflow-tools/queries.ts | 88 +- apps/sim/lib/copilot/orchestrator/types.ts | 14 +- apps/sim/lib/copilot/store-utils.ts | 43 +- .../server/blocks/get-blocks-metadata-tool.ts | 9 +- .../tools/server/user/get-credentials.ts | 8 +- .../server/workflow/get-workflow-console.ts | 7 +- apps/sim/stores/panel/copilot/store.ts | 1646 +++++++++-------- apps/sim/stores/panel/copilot/types.ts | 35 +- apps/sim/stores/workflow-diff/store.ts | 48 +- apps/sim/stores/workflow-diff/types.ts | 11 +- apps/sim/stores/workflow-diff/utils.ts | 21 +- 43 files changed, 2112 insertions(+), 1364 deletions(-) create mode 100644 apps/sim/.codex-function-inventory-edit-workflow.ts.txt create mode 100644 apps/sim/.codex-function-inventory-get-blocks-metadata-tool.ts.txt create mode 100644 apps/sim/.codex-function-inventory-process-contents.ts.txt create mode 100644 apps/sim/lib/copilot/chat-context.ts create mode 100644 apps/sim/lib/copilot/chat-lifecycle.ts create mode 100644 apps/sim/lib/copilot/chat-payload.ts create mode 100644 apps/sim/lib/copilot/messages/persist.ts diff --git a/apps/sim/.codex-function-inventory-edit-workflow.ts.txt b/apps/sim/.codex-function-inventory-edit-workflow.ts.txt new file mode 100644 index 0000000000..e77b30fbe1 --- /dev/null +++ b/apps/sim/.codex-function-inventory-edit-workflow.ts.txt @@ -0,0 +1,35 @@ +# lib/copilot/tools/server/workflow/edit-workflow.ts + 90-98 ( 9 lines) [function] logSkippedItem + 103-113 ( 11 lines) [function] findBlockWithDuplicateNormalizedName + 127-196 ( 70 lines) [function] validateInputsForBlock + 211-463 ( 253 
lines) [function] validateValueForSubBlockType + 481-566 ( 86 lines) [function] topologicalSortInserts + 571-684 ( 114 lines) [function] createBlockFromParams + 686-716 ( 31 lines) [function] updateCanonicalModesForInputs + 721-762 ( 42 lines) [function] normalizeTools + 786-804 ( 19 lines) [function] normalizeArrayWithIds + 809-811 ( 3 lines) [function] shouldNormalizeArrayIds + 818-859 ( 42 lines) [function] normalizeResponseFormat + 834-847 ( 14 lines) [arrow] sortKeys + 871-945 ( 75 lines) [function] validateSourceHandleForBlock + 956-1051 ( 96 lines) [function] validateConditionHandle +1062-1136 ( 75 lines) [function] validateRouterHandle +1141-1149 ( 9 lines) [function] validateTargetHandle +1155-1261 ( 107 lines) [function] createValidatedEdge +1270-1307 ( 38 lines) [function] addConnectionsAsEdges +1280-1291 ( 12 lines) [arrow] addEdgeForTarget +1309-1339 ( 31 lines) [function] applyTriggerConfigToBlockSubblocks +1353-1361 ( 9 lines) [function] isBlockTypeAllowed +1367-1404 ( 38 lines) [function] filterDisallowedTools +1413-1499 ( 87 lines) [function] normalizeBlockIdsInOperations +1441-1444 ( 4 lines) [arrow] replaceId +1504-2676 (1173 lines) [function] applyOperationsToWorkflowState +1649-1656 ( 8 lines) [arrow] findChildren +2055-2059 ( 5 lines) [arrow] mapConnectionTypeToHandle +2063-2074 ( 12 lines) [arrow] addEdgeForTarget +2682-2777 ( 96 lines) [function] validateWorkflowSelectorIds +2786-3066 ( 281 lines) [function] preValidateCredentialInputs +2820-2845 ( 26 lines) [function] collectCredentialInputs +2850-2870 ( 21 lines) [function] collectHostedApiKeyInput +3068-3117 ( 50 lines) [function] getCurrentWorkflowStateFromDb +3121-3333 ( 213 lines) [method] .execute diff --git a/apps/sim/.codex-function-inventory-get-blocks-metadata-tool.ts.txt b/apps/sim/.codex-function-inventory-get-blocks-metadata-tool.ts.txt new file mode 100644 index 0000000000..61d57991b4 --- /dev/null +++ b/apps/sim/.codex-function-inventory-get-blocks-metadata-tool.ts.txt @@ 
-0,0 +1,21 @@ +# lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts + 108-306 ( 199 lines) [method] .execute + 309-384 ( 76 lines) [function] transformBlockMetadata + 386-459 ( 74 lines) [function] extractInputs + 461-503 ( 43 lines) [function] extractOperationInputs + 505-518 ( 14 lines) [function] extractOutputs + 520-538 ( 19 lines) [function] formatOutputsFromDefinition + 540-563 ( 24 lines) [function] mapSchemaTypeToSimpleType + 565-591 ( 27 lines) [function] generateInputExample + 593-669 ( 77 lines) [function] processSubBlock + 671-679 ( 9 lines) [function] resolveAuthType + 686-702 ( 17 lines) [function] getStaticModelOptions + 712-754 ( 43 lines) [function] callOptionsWithFallback + 756-806 ( 51 lines) [function] resolveSubblockOptions + 808-820 ( 13 lines) [function] removeNullish + 822-832 ( 11 lines) [function] normalizeCondition + 834-872 ( 39 lines) [function] splitParametersByOperation + 874-905 ( 32 lines) [function] computeBlockLevelInputs + 907-935 ( 29 lines) [function] computeOperationLevelInputs + 937-947 ( 11 lines) [function] resolveOperationIds + 949-961 ( 13 lines) [function] resolveToolIdForOperation diff --git a/apps/sim/.codex-function-inventory-process-contents.ts.txt b/apps/sim/.codex-function-inventory-process-contents.ts.txt new file mode 100644 index 0000000000..82e8de18e7 --- /dev/null +++ b/apps/sim/.codex-function-inventory-process-contents.ts.txt @@ -0,0 +1,13 @@ +# lib/copilot/process-contents.ts + 31-81 ( 51 lines) [function] processContexts + 84-161 ( 78 lines) [function] processContextsServer + 163-208 ( 46 lines) [function] sanitizeMessageForDocs + 210-248 ( 39 lines) [function] processPastChatFromDb + 250-281 ( 32 lines) [function] processWorkflowFromDb + 283-316 ( 34 lines) [function] processPastChat + 319-321 ( 3 lines) [function] processPastChatViaApi + 323-362 ( 40 lines) [function] processKnowledgeFromDb + 364-439 ( 76 lines) [function] processBlockMetadata + 441-473 ( 33 lines) [function] 
processTemplateFromDb + 475-498 ( 24 lines) [function] processWorkflowBlockFromDb + 500-555 ( 56 lines) [function] processExecutionLogFromDb diff --git a/apps/sim/app/api/copilot/chat/route.ts b/apps/sim/app/api/copilot/chat/route.ts index 2a4cb4fe62..a048e31d23 100644 --- a/apps/sim/app/api/copilot/chat/route.ts +++ b/apps/sim/app/api/copilot/chat/route.ts @@ -6,8 +6,10 @@ import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { getSession } from '@/lib/auth' import { generateChatTitle } from '@/lib/copilot/chat-title' +import { buildConversationHistory } from '@/lib/copilot/chat-context' +import { resolveOrCreateChat } from '@/lib/copilot/chat-lifecycle' +import { buildCopilotRequestPayload } from '@/lib/copilot/chat-payload' import { getCopilotModel } from '@/lib/copilot/config' -import { SIM_AGENT_VERSION } from '@/lib/copilot/constants' import { COPILOT_MODEL_IDS, COPILOT_REQUEST_MODES } from '@/lib/copilot/models' import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator' import { @@ -22,14 +24,8 @@ import { createRequestTracker, createUnauthorizedResponse, } from '@/lib/copilot/request-helpers' -import { getCredentialsServerTool } from '@/lib/copilot/tools/server/user/get-credentials' -import type { CopilotProviderConfig } from '@/lib/copilot/types' import { env } from '@/lib/core/config/env' -import { CopilotFiles } from '@/lib/uploads' -import { createFileContent } from '@/lib/uploads/utils/file-utils' import { resolveWorkflowIdForUser } from '@/lib/workflows/utils' -import { tools } from '@/tools/registry' -import { getLatestVersionTools, stripVersionSuffix } from '@/tools/utils' const logger = createLogger('CopilotChatAPI') @@ -178,319 +174,66 @@ export async function POST(req: NextRequest) { let conversationHistory: any[] = [] let actualChatId = chatId - if (chatId) { - // Load existing chat - const [chat] = await db - .select() - .from(copilotChats) - .where(and(eq(copilotChats.id, chatId), 
eq(copilotChats.userId, authenticatedUserId))) - .limit(1) - - if (chat) { - currentChat = chat - conversationHistory = Array.isArray(chat.messages) ? chat.messages : [] - } - } else if (createNewChat && workflowId) { - // Create new chat - const { provider, model } = getCopilotModel('chat') - const [newChat] = await db - .insert(copilotChats) - .values({ - userId: authenticatedUserId, - workflowId, - title: null, - model, - messages: [], - }) - .returning() - - if (newChat) { - currentChat = newChat - actualChatId = newChat.id - } - } - - // Process file attachments if present - const processedFileContents: any[] = [] - if (fileAttachments && fileAttachments.length > 0) { - const processedAttachments = await CopilotFiles.processCopilotAttachments( - fileAttachments, - tracker.requestId - ) - - for (const { buffer, attachment } of processedAttachments) { - const fileContent = createFileContent(buffer, attachment.media_type) - if (fileContent) { - processedFileContents.push(fileContent) - } - } - } - - // Build messages array for sim agent with conversation history - const messages: any[] = [] - - // Add conversation history (need to rebuild these with file support if they had attachments) - for (const msg of conversationHistory) { - if (msg.fileAttachments && msg.fileAttachments.length > 0) { - // This is a message with file attachments - rebuild with content array - const content: any[] = [{ type: 'text', text: msg.content }] - - const processedHistoricalAttachments = await CopilotFiles.processCopilotAttachments( - msg.fileAttachments, - tracker.requestId - ) - - for (const { buffer, attachment } of processedHistoricalAttachments) { - const fileContent = createFileContent(buffer, attachment.media_type) - if (fileContent) { - content.push(fileContent) - } - } - - messages.push({ - role: msg.role, - content, - }) - } else { - // Regular text-only message - messages.push({ - role: msg.role, - content: msg.content, - }) - } - } - - // Add implicit feedback if provided 
- if (implicitFeedback) { - messages.push({ - role: 'system', - content: implicitFeedback, - }) - } - - // Add current user message with file attachments - if (processedFileContents.length > 0) { - // Message with files - use content array format - const content: any[] = [{ type: 'text', text: message }] - - // Add file contents - for (const fileContent of processedFileContents) { - content.push(fileContent) - } - - messages.push({ - role: 'user', - content, - }) - } else { - // Text-only message - messages.push({ - role: 'user', - content: message, + if (chatId || createNewChat) { + const defaultsForChatRow = getCopilotModel('chat') + const chatResult = await resolveOrCreateChat({ + chatId, + userId: authenticatedUserId, + workflowId, + model: defaultsForChatRow.model, }) + currentChat = chatResult.chat + actualChatId = chatResult.chatId || chatId + const history = buildConversationHistory( + chatResult.conversationHistory, + (chatResult.chat?.conversationId as string | undefined) || conversationId + ) + conversationHistory = history.history } const defaults = getCopilotModel('chat') const selectedModel = model || defaults.model - const envModel = env.COPILOT_MODEL || defaults.model - - let providerConfig: CopilotProviderConfig | undefined - const providerEnv = env.COPILOT_PROVIDER as any - - if (providerEnv) { - if (providerEnv === 'azure-openai') { - providerConfig = { - provider: 'azure-openai', - model: envModel, - apiKey: env.AZURE_OPENAI_API_KEY, - apiVersion: 'preview', - endpoint: env.AZURE_OPENAI_ENDPOINT, - } - } else if (providerEnv === 'azure-anthropic') { - providerConfig = { - provider: 'azure-anthropic', - model: envModel, - apiKey: env.AZURE_ANTHROPIC_API_KEY, - apiVersion: env.AZURE_ANTHROPIC_API_VERSION, - endpoint: env.AZURE_ANTHROPIC_ENDPOINT, - } - } else if (providerEnv === 'vertex') { - providerConfig = { - provider: 'vertex', - model: envModel, - apiKey: env.COPILOT_API_KEY, - vertexProject: env.VERTEX_PROJECT, - vertexLocation: 
env.VERTEX_LOCATION, - } - } else { - providerConfig = { - provider: providerEnv, - model: selectedModel, - apiKey: env.COPILOT_API_KEY, - } - } - } - const effectiveMode = mode === 'agent' ? 'build' : mode - const transportMode = effectiveMode === 'build' ? 'agent' : effectiveMode - - // Determine conversationId to use for this request const effectiveConversationId = (currentChat?.conversationId as string | undefined) || conversationId - // For agent/build mode, fetch credentials and build tool definitions - let integrationTools: any[] = [] - let baseTools: any[] = [] - let credentials: { - oauth: Record< - string, - { accessToken: string; accountId: string; name: string; expiresAt?: string } - > - apiKeys: string[] - metadata?: { - connectedOAuth: Array<{ provider: string; name: string; scopes?: string[] }> - configuredApiKeys: string[] - } - } | null = null - - if (effectiveMode === 'build') { - // Build base tools (executed locally, not deferred) - // Include function_execute for code execution capability - baseTools = [ - { - name: 'function_execute', - description: - 'Execute JavaScript code to perform calculations, data transformations, API calls, or any programmatic task. Code runs in a secure sandbox with fetch() available. Write plain statements (not wrapped in functions). Example: const res = await fetch(url); const data = await res.json(); return data;', - input_schema: { - type: 'object', - properties: { - code: { - type: 'string', - description: - 'Raw JavaScript statements to execute. Code is auto-wrapped in async context. Use fetch() for HTTP requests. 
Write like: const res = await fetch(url); return await res.json();', - }, - }, - required: ['code'], - }, - executeLocally: true, - }, - ] - // Fetch user credentials (OAuth + API keys) - pass workflowId to get workspace env vars - try { - const rawCredentials = await getCredentialsServerTool.execute( - { workflowId }, - { userId: authenticatedUserId } - ) - - // Transform OAuth credentials to map format: { [provider]: { accessToken, accountId, ... } } - const oauthMap: Record< - string, - { accessToken: string; accountId: string; name: string; expiresAt?: string } - > = {} - const connectedOAuth: Array<{ provider: string; name: string; scopes?: string[] }> = [] - for (const cred of rawCredentials?.oauth?.connected?.credentials || []) { - if (cred.accessToken) { - oauthMap[cred.provider] = { - accessToken: cred.accessToken, - accountId: cred.id, - name: cred.name, - } - connectedOAuth.push({ - provider: cred.provider, - name: cred.name, - }) - } - } - - credentials = { - oauth: oauthMap, - apiKeys: rawCredentials?.environment?.variableNames || [], - metadata: { - connectedOAuth, - configuredApiKeys: rawCredentials?.environment?.variableNames || [], - }, - } - - logger.info(`[${tracker.requestId}] Fetched credentials for build mode`, { - oauthProviders: Object.keys(oauthMap), - apiKeyCount: credentials.apiKeys.length, - }) - } catch (error) { - logger.warn(`[${tracker.requestId}] Failed to fetch credentials`, { - error: error instanceof Error ? 
error.message : String(error), - }) - } - - // Build tool definitions (schemas only) - try { - const { createUserToolSchema } = await import('@/tools/params') - - const latestTools = getLatestVersionTools(tools) - - integrationTools = Object.entries(latestTools).map(([toolId, toolConfig]) => { - const userSchema = createUserToolSchema(toolConfig) - const strippedName = stripVersionSuffix(toolId) - return { - name: strippedName, - description: toolConfig.description || toolConfig.name || strippedName, - input_schema: userSchema, - defer_loading: true, // Anthropic Advanced Tool Use - ...(toolConfig.oauth?.required && { - oauth: { - required: true, - provider: toolConfig.oauth.provider, - }, - }), - } - }) - - logger.info(`[${tracker.requestId}] Built tool definitions for build mode`, { - integrationToolCount: integrationTools.length, - }) - } catch (error) { - logger.warn(`[${tracker.requestId}] Failed to build tool definitions`, { - error: error instanceof Error ? error.message : String(error), - }) + const requestPayload = await buildCopilotRequestPayload( + { + message, + workflowId, + userId: authenticatedUserId, + userMessageId: userMessageIdToUse, + mode, + model: selectedModel, + stream, + conversationId: effectiveConversationId, + conversationHistory, + contexts: agentContexts, + fileAttachments, + commands, + chatId: actualChatId, + prefetch, + userName: session?.user?.name || undefined, + implicitFeedback, + }, + { + selectedModel, } - } - - const requestPayload = { - message: message, // Just send the current user message text - workflowId, - userId: authenticatedUserId, - stream: stream, - streamToolCalls: true, - model: selectedModel, - mode: transportMode, - messageId: userMessageIdToUse, - version: SIM_AGENT_VERSION, - ...(providerConfig ? { provider: providerConfig } : {}), - ...(effectiveConversationId ? { conversationId: effectiveConversationId } : {}), - ...(typeof prefetch === 'boolean' ? 
{ prefetch: prefetch } : {}), - ...(session?.user?.name && { userName: session.user.name }), - ...(agentContexts.length > 0 && { context: agentContexts }), - ...(actualChatId ? { chatId: actualChatId } : {}), - ...(processedFileContents.length > 0 && { fileAttachments: processedFileContents }), - // For build/agent mode, include tools and credentials - ...(integrationTools.length > 0 && { tools: integrationTools }), - ...(baseTools.length > 0 && { baseTools }), - ...(credentials && { credentials }), - ...(commands && commands.length > 0 && { commands }), - } + ) try { logger.info(`[${tracker.requestId}] About to call Sim Agent`, { hasContext: agentContexts.length > 0, contextCount: agentContexts.length, hasConversationId: !!effectiveConversationId, - hasFileAttachments: processedFileContents.length > 0, + hasFileAttachments: Array.isArray(requestPayload.fileAttachments), messageLength: message.length, mode: effectiveMode, - hasTools: integrationTools.length > 0, - toolCount: integrationTools.length, - hasBaseTools: baseTools.length > 0, - baseToolCount: baseTools.length, - hasCredentials: !!credentials, + hasTools: Array.isArray(requestPayload.tools), + toolCount: Array.isArray(requestPayload.tools) ? requestPayload.tools.length : 0, + hasBaseTools: Array.isArray(requestPayload.baseTools), + baseToolCount: Array.isArray(requestPayload.baseTools) ? 
requestPayload.baseTools.length : 0, + hasCredentials: !!requestPayload.credentials, }) } catch {} @@ -631,7 +374,7 @@ export async function POST(req: NextRequest) { content: nonStreamingResult.content, toolCalls: nonStreamingResult.toolCalls, model: selectedModel, - provider: providerConfig?.provider || env.COPILOT_PROVIDER || 'openai', + provider: (requestPayload?.provider as Record)?.provider || env.COPILOT_PROVIDER || 'openai', } logger.info(`[${tracker.requestId}] Non-streaming response from orchestrator:`, { diff --git a/apps/sim/app/api/copilot/confirm/route.ts b/apps/sim/app/api/copilot/confirm/route.ts index 01b6672a38..eb63b75246 100644 --- a/apps/sim/app/api/copilot/confirm/route.ts +++ b/apps/sim/app/api/copilot/confirm/route.ts @@ -1,6 +1,7 @@ import { createLogger } from '@sim/logger' import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' +import { REDIS_TOOL_CALL_PREFIX, REDIS_TOOL_CALL_TTL_SECONDS } from '@/lib/copilot/constants' import { authenticateCopilotRequestSessionOnly, createBadRequestResponse, @@ -38,13 +39,13 @@ async function updateToolCallStatus( } try { - const key = `tool_call:${toolCallId}` + const key = `${REDIS_TOOL_CALL_PREFIX}${toolCallId}` const payload = { status, message: message || null, timestamp: new Date().toISOString(), } - await redis.set(key, JSON.stringify(payload), 'EX', 86400) + await redis.set(key, JSON.stringify(payload), 'EX', REDIS_TOOL_CALL_TTL_SECONDS) return true } catch (error) { logger.error('Failed to update tool call status', { diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/copilot-message.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/copilot-message.tsx index 1e745f3f28..187ff15948 100644 --- 
a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/copilot-message.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/copilot-message.tsx @@ -211,7 +211,7 @@ const CopilotMessage: FC = memo( if (block.type === 'text') { const isLastTextBlock = index === message.contentBlocks!.length - 1 && block.type === 'text' - const parsed = parseSpecialTags(block.content) + const parsed = parseSpecialTags(block.content ?? '') // Mask credential IDs in the displayed content const cleanBlockContent = maskCredentialValue( parsed.cleanContent.replace(/\n{3,}/g, '\n\n') @@ -243,7 +243,7 @@ const CopilotMessage: FC = memo( return (
= memo(
) } - if (block.type === 'tool_call') { + if (block.type === 'tool_call' && block.toolCall) { const blockKey = `tool-${block.toolCall.id}` return ( diff --git a/apps/sim/hooks/use-undo-redo.ts b/apps/sim/hooks/use-undo-redo.ts index 252f0785a6..10873859e4 100644 --- a/apps/sim/hooks/use-undo-redo.ts +++ b/apps/sim/hooks/use-undo-redo.ts @@ -1,5 +1,11 @@ import { useCallback } from 'react' import { createLogger } from '@sim/logger' + +declare global { + interface Window { + __skipDiffRecording?: boolean + } +} import type { Edge } from 'reactflow' import { useSession } from '@/lib/auth/auth-client' import { enqueueReplaceWorkflowState } from '@/lib/workflows/operations/socket-operations' @@ -908,7 +914,7 @@ export function useUndoRedo() { // Set flag to skip recording during this operation - ;(window as any).__skipDiffRecording = true + ;window.__skipDiffRecording = true try { // Restore baseline state and broadcast to everyone if (baselineSnapshot && activeWorkflowId) { @@ -945,7 +951,7 @@ export function useUndoRedo() { logger.info('Clearing diff UI state') useWorkflowDiffStore.getState().clearDiff({ restoreBaseline: false }) } finally { - ;(window as any).__skipDiffRecording = false + ;window.__skipDiffRecording = false } logger.info('Undid apply-diff operation successfully') @@ -965,7 +971,7 @@ export function useUndoRedo() { // Set flag to skip recording during this operation - ;(window as any).__skipDiffRecording = true + ;window.__skipDiffRecording = true try { // Apply the before-accept state (with markers for this user) useWorkflowStore.getState().replaceWorkflowState(beforeAccept) @@ -1004,7 +1010,7 @@ export function useUndoRedo() { diffAnalysis: diffAnalysis, }) } finally { - ;(window as any).__skipDiffRecording = false + ;window.__skipDiffRecording = false } logger.info('Undid accept-diff operation - restored diff view') @@ -1018,7 +1024,7 @@ export function useUndoRedo() { const { useWorkflowStore } = await import('@/stores/workflows/workflow/store') 
const { useSubBlockStore } = await import('@/stores/workflows/subblock/store') - ;(window as any).__skipDiffRecording = true + ;window.__skipDiffRecording = true try { // Apply the before-reject state (with markers for this user) useWorkflowStore.getState().replaceWorkflowState(beforeReject) @@ -1055,7 +1061,7 @@ export function useUndoRedo() { diffAnalysis: diffAnalysis, }) } finally { - ;(window as any).__skipDiffRecording = false + ;window.__skipDiffRecording = false } logger.info('Undid reject-diff operation - restored diff view') @@ -1526,7 +1532,7 @@ export function useUndoRedo() { // Set flag to skip recording during this operation - ;(window as any).__skipDiffRecording = true + ;window.__skipDiffRecording = true try { // Manually apply the proposed state and set up diff store (similar to setProposedChanges but with original baseline) const diffStore = useWorkflowDiffStore.getState() @@ -1567,7 +1573,7 @@ export function useUndoRedo() { diffAnalysis: diffAnalysis, }) } finally { - ;(window as any).__skipDiffRecording = false + ;window.__skipDiffRecording = false } logger.info('Redid apply-diff operation') @@ -1583,7 +1589,7 @@ export function useUndoRedo() { // Set flag to skip recording during this operation - ;(window as any).__skipDiffRecording = true + ;window.__skipDiffRecording = true try { // Clear diff state FIRST to prevent flash of colors (local UI only) // Use setState directly to ensure synchronous clearing @@ -1621,7 +1627,7 @@ export function useUndoRedo() { operationId: opId, }) } finally { - ;(window as any).__skipDiffRecording = false + ;window.__skipDiffRecording = false } logger.info('Redid accept-diff operation - cleared diff view') @@ -1635,7 +1641,7 @@ export function useUndoRedo() { const { useWorkflowStore } = await import('@/stores/workflows/workflow/store') const { useSubBlockStore } = await import('@/stores/workflows/subblock/store') - ;(window as any).__skipDiffRecording = true + ;window.__skipDiffRecording = true try { // Clear 
diff state FIRST to prevent flash of colors (local UI only) // Use setState directly to ensure synchronous clearing @@ -1673,7 +1679,7 @@ export function useUndoRedo() { operationId: opId, }) } finally { - ;(window as any).__skipDiffRecording = false + ;window.__skipDiffRecording = false } logger.info('Redid reject-diff operation - cleared diff view') diff --git a/apps/sim/lib/copilot/api.ts b/apps/sim/lib/copilot/api.ts index 089d6bac72..19d0f6f7bc 100644 --- a/apps/sim/lib/copilot/api.ts +++ b/apps/sim/lib/copilot/api.ts @@ -1,4 +1,5 @@ import { createLogger } from '@sim/logger' +import { COPILOT_CHAT_API_PATH, COPILOT_CHAT_STREAM_API_PATH } from '@/lib/copilot/constants' import type { CopilotMode, CopilotModelId, CopilotTransportMode } from '@/lib/copilot/models' const logger = createLogger('CopilotAPI') @@ -139,7 +140,9 @@ export async function sendStreamingMessage( contextsPreview: preview, resumeFromEventId, }) - } catch {} + } catch (error) { + logger.warn('Failed to log streaming message context preview', { error: error instanceof Error ? 
error.message : String(error) }) + } const streamId = request.userMessageId if (typeof resumeFromEventId === 'number') { @@ -150,7 +153,7 @@ export async function sendStreamingMessage( status: 400, } } - const url = `/api/copilot/chat/stream?streamId=${encodeURIComponent( + const url = `${COPILOT_CHAT_STREAM_API_PATH}?streamId=${encodeURIComponent( streamId )}&from=${encodeURIComponent(String(resumeFromEventId))}` const response = await fetch(url, { @@ -182,7 +185,7 @@ export async function sendStreamingMessage( } } - const response = await fetch('/api/copilot/chat', { + const response = await fetch(COPILOT_CHAT_API_PATH, { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ ...requestBody, stream: true }), diff --git a/apps/sim/lib/copilot/chat-context.ts b/apps/sim/lib/copilot/chat-context.ts new file mode 100644 index 0000000000..d1377eb4ac --- /dev/null +++ b/apps/sim/lib/copilot/chat-context.ts @@ -0,0 +1,63 @@ +import { createLogger } from '@sim/logger' +import { CopilotFiles } from '@/lib/uploads' +import { createFileContent } from '@/lib/uploads/utils/file-utils' + +const logger = createLogger('CopilotChatContext') + +/** + * Build conversation history from stored chat messages. + */ +export function buildConversationHistory( + messages: unknown[], + conversationId?: string +): { history: unknown[]; conversationId?: string } { + const history = Array.isArray(messages) ? messages : [] + return { + history, + ...(conversationId ? { conversationId } : {}), + } +} + +export interface FileAttachmentInput { + id: string + key: string + name?: string + filename?: string + mimeType?: string + media_type?: string + size: number +} + +export interface FileContent { + type: string + [key: string]: unknown +} + +/** + * Process file attachments into content for the payload. 
+ */ +export async function processFileAttachments( + fileAttachments: FileAttachmentInput[], + userId: string +): Promise { + if (!Array.isArray(fileAttachments) || fileAttachments.length === 0) return [] + + const processedFileContents: FileContent[] = [] + const requestId = `copilot-${userId}-${Date.now()}` + const processedAttachments = await CopilotFiles.processCopilotAttachments(fileAttachments as Parameters[0], requestId) + + for (const { buffer, attachment } of processedAttachments) { + const fileContent = createFileContent(buffer, attachment.media_type) + if (fileContent) { + processedFileContents.push(fileContent as FileContent) + } + } + + logger.debug('Processed file attachments for payload', { + userId, + inputCount: fileAttachments.length, + outputCount: processedFileContents.length, + }) + + return processedFileContents +} diff --git a/apps/sim/lib/copilot/chat-lifecycle.ts b/apps/sim/lib/copilot/chat-lifecycle.ts new file mode 100644 index 0000000000..5d25eee242 --- /dev/null +++ b/apps/sim/lib/copilot/chat-lifecycle.ts @@ -0,0 +1,69 @@ +import { db } from '@sim/db' +import { copilotChats } from '@sim/db/schema' +import { createLogger } from '@sim/logger' +import { and, eq } from 'drizzle-orm' + +const logger = createLogger('CopilotChatLifecycle') + +export interface ChatLoadResult { + chatId: string + chat: typeof copilotChats.$inferSelect | null + conversationHistory: unknown[] + isNew: boolean +} + +/** + * Resolve or create a copilot chat session. + * If chatId is provided, loads the existing chat. Otherwise creates a new one. + */ +export async function resolveOrCreateChat(params: { + chatId?: string + userId: string + workflowId: string + model: string +}): Promise { + const { chatId, userId, workflowId, model } = params + + if (chatId) { + const [chat] = await db + .select() + .from(copilotChats) + .where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, userId))) + .limit(1) + + return { + chatId, + chat: chat ?? 
null, + conversationHistory: chat && Array.isArray(chat.messages) ? chat.messages : [], + isNew: false, + } + } + + const [newChat] = await db + .insert(copilotChats) + .values({ + userId, + workflowId, + title: null, + model, + messages: [], + }) + .returning() + + if (!newChat) { + logger.warn('Failed to create new copilot chat row', { userId, workflowId }) + return { + chatId: '', + chat: null, + conversationHistory: [], + isNew: true, + } + } + + return { + chatId: newChat.id, + chat: newChat, + conversationHistory: [], + isNew: true, + } +} diff --git a/apps/sim/lib/copilot/chat-payload.ts b/apps/sim/lib/copilot/chat-payload.ts new file mode 100644 index 0000000000..7883f4234d --- /dev/null +++ b/apps/sim/lib/copilot/chat-payload.ts @@ -0,0 +1,252 @@ +import { createLogger } from '@sim/logger' +import { env } from '@/lib/core/config/env' +import { getCopilotModel } from '@/lib/copilot/config' +import { SIM_AGENT_VERSION } from '@/lib/copilot/constants' +import { getCredentialsServerTool } from '@/lib/copilot/tools/server/user/get-credentials' +import type { CopilotProviderConfig } from '@/lib/copilot/types' +import { tools } from '@/tools/registry' +import { getLatestVersionTools, stripVersionSuffix } from '@/tools/utils' +import { type FileContent, processFileAttachments } from '@/lib/copilot/chat-context' + +const logger = createLogger('CopilotChatPayload') + +export interface BuildPayloadParams { + message: string + workflowId: string + userId: string + userMessageId: string + mode: string + model: string + stream: boolean + conversationId?: string + conversationHistory?: unknown[] + contexts?: Array<{ type: string; content: string }> + fileAttachments?: Array<{ id: string; key: string; size: number; [key: string]: unknown }> + commands?: string[] + chatId?: string + prefetch?: boolean + userName?: string + implicitFeedback?: string +} + +interface ToolSchema { + name: string + description: string + input_schema: Record + defer_loading?: boolean + 
executeLocally?: boolean + oauth?: { required: boolean; provider: string } +} + +interface CredentialsPayload { + oauth: Record + apiKeys: string[] + metadata?: { + connectedOAuth: Array<{ provider: string; name: string; scopes?: string[] }> + configuredApiKeys: string[] + } +} + +type MessageContent = string | Array<{ type: string; text?: string; [key: string]: unknown }> + +interface ConversationMessage { + role: string + content: MessageContent +} + +function buildProviderConfig(selectedModel: string): CopilotProviderConfig | undefined { + const defaults = getCopilotModel('chat') + const envModel = env.COPILOT_MODEL || defaults.model + const providerEnv = env.COPILOT_PROVIDER + + if (!providerEnv) return undefined + + if (providerEnv === 'azure-openai') { + return { + provider: 'azure-openai', + model: envModel, + apiKey: env.AZURE_OPENAI_API_KEY, + apiVersion: 'preview', + endpoint: env.AZURE_OPENAI_ENDPOINT, + } + } + + if (providerEnv === 'vertex') { + return { + provider: 'vertex', + model: envModel, + apiKey: env.COPILOT_API_KEY, + vertexProject: env.VERTEX_PROJECT, + vertexLocation: env.VERTEX_LOCATION, + } + } + + return { + provider: providerEnv as Exclude, + model: selectedModel, + apiKey: env.COPILOT_API_KEY, + } as CopilotProviderConfig +} + +/** + * Build the request payload for the copilot backend. + */ +export async function buildCopilotRequestPayload( + params: BuildPayloadParams, + options: { + providerConfig?: CopilotProviderConfig + selectedModel: string + } +): Promise> { + const { + message, workflowId, userId, userMessageId, mode, stream, + conversationId, conversationHistory = [], contexts, fileAttachments, + commands, chatId, prefetch, userName, implicitFeedback, + } = params + + const selectedModel = options.selectedModel + const providerConfig = options.providerConfig ?? buildProviderConfig(selectedModel) + + const effectiveMode = mode === 'agent' ? 'build' : mode + const transportMode = effectiveMode === 'build' ? 
'agent' : effectiveMode + + const processedFileContents = await processFileAttachments(fileAttachments ?? [], userId) + + const messages: ConversationMessage[] = [] + for (const msg of conversationHistory as Array>) { + const msgAttachments = msg.fileAttachments as Array> | undefined + if (Array.isArray(msgAttachments) && msgAttachments.length > 0) { + const content: Array<{ type: string; text?: string; [key: string]: unknown }> = [ + { type: 'text', text: msg.content as string }, + ] + const processedHistoricalAttachments = await processFileAttachments(msgAttachments as BuildPayloadParams['fileAttachments'] ?? [], userId) + for (const fileContent of processedHistoricalAttachments) { + content.push(fileContent) + } + messages.push({ role: msg.role as string, content }) + } else { + messages.push({ role: msg.role as string, content: msg.content as string }) + } + } + + if (implicitFeedback) { + messages.push({ role: 'system', content: implicitFeedback }) + } + + if (processedFileContents.length > 0) { + const content: Array<{ type: string; text?: string; [key: string]: unknown }> = [ + { type: 'text', text: message }, + ] + for (const fileContent of processedFileContents) { + content.push(fileContent) + } + messages.push({ role: 'user', content }) + } else { + messages.push({ role: 'user', content: message }) + } + + let integrationTools: ToolSchema[] = [] + let baseTools: ToolSchema[] = [] + let credentials: CredentialsPayload | null = null + + if (effectiveMode === 'build') { + baseTools = [ + { + name: 'function_execute', + description: + 'Execute JavaScript code to perform calculations, data transformations, API calls, or any programmatic task. Code runs in a secure sandbox with fetch() available. Write plain statements (not wrapped in functions). 
Example: const res = await fetch(url); const data = await res.json(); return data;', + input_schema: { + type: 'object', + properties: { + code: { + type: 'string', + description: + 'Raw JavaScript statements to execute. Code is auto-wrapped in async context. Use fetch() for HTTP requests. Write like: const res = await fetch(url); return await res.json();', + }, + }, + required: ['code'], + }, + executeLocally: true, + }, + ] + + try { + const rawCredentials = await getCredentialsServerTool.execute({ workflowId }, { userId }) + + const oauthMap: CredentialsPayload['oauth'] = {} + const connectedOAuth: Array<{ provider: string; name: string; scopes?: string[] }> = [] + for (const cred of rawCredentials?.oauth?.connected?.credentials ?? []) { + if (cred.accessToken) { + oauthMap[cred.provider] = { + accessToken: cred.accessToken, + accountId: cred.id, + name: cred.name, + } + connectedOAuth.push({ provider: cred.provider, name: cred.name }) + } + } + + credentials = { + oauth: oauthMap, + apiKeys: rawCredentials?.environment?.variableNames ?? [], + metadata: { + connectedOAuth, + configuredApiKeys: rawCredentials?.environment?.variableNames ?? [], + }, + } + } catch (error) { + logger.warn('Failed to fetch credentials for build payload', { + error: error instanceof Error ? 
error.message : String(error), + }) + } + + try { + const { createUserToolSchema } = await import('@/tools/params') + const latestTools = getLatestVersionTools(tools) + + integrationTools = Object.entries(latestTools).map(([toolId, toolConfig]) => { + const userSchema = createUserToolSchema(toolConfig) + const strippedName = stripVersionSuffix(toolId) + return { + name: strippedName, + description: toolConfig.description || toolConfig.name || strippedName, + input_schema: userSchema as unknown as Record, + defer_loading: true, + ...(toolConfig.oauth?.required && { + oauth: { + required: true, + provider: toolConfig.oauth.provider, + }, + }), + } + }) + } catch (error) { + logger.warn('Failed to build tool schemas for payload', { + error: error instanceof Error ? error.message : String(error), + }) + } + } + + return { + message, + workflowId, + userId, + stream, + streamToolCalls: true, + model: selectedModel, + mode: transportMode, + messageId: userMessageId, + version: SIM_AGENT_VERSION, + ...(providerConfig ? { provider: providerConfig } : {}), + ...(conversationId ? { conversationId } : {}), + ...(typeof prefetch === 'boolean' ? { prefetch } : {}), + ...(userName ? { userName } : {}), + ...(contexts && contexts.length > 0 ? { context: contexts } : {}), + ...(chatId ? { chatId } : {}), + ...(processedFileContents.length > 0 ? { fileAttachments: processedFileContents } : {}), + ...(integrationTools.length > 0 ? { tools: integrationTools } : {}), + ...(baseTools.length > 0 ? { baseTools } : {}), + ...(credentials ? { credentials } : {}), + ...(commands && commands.length > 0 ? 
{ commands } : {}), + } +} diff --git a/apps/sim/lib/copilot/client-sse/content-blocks.ts b/apps/sim/lib/copilot/client-sse/content-blocks.ts index c2ee72458b..1ce416bc65 100644 --- a/apps/sim/lib/copilot/client-sse/content-blocks.ts +++ b/apps/sim/lib/copilot/client-sse/content-blocks.ts @@ -3,7 +3,7 @@ import type { CopilotMessage, MessageFileAttachment, } from '@/stores/panel/copilot/types' -import type { StreamingContext } from './types' +import type { ClientContentBlock, ClientStreamingContext } from './types' const TEXT_BLOCK_TYPE = 'text' const THINKING_BLOCK_TYPE = 'thinking' @@ -25,8 +25,8 @@ export function createUserMessage( ...(contexts && contexts.length > 0 && { contentBlocks: [ - { type: 'contexts', contexts: contexts as any, timestamp: Date.now() }, - ] as any, + { type: 'contexts', contexts, timestamp: Date.now() }, + ], }), } } @@ -61,7 +61,7 @@ export function createErrorMessage( } } -export function appendTextBlock(context: StreamingContext, text: string) { +export function appendTextBlock(context: ClientStreamingContext, text: string) { if (!text) return context.accumulatedContent += text if (context.currentTextBlock && context.contentBlocks.length > 0) { @@ -71,11 +71,9 @@ export function appendTextBlock(context: StreamingContext, text: string) { return } } - context.currentTextBlock = { type: '', content: '', timestamp: 0, toolCall: null } - context.currentTextBlock.type = TEXT_BLOCK_TYPE - context.currentTextBlock.content = text - context.currentTextBlock.timestamp = Date.now() - context.contentBlocks.push(context.currentTextBlock) + const newBlock: ClientContentBlock = { type: 'text', content: text, timestamp: Date.now() } + context.currentTextBlock = newBlock + context.contentBlocks.push(newBlock) } export function appendContinueOption(content: string): string { @@ -84,7 +82,7 @@ export function appendContinueOption(content: string): string { return `${content}${suffix}${CONTINUE_OPTIONS_TAG}` } -export function 
appendContinueOptionBlock(blocks: any[]): any[] { +export function appendContinueOptionBlock(blocks: ClientContentBlock[]): ClientContentBlock[] { if (!Array.isArray(blocks)) return blocks const hasOptions = blocks.some( (block) => @@ -109,7 +107,7 @@ export function stripContinueOption(content: string): string { return next.replace(/\n{2,}\s*$/g, '\n').trimEnd() } -export function stripContinueOptionFromBlocks(blocks: any[]): any[] { +export function stripContinueOptionFromBlocks(blocks: ClientContentBlock[]): ClientContentBlock[] { if (!Array.isArray(blocks)) return blocks return blocks.flatMap((block) => { if ( @@ -125,20 +123,17 @@ export function stripContinueOptionFromBlocks(blocks: any[]): any[] { }) } -export function beginThinkingBlock(context: StreamingContext) { +export function beginThinkingBlock(context: ClientStreamingContext) { if (!context.currentThinkingBlock) { - context.currentThinkingBlock = { type: '', content: '', timestamp: 0, toolCall: null } - context.currentThinkingBlock.type = THINKING_BLOCK_TYPE - context.currentThinkingBlock.content = '' - context.currentThinkingBlock.timestamp = Date.now() - ;(context.currentThinkingBlock as any).startTime = Date.now() - context.contentBlocks.push(context.currentThinkingBlock) + const newBlock: ClientContentBlock = { type: 'thinking', content: '', timestamp: Date.now(), startTime: Date.now() } + context.currentThinkingBlock = newBlock + context.contentBlocks.push(newBlock) } context.isInThinkingBlock = true context.currentTextBlock = null } -export function finalizeThinkingBlock(context: StreamingContext) { +export function finalizeThinkingBlock(context: ClientStreamingContext) { if (context.currentThinkingBlock) { context.currentThinkingBlock.duration = Date.now() - (context.currentThinkingBlock.startTime || Date.now()) diff --git a/apps/sim/lib/copilot/client-sse/handlers.ts b/apps/sim/lib/copilot/client-sse/handlers.ts index 169917578b..4845431637 100644 --- 
a/apps/sim/lib/copilot/client-sse/handlers.ts +++ b/apps/sim/lib/copilot/client-sse/handlers.ts @@ -1,28 +1,30 @@ import { createLogger } from '@sim/logger' +import { STREAM_STORAGE_KEY } from '@/lib/copilot/constants' +import type { SSEEvent } from '@/lib/copilot/orchestrator/types' +import { asRecord } from '@/lib/copilot/orchestrator/sse-utils' import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' -import { useWorkflowDiffStore } from '@/stores/workflow-diff/store' -import type { CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types' -import { - appendTextBlock, - beginThinkingBlock, - finalizeThinkingBlock, -} from './content-blocks' -import type { StreamingContext } from './types' import { isBackgroundState, isRejectedState, isReviewState, resolveToolDisplay, } from '@/lib/copilot/store-utils' +import { useWorkflowDiffStore } from '@/stores/workflow-diff/store' +import type { CopilotStore, CopilotStreamInfo, CopilotToolCall } from '@/stores/panel/copilot/types' +import { + appendTextBlock, + beginThinkingBlock, + finalizeThinkingBlock, +} from './content-blocks' +import type { ClientContentBlock, ClientStreamingContext } from './types' const logger = createLogger('CopilotClientSseHandlers') -const STREAM_STORAGE_KEY = 'copilot_active_stream' const TEXT_BLOCK_TYPE = 'text' const MAX_BATCH_INTERVAL = 50 const MIN_BATCH_INTERVAL = 16 const MAX_QUEUE_SIZE = 5 -function writeActiveStreamToStorage(info: any): void { +function writeActiveStreamToStorage(info: CopilotStreamInfo | null): void { if (typeof window === 'undefined') return try { if (!info) { @@ -30,17 +32,25 @@ function writeActiveStreamToStorage(info: any): void { return } window.sessionStorage.setItem(STREAM_STORAGE_KEY, JSON.stringify(info)) - } catch {} + } catch (error) { + logger.warn('Failed to write active stream to storage', { + error: error instanceof Error ? 
error.message : String(error), + }) + } } +type StoreSet = ( + partial: Partial | ((state: CopilotStore) => Partial) +) => void + export type SSEHandler = ( - data: any, - context: StreamingContext, + data: SSEEvent, + context: ClientStreamingContext, get: () => CopilotStore, - set: any + set: StoreSet ) => Promise | void -const streamingUpdateQueue = new Map() +const streamingUpdateQueue = new Map() let streamingUpdateRAF: number | null = null let lastBatchTime = 0 @@ -52,8 +62,8 @@ export function stopStreamingUpdates() { streamingUpdateQueue.clear() } -function createOptimizedContentBlocks(contentBlocks: any[]): any[] { - const result: any[] = new Array(contentBlocks.length) +function createOptimizedContentBlocks(contentBlocks: ClientContentBlock[]): ClientContentBlock[] { + const result: ClientContentBlock[] = new Array(contentBlocks.length) for (let i = 0; i < contentBlocks.length; i++) { const block = contentBlocks[i] result[i] = { ...block } @@ -61,7 +71,7 @@ function createOptimizedContentBlocks(contentBlocks: any[]): any[] { return result } -export function flushStreamingUpdates(set: any) { +export function flushStreamingUpdates(set: StoreSet) { if (streamingUpdateRAF !== null) { cancelAnimationFrame(streamingUpdateRAF) streamingUpdateRAF = null @@ -90,7 +100,7 @@ export function flushStreamingUpdates(set: any) { }) } -export function updateStreamingMessage(set: any, context: StreamingContext) { +export function updateStreamingMessage(set: StoreSet, context: ClientStreamingContext) { if (context.suppressStreamingUpdates) return const now = performance.now() streamingUpdateQueue.set(context.messageId, context) @@ -146,10 +156,10 @@ export function updateStreamingMessage(set: any, context: StreamingContext) { } } -export function upsertToolCallBlock(context: StreamingContext, toolCall: CopilotToolCall) { +export function upsertToolCallBlock(context: ClientStreamingContext, toolCall: CopilotToolCall) { let found = false for (let i = 0; i < 
context.contentBlocks.length; i++) { - const b = context.contentBlocks[i] as any + const b = context.contentBlocks[i] if (b.type === 'tool_call' && b.toolCall?.id === toolCall.id) { context.contentBlocks[i] = { ...b, toolCall } found = true @@ -165,19 +175,16 @@ function stripThinkingTags(text: string): string { return text.replace(/<\/?thinking[^>]*>/gi, '').replace(/<\/?thinking[^&]*>/gi, '') } -function appendThinkingContent(context: StreamingContext, text: string) { +function appendThinkingContent(context: ClientStreamingContext, text: string) { if (!text) return const cleanedText = stripThinkingTags(text) if (!cleanedText) return if (context.currentThinkingBlock) { context.currentThinkingBlock.content += cleanedText } else { - context.currentThinkingBlock = { type: '', content: '', timestamp: 0, toolCall: null } - context.currentThinkingBlock.type = 'thinking' - context.currentThinkingBlock.content = cleanedText - context.currentThinkingBlock.timestamp = Date.now() - context.currentThinkingBlock.startTime = Date.now() - context.contentBlocks.push(context.currentThinkingBlock) + const newBlock: ClientContentBlock = { type: 'thinking', content: cleanedText, timestamp: Date.now(), startTime: Date.now() } + context.currentThinkingBlock = newBlock + context.contentBlocks.push(newBlock) } context.isInThinkingBlock = true context.currentTextBlock = null @@ -209,10 +216,12 @@ export const sseHandlers: Record = { }, tool_result: (data, context, get, set) => { try { - const toolCallId: string | undefined = data?.toolCallId || data?.data?.id + const eventData = asRecord(data?.data) + const toolCallId: string | undefined = data?.toolCallId || (eventData.id as string | undefined) const success: boolean | undefined = data?.success const failedDependency: boolean = data?.failedDependency === true - const skipped: boolean = data?.result?.skipped === true + const resultObj = asRecord(data?.result) + const skipped: boolean = resultObj.skipped === true if (!toolCallId) return 
const { toolCallsById } = get() const current = toolCallsById[toolCallId] @@ -233,24 +242,24 @@ export const sseHandlers: Record = { updatedMap[toolCallId] = { ...current, state: targetState, - display: resolveToolDisplay( - current.name, - targetState, - current.id, - (current as any).params - ), + display: resolveToolDisplay(current.name, targetState, current.id, current.params), } set({ toolCallsById: updatedMap }) if (targetState === ClientToolCallState.success && current.name === 'checkoff_todo') { try { - const result = (data?.result || data?.data?.result) ?? {} - const input = ((current as any).params || (current as any).input) ?? {} - const todoId = input.id || input.todoId || result.id || result.todoId + const result = asRecord(data?.result) || asRecord(eventData.result) + const input = asRecord(current.params || current.input) + const todoId = (input.id || input.todoId || result.id || result.todoId) as string | undefined if (todoId) { get().updatePlanTodoStatus(todoId, 'completed') } - } catch {} + } catch (error) { + logger.warn('Failed to process checkoff_todo tool result', { + error: error instanceof Error ? error.message : String(error), + toolCallId, + }) + } } if ( @@ -258,28 +267,35 @@ export const sseHandlers: Record = { current.name === 'mark_todo_in_progress' ) { try { - const result = (data?.result || data?.data?.result) ?? {} - const input = ((current as any).params || (current as any).input) ?? {} - const todoId = input.id || input.todoId || result.id || result.todoId + const result = asRecord(data?.result) || asRecord(eventData.result) + const input = asRecord(current.params || current.input) + const todoId = (input.id || input.todoId || result.id || result.todoId) as string | undefined if (todoId) { get().updatePlanTodoStatus(todoId, 'executing') } - } catch {} + } catch (error) { + logger.warn('Failed to process mark_todo_in_progress tool result', { + error: error instanceof Error ? 
error.message : String(error), + toolCallId, + }) + } } if (current.name === 'edit_workflow') { try { - const resultPayload = - (data?.result || data?.data?.result || data?.data?.data || data?.data) ?? {} - const workflowState = resultPayload?.workflowState + const resultPayload = asRecord( + data?.result || eventData.result || eventData.data || data?.data + ) + const workflowState = asRecord(resultPayload?.workflowState) + const hasWorkflowState = !!resultPayload?.workflowState logger.info('[SSE] edit_workflow result received', { - hasWorkflowState: !!workflowState, - blockCount: workflowState ? Object.keys(workflowState.blocks ?? {}).length : 0, - edgeCount: workflowState?.edges?.length ?? 0, + hasWorkflowState, + blockCount: hasWorkflowState ? Object.keys(workflowState.blocks ?? {}).length : 0, + edgeCount: Array.isArray(workflowState.edges) ? workflowState.edges.length : 0, }) - if (workflowState) { + if (hasWorkflowState) { const diffStore = useWorkflowDiffStore.getState() - diffStore.setProposedChanges(workflowState).catch((err) => { + diffStore.setProposedChanges(resultPayload.workflowState).catch((err) => { logger.error('[SSE] Failed to apply edit_workflow diff', { error: err instanceof Error ? err.message : String(err), }) @@ -294,7 +310,7 @@ export const sseHandlers: Record = { } for (let i = 0; i < context.contentBlocks.length; i++) { - const b = context.contentBlocks[i] as any + const b = context.contentBlocks[i] if (b?.type === 'tool_call' && b?.toolCall?.id === toolCallId) { if ( isRejectedState(b.toolCall?.state) || @@ -324,11 +340,16 @@ export const sseHandlers: Record = { } } updateStreamingMessage(set, context) - } catch {} + } catch (error) { + logger.warn('Failed to process tool_result SSE event', { + error: error instanceof Error ? 
error.message : String(error), + }) + } }, tool_error: (data, context, get, set) => { try { - const toolCallId: string | undefined = data?.toolCallId || data?.data?.id + const errorData = asRecord(data?.data) + const toolCallId: string | undefined = data?.toolCallId || (errorData.id as string | undefined) const failedDependency: boolean = data?.failedDependency === true if (!toolCallId) return const { toolCallsById } = get() @@ -348,17 +369,12 @@ export const sseHandlers: Record = { updatedMap[toolCallId] = { ...current, state: targetState, - display: resolveToolDisplay( - current.name, - targetState, - current.id, - (current as any).params - ), + display: resolveToolDisplay(current.name, targetState, current.id, current.params), } set({ toolCallsById: updatedMap }) } for (let i = 0; i < context.contentBlocks.length; i++) { - const b = context.contentBlocks[i] as any + const b = context.contentBlocks[i] if (b?.type === 'tool_call' && b?.toolCall?.id === toolCallId) { if ( isRejectedState(b.toolCall?.state) || @@ -386,7 +402,11 @@ export const sseHandlers: Record = { } } updateStreamingMessage(set, context) - } catch {} + } catch (error) { + logger.warn('Failed to process tool_error SSE event', { + error: error instanceof Error ? error.message : String(error), + }) + } }, tool_generating: (data, context, get, set) => { const { toolCallId, toolName } = data @@ -410,11 +430,11 @@ export const sseHandlers: Record = { } }, tool_call: (data, context, get, set) => { - const toolData = data?.data ?? 
{} - const id: string | undefined = toolData.id || data?.toolCallId - const name: string | undefined = toolData.name || data?.toolName + const toolData = asRecord(data?.data) + const id: string | undefined = (toolData.id as string | undefined) || data?.toolCallId + const name: string | undefined = (toolData.name as string | undefined) || data?.toolName if (!id) return - const args = toolData.arguments + const args = toolData.arguments as Record | undefined const isPartial = toolData.partial === true const { toolCallsById } = get() diff --git a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts index fa2fc2e1c6..e68a552b60 100644 --- a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts +++ b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts @@ -1,19 +1,25 @@ import { createLogger } from '@sim/logger' import { + asRecord, normalizeSseEvent, shouldSkipToolCallEvent, shouldSkipToolResultEvent, } from '@/lib/copilot/orchestrator/sse-utils' +import type { SSEEvent } from '@/lib/copilot/orchestrator/types' import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' import { resolveToolDisplay } from '@/lib/copilot/store-utils' import type { CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types' -import type { StreamingContext } from './types' +import type { ClientStreamingContext } from './types' import { sseHandlers, type SSEHandler, updateStreamingMessage } from './handlers' const logger = createLogger('CopilotClientSubagentHandlers') +type StoreSet = ( + partial: Partial | ((state: CopilotStore) => Partial) +) => void + export function appendSubAgentContent( - context: StreamingContext, + context: ClientStreamingContext, parentToolCallId: string, text: string ) { @@ -38,9 +44,9 @@ export function appendSubAgentContent( } export function updateToolCallWithSubAgentData( - context: StreamingContext, + context: ClientStreamingContext, get: () => CopilotStore, - set: any, + 
set: StoreSet, parentToolCallId: string ) { const { toolCallsById } = get() @@ -76,7 +82,7 @@ export function updateToolCallWithSubAgentData( let foundInContentBlocks = false for (let i = 0; i < context.contentBlocks.length; i++) { - const b = context.contentBlocks[i] as any + const b = context.contentBlocks[i] if (b.type === 'tool_call' && b.toolCall?.id === parentToolCallId) { context.contentBlocks[i] = { ...b, toolCall: updatedToolCall } foundInContentBlocks = true @@ -89,8 +95,8 @@ export function updateToolCallWithSubAgentData( parentToolCallId, contentBlocksCount: context.contentBlocks.length, toolCallBlockIds: context.contentBlocks - .filter((b: any) => b.type === 'tool_call') - .map((b: any) => b.toolCall?.id), + .filter((b) => b.type === 'tool_call') + .map((b) => b.toolCall?.id), }) } @@ -104,27 +110,29 @@ export const subAgentSSEHandlers: Record = { content: (data, context, get, set) => { const parentToolCallId = context.subAgentParentToolCallId + const contentStr = typeof data.data === 'string' ? data.data : (data.content || '') logger.info('[SubAgent] content event', { parentToolCallId, - hasData: !!data.data, - dataPreview: typeof data.data === 'string' ? data.data.substring(0, 50) : null, + hasData: !!contentStr, + dataPreview: contentStr ? 
contentStr.substring(0, 50) : null, }) - if (!parentToolCallId || !data.data) { + if (!parentToolCallId || !contentStr) { logger.warn('[SubAgent] content missing parentToolCallId or data', { parentToolCallId, - hasData: !!data.data, + hasData: !!contentStr, }) return } - appendSubAgentContent(context, parentToolCallId, data.data) + appendSubAgentContent(context, parentToolCallId, contentStr) updateToolCallWithSubAgentData(context, get, set, parentToolCallId) }, reasoning: (data, context, get, set) => { const parentToolCallId = context.subAgentParentToolCallId - const phase = data?.phase || data?.data?.phase + const dataObj = asRecord(data?.data) + const phase = data?.phase || (dataObj.phase as string | undefined) if (!parentToolCallId) return if (phase === 'start' || phase === 'end') return @@ -145,17 +153,18 @@ export const subAgentSSEHandlers: Record = { const parentToolCallId = context.subAgentParentToolCallId if (!parentToolCallId) return - const toolData = data?.data ?? {} - const id: string | undefined = toolData.id || data?.toolCallId - const name: string | undefined = toolData.name || data?.toolName + const toolData = asRecord(data?.data) + const id: string | undefined = (toolData.id as string | undefined) || data?.toolCallId + const name: string | undefined = (toolData.name as string | undefined) || data?.toolName if (!id || !name) return const isPartial = toolData.partial === true - let args = toolData.arguments || toolData.input || data?.arguments || data?.input + let args: Record | undefined = + (toolData.arguments || toolData.input) as Record | undefined if (typeof args === 'string') { try { - args = JSON.parse(args) + args = JSON.parse(args) as Record } catch { logger.warn('[SubAgent] Failed to parse arguments string', { args }) } @@ -177,7 +186,9 @@ export const subAgentSSEHandlers: Record = { context.subAgentBlocks[parentToolCallId] = [] } - const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex((tc) => tc.id === id) + const 
existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex( + (tc: CopilotToolCall) => tc.id === id + ) const subAgentToolCall: CopilotToolCall = { id, name, @@ -213,7 +224,8 @@ export const subAgentSSEHandlers: Record = { const parentToolCallId = context.subAgentParentToolCallId if (!parentToolCallId) return - const toolCallId: string | undefined = data?.toolCallId || data?.data?.id + const resultData = asRecord(data?.data) + const toolCallId: string | undefined = data?.toolCallId || (resultData.id as string | undefined) const success: boolean | undefined = data?.success !== false if (!toolCallId) return @@ -222,7 +234,7 @@ export const subAgentSSEHandlers: Record = { const targetState = success ? ClientToolCallState.success : ClientToolCallState.error const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex( - (tc) => tc.id === toolCallId + (tc: CopilotToolCall) => tc.id === toolCallId ) if (existingIndex >= 0) { @@ -268,19 +280,20 @@ export const subAgentSSEHandlers: Record = { } export async function applySseEvent( - data: any, - context: StreamingContext, + rawData: SSEEvent, + context: ClientStreamingContext, get: () => CopilotStore, set: (next: Partial | ((state: CopilotStore) => Partial)) => void ): Promise { - const normalizedEvent = normalizeSseEvent(data) + const normalizedEvent = normalizeSseEvent(rawData) if (shouldSkipToolCallEvent(normalizedEvent) || shouldSkipToolResultEvent(normalizedEvent)) { return true } - data = normalizedEvent + const data = normalizedEvent if (data.type === 'subagent_start') { - const toolCallId = data.data?.tool_call_id + const startData = asRecord(data.data) + const toolCallId = startData.tool_call_id as string | undefined if (toolCallId) { context.subAgentParentToolCallId = toolCallId const { toolCallsById } = get() diff --git a/apps/sim/lib/copilot/client-sse/types.ts b/apps/sim/lib/copilot/client-sse/types.ts index 82e5b99be9..8a4616a84e 100644 --- a/apps/sim/lib/copilot/client-sse/types.ts 
+++ b/apps/sim/lib/copilot/client-sse/types.ts @@ -1,12 +1,28 @@ -import type { CopilotToolCall } from '@/stores/panel/copilot/types' +import type { ChatContext, CopilotToolCall, SubAgentContentBlock } from '@/stores/panel/copilot/types' + +/** + * A content block used in copilot messages and during streaming. + * Uses a literal type union for `type` to stay compatible with CopilotMessage. + */ +export type ContentBlockType = 'text' | 'thinking' | 'tool_call' | 'contexts' + +export interface ClientContentBlock { + type: ContentBlockType + content?: string + timestamp: number + toolCall?: CopilotToolCall | null + startTime?: number + duration?: number + contexts?: ChatContext[] +} export interface StreamingContext { messageId: string accumulatedContent: string - contentBlocks: any[] - currentTextBlock: any | null + contentBlocks: ClientContentBlock[] + currentTextBlock: ClientContentBlock | null isInThinkingBlock: boolean - currentThinkingBlock: any | null + currentThinkingBlock: ClientContentBlock | null isInDesignWorkflowBlock: boolean designWorkflowContent: string pendingContent: string @@ -18,6 +34,8 @@ export interface StreamingContext { subAgentParentToolCallId?: string subAgentContent: Record subAgentToolCalls: Record - subAgentBlocks: Record + subAgentBlocks: Record suppressStreamingUpdates?: boolean } + +export type ClientStreamingContext = StreamingContext diff --git a/apps/sim/lib/copilot/constants.ts b/apps/sim/lib/copilot/constants.ts index 21e29cdbce..35c1acd2ce 100644 --- a/apps/sim/lib/copilot/constants.ts +++ b/apps/sim/lib/copilot/constants.ts @@ -9,3 +9,107 @@ export const SIM_AGENT_API_URL = rawAgentUrl.startsWith('http://') || rawAgentUrl.startsWith('https://') ? 
rawAgentUrl : SIM_AGENT_API_URL_DEFAULT + +// --------------------------------------------------------------------------- +// Redis key prefixes +// --------------------------------------------------------------------------- + +/** Redis key prefix for tool call confirmation payloads (polled by waitForToolDecision). */ +export const REDIS_TOOL_CALL_PREFIX = 'tool_call:' + +/** Redis key prefix for copilot SSE stream buffers. */ +export const REDIS_COPILOT_STREAM_PREFIX = 'copilot_stream:' + +// --------------------------------------------------------------------------- +// Timeouts +// --------------------------------------------------------------------------- + +/** Default timeout for the copilot orchestration stream loop (5 min). */ +export const ORCHESTRATION_TIMEOUT_MS = 300_000 + +/** Timeout for the client-side streaming response handler (10 min). */ +export const STREAM_TIMEOUT_MS = 600_000 + +/** TTL for Redis tool call confirmation entries (24 h). */ +export const REDIS_TOOL_CALL_TTL_SECONDS = 86_400 + +// --------------------------------------------------------------------------- +// Tool decision polling +// --------------------------------------------------------------------------- + +/** Initial poll interval when waiting for a user tool decision. */ +export const TOOL_DECISION_INITIAL_POLL_MS = 100 + +/** Maximum poll interval when waiting for a user tool decision. */ +export const TOOL_DECISION_MAX_POLL_MS = 3_000 + +/** Backoff multiplier for the tool decision poll interval. */ +export const TOOL_DECISION_POLL_BACKOFF = 1.5 + +// --------------------------------------------------------------------------- +// Stream resume +// --------------------------------------------------------------------------- + +/** Maximum number of resume attempts before giving up. */ +export const MAX_RESUME_ATTEMPTS = 3 + +/** SessionStorage key for persisting active stream metadata across page reloads. 
*/ +export const STREAM_STORAGE_KEY = 'copilot_active_stream' + +// --------------------------------------------------------------------------- +// Client-side streaming batching +// --------------------------------------------------------------------------- + +/** Delay (ms) before processing the next queued message after stream completion. */ +export const QUEUE_PROCESS_DELAY_MS = 100 + +/** Delay (ms) before invalidating subscription queries after stream completion. */ +export const SUBSCRIPTION_INVALIDATE_DELAY_MS = 1_000 + +// --------------------------------------------------------------------------- +// UI helpers +// --------------------------------------------------------------------------- + +/** Maximum character length for an optimistic chat title derived from a user message. */ +export const OPTIMISTIC_TITLE_MAX_LENGTH = 50 + +// --------------------------------------------------------------------------- +// Copilot API paths (client-side fetch targets) +// --------------------------------------------------------------------------- + +/** POST — send a chat message to the copilot. */ +export const COPILOT_CHAT_API_PATH = '/api/copilot/chat' + +/** GET — resume/replay a copilot SSE stream. */ +export const COPILOT_CHAT_STREAM_API_PATH = '/api/copilot/chat/stream' + +/** POST — persist chat messages / plan artifact / config. */ +export const COPILOT_UPDATE_MESSAGES_API_PATH = '/api/copilot/chat/update-messages' + +/** DELETE — delete a copilot chat. */ +export const COPILOT_DELETE_CHAT_API_PATH = '/api/copilot/chat/delete' + +/** POST — confirm or reject a tool call. */ +export const COPILOT_CONFIRM_API_PATH = '/api/copilot/confirm' + +/** POST — forward diff-accepted/rejected stats to the copilot backend. */ +export const COPILOT_STATS_API_PATH = '/api/copilot/stats' + +/** GET — load checkpoints for a chat. */ +export const COPILOT_CHECKPOINTS_API_PATH = '/api/copilot/checkpoints' + +/** POST — revert to a checkpoint. 
*/ +export const COPILOT_CHECKPOINTS_REVERT_API_PATH = '/api/copilot/checkpoints/revert' + +/** GET/POST/DELETE — manage auto-allowed tools. */ +export const COPILOT_AUTO_ALLOWED_TOOLS_API_PATH = '/api/copilot/auto-allowed-tools' + +/** GET — fetch user credentials for masking. */ +export const COPILOT_CREDENTIALS_API_PATH = '/api/copilot/credentials' + +// --------------------------------------------------------------------------- +// Dedup limits +// --------------------------------------------------------------------------- + +/** Maximum entries in the in-memory SSE tool-event dedup cache. */ +export const STREAM_BUFFER_MAX_DEDUP_ENTRIES = 1_000 diff --git a/apps/sim/lib/copilot/messages/checkpoints.ts b/apps/sim/lib/copilot/messages/checkpoints.ts index 1a4847d6e0..29eca04c3b 100644 --- a/apps/sim/lib/copilot/messages/checkpoints.ts +++ b/apps/sim/lib/copilot/messages/checkpoints.ts @@ -1,4 +1,5 @@ import { createLogger } from '@sim/logger' +import { COPILOT_CHECKPOINTS_API_PATH } from '@/lib/copilot/constants' import { mergeSubblockState } from '@/stores/workflows/utils' import { useWorkflowStore } from '@/stores/workflows/workflow/store' import type { WorkflowState } from '@/stores/workflows/workflow/types' @@ -58,7 +59,7 @@ export async function saveMessageCheckpoint( set({ messageSnapshots: nextSnapshots }) try { - const response = await fetch('/api/copilot/checkpoints', { + const response = await fetch(COPILOT_CHECKPOINTS_API_PATH, { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ @@ -117,7 +118,7 @@ export function buildToolCallsById(messages: CopilotMessage[]): Record = {} for (const msg of messages) { if (msg.contentBlocks) { - for (const block of msg.contentBlocks as any[]) { + for (const block of msg.contentBlocks) { if (block?.type === 'tool_call' && block.toolCall?.id) { extractToolCallsRecursively(block.toolCall, toolCallsById) } diff --git a/apps/sim/lib/copilot/messages/credential-masking.ts 
b/apps/sim/lib/copilot/messages/credential-masking.ts index f0e64eef84..33f1549491 100644 --- a/apps/sim/lib/copilot/messages/credential-masking.ts +++ b/apps/sim/lib/copilot/messages/credential-masking.ts @@ -1,27 +1,30 @@ -export function maskCredentialIdsInValue(value: any, credentialIds: Set): any { +export function maskCredentialIdsInValue(value: T, credentialIds: Set): T { if (!value || credentialIds.size === 0) return value if (typeof value === 'string') { - let masked = value + let masked = value as string const sortedIds = Array.from(credentialIds).sort((a, b) => b.length - a.length) for (const id of sortedIds) { if (id && masked.includes(id)) { masked = masked.split(id).join('••••••••') } } - return masked + return masked as unknown as T } if (Array.isArray(value)) { - return value.map((item) => maskCredentialIdsInValue(item, credentialIds)) + return value.map((item) => maskCredentialIdsInValue(item, credentialIds)) as T } if (typeof value === 'object') { - const masked: any = {} - for (const key of Object.keys(value)) { - masked[key] = maskCredentialIdsInValue(value[key], credentialIds) + const masked: Record = {} + for (const key of Object.keys(value as Record)) { + masked[key] = maskCredentialIdsInValue( + (value as Record)[key], + credentialIds + ) } - return masked + return masked as T } return value diff --git a/apps/sim/lib/copilot/messages/index.ts b/apps/sim/lib/copilot/messages/index.ts index 4525fcdd87..2525a00792 100644 --- a/apps/sim/lib/copilot/messages/index.ts +++ b/apps/sim/lib/copilot/messages/index.ts @@ -1,3 +1,4 @@ export * from './credential-masking' export * from './serialization' export * from './checkpoints' +export * from './persist' diff --git a/apps/sim/lib/copilot/messages/persist.ts b/apps/sim/lib/copilot/messages/persist.ts new file mode 100644 index 0000000000..9ca3a24fe7 --- /dev/null +++ b/apps/sim/lib/copilot/messages/persist.ts @@ -0,0 +1,43 @@ +import { createLogger } from '@sim/logger' +import { 
COPILOT_UPDATE_MESSAGES_API_PATH } from '@/lib/copilot/constants' +import type { CopilotMessage } from '@/stores/panel/copilot/types' +import { serializeMessagesForDB } from './serialization' + +const logger = createLogger('CopilotMessagePersistence') + +export async function persistMessages(params: { + chatId: string + messages: CopilotMessage[] + sensitiveCredentialIds?: Set + planArtifact?: string | null + mode?: string + model?: string + conversationId?: string +}): Promise { + try { + const dbMessages = serializeMessagesForDB( + params.messages, + params.sensitiveCredentialIds ?? new Set() + ) + const response = await fetch(COPILOT_UPDATE_MESSAGES_API_PATH, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + chatId: params.chatId, + messages: dbMessages, + ...(params.planArtifact !== undefined ? { planArtifact: params.planArtifact } : {}), + ...(params.mode || params.model + ? { config: { mode: params.mode, model: params.model } } + : {}), + ...(params.conversationId ? { conversationId: params.conversationId } : {}), + }), + }) + return response.ok + } catch (error) { + logger.warn('Failed to persist messages', { + chatId: params.chatId, + error: error instanceof Error ? 
error.message : String(error), + }) + return false + } +} diff --git a/apps/sim/lib/copilot/messages/serialization.ts b/apps/sim/lib/copilot/messages/serialization.ts index e69bae218d..bcc58e0cf8 100644 --- a/apps/sim/lib/copilot/messages/serialization.ts +++ b/apps/sim/lib/copilot/messages/serialization.ts @@ -1,10 +1,10 @@ import { createLogger } from '@sim/logger' -import type { CopilotMessage } from '@/stores/panel/copilot/types' +import type { CopilotMessage, CopilotToolCall } from '@/stores/panel/copilot/types' import { maskCredentialIdsInValue } from './credential-masking' const logger = createLogger('CopilotMessageSerialization') -export function clearStreamingFlags(toolCall: any): void { +export function clearStreamingFlags(toolCall: CopilotToolCall): void { if (!toolCall) return toolCall.subAgentStreaming = false @@ -27,18 +27,18 @@ export function normalizeMessagesForUI(messages: CopilotMessage[]): CopilotMessa try { for (const message of messages) { if (message.role === 'assistant') { - logger.info('[normalizeMessagesForUI] Loading assistant message', { + logger.debug('[normalizeMessagesForUI] Loading assistant message', { id: message.id, hasContent: !!message.content?.trim(), contentBlockCount: message.contentBlocks?.length || 0, - contentBlockTypes: (message.contentBlocks as any[])?.map((b) => b?.type) ?? [], + contentBlockTypes: message.contentBlocks?.map((b) => b?.type) ?? [], }) } } for (const message of messages) { if (message.contentBlocks) { - for (const block of message.contentBlocks as any[]) { + for (const block of message.contentBlocks) { if (block?.type === 'tool_call' && block.toolCall) { clearStreamingFlags(block.toolCall) } @@ -51,7 +51,10 @@ export function normalizeMessagesForUI(messages: CopilotMessage[]): CopilotMessa } } return messages - } catch { + } catch (error) { + logger.warn('[normalizeMessagesForUI] Failed to normalize messages', { + error: error instanceof Error ? 
error.message : String(error), + }) return messages } } @@ -88,16 +91,16 @@ export function deepClone(obj: T): T { export function serializeMessagesForDB( messages: CopilotMessage[], credentialIds: Set -): any[] { +): CopilotMessage[] { const result = messages .map((msg) => { let timestamp: string = msg.timestamp if (typeof timestamp !== 'string') { - const ts = timestamp as any + const ts = timestamp as unknown timestamp = ts instanceof Date ? ts.toISOString() : new Date().toISOString() } - const serialized: any = { + const serialized: CopilotMessage = { id: msg.id, role: msg.role, content: msg.content || '', @@ -108,16 +111,16 @@ export function serializeMessagesForDB( serialized.contentBlocks = deepClone(msg.contentBlocks) } - if (Array.isArray((msg as any).toolCalls) && (msg as any).toolCalls.length > 0) { - serialized.toolCalls = deepClone((msg as any).toolCalls) + if (Array.isArray(msg.toolCalls) && msg.toolCalls.length > 0) { + serialized.toolCalls = deepClone(msg.toolCalls) } if (Array.isArray(msg.fileAttachments) && msg.fileAttachments.length > 0) { serialized.fileAttachments = deepClone(msg.fileAttachments) } - if (Array.isArray((msg as any).contexts) && (msg as any).contexts.length > 0) { - serialized.contexts = deepClone((msg as any).contexts) + if (Array.isArray(msg.contexts) && msg.contexts.length > 0) { + serialized.contexts = deepClone(msg.contexts) } if (Array.isArray(msg.citations) && msg.citations.length > 0) { @@ -142,16 +145,16 @@ export function serializeMessagesForDB( for (const msg of messages) { if (msg.role === 'assistant') { - logger.info('[serializeMessagesForDB] Input assistant message', { + logger.debug('[serializeMessagesForDB] Input assistant message', { id: msg.id, hasContent: !!msg.content?.trim(), contentBlockCount: msg.contentBlocks?.length || 0, - contentBlockTypes: (msg.contentBlocks as any[])?.map((b) => b?.type) ?? [], + contentBlockTypes: msg.contentBlocks?.map((b) => b?.type) ?? 
[], }) } } - logger.info('[serializeMessagesForDB] Serialized messages', { + logger.debug('[serializeMessagesForDB] Serialized messages', { inputCount: messages.length, outputCount: result.length, sample: diff --git a/apps/sim/lib/copilot/orchestrator/persistence.ts b/apps/sim/lib/copilot/orchestrator/persistence.ts index f42d16e37e..2743a51d4e 100644 --- a/apps/sim/lib/copilot/orchestrator/persistence.ts +++ b/apps/sim/lib/copilot/orchestrator/persistence.ts @@ -1,4 +1,5 @@ import { createLogger } from '@sim/logger' +import { REDIS_TOOL_CALL_PREFIX } from '@/lib/copilot/constants' import { getRedisClient } from '@/lib/core/config/redis' const logger = createLogger('CopilotOrchestratorPersistence') @@ -15,7 +16,7 @@ export async function getToolConfirmation(toolCallId: string): Promise<{ if (!redis) return null try { - const data = await redis.get(`tool_call:${toolCallId}`) + const data = await redis.get(`${REDIS_TOOL_CALL_PREFIX}${toolCallId}`) if (!data) return null return JSON.parse(data) as { status: string; message?: string; timestamp?: string } } catch (error) { diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts index d885e9876c..138b5516bc 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts @@ -1,4 +1,5 @@ import { createLogger } from '@sim/logger' +import { STREAM_TIMEOUT_MS } from '@/lib/copilot/constants' import { RESPOND_TOOL_SET, SUBAGENT_TOOL_SET } from '@/lib/copilot/orchestrator/config' import { asRecord, @@ -21,15 +22,16 @@ const logger = createLogger('CopilotSseHandlers') // Normalization + dedupe helpers live in sse-utils to keep server/client in sync. 
-function inferToolSuccess(data: Record | undefined): { +function inferToolSuccess(data: Record | undefined): { success: boolean hasResultData: boolean hasError: boolean } { - const hasExplicitSuccess = data?.success !== undefined || data?.result?.success !== undefined - const explicitSuccess = data?.success ?? data?.result?.success + const resultObj = asRecord(data?.result) + const hasExplicitSuccess = data?.success !== undefined || resultObj.success !== undefined + const explicitSuccess = data?.success ?? resultObj.success const hasResultData = data?.result !== undefined || data?.data !== undefined - const hasError = !!data?.error || !!data?.result?.error + const hasError = !!data?.error || !!resultObj.error const success = hasExplicitSuccess ? !!explicitSuccess : hasResultData && !hasError return { success, hasResultData, hasError } } @@ -50,12 +52,12 @@ function addContentBlock(context: StreamingContext, block: Omit = { chat_id: (event, context) => { - context.chatId = asRecord(event.data).chatId + context.chatId = asRecord(event.data).chatId as string | undefined }, title_updated: () => {}, tool_result: (event, context) => { const data = getEventData(event) - const toolCallId = event.toolCallId || data?.id + const toolCallId = event.toolCallId || (data?.id as string | undefined) if (!toolCallId) return const current = context.toolCalls.get(toolCallId) if (!current) return @@ -71,23 +73,24 @@ export const sseHandlers: Record = { } } if (hasError) { - current.error = data?.error || data?.result?.error + const resultObj = asRecord(data?.result) + current.error = (data?.error || resultObj.error) as string | undefined } }, tool_error: (event, context) => { const data = getEventData(event) - const toolCallId = event.toolCallId || data?.id + const toolCallId = event.toolCallId || (data?.id as string | undefined) if (!toolCallId) return const current = context.toolCalls.get(toolCallId) if (!current) return current.status = 'error' - current.error = data?.error || 
'Tool execution failed' + current.error = (data?.error as string | undefined) || 'Tool execution failed' current.endTime = Date.now() }, tool_generating: (event, context) => { const data = getEventData(event) - const toolCallId = event.toolCallId || data?.toolCallId || data?.id - const toolName = event.toolName || data?.toolName || data?.name + const toolCallId = event.toolCallId || (data?.toolCallId as string | undefined) || (data?.id as string | undefined) + const toolName = event.toolName || (data?.toolName as string | undefined) || (data?.name as string | undefined) if (!toolCallId || !toolName) return if (!context.toolCalls.has(toolCallId)) { context.toolCalls.set(toolCallId, { @@ -99,12 +102,12 @@ export const sseHandlers: Record = { } }, tool_call: async (event, context, execContext, options) => { - const toolData = getEventData(event) || {} - const toolCallId = toolData.id || event.toolCallId - const toolName = toolData.name || event.toolName + const toolData = getEventData(event) || ({} as Record) + const toolCallId = (toolData.id as string | undefined) || event.toolCallId + const toolName = (toolData.name as string | undefined) || event.toolName if (!toolCallId || !toolName) return - const args = toolData.arguments || toolData.input || asRecord(event.data).input + const args = (toolData.arguments || toolData.input || asRecord(event.data).input) as Record | undefined const isPartial = toolData.partial === true const existing = context.toolCalls.get(toolCallId) @@ -161,7 +164,7 @@ export const sseHandlers: Record = { const isInteractive = options.interactive === true if (isInterruptTool && isInteractive) { - const decision = await waitForToolDecision(toolCallId, options.timeout || 600000) + const decision = await waitForToolDecision(toolCallId, options.timeout || STREAM_TIMEOUT_MS, options.abortSignal) if (decision?.status === 'accepted' || decision?.status === 'success') { await executeToolAndReport(toolCallId, context, execContext, options) return @@ 
-221,7 +224,8 @@ export const sseHandlers: Record = { } }, reasoning: (event, context) => { - const phase = asRecord(event.data).phase || asRecord(asRecord(event.data).data).phase + const d = asRecord(event.data) + const phase = d.phase || asRecord(d.data).phase if (phase === 'start') { context.isInThinkingBlock = true context.currentThinkingBlock = { @@ -239,17 +243,16 @@ export const sseHandlers: Record = { context.currentThinkingBlock = null return } - const d = asRecord(event.data) - const chunk = typeof event.data === 'string' ? event.data : d.data || d.content + const chunk = (d.data || d.content || event.content) as string | undefined if (!chunk || !context.currentThinkingBlock) return context.currentThinkingBlock.content = `${context.currentThinkingBlock.content || ''}${chunk}` }, content: (event, context) => { const d = asRecord(event.data) - const chunk = typeof event.data === 'string' ? event.data : d.content || d.data + const chunk = (d.content || d.data || event.content) as string | undefined if (!chunk) return context.accumulatedContent += chunk - addContentBlock(context, { type: 'text', content: chunk as string }) + addContentBlock(context, { type: 'text', content: chunk }) }, done: (event, context) => { const d = asRecord(event.data) @@ -266,7 +269,7 @@ export const sseHandlers: Record = { }, error: (event, context) => { const d = asRecord(event.data) - const message = d.message || d.error || (typeof event.data === 'string' ? event.data : null) + const message = (d.message || d.error || event.error) as string | undefined if (message) { context.errors.push(message) } @@ -278,7 +281,8 @@ export const subAgentHandlers: Record = { content: (event, context) => { const parentToolCallId = context.subAgentParentToolCallId if (!parentToolCallId || !event.data) return - const chunk = typeof event.data === 'string' ? 
event.data : asRecord(event.data).content || '' + const d = asRecord(event.data) + const chunk = (d.content || d.data || event.content) as string | undefined if (!chunk) return context.subAgentContent[parentToolCallId] = (context.subAgentContent[parentToolCallId] || '') + chunk @@ -287,12 +291,12 @@ export const subAgentHandlers: Record = { tool_call: async (event, context, execContext, options) => { const parentToolCallId = context.subAgentParentToolCallId if (!parentToolCallId) return - const toolData = getEventData(event) || {} - const toolCallId = toolData.id || event.toolCallId - const toolName = toolData.name || event.toolName + const toolData = getEventData(event) || ({} as Record) + const toolCallId = (toolData.id as string | undefined) || event.toolCallId + const toolName = (toolData.name as string | undefined) || event.toolName if (!toolCallId || !toolName) return const isPartial = toolData.partial === true - const args = toolData.arguments || toolData.input || asRecord(event.data).input + const args = (toolData.arguments || toolData.input || asRecord(event.data).input) as Record | undefined const existing = context.toolCalls.get(toolCallId) // Ignore late/duplicate tool_call events once we already have a result. 
diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts index 99eb593e5e..1c707c5708 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts @@ -1,4 +1,9 @@ import { createLogger } from '@sim/logger' +import { + TOOL_DECISION_INITIAL_POLL_MS, + TOOL_DECISION_MAX_POLL_MS, + TOOL_DECISION_POLL_BACKOFF, +} from '@/lib/copilot/constants' import { INTERRUPT_TOOL_SET } from '@/lib/copilot/orchestrator/config' import { getToolConfirmation } from '@/lib/copilot/orchestrator/persistence' import { @@ -103,15 +108,20 @@ export async function executeToolAndReport( export async function waitForToolDecision( toolCallId: string, - timeoutMs: number + timeoutMs: number, + abortSignal?: AbortSignal ): Promise<{ status: string; message?: string } | null> { const start = Date.now() + let interval = TOOL_DECISION_INITIAL_POLL_MS + const maxInterval = TOOL_DECISION_MAX_POLL_MS while (Date.now() - start < timeoutMs) { + if (abortSignal?.aborted) return null const decision = await getToolConfirmation(toolCallId) if (decision?.status) { return decision } - await new Promise((resolve) => setTimeout(resolve, 100)) + await new Promise((resolve) => setTimeout(resolve, interval)) + interval = Math.min(interval * TOOL_DECISION_POLL_BACKOFF, maxInterval) } return null } diff --git a/apps/sim/lib/copilot/orchestrator/sse-utils.ts b/apps/sim/lib/copilot/orchestrator/sse-utils.ts index 26d5a94bd8..92f337e2a7 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-utils.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-utils.ts @@ -1,22 +1,29 @@ +import { STREAM_BUFFER_MAX_DEDUP_ENTRIES } from '@/lib/copilot/constants' import type { SSEEvent } from '@/lib/copilot/orchestrator/types' -type EventDataObject = Record | undefined +type EventDataObject = Record | undefined /** Safely cast event.data to a record for property access. 
*/ -export const asRecord = (data: unknown): Record => - (data && typeof data === 'object' && !Array.isArray(data) ? data : {}) as Record - -const DEFAULT_TOOL_EVENT_TTL_MS = 5 * 60 * 1000 +export const asRecord = (data: unknown): Record => + (data && typeof data === 'object' && !Array.isArray(data) ? data : {}) as Record /** - * In-memory tool event dedupe. + * In-memory tool event dedupe with bounded size. * - * NOTE: These sets are process-local only. In a multi-instance setup (e.g., ECS), - * each task maintains its own dedupe cache, so duplicates can still appear across tasks. + * NOTE: Process-local only. In a multi-instance setup (e.g., ECS), + * each task maintains its own dedupe cache. */ const seenToolCalls = new Set() const seenToolResults = new Set() +function addToSet(set: Set, id: string): void { + if (set.size >= STREAM_BUFFER_MAX_DEDUP_ENTRIES) { + const first = set.values().next().value + if (first) set.delete(first) + } + set.add(id) +} + const parseEventData = (data: unknown): EventDataObject => { if (!data) return undefined if (typeof data !== 'string') { @@ -51,7 +58,7 @@ export const getEventData = (event: SSEEvent): EventDataObject => { function getToolCallIdFromEvent(event: SSEEvent): string | undefined { const data = getEventData(event) - return event.toolCallId || data?.id || data?.toolCallId + return event.toolCallId || (data?.id as string | undefined) || (data?.toolCallId as string | undefined) } /** Normalizes SSE events so tool metadata is available at the top level. */ @@ -59,9 +66,9 @@ export function normalizeSseEvent(event: SSEEvent): SSEEvent { if (!event) return event const data = getEventData(event) if (!data) return event - const toolCallId = event.toolCallId || data.id || data.toolCallId - const toolName = event.toolName || data.name || data.toolName - const success = event.success ?? 
data.success + const toolCallId = event.toolCallId || (data.id as string | undefined) || (data.toolCallId as string | undefined) + const toolName = event.toolName || (data.name as string | undefined) || (data.toolName as string | undefined) + const success = event.success ?? (data.success as boolean | undefined) const result = event.result ?? data.result const normalizedData = typeof event.data === 'string' ? data : event.data return { @@ -74,25 +81,16 @@ export function normalizeSseEvent(event: SSEEvent): SSEEvent { } } -function markToolCallSeen(toolCallId: string, ttlMs: number = DEFAULT_TOOL_EVENT_TTL_MS): void { - seenToolCalls.add(toolCallId) - setTimeout(() => { - seenToolCalls.delete(toolCallId) - }, ttlMs) +function markToolCallSeen(toolCallId: string): void { + addToSet(seenToolCalls, toolCallId) } function wasToolCallSeen(toolCallId: string): boolean { return seenToolCalls.has(toolCallId) } -export function markToolResultSeen( - toolCallId: string, - ttlMs: number = DEFAULT_TOOL_EVENT_TTL_MS -): void { - seenToolResults.add(toolCallId) - setTimeout(() => { - seenToolResults.delete(toolCallId) - }, ttlMs) +export function markToolResultSeen(toolCallId: string): void { + addToSet(seenToolResults, toolCallId) } export function wasToolResultSeen(toolCallId: string): boolean { diff --git a/apps/sim/lib/copilot/orchestrator/stream-buffer.ts b/apps/sim/lib/copilot/orchestrator/stream-buffer.ts index abf70aa2c1..bc0524c4af 100644 --- a/apps/sim/lib/copilot/orchestrator/stream-buffer.ts +++ b/apps/sim/lib/copilot/orchestrator/stream-buffer.ts @@ -1,4 +1,5 @@ import { createLogger } from '@sim/logger' +import { REDIS_COPILOT_STREAM_PREFIX } from '@/lib/copilot/constants' import { env } from '@/lib/core/config/env' import { getRedisClient } from '@/lib/core/config/redis' @@ -59,7 +60,7 @@ return id ` function getStreamKeyPrefix(streamId: string) { - return `copilot_stream:${streamId}` + return `${REDIS_COPILOT_STREAM_PREFIX}${streamId}` } function 
getEventsKey(streamId: string) { @@ -86,11 +87,11 @@ export type StreamMeta = { export type StreamEventEntry = { eventId: number streamId: string - event: Record + event: Record } export type StreamEventWriter = { - write: (event: Record) => Promise + write: (event: Record) => Promise flush: () => Promise close: () => Promise } @@ -147,7 +148,7 @@ export async function getStreamMeta(streamId: string): Promise + event: Record ): Promise { const redis = getRedisClient() if (!redis) { @@ -225,7 +226,7 @@ export function createStreamEventWriter(streamId: string): StreamEventWriter { zaddArgs.push(entry.eventId, JSON.stringify(entry)) } const pipeline = redis.pipeline() - pipeline.zadd(key, ...(zaddArgs as any)) + pipeline.zadd(key, ...(zaddArgs as [number, string])) pipeline.expire(key, config.ttlSeconds) pipeline.expire(getSeqKey(streamId), config.ttlSeconds) pipeline.zremrangebyrank(key, 0, -config.eventLimit - 1) @@ -253,7 +254,7 @@ export function createStreamEventWriter(streamId: string): StreamEventWriter { } } - const write = async (event: Record) => { + const write = async (event: Record) => { if (closed) return { eventId: 0, streamId, event } if (nextEventId === 0 || nextEventId > maxReservedId) { await reserveIds(1) diff --git a/apps/sim/lib/copilot/orchestrator/stream-core.ts b/apps/sim/lib/copilot/orchestrator/stream-core.ts index 5f5af90b4d..14357c2045 100644 --- a/apps/sim/lib/copilot/orchestrator/stream-core.ts +++ b/apps/sim/lib/copilot/orchestrator/stream-core.ts @@ -1,4 +1,5 @@ import { createLogger } from '@sim/logger' +import { ORCHESTRATION_TIMEOUT_MS } from '@/lib/copilot/constants' import { handleSubagentRouting, sseHandlers, @@ -68,7 +69,7 @@ export async function runStreamLoop( execContext: ExecutionContext, options: StreamLoopOptions ): Promise { - const { timeout = 300000, abortSignal } = options + const { timeout = ORCHESTRATION_TIMEOUT_MS, abortSignal } = options const response = await fetch(fetchUrl, { ...fetchOptions, diff --git 
a/apps/sim/lib/copilot/orchestrator/subagent.ts b/apps/sim/lib/copilot/orchestrator/subagent.ts index 9788a686a5..cccf7a70b3 100644 --- a/apps/sim/lib/copilot/orchestrator/subagent.ts +++ b/apps/sim/lib/copilot/orchestrator/subagent.ts @@ -28,7 +28,7 @@ export interface SubagentOrchestratorResult { structuredResult?: { type?: string summary?: string - data?: any + data?: unknown success?: boolean } error?: string @@ -37,14 +37,15 @@ export interface SubagentOrchestratorResult { export async function orchestrateSubagentStream( agentId: string, - requestPayload: Record, + requestPayload: Record, options: SubagentOrchestratorOptions ): Promise { const { userId, workflowId, workspaceId } = options const execContext = await buildExecutionContext(userId, workflowId, workspaceId) + const msgId = requestPayload?.messageId const context = createStreamingContext({ - messageId: requestPayload?.messageId || crypto.randomUUID(), + messageId: typeof msgId === 'string' ? msgId : crypto.randomUUID(), }) let structuredResult: SubagentOrchestratorResult['structuredResult'] @@ -109,12 +110,12 @@ export async function orchestrateSubagentStream( function normalizeStructuredResult(data: unknown): SubagentOrchestratorResult['structuredResult'] { if (!data || typeof data !== 'object') return undefined - const d = data as Record + const d = data as Record return { - type: d.result_type || d.type, - summary: d.summary, + type: (d.result_type || d.type) as string | undefined, + summary: d.summary as string | undefined, data: d.data ?? 
d, - success: d.success, + success: d.success as boolean | undefined, } } diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/manage.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/manage.ts index 555552693d..dc5d7a988e 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/manage.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/manage.ts @@ -58,7 +58,17 @@ export async function executeCheckDeploymentStatus( hasPassword: Boolean(chatDeploy[0]?.password), } - const mcpDetails = { isDeployed: false, servers: [] as any[] } + const mcpDetails: { + isDeployed: boolean + servers: Array<{ + serverId: string + serverName: string + toolName: string + toolDescription: string | null + parameterSchema: unknown + toolId: string + }> + } = { isDeployed: false, servers: [] } if (workspaceId) { const servers = await db .select({ diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts index 2882a8bbfb..dbd3a24a99 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts @@ -138,7 +138,7 @@ export async function executeToolServerSide( */ async function executeServerToolDirect( toolName: string, - params: Record, + params: Record, context: ExecutionContext ): Promise { try { @@ -180,8 +180,8 @@ export async function markToolComplete( toolCallId: string, toolName: string, status: number, - message?: any, - data?: any + message?: unknown, + data?: unknown ): Promise { try { const response = await fetch(`${SIM_AGENT_API_URL}/api/tools/mark-complete`, { diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/integration-tools.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/integration-tools.ts index f70444acdd..8464e42ca5 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/integration-tools.ts +++ 
b/apps/sim/lib/copilot/orchestrator/tool-executor/integration-tools.ts @@ -41,9 +41,9 @@ export async function executeIntegrationToolDirect( // Deep resolution walks nested objects to replace {{ENV_VAR}} references. // Safe because tool arguments originate from the LLM (not direct user input) // and env vars belong to the user themselves. - const executionParams: Record = resolveEnvVarReferences(toolArgs, decryptedEnvVars, { + const executionParams = resolveEnvVarReferences(toolArgs, decryptedEnvVars, { deep: true, - }) as Record + }) as Record if (toolConfig.oauth?.required && toolConfig.oauth.provider) { const provider = toolConfig.oauth.provider @@ -62,7 +62,7 @@ export async function executeIntegrationToolDirect( const acc = accounts[0] const requestId = generateRequestId() - const { accessToken } = await refreshTokenIfNeeded(requestId, acc as any, acc.id) + const { accessToken } = await refreshTokenIfNeeded(requestId, acc, acc.id) if (!accessToken) { return { diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts index 12158fc74b..1489286945 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts @@ -1,4 +1,5 @@ import crypto from 'crypto' +import { createLogger } from '@sim/logger' import { db } from '@sim/db' import { workflow, workflowFolder } from '@sim/db/schema' import { and, eq, isNull, max } from 'drizzle-orm' @@ -16,6 +17,8 @@ import type { VariableOperation, } from '../param-types' +const logger = createLogger('WorkflowMutations') + export async function executeCreateWorkflow( params: CreateWorkflowParams, context: ExecutionContext @@ -185,17 +188,27 @@ export async function executeSetGlobalWorkflowVariables( : [] const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) - const 
currentVarsRecord = (workflowRecord.variables as Record) || {} - const byName: Record = {} - Object.values(currentVarsRecord).forEach((v: any) => { - if (v && typeof v === 'object' && v.id && v.name) byName[String(v.name)] = v + interface WorkflowVariable { + id: string + workflowId?: string + name: string + type: string + value?: unknown + } + const currentVarsRecord = (workflowRecord.variables as Record) || {} + const byName: Record = {} + Object.values(currentVarsRecord).forEach((v) => { + if (v && typeof v === 'object' && 'id' in v && 'name' in v) { + const variable = v as WorkflowVariable + byName[String(variable.name)] = variable + } }) for (const op of operations) { const key = String(op?.name || '') if (!key) continue const nextType = op?.type || byName[key]?.type || 'plain' - const coerceValue = (value: any, type: string) => { + const coerceValue = (value: unknown, type: string): unknown => { if (value === undefined) return value if (type === 'number') { const n = Number(value) @@ -213,7 +226,9 @@ export async function executeSetGlobalWorkflowVariables( if (type === 'array' && Array.isArray(parsed)) return parsed if (type === 'object' && parsed && typeof parsed === 'object' && !Array.isArray(parsed)) return parsed - } catch {} + } catch (error) { + logger.warn('Failed to parse JSON value for variable coercion', { error: error instanceof Error ? 
error.message : String(error) }) + } return value } return value @@ -254,7 +269,7 @@ export async function executeSetGlobalWorkflowVariables( } const nextVarsRecord = Object.fromEntries( - Object.values(byName).map((v: any) => [String(v.id), v]) + Object.values(byName).map((v) => [String(v.id), v]) ) await db diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts index 5bcca2e0df..dd4231b975 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts @@ -13,6 +13,7 @@ import { getBlockOutputPaths } from '@/lib/workflows/blocks/block-outputs' import { BlockPathCalculator } from '@/lib/workflows/blocks/block-path-calculator' import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils' import { isInputDefinitionTrigger } from '@/lib/workflows/triggers/input-definition-triggers' +import type { Loop, Parallel } from '@/stores/workflows/workflow/types' import { normalizeName } from '@/executor/constants' import { ensureWorkflowAccess, @@ -209,12 +210,15 @@ export async function executeGetWorkflowData( ) if (dataType === 'global_variables') { - const variablesRecord = (workflowRecord.variables as Record) || {} - const variables = Object.values(variablesRecord).map((v: any) => ({ - id: String(v?.id || ''), - name: String(v?.name || ''), - value: v?.value, - })) + const variablesRecord = (workflowRecord.variables as Record) || {} + const variables = Object.values(variablesRecord).map((v) => { + const variable = v as Record | null + return { + id: String(variable?.id || ''), + name: String(variable?.name || ''), + value: variable?.value, + } + }) return { success: true, output: { variables } } } @@ -232,13 +236,17 @@ export async function executeGetWorkflowData( .where(or(...conditions)) .orderBy(desc(customTools.createdAt)) - const 
customToolsData = toolsRows.map((tool) => ({ - id: String(tool.id || ''), - title: String(tool.title || ''), - functionName: String((tool.schema as any)?.function?.name || ''), - description: String((tool.schema as any)?.function?.description || ''), - parameters: (tool.schema as any)?.function?.parameters, - })) + const customToolsData = toolsRows.map((tool) => { + const schema = tool.schema as Record | null + const fn = (schema?.function ?? {}) as Record + return { + id: String(tool.id || ''), + title: String(tool.title || ''), + functionName: String(fn.name || ''), + description: String(fn.description || ''), + parameters: fn.parameters, + } + }) return { success: true, output: { customTools: customToolsData } } } @@ -377,10 +385,28 @@ export async function executeGetBlockUpstreamReferences( const loops = normalized.loops || {} const parallels = normalized.parallels || {} - const graphEdges = edges.map((edge: any) => ({ source: edge.source, target: edge.target })) + const graphEdges = edges.map((edge) => ({ source: edge.source, target: edge.target })) const variableOutputs = await getWorkflowVariablesForTool(workflowId) - const results: any[] = [] + interface AccessibleBlockEntry { + blockId: string + blockName: string + blockType: string + outputs: string[] + triggerMode?: boolean + accessContext?: 'inside' | 'outside' + } + + interface UpstreamReferenceResult { + blockId: string + blockName: string + blockType: string + accessibleBlocks: AccessibleBlockEntry[] + insideSubflows: Array<{ blockId: string; blockName: string; blockType: string }> + variables: Array<{ id: string; name: string; type: string; tag: string }> + } + + const results: UpstreamReferenceResult[] = [] for (const blockId of params.blockIds) { const targetBlock = blocks[blockId] @@ -390,7 +416,7 @@ export async function executeGetBlockUpstreamReferences( const containingLoopIds = new Set() const containingParallelIds = new Set() - Object.values(loops as Record).forEach((loop) => { + 
Object.values(loops).forEach((loop) => { if (loop?.nodes?.includes(blockId)) { containingLoopIds.add(loop.id) const loopBlock = blocks[loop.id] @@ -404,7 +430,7 @@ export async function executeGetBlockUpstreamReferences( } }) - Object.values(parallels as Record).forEach((parallel) => { + Object.values(parallels).forEach((parallel) => { if (parallel?.nodes?.includes(blockId)) { containingParallelIds.add(parallel.id) const parallelBlock = blocks[parallel.id] @@ -422,9 +448,9 @@ export async function executeGetBlockUpstreamReferences( const accessibleIds = new Set(ancestorIds) accessibleIds.add(blockId) - const starterBlock = Object.values(blocks).find((b: any) => isInputDefinitionTrigger(b.type)) - if (starterBlock && ancestorIds.includes((starterBlock as any).id)) { - accessibleIds.add((starterBlock as any).id) + const starterBlock = Object.values(blocks).find((b) => isInputDefinitionTrigger(b.type)) + if (starterBlock && ancestorIds.includes(starterBlock.id)) { + accessibleIds.add(starterBlock.id) } containingLoopIds.forEach((loopId) => { @@ -437,7 +463,7 @@ export async function executeGetBlockUpstreamReferences( parallels[parallelId]?.nodes?.forEach((nodeId: string) => accessibleIds.add(nodeId)) }) - const accessibleBlocks: any[] = [] + const accessibleBlocks: AccessibleBlockEntry[] = [] for (const accessibleBlockId of accessibleIds) { const block = blocks[accessibleBlockId] @@ -462,14 +488,14 @@ export async function executeGetBlockUpstreamReferences( } const formattedOutputs = formatOutputsWithPrefix(outputPaths, blockName) - const entry: any = { + const entry: AccessibleBlockEntry = { blockId: accessibleBlockId, blockName, blockType: block.type, outputs: formattedOutputs, + ...(block.triggerMode ? { triggerMode: true } : {}), + ...(accessContext ? 
{ accessContext } : {}), } - if (block.triggerMode) entry.triggerMode = true - if (accessContext) entry.accessContext = accessContext accessibleBlocks.push(entry) } @@ -499,10 +525,14 @@ async function getWorkflowVariablesForTool( .where(eq(workflow.id, workflowId)) .limit(1) - const variablesRecord = (workflowRecord?.variables as Record) || {} + const variablesRecord = (workflowRecord?.variables as Record) || {} return Object.values(variablesRecord) - .filter((v: any) => v?.name && String(v.name).trim() !== '') - .map((v: any) => ({ + .filter((v): v is Record => { + if (!v || typeof v !== 'object') return false + const variable = v as Record + return !!variable.name && String(variable.name).trim() !== '' + }) + .map((v) => ({ id: String(v.id || ''), name: String(v.name || ''), type: String(v.type || 'plain'), @@ -513,8 +543,8 @@ async function getWorkflowVariablesForTool( function getSubflowInsidePaths( blockType: 'loop' | 'parallel', blockId: string, - loops: Record, - parallels: Record + loops: Record, + parallels: Record ): string[] { const paths = ['index'] if (blockType === 'loop') { diff --git a/apps/sim/lib/copilot/orchestrator/types.ts b/apps/sim/lib/copilot/orchestrator/types.ts index dd321bab31..eebc806a72 100644 --- a/apps/sim/lib/copilot/orchestrator/types.ts +++ b/apps/sim/lib/copilot/orchestrator/types.ts @@ -19,12 +19,24 @@ export type SSEEventType = export interface SSEEvent { type: SSEEventType - data?: unknown + data?: Record subagent?: string toolCallId?: string toolName?: string success?: boolean result?: unknown + /** Set on chat_id events */ + chatId?: string + /** Set on title_updated events */ + title?: string + /** Set on error events */ + error?: string + /** Set on content/reasoning events */ + content?: string + /** Set on reasoning events */ + phase?: string + /** Set on tool_result events */ + failedDependency?: boolean } export type ToolCallStatus = 'pending' | 'executing' | 'success' | 'error' | 'skipped' | 'rejected' diff --git 
a/apps/sim/lib/copilot/store-utils.ts b/apps/sim/lib/copilot/store-utils.ts index 6c2cfcc4b8..86d3105033 100644 --- a/apps/sim/lib/copilot/store-utils.ts +++ b/apps/sim/lib/copilot/store-utils.ts @@ -1,3 +1,4 @@ +import { createLogger } from '@sim/logger' import { Loader2 } from 'lucide-react' import { ClientToolCallState, @@ -6,6 +7,12 @@ import { } from '@/lib/copilot/tools/client/tool-display-registry' import type { CopilotStore } from '@/stores/panel/copilot/types' +const logger = createLogger('CopilotStoreUtils') + +type StoreSet = ( + partial: Partial | ((state: CopilotStore) => Partial) +) => void + export function resolveToolDisplay( toolName: string | undefined, state: ClientToolCallState, @@ -80,7 +87,7 @@ export function isTerminalState(state: string): boolean { } export function abortAllInProgressTools( - set: any, + set: StoreSet, get: () => CopilotStore ) { try { @@ -89,7 +96,7 @@ export function abortAllInProgressTools( const abortedIds = new Set() let hasUpdates = false for (const [id, tc] of Object.entries(toolCallsById)) { - const st = tc.state as any + const st = tc.state const isTerminal = st === ClientToolCallState.success || st === ClientToolCallState.error || @@ -101,7 +108,7 @@ export function abortAllInProgressTools( ...tc, state: ClientToolCallState.aborted, subAgentStreaming: false, - display: resolveToolDisplay(tc.name, ClientToolCallState.aborted, id, (tc as any).params), + display: resolveToolDisplay(tc.name, ClientToolCallState.aborted, id, tc.params), } hasUpdates = true } else if (tc.subAgentStreaming) { @@ -117,7 +124,7 @@ export function abortAllInProgressTools( set((s: CopilotStore) => { const msgs = [...s.messages] for (let mi = msgs.length - 1; mi >= 0; mi--) { - const m = msgs[mi] as any + const m = msgs[mi] if (m.role !== 'assistant' || !Array.isArray(m.contentBlocks)) continue let changed = false const blocks = m.contentBlocks.map((b: any) => { @@ -148,7 +155,33 @@ export function abortAllInProgressTools( return { messages: 
msgs } }) } - } catch {} + } catch (error) { + logger.warn('Failed to abort in-progress tools', { + error: error instanceof Error ? error.message : String(error), + }) + } +} + +export function cleanupActiveState( + set: (partial: Record) => void, + get: () => Record +): void { + abortAllInProgressTools( + set as unknown as StoreSet, + get as unknown as () => CopilotStore + ) + try { + const { useWorkflowDiffStore } = require('@/stores/workflow-diff/store') as { + useWorkflowDiffStore: { + getState: () => { clearDiff: (options?: { restoreBaseline?: boolean }) => void } + } + } + useWorkflowDiffStore.getState().clearDiff({ restoreBaseline: false }) + } catch (error) { + logger.warn('Failed to clear diff during cleanup', { + error: error instanceof Error ? error.message : String(error), + }) + } } export function stripTodoTags(text: string): string { diff --git a/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts b/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts index 7b945d6b0b..6699496e7f 100644 --- a/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts +++ b/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts @@ -288,7 +288,9 @@ export const getBlocksMetadataServerTool: BaseServerTool< if (existsSync(docPath)) { metadata.yamlDocumentation = readFileSync(docPath, 'utf-8') } - } catch {} + } catch (error) { + logger.warn('Failed to read YAML documentation file', { error: error instanceof Error ? 
error.message : String(error) }) + } if (metadata) { result[blockId] = removeNullish(metadata) as CopilotBlockMetadata @@ -951,7 +953,10 @@ function resolveToolIdForOperation(blockConfig: BlockConfig, opId: string): stri const maybeToolId = toolSelector({ operation: opId }) if (typeof maybeToolId === 'string') return maybeToolId } - } catch {} + } catch (error) { + const toolLogger = createLogger('GetBlocksMetadataServerTool') + toolLogger.warn('Failed to resolve tool ID for operation', { error: error instanceof Error ? error.message : String(error) }) + } return undefined } diff --git a/apps/sim/lib/copilot/tools/server/user/get-credentials.ts b/apps/sim/lib/copilot/tools/server/user/get-credentials.ts index 5aafc2dcbd..78911bd80c 100644 --- a/apps/sim/lib/copilot/tools/server/user/get-credentials.ts +++ b/apps/sim/lib/copilot/tools/server/user/get-credentials.ts @@ -89,7 +89,9 @@ export const getCredentialsServerTool: BaseServerTool try { const decoded = jwtDecode<{ email?: string; name?: string }>(acc.idToken) displayName = decoded.email || decoded.name || '' - } catch {} + } catch (error) { + logger.warn('Failed to decode JWT id token', { error: error instanceof Error ? error.message : String(error) }) + } } if (!displayName && baseProvider === 'github') displayName = `${acc.accountId} (GitHub)` if (!displayName && userEmail) displayName = userEmail @@ -107,7 +109,9 @@ export const getCredentialsServerTool: BaseServerTool acc.id ) accessToken = refreshedToken || accessToken - } catch {} + } catch (error) { + logger.warn('Failed to refresh OAuth access token', { error: error instanceof Error ? 
error.message : String(error) }) + } connectedCredentials.push({ id: acc.id, name: displayName, diff --git a/apps/sim/lib/copilot/tools/server/workflow/get-workflow-console.ts b/apps/sim/lib/copilot/tools/server/workflow/get-workflow-console.ts index 601a17c0a0..06cfb1c823 100644 --- a/apps/sim/lib/copilot/tools/server/workflow/get-workflow-console.ts +++ b/apps/sim/lib/copilot/tools/server/workflow/get-workflow-console.ts @@ -4,6 +4,8 @@ import { createLogger } from '@sim/logger' import { desc, eq } from 'drizzle-orm' import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' +const logger = createLogger('GetWorkflowConsoleServerTool') + interface GetWorkflowConsoleArgs { workflowId: string limit?: number @@ -87,7 +89,9 @@ function normalizeErrorMessage(errorValue: unknown): string | undefined { if (typeof errorValue === 'object') { try { return JSON.stringify(errorValue) - } catch {} + } catch (error) { + logger.warn('Failed to stringify error value', { error: error instanceof Error ? 
error.message : String(error) }) + } } try { return String(errorValue) @@ -217,7 +221,6 @@ function deriveExecutionErrorSummary(params: { export const getWorkflowConsoleServerTool: BaseServerTool = { name: 'get_workflow_console', async execute(rawArgs: GetWorkflowConsoleArgs): Promise { - const logger = createLogger('GetWorkflowConsoleServerTool') const { workflowId, limit = 2, diff --git a/apps/sim/stores/panel/copilot/store.ts b/apps/sim/stores/panel/copilot/store.ts index 694123c5f1..fda7733205 100644 --- a/apps/sim/stores/panel/copilot/store.ts +++ b/apps/sim/stores/panel/copilot/store.ts @@ -16,19 +16,36 @@ import { stripContinueOptionFromBlocks, } from '@/lib/copilot/client-sse/content-blocks' import { flushStreamingUpdates, stopStreamingUpdates } from '@/lib/copilot/client-sse/handlers' -import type { StreamingContext } from '@/lib/copilot/client-sse/types' +import type { ClientContentBlock, ClientStreamingContext } from '@/lib/copilot/client-sse/types' +import { + COPILOT_AUTO_ALLOWED_TOOLS_API_PATH, + COPILOT_CHAT_API_PATH, + COPILOT_CHAT_STREAM_API_PATH, + COPILOT_CHECKPOINTS_API_PATH, + COPILOT_CHECKPOINTS_REVERT_API_PATH, + COPILOT_CONFIRM_API_PATH, + COPILOT_CREDENTIALS_API_PATH, + COPILOT_DELETE_CHAT_API_PATH, + MAX_RESUME_ATTEMPTS, + OPTIMISTIC_TITLE_MAX_LENGTH, + QUEUE_PROCESS_DELAY_MS, + STREAM_STORAGE_KEY, + STREAM_TIMEOUT_MS, + SUBSCRIPTION_INVALIDATE_DELAY_MS, +} from '@/lib/copilot/constants' import { buildCheckpointWorkflowState, buildToolCallsById, normalizeMessagesForUI, + persistMessages, saveMessageCheckpoint, - serializeMessagesForDB, } from '@/lib/copilot/messages' import type { CopilotTransportMode } from '@/lib/copilot/models' import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser' import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' import { abortAllInProgressTools, + cleanupActiveState, isRejectedState, isTerminalState, resolveToolDisplay, @@ -38,6 +55,7 @@ import { getQueryClient } 
from '@/app/_shell/providers/query-provider' import { subscriptionKeys } from '@/hooks/queries/subscription' import type { ChatContext, + CheckpointEntry, CopilotMessage, CopilotStore, CopilotStreamInfo, @@ -51,24 +69,25 @@ import type { WorkflowState } from '@/stores/workflows/workflow/types' const logger = createLogger('CopilotStore') -const STREAM_STORAGE_KEY = 'copilot_active_stream' - /** * Flag set on beforeunload to suppress continue option during page refresh/close. - * Aborts during unload should NOT show the continue button. + * Initialized once when the store module loads. */ -let isPageUnloading = false +let _isPageUnloading = false if (typeof window !== 'undefined') { window.addEventListener('beforeunload', () => { - isPageUnloading = true + _isPageUnloading = true }) } +function isPageUnloading(): boolean { + return _isPageUnloading +} function readActiveStreamFromStorage(): CopilotStreamInfo | null { if (typeof window === 'undefined') return null try { const raw = window.sessionStorage.getItem(STREAM_STORAGE_KEY) - logger.info('[Copilot] Reading stream from storage', { + logger.debug('[Copilot] Reading stream from storage', { hasRaw: !!raw, rawPreview: raw ? 
raw.substring(0, 100) : null, }) @@ -85,8 +104,8 @@ function writeActiveStreamToStorage(info: CopilotStreamInfo | null): void { if (typeof window === 'undefined') return try { if (!info) { - logger.info('[Copilot] Clearing stream from storage', { - isPageUnloading, + logger.debug('[Copilot] Clearing stream from storage', { + isPageUnloading: isPageUnloading(), stack: new Error().stack?.split('\n').slice(1, 4).join(' <- '), }) window.sessionStorage.removeItem(STREAM_STORAGE_KEY) @@ -95,7 +114,7 @@ function writeActiveStreamToStorage(info: CopilotStreamInfo | null): void { const payload = JSON.stringify(info) window.sessionStorage.setItem(STREAM_STORAGE_KEY, payload) const verified = window.sessionStorage.getItem(STREAM_STORAGE_KEY) === payload - logger.info('[Copilot] Writing stream to storage', { + logger.debug('[Copilot] Writing stream to storage', { streamId: info.streamId, lastEventId: info.lastEventId, userMessageContent: info.userMessageContent?.slice(0, 30), @@ -120,23 +139,35 @@ function updateActiveStreamEventId( writeActiveStreamToStorage(next) } -// On module load, clear any lingering diff preview (fresh page refresh) -try { - const diffStore = useWorkflowDiffStore.getState() - if (diffStore?.hasActiveDiff) { - diffStore.clearDiff() +/** + * Clear any lingering diff preview from a previous session. + * Called lazily when the store is first activated (setWorkflowId). + */ +let _initialDiffCleared = false +function clearInitialDiffIfNeeded(): void { + if (_initialDiffCleared) return + _initialDiffCleared = true + try { + const diffStore = useWorkflowDiffStore.getState() + if (diffStore?.hasActiveDiff) { + diffStore.clearDiff() + } + } catch (error) { + logger.warn('[Copilot] Failed to clear initial diff state', { + error: error instanceof Error ? 
error.message : String(error), + }) } -} catch {} +} const TEXT_BLOCK_TYPE = 'text' const CONTINUE_OPTIONS_TAG = '{"1":"Continue"}' -function cloneContentBlocks(blocks: any[]): any[] { +function cloneContentBlocks(blocks: ClientContentBlock[]): ClientContentBlock[] { if (!Array.isArray(blocks)) return [] return blocks.map((block) => (block ? { ...block } : block)) } -function extractTextFromBlocks(blocks: any[]): string { +function extractTextFromBlocks(blocks: ClientContentBlock[]): string { if (!Array.isArray(blocks)) return '' return blocks .filter((block) => block?.type === TEXT_BLOCK_TYPE && typeof block.content === 'string') @@ -144,7 +175,7 @@ function extractTextFromBlocks(blocks: any[]): string { .join('') } -function appendTextToBlocks(blocks: any[], text: string): any[] { +function appendTextToBlocks(blocks: ClientContentBlock[], text: string): ClientContentBlock[] { const nextBlocks = cloneContentBlocks(blocks) if (!text) return nextBlocks const lastIndex = nextBlocks.length - 1 @@ -158,14 +189,14 @@ function appendTextToBlocks(blocks: any[], text: string): any[] { return nextBlocks } -function findLastTextBlock(blocks: any[]): any | null { +function findLastTextBlock(blocks: ClientContentBlock[]): ClientContentBlock | null { if (!Array.isArray(blocks) || blocks.length === 0) return null const lastBlock = blocks[blocks.length - 1] return lastBlock?.type === TEXT_BLOCK_TYPE ? lastBlock : null } -function replaceTextBlocks(blocks: any[], text: string): any[] { - const next: any[] = [] +function replaceTextBlocks(blocks: ClientContentBlock[], text: string): ClientContentBlock[] { + const next: ClientContentBlock[] = [] let inserted = false for (const block of blocks ?? 
[]) { if (block?.type === TEXT_BLOCK_TYPE) { @@ -183,7 +214,7 @@ function replaceTextBlocks(blocks: any[], text: string): any[] { return next } -function createStreamingContext(messageId: string): StreamingContext { +function createClientStreamingContext(messageId: string): ClientStreamingContext { return { messageId, accumulatedContent: '', @@ -201,6 +232,639 @@ function createStreamingContext(messageId: string): StreamingContext { } } +type CopilotSet = ( + partial: Partial | ((state: CopilotStore) => Partial) +) => void + +type CopilotGet = () => CopilotStore + +interface SendMessageOptionsInput { + stream?: boolean + fileAttachments?: MessageFileAttachment[] + contexts?: ChatContext[] + messageId?: string + queueIfBusy?: boolean +} + +interface PreparedSendContext { + workflowId: string + currentChat: CopilotChat | null + mode: CopilotStore['mode'] + message: string + stream: boolean + fileAttachments?: MessageFileAttachment[] + contexts?: ChatContext[] + userMessage: CopilotMessage + streamingMessage: CopilotMessage + nextAbortController: AbortController +} + +type InitiateStreamResult = + | { kind: 'success'; result: Awaited> } + | { kind: 'error'; error: unknown } + +function prepareSendContext( + get: CopilotGet, + set: CopilotSet, + message: string, + options: SendMessageOptionsInput +): PreparedSendContext | null { + const { + workflowId, + currentChat, + mode, + revertState, + isSendingMessage, + abortController: activeAbortController, + } = get() + const { + stream = true, + fileAttachments, + contexts, + messageId, + queueIfBusy = true, + } = options + + if (!workflowId) return null + + if (isSendingMessage && !activeAbortController) { + logger.warn('[Copilot] sendMessage: stale sending state detected, clearing', { + originalMessageId: messageId, + }) + set({ isSendingMessage: false }) + } else if (isSendingMessage && activeAbortController?.signal.aborted) { + logger.warn('[Copilot] sendMessage: aborted controller detected, clearing', { + 
originalMessageId: messageId, + }) + set({ isSendingMessage: false, abortController: null }) + } else if (isSendingMessage) { + if (queueIfBusy) { + get().addToQueue(message, { fileAttachments, contexts, messageId }) + logger.info('[Copilot] Message queued (already sending)', { + queueLength: get().messageQueue.length + 1, + originalMessageId: messageId, + }) + return null + } + get().abortMessage({ suppressContinueOption: true }) + } + + const nextAbortController = new AbortController() + set({ isSendingMessage: true, error: null, abortController: nextAbortController }) + + const userMessage = createUserMessage(message, fileAttachments, contexts, messageId) + const streamingMessage = createStreamingMessage() + const snapshot = workflowId ? buildCheckpointWorkflowState(workflowId) : null + if (snapshot) { + set((state) => ({ + messageSnapshots: { ...state.messageSnapshots, [userMessage.id]: snapshot }, + })) + } + + get() + .loadSensitiveCredentialIds() + .catch((err) => { + logger.warn('[Copilot] Failed to load sensitive credential IDs', err) + }) + get() + .loadAutoAllowedTools() + .catch((err) => { + logger.warn('[Copilot] Failed to load auto-allowed tools', err) + }) + + let newMessages: CopilotMessage[] + if (revertState) { + const currentMessages = get().messages + newMessages = [...currentMessages, userMessage, streamingMessage] + set({ revertState: null, inputValue: '' }) + } else { + const currentMessages = get().messages + const existingIndex = messageId ? 
currentMessages.findIndex((m) => m.id === messageId) : -1 + if (existingIndex !== -1) { + newMessages = [...currentMessages.slice(0, existingIndex), userMessage, streamingMessage] + } else { + newMessages = [...currentMessages, userMessage, streamingMessage] + } + } + + const isFirstMessage = get().messages.length === 0 && !currentChat?.title + set({ + messages: newMessages, + currentUserMessageId: userMessage.id, + }) + + const activeStream: CopilotStreamInfo = { + streamId: userMessage.id, + workflowId, + chatId: currentChat?.id, + userMessageId: userMessage.id, + assistantMessageId: streamingMessage.id, + lastEventId: 0, + resumeAttempts: 0, + userMessageContent: message, + fileAttachments, + contexts, + startedAt: Date.now(), + } + logger.info('[Copilot] Creating new active stream', { + streamId: activeStream.streamId, + workflowId: activeStream.workflowId, + chatId: activeStream.chatId, + userMessageContent: message.slice(0, 50), + }) + set({ activeStream }) + writeActiveStreamToStorage(activeStream) + + if (isFirstMessage) { + const optimisticTitle = + message.length > OPTIMISTIC_TITLE_MAX_LENGTH + ? `${message.substring(0, OPTIMISTIC_TITLE_MAX_LENGTH - 3)}...` + : message + set((state) => ({ + currentChat: state.currentChat ? { ...state.currentChat, title: optimisticTitle } : state.currentChat, + chats: state.currentChat + ? state.chats.map((c) => (c.id === state.currentChat!.id ? { ...c, title: optimisticTitle } : c)) + : state.chats, + })) + } + + return { + workflowId, + currentChat, + mode, + message, + stream, + fileAttachments, + contexts, + userMessage, + streamingMessage, + nextAbortController, + } +} + +async function initiateStream( + prepared: PreparedSendContext, + get: CopilotGet +): Promise { + try { + const { contexts, mode } = prepared + logger.debug('sendMessage: preparing request', { + hasContexts: Array.isArray(contexts), + contextsCount: Array.isArray(contexts) ? contexts.length : 0, + contextsPreview: Array.isArray(contexts) + ? 
contexts.map((c) => ({ + kind: c?.kind, + chatId: c?.kind === 'past_chat' ? c.chatId : undefined, + workflowId: + c?.kind === 'workflow' || c?.kind === 'current_workflow' || c?.kind === 'workflow_block' + ? c.workflowId + : undefined, + label: c?.label, + })) + : undefined, + }) + + const { streamingPlanContent } = get() + let messageToSend = prepared.message + if (streamingPlanContent?.trim()) { + messageToSend = `Design Document:\n\n${streamingPlanContent}\n\n==============\n\nUser Query:\n\n${prepared.message}` + logger.debug('[DesignDocument] Prepending plan content to message', { + planLength: streamingPlanContent.length, + originalMessageLength: prepared.message.length, + finalMessageLength: messageToSend.length, + }) + } + + const apiMode: CopilotTransportMode = mode === 'ask' ? 'ask' : mode === 'plan' ? 'plan' : 'agent' + const uiToApiCommandMap: Record = { actions: 'superagent' } + const commands = contexts + ?.filter((c) => c.kind === 'slash_command' && 'command' in c) + .map((c) => { + const uiCommand = c.command.toLowerCase() + return uiToApiCommandMap[uiCommand] || uiCommand + }) as string[] | undefined + const filteredContexts = contexts?.filter((c) => c.kind !== 'slash_command') + + const result = await sendStreamingMessage({ + message: messageToSend, + userMessageId: prepared.userMessage.id, + chatId: prepared.currentChat?.id, + workflowId: prepared.workflowId || undefined, + mode: apiMode, + model: get().selectedModel, + prefetch: get().agentPrefetch, + createNewChat: !prepared.currentChat, + stream: prepared.stream, + fileAttachments: prepared.fileAttachments, + contexts: filteredContexts, + commands: commands?.length ? 
commands : undefined, + abortSignal: prepared.nextAbortController.signal, + }) + + return { kind: 'success', result } + } catch (error) { + return { kind: 'error', error } + } +} + +async function processStreamEvents( + initiated: InitiateStreamResult, + prepared: PreparedSendContext, + get: CopilotGet +): Promise { + if (initiated.kind !== 'success') return false + if (!initiated.result.success || !initiated.result.stream) return false + await get().handleStreamingResponse( + initiated.result.stream, + prepared.streamingMessage.id, + false, + prepared.userMessage.id, + prepared.nextAbortController.signal + ) + return true +} + +async function finalizeStream( + initiated: InitiateStreamResult, + processed: boolean, + prepared: PreparedSendContext, + set: CopilotSet +): Promise { + if (processed) { + set({ chatsLastLoadedAt: null, chatsLoadedForWorkflow: null }) + return + } + + if (initiated.kind === 'success') { + const { result } = initiated + if (result.error === 'Request was aborted') { + return + } + + let errorContent = result.error || 'Failed to send message' + let errorType: + | 'usage_limit' + | 'unauthorized' + | 'forbidden' + | 'rate_limit' + | 'upgrade_required' + | undefined + if (result.status === 401) { + errorContent = + '_Unauthorized request. You need a valid API key to use the copilot. You can get one by going to [sim.ai](https://sim.ai) settings and generating one there._' + errorType = 'unauthorized' + } else if (result.status === 402) { + errorContent = + '_Usage limit exceeded. To continue using this service, upgrade your plan or increase your usage limit to:_' + errorType = 'usage_limit' + } else if (result.status === 403) { + errorContent = + '_Provider config not allowed for non-enterprise users. 
Please remove the provider config and try again_' + errorType = 'forbidden' + } else if (result.status === 426) { + errorContent = + '_Please upgrade to the latest version of the Sim platform to continue using the copilot._' + errorType = 'upgrade_required' + } else if (result.status === 429) { + errorContent = '_Provider rate limit exceeded. Please try again later._' + errorType = 'rate_limit' + } + + const errorMessage = createErrorMessage(prepared.streamingMessage.id, errorContent, errorType) + set((state) => ({ + messages: state.messages.map((m) => (m.id === prepared.streamingMessage.id ? errorMessage : m)), + error: errorContent, + isSendingMessage: false, + abortController: null, + })) + set({ activeStream: null }) + writeActiveStreamToStorage(null) + return + } + + const error = initiated.error + if (error instanceof Error && error.name === 'AbortError') return + const errorMessage = createErrorMessage( + prepared.streamingMessage.id, + 'Sorry, I encountered an error while processing your message. Please try again.' + ) + set((state) => ({ + messages: state.messages.map((m) => (m.id === prepared.streamingMessage.id ? errorMessage : m)), + error: error instanceof Error ? error.message : 'Failed to send message', + isSendingMessage: false, + abortController: null, + })) + set({ activeStream: null }) + writeActiveStreamToStorage(null) +} + +interface ResumeValidationResult { + nextStream: CopilotStreamInfo + messages: CopilotMessage[] + isFreshResume: boolean +} + +async function validateResumeState( + get: CopilotGet, + set: CopilotSet +): Promise { + const inMemoryStream = get().activeStream + const storedStream = readActiveStreamFromStorage() + const stored = inMemoryStream || storedStream + logger.debug('[Copilot] Resume check', { + hasInMemory: !!inMemoryStream, + hasStored: !!storedStream, + usingStream: inMemoryStream ? 'memory' : storedStream ? 
'storage' : 'none', + streamId: stored?.streamId, + lastEventId: stored?.lastEventId, + storedWorkflowId: stored?.workflowId, + storedChatId: stored?.chatId, + userMessageContent: stored?.userMessageContent?.slice(0, 50), + currentWorkflowId: get().workflowId, + isSendingMessage: get().isSendingMessage, + resumeAttempts: stored?.resumeAttempts, + }) + + if (!stored || !stored.streamId) return null + if (get().isSendingMessage) return null + if (get().workflowId && stored.workflowId !== get().workflowId) return null + + if (stored.resumeAttempts >= MAX_RESUME_ATTEMPTS) { + logger.warn('[Copilot] Too many resume attempts, giving up') + return null + } + + const nextStream: CopilotStreamInfo = { + ...stored, + resumeAttempts: (stored.resumeAttempts || 0) + 1, + } + set({ activeStream: nextStream }) + writeActiveStreamToStorage(nextStream) + + let messages = get().messages + const isFreshResume = messages.length === 0 + if (isFreshResume && nextStream.chatId) { + try { + logger.debug('[Copilot] Loading chat for resume', { chatId: nextStream.chatId }) + const response = await fetch(`${COPILOT_CHAT_API_PATH}?chatId=${nextStream.chatId}`) + if (response.ok) { + const data = await response.json() + if (data.success && data.chat) { + const normalizedMessages = normalizeMessagesForUI(data.chat.messages ?? 
[]) + const toolCallsById = buildToolCallsById(normalizedMessages) + set({ + currentChat: data.chat, + messages: normalizedMessages, + toolCallsById, + streamingPlanContent: data.chat.planArtifact || '', + }) + messages = normalizedMessages + logger.debug('[Copilot] Loaded chat for resume', { + chatId: nextStream.chatId, + messageCount: normalizedMessages.length, + }) + } + } + } catch (e) { + logger.warn('[Copilot] Failed to load chat for resume', { error: String(e) }) + } + } + + return { nextStream, messages, isFreshResume } +} + +interface ReplayBufferedEventsResult { + nextStream: CopilotStreamInfo + bufferedContent: string + replayBlocks: ClientContentBlock[] | null + resumeFromEventId: number +} + +async function replayBufferedEvents( + stream: CopilotStreamInfo, + get: CopilotGet, + set: CopilotSet +): Promise { + let nextStream = stream + let bufferedContent = '' + let replayBlocks: ClientContentBlock[] | null = null + let resumeFromEventId = nextStream.lastEventId + + if (nextStream.lastEventId > 0) { + try { + logger.debug('[Copilot] Fetching all buffered events', { + streamId: nextStream.streamId, + savedLastEventId: nextStream.lastEventId, + }) + const batchUrl = `${COPILOT_CHAT_STREAM_API_PATH}?streamId=${encodeURIComponent( + nextStream.streamId + )}&from=0&to=${encodeURIComponent(String(nextStream.lastEventId))}&batch=true` + const batchResponse = await fetch(batchUrl, { credentials: 'include' }) + if (batchResponse.ok) { + const batchData = await batchResponse.json() + if (batchData.success && Array.isArray(batchData.events)) { + const replayContext = createClientStreamingContext(nextStream.assistantMessageId) + replayContext.suppressStreamingUpdates = true + for (const entry of batchData.events) { + const event = entry.event + if (event) { + await applySseEvent(event, replayContext, get, set) + } + if (typeof entry.eventId === 'number' && entry.eventId > resumeFromEventId) { + resumeFromEventId = entry.eventId + } + } + bufferedContent = 
replayContext.accumulatedContent + replayBlocks = replayContext.contentBlocks + logger.debug('[Copilot] Loaded buffered content instantly', { + eventCount: batchData.events.length, + contentLength: bufferedContent.length, + resumeFromEventId, + }) + } else { + logger.warn('[Copilot] Batch response missing events', { + success: batchData.success, + hasEvents: Array.isArray(batchData.events), + }) + } + } else { + logger.warn('[Copilot] Failed to fetch buffered events', { + status: batchResponse.status, + }) + } + } catch (e) { + logger.warn('[Copilot] Failed to fetch buffered events', { error: String(e) }) + } + } + + if (resumeFromEventId > nextStream.lastEventId) { + nextStream = { ...nextStream, lastEventId: resumeFromEventId } + set({ activeStream: nextStream }) + writeActiveStreamToStorage(nextStream) + } + + return { nextStream, bufferedContent, replayBlocks, resumeFromEventId } +} + +interface ResumeFinalizeResult { + nextStream: CopilotStreamInfo + bufferedContent: string + resumeFromEventId: number +} + +function finalizeResume( + messages: CopilotMessage[], + replay: ReplayBufferedEventsResult, + get: CopilotGet, + set: CopilotSet +): ResumeFinalizeResult { + let nextMessages = messages + let cleanedExisting = false + + nextMessages = nextMessages.map((m) => { + if (m.id !== replay.nextStream.assistantMessageId) return m + const hasContinueTag = + (typeof m.content === 'string' && m.content.includes(CONTINUE_OPTIONS_TAG)) || + (Array.isArray(m.contentBlocks) && + m.contentBlocks.some( + (b) => + b.type === 'text' && b.content?.includes(CONTINUE_OPTIONS_TAG) + )) + if (!hasContinueTag) return m + cleanedExisting = true + return { + ...m, + content: stripContinueOption(m.content || ''), + contentBlocks: stripContinueOptionFromBlocks(m.contentBlocks ?? 
[]), + } + }) + + if (!messages.some((m) => m.id === replay.nextStream.userMessageId)) { + const userMessage = createUserMessage( + replay.nextStream.userMessageContent || '', + replay.nextStream.fileAttachments, + replay.nextStream.contexts, + replay.nextStream.userMessageId + ) + nextMessages = [...nextMessages, userMessage] + } + + if (!nextMessages.some((m) => m.id === replay.nextStream.assistantMessageId)) { + const assistantMessage: CopilotMessage = { + ...createStreamingMessage(), + id: replay.nextStream.assistantMessageId, + content: replay.bufferedContent, + contentBlocks: + replay.replayBlocks && replay.replayBlocks.length > 0 + ? replay.replayBlocks + : replay.bufferedContent + ? [{ type: TEXT_BLOCK_TYPE, content: replay.bufferedContent, timestamp: Date.now() }] + : [], + } + nextMessages = [...nextMessages, assistantMessage] + } else if (replay.bufferedContent || (replay.replayBlocks && replay.replayBlocks.length > 0)) { + nextMessages = nextMessages.map((m) => { + if (m.id !== replay.nextStream.assistantMessageId) return m + let nextBlocks = replay.replayBlocks && replay.replayBlocks.length > 0 ? replay.replayBlocks : null + if (!nextBlocks) { + const existingBlocks = Array.isArray(m.contentBlocks) ? m.contentBlocks : [] + const existingText = extractTextFromBlocks(existingBlocks) + if (existingText && replay.bufferedContent.startsWith(existingText)) { + const delta = replay.bufferedContent.slice(existingText.length) + nextBlocks = delta ? appendTextToBlocks(existingBlocks, delta) : cloneContentBlocks(existingBlocks) + } else if (!existingText && existingBlocks.length === 0) { + nextBlocks = replay.bufferedContent + ? [{ type: TEXT_BLOCK_TYPE, content: replay.bufferedContent, timestamp: Date.now() }] + : [] + } else { + nextBlocks = replaceTextBlocks(existingBlocks, replay.bufferedContent) + } + } + return { + ...m, + content: replay.bufferedContent, + contentBlocks: nextBlocks ?? 
[], + } + }) + } + + if (cleanedExisting || nextMessages !== messages || replay.bufferedContent) { + set({ messages: nextMessages, currentUserMessageId: replay.nextStream.userMessageId }) + } else { + set({ currentUserMessageId: replay.nextStream.userMessageId }) + } + + return { + nextStream: replay.nextStream, + bufferedContent: replay.bufferedContent, + resumeFromEventId: replay.resumeFromEventId, + } +} + +async function resumeFromLiveStream( + resume: ResumeFinalizeResult, + isFreshResume: boolean, + get: CopilotGet, + set: CopilotSet +): Promise { + const abortController = new AbortController() + set({ isSendingMessage: true, abortController }) + + try { + logger.debug('[Copilot] Attempting to resume stream', { + streamId: resume.nextStream.streamId, + savedLastEventId: resume.nextStream.lastEventId, + resumeFromEventId: resume.resumeFromEventId, + isFreshResume, + bufferedContentLength: resume.bufferedContent.length, + assistantMessageId: resume.nextStream.assistantMessageId, + chatId: resume.nextStream.chatId, + }) + const result = await sendStreamingMessage({ + message: resume.nextStream.userMessageContent || '', + userMessageId: resume.nextStream.userMessageId, + workflowId: resume.nextStream.workflowId, + chatId: resume.nextStream.chatId || get().currentChat?.id || undefined, + mode: get().mode === 'ask' ? 'ask' : get().mode === 'plan' ? 
'plan' : 'agent', + model: get().selectedModel, + prefetch: get().agentPrefetch, + stream: true, + resumeFromEventId: resume.resumeFromEventId, + abortSignal: abortController.signal, + }) + + logger.info('[Copilot] Resume stream result', { + success: result.success, + hasStream: !!result.stream, + error: result.error, + }) + + if (result.success && result.stream) { + await get().handleStreamingResponse( + result.stream, + resume.nextStream.assistantMessageId, + true, + resume.nextStream.userMessageId, + abortController.signal + ) + return true + } + + set({ isSendingMessage: false, abortController: null }) + } catch (error) { + if (error instanceof Error && (error.name === 'AbortError' || error.message.includes('aborted'))) { + logger.info('[Copilot] Resume stream aborted by user') + set({ isSendingMessage: false, abortController: null }) + return false + } + logger.error('[Copilot] Failed to resume stream', { + error: error instanceof Error ? error.message : String(error), + }) + set({ isSendingMessage: false, abortController: null }) + } + return false +} + // Initial state (subset required for UI/streaming) const initialState = { mode: 'build' as const, @@ -211,7 +875,7 @@ const initialState = { currentChat: null as CopilotChat | null, chats: [] as CopilotChat[], messages: [] as CopilotMessage[], - messageCheckpoints: {} as Record, + messageCheckpoints: {} as Record, messageSnapshots: {} as Record, isLoading: false, isLoadingChats: false, @@ -253,16 +917,17 @@ export const useCopilotStore = create()( // Workflow selection setWorkflowId: async (workflowId: string | null) => { + clearInitialDiffIfNeeded() const currentWorkflowId = get().workflowId if (currentWorkflowId === workflowId) return const { isSendingMessage } = get() if (isSendingMessage) get().abortMessage() // Abort all in-progress tools and clear any diff preview - abortAllInProgressTools(set, get) - try { - useWorkflowDiffStore.getState().clearDiff({ restoreBaseline: false }) - } catch {} + 
cleanupActiveState( + set as unknown as (partial: Record) => void, + get as unknown as () => Record + ) set({ ...initialState, @@ -293,10 +958,10 @@ export const useCopilotStore = create()( if (currentChat && currentChat.id !== chat.id && isSendingMessage) get().abortMessage() // Abort in-progress tools and clear diff when changing chats - abortAllInProgressTools(set, get) - try { - useWorkflowDiffStore.getState().clearDiff({ restoreBaseline: false }) - } catch {} + cleanupActiveState( + set as unknown as (partial: Record) => void, + get as unknown as () => Record + ) // Restore plan content and config (mode/model) from selected chat const planArtifact = chat.planArtifact || '' @@ -304,7 +969,7 @@ export const useCopilotStore = create()( const chatMode = chatConfig.mode || get().mode const chatModel = chatConfig.model || get().selectedModel - logger.info('[Chat] Restoring chat config', { + logger.debug('[Chat] Restoring chat config', { chatId: chat.id, mode: chatMode, model: chatModel, @@ -336,27 +1001,25 @@ export const useCopilotStore = create()( // Background-save the previous chat's latest messages, plan artifact, and config before switching (optimistic) try { if (previousChat && previousChat.id !== chat.id) { - const dbMessages = serializeMessagesForDB(previousMessages, get().sensitiveCredentialIds) const previousPlanArtifact = get().streamingPlanContent - fetch('/api/copilot/chat/update-messages', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - chatId: previousChat.id, - messages: dbMessages, - planArtifact: previousPlanArtifact || null, - config: { - mode: previousMode, - model: previousModel, - }, - }), - }).catch(() => {}) + void persistMessages({ + chatId: previousChat.id, + messages: previousMessages, + sensitiveCredentialIds: get().sensitiveCredentialIds, + planArtifact: previousPlanArtifact || null, + mode: previousMode, + model: previousModel, + }) } - } catch {} + } catch (error) { + 
logger.warn('[Copilot] Failed to schedule previous-chat background save', { + error: error instanceof Error ? error.message : String(error), + }) + } // Refresh selected chat from server to ensure we have latest messages/tool calls try { - const response = await fetch(`/api/copilot/chat?workflowId=${workflowId}`) + const response = await fetch(`${COPILOT_CHAT_API_PATH}?workflowId=${workflowId}`) if (!response.ok) throw new Error(`Failed to fetch latest chat data: ${response.status}`) const data = await response.json() if (data.success && Array.isArray(data.chats)) { @@ -375,10 +1038,19 @@ export const useCopilotStore = create()( }) try { await get().loadMessageCheckpoints(latestChat.id) - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed loading checkpoints for selected chat', { + chatId: latestChat.id, + error: error instanceof Error ? error.message : String(error), + }) + } } } - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed to refresh selected chat from server', { + error: error instanceof Error ? 
error.message : String(error), + }) + } }, createNewChat: async () => { @@ -386,32 +1058,30 @@ export const useCopilotStore = create()( if (isSendingMessage) get().abortMessage() // Abort in-progress tools and clear diff on new chat - abortAllInProgressTools(set, get) - try { - useWorkflowDiffStore.getState().clearDiff({ restoreBaseline: false }) - } catch {} + cleanupActiveState( + set as unknown as (partial: Record) => void, + get as unknown as () => Record + ) // Background-save the current chat before clearing (optimistic) try { const { currentChat, streamingPlanContent, mode, selectedModel } = get() if (currentChat) { const currentMessages = get().messages - const dbMessages = serializeMessagesForDB(currentMessages, get().sensitiveCredentialIds) - fetch('/api/copilot/chat/update-messages', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - chatId: currentChat.id, - messages: dbMessages, - planArtifact: streamingPlanContent || null, - config: { - mode, - model: selectedModel, - }, - }), - }).catch(() => {}) + void persistMessages({ + chatId: currentChat.id, + messages: currentMessages, + sensitiveCredentialIds: get().sensitiveCredentialIds, + planArtifact: streamingPlanContent || null, + mode, + model: selectedModel, + }) } - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed to schedule current-chat background save', { + error: error instanceof Error ? 
error.message : String(error), + }) + } set({ currentChat: null, @@ -427,7 +1097,7 @@ export const useCopilotStore = create()( deleteChat: async (chatId: string) => { try { // Call delete API - const response = await fetch('/api/copilot/chat/delete', { + const response = await fetch(COPILOT_DELETE_CHAT_API_PATH, { method: 'DELETE', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ chatId }), @@ -463,7 +1133,7 @@ export const useCopilotStore = create()( // For now always fetch fresh set({ isLoadingChats: true }) try { - const url = `/api/copilot/chat?workflowId=${workflowId}` + const url = `${COPILOT_CHAT_API_PATH}?workflowId=${workflowId}` const response = await fetch(url) if (!response.ok) { throw new Error(`Failed to fetch chats: ${response.status}`) @@ -510,7 +1180,12 @@ export const useCopilotStore = create()( } try { await get().loadMessageCheckpoints(updatedCurrentChat.id) - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed loading checkpoints for current chat', { + chatId: updatedCurrentChat.id, + error: error instanceof Error ? error.message : String(error), + }) + } } else if (!isSendingMessage && !suppressAutoSelect) { const mostRecentChat: CopilotChat = data.chats[0] const normalizedMessages = normalizeMessagesForUI(mostRecentChat.messages ?? []) @@ -540,7 +1215,12 @@ export const useCopilotStore = create()( }) try { await get().loadMessageCheckpoints(mostRecentChat.id) - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed loading checkpoints for most recent chat', { + chatId: mostRecentChat.id, + error: error instanceof Error ? 
error.message : String(error), + }) + } } } else { set({ currentChat: null, messages: [] }) @@ -560,526 +1240,32 @@ export const useCopilotStore = create()( // Send a message (streaming only) sendMessage: async (message: string, options = {}) => { - const { - workflowId, - currentChat, - mode, - revertState, - isSendingMessage, - abortController: activeAbortController, - } = get() - const { - stream = true, - fileAttachments, - contexts, - messageId, - queueIfBusy = true, - } = options as { - stream?: boolean - fileAttachments?: MessageFileAttachment[] - contexts?: ChatContext[] - messageId?: string - queueIfBusy?: boolean - } - - if (!workflowId) return - - // If already sending a message, queue this one instead unless bypassing queue - if (isSendingMessage && !activeAbortController) { - logger.warn('[Copilot] sendMessage: stale sending state detected, clearing', { - originalMessageId: messageId, - }) - set({ isSendingMessage: false }) - } else if (isSendingMessage && activeAbortController?.signal.aborted) { - logger.warn('[Copilot] sendMessage: aborted controller detected, clearing', { - originalMessageId: messageId, - }) - set({ isSendingMessage: false, abortController: null }) - } else if (isSendingMessage) { - if (queueIfBusy) { - get().addToQueue(message, { fileAttachments, contexts, messageId }) - logger.info('[Copilot] Message queued (already sending)', { - queueLength: get().messageQueue.length + 1, - originalMessageId: messageId, - }) - return - } - get().abortMessage({ suppressContinueOption: true }) - } - - const nextAbortController = new AbortController() - set({ isSendingMessage: true, error: null, abortController: nextAbortController }) + const prepared = prepareSendContext(get, set, message, options as SendMessageOptionsInput) + if (!prepared) return - const userMessage = createUserMessage(message, fileAttachments, contexts, messageId) - const streamingMessage = createStreamingMessage() - const snapshot = workflowId ? 
buildCheckpointWorkflowState(workflowId) : null - if (snapshot) { - set((state) => ({ - messageSnapshots: { ...state.messageSnapshots, [userMessage.id]: snapshot }, - })) - } - - get() - .loadSensitiveCredentialIds() - .catch((err) => { - logger.warn('[Copilot] Failed to load sensitive credential IDs', err) - }) - get() - .loadAutoAllowedTools() - .catch((err) => { - logger.warn('[Copilot] Failed to load auto-allowed tools', err) - }) + const initiated = await initiateStream(prepared, get) + let finalizedInitiated = initiated + let processed = false - let newMessages: CopilotMessage[] - if (revertState) { - const currentMessages = get().messages - newMessages = [...currentMessages, userMessage, streamingMessage] - set({ revertState: null, inputValue: '' }) - } else { - const currentMessages = get().messages - // If messageId is provided, check if it already exists (e.g., from edit flow) - const existingIndex = messageId ? currentMessages.findIndex((m) => m.id === messageId) : -1 - if (existingIndex !== -1) { - // Replace existing message instead of adding new one - newMessages = [...currentMessages.slice(0, existingIndex), userMessage, streamingMessage] - } else { - // Add new messages normally - newMessages = [...currentMessages, userMessage, streamingMessage] - } - } - - const isFirstMessage = get().messages.length === 0 && !currentChat?.title - set((state) => ({ - messages: newMessages, - currentUserMessageId: userMessage.id, - })) - - // Create new stream info and write to storage BEFORE starting the stream - // This ensures that if the user refreshes, they get the correct stream - const activeStream: CopilotStreamInfo = { - streamId: userMessage.id, - workflowId, - chatId: currentChat?.id, - userMessageId: userMessage.id, - assistantMessageId: streamingMessage.id, - lastEventId: 0, - resumeAttempts: 0, - userMessageContent: message, - fileAttachments, - contexts, - startedAt: Date.now(), - } - logger.info('[Copilot] Creating new active stream', { - streamId: 
activeStream.streamId, - workflowId: activeStream.workflowId, - chatId: activeStream.chatId, - userMessageContent: message.slice(0, 50), - }) - set({ activeStream }) - writeActiveStreamToStorage(activeStream) - - if (isFirstMessage) { - const optimisticTitle = message.length > 50 ? `${message.substring(0, 47)}...` : message - set((state) => ({ - currentChat: state.currentChat - ? { ...state.currentChat, title: optimisticTitle } - : state.currentChat, - chats: state.currentChat - ? state.chats.map((c) => - c.id === state.currentChat!.id ? { ...c, title: optimisticTitle } : c - ) - : state.chats, - })) - } - - try { - // Debug: log contexts presence before sending + if (initiated.kind === 'success') { try { - logger.info('sendMessage: preparing request', { - hasContexts: Array.isArray(contexts), - contextsCount: Array.isArray(contexts) ? contexts.length : 0, - contextsPreview: Array.isArray(contexts) - ? contexts.map((c: any) => ({ - kind: c?.kind, - chatId: (c as any)?.chatId, - workflowId: (c as any)?.workflowId, - label: (c as any)?.label, - })) - : undefined, - }) - } catch {} - - // Prepend design document to message if available - const { streamingPlanContent } = get() - let messageToSend = message - if (streamingPlanContent?.trim()) { - messageToSend = `Design Document:\n\n${streamingPlanContent}\n\n==============\n\nUser Query:\n\n${message}` - logger.info('[DesignDocument] Prepending plan content to message', { - planLength: streamingPlanContent.length, - originalMessageLength: message.length, - finalMessageLength: messageToSend.length, - }) - } - - // Call copilot API - const apiMode: CopilotTransportMode = - mode === 'ask' ? 'ask' : mode === 'plan' ? 
'plan' : 'agent' - - // Extract slash commands from contexts (lowercase) and filter them out from contexts - // Map UI command IDs to API command IDs (e.g., "actions" -> "superagent") - const uiToApiCommandMap: Record = { actions: 'superagent' } - const commands = contexts - ?.filter((c) => c.kind === 'slash_command' && 'command' in c) - .map((c) => { - const uiCommand = (c as any).command.toLowerCase() - return uiToApiCommandMap[uiCommand] || uiCommand - }) as string[] | undefined - const filteredContexts = contexts?.filter((c) => c.kind !== 'slash_command') - - const result = await sendStreamingMessage({ - message: messageToSend, - userMessageId: userMessage.id, - chatId: currentChat?.id, - workflowId: workflowId || undefined, - mode: apiMode, - model: get().selectedModel, - prefetch: get().agentPrefetch, - createNewChat: !currentChat, - stream, - fileAttachments, - contexts: filteredContexts, - commands: commands?.length ? commands : undefined, - abortSignal: nextAbortController.signal, - }) - - if (result.success && result.stream) { - await get().handleStreamingResponse( - result.stream, - streamingMessage.id, - false, - userMessage.id, - nextAbortController.signal - ) - set({ chatsLastLoadedAt: null, chatsLoadedForWorkflow: null }) - } else { - if (result.error === 'Request was aborted') { - return - } - - // Check for specific status codes and provide custom messages - let errorContent = result.error || 'Failed to send message' - let errorType: - | 'usage_limit' - | 'unauthorized' - | 'forbidden' - | 'rate_limit' - | 'upgrade_required' - | undefined - if (result.status === 401) { - errorContent = - '_Unauthorized request. You need a valid API key to use the copilot. You can get one by going to [sim.ai](https://sim.ai) settings and generating one there._' - errorType = 'unauthorized' - } else if (result.status === 402) { - errorContent = - '_Usage limit exceeded. 
To continue using this service, upgrade your plan or increase your usage limit to:_' - errorType = 'usage_limit' - } else if (result.status === 403) { - errorContent = - '_Provider config not allowed for non-enterprise users. Please remove the provider config and try again_' - errorType = 'forbidden' - } else if (result.status === 426) { - errorContent = - '_Please upgrade to the latest version of the Sim platform to continue using the copilot._' - errorType = 'upgrade_required' - } else if (result.status === 429) { - errorContent = '_Provider rate limit exceeded. Please try again later._' - errorType = 'rate_limit' - } - - const errorMessage = createErrorMessage(streamingMessage.id, errorContent, errorType) - set((state) => ({ - messages: state.messages.map((m) => (m.id === streamingMessage.id ? errorMessage : m)), - error: errorContent, - isSendingMessage: false, - abortController: null, - })) - set({ activeStream: null }) - writeActiveStreamToStorage(null) + processed = await processStreamEvents(initiated, prepared, get) + } catch (error) { + finalizedInitiated = { kind: 'error', error } + processed = false } - } catch (error) { - if (error instanceof Error && error.name === 'AbortError') return - const errorMessage = createErrorMessage( - streamingMessage.id, - 'Sorry, I encountered an error while processing your message. Please try again.' - ) - set((state) => ({ - messages: state.messages.map((m) => (m.id === streamingMessage.id ? errorMessage : m)), - error: error instanceof Error ? 
error.message : 'Failed to send message', - isSendingMessage: false, - abortController: null, - })) - set({ activeStream: null }) - writeActiveStreamToStorage(null) } + + await finalizeStream(finalizedInitiated, processed, prepared, set) }, resumeActiveStream: async () => { - const inMemoryStream = get().activeStream - const storedStream = readActiveStreamFromStorage() - const stored = inMemoryStream || storedStream - logger.info('[Copilot] Resume check', { - hasInMemory: !!inMemoryStream, - hasStored: !!storedStream, - usingStream: inMemoryStream ? 'memory' : storedStream ? 'storage' : 'none', - streamId: stored?.streamId, - lastEventId: stored?.lastEventId, - storedWorkflowId: stored?.workflowId, - storedChatId: stored?.chatId, - userMessageContent: stored?.userMessageContent?.slice(0, 50), - currentWorkflowId: get().workflowId, - isSendingMessage: get().isSendingMessage, - resumeAttempts: stored?.resumeAttempts, - }) - if (!stored || !stored.streamId) return false - if (get().isSendingMessage) return false - if (get().workflowId && stored.workflowId !== get().workflowId) return false - - if (stored.resumeAttempts >= 3) { - logger.warn('[Copilot] Too many resume attempts, giving up') - return false - } - - let nextStream: CopilotStreamInfo = { - ...stored, - resumeAttempts: (stored.resumeAttempts || 0) + 1, - } - set({ activeStream: nextStream }) - writeActiveStreamToStorage(nextStream) - - // Load existing chat messages from database if we have a chatId but no messages - let messages = get().messages - // Track if this is a fresh page load (no messages in memory) - const isFreshResume = messages.length === 0 - if (isFreshResume && nextStream.chatId) { - try { - logger.info('[Copilot] Loading chat for resume', { chatId: nextStream.chatId }) - const response = await fetch(`/api/copilot/chat?chatId=${nextStream.chatId}`) - if (response.ok) { - const data = await response.json() - if (data.success && data.chat) { - const normalizedMessages = 
normalizeMessagesForUI(data.chat.messages ?? []) - const toolCallsById = buildToolCallsById(normalizedMessages) - set({ - currentChat: data.chat, - messages: normalizedMessages, - toolCallsById, - streamingPlanContent: data.chat.planArtifact || '', - }) - messages = normalizedMessages - logger.info('[Copilot] Loaded chat for resume', { - chatId: nextStream.chatId, - messageCount: normalizedMessages.length, - }) - } - } - } catch (e) { - logger.warn('[Copilot] Failed to load chat for resume', { error: String(e) }) - } - } - - let bufferedContent = '' - let replayBlocks: any[] | null = null - let resumeFromEventId = nextStream.lastEventId - if (nextStream.lastEventId > 0) { - try { - logger.info('[Copilot] Fetching all buffered events', { - streamId: nextStream.streamId, - savedLastEventId: nextStream.lastEventId, - }) - const batchUrl = `/api/copilot/chat/stream?streamId=${encodeURIComponent( - nextStream.streamId - )}&from=0&to=${encodeURIComponent(String(nextStream.lastEventId))}&batch=true` - const batchResponse = await fetch(batchUrl, { credentials: 'include' }) - if (batchResponse.ok) { - const batchData = await batchResponse.json() - if (batchData.success && Array.isArray(batchData.events)) { - const replayContext = createStreamingContext(nextStream.assistantMessageId) - replayContext.suppressStreamingUpdates = true - for (const entry of batchData.events) { - const event = entry.event - if (event) { - await applySseEvent(event, replayContext, get, set) - } - if (typeof entry.eventId === 'number' && entry.eventId > resumeFromEventId) { - resumeFromEventId = entry.eventId - } - } - bufferedContent = replayContext.accumulatedContent - replayBlocks = replayContext.contentBlocks - logger.info('[Copilot] Loaded buffered content instantly', { - eventCount: batchData.events.length, - contentLength: bufferedContent.length, - resumeFromEventId, - }) - } else { - logger.warn('[Copilot] Batch response missing events', { - success: batchData.success, - hasEvents: 
Array.isArray(batchData.events), - }) - } - } else { - logger.warn('[Copilot] Failed to fetch buffered events', { - status: batchResponse.status, - }) - } - } catch (e) { - logger.warn('[Copilot] Failed to fetch buffered events', { error: String(e) }) - } - } - if (resumeFromEventId > nextStream.lastEventId) { - nextStream = { ...nextStream, lastEventId: resumeFromEventId } - set({ activeStream: nextStream }) - writeActiveStreamToStorage(nextStream) - } - - let nextMessages = messages - let cleanedExisting = false - nextMessages = nextMessages.map((m) => { - if (m.id !== nextStream.assistantMessageId) return m - const hasContinueTag = - (typeof m.content === 'string' && m.content.includes(CONTINUE_OPTIONS_TAG)) || - (Array.isArray(m.contentBlocks) && - m.contentBlocks.some( - (b: any) => - b?.type === TEXT_BLOCK_TYPE && - typeof b.content === 'string' && - b.content.includes(CONTINUE_OPTIONS_TAG) - )) - if (!hasContinueTag) return m - cleanedExisting = true - return { - ...m, - content: stripContinueOption(m.content || ''), - contentBlocks: stripContinueOptionFromBlocks(m.contentBlocks ?? []), - } - }) - - if (!messages.some((m) => m.id === nextStream.userMessageId)) { - const userMessage = createUserMessage( - nextStream.userMessageContent || '', - nextStream.fileAttachments, - nextStream.contexts, - nextStream.userMessageId - ) - nextMessages = [...nextMessages, userMessage] - } - - if (!nextMessages.some((m) => m.id === nextStream.assistantMessageId)) { - const assistantMessage: CopilotMessage = { - ...createStreamingMessage(), - id: nextStream.assistantMessageId, - content: bufferedContent, - contentBlocks: - replayBlocks && replayBlocks.length > 0 - ? replayBlocks - : bufferedContent - ? 
[{ type: TEXT_BLOCK_TYPE, content: bufferedContent, timestamp: Date.now() }] - : [], - } - nextMessages = [...nextMessages, assistantMessage] - } else if (bufferedContent || (replayBlocks && replayBlocks.length > 0)) { - nextMessages = nextMessages.map((m) => { - if (m.id !== nextStream.assistantMessageId) return m - let nextBlocks = replayBlocks && replayBlocks.length > 0 ? replayBlocks : null - if (!nextBlocks) { - const existingBlocks = Array.isArray(m.contentBlocks) ? m.contentBlocks : [] - const existingText = extractTextFromBlocks(existingBlocks) - if (existingText && bufferedContent.startsWith(existingText)) { - const delta = bufferedContent.slice(existingText.length) - nextBlocks = delta - ? appendTextToBlocks(existingBlocks, delta) - : cloneContentBlocks(existingBlocks) - } else if (!existingText && existingBlocks.length === 0) { - nextBlocks = bufferedContent - ? [{ type: TEXT_BLOCK_TYPE, content: bufferedContent, timestamp: Date.now() }] - : [] - } else { - nextBlocks = replaceTextBlocks(existingBlocks, bufferedContent) - } - } - return { - ...m, - content: bufferedContent, - contentBlocks: nextBlocks ?? 
[], - } - }) - } + const validated = await validateResumeState(get, set) + if (!validated) return false - if (cleanedExisting || nextMessages !== messages || bufferedContent) { - set({ messages: nextMessages, currentUserMessageId: nextStream.userMessageId }) - } else { - set({ currentUserMessageId: nextStream.userMessageId }) - } - - const abortController = new AbortController() - set({ isSendingMessage: true, abortController }) - - try { - logger.info('[Copilot] Attempting to resume stream', { - streamId: nextStream.streamId, - savedLastEventId: nextStream.lastEventId, - resumeFromEventId, - isFreshResume, - bufferedContentLength: bufferedContent.length, - assistantMessageId: nextStream.assistantMessageId, - chatId: nextStream.chatId, - }) - const result = await sendStreamingMessage({ - message: nextStream.userMessageContent || '', - userMessageId: nextStream.userMessageId, - workflowId: nextStream.workflowId, - chatId: nextStream.chatId || get().currentChat?.id || undefined, - mode: get().mode === 'ask' ? 'ask' : get().mode === 'plan' ? 
'plan' : 'agent', - model: get().selectedModel, - prefetch: get().agentPrefetch, - stream: true, - resumeFromEventId, - abortSignal: abortController.signal, - }) - - logger.info('[Copilot] Resume stream result', { - success: result.success, - hasStream: !!result.stream, - error: result.error, - }) - - if (result.success && result.stream) { - await get().handleStreamingResponse( - result.stream, - nextStream.assistantMessageId, - true, - nextStream.userMessageId, - abortController.signal - ) - return true - } - set({ isSendingMessage: false, abortController: null }) - } catch (error) { - // Handle AbortError gracefully - expected when user aborts - if ( - error instanceof Error && - (error.name === 'AbortError' || error.message.includes('aborted')) - ) { - logger.info('[Copilot] Resume stream aborted by user') - set({ isSendingMessage: false, abortController: null }) - return false - } - logger.error('[Copilot] Failed to resume stream', { - error: error instanceof Error ? error.message : String(error), - }) - set({ isSendingMessage: false, abortController: null }) - } - return false + const replayed = await replayBufferedEvents(validated.nextStream, get, set) + const finalized = finalizeResume(validated.messages, replayed, get, set) + return resumeFromLiveStream(finalized, validated.isFreshResume, get, set) }, // Abort streaming @@ -1087,7 +1273,7 @@ export const useCopilotStore = create()( const { abortController, isSendingMessage, messages } = get() if (!isSendingMessage || !abortController) return // Suppress continue option if explicitly requested OR if page is unloading (refresh/close) - const suppressContinueOption = options?.suppressContinueOption === true || isPageUnloading + const suppressContinueOption = options?.suppressContinueOption === true || isPageUnloading() set({ isAborting: true, suppressAbortContinueOption: suppressContinueOption }) try { abortController.abort() @@ -1098,7 +1284,7 @@ export const useCopilotStore = create()( const textContent = 
lastMessage.contentBlocks ?.filter((b) => b.type === 'text') - .map((b: any) => b.content) + .map((b) => b.content ?? '') .join('') || '' const nextContentBlocks = suppressContinueOption ? (lastMessage.contentBlocks ?? []) @@ -1132,7 +1318,7 @@ export const useCopilotStore = create()( // Only clear active stream for user-initiated aborts, NOT page unload // During page unload, keep the stream info so we can resume after refresh - if (!isPageUnloading) { + if (!isPageUnloading()) { set({ activeStream: null }) writeActiveStreamToStorage(null) } @@ -1145,26 +1331,27 @@ export const useCopilotStore = create()( if (currentChat) { try { const currentMessages = get().messages - const dbMessages = serializeMessagesForDB(currentMessages, get().sensitiveCredentialIds) - fetch('/api/copilot/chat/update-messages', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - chatId: currentChat.id, - messages: dbMessages, - planArtifact: streamingPlanContent || null, - config: { - mode, - model: selectedModel, - }, - }), - }).catch(() => {}) - } catch {} + void persistMessages({ + chatId: currentChat.id, + messages: currentMessages, + sensitiveCredentialIds: get().sensitiveCredentialIds, + planArtifact: streamingPlanContent || null, + mode, + model: selectedModel, + }) + } catch (error) { + logger.warn('[Copilot] Failed to queue abort snapshot persistence', { + error: error instanceof Error ? error.message : String(error), + }) + } } - } catch { + } catch (error) { + logger.warn('[Copilot] Abort flow encountered an error', { + error: error instanceof Error ? 
error.message : String(error), + }) set({ isSendingMessage: false, isAborting: false }) // Only clear active stream for user-initiated aborts, NOT page unload - if (!isPageUnloading) { + if (!isPageUnloading()) { set({ activeStream: null }) writeActiveStreamToStorage(null) } @@ -1235,7 +1422,7 @@ export const useCopilotStore = create()( }, // Tool-call related APIs are stubbed for now - setToolCallState: (toolCall: any, newState: any) => { + setToolCallState: (toolCall: CopilotToolCall, newState: ClientToolCallState | string) => { try { const id: string | undefined = toolCall?.id if (!id) return @@ -1245,7 +1432,7 @@ export const useCopilotStore = create()( // Preserve rejected state from being overridden if ( isRejectedState(current.state) && - (newState === 'success' || newState === (ClientToolCallState as any).success) + (newState === 'success' || newState === ClientToolCallState.success) ) { return } @@ -1264,10 +1451,15 @@ export const useCopilotStore = create()( display: resolveToolDisplay(current.name, norm, id, current.params), } set({ toolCallsById: map }) - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed to update tool call state', { + error: error instanceof Error ? error.message : String(error), + toolCallId: toolCall?.id, + }) + } }, - updateToolCallParams: (toolCallId: string, params: Record) => { + updateToolCallParams: (toolCallId: string, params: Record) => { try { if (!toolCallId) return const map = { ...get().toolCallsById } @@ -1280,7 +1472,12 @@ export const useCopilotStore = create()( display: resolveToolDisplay(current.name, current.state, toolCallId, updatedParams), } set({ toolCallsById: map }) - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed to update tool call params', { + error: error instanceof Error ? 
error.message : String(error), + toolCallId, + }) + } }, updatePreviewToolCallState: ( toolCallState: 'accepted' | 'rejected' | 'error', @@ -1301,7 +1498,7 @@ export const useCopilotStore = create()( outer: for (let mi = messages.length - 1; mi >= 0; mi--) { const m = messages[mi] if (m.role !== 'assistant' || !m.contentBlocks) continue - const blocks = m.contentBlocks as any[] + const blocks = m.contentBlocks for (let bi = blocks.length - 1; bi >= 0; bi--) { const b = blocks[bi] if (b?.type === 'tool_call') { @@ -1323,7 +1520,7 @@ export const useCopilotStore = create()( const current = toolCallsById[id] if (!current) return // Do not override a rejected tool with success - if (isRejectedState(current.state) && targetState === (ClientToolCallState as any).success) { + if (isRejectedState(current.state) && targetState === ClientToolCallState.success) { return } @@ -1344,15 +1541,14 @@ export const useCopilotStore = create()( const m = messages[mi] if (m.role !== 'assistant' || !m.contentBlocks) continue let changed = false - const blocks = m.contentBlocks.map((b: any) => { + const blocks = m.contentBlocks.map((b) => { if (b.type === 'tool_call' && b.toolCall?.id === id) { changed = true - const prev = b.toolCall ?? {} return { ...b, toolCall: { - ...prev, - id, + ...b.toolCall, + id: id!, name: current.name, state: targetState, display: updatedDisplay, @@ -1371,15 +1567,27 @@ export const useCopilotStore = create()( }) try { - fetch('/api/copilot/confirm', { + fetch(COPILOT_CONFIRM_API_PATH, { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ toolCallId: id, status: toolCallState, }), - }).catch(() => {}) - } catch {} + }).catch((error) => { + logger.warn('[Copilot] Failed to send tool confirmation', { + error: error instanceof Error ? 
error.message : String(error), + toolCallId: id, + status: toolCallState, + }) + }) + } catch (error) { + logger.warn('[Copilot] Failed to queue tool confirmation request', { + error: error instanceof Error ? error.message : String(error), + toolCallId: id, + status: toolCallState, + }) + } }, loadMessageCheckpoints: async (chatId: string) => { @@ -1387,16 +1595,19 @@ export const useCopilotStore = create()( if (!workflowId) return set({ isLoadingCheckpoints: true, checkpointError: null }) try { - const response = await fetch(`/api/copilot/checkpoints?chatId=${chatId}`) + const response = await fetch(`${COPILOT_CHECKPOINTS_API_PATH}?chatId=${chatId}`) if (!response.ok) throw new Error(`Failed to load checkpoints: ${response.statusText}`) const data = await response.json() if (data.success && Array.isArray(data.checkpoints)) { - const grouped = data.checkpoints.reduce((acc: Record, cp: any) => { - const key = cp.messageId || '__no_message__' - acc[key] = acc[key] ?? [] - acc[key].push(cp) - return acc - }, {}) + const grouped = (data.checkpoints as CheckpointEntry[]).reduce( + (acc: Record, cp: CheckpointEntry) => { + const key = cp.messageId || '__no_message__' + acc[key] = acc[key] ?? [] + acc[key].push(cp) + return acc + }, + {} + ) set({ messageCheckpoints: grouped, isLoadingCheckpoints: false }) } else { throw new Error('Invalid checkpoints response') @@ -1417,9 +1628,9 @@ export const useCopilotStore = create()( try { const { messageCheckpoints } = get() const checkpointMessageId = Object.entries(messageCheckpoints).find(([, cps]) => - (cps ?? []).some((cp: any) => cp?.id === checkpointId) + (cps ?? 
[]).some((cp) => cp?.id === checkpointId) )?.[0] - const response = await fetch('/api/copilot/checkpoints/revert', { + const response = await fetch(COPILOT_CHECKPOINTS_REVERT_API_PATH, { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ checkpointId }), @@ -1434,7 +1645,11 @@ export const useCopilotStore = create()( // Clear any active diff preview try { useWorkflowDiffStore.getState().clearDiff() - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed to clear diff before checkpoint revert', { + error: error instanceof Error ? error.message : String(error), + }) + } // Apply to main workflow store useWorkflowStore.setState({ @@ -1447,14 +1662,13 @@ export const useCopilotStore = create()( }) // Extract and apply subblock values - const values: Record> = {} - Object.entries(reverted.blocks ?? {}).forEach(([blockId, block]: [string, any]) => { + const values: Record> = {} + Object.entries(reverted.blocks ?? {}).forEach(([blockId, block]) => { + const typedBlock = block as { subBlocks?: Record } values[blockId] = {} - Object.entries((block as any).subBlocks ?? {}).forEach( - ([subId, sub]: [string, any]) => { - values[blockId][subId] = (sub as any)?.value - } - ) + Object.entries(typedBlock.subBlocks ?? 
{}).forEach(([subId, sub]) => { + values[blockId][subId] = sub?.value + }) }) const subState = useSubBlockStore.getState() useSubBlockStore.setState({ @@ -1500,7 +1714,7 @@ export const useCopilotStore = create()( const startTimeMs = Date.now() const expectedStreamId = triggerUserMessageId - const context = createStreamingContext(assistantMessageId) + const context = createClientStreamingContext(assistantMessageId) if (isContinuation) { context.suppressContinueOption = true } @@ -1508,7 +1722,7 @@ export const useCopilotStore = create()( if (isContinuation) { const { messages } = get() const existingMessage = messages.find((m) => m.id === assistantMessageId) - logger.info('[Copilot] Continuation init', { + logger.debug('[Copilot] Continuation init', { hasMessage: !!existingMessage, contentLength: existingMessage?.content?.length || 0, contentPreview: existingMessage?.content?.slice(0, 100) || '', @@ -1527,10 +1741,12 @@ export const useCopilotStore = create()( context.contentBlocks = clonedBlocks context.currentTextBlock = findLastTextBlock(clonedBlocks) } else if (existingMessage.content) { - const textBlock = { type: '', content: '', timestamp: 0, toolCall: null } - textBlock.type = TEXT_BLOCK_TYPE - textBlock.content = existingMessage.content - textBlock.timestamp = Date.now() + const textBlock: ClientContentBlock = { + type: 'text', + content: existingMessage.content, + timestamp: Date.now(), + toolCall: null, + } context.contentBlocks = [textBlock] context.currentTextBlock = textBlock context.accumulatedContent += existingMessage.content @@ -1541,14 +1757,14 @@ export const useCopilotStore = create()( const timeoutId = setTimeout(() => { logger.warn('Stream timeout reached, completing response') reader.cancel() - }, 600000) + }, STREAM_TIMEOUT_MS) try { for await (const data of parseSSEStream(reader, decoder, abortSignal)) { if (abortSignal?.aborted) { context.wasAborted = true const { suppressAbortContinueOption } = get() - context.suppressContinueOption = 
suppressAbortContinueOption === true || isPageUnloading + context.suppressContinueOption = suppressAbortContinueOption === true || isPageUnloading() if (suppressAbortContinueOption) { set({ suppressAbortContinueOption: false }) } @@ -1575,13 +1791,13 @@ export const useCopilotStore = create()( } // Log SSE events for debugging - logger.info('[SSE] Received event', { + logger.debug('[SSE] Received event', { type: data.type, hasSubAgent: !!data.subagent, subagent: data.subagent, dataPreview: typeof data.data === 'string' - ? data.data.substring(0, 100) + ? (data.data as string).substring(0, 100) : JSON.stringify(data.data)?.substring(0, 100), }) @@ -1590,15 +1806,15 @@ export const useCopilotStore = create()( } if (!context.wasAborted && sseHandlers.stream_end) { - sseHandlers.stream_end({}, context, get, set) + sseHandlers.stream_end({ type: 'done' }, context, get, set) } stopStreamingUpdates() - let sanitizedContentBlocks: any[] = [] + let sanitizedContentBlocks: ClientContentBlock[] = [] if (context.contentBlocks && context.contentBlocks.length > 0) { - const optimizedBlocks = context.contentBlocks.map((block: any) => ({ ...block })) - sanitizedContentBlocks = optimizedBlocks.map((block: any) => + const optimizedBlocks = context.contentBlocks.map((block) => ({ ...block })) + sanitizedContentBlocks = optimizedBlocks.map((block) => block.type === TEXT_BLOCK_TYPE && typeof block.content === 'string' ? 
{ ...block, content: stripTodoTags(block.content) } : block @@ -1656,7 +1872,7 @@ export const useCopilotStore = create()( }) // Only clear active stream if stream completed normally or user aborted (not page unload) - if ((context.streamComplete || context.wasAborted) && !isPageUnloading) { + if ((context.streamComplete || context.wasAborted) && !isPageUnloading()) { set({ activeStream: null }) writeActiveStreamToStorage(null) } @@ -1670,7 +1886,7 @@ export const useCopilotStore = create()( if (nextInQueue) { // Use originalMessageId if available (from edit/resend), otherwise use queue entry id const messageIdToUse = nextInQueue.originalMessageId || nextInQueue.id - logger.info('[Queue] Processing next queued message', { + logger.debug('[Queue] Processing next queued message', { id: nextInQueue.id, originalMessageId: nextInQueue.originalMessageId, messageIdToUse, @@ -1686,7 +1902,7 @@ export const useCopilotStore = create()( contexts: nextInQueue.contexts, messageId: messageIdToUse, }) - }, 100) + }, QUEUE_PROCESS_DELAY_MS) } // Persist full message state (including contentBlocks), plan artifact, and config to database @@ -1697,40 +1913,35 @@ export const useCopilotStore = create()( // Debug: Log what we're about to serialize const lastMsg = currentMessages[currentMessages.length - 1] if (lastMsg?.role === 'assistant') { - logger.info('[Stream Done] About to serialize - last message state', { + logger.debug('[Stream Done] About to serialize - last message state', { id: lastMsg.id, contentLength: lastMsg.content?.length || 0, hasContentBlocks: !!lastMsg.contentBlocks, contentBlockCount: lastMsg.contentBlocks?.length || 0, - contentBlockTypes: (lastMsg.contentBlocks as any[])?.map((b) => b?.type) ?? [], + contentBlockTypes: lastMsg.contentBlocks?.map((b) => b?.type) ?? 
[], }) } - const dbMessages = serializeMessagesForDB(currentMessages, get().sensitiveCredentialIds) const config = { mode, model: selectedModel, } - const saveResponse = await fetch('/api/copilot/chat/update-messages', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - chatId: currentChat.id, - messages: dbMessages, - planArtifact: streamingPlanContent || null, - config, - }), + const persisted = await persistMessages({ + chatId: currentChat.id, + messages: currentMessages, + sensitiveCredentialIds: get().sensitiveCredentialIds, + planArtifact: streamingPlanContent || null, + mode, + model: selectedModel, }) - if (!saveResponse.ok) { - const errorText = await saveResponse.text().catch(() => '') + if (!persisted) { logger.error('[Stream Done] Failed to save messages to DB', { - status: saveResponse.status, - error: errorText, + chatId: currentChat.id, }) } else { logger.info('[Stream Done] Successfully saved messages to DB', { - messageCount: dbMessages.length, + messageCount: currentMessages.length, }) } @@ -1747,16 +1958,11 @@ export const useCopilotStore = create()( } } - // Post copilot_stats record (input/output tokens can be null for now) - try { - // Removed: stats sending now occurs only on accept/reject with minimal payload - } catch {} - // Invalidate subscription queries to update usage setTimeout(() => { const queryClient = getQueryClient() queryClient.invalidateQueries({ queryKey: subscriptionKeys.all }) - }, 1000) + }, SUBSCRIPTION_INVALIDATE_DELAY_MS) } finally { clearTimeout(timeoutId) } @@ -1783,7 +1989,11 @@ export const useCopilotStore = create()( abortAllInProgressTools(set, get) try { useWorkflowDiffStore.getState().clearDiff() - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed to clear diff on new chat creation', { + error: error instanceof Error ? 
error.message : String(error), + }) + } set({ currentChat: newChat, @@ -1807,7 +2017,11 @@ export const useCopilotStore = create()( // Clear any diff on cleanup try { useWorkflowDiffStore.getState().clearDiff() - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed to clear diff on cleanup', { + error: error instanceof Error ? error.message : String(error), + }) + } }, reset: () => { @@ -1845,21 +2059,14 @@ export const useCopilotStore = create()( if (currentChat) { try { const currentMessages = get().messages - const dbMessages = serializeMessagesForDB(currentMessages, get().sensitiveCredentialIds) const { mode, selectedModel } = get() - - await fetch('/api/copilot/chat/update-messages', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - chatId: currentChat.id, - messages: dbMessages, - planArtifact: null, - config: { - mode, - model: selectedModel, - }, - }), + await persistMessages({ + chatId: currentChat.id, + messages: currentMessages, + sensitiveCredentialIds: get().sensitiveCredentialIds, + planArtifact: null, + mode, + model: selectedModel, }) // Update local chat object @@ -1887,21 +2094,14 @@ export const useCopilotStore = create()( if (currentChat) { try { const currentMessages = get().messages - const dbMessages = serializeMessagesForDB(currentMessages, get().sensitiveCredentialIds) const { mode, selectedModel } = get() - - await fetch('/api/copilot/chat/update-messages', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - chatId: currentChat.id, - messages: dbMessages, - planArtifact: content, - config: { - mode, - model: selectedModel, - }, - }), + await persistMessages({ + chatId: currentChat.id, + messages: currentMessages, + sensitiveCredentialIds: get().sensitiveCredentialIds, + planArtifact: content, + mode, + model: selectedModel, }) // Update local chat object @@ -1930,14 +2130,14 @@ export const useCopilotStore = create()( 
loadAutoAllowedTools: async () => { try { - logger.info('[AutoAllowedTools] Loading from API...') - const res = await fetch('/api/copilot/auto-allowed-tools') - logger.info('[AutoAllowedTools] Load response', { status: res.status, ok: res.ok }) + logger.debug('[AutoAllowedTools] Loading from API...') + const res = await fetch(COPILOT_AUTO_ALLOWED_TOOLS_API_PATH) + logger.debug('[AutoAllowedTools] Load response', { status: res.status, ok: res.ok }) if (res.ok) { const data = await res.json() const tools = data.autoAllowedTools ?? [] set({ autoAllowedTools: tools }) - logger.info('[AutoAllowedTools] Loaded successfully', { count: tools.length, tools }) + logger.debug('[AutoAllowedTools] Loaded successfully', { count: tools.length, tools }) } else { logger.warn('[AutoAllowedTools] Load failed with status', { status: res.status }) } @@ -1948,18 +2148,18 @@ export const useCopilotStore = create()( addAutoAllowedTool: async (toolId: string) => { try { - logger.info('[AutoAllowedTools] Adding tool...', { toolId }) - const res = await fetch('/api/copilot/auto-allowed-tools', { + logger.debug('[AutoAllowedTools] Adding tool...', { toolId }) + const res = await fetch(COPILOT_AUTO_ALLOWED_TOOLS_API_PATH, { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ toolId }), }) - logger.info('[AutoAllowedTools] API response', { toolId, status: res.status, ok: res.ok }) + logger.debug('[AutoAllowedTools] API response', { toolId, status: res.status, ok: res.ok }) if (res.ok) { const data = await res.json() - logger.info('[AutoAllowedTools] API returned', { toolId, tools: data.autoAllowedTools }) + logger.debug('[AutoAllowedTools] API returned', { toolId, tools: data.autoAllowedTools }) set({ autoAllowedTools: data.autoAllowedTools ?? 
[] }) - logger.info('[AutoAllowedTools] Added tool to store', { toolId }) + logger.debug('[AutoAllowedTools] Added tool to store', { toolId }) } } catch (err) { logger.error('[AutoAllowedTools] Failed to add tool', { toolId, error: err }) @@ -1969,7 +2169,7 @@ export const useCopilotStore = create()( removeAutoAllowedTool: async (toolId: string) => { try { const res = await fetch( - `/api/copilot/auto-allowed-tools?toolId=${encodeURIComponent(toolId)}`, + `${COPILOT_AUTO_ALLOWED_TOOLS_API_PATH}?toolId=${encodeURIComponent(toolId)}`, { method: 'DELETE', } @@ -1977,7 +2177,7 @@ export const useCopilotStore = create()( if (res.ok) { const data = await res.json() set({ autoAllowedTools: data.autoAllowedTools ?? [] }) - logger.info('[AutoAllowedTools] Removed tool', { toolId }) + logger.debug('[AutoAllowedTools] Removed tool', { toolId }) } } catch (err) { logger.error('[AutoAllowedTools] Failed to remove tool', { toolId, error: err }) @@ -1992,7 +2192,7 @@ export const useCopilotStore = create()( // Credential masking loadSensitiveCredentialIds: async () => { try { - const res = await fetch('/api/copilot/credentials', { + const res = await fetch(COPILOT_CREDENTIALS_API_PATH, { credentials: 'include', }) if (!res.ok) { @@ -2004,7 +2204,7 @@ export const useCopilotStore = create()( const json = await res.json() // Credentials are at result.oauth.connected.credentials const credentials = json?.result?.oauth?.connected?.credentials ?? 
[] - logger.info('[loadSensitiveCredentialIds] Response', { + logger.debug('[loadSensitiveCredentialIds] Response', { hasResult: !!json?.result, credentialCount: credentials.length, }) @@ -2015,7 +2215,7 @@ export const useCopilotStore = create()( } } set({ sensitiveCredentialIds: ids }) - logger.info('[loadSensitiveCredentialIds] Loaded credential IDs', { + logger.debug('[loadSensitiveCredentialIds] Loaded credential IDs', { count: ids.size, }) } catch (err) { @@ -2058,7 +2258,7 @@ export const useCopilotStore = create()( removeFromQueue: (id) => { set({ messageQueue: get().messageQueue.filter((m) => m.id !== id) }) - logger.info('[Queue] Message removed from queue', { + logger.debug('[Queue] Message removed from queue', { id, queueLength: get().messageQueue.length, }) @@ -2072,7 +2272,7 @@ export const useCopilotStore = create()( queue.splice(index, 1) queue.splice(index - 1, 0, item) set({ messageQueue: queue }) - logger.info('[Queue] Message moved up in queue', { id, newIndex: index - 1 }) + logger.debug('[Queue] Message moved up in queue', { id, newIndex: index - 1 }) } }, diff --git a/apps/sim/stores/panel/copilot/types.ts b/apps/sim/stores/panel/copilot/types.ts index 07e77ea604..e03c07f9dc 100644 --- a/apps/sim/stores/panel/copilot/types.ts +++ b/apps/sim/stores/panel/copilot/types.ts @@ -2,6 +2,7 @@ import type { CopilotMode, CopilotModelId } from '@/lib/copilot/models' export type { CopilotMode, CopilotModelId } from '@/lib/copilot/models' +import type { ClientContentBlock } from '@/lib/copilot/client-sse/types' import type { ClientToolCallState, ClientToolDisplay } from '@/lib/copilot/tools/client/base-tool' import type { WorkflowState } from '@/stores/workflows/workflow/types' @@ -21,7 +22,8 @@ export interface CopilotToolCall { id: string name: string state: ClientToolCallState - params?: Record + params?: Record + input?: Record display?: ClientToolDisplay /** Content streamed from a subagent (e.g., debug agent) */ subAgentContent?: string @@ -62,18 
+64,7 @@ export interface CopilotMessage { timestamp: string citations?: { id: number; title: string; url: string; similarity?: number }[] toolCalls?: CopilotToolCall[] - contentBlocks?: Array< - | { type: 'text'; content: string; timestamp: number } - | { - type: 'thinking' - content: string - timestamp: number - duration?: number - startTime?: number - } - | { type: 'tool_call'; toolCall: CopilotToolCall; timestamp: number } - | { type: 'contexts'; contexts: ChatContext[]; timestamp: number } - > + contentBlocks?: ClientContentBlock[] fileAttachments?: MessageFileAttachment[] contexts?: ChatContext[] errorType?: 'usage_limit' | 'unauthorized' | 'forbidden' | 'rate_limit' | 'upgrade_required' @@ -110,6 +101,16 @@ import type { CopilotChat as ApiCopilotChat } from '@/lib/copilot/api' export type CopilotChat = ApiCopilotChat +/** + * A checkpoint entry as returned from the checkpoints API. + */ +export interface CheckpointEntry { + id: string + messageId?: string + workflowState?: Record + createdAt?: string +} + export interface CopilotState { mode: CopilotMode selectedModel: CopilotModelId @@ -122,7 +123,7 @@ export interface CopilotState { messages: CopilotMessage[] workflowId: string | null - messageCheckpoints: Record + messageCheckpoints: Record messageSnapshots: Record isLoading: boolean @@ -210,11 +211,11 @@ export interface CopilotActions { toolCallId?: string ) => void resumeActiveStream: () => Promise - setToolCallState: (toolCall: any, newState: ClientToolCallState, options?: any) => void - updateToolCallParams: (toolCallId: string, params: Record) => void + setToolCallState: (toolCall: CopilotToolCall, newState: ClientToolCallState | string) => void + updateToolCallParams: (toolCallId: string, params: Record) => void loadMessageCheckpoints: (chatId: string) => Promise revertToCheckpoint: (checkpointId: string) => Promise - getCheckpointsForMessage: (messageId: string) => any[] + getCheckpointsForMessage: (messageId: string) => CheckpointEntry[] 
saveMessageCheckpoint: (messageId: string) => Promise clearMessages: () => void diff --git a/apps/sim/stores/workflow-diff/store.ts b/apps/sim/stores/workflow-diff/store.ts index 116fa83d77..339465ec59 100644 --- a/apps/sim/stores/workflow-diff/store.ts +++ b/apps/sim/stores/workflow-diff/store.ts @@ -1,12 +1,7 @@ import { createLogger } from '@sim/logger' - -declare global { - interface Window { - __skipDiffRecording?: boolean - } -} import { create } from 'zustand' import { devtools } from 'zustand/middleware' +import { COPILOT_STATS_API_PATH } from '@/lib/copilot/constants' import { stripWorkflowDiffMarkers, WorkflowDiffEngine } from '@/lib/workflows/diff' import { enqueueReplaceWorkflowState } from '@/lib/workflows/operations/socket-operations' import { validateWorkflowState } from '@/lib/workflows/sanitization/validation' @@ -82,7 +77,7 @@ export const useWorkflowDiffStore = create { + setProposedChanges: async (proposedState, diffAnalysis, options) => { const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId if (!activeWorkflowId) { logger.error('Cannot apply diff without an active workflow') @@ -212,7 +207,7 @@ export const useWorkflowDiffStore = create { + acceptChanges: async (options) => { const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId if (!activeWorkflowId) { logger.error('No active workflow ID found when accepting diff') @@ -307,7 +302,7 @@ export const useWorkflowDiffStore = create {}) + }).catch((error) => { + logger.warn('Failed to send diff-accepted stats', { + error: error instanceof Error ? 
error.message : String(error), + messageId: triggerMessageId, + }) + }) } findLatestEditWorkflowToolCallId().then((toolCallId) => { @@ -347,7 +347,7 @@ export const useWorkflowDiffStore = create { + rejectChanges: async (options) => { const { baselineWorkflow, baselineWorkflowId, _triggerMessageId, diffAnalysis } = get() const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId @@ -389,7 +389,7 @@ export const useWorkflowDiffStore = create {}) + }).catch((error) => { + logger.warn('Failed to send diff-rejected stats', { + error: error instanceof Error ? error.message : String(error), + messageId: _triggerMessageId, + }) + }) } findLatestEditWorkflowToolCallId().then((toolCallId) => { @@ -460,11 +465,13 @@ export const useWorkflowDiffStore = create { const block = currentBlocks[blockId] - return block && (block as any).is_diff !== 'new' + const blockDiffState = (block as { is_diff?: string } | undefined)?.is_diff + return block && blockDiffState !== 'new' }) || diffAnalysis.edited_blocks?.some((blockId) => { const block = currentBlocks[blockId] - return block && (block as any).is_diff !== 'edited' + const blockDiffState = (block as { is_diff?: string } | undefined)?.is_diff + return block && blockDiffState !== 'edited' }) if (!needsUpdate) { @@ -478,11 +485,12 @@ export const useWorkflowDiffStore = create { const isNewBlock = diffAnalysis.new_blocks?.includes(blockId) const isEditedBlock = diffAnalysis.edited_blocks?.includes(blockId) + const blockDiffState = (block as { is_diff?: string } | undefined)?.is_diff - if (isNewBlock && (block as any).is_diff !== 'new') { + if (isNewBlock && blockDiffState !== 'new') { updatedBlocks[blockId] = { ...block, is_diff: 'new' } hasChanges = true - } else if (isEditedBlock && (block as any).is_diff !== 'edited') { + } else if (isEditedBlock && blockDiffState !== 'edited') { updatedBlocks[blockId] = { ...block, is_diff: 'edited' } // Re-apply field_diffs if available diff --git 
a/apps/sim/stores/workflow-diff/types.ts b/apps/sim/stores/workflow-diff/types.ts index fe40b08421..8c412b97c1 100644 --- a/apps/sim/stores/workflow-diff/types.ts +++ b/apps/sim/stores/workflow-diff/types.ts @@ -13,12 +13,17 @@ export interface WorkflowDiffState { _triggerMessageId?: string | null } +export interface DiffActionOptions { + /** Skip recording this operation for undo/redo. Used during undo/redo replay. */ + skipRecording?: boolean +} + export interface WorkflowDiffActions { - setProposedChanges: (workflowState: WorkflowState, diffAnalysis?: DiffAnalysis) => Promise + setProposedChanges: (workflowState: WorkflowState, diffAnalysis?: DiffAnalysis, options?: DiffActionOptions) => Promise clearDiff: (options?: { restoreBaseline?: boolean }) => void toggleDiffView: () => void - acceptChanges: () => Promise - rejectChanges: () => Promise + acceptChanges: (options?: DiffActionOptions) => Promise + rejectChanges: (options?: DiffActionOptions) => Promise reapplyDiffMarkers: () => void _batchedStateUpdate: (updates: Partial) => void } diff --git a/apps/sim/stores/workflow-diff/utils.ts b/apps/sim/stores/workflow-diff/utils.ts index 27605a2683..b5cdd49967 100644 --- a/apps/sim/stores/workflow-diff/utils.ts +++ b/apps/sim/stores/workflow-diff/utils.ts @@ -26,7 +26,7 @@ export function extractSubBlockValues( Object.entries(workflowState.blocks || {}).forEach(([blockId, block]) => { values[blockId] = {} Object.entries(block.subBlocks || {}).forEach(([subBlockId, subBlock]) => { - values[blockId][subBlockId] = (subBlock as any)?.value ?? null + values[blockId][subBlockId] = subBlock?.value ?? 
null }) }) return values @@ -37,7 +37,7 @@ export function applyWorkflowStateToStores( workflowState: WorkflowState, options?: { updateLastSaved?: boolean } ) { - logger.info('[applyWorkflowStateToStores] Applying state', { + logger.debug('[applyWorkflowStateToStores] Applying state', { workflowId, blockCount: Object.keys(workflowState.blocks || {}).length, edgeCount: workflowState.edges?.length ?? 0, @@ -45,7 +45,7 @@ export function applyWorkflowStateToStores( }) const workflowStore = useWorkflowStore.getState() const cloned = cloneWorkflowState(workflowState) - logger.info('[applyWorkflowStateToStores] Cloned state edges', { + logger.debug('[applyWorkflowStateToStores] Cloned state edges', { clonedEdgeCount: cloned.edges?.length ?? 0, }) workflowStore.replaceWorkflowState(cloned, options) @@ -54,7 +54,8 @@ export function applyWorkflowStateToStores( // Verify what's in the store after apply const afterState = workflowStore.getWorkflowState() - logger.info('[applyWorkflowStateToStores] After apply', { + logger.info('[applyWorkflowStateToStores] Applied workflow state to stores', { + workflowId, afterEdgeCount: afterState.edges?.length ?? 
0, }) } @@ -107,7 +108,7 @@ export async function persistWorkflowStateToServer( export async function getLatestUserMessageId(): Promise { try { const { useCopilotStore } = await import('@/stores/panel/copilot/store') - const { messages } = useCopilotStore.getState() as any + const { messages } = useCopilotStore.getState() if (!Array.isArray(messages) || messages.length === 0) { return null } @@ -127,21 +128,19 @@ export async function getLatestUserMessageId(): Promise { export async function findLatestEditWorkflowToolCallId(): Promise { try { const { useCopilotStore } = await import('@/stores/panel/copilot/store') - const { messages, toolCallsById } = useCopilotStore.getState() as any + const { messages, toolCallsById } = useCopilotStore.getState() for (let mi = messages.length - 1; mi >= 0; mi--) { const message = messages[mi] if (message.role !== 'assistant' || !message.contentBlocks) continue - for (const block of message.contentBlocks as any[]) { + for (const block of message.contentBlocks) { if (block?.type === 'tool_call' && block.toolCall?.name === 'edit_workflow') { return block.toolCall?.id } } } - const fallback = Object.values(toolCallsById).filter( - (call: any) => call.name === 'edit_workflow' - ) as any[] + const fallback = Object.values(toolCallsById).filter((call) => call.name === 'edit_workflow') return fallback.length ? 
fallback[fallback.length - 1].id : undefined } catch (error) { @@ -150,7 +149,7 @@ export async function findLatestEditWorkflowToolCallId(): Promise) => void) { let updateTimer: NodeJS.Timeout | null = null const UPDATE_DEBOUNCE_MS = 16 let pendingUpdates: Partial = {} From ba8f39fe1021bacb28aea25f33372014f0112607 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Thu, 5 Feb 2026 16:47:57 -0800 Subject: [PATCH 27/72] Refactor complete - no testing yet --- ...ex-function-inventory-edit-workflow.ts.txt | 35 - ...-inventory-get-blocks-metadata-tool.ts.txt | 21 - ...function-inventory-process-contents.ts.txt | 13 - apps/sim/lib/copilot/client-sse/handlers.ts | 3 +- apps/sim/lib/copilot/orchestrator/index.ts | 5 +- .../orchestrator/sse-handlers/handlers.ts | 12 +- apps/sim/lib/copilot/process-contents.ts | 40 +- .../sim/lib/copilot/tools/server/base-tool.ts | 20 +- .../tools/server/blocks/get-block-config.ts | 3 + .../tools/server/blocks/get-block-options.ts | 2 + .../server/blocks/get-blocks-and-tools.ts | 2 + .../server/blocks/get-blocks-metadata-tool.ts | 2 + .../tools/server/blocks/get-trigger-blocks.ts | 2 + .../tools/server/other/make-api-request.ts | 60 +- .../tools/server/other/search-online.ts | 114 +- apps/sim/lib/copilot/tools/server/router.ts | 111 +- .../tools/server/workflow/edit-workflow.ts | 3334 ----------------- .../server/workflow/edit-workflow/builders.ts | 633 ++++ .../server/workflow/edit-workflow/engine.ts | 274 ++ .../server/workflow/edit-workflow/index.ts | 284 ++ .../workflow/edit-workflow/operations.ts | 996 +++++ .../server/workflow/edit-workflow/types.ts | 134 + .../workflow/edit-workflow/validation.ts | 1051 ++++++ .../server/workflow/get-workflow-console.ts | 1 + apps/sim/lib/workflows/blocks/index.ts | 2 + .../lib/workflows/blocks/schema-resolver.ts | 201 + apps/sim/lib/workflows/blocks/schema-types.ts | 75 + 27 files changed, 3825 insertions(+), 3605 deletions(-) delete mode 100644 
apps/sim/.codex-function-inventory-edit-workflow.ts.txt delete mode 100644 apps/sim/.codex-function-inventory-get-blocks-metadata-tool.ts.txt delete mode 100644 apps/sim/.codex-function-inventory-process-contents.ts.txt delete mode 100644 apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts create mode 100644 apps/sim/lib/copilot/tools/server/workflow/edit-workflow/builders.ts create mode 100644 apps/sim/lib/copilot/tools/server/workflow/edit-workflow/engine.ts create mode 100644 apps/sim/lib/copilot/tools/server/workflow/edit-workflow/index.ts create mode 100644 apps/sim/lib/copilot/tools/server/workflow/edit-workflow/operations.ts create mode 100644 apps/sim/lib/copilot/tools/server/workflow/edit-workflow/types.ts create mode 100644 apps/sim/lib/copilot/tools/server/workflow/edit-workflow/validation.ts create mode 100644 apps/sim/lib/workflows/blocks/index.ts create mode 100644 apps/sim/lib/workflows/blocks/schema-resolver.ts create mode 100644 apps/sim/lib/workflows/blocks/schema-types.ts diff --git a/apps/sim/.codex-function-inventory-edit-workflow.ts.txt b/apps/sim/.codex-function-inventory-edit-workflow.ts.txt deleted file mode 100644 index e77b30fbe1..0000000000 --- a/apps/sim/.codex-function-inventory-edit-workflow.ts.txt +++ /dev/null @@ -1,35 +0,0 @@ -# lib/copilot/tools/server/workflow/edit-workflow.ts - 90-98 ( 9 lines) [function] logSkippedItem - 103-113 ( 11 lines) [function] findBlockWithDuplicateNormalizedName - 127-196 ( 70 lines) [function] validateInputsForBlock - 211-463 ( 253 lines) [function] validateValueForSubBlockType - 481-566 ( 86 lines) [function] topologicalSortInserts - 571-684 ( 114 lines) [function] createBlockFromParams - 686-716 ( 31 lines) [function] updateCanonicalModesForInputs - 721-762 ( 42 lines) [function] normalizeTools - 786-804 ( 19 lines) [function] normalizeArrayWithIds - 809-811 ( 3 lines) [function] shouldNormalizeArrayIds - 818-859 ( 42 lines) [function] normalizeResponseFormat - 834-847 ( 14 lines) [arrow] 
sortKeys - 871-945 ( 75 lines) [function] validateSourceHandleForBlock - 956-1051 ( 96 lines) [function] validateConditionHandle -1062-1136 ( 75 lines) [function] validateRouterHandle -1141-1149 ( 9 lines) [function] validateTargetHandle -1155-1261 ( 107 lines) [function] createValidatedEdge -1270-1307 ( 38 lines) [function] addConnectionsAsEdges -1280-1291 ( 12 lines) [arrow] addEdgeForTarget -1309-1339 ( 31 lines) [function] applyTriggerConfigToBlockSubblocks -1353-1361 ( 9 lines) [function] isBlockTypeAllowed -1367-1404 ( 38 lines) [function] filterDisallowedTools -1413-1499 ( 87 lines) [function] normalizeBlockIdsInOperations -1441-1444 ( 4 lines) [arrow] replaceId -1504-2676 (1173 lines) [function] applyOperationsToWorkflowState -1649-1656 ( 8 lines) [arrow] findChildren -2055-2059 ( 5 lines) [arrow] mapConnectionTypeToHandle -2063-2074 ( 12 lines) [arrow] addEdgeForTarget -2682-2777 ( 96 lines) [function] validateWorkflowSelectorIds -2786-3066 ( 281 lines) [function] preValidateCredentialInputs -2820-2845 ( 26 lines) [function] collectCredentialInputs -2850-2870 ( 21 lines) [function] collectHostedApiKeyInput -3068-3117 ( 50 lines) [function] getCurrentWorkflowStateFromDb -3121-3333 ( 213 lines) [method] .execute diff --git a/apps/sim/.codex-function-inventory-get-blocks-metadata-tool.ts.txt b/apps/sim/.codex-function-inventory-get-blocks-metadata-tool.ts.txt deleted file mode 100644 index 61d57991b4..0000000000 --- a/apps/sim/.codex-function-inventory-get-blocks-metadata-tool.ts.txt +++ /dev/null @@ -1,21 +0,0 @@ -# lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts - 108-306 ( 199 lines) [method] .execute - 309-384 ( 76 lines) [function] transformBlockMetadata - 386-459 ( 74 lines) [function] extractInputs - 461-503 ( 43 lines) [function] extractOperationInputs - 505-518 ( 14 lines) [function] extractOutputs - 520-538 ( 19 lines) [function] formatOutputsFromDefinition - 540-563 ( 24 lines) [function] mapSchemaTypeToSimpleType - 565-591 ( 27 lines) 
[function] generateInputExample - 593-669 ( 77 lines) [function] processSubBlock - 671-679 ( 9 lines) [function] resolveAuthType - 686-702 ( 17 lines) [function] getStaticModelOptions - 712-754 ( 43 lines) [function] callOptionsWithFallback - 756-806 ( 51 lines) [function] resolveSubblockOptions - 808-820 ( 13 lines) [function] removeNullish - 822-832 ( 11 lines) [function] normalizeCondition - 834-872 ( 39 lines) [function] splitParametersByOperation - 874-905 ( 32 lines) [function] computeBlockLevelInputs - 907-935 ( 29 lines) [function] computeOperationLevelInputs - 937-947 ( 11 lines) [function] resolveOperationIds - 949-961 ( 13 lines) [function] resolveToolIdForOperation diff --git a/apps/sim/.codex-function-inventory-process-contents.ts.txt b/apps/sim/.codex-function-inventory-process-contents.ts.txt deleted file mode 100644 index 82e8de18e7..0000000000 --- a/apps/sim/.codex-function-inventory-process-contents.ts.txt +++ /dev/null @@ -1,13 +0,0 @@ -# lib/copilot/process-contents.ts - 31-81 ( 51 lines) [function] processContexts - 84-161 ( 78 lines) [function] processContextsServer - 163-208 ( 46 lines) [function] sanitizeMessageForDocs - 210-248 ( 39 lines) [function] processPastChatFromDb - 250-281 ( 32 lines) [function] processWorkflowFromDb - 283-316 ( 34 lines) [function] processPastChat - 319-321 ( 3 lines) [function] processPastChatViaApi - 323-362 ( 40 lines) [function] processKnowledgeFromDb - 364-439 ( 76 lines) [function] processBlockMetadata - 441-473 ( 33 lines) [function] processTemplateFromDb - 475-498 ( 24 lines) [function] processWorkflowBlockFromDb - 500-555 ( 56 lines) [function] processExecutionLogFromDb diff --git a/apps/sim/lib/copilot/client-sse/handlers.ts b/apps/sim/lib/copilot/client-sse/handlers.ts index 4845431637..bc8968fc09 100644 --- a/apps/sim/lib/copilot/client-sse/handlers.ts +++ b/apps/sim/lib/copilot/client-sse/handlers.ts @@ -11,6 +11,7 @@ import { } from '@/lib/copilot/store-utils' import { useWorkflowDiffStore } from 
'@/stores/workflow-diff/store' import type { CopilotStore, CopilotStreamInfo, CopilotToolCall } from '@/stores/panel/copilot/types' +import type { WorkflowState } from '@/stores/workflows/workflow/types' import { appendTextBlock, beginThinkingBlock, @@ -295,7 +296,7 @@ export const sseHandlers: Record = { }) if (hasWorkflowState) { const diffStore = useWorkflowDiffStore.getState() - diffStore.setProposedChanges(resultPayload.workflowState).catch((err) => { + diffStore.setProposedChanges(resultPayload.workflowState as WorkflowState).catch((err) => { logger.error('[SSE] Failed to apply edit_workflow diff', { error: err instanceof Error ? err.message : String(err), }) diff --git a/apps/sim/lib/copilot/orchestrator/index.ts b/apps/sim/lib/copilot/orchestrator/index.ts index a909fa2948..9540027cd5 100644 --- a/apps/sim/lib/copilot/orchestrator/index.ts +++ b/apps/sim/lib/copilot/orchestrator/index.ts @@ -14,15 +14,16 @@ export interface OrchestrateStreamOptions extends OrchestratorOptions { } export async function orchestrateCopilotStream( - requestPayload: Record, + requestPayload: Record, options: OrchestrateStreamOptions ): Promise { const { userId, workflowId, chatId } = options const execContext = await prepareExecutionContext(userId, workflowId) + const payloadMsgId = requestPayload?.messageId const context = createStreamingContext({ chatId, - messageId: requestPayload?.messageId || crypto.randomUUID(), + messageId: typeof payloadMsgId === 'string' ? 
payloadMsgId : crypto.randomUUID(), }) try { diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts index 138b5516bc..84da658b9d 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts @@ -344,7 +344,7 @@ export const subAgentHandlers: Record = { const parentToolCallId = context.subAgentParentToolCallId if (!parentToolCallId) return const data = getEventData(event) - const toolCallId = event.toolCallId || data?.id + const toolCallId = event.toolCallId || (data?.id as string | undefined) if (!toolCallId) return // Update in subAgentToolCalls. @@ -364,14 +364,20 @@ export const subAgentHandlers: Record = { subAgentToolCall.status = status subAgentToolCall.endTime = endTime if (result) subAgentToolCall.result = result - if (hasError) subAgentToolCall.error = data?.error || data?.result?.error + if (hasError) { + const resultObj = asRecord(data?.result) + subAgentToolCall.error = (data?.error || resultObj.error) as string | undefined + } } if (mainToolCall) { mainToolCall.status = status mainToolCall.endTime = endTime if (result) mainToolCall.result = result - if (hasError) mainToolCall.error = data?.error || data?.result?.error + if (hasError) { + const resultObj = asRecord(data?.result) + mainToolCall.error = (data?.error || resultObj.error) as string | undefined + } } }, } diff --git a/apps/sim/lib/copilot/process-contents.ts b/apps/sim/lib/copilot/process-contents.ts index 13a0015f04..6e69e747e5 100644 --- a/apps/sim/lib/copilot/process-contents.ts +++ b/apps/sim/lib/copilot/process-contents.ts @@ -44,29 +44,29 @@ export async function processContexts( ctx.kind ) } - if (ctx.kind === 'knowledge' && (ctx as any).knowledgeId) { + if (ctx.kind === 'knowledge' && ctx.knowledgeId) { return await processKnowledgeFromDb( - (ctx as any).knowledgeId, + ctx.knowledgeId, ctx.label ? 
`@${ctx.label}` : '@' ) } - if (ctx.kind === 'blocks' && (ctx as any).blockId) { - return await processBlockMetadata((ctx as any).blockId, ctx.label ? `@${ctx.label}` : '@') + if (ctx.kind === 'blocks' && ctx.blockIds?.length > 0) { + return await processBlockMetadata(ctx.blockIds[0], ctx.label ? `@${ctx.label}` : '@') } - if (ctx.kind === 'templates' && (ctx as any).templateId) { + if (ctx.kind === 'templates' && ctx.templateId) { return await processTemplateFromDb( - (ctx as any).templateId, + ctx.templateId, ctx.label ? `@${ctx.label}` : '@' ) } - if (ctx.kind === 'logs' && (ctx as any).executionId) { + if (ctx.kind === 'logs' && ctx.executionId) { return await processExecutionLogFromDb( - (ctx as any).executionId, + ctx.executionId, ctx.label ? `@${ctx.label}` : '@' ) } - if (ctx.kind === 'workflow_block' && ctx.workflowId && (ctx as any).blockId) { - return await processWorkflowBlockFromDb(ctx.workflowId, (ctx as any).blockId, ctx.label) + if (ctx.kind === 'workflow_block' && ctx.workflowId && ctx.blockId) { + return await processWorkflowBlockFromDb(ctx.workflowId, ctx.blockId, ctx.label) } // Other kinds can be added here: workflow, blocks, logs, knowledge, templates, docs return null @@ -99,33 +99,33 @@ export async function processContextsServer( ctx.kind ) } - if (ctx.kind === 'knowledge' && (ctx as any).knowledgeId) { + if (ctx.kind === 'knowledge' && ctx.knowledgeId) { return await processKnowledgeFromDb( - (ctx as any).knowledgeId, + ctx.knowledgeId, ctx.label ? `@${ctx.label}` : '@' ) } - if (ctx.kind === 'blocks' && (ctx as any).blockId) { + if (ctx.kind === 'blocks' && ctx.blockIds?.length > 0) { return await processBlockMetadata( - (ctx as any).blockId, + ctx.blockIds[0], ctx.label ? `@${ctx.label}` : '@', userId ) } - if (ctx.kind === 'templates' && (ctx as any).templateId) { + if (ctx.kind === 'templates' && ctx.templateId) { return await processTemplateFromDb( - (ctx as any).templateId, + ctx.templateId, ctx.label ? 
`@${ctx.label}` : '@' ) } - if (ctx.kind === 'logs' && (ctx as any).executionId) { + if (ctx.kind === 'logs' && ctx.executionId) { return await processExecutionLogFromDb( - (ctx as any).executionId, + ctx.executionId, ctx.label ? `@${ctx.label}` : '@' ) } - if (ctx.kind === 'workflow_block' && ctx.workflowId && (ctx as any).blockId) { - return await processWorkflowBlockFromDb(ctx.workflowId, (ctx as any).blockId, ctx.label) + if (ctx.kind === 'workflow_block' && ctx.workflowId && ctx.blockId) { + return await processWorkflowBlockFromDb(ctx.workflowId, ctx.blockId, ctx.label) } if (ctx.kind === 'docs') { try { diff --git a/apps/sim/lib/copilot/tools/server/base-tool.ts b/apps/sim/lib/copilot/tools/server/base-tool.ts index 40ec3584cb..1760597342 100644 --- a/apps/sim/lib/copilot/tools/server/base-tool.ts +++ b/apps/sim/lib/copilot/tools/server/base-tool.ts @@ -1,4 +1,20 @@ -export interface BaseServerTool { +import type { z } from 'zod' + +export interface ServerToolContext { + userId: string +} + +/** + * Base interface for server-side copilot tools. + * + * Tools can optionally declare Zod schemas for input/output validation. + * If provided, the router validates automatically. 
+ */ +export interface BaseServerTool { name: string - execute(args: TArgs, context?: { userId: string }): Promise + execute(args: TArgs, context?: ServerToolContext): Promise + /** Optional Zod schema for input validation */ + inputSchema?: z.ZodType + /** Optional Zod schema for output validation */ + outputSchema?: z.ZodType } diff --git a/apps/sim/lib/copilot/tools/server/blocks/get-block-config.ts b/apps/sim/lib/copilot/tools/server/blocks/get-block-config.ts index cd95577d79..64021e07c5 100644 --- a/apps/sim/lib/copilot/tools/server/blocks/get-block-config.ts +++ b/apps/sim/lib/copilot/tools/server/blocks/get-block-config.ts @@ -1,6 +1,7 @@ import { createLogger } from '@sim/logger' import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' import { + GetBlockConfigInput, type GetBlockConfigInputType, GetBlockConfigResult, type GetBlockConfigResultType, @@ -370,6 +371,8 @@ export const getBlockConfigServerTool: BaseServerTool< GetBlockConfigResultType > = { name: 'get_block_config', + inputSchema: GetBlockConfigInput, + outputSchema: GetBlockConfigResult, async execute( { blockType, operation, trigger }: GetBlockConfigInputType, context?: { userId: string } diff --git a/apps/sim/lib/copilot/tools/server/blocks/get-block-options.ts b/apps/sim/lib/copilot/tools/server/blocks/get-block-options.ts index 177482fc36..c93db8b8a1 100644 --- a/apps/sim/lib/copilot/tools/server/blocks/get-block-options.ts +++ b/apps/sim/lib/copilot/tools/server/blocks/get-block-options.ts @@ -14,6 +14,8 @@ export const getBlockOptionsServerTool: BaseServerTool< GetBlockOptionsResultType > = { name: 'get_block_options', + inputSchema: GetBlockOptionsInput, + outputSchema: GetBlockOptionsResult, async execute( { blockId }: GetBlockOptionsInputType, context?: { userId: string } diff --git a/apps/sim/lib/copilot/tools/server/blocks/get-blocks-and-tools.ts b/apps/sim/lib/copilot/tools/server/blocks/get-blocks-and-tools.ts index 9413dc278a..cf32eea70b 100644 --- 
a/apps/sim/lib/copilot/tools/server/blocks/get-blocks-and-tools.ts +++ b/apps/sim/lib/copilot/tools/server/blocks/get-blocks-and-tools.ts @@ -13,6 +13,8 @@ export const getBlocksAndToolsServerTool: BaseServerTool< ReturnType > = { name: 'get_blocks_and_tools', + inputSchema: GetBlocksAndToolsInput, + outputSchema: GetBlocksAndToolsResult, async execute(_args: unknown, context?: { userId: string }) { const logger = createLogger('GetBlocksAndToolsServerTool') logger.debug('Executing get_blocks_and_tools') diff --git a/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts b/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts index 6699496e7f..374b47c0d3 100644 --- a/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts +++ b/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts @@ -105,6 +105,8 @@ export const getBlocksMetadataServerTool: BaseServerTool< ReturnType > = { name: 'get_blocks_metadata', + inputSchema: GetBlocksMetadataInput, + outputSchema: GetBlocksMetadataResult, async execute( { blockIds }: ReturnType, context?: { userId: string } diff --git a/apps/sim/lib/copilot/tools/server/blocks/get-trigger-blocks.ts b/apps/sim/lib/copilot/tools/server/blocks/get-trigger-blocks.ts index 5f5820e20d..367c614755 100644 --- a/apps/sim/lib/copilot/tools/server/blocks/get-trigger-blocks.ts +++ b/apps/sim/lib/copilot/tools/server/blocks/get-trigger-blocks.ts @@ -15,6 +15,8 @@ export const getTriggerBlocksServerTool: BaseServerTool< ReturnType > = { name: 'get_trigger_blocks', + inputSchema: GetTriggerBlocksInput, + outputSchema: GetTriggerBlocksResult, async execute(_args: unknown, context?: { userId: string }) { const logger = createLogger('GetTriggerBlocksServerTool') logger.debug('Executing get_trigger_blocks') diff --git a/apps/sim/lib/copilot/tools/server/other/make-api-request.ts b/apps/sim/lib/copilot/tools/server/other/make-api-request.ts index 8d47d7c82e..3f95460511 100644 --- 
a/apps/sim/lib/copilot/tools/server/other/make-api-request.ts +++ b/apps/sim/lib/copilot/tools/server/other/make-api-request.ts @@ -3,22 +3,34 @@ import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' import { executeTool } from '@/tools' import type { TableRow } from '@/tools/types' +const RESULT_CHAR_CAP = Number(process.env.COPILOT_TOOL_RESULT_CHAR_CAP || 20000) + interface MakeApiRequestParams { url: string method: 'GET' | 'POST' | 'PUT' queryParams?: Record headers?: Record - body?: any + body?: unknown +} + +interface ApiResponse { + data: string + status: number + headers: Record + truncated?: boolean + totalChars?: number + previewChars?: number + note?: string } -export const makeApiRequestServerTool: BaseServerTool = { +export const makeApiRequestServerTool: BaseServerTool = { name: 'make_api_request', - async execute(params: MakeApiRequestParams): Promise { + async execute(params: MakeApiRequestParams): Promise { const logger = createLogger('MakeApiRequestServerTool') - const { url, method, queryParams, headers, body } = params || ({} as MakeApiRequestParams) + const { url, method, queryParams, headers, body } = params if (!url || !method) throw new Error('url and method are required') - const toTableRows = (obj?: Record): TableRow[] | null => { + const toTableRows = (obj?: Record): TableRow[] | null => { if (!obj || typeof obj !== 'object') return null return Object.entries(obj).map(([key, value]) => ({ id: key, @@ -26,21 +38,22 @@ export const makeApiRequestServerTool: BaseServerTool })) } const headersTable = toTableRows(headers) - const queryParamsTable = toTableRows(queryParams as Record | undefined) + const queryParamsTable = toTableRows(queryParams as Record | undefined) const result = await executeTool( 'http_request', { url, method, params: queryParamsTable, headers: headersTable, body }, true ) - if (!result.success) throw new Error(result.error || 'API request failed') - const output = (result as any).output || result - 
const data = output.output?.data ?? output.data - const status = output.output?.status ?? output.status ?? 200 - const respHeaders = output.output?.headers ?? output.headers ?? {} + if (!result.success) throw new Error(result.error ?? 'API request failed') - const CAP = Number(process.env.COPILOT_TOOL_RESULT_CHAR_CAP || 20000) - const toStringSafe = (val: any): string => { + const output = result.output as Record | undefined + const nestedOutput = output?.output as Record | undefined + const data = nestedOutput?.data ?? output?.data + const status = (nestedOutput?.status ?? output?.status ?? 200) as number + const respHeaders = (nestedOutput?.headers ?? output?.headers ?? {}) as Record + + const toStringSafe = (val: unknown): string => { if (typeof val === 'string') return val try { return JSON.stringify(val) @@ -53,7 +66,6 @@ export const makeApiRequestServerTool: BaseServerTool try { let text = html let previous: string - do { previous = text text = text.replace(//gi, '') @@ -61,26 +73,21 @@ export const makeApiRequestServerTool: BaseServerTool text = text.replace(/<[^>]*>/g, ' ') text = text.replace(/[<>]/g, ' ') } while (text !== previous) - return text.replace(/\s+/g, ' ').trim() } catch { return html } } + let normalized = toStringSafe(data) const looksLikeHtml = //i.test(normalized) || //i.test(normalized) if (looksLikeHtml) normalized = stripHtml(normalized) + const totalChars = normalized.length - if (totalChars > CAP) { - const preview = normalized.slice(0, CAP) - logger.warn('API response truncated by character cap', { - url, - method, - totalChars, - previewChars: preview.length, - cap: CAP, - }) + if (totalChars > RESULT_CHAR_CAP) { + const preview = normalized.slice(0, RESULT_CHAR_CAP) + logger.warn('API response truncated', { url, method, totalChars, cap: RESULT_CHAR_CAP }) return { data: preview, status, @@ -88,10 +95,11 @@ export const makeApiRequestServerTool: BaseServerTool truncated: true, totalChars, previewChars: preview.length, - note: 
`Response truncated to ${CAP} characters to avoid large payloads`, + note: `Response truncated to ${RESULT_CHAR_CAP} characters`, } } - logger.info('API request executed', { url, method, status, totalChars }) + + logger.debug('API request executed', { url, method, status, totalChars }) return { data: normalized, status, headers: respHeaders } }, } diff --git a/apps/sim/lib/copilot/tools/server/other/search-online.ts b/apps/sim/lib/copilot/tools/server/other/search-online.ts index e8b725b050..a839d345cd 100644 --- a/apps/sim/lib/copilot/tools/server/other/search-online.ts +++ b/apps/sim/lib/copilot/tools/server/other/search-online.ts @@ -11,78 +11,73 @@ interface OnlineSearchParams { hl?: string } -export const searchOnlineServerTool: BaseServerTool = { +interface SearchResult { + title: string + link: string + snippet: string + date?: string + position?: number +} + +interface SearchResponse { + results: SearchResult[] + query: string + type: string + totalResults: number + source: 'exa' | 'serper' +} + +export const searchOnlineServerTool: BaseServerTool = { name: 'search_online', - async execute(params: OnlineSearchParams): Promise { + async execute(params: OnlineSearchParams): Promise { const logger = createLogger('SearchOnlineServerTool') const { query, num = 10, type = 'search', gl, hl } = params if (!query || typeof query !== 'string') throw new Error('query is required') - // Check which API keys are available const hasExaApiKey = Boolean(env.EXA_API_KEY && String(env.EXA_API_KEY).length > 0) const hasSerperApiKey = Boolean(env.SERPER_API_KEY && String(env.SERPER_API_KEY).length > 0) - logger.info('Performing online search', { - queryLength: query.length, - num, - type, - gl, - hl, - hasExaApiKey, - hasSerperApiKey, - }) + logger.debug('Performing online search', { queryLength: query.length, num, type }) // Try Exa first if available if (hasExaApiKey) { try { - logger.debug('Attempting exa_search', { num }) const exaResult = await executeTool('exa_search', { 
query, numResults: num, type: 'auto', - apiKey: env.EXA_API_KEY || '', + apiKey: env.EXA_API_KEY ?? '', }) - const exaResults = (exaResult as any)?.output?.results || [] - const count = Array.isArray(exaResults) ? exaResults.length : 0 - const firstTitle = count > 0 ? String(exaResults[0]?.title || '') : undefined + const output = exaResult.output as { results?: Array<{ title?: string; url?: string; text?: string; summary?: string; publishedDate?: string }> } | undefined + const exaResults = output?.results ?? [] - logger.info('exa_search completed', { - success: exaResult.success, - resultsCount: count, - firstTitlePreview: firstTitle?.slice(0, 120), - }) - - if (exaResult.success && count > 0) { - // Transform Exa results to match expected format - const transformedResults = exaResults.map((result: any) => ({ - title: result.title || '', - link: result.url || '', - snippet: result.text || result.summary || '', + if (exaResult.success && exaResults.length > 0) { + const transformedResults: SearchResult[] = exaResults.map((result, index) => ({ + title: result.title ?? '', + link: result.url ?? '', + snippet: result.text ?? result.summary ?? '', date: result.publishedDate, - position: exaResults.indexOf(result) + 1, + position: index + 1, })) return { results: transformedResults, query, type, - totalResults: count, + totalResults: transformedResults.length, source: 'exa', } } - logger.warn('exa_search returned no results, falling back to Serper', { - queryLength: query.length, - }) - } catch (exaError: any) { + logger.debug('exa_search returned no results, falling back to Serper') + } catch (exaError) { logger.warn('exa_search failed, falling back to Serper', { - error: exaError?.message, + error: exaError instanceof Error ? 
exaError.message : String(exaError), }) } } - // Fall back to Serper if Exa failed or wasn't available if (!hasSerperApiKey) { throw new Error('No search API keys available (EXA_API_KEY or SERPER_API_KEY required)') } @@ -93,41 +88,24 @@ export const searchOnlineServerTool: BaseServerTool = { type, gl, hl, - apiKey: env.SERPER_API_KEY || '', + apiKey: env.SERPER_API_KEY ?? '', } - try { - logger.debug('Calling serper_search tool', { type, num, gl, hl }) - const result = await executeTool('serper_search', toolParams) - const results = (result as any)?.output?.searchResults || [] - const count = Array.isArray(results) ? results.length : 0 - const firstTitle = count > 0 ? String(results[0]?.title || '') : undefined - - logger.info('serper_search completed', { - success: result.success, - resultsCount: count, - firstTitlePreview: firstTitle?.slice(0, 120), - }) - - if (!result.success) { - logger.error('serper_search failed', { error: (result as any)?.error }) - throw new Error((result as any)?.error || 'Search failed') - } + const result = await executeTool('serper_search', toolParams) + const output = result.output as { searchResults?: SearchResult[] } | undefined + const results = output?.searchResults ?? [] - if (count === 0) { - logger.warn('serper_search returned no results', { queryLength: query.length }) - } + if (!result.success) { + const errorMsg = (result as { error?: string }).error ?? 
'Search failed' + throw new Error(errorMsg) + } - return { - results, - query, - type, - totalResults: count, - source: 'serper', - } - } catch (e: any) { - logger.error('search_online execution error', { message: e?.message }) - throw e + return { + results, + query, + type, + totalResults: results.length, + source: 'serper', } }, } diff --git a/apps/sim/lib/copilot/tools/server/router.ts b/apps/sim/lib/copilot/tools/server/router.ts index 2c79cff743..e17b1364fc 100644 --- a/apps/sim/lib/copilot/tools/server/router.ts +++ b/apps/sim/lib/copilot/tools/server/router.ts @@ -1,5 +1,5 @@ import { createLogger } from '@sim/logger' -import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' +import type { BaseServerTool, ServerToolContext } from '@/lib/copilot/tools/server/base-tool' import { getBlockConfigServerTool } from '@/lib/copilot/tools/server/blocks/get-block-config' import { getBlockOptionsServerTool } from '@/lib/copilot/tools/server/blocks/get-block-options' import { getBlocksAndToolsServerTool } from '@/lib/copilot/tools/server/blocks/get-blocks-and-tools' @@ -13,101 +13,52 @@ import { getCredentialsServerTool } from '@/lib/copilot/tools/server/user/get-cr import { setEnvironmentVariablesServerTool } from '@/lib/copilot/tools/server/user/set-environment-variables' import { editWorkflowServerTool } from '@/lib/copilot/tools/server/workflow/edit-workflow' import { getWorkflowConsoleServerTool } from '@/lib/copilot/tools/server/workflow/get-workflow-console' -import { - ExecuteResponseSuccessSchema, - GetBlockConfigInput, - GetBlockConfigResult, - GetBlockOptionsInput, - GetBlockOptionsResult, - GetBlocksAndToolsInput, - GetBlocksAndToolsResult, - GetBlocksMetadataInput, - GetBlocksMetadataResult, - GetTriggerBlocksInput, - GetTriggerBlocksResult, - KnowledgeBaseArgsSchema, -} from '@/lib/copilot/tools/shared/schemas' +import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas' -// Generic execute response schemas (success 
path only for this route; errors handled via HTTP status) export { ExecuteResponseSuccessSchema } export type ExecuteResponseSuccess = (typeof ExecuteResponseSuccessSchema)['_type'] -// Define server tool registry for the new copilot runtime -const serverToolRegistry: Record> = {} const logger = createLogger('ServerToolRouter') -// Register tools -serverToolRegistry[getBlocksAndToolsServerTool.name] = getBlocksAndToolsServerTool -serverToolRegistry[getBlocksMetadataServerTool.name] = getBlocksMetadataServerTool -serverToolRegistry[getBlockOptionsServerTool.name] = getBlockOptionsServerTool -serverToolRegistry[getBlockConfigServerTool.name] = getBlockConfigServerTool -serverToolRegistry[getTriggerBlocksServerTool.name] = getTriggerBlocksServerTool -serverToolRegistry[editWorkflowServerTool.name] = editWorkflowServerTool -serverToolRegistry[getWorkflowConsoleServerTool.name] = getWorkflowConsoleServerTool -serverToolRegistry[searchDocumentationServerTool.name] = searchDocumentationServerTool -serverToolRegistry[searchOnlineServerTool.name] = searchOnlineServerTool -serverToolRegistry[setEnvironmentVariablesServerTool.name] = setEnvironmentVariablesServerTool -serverToolRegistry[getCredentialsServerTool.name] = getCredentialsServerTool -serverToolRegistry[makeApiRequestServerTool.name] = makeApiRequestServerTool -serverToolRegistry[knowledgeBaseServerTool.name] = knowledgeBaseServerTool +/** Registry of all server tools. Tools self-declare their validation schemas. 
*/ +const serverToolRegistry: Record = { + [getBlocksAndToolsServerTool.name]: getBlocksAndToolsServerTool, + [getBlocksMetadataServerTool.name]: getBlocksMetadataServerTool, + [getBlockOptionsServerTool.name]: getBlockOptionsServerTool, + [getBlockConfigServerTool.name]: getBlockConfigServerTool, + [getTriggerBlocksServerTool.name]: getTriggerBlocksServerTool, + [editWorkflowServerTool.name]: editWorkflowServerTool, + [getWorkflowConsoleServerTool.name]: getWorkflowConsoleServerTool, + [searchDocumentationServerTool.name]: searchDocumentationServerTool, + [searchOnlineServerTool.name]: searchOnlineServerTool, + [setEnvironmentVariablesServerTool.name]: setEnvironmentVariablesServerTool, + [getCredentialsServerTool.name]: getCredentialsServerTool, + [makeApiRequestServerTool.name]: makeApiRequestServerTool, + [knowledgeBaseServerTool.name]: knowledgeBaseServerTool, +} +/** + * Route a tool execution request to the appropriate server tool. + * Validates input/output using the tool's declared Zod schemas if present. 
+ */ export async function routeExecution( toolName: string, payload: unknown, - context?: { userId: string } -): Promise { + context?: ServerToolContext +): Promise { const tool = serverToolRegistry[toolName] if (!tool) { throw new Error(`Unknown server tool: ${toolName}`) } - logger.debug('Routing to tool', { - toolName, - payloadPreview: (() => { - try { - return JSON.stringify(payload).slice(0, 200) - } catch { - return undefined - } - })(), - }) - let args: any = payload || {} - if (toolName === 'get_blocks_and_tools') { - args = GetBlocksAndToolsInput.parse(args) - } - if (toolName === 'get_blocks_metadata') { - args = GetBlocksMetadataInput.parse(args) - } - if (toolName === 'get_block_options') { - args = GetBlockOptionsInput.parse(args) - } - if (toolName === 'get_block_config') { - args = GetBlockConfigInput.parse(args) - } - if (toolName === 'get_trigger_blocks') { - args = GetTriggerBlocksInput.parse(args) - } - if (toolName === 'knowledge_base') { - args = KnowledgeBaseArgsSchema.parse(args) - } + logger.debug('Routing to tool', { toolName }) - const result = await tool.execute(args, context) + // Validate input if tool declares a schema + const args = tool.inputSchema ? tool.inputSchema.parse(payload ?? {}) : (payload ?? {}) - if (toolName === 'get_blocks_and_tools') { - return GetBlocksAndToolsResult.parse(result) - } - if (toolName === 'get_blocks_metadata') { - return GetBlocksMetadataResult.parse(result) - } - if (toolName === 'get_block_options') { - return GetBlockOptionsResult.parse(result) - } - if (toolName === 'get_block_config') { - return GetBlockConfigResult.parse(result) - } - if (toolName === 'get_trigger_blocks') { - return GetTriggerBlocksResult.parse(result) - } + // Execute + const result = await tool.execute(args, context) - return result + // Validate output if tool declares a schema + return tool.outputSchema ? 
tool.outputSchema.parse(result) : result } diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts deleted file mode 100644 index 7a22c8075b..0000000000 --- a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow.ts +++ /dev/null @@ -1,3334 +0,0 @@ -import crypto from 'crypto' -import { db } from '@sim/db' -import { workflow as workflowTable } from '@sim/db/schema' -import { createLogger } from '@sim/logger' -import { eq } from 'drizzle-orm' -import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' -import { validateSelectorIds } from '@/lib/copilot/validation/selector-validator' -import type { PermissionGroupConfig } from '@/lib/permission-groups/types' -import { applyAutoLayout } from '@/lib/workflows/autolayout' -import { getBlockOutputs } from '@/lib/workflows/blocks/block-outputs' -import { extractAndPersistCustomTools } from '@/lib/workflows/persistence/custom-tools-persistence' -import { - loadWorkflowFromNormalizedTables, - saveWorkflowToNormalizedTables, -} from '@/lib/workflows/persistence/utils' -import { isValidKey } from '@/lib/workflows/sanitization/key-validation' -import { validateWorkflowState } from '@/lib/workflows/sanitization/validation' -import { buildCanonicalIndex, isCanonicalPair } from '@/lib/workflows/subblocks/visibility' -import { TriggerUtils } from '@/lib/workflows/triggers/triggers' -import { getAllBlocks, getBlock } from '@/blocks/registry' -import type { BlockConfig, SubBlockConfig } from '@/blocks/types' -import { getUserPermissionConfig } from '@/ee/access-control/utils/permission-check' -import { EDGE, normalizeName, RESERVED_BLOCK_NAMES } from '@/executor/constants' -import { generateLoopBlocks, generateParallelBlocks } from '@/stores/workflows/workflow/utils' -import { TRIGGER_RUNTIME_SUBBLOCK_IDS } from '@/triggers/constants' - -/** Selector subblock types that can be validated */ -const SELECTOR_TYPES = new Set([ - 
'oauth-input', - 'knowledge-base-selector', - 'document-selector', - 'file-selector', - 'project-selector', - 'channel-selector', - 'folder-selector', - 'mcp-server-selector', - 'mcp-tool-selector', - 'workflow-selector', -]) - -const validationLogger = createLogger('EditWorkflowValidation') - -/** - * Validation error for a specific field - */ -interface ValidationError { - blockId: string - blockType: string - field: string - value: any - error: string -} - -/** - * Types of items that can be skipped during operation application - */ -type SkippedItemType = - | 'block_not_found' - | 'invalid_block_type' - | 'block_not_allowed' - | 'block_locked' - | 'tool_not_allowed' - | 'invalid_edge_target' - | 'invalid_edge_source' - | 'invalid_source_handle' - | 'invalid_target_handle' - | 'invalid_subblock_field' - | 'missing_required_params' - | 'invalid_subflow_parent' - | 'nested_subflow_not_allowed' - | 'duplicate_block_name' - | 'reserved_block_name' - | 'duplicate_trigger' - | 'duplicate_single_instance_block' - -/** - * Represents an item that was skipped during operation application - */ -interface SkippedItem { - type: SkippedItemType - operationType: string - blockId: string - reason: string - details?: Record -} - -/** - * Logs and records a skipped item - */ -function logSkippedItem(skippedItems: SkippedItem[], item: SkippedItem): void { - validationLogger.warn(`Skipped ${item.operationType} operation: ${item.reason}`, { - type: item.type, - operationType: item.operationType, - blockId: item.blockId, - ...(item.details && { details: item.details }), - }) - skippedItems.push(item) -} - -/** - * Finds an existing block with the same normalized name. 
- */ -function findBlockWithDuplicateNormalizedName( - blocks: Record, - name: string, - excludeBlockId: string -): [string, any] | undefined { - const normalizedName = normalizeName(name) - return Object.entries(blocks).find( - ([blockId, block]: [string, any]) => - blockId !== excludeBlockId && normalizeName(block.name || '') === normalizedName - ) -} - -/** - * Result of input validation - */ -interface ValidationResult { - validInputs: Record - errors: ValidationError[] -} - -/** - * Validates and filters inputs against a block's subBlock configuration - * Returns valid inputs and any validation errors encountered - */ -function validateInputsForBlock( - blockType: string, - inputs: Record, - blockId: string -): ValidationResult { - const errors: ValidationError[] = [] - const blockConfig = getBlock(blockType) - - if (!blockConfig) { - // Unknown block type - return inputs as-is (let it fail later if invalid) - validationLogger.warn(`Unknown block type: ${blockType}, skipping validation`) - return { validInputs: inputs, errors: [] } - } - - const validatedInputs: Record = {} - const subBlockMap = new Map() - - // Build map of subBlock id -> config - for (const subBlock of blockConfig.subBlocks) { - subBlockMap.set(subBlock.id, subBlock) - } - - for (const [key, value] of Object.entries(inputs)) { - // Skip runtime subblock IDs - if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) { - continue - } - - const subBlockConfig = subBlockMap.get(key) - - // If subBlock doesn't exist in config, skip it (unless it's a known dynamic field) - if (!subBlockConfig) { - // Some fields are valid but not in subBlocks (like loop/parallel config) - // Allow these through for special block types - if (blockType === 'loop' || blockType === 'parallel') { - validatedInputs[key] = value - } else { - errors.push({ - blockId, - blockType, - field: key, - value, - error: `Unknown input field "${key}" for block type "${blockType}"`, - }) - } - continue - } - - // Note: We do NOT check 
subBlockConfig.condition here. - // Conditions are for UI display logic (show/hide fields in the editor). - // For API/Copilot, any valid field in the block schema should be accepted. - // The runtime will use the relevant fields based on the actual operation. - - // Validate value based on subBlock type - const validationResult = validateValueForSubBlockType( - subBlockConfig, - value, - key, - blockType, - blockId - ) - if (validationResult.valid) { - validatedInputs[key] = validationResult.value - } else if (validationResult.error) { - errors.push(validationResult.error) - } - } - - return { validInputs: validatedInputs, errors } -} - -/** - * Result of validating a single value - */ -interface ValueValidationResult { - valid: boolean - value?: any - error?: ValidationError -} - -/** - * Validates a value against its expected subBlock type - * Returns validation result with the value or an error - */ -function validateValueForSubBlockType( - subBlockConfig: SubBlockConfig, - value: any, - fieldName: string, - blockType: string, - blockId: string -): ValueValidationResult { - const { type } = subBlockConfig - - // Handle null/undefined - allow clearing fields - if (value === null || value === undefined) { - return { valid: true, value } - } - - switch (type) { - case 'dropdown': { - // Validate against allowed options - const options = - typeof subBlockConfig.options === 'function' - ? subBlockConfig.options() - : subBlockConfig.options - if (options && Array.isArray(options)) { - const validIds = options.map((opt) => opt.id) - if (!validIds.includes(value)) { - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid dropdown value "${value}" for field "${fieldName}". Valid options: ${validIds.join(', ')}`, - }, - } - } - } - return { valid: true, value } - } - - case 'slider': { - // Validate numeric range - const numValue = typeof value === 'number' ? 
value : Number(value) - if (Number.isNaN(numValue)) { - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid slider value "${value}" for field "${fieldName}" - must be a number`, - }, - } - } - // Clamp to range (allow but warn) - let clampedValue = numValue - if (subBlockConfig.min !== undefined && numValue < subBlockConfig.min) { - clampedValue = subBlockConfig.min - } - if (subBlockConfig.max !== undefined && numValue > subBlockConfig.max) { - clampedValue = subBlockConfig.max - } - return { - valid: true, - value: subBlockConfig.integer ? Math.round(clampedValue) : clampedValue, - } - } - - case 'switch': { - // Must be boolean - if (typeof value !== 'boolean') { - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid switch value "${value}" for field "${fieldName}" - must be true or false`, - }, - } - } - return { valid: true, value } - } - - case 'file-upload': { - // File upload should be an object with specific properties or null - if (value === null) return { valid: true, value: null } - if (typeof value !== 'object') { - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid file-upload value for field "${fieldName}" - expected object with name and path properties, or null`, - }, - } - } - // Validate file object has required properties - if (value && (!value.name || !value.path)) { - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid file-upload object for field "${fieldName}" - must have "name" and "path" properties`, - }, - } - } - return { valid: true, value } - } - - case 'input-format': - case 'table': { - // Should be an array - if (!Array.isArray(value)) { - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid ${type} value for field "${fieldName}" - expected an array`, - }, - } - } 
- return { valid: true, value } - } - - case 'tool-input': { - // Should be an array of tool objects - if (!Array.isArray(value)) { - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid tool-input value for field "${fieldName}" - expected an array of tool objects`, - }, - } - } - return { valid: true, value } - } - - case 'code': { - // Code must be a string (content can be JS, Python, JSON, SQL, HTML, etc.) - if (typeof value !== 'string') { - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid code value for field "${fieldName}" - expected a string, got ${typeof value}`, - }, - } - } - return { valid: true, value } - } - - case 'response-format': { - // Allow empty/null - if (value === null || value === undefined || value === '') { - return { valid: true, value } - } - // Allow objects (will be stringified later by normalizeResponseFormat) - if (typeof value === 'object') { - return { valid: true, value } - } - // If string, must be valid JSON - if (typeof value === 'string') { - try { - JSON.parse(value) - return { valid: true, value } - } catch { - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid response-format value for field "${fieldName}" - string must be valid JSON`, - }, - } - } - } - // Reject numbers, booleans, etc. 
- return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid response-format value for field "${fieldName}" - expected a JSON string or object`, - }, - } - } - - case 'short-input': - case 'long-input': - case 'combobox': { - // Should be string (combobox allows custom values) - if (typeof value !== 'string' && typeof value !== 'number') { - // Convert to string but don't error - return { valid: true, value: String(value) } - } - return { valid: true, value } - } - - // Selector types - allow strings (IDs) or arrays of strings - case 'oauth-input': - case 'knowledge-base-selector': - case 'document-selector': - case 'file-selector': - case 'project-selector': - case 'channel-selector': - case 'folder-selector': - case 'mcp-server-selector': - case 'mcp-tool-selector': - case 'workflow-selector': { - if (subBlockConfig.multiSelect && Array.isArray(value)) { - return { valid: true, value } - } - if (typeof value === 'string') { - return { valid: true, value } - } - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid selector value for field "${fieldName}" - expected a string${subBlockConfig.multiSelect ? 
' or array of strings' : ''}`, - }, - } - } - - default: - // For unknown types, pass through - return { valid: true, value } - } -} - -interface EditWorkflowOperation { - operation_type: 'add' | 'edit' | 'delete' | 'insert_into_subflow' | 'extract_from_subflow' - block_id: string - params?: Record -} - -interface EditWorkflowParams { - operations: EditWorkflowOperation[] - workflowId: string - currentUserWorkflow?: string -} - -/** - * Topologically sort insert operations to ensure parents are created before children - * Returns sorted array where parent inserts always come before child inserts - */ -function topologicalSortInserts( - inserts: EditWorkflowOperation[], - adds: EditWorkflowOperation[] -): EditWorkflowOperation[] { - if (inserts.length === 0) return [] - - // Build a map of blockId -> operation for quick lookup - const insertMap = new Map() - inserts.forEach((op) => insertMap.set(op.block_id, op)) - - // Build a set of blocks being added (potential parents) - const addedBlocks = new Set(adds.map((op) => op.block_id)) - - // Build dependency graph: block -> blocks that depend on it - const dependents = new Map>() - const dependencies = new Map>() - - inserts.forEach((op) => { - const blockId = op.block_id - const parentId = op.params?.subflowId - - dependencies.set(blockId, new Set()) - - if (parentId) { - // Track dependency if parent is being inserted OR being added - // This ensures children wait for parents regardless of operation type - const parentBeingCreated = insertMap.has(parentId) || addedBlocks.has(parentId) - - if (parentBeingCreated) { - // Only add dependency if parent is also being inserted (not added) - // Because adds run before inserts, added parents are already created - if (insertMap.has(parentId)) { - dependencies.get(blockId)!.add(parentId) - if (!dependents.has(parentId)) { - dependents.set(parentId, new Set()) - } - dependents.get(parentId)!.add(blockId) - } - } - } - }) - - // Topological sort using Kahn's algorithm - const 
sorted: EditWorkflowOperation[] = [] - const queue: string[] = [] - - // Start with nodes that have no dependencies (or depend only on added blocks) - inserts.forEach((op) => { - const deps = dependencies.get(op.block_id)! - if (deps.size === 0) { - queue.push(op.block_id) - } - }) - - while (queue.length > 0) { - const blockId = queue.shift()! - const op = insertMap.get(blockId) - if (op) { - sorted.push(op) - } - - // Remove this node from dependencies of others - const children = dependents.get(blockId) - if (children) { - children.forEach((childId) => { - const childDeps = dependencies.get(childId)! - childDeps.delete(blockId) - if (childDeps.size === 0) { - queue.push(childId) - } - }) - } - } - - // If sorted length doesn't match input, there's a cycle (shouldn't happen with valid operations) - // Just append remaining operations - if (sorted.length < inserts.length) { - inserts.forEach((op) => { - if (!sorted.includes(op)) { - sorted.push(op) - } - }) - } - - return sorted -} - -/** - * Helper to create a block state from operation params - */ -function createBlockFromParams( - blockId: string, - params: any, - parentId?: string, - errorsCollector?: ValidationError[], - permissionConfig?: PermissionGroupConfig | null, - skippedItems?: SkippedItem[] -): any { - const blockConfig = getAllBlocks().find((b) => b.type === params.type) - - // Validate inputs against block configuration - let validatedInputs: Record | undefined - if (params.inputs) { - const result = validateInputsForBlock(params.type, params.inputs, blockId) - validatedInputs = result.validInputs - if (errorsCollector && result.errors.length > 0) { - errorsCollector.push(...result.errors) - } - } - - // Determine outputs based on trigger mode - const triggerMode = params.triggerMode || false - let outputs: Record - - if (params.outputs) { - outputs = params.outputs - } else if (blockConfig) { - const subBlocks: Record = {} - if (validatedInputs) { - Object.entries(validatedInputs).forEach(([key, 
value]) => { - // Skip runtime subblock IDs when computing outputs - if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) { - return - } - subBlocks[key] = { id: key, type: 'short-input', value: value } - }) - } - outputs = getBlockOutputs(params.type, subBlocks, triggerMode) - } else { - outputs = {} - } - - const blockState: any = { - id: blockId, - type: params.type, - name: params.name, - position: { x: 0, y: 0 }, - enabled: params.enabled !== undefined ? params.enabled : true, - horizontalHandles: true, - advancedMode: params.advancedMode || false, - height: 0, - triggerMode: triggerMode, - subBlocks: {}, - outputs: outputs, - data: parentId ? { parentId, extent: 'parent' as const } : {}, - locked: false, - } - - // Add validated inputs as subBlocks - if (validatedInputs) { - Object.entries(validatedInputs).forEach(([key, value]) => { - if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) { - return - } - - let sanitizedValue = value - - // Normalize array subblocks with id fields (inputFormat, table rows, etc.) - if (shouldNormalizeArrayIds(key)) { - sanitizedValue = normalizeArrayWithIds(value) - } - - // Special handling for tools - normalize and filter disallowed - if (key === 'tools' && Array.isArray(value)) { - sanitizedValue = filterDisallowedTools( - normalizeTools(value), - permissionConfig ?? null, - blockId, - skippedItems ?? 
[] - ) - } - - // Special handling for responseFormat - normalize to ensure consistent format - if (key === 'responseFormat' && value) { - sanitizedValue = normalizeResponseFormat(value) - } - - blockState.subBlocks[key] = { - id: key, - type: 'short-input', - value: sanitizedValue, - } - }) - } - - // Set up subBlocks from block configuration - if (blockConfig) { - blockConfig.subBlocks.forEach((subBlock) => { - if (!blockState.subBlocks[subBlock.id]) { - blockState.subBlocks[subBlock.id] = { - id: subBlock.id, - type: subBlock.type, - value: null, - } - } - }) - - if (validatedInputs) { - updateCanonicalModesForInputs(blockState, Object.keys(validatedInputs), blockConfig) - } - } - - return blockState -} - -function updateCanonicalModesForInputs( - block: { data?: { canonicalModes?: Record } }, - inputKeys: string[], - blockConfig: BlockConfig -): void { - if (!blockConfig.subBlocks?.length) return - - const canonicalIndex = buildCanonicalIndex(blockConfig.subBlocks) - const canonicalModeUpdates: Record = {} - - for (const inputKey of inputKeys) { - const canonicalId = canonicalIndex.canonicalIdBySubBlockId[inputKey] - if (!canonicalId) continue - - const group = canonicalIndex.groupsById[canonicalId] - if (!group || !isCanonicalPair(group)) continue - - const isAdvanced = group.advancedIds.includes(inputKey) - const existingMode = canonicalModeUpdates[canonicalId] - - if (!existingMode || isAdvanced) { - canonicalModeUpdates[canonicalId] = isAdvanced ? 
'advanced' : 'basic' - } - } - - if (Object.keys(canonicalModeUpdates).length > 0) { - if (!block.data) block.data = {} - if (!block.data.canonicalModes) block.data.canonicalModes = {} - Object.assign(block.data.canonicalModes, canonicalModeUpdates) - } -} - -/** - * Normalize tools array by adding back fields that were sanitized for training - */ -function normalizeTools(tools: any[]): any[] { - return tools.map((tool) => { - if (tool.type === 'custom-tool') { - // New reference format: minimal fields only - if (tool.customToolId && !tool.schema && !tool.code) { - return { - type: tool.type, - customToolId: tool.customToolId, - usageControl: tool.usageControl || 'auto', - isExpanded: tool.isExpanded ?? true, - } - } - - // Legacy inline format: include all fields - const normalized: any = { - ...tool, - params: tool.params || {}, - isExpanded: tool.isExpanded ?? true, - } - - // Ensure schema has proper structure (for inline format) - if (normalized.schema?.function) { - normalized.schema = { - type: 'function', - function: { - name: normalized.schema.function.name || tool.title, // Preserve name or derive from title - description: normalized.schema.function.description, - parameters: normalized.schema.function.parameters, - }, - } - } - - return normalized - } - - // For other tool types, just ensure isExpanded exists - return { - ...tool, - isExpanded: tool.isExpanded ?? true, - } - }) -} - -/** UUID v4 regex pattern for validation */ -const UUID_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i - -/** - * Subblock types that store arrays of objects with `id` fields. - * The LLM may generate arbitrary IDs which need to be converted to proper UUIDs. 
- */ -const ARRAY_WITH_ID_SUBBLOCK_TYPES = new Set([ - 'inputFormat', // input-format: Fields with id, name, type, value, collapsed - 'headers', // table: Rows with id, cells (used for HTTP headers) - 'params', // table: Rows with id, cells (used for query params) - 'variables', // table or variables-input: Rows/assignments with id - 'tagFilters', // knowledge-tag-filters: Filters with id, tagName, etc. - 'documentTags', // document-tag-entry: Tags with id, tagName, etc. - 'metrics', // eval-input: Metrics with id, name, description, range -]) - -/** - * Normalizes array subblock values by ensuring each item has a valid UUID. - * The LLM may generate arbitrary IDs like "input-desc-001" or "row-1" which need - * to be converted to proper UUIDs for consistency with UI-created items. - */ -function normalizeArrayWithIds(value: unknown): any[] { - if (!Array.isArray(value)) { - return [] - } - - return value.map((item: any) => { - if (!item || typeof item !== 'object') { - return item - } - - // Check if id is missing or not a valid UUID - const hasValidUUID = typeof item.id === 'string' && UUID_REGEX.test(item.id) - if (!hasValidUUID) { - return { ...item, id: crypto.randomUUID() } - } - - return item - }) -} - -/** - * Checks if a subblock key should have its array items normalized with UUIDs. 
- */ -function shouldNormalizeArrayIds(key: string): boolean { - return ARRAY_WITH_ID_SUBBLOCK_TYPES.has(key) -} - -/** - * Normalize responseFormat to ensure consistent storage - * Handles both string (JSON) and object formats - * Returns pretty-printed JSON for better UI readability - */ -function normalizeResponseFormat(value: any): string { - try { - let obj = value - - // If it's already a string, parse it first - if (typeof value === 'string') { - const trimmed = value.trim() - if (!trimmed) { - return '' - } - obj = JSON.parse(trimmed) - } - - // If it's an object, stringify it with consistent formatting - if (obj && typeof obj === 'object') { - // Sort keys recursively for consistent comparison - const sortKeys = (item: any): any => { - if (Array.isArray(item)) { - return item.map(sortKeys) - } - if (item !== null && typeof item === 'object') { - return Object.keys(item) - .sort() - .reduce((result: any, key: string) => { - result[key] = sortKeys(item[key]) - return result - }, {}) - } - return item - } - - // Return pretty-printed with 2-space indentation for UI readability - // The sanitizer will normalize it to minified format for comparison - return JSON.stringify(sortKeys(obj), null, 2) - } - - return String(value) - } catch (error) { - // If parsing fails, return the original value as string - return String(value) - } -} - -interface EdgeHandleValidationResult { - valid: boolean - error?: string - /** The normalized handle to use (e.g., simple 'if' normalized to 'condition-{uuid}') */ - normalizedHandle?: string -} - -/** - * Validates source handle is valid for the block type - */ -function validateSourceHandleForBlock( - sourceHandle: string, - sourceBlockType: string, - sourceBlock: any -): EdgeHandleValidationResult { - if (sourceHandle === 'error') { - return { valid: true } - } - - switch (sourceBlockType) { - case 'loop': - if (sourceHandle === 'loop-start-source' || sourceHandle === 'loop-end-source') { - return { valid: true } - } - return { 
- valid: false, - error: `Invalid source handle "${sourceHandle}" for loop block. Valid handles: loop-start-source, loop-end-source, error`, - } - - case 'parallel': - if (sourceHandle === 'parallel-start-source' || sourceHandle === 'parallel-end-source') { - return { valid: true } - } - return { - valid: false, - error: `Invalid source handle "${sourceHandle}" for parallel block. Valid handles: parallel-start-source, parallel-end-source, error`, - } - - case 'condition': { - const conditionsValue = sourceBlock?.subBlocks?.conditions?.value - if (!conditionsValue) { - return { - valid: false, - error: `Invalid condition handle "${sourceHandle}" - no conditions defined`, - } - } - - // validateConditionHandle accepts simple format (if, else-if-0, else), - // legacy format (condition-{blockId}-if), and internal ID format (condition-{uuid}) - return validateConditionHandle(sourceHandle, sourceBlock.id, conditionsValue) - } - - case 'router': - if (sourceHandle === 'source' || sourceHandle.startsWith(EDGE.ROUTER_PREFIX)) { - return { valid: true } - } - return { - valid: false, - error: `Invalid source handle "${sourceHandle}" for router block. Valid handles: source, ${EDGE.ROUTER_PREFIX}{targetId}, error`, - } - - case 'router_v2': { - const routesValue = sourceBlock?.subBlocks?.routes?.value - if (!routesValue) { - return { - valid: false, - error: `Invalid router handle "${sourceHandle}" - no routes defined`, - } - } - - // validateRouterHandle accepts simple format (route-0, route-1), - // legacy format (router-{blockId}-route-1), and internal ID format (router-{uuid}) - return validateRouterHandle(sourceHandle, sourceBlock.id, routesValue) - } - - default: - if (sourceHandle === 'source') { - return { valid: true } - } - return { - valid: false, - error: `Invalid source handle "${sourceHandle}" for ${sourceBlockType} block. Valid handles: source, error`, - } - } -} - -/** - * Validates condition handle references a valid condition in the block. 
- * Accepts multiple formats: - * - Simple format: "if", "else-if-0", "else-if-1", "else" - * - Legacy semantic format: "condition-{blockId}-if", "condition-{blockId}-else-if" - * - Internal ID format: "condition-{conditionId}" - * - * Returns the normalized handle (condition-{conditionId}) for storage. - */ -function validateConditionHandle( - sourceHandle: string, - blockId: string, - conditionsValue: string | any[] -): EdgeHandleValidationResult { - let conditions: any[] - if (typeof conditionsValue === 'string') { - try { - conditions = JSON.parse(conditionsValue) - } catch { - return { - valid: false, - error: `Cannot validate condition handle "${sourceHandle}" - conditions is not valid JSON`, - } - } - } else if (Array.isArray(conditionsValue)) { - conditions = conditionsValue - } else { - return { - valid: false, - error: `Cannot validate condition handle "${sourceHandle}" - conditions is not an array`, - } - } - - if (!Array.isArray(conditions) || conditions.length === 0) { - return { - valid: false, - error: `Invalid condition handle "${sourceHandle}" - no conditions defined`, - } - } - - // Build a map of all valid handle formats -> normalized handle (condition-{conditionId}) - const handleToNormalized = new Map() - const legacySemanticPrefix = `condition-${blockId}-` - let elseIfIndex = 0 - - for (const condition of conditions) { - if (!condition.id) continue - - const normalizedHandle = `condition-${condition.id}` - const title = condition.title?.toLowerCase() - - // Always accept internal ID format - handleToNormalized.set(normalizedHandle, normalizedHandle) - - if (title === 'if') { - // Simple format: "if" - handleToNormalized.set('if', normalizedHandle) - // Legacy format: "condition-{blockId}-if" - handleToNormalized.set(`${legacySemanticPrefix}if`, normalizedHandle) - } else if (title === 'else if') { - // Simple format: "else-if-0", "else-if-1", etc. 
(0-indexed) - handleToNormalized.set(`else-if-${elseIfIndex}`, normalizedHandle) - // Legacy format: "condition-{blockId}-else-if" for first, "condition-{blockId}-else-if-2" for second - if (elseIfIndex === 0) { - handleToNormalized.set(`${legacySemanticPrefix}else-if`, normalizedHandle) - } else { - handleToNormalized.set( - `${legacySemanticPrefix}else-if-${elseIfIndex + 1}`, - normalizedHandle - ) - } - elseIfIndex++ - } else if (title === 'else') { - // Simple format: "else" - handleToNormalized.set('else', normalizedHandle) - // Legacy format: "condition-{blockId}-else" - handleToNormalized.set(`${legacySemanticPrefix}else`, normalizedHandle) - } - } - - const normalizedHandle = handleToNormalized.get(sourceHandle) - if (normalizedHandle) { - return { valid: true, normalizedHandle } - } - - // Build list of valid simple format options for error message - const simpleOptions: string[] = [] - elseIfIndex = 0 - for (const condition of conditions) { - const title = condition.title?.toLowerCase() - if (title === 'if') { - simpleOptions.push('if') - } else if (title === 'else if') { - simpleOptions.push(`else-if-${elseIfIndex}`) - elseIfIndex++ - } else if (title === 'else') { - simpleOptions.push('else') - } - } - - return { - valid: false, - error: `Invalid condition handle "${sourceHandle}". Valid handles: ${simpleOptions.join(', ')}`, - } -} - -/** - * Validates router handle references a valid route in the block. - * Accepts multiple formats: - * - Simple format: "route-0", "route-1", "route-2" (0-indexed) - * - Legacy semantic format: "router-{blockId}-route-1" (1-indexed) - * - Internal ID format: "router-{routeId}" - * - * Returns the normalized handle (router-{routeId}) for storage. 
- */ -function validateRouterHandle( - sourceHandle: string, - blockId: string, - routesValue: string | any[] -): EdgeHandleValidationResult { - let routes: any[] - if (typeof routesValue === 'string') { - try { - routes = JSON.parse(routesValue) - } catch { - return { - valid: false, - error: `Cannot validate router handle "${sourceHandle}" - routes is not valid JSON`, - } - } - } else if (Array.isArray(routesValue)) { - routes = routesValue - } else { - return { - valid: false, - error: `Cannot validate router handle "${sourceHandle}" - routes is not an array`, - } - } - - if (!Array.isArray(routes) || routes.length === 0) { - return { - valid: false, - error: `Invalid router handle "${sourceHandle}" - no routes defined`, - } - } - - // Build a map of all valid handle formats -> normalized handle (router-{routeId}) - const handleToNormalized = new Map() - const legacySemanticPrefix = `router-${blockId}-` - - for (let i = 0; i < routes.length; i++) { - const route = routes[i] - if (!route.id) continue - - const normalizedHandle = `router-${route.id}` - - // Always accept internal ID format: router-{uuid} - handleToNormalized.set(normalizedHandle, normalizedHandle) - - // Simple format: route-0, route-1, etc. 
(0-indexed) - handleToNormalized.set(`route-${i}`, normalizedHandle) - - // Legacy 1-indexed route number format: router-{blockId}-route-1 - handleToNormalized.set(`${legacySemanticPrefix}route-${i + 1}`, normalizedHandle) - - // Accept normalized title format: router-{blockId}-{normalized-title} - if (route.title && typeof route.title === 'string') { - const normalizedTitle = route.title - .toLowerCase() - .replace(/\s+/g, '-') - .replace(/[^a-z0-9-]/g, '') - if (normalizedTitle) { - handleToNormalized.set(`${legacySemanticPrefix}${normalizedTitle}`, normalizedHandle) - } - } - } - - const normalizedHandle = handleToNormalized.get(sourceHandle) - if (normalizedHandle) { - return { valid: true, normalizedHandle } - } - - // Build list of valid simple format options for error message - const simpleOptions = routes.map((_, i) => `route-${i}`) - - return { - valid: false, - error: `Invalid router handle "${sourceHandle}". Valid handles: ${simpleOptions.join(', ')}`, - } -} - -/** - * Validates target handle is valid (must be 'target') - */ -function validateTargetHandle(targetHandle: string): EdgeHandleValidationResult { - if (targetHandle === 'target') { - return { valid: true } - } - return { - valid: false, - error: `Invalid target handle "${targetHandle}". Expected "target"`, - } -} - -/** - * Creates a validated edge between two blocks. - * Returns true if edge was created, false if skipped due to validation errors. - */ -function createValidatedEdge( - modifiedState: any, - sourceBlockId: string, - targetBlockId: string, - sourceHandle: string, - targetHandle: string, - operationType: string, - logger: ReturnType, - skippedItems?: SkippedItem[] -): boolean { - if (!modifiedState.blocks[targetBlockId]) { - logger.warn(`Target block "${targetBlockId}" not found. 
Edge skipped.`, { - sourceBlockId, - targetBlockId, - sourceHandle, - }) - skippedItems?.push({ - type: 'invalid_edge_target', - operationType, - blockId: sourceBlockId, - reason: `Edge from "${sourceBlockId}" to "${targetBlockId}" skipped - target block does not exist`, - details: { sourceHandle, targetHandle, targetId: targetBlockId }, - }) - return false - } - - const sourceBlock = modifiedState.blocks[sourceBlockId] - if (!sourceBlock) { - logger.warn(`Source block "${sourceBlockId}" not found. Edge skipped.`, { - sourceBlockId, - targetBlockId, - }) - skippedItems?.push({ - type: 'invalid_edge_source', - operationType, - blockId: sourceBlockId, - reason: `Edge from "${sourceBlockId}" to "${targetBlockId}" skipped - source block does not exist`, - details: { sourceHandle, targetHandle, targetId: targetBlockId }, - }) - return false - } - - const sourceBlockType = sourceBlock.type - if (!sourceBlockType) { - logger.warn(`Source block "${sourceBlockId}" has no type. Edge skipped.`, { - sourceBlockId, - targetBlockId, - }) - skippedItems?.push({ - type: 'invalid_edge_source', - operationType, - blockId: sourceBlockId, - reason: `Edge from "${sourceBlockId}" to "${targetBlockId}" skipped - source block has no type`, - details: { sourceHandle, targetHandle, targetId: targetBlockId }, - }) - return false - } - - const sourceValidation = validateSourceHandleForBlock(sourceHandle, sourceBlockType, sourceBlock) - if (!sourceValidation.valid) { - logger.warn(`Invalid source handle. 
Edge skipped.`, { - sourceBlockId, - targetBlockId, - sourceHandle, - error: sourceValidation.error, - }) - skippedItems?.push({ - type: 'invalid_source_handle', - operationType, - blockId: sourceBlockId, - reason: sourceValidation.error || `Invalid source handle "${sourceHandle}"`, - details: { sourceHandle, targetHandle, targetId: targetBlockId }, - }) - return false - } - - const targetValidation = validateTargetHandle(targetHandle) - if (!targetValidation.valid) { - logger.warn(`Invalid target handle. Edge skipped.`, { - sourceBlockId, - targetBlockId, - targetHandle, - error: targetValidation.error, - }) - skippedItems?.push({ - type: 'invalid_target_handle', - operationType, - blockId: sourceBlockId, - reason: targetValidation.error || `Invalid target handle "${targetHandle}"`, - details: { sourceHandle, targetHandle, targetId: targetBlockId }, - }) - return false - } - - // Use normalized handle if available (e.g., 'if' -> 'condition-{uuid}') - const finalSourceHandle = sourceValidation.normalizedHandle || sourceHandle - - modifiedState.edges.push({ - id: crypto.randomUUID(), - source: sourceBlockId, - sourceHandle: finalSourceHandle, - target: targetBlockId, - targetHandle, - type: 'default', - }) - return true -} - -/** - * Adds connections as edges for a block. 
- * Supports multiple target formats: - * - String: "target-block-id" - * - Object: { block: "target-block-id", handle?: "custom-target-handle" } - * - Array of strings or objects - */ -function addConnectionsAsEdges( - modifiedState: any, - blockId: string, - connections: Record, - logger: ReturnType, - skippedItems?: SkippedItem[] -): void { - Object.entries(connections).forEach(([sourceHandle, targets]) => { - if (targets === null) return - - const addEdgeForTarget = (targetBlock: string, targetHandle?: string) => { - createValidatedEdge( - modifiedState, - blockId, - targetBlock, - sourceHandle, - targetHandle || 'target', - 'add_edge', - logger, - skippedItems - ) - } - - if (typeof targets === 'string') { - addEdgeForTarget(targets) - } else if (Array.isArray(targets)) { - targets.forEach((target: any) => { - if (typeof target === 'string') { - addEdgeForTarget(target) - } else if (target?.block) { - addEdgeForTarget(target.block, target.handle) - } - }) - } else if (typeof targets === 'object' && targets?.block) { - addEdgeForTarget(targets.block, targets.handle) - } - }) -} - -function applyTriggerConfigToBlockSubblocks(block: any, triggerConfig: Record) { - if (!block?.subBlocks || !triggerConfig || typeof triggerConfig !== 'object') { - return - } - - Object.entries(triggerConfig).forEach(([configKey, configValue]) => { - const existingSubblock = block.subBlocks[configKey] - if (existingSubblock) { - const existingValue = existingSubblock.value - const valuesEqual = - typeof existingValue === 'object' || typeof configValue === 'object' - ? 
JSON.stringify(existingValue) === JSON.stringify(configValue) - : existingValue === configValue - - if (valuesEqual) { - return - } - - block.subBlocks[configKey] = { - ...existingSubblock, - value: configValue, - } - } else { - block.subBlocks[configKey] = { - id: configKey, - type: 'short-input', - value: configValue, - } - } - }) -} - -/** - * Result of applying operations to workflow state - */ -interface ApplyOperationsResult { - state: any - validationErrors: ValidationError[] - skippedItems: SkippedItem[] -} - -/** - * Checks if a block type is allowed by the permission group config - */ -function isBlockTypeAllowed( - blockType: string, - permissionConfig: PermissionGroupConfig | null -): boolean { - if (!permissionConfig || permissionConfig.allowedIntegrations === null) { - return true - } - return permissionConfig.allowedIntegrations.includes(blockType) -} - -/** - * Filters out tools that are not allowed by the permission group config - * Returns both the allowed tools and any skipped tool items for logging - */ -function filterDisallowedTools( - tools: any[], - permissionConfig: PermissionGroupConfig | null, - blockId: string, - skippedItems: SkippedItem[] -): any[] { - if (!permissionConfig) { - return tools - } - - const allowedTools: any[] = [] - - for (const tool of tools) { - if (tool.type === 'custom-tool' && permissionConfig.disableCustomTools) { - logSkippedItem(skippedItems, { - type: 'tool_not_allowed', - operationType: 'add', - blockId, - reason: `Custom tool "${tool.title || tool.customToolId || 'unknown'}" is not allowed by permission group - tool not added`, - details: { toolType: 'custom-tool', toolId: tool.customToolId }, - }) - continue - } - if (tool.type === 'mcp' && permissionConfig.disableMcpTools) { - logSkippedItem(skippedItems, { - type: 'tool_not_allowed', - operationType: 'add', - blockId, - reason: `MCP tool "${tool.title || 'unknown'}" is not allowed by permission group - tool not added`, - details: { toolType: 'mcp', 
serverId: tool.params?.serverId }, - }) - continue - } - allowedTools.push(tool) - } - - return allowedTools -} - -/** - * Normalizes block IDs in operations to ensure they are valid UUIDs. - * The LLM may generate human-readable IDs like "web_search" or "research_agent" - * which need to be converted to proper UUIDs for database compatibility. - * - * Returns the normalized operations and a mapping from old IDs to new UUIDs. - */ -function normalizeBlockIdsInOperations(operations: EditWorkflowOperation[]): { - normalizedOperations: EditWorkflowOperation[] - idMapping: Map -} { - const logger = createLogger('EditWorkflowServerTool') - const idMapping = new Map() - - // First pass: collect all non-UUID block_ids from add/insert operations - for (const op of operations) { - if (op.operation_type === 'add' || op.operation_type === 'insert_into_subflow') { - if (op.block_id && !UUID_REGEX.test(op.block_id)) { - const newId = crypto.randomUUID() - idMapping.set(op.block_id, newId) - logger.debug('Normalizing block ID', { oldId: op.block_id, newId }) - } - } - } - - if (idMapping.size === 0) { - return { normalizedOperations: operations, idMapping } - } - - logger.info('Normalizing block IDs in operations', { - normalizedCount: idMapping.size, - mappings: Object.fromEntries(idMapping), - }) - - // Helper to replace an ID if it's in the mapping - const replaceId = (id: string | undefined): string | undefined => { - if (!id) return id - return idMapping.get(id) ?? id - } - - // Second pass: update all references to use new UUIDs - const normalizedOperations = operations.map((op) => { - const normalized: EditWorkflowOperation = { - ...op, - block_id: replaceId(op.block_id) ?? 
op.block_id, - } - - if (op.params) { - normalized.params = { ...op.params } - - // Update subflowId references (for insert_into_subflow) - if (normalized.params.subflowId) { - normalized.params.subflowId = replaceId(normalized.params.subflowId) - } - - // Update connection references - if (normalized.params.connections) { - const normalizedConnections: Record = {} - for (const [handle, targets] of Object.entries(normalized.params.connections)) { - if (typeof targets === 'string') { - normalizedConnections[handle] = replaceId(targets) - } else if (Array.isArray(targets)) { - normalizedConnections[handle] = targets.map((t) => { - if (typeof t === 'string') return replaceId(t) - if (t && typeof t === 'object' && t.block) { - return { ...t, block: replaceId(t.block) } - } - return t - }) - } else if (targets && typeof targets === 'object' && (targets as any).block) { - normalizedConnections[handle] = { ...targets, block: replaceId((targets as any).block) } - } else { - normalizedConnections[handle] = targets - } - } - normalized.params.connections = normalizedConnections - } - - // Update nestedNodes block IDs - if (normalized.params.nestedNodes) { - const normalizedNestedNodes: Record = {} - for (const [childId, childBlock] of Object.entries(normalized.params.nestedNodes)) { - const newChildId = replaceId(childId) ?? 
childId - normalizedNestedNodes[newChildId] = childBlock - } - normalized.params.nestedNodes = normalizedNestedNodes - } - } - - return normalized - }) - - return { normalizedOperations, idMapping } -} - -/** - * Apply operations directly to the workflow JSON state - */ -function applyOperationsToWorkflowState( - workflowState: any, - operations: EditWorkflowOperation[], - permissionConfig: PermissionGroupConfig | null = null -): ApplyOperationsResult { - // Deep clone the workflow state to avoid mutations - const modifiedState = JSON.parse(JSON.stringify(workflowState)) - - // Collect validation errors across all operations - const validationErrors: ValidationError[] = [] - - // Collect skipped items across all operations - const skippedItems: SkippedItem[] = [] - - // Log initial state - const logger = createLogger('EditWorkflowServerTool') - - // Normalize block IDs to UUIDs before processing - const { normalizedOperations } = normalizeBlockIdsInOperations(operations) - operations = normalizedOperations - - logger.info('Applying operations to workflow:', { - totalOperations: operations.length, - operationTypes: operations.reduce((acc: any, op) => { - acc[op.operation_type] = (acc[op.operation_type] || 0) + 1 - return acc - }, {}), - initialBlockCount: Object.keys(modifiedState.blocks || {}).length, - }) - - /** - * Reorder operations to ensure correct execution sequence: - * 1. delete - Remove blocks first to free up IDs and clean state - * 2. extract_from_subflow - Extract blocks from subflows before modifications - * 3. add - Create new blocks (sorted by connection dependencies) - * 4. insert_into_subflow - Insert blocks into subflows (sorted by parent dependency) - * 5. edit - Edit existing blocks last, so connections to newly added blocks work - * - * This ordering is CRITICAL: operations may reference blocks being added/inserted - * in the same batch. Without proper ordering, target blocks wouldn't exist yet. 
- * - * For add operations, we use a two-pass approach: - * - Pass 1: Create all blocks (without connections) - * - Pass 2: Add all connections (now all blocks exist) - * This ensures that if block A connects to block B, and both are being added, - * B will exist when we try to create the edge from A to B. - */ - const deletes = operations.filter((op) => op.operation_type === 'delete') - const extracts = operations.filter((op) => op.operation_type === 'extract_from_subflow') - const adds = operations.filter((op) => op.operation_type === 'add') - const inserts = operations.filter((op) => op.operation_type === 'insert_into_subflow') - const edits = operations.filter((op) => op.operation_type === 'edit') - - // Sort insert operations to ensure parents are inserted before children - // This handles cases where a loop/parallel is being added along with its children - const sortedInserts = topologicalSortInserts(inserts, adds) - - // We'll process add operations in two passes (handled in the switch statement below) - // This is tracked via a separate flag to know which pass we're in - const orderedOperations: EditWorkflowOperation[] = [ - ...deletes, - ...extracts, - ...adds, - ...sortedInserts, - ...edits, - ] - - logger.info('Operations after reordering:', { - totalOperations: orderedOperations.length, - deleteCount: deletes.length, - extractCount: extracts.length, - addCount: adds.length, - insertCount: sortedInserts.length, - editCount: edits.length, - operationOrder: orderedOperations.map( - (op) => - `${op.operation_type}:${op.block_id}${op.params?.subflowId ? 
`(parent:${op.params.subflowId})` : ''}` - ), - }) - - // Two-pass processing for add operations: - // Pass 1: Create all blocks (without connections) - // Pass 2: Add all connections (all blocks now exist) - const addOperationsWithConnections: Array<{ - blockId: string - connections: Record - }> = [] - - for (const operation of orderedOperations) { - const { operation_type, block_id, params } = operation - - // CRITICAL: Validate block_id is a valid string and not "undefined" - // This prevents undefined keys from being set in the workflow state - if (!isValidKey(block_id)) { - logSkippedItem(skippedItems, { - type: 'missing_required_params', - operationType: operation_type, - blockId: String(block_id || 'invalid'), - reason: `Invalid block_id "${block_id}" (type: ${typeof block_id}) - operation skipped. Block IDs must be valid non-empty strings.`, - }) - logger.error('Invalid block_id detected in operation', { - operation_type, - block_id, - block_id_type: typeof block_id, - }) - continue - } - - logger.debug(`Executing operation: ${operation_type} for block ${block_id}`, { - params: params ? Object.keys(params) : [], - currentBlockCount: Object.keys(modifiedState.blocks).length, - }) - - switch (operation_type) { - case 'delete': { - if (!modifiedState.blocks[block_id]) { - logSkippedItem(skippedItems, { - type: 'block_not_found', - operationType: 'delete', - blockId: block_id, - reason: `Block "${block_id}" does not exist and cannot be deleted`, - }) - break - } - - // Check if block is locked or inside a locked container - const deleteBlock = modifiedState.blocks[block_id] - const deleteParentId = deleteBlock.data?.parentId as string | undefined - const deleteParentLocked = deleteParentId - ? modifiedState.blocks[deleteParentId]?.locked - : false - if (deleteBlock.locked || deleteParentLocked) { - logSkippedItem(skippedItems, { - type: 'block_locked', - operationType: 'delete', - blockId: block_id, - reason: deleteParentLocked - ? 
`Block "${block_id}" is inside locked container "${deleteParentId}" and cannot be deleted` - : `Block "${block_id}" is locked and cannot be deleted`, - }) - break - } - - // Find all child blocks to remove - const blocksToRemove = new Set([block_id]) - const findChildren = (parentId: string) => { - Object.entries(modifiedState.blocks).forEach(([childId, child]: [string, any]) => { - if (child.data?.parentId === parentId) { - blocksToRemove.add(childId) - findChildren(childId) - } - }) - } - findChildren(block_id) - - // Remove blocks - blocksToRemove.forEach((id) => delete modifiedState.blocks[id]) - - // Remove edges connected to deleted blocks - modifiedState.edges = modifiedState.edges.filter( - (edge: any) => !blocksToRemove.has(edge.source) && !blocksToRemove.has(edge.target) - ) - break - } - - case 'edit': { - if (!modifiedState.blocks[block_id]) { - logSkippedItem(skippedItems, { - type: 'block_not_found', - operationType: 'edit', - blockId: block_id, - reason: `Block "${block_id}" does not exist and cannot be edited`, - }) - break - } - - const block = modifiedState.blocks[block_id] - - // Check if block is locked or inside a locked container - const editParentId = block.data?.parentId as string | undefined - const editParentLocked = editParentId ? modifiedState.blocks[editParentId]?.locked : false - if (block.locked || editParentLocked) { - logSkippedItem(skippedItems, { - type: 'block_locked', - operationType: 'edit', - blockId: block_id, - reason: editParentLocked - ? 
`Block "${block_id}" is inside locked container "${editParentId}" and cannot be edited` - : `Block "${block_id}" is locked and cannot be edited`, - }) - break - } - - // Ensure block has essential properties - if (!block.type) { - logger.warn(`Block ${block_id} missing type property, skipping edit`, { - blockKeys: Object.keys(block), - blockData: JSON.stringify(block), - }) - logSkippedItem(skippedItems, { - type: 'block_not_found', - operationType: 'edit', - blockId: block_id, - reason: `Block "${block_id}" exists but has no type property`, - }) - break - } - - // Update inputs (convert to subBlocks format) - if (params?.inputs) { - if (!block.subBlocks) block.subBlocks = {} - - // Validate inputs against block configuration - const validationResult = validateInputsForBlock(block.type, params.inputs, block_id) - validationErrors.push(...validationResult.errors) - - Object.entries(validationResult.validInputs).forEach(([inputKey, value]) => { - // Normalize common field name variations (LLM may use plural/singular inconsistently) - let key = inputKey - if ( - key === 'credentials' && - !block.subBlocks.credentials && - block.subBlocks.credential - ) { - key = 'credential' - } - - if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) { - return - } - let sanitizedValue = value - - // Normalize array subblocks with id fields (inputFormat, table rows, etc.) 
- if (shouldNormalizeArrayIds(key)) { - sanitizedValue = normalizeArrayWithIds(value) - } - - // Special handling for tools - normalize and filter disallowed - if (key === 'tools' && Array.isArray(value)) { - sanitizedValue = filterDisallowedTools( - normalizeTools(value), - permissionConfig, - block_id, - skippedItems - ) - } - - // Special handling for responseFormat - normalize to ensure consistent format - if (key === 'responseFormat' && value) { - sanitizedValue = normalizeResponseFormat(value) - } - - if (!block.subBlocks[key]) { - block.subBlocks[key] = { - id: key, - type: 'short-input', - value: sanitizedValue, - } - } else { - const existingValue = block.subBlocks[key].value - const valuesEqual = - typeof existingValue === 'object' || typeof sanitizedValue === 'object' - ? JSON.stringify(existingValue) === JSON.stringify(sanitizedValue) - : existingValue === sanitizedValue - - if (!valuesEqual) { - block.subBlocks[key].value = sanitizedValue - } - } - }) - - if ( - Object.hasOwn(params.inputs, 'triggerConfig') && - block.subBlocks.triggerConfig && - typeof block.subBlocks.triggerConfig.value === 'object' - ) { - applyTriggerConfigToBlockSubblocks(block, block.subBlocks.triggerConfig.value) - } - - // Update loop/parallel configuration in block.data (strict validation) - if (block.type === 'loop') { - block.data = block.data || {} - // loopType is always valid - if (params.inputs.loopType !== undefined) { - const validLoopTypes = ['for', 'forEach', 'while', 'doWhile'] - if (validLoopTypes.includes(params.inputs.loopType)) { - block.data.loopType = params.inputs.loopType - } - } - const effectiveLoopType = params.inputs.loopType ?? block.data.loopType ?? 
'for' - // iterations only valid for 'for' loopType - if (params.inputs.iterations !== undefined && effectiveLoopType === 'for') { - block.data.count = params.inputs.iterations - } - // collection only valid for 'forEach' loopType - if (params.inputs.collection !== undefined && effectiveLoopType === 'forEach') { - block.data.collection = params.inputs.collection - } - // condition only valid for 'while' or 'doWhile' loopType - if ( - params.inputs.condition !== undefined && - (effectiveLoopType === 'while' || effectiveLoopType === 'doWhile') - ) { - if (effectiveLoopType === 'doWhile') { - block.data.doWhileCondition = params.inputs.condition - } else { - block.data.whileCondition = params.inputs.condition - } - } - } else if (block.type === 'parallel') { - block.data = block.data || {} - // parallelType is always valid - if (params.inputs.parallelType !== undefined) { - const validParallelTypes = ['count', 'collection'] - if (validParallelTypes.includes(params.inputs.parallelType)) { - block.data.parallelType = params.inputs.parallelType - } - } - const effectiveParallelType = - params.inputs.parallelType ?? block.data.parallelType ?? 
'count' - // count only valid for 'count' parallelType - if (params.inputs.count !== undefined && effectiveParallelType === 'count') { - block.data.count = params.inputs.count - } - // collection only valid for 'collection' parallelType - if (params.inputs.collection !== undefined && effectiveParallelType === 'collection') { - block.data.collection = params.inputs.collection - } - } - - const editBlockConfig = getBlock(block.type) - if (editBlockConfig) { - updateCanonicalModesForInputs( - block, - Object.keys(validationResult.validInputs), - editBlockConfig - ) - } - } - - // Update basic properties - if (params?.type !== undefined) { - // Special container types (loop, parallel) are not in the block registry but are valid - const isContainerType = params.type === 'loop' || params.type === 'parallel' - - // Validate type before setting (skip validation for container types) - const blockConfig = getBlock(params.type) - if (!blockConfig && !isContainerType) { - logSkippedItem(skippedItems, { - type: 'invalid_block_type', - operationType: 'edit', - blockId: block_id, - reason: `Invalid block type "${params.type}" - type change skipped`, - details: { requestedType: params.type }, - }) - } else if (!isContainerType && !isBlockTypeAllowed(params.type, permissionConfig)) { - logSkippedItem(skippedItems, { - type: 'block_not_allowed', - operationType: 'edit', - blockId: block_id, - reason: `Block type "${params.type}" is not allowed by permission group - type change skipped`, - details: { requestedType: params.type }, - }) - } else { - block.type = params.type - } - } - if (params?.name !== undefined) { - const normalizedName = normalizeName(params.name) - if (!normalizedName) { - logSkippedItem(skippedItems, { - type: 'missing_required_params', - operationType: 'edit', - blockId: block_id, - reason: `Cannot rename to empty name`, - details: { requestedName: params.name }, - }) - } else if ((RESERVED_BLOCK_NAMES as readonly string[]).includes(normalizedName)) { - 
logSkippedItem(skippedItems, { - type: 'reserved_block_name', - operationType: 'edit', - blockId: block_id, - reason: `Cannot rename to "${params.name}" - this is a reserved name`, - details: { requestedName: params.name }, - }) - } else { - const conflictingBlock = findBlockWithDuplicateNormalizedName( - modifiedState.blocks, - params.name, - block_id - ) - - if (conflictingBlock) { - logSkippedItem(skippedItems, { - type: 'duplicate_block_name', - operationType: 'edit', - blockId: block_id, - reason: `Cannot rename to "${params.name}" - conflicts with "${conflictingBlock[1].name}"`, - details: { - requestedName: params.name, - conflictingBlockId: conflictingBlock[0], - conflictingBlockName: conflictingBlock[1].name, - }, - }) - } else { - block.name = params.name - } - } - } - - // Handle trigger mode toggle - if (typeof params?.triggerMode === 'boolean') { - block.triggerMode = params.triggerMode - - if (params.triggerMode === true) { - // Remove all incoming edges when enabling trigger mode - modifiedState.edges = modifiedState.edges.filter( - (edge: any) => edge.target !== block_id - ) - } - } - - // Handle advanced mode toggle - if (typeof params?.advancedMode === 'boolean') { - block.advancedMode = params.advancedMode - } - - // Handle nested nodes update (for loops/parallels) - if (params?.nestedNodes) { - // Remove all existing child blocks - const existingChildren = Object.keys(modifiedState.blocks).filter( - (id) => modifiedState.blocks[id].data?.parentId === block_id - ) - existingChildren.forEach((childId) => delete modifiedState.blocks[childId]) - - // Remove edges to/from removed children - modifiedState.edges = modifiedState.edges.filter( - (edge: any) => - !existingChildren.includes(edge.source) && !existingChildren.includes(edge.target) - ) - - // Add new nested blocks - Object.entries(params.nestedNodes).forEach(([childId, childBlock]: [string, any]) => { - // Validate childId is a valid string - if (!isValidKey(childId)) { - 
logSkippedItem(skippedItems, { - type: 'missing_required_params', - operationType: 'add_nested_node', - blockId: String(childId || 'invalid'), - reason: `Invalid childId "${childId}" in nestedNodes - child block skipped`, - }) - logger.error('Invalid childId detected in nestedNodes', { - parentBlockId: block_id, - childId, - childId_type: typeof childId, - }) - return - } - - if (childBlock.type === 'loop' || childBlock.type === 'parallel') { - logSkippedItem(skippedItems, { - type: 'nested_subflow_not_allowed', - operationType: 'edit_nested_node', - blockId: childId, - reason: `Cannot nest ${childBlock.type} inside ${block.type} - nested subflows are not supported`, - details: { parentType: block.type, childType: childBlock.type }, - }) - return - } - - const childBlockState = createBlockFromParams( - childId, - childBlock, - block_id, - validationErrors, - permissionConfig, - skippedItems - ) - modifiedState.blocks[childId] = childBlockState - - // Add connections for child block - if (childBlock.connections) { - addConnectionsAsEdges( - modifiedState, - childId, - childBlock.connections, - logger, - skippedItems - ) - } - }) - - // Update loop/parallel configuration based on type (strict validation) - if (block.type === 'loop') { - block.data = block.data || {} - // loopType is always valid - if (params.inputs?.loopType) { - const validLoopTypes = ['for', 'forEach', 'while', 'doWhile'] - if (validLoopTypes.includes(params.inputs.loopType)) { - block.data.loopType = params.inputs.loopType - } - } - const effectiveLoopType = params.inputs?.loopType ?? block.data.loopType ?? 
'for' - // iterations only valid for 'for' loopType - if (params.inputs?.iterations && effectiveLoopType === 'for') { - block.data.count = params.inputs.iterations - } - // collection only valid for 'forEach' loopType - if (params.inputs?.collection && effectiveLoopType === 'forEach') { - block.data.collection = params.inputs.collection - } - // condition only valid for 'while' or 'doWhile' loopType - if ( - params.inputs?.condition && - (effectiveLoopType === 'while' || effectiveLoopType === 'doWhile') - ) { - if (effectiveLoopType === 'doWhile') { - block.data.doWhileCondition = params.inputs.condition - } else { - block.data.whileCondition = params.inputs.condition - } - } - } else if (block.type === 'parallel') { - block.data = block.data || {} - // parallelType is always valid - if (params.inputs?.parallelType) { - const validParallelTypes = ['count', 'collection'] - if (validParallelTypes.includes(params.inputs.parallelType)) { - block.data.parallelType = params.inputs.parallelType - } - } - const effectiveParallelType = - params.inputs?.parallelType ?? block.data.parallelType ?? 
'count' - // count only valid for 'count' parallelType - if (params.inputs?.count && effectiveParallelType === 'count') { - block.data.count = params.inputs.count - } - // collection only valid for 'collection' parallelType - if (params.inputs?.collection && effectiveParallelType === 'collection') { - block.data.collection = params.inputs.collection - } - } - } - - // Handle connections update (convert to edges) - if (params?.connections) { - modifiedState.edges = modifiedState.edges.filter((edge: any) => edge.source !== block_id) - - Object.entries(params.connections).forEach(([connectionType, targets]) => { - if (targets === null) return - - const mapConnectionTypeToHandle = (type: string): string => { - if (type === 'success') return 'source' - if (type === 'error') return 'error' - return type - } - - const sourceHandle = mapConnectionTypeToHandle(connectionType) - - const addEdgeForTarget = (targetBlock: string, targetHandle?: string) => { - createValidatedEdge( - modifiedState, - block_id, - targetBlock, - sourceHandle, - targetHandle || 'target', - 'edit', - logger, - skippedItems - ) - } - - if (typeof targets === 'string') { - addEdgeForTarget(targets) - } else if (Array.isArray(targets)) { - targets.forEach((target: any) => { - if (typeof target === 'string') { - addEdgeForTarget(target) - } else if (target?.block) { - addEdgeForTarget(target.block, target.handle) - } - }) - } else if (typeof targets === 'object' && (targets as any)?.block) { - addEdgeForTarget((targets as any).block, (targets as any).handle) - } - }) - } - - // Handle edge removal - if (params?.removeEdges && Array.isArray(params.removeEdges)) { - params.removeEdges.forEach(({ targetBlockId, sourceHandle = 'source' }) => { - modifiedState.edges = modifiedState.edges.filter( - (edge: any) => - !( - edge.source === block_id && - edge.target === targetBlockId && - edge.sourceHandle === sourceHandle - ) - ) - }) - } - break - } - - case 'add': { - const addNormalizedName = params?.name ? 
normalizeName(params.name) : '' - if (!params?.type || !params?.name || !addNormalizedName) { - logSkippedItem(skippedItems, { - type: 'missing_required_params', - operationType: 'add', - blockId: block_id, - reason: `Missing required params (type or name) for adding block "${block_id}"`, - details: { hasType: !!params?.type, hasName: !!params?.name }, - }) - break - } - - if ((RESERVED_BLOCK_NAMES as readonly string[]).includes(addNormalizedName)) { - logSkippedItem(skippedItems, { - type: 'reserved_block_name', - operationType: 'add', - blockId: block_id, - reason: `Block name "${params.name}" is a reserved name and cannot be used`, - details: { requestedName: params.name }, - }) - break - } - - const conflictingBlock = findBlockWithDuplicateNormalizedName( - modifiedState.blocks, - params.name, - block_id - ) - - if (conflictingBlock) { - logSkippedItem(skippedItems, { - type: 'duplicate_block_name', - operationType: 'add', - blockId: block_id, - reason: `Block name "${params.name}" conflicts with existing block "${conflictingBlock[1].name}"`, - details: { - requestedName: params.name, - conflictingBlockId: conflictingBlock[0], - conflictingBlockName: conflictingBlock[1].name, - }, - }) - break - } - - // Special container types (loop, parallel) are not in the block registry but are valid - const isContainerType = params.type === 'loop' || params.type === 'parallel' - - // Validate block type before adding (skip validation for container types) - const addBlockConfig = getBlock(params.type) - if (!addBlockConfig && !isContainerType) { - logSkippedItem(skippedItems, { - type: 'invalid_block_type', - operationType: 'add', - blockId: block_id, - reason: `Invalid block type "${params.type}" - block not added`, - details: { requestedType: params.type }, - }) - break - } - - // Check if block type is allowed by permission group - if (!isContainerType && !isBlockTypeAllowed(params.type, permissionConfig)) { - logSkippedItem(skippedItems, { - type: 'block_not_allowed', - 
operationType: 'add', - blockId: block_id, - reason: `Block type "${params.type}" is not allowed by permission group - block not added`, - details: { requestedType: params.type }, - }) - break - } - - const triggerIssue = TriggerUtils.getTriggerAdditionIssue(modifiedState.blocks, params.type) - if (triggerIssue) { - logSkippedItem(skippedItems, { - type: 'duplicate_trigger', - operationType: 'add', - blockId: block_id, - reason: `Cannot add ${triggerIssue.triggerName} - a workflow can only have one`, - details: { requestedType: params.type, issue: triggerIssue.issue }, - }) - break - } - - // Check single-instance block constraints (e.g., Response block) - const singleInstanceIssue = TriggerUtils.getSingleInstanceBlockIssue( - modifiedState.blocks, - params.type - ) - if (singleInstanceIssue) { - logSkippedItem(skippedItems, { - type: 'duplicate_single_instance_block', - operationType: 'add', - blockId: block_id, - reason: `Cannot add ${singleInstanceIssue.blockName} - a workflow can only have one`, - details: { requestedType: params.type }, - }) - break - } - - // Create new block with proper structure - const newBlock = createBlockFromParams( - block_id, - params, - undefined, - validationErrors, - permissionConfig, - skippedItems - ) - - // Set loop/parallel data on parent block BEFORE adding to blocks (strict validation) - if (params.nestedNodes) { - if (params.type === 'loop') { - const validLoopTypes = ['for', 'forEach', 'while', 'doWhile'] - const loopType = - params.inputs?.loopType && validLoopTypes.includes(params.inputs.loopType) - ? 
params.inputs.loopType - : 'for' - newBlock.data = { - ...newBlock.data, - loopType, - // Only include type-appropriate fields - ...(loopType === 'forEach' && - params.inputs?.collection && { collection: params.inputs.collection }), - ...(loopType === 'for' && - params.inputs?.iterations && { count: params.inputs.iterations }), - ...(loopType === 'while' && - params.inputs?.condition && { whileCondition: params.inputs.condition }), - ...(loopType === 'doWhile' && - params.inputs?.condition && { doWhileCondition: params.inputs.condition }), - } - } else if (params.type === 'parallel') { - const validParallelTypes = ['count', 'collection'] - const parallelType = - params.inputs?.parallelType && validParallelTypes.includes(params.inputs.parallelType) - ? params.inputs.parallelType - : 'count' - newBlock.data = { - ...newBlock.data, - parallelType, - // Only include type-appropriate fields - ...(parallelType === 'collection' && - params.inputs?.collection && { collection: params.inputs.collection }), - ...(parallelType === 'count' && - params.inputs?.count && { count: params.inputs.count }), - } - } - } - - // Add parent block FIRST before adding children - // This ensures children can reference valid parentId - modifiedState.blocks[block_id] = newBlock - - // Handle nested nodes (for loops/parallels created from scratch) - if (params.nestedNodes) { - // Defensive check: verify parent is not locked before adding children - // (Parent was just created with locked: false, but check for consistency) - const parentBlock = modifiedState.blocks[block_id] - if (parentBlock?.locked) { - logSkippedItem(skippedItems, { - type: 'block_locked', - operationType: 'add_nested_nodes', - blockId: block_id, - reason: `Container "${block_id}" is locked - cannot add nested nodes`, - }) - break - } - - Object.entries(params.nestedNodes).forEach(([childId, childBlock]: [string, any]) => { - // Validate childId is a valid string - if (!isValidKey(childId)) { - logSkippedItem(skippedItems, { 
- type: 'missing_required_params', - operationType: 'add_nested_node', - blockId: String(childId || 'invalid'), - reason: `Invalid childId "${childId}" in nestedNodes - child block skipped`, - }) - logger.error('Invalid childId detected in nestedNodes', { - parentBlockId: block_id, - childId, - childId_type: typeof childId, - }) - return - } - - if (childBlock.type === 'loop' || childBlock.type === 'parallel') { - logSkippedItem(skippedItems, { - type: 'nested_subflow_not_allowed', - operationType: 'add_nested_node', - blockId: childId, - reason: `Cannot nest ${childBlock.type} inside ${params.type} - nested subflows are not supported`, - details: { parentType: params.type, childType: childBlock.type }, - }) - return - } - - const childBlockState = createBlockFromParams( - childId, - childBlock, - block_id, - validationErrors, - permissionConfig, - skippedItems - ) - modifiedState.blocks[childId] = childBlockState - - // Defer connection processing to ensure all blocks exist first - if (childBlock.connections) { - addOperationsWithConnections.push({ - blockId: childId, - connections: childBlock.connections, - }) - } - }) - } - - // Defer connection processing to ensure all blocks exist first (pass 2) - if (params.connections) { - addOperationsWithConnections.push({ - blockId: block_id, - connections: params.connections, - }) - } - break - } - - case 'insert_into_subflow': { - const subflowId = params?.subflowId - if (!subflowId || !params?.type || !params?.name) { - logSkippedItem(skippedItems, { - type: 'missing_required_params', - operationType: 'insert_into_subflow', - blockId: block_id, - reason: `Missing required params (subflowId, type, or name) for inserting block "${block_id}"`, - details: { - hasSubflowId: !!subflowId, - hasType: !!params?.type, - hasName: !!params?.name, - }, - }) - break - } - - const subflowBlock = modifiedState.blocks[subflowId] - if (!subflowBlock) { - logSkippedItem(skippedItems, { - type: 'invalid_subflow_parent', - operationType: 
'insert_into_subflow', - blockId: block_id, - reason: `Subflow block "${subflowId}" not found - block "${block_id}" not inserted`, - details: { subflowId }, - }) - break - } - - // Check if subflow is locked - if (subflowBlock.locked) { - logSkippedItem(skippedItems, { - type: 'block_locked', - operationType: 'insert_into_subflow', - blockId: block_id, - reason: `Subflow "${subflowId}" is locked - cannot insert block "${block_id}"`, - details: { subflowId }, - }) - break - } - - if (subflowBlock.type !== 'loop' && subflowBlock.type !== 'parallel') { - logger.error('Subflow block has invalid type', { - subflowId, - type: subflowBlock.type, - block_id, - }) - break - } - - if (params.type === 'loop' || params.type === 'parallel') { - logSkippedItem(skippedItems, { - type: 'nested_subflow_not_allowed', - operationType: 'insert_into_subflow', - blockId: block_id, - reason: `Cannot nest ${params.type} inside ${subflowBlock.type} - nested subflows are not supported`, - details: { parentType: subflowBlock.type, childType: params.type }, - }) - break - } - - // Get block configuration - const blockConfig = getAllBlocks().find((block) => block.type === params.type) - - // Check if block already exists (moving into subflow) or is new - const existingBlock = modifiedState.blocks[block_id] - - if (existingBlock) { - if (existingBlock.type === 'loop' || existingBlock.type === 'parallel') { - logSkippedItem(skippedItems, { - type: 'nested_subflow_not_allowed', - operationType: 'insert_into_subflow', - blockId: block_id, - reason: `Cannot move ${existingBlock.type} into ${subflowBlock.type} - nested subflows are not supported`, - details: { parentType: subflowBlock.type, childType: existingBlock.type }, - }) - break - } - - // Check if existing block is locked - if (existingBlock.locked) { - logSkippedItem(skippedItems, { - type: 'block_locked', - operationType: 'insert_into_subflow', - blockId: block_id, - reason: `Block "${block_id}" is locked and cannot be moved into a 
subflow`, - }) - break - } - - // Moving existing block into subflow - just update parent - existingBlock.data = { - ...existingBlock.data, - parentId: subflowId, - extent: 'parent' as const, - } - - // Update inputs if provided (with validation) - if (params.inputs) { - // Validate inputs against block configuration - const validationResult = validateInputsForBlock( - existingBlock.type, - params.inputs, - block_id - ) - validationErrors.push(...validationResult.errors) - - Object.entries(validationResult.validInputs).forEach(([key, value]) => { - // Skip runtime subblock IDs (webhookId, triggerPath) - if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) { - return - } - - let sanitizedValue = value - - // Normalize array subblocks with id fields (inputFormat, table rows, etc.) - if (shouldNormalizeArrayIds(key)) { - sanitizedValue = normalizeArrayWithIds(value) - } - - // Special handling for tools - normalize and filter disallowed - if (key === 'tools' && Array.isArray(value)) { - sanitizedValue = filterDisallowedTools( - normalizeTools(value), - permissionConfig, - block_id, - skippedItems - ) - } - - // Special handling for responseFormat - normalize to ensure consistent format - if (key === 'responseFormat' && value) { - sanitizedValue = normalizeResponseFormat(value) - } - - if (!existingBlock.subBlocks[key]) { - existingBlock.subBlocks[key] = { - id: key, - type: 'short-input', - value: sanitizedValue, - } - } else { - existingBlock.subBlocks[key].value = sanitizedValue - } - }) - - const existingBlockConfig = getBlock(existingBlock.type) - if (existingBlockConfig) { - updateCanonicalModesForInputs( - existingBlock, - Object.keys(validationResult.validInputs), - existingBlockConfig - ) - } - } - } else { - // Special container types (loop, parallel) are not in the block registry but are valid - const isContainerType = params.type === 'loop' || params.type === 'parallel' - - // Validate block type before creating (skip validation for container types) - const 
insertBlockConfig = getBlock(params.type) - if (!insertBlockConfig && !isContainerType) { - logSkippedItem(skippedItems, { - type: 'invalid_block_type', - operationType: 'insert_into_subflow', - blockId: block_id, - reason: `Invalid block type "${params.type}" - block not inserted into subflow`, - details: { requestedType: params.type, subflowId }, - }) - break - } - - // Check if block type is allowed by permission group - if (!isContainerType && !isBlockTypeAllowed(params.type, permissionConfig)) { - logSkippedItem(skippedItems, { - type: 'block_not_allowed', - operationType: 'insert_into_subflow', - blockId: block_id, - reason: `Block type "${params.type}" is not allowed by permission group - block not inserted`, - details: { requestedType: params.type, subflowId }, - }) - break - } - - // Create new block as child of subflow - const newBlock = createBlockFromParams( - block_id, - params, - subflowId, - validationErrors, - permissionConfig, - skippedItems - ) - modifiedState.blocks[block_id] = newBlock - } - - // Defer connection processing to ensure all blocks exist first - // This is particularly important when multiple blocks are being inserted - // and they have connections to each other - if (params.connections) { - // Remove existing edges from this block first - modifiedState.edges = modifiedState.edges.filter((edge: any) => edge.source !== block_id) - - // Add to deferred connections list - addOperationsWithConnections.push({ - blockId: block_id, - connections: params.connections, - }) - } - break - } - - case 'extract_from_subflow': { - const subflowId = params?.subflowId - if (!subflowId) { - logSkippedItem(skippedItems, { - type: 'missing_required_params', - operationType: 'extract_from_subflow', - blockId: block_id, - reason: `Missing subflowId for extracting block "${block_id}"`, - }) - break - } - - const block = modifiedState.blocks[block_id] - if (!block) { - logSkippedItem(skippedItems, { - type: 'block_not_found', - operationType: 
'extract_from_subflow', - blockId: block_id, - reason: `Block "${block_id}" not found for extraction`, - }) - break - } - - // Check if block is locked - if (block.locked) { - logSkippedItem(skippedItems, { - type: 'block_locked', - operationType: 'extract_from_subflow', - blockId: block_id, - reason: `Block "${block_id}" is locked and cannot be extracted from subflow`, - }) - break - } - - // Check if parent subflow is locked - const parentSubflow = modifiedState.blocks[subflowId] - if (parentSubflow?.locked) { - logSkippedItem(skippedItems, { - type: 'block_locked', - operationType: 'extract_from_subflow', - blockId: block_id, - reason: `Subflow "${subflowId}" is locked - cannot extract block "${block_id}"`, - details: { subflowId }, - }) - break - } - - // Verify it's actually a child of this subflow - if (block.data?.parentId !== subflowId) { - logger.warn('Block is not a child of specified subflow', { - block_id, - actualParent: block.data?.parentId, - specifiedParent: subflowId, - }) - } - - // Remove parent relationship - if (block.data) { - block.data.parentId = undefined - block.data.extent = undefined - } - - // Note: We keep the block and its edges, just remove parent relationship - // The block becomes a root-level block - break - } - } - } - - // Pass 2: Add all deferred connections from add/insert operations - // Now all blocks exist (from add, insert, and edit operations), so connections can be safely created - // This ensures that if block A connects to block B, and both are being added/inserted, - // B will exist when we create the edge from A to B - if (addOperationsWithConnections.length > 0) { - logger.info('Processing deferred connections from add/insert operations', { - deferredConnectionCount: addOperationsWithConnections.length, - totalBlocks: Object.keys(modifiedState.blocks).length, - }) - - for (const { blockId, connections } of addOperationsWithConnections) { - // Verify the source block still exists (it might have been deleted by a 
later operation) - if (!modifiedState.blocks[blockId]) { - logger.warn('Source block no longer exists for deferred connection', { - blockId, - availableBlocks: Object.keys(modifiedState.blocks), - }) - continue - } - - addConnectionsAsEdges(modifiedState, blockId, connections, logger, skippedItems) - } - - logger.info('Finished processing deferred connections', { - totalEdges: modifiedState.edges.length, - }) - } - - // Regenerate loops and parallels after modifications - modifiedState.loops = generateLoopBlocks(modifiedState.blocks) - modifiedState.parallels = generateParallelBlocks(modifiedState.blocks) - - // Validate all blocks have types before returning - const blocksWithoutType = Object.entries(modifiedState.blocks) - .filter(([_, block]: [string, any]) => !block.type || block.type === undefined) - .map(([id, block]: [string, any]) => ({ id, block })) - - if (blocksWithoutType.length > 0) { - logger.error('Blocks without type after operations:', { - blocksWithoutType: blocksWithoutType.map(({ id, block }) => ({ - id, - type: block.type, - name: block.name, - keys: Object.keys(block), - })), - }) - - // Attempt to fix by removing type-less blocks - blocksWithoutType.forEach(({ id }) => { - delete modifiedState.blocks[id] - }) - - // Remove edges connected to removed blocks - const removedIds = new Set(blocksWithoutType.map(({ id }) => id)) - modifiedState.edges = modifiedState.edges.filter( - (edge: any) => !removedIds.has(edge.source) && !removedIds.has(edge.target) - ) - } - - return { state: modifiedState, validationErrors, skippedItems } -} - -/** - * Validates selector IDs in the workflow state exist in the database - * Returns validation errors for any invalid selector IDs - */ -async function validateWorkflowSelectorIds( - workflowState: any, - context: { userId: string; workspaceId?: string } -): Promise { - const logger = createLogger('EditWorkflowSelectorValidation') - const errors: ValidationError[] = [] - - // Collect all selector fields from all 
blocks - const selectorsToValidate: Array<{ - blockId: string - blockType: string - fieldName: string - selectorType: string - value: string | string[] - }> = [] - - for (const [blockId, block] of Object.entries(workflowState.blocks || {})) { - const blockData = block as any - const blockType = blockData.type - if (!blockType) continue - - const blockConfig = getBlock(blockType) - if (!blockConfig) continue - - // Check each subBlock for selector types - for (const subBlockConfig of blockConfig.subBlocks) { - if (!SELECTOR_TYPES.has(subBlockConfig.type)) continue - - // Skip oauth-input - credentials are pre-validated before edit application - // This allows existing collaborator credentials to remain untouched - if (subBlockConfig.type === 'oauth-input') continue - - const subBlockValue = blockData.subBlocks?.[subBlockConfig.id]?.value - if (!subBlockValue) continue - - // Handle comma-separated values for multi-select - let values: string | string[] = subBlockValue - if (typeof subBlockValue === 'string' && subBlockValue.includes(',')) { - values = subBlockValue - .split(',') - .map((v: string) => v.trim()) - .filter(Boolean) - } - - selectorsToValidate.push({ - blockId, - blockType, - fieldName: subBlockConfig.id, - selectorType: subBlockConfig.type, - value: values, - }) - } - } - - if (selectorsToValidate.length === 0) { - return errors - } - - logger.info('Validating selector IDs', { - selectorCount: selectorsToValidate.length, - userId: context.userId, - workspaceId: context.workspaceId, - }) - - // Validate each selector field - for (const selector of selectorsToValidate) { - const result = await validateSelectorIds(selector.selectorType, selector.value, context) - - if (result.invalid.length > 0) { - // Include warning info (like available credentials) in the error message for better LLM feedback - const warningInfo = result.warning ? `. 
${result.warning}` : '' - errors.push({ - blockId: selector.blockId, - blockType: selector.blockType, - field: selector.fieldName, - value: selector.value, - error: `Invalid ${selector.selectorType} ID(s): ${result.invalid.join(', ')} - ID(s) do not exist or user doesn't have access${warningInfo}`, - }) - } else if (result.warning) { - // Log warnings that don't have errors (shouldn't happen for credentials but may for other selectors) - logger.warn(result.warning, { - blockId: selector.blockId, - fieldName: selector.fieldName, - }) - } - } - - if (errors.length > 0) { - logger.warn('Found invalid selector IDs', { - errorCount: errors.length, - errors: errors.map((e) => ({ blockId: e.blockId, field: e.field, error: e.error })), - }) - } - - return errors -} - -/** - * Pre-validates credential and apiKey inputs in operations before they are applied. - * - Validates oauth-input (credential) IDs belong to the user - * - Filters out apiKey inputs for hosted models when isHosted is true - * - Also validates credentials and apiKeys in nestedNodes (blocks inside loop/parallel) - * Returns validation errors for any removed inputs. 
- */ -async function preValidateCredentialInputs( - operations: EditWorkflowOperation[], - context: { userId: string }, - workflowState?: Record -): Promise<{ filteredOperations: EditWorkflowOperation[]; errors: ValidationError[] }> { - const { isHosted } = await import('@/lib/core/config/feature-flags') - const { getHostedModels } = await import('@/providers/utils') - - const logger = createLogger('PreValidateCredentials') - const errors: ValidationError[] = [] - - // Collect credential and apiKey inputs that need validation/filtering - const credentialInputs: Array<{ - operationIndex: number - blockId: string - blockType: string - fieldName: string - value: string - nestedBlockId?: string - }> = [] - - const hostedApiKeyInputs: Array<{ - operationIndex: number - blockId: string - blockType: string - model: string - nestedBlockId?: string - }> = [] - - const hostedModelsLower = isHosted ? new Set(getHostedModels().map((m) => m.toLowerCase())) : null - - /** - * Collect credential inputs from a block's inputs based on its block config - */ - function collectCredentialInputs( - blockConfig: ReturnType, - inputs: Record, - opIndex: number, - blockId: string, - blockType: string, - nestedBlockId?: string - ) { - if (!blockConfig) return - - for (const subBlockConfig of blockConfig.subBlocks) { - if (subBlockConfig.type !== 'oauth-input') continue - - const inputValue = inputs[subBlockConfig.id] - if (!inputValue || typeof inputValue !== 'string' || inputValue.trim() === '') continue - - credentialInputs.push({ - operationIndex: opIndex, - blockId, - blockType, - fieldName: subBlockConfig.id, - value: inputValue, - nestedBlockId, - }) - } - } - - /** - * Check if apiKey should be filtered for a block with the given model - */ - function collectHostedApiKeyInput( - inputs: Record, - modelValue: string | undefined, - opIndex: number, - blockId: string, - blockType: string, - nestedBlockId?: string - ) { - if (!hostedModelsLower || !inputs.apiKey) return - if (!modelValue 
|| typeof modelValue !== 'string') return - - if (hostedModelsLower.has(modelValue.toLowerCase())) { - hostedApiKeyInputs.push({ - operationIndex: opIndex, - blockId, - blockType, - model: modelValue, - nestedBlockId, - }) - } - } - - operations.forEach((op, opIndex) => { - // Process main block inputs - if (op.params?.inputs && op.params?.type) { - const blockConfig = getBlock(op.params.type) - if (blockConfig) { - // Collect credentials from main block - collectCredentialInputs( - blockConfig, - op.params.inputs as Record, - opIndex, - op.block_id, - op.params.type - ) - - // Check for apiKey inputs on hosted models - let modelValue = (op.params.inputs as Record).model as string | undefined - - // For edit operations, if model is not being changed, check existing block's model - if ( - !modelValue && - op.operation_type === 'edit' && - (op.params.inputs as Record).apiKey && - workflowState - ) { - const existingBlock = (workflowState.blocks as Record)?.[op.block_id] as - | Record - | undefined - const existingSubBlocks = existingBlock?.subBlocks as Record | undefined - const existingModelSubBlock = existingSubBlocks?.model as - | Record - | undefined - modelValue = existingModelSubBlock?.value as string | undefined - } - - collectHostedApiKeyInput( - op.params.inputs as Record, - modelValue, - opIndex, - op.block_id, - op.params.type - ) - } - } - - // Process nested nodes (blocks inside loop/parallel containers) - const nestedNodes = op.params?.nestedNodes as - | Record> - | undefined - if (nestedNodes) { - Object.entries(nestedNodes).forEach(([childId, childBlock]) => { - const childType = childBlock.type as string | undefined - const childInputs = childBlock.inputs as Record | undefined - if (!childType || !childInputs) return - - const childBlockConfig = getBlock(childType) - if (!childBlockConfig) return - - // Collect credentials from nested block - collectCredentialInputs( - childBlockConfig, - childInputs, - opIndex, - op.block_id, - childType, - childId 
- ) - - // Check for apiKey inputs on hosted models in nested block - const modelValue = childInputs.model as string | undefined - collectHostedApiKeyInput(childInputs, modelValue, opIndex, op.block_id, childType, childId) - }) - } - }) - - const hasCredentialsToValidate = credentialInputs.length > 0 - const hasHostedApiKeysToFilter = hostedApiKeyInputs.length > 0 - - if (!hasCredentialsToValidate && !hasHostedApiKeysToFilter) { - return { filteredOperations: operations, errors } - } - - // Deep clone operations so we can modify them - const filteredOperations = structuredClone(operations) - - // Filter out apiKey inputs for hosted models and add validation errors - if (hasHostedApiKeysToFilter) { - logger.info('Filtering apiKey inputs for hosted models', { count: hostedApiKeyInputs.length }) - - for (const apiKeyInput of hostedApiKeyInputs) { - const op = filteredOperations[apiKeyInput.operationIndex] - - // Handle nested block apiKey filtering - if (apiKeyInput.nestedBlockId) { - const nestedNodes = op.params?.nestedNodes as - | Record> - | undefined - const nestedBlock = nestedNodes?.[apiKeyInput.nestedBlockId] - const nestedInputs = nestedBlock?.inputs as Record | undefined - if (nestedInputs?.apiKey) { - nestedInputs.apiKey = undefined - logger.debug('Filtered apiKey for hosted model in nested block', { - parentBlockId: apiKeyInput.blockId, - nestedBlockId: apiKeyInput.nestedBlockId, - model: apiKeyInput.model, - }) - - errors.push({ - blockId: apiKeyInput.nestedBlockId, - blockType: apiKeyInput.blockType, - field: 'apiKey', - value: '[redacted]', - error: `Cannot set API key for hosted model "${apiKeyInput.model}" - API keys are managed by the platform when using hosted models`, - }) - } - } else if (op.params?.inputs?.apiKey) { - // Handle main block apiKey filtering - op.params.inputs.apiKey = undefined - logger.debug('Filtered apiKey for hosted model', { - blockId: apiKeyInput.blockId, - model: apiKeyInput.model, - }) - - errors.push({ - blockId: 
apiKeyInput.blockId, - blockType: apiKeyInput.blockType, - field: 'apiKey', - value: '[redacted]', - error: `Cannot set API key for hosted model "${apiKeyInput.model}" - API keys are managed by the platform when using hosted models`, - }) - } - } - } - - // Validate credential inputs - if (hasCredentialsToValidate) { - logger.info('Pre-validating credential inputs', { - credentialCount: credentialInputs.length, - userId: context.userId, - }) - - const allCredentialIds = credentialInputs.map((c) => c.value) - const validationResult = await validateSelectorIds('oauth-input', allCredentialIds, context) - const invalidSet = new Set(validationResult.invalid) - - if (invalidSet.size > 0) { - for (const credInput of credentialInputs) { - if (!invalidSet.has(credInput.value)) continue - - const op = filteredOperations[credInput.operationIndex] - - // Handle nested block credential removal - if (credInput.nestedBlockId) { - const nestedNodes = op.params?.nestedNodes as - | Record> - | undefined - const nestedBlock = nestedNodes?.[credInput.nestedBlockId] - const nestedInputs = nestedBlock?.inputs as Record | undefined - if (nestedInputs?.[credInput.fieldName]) { - delete nestedInputs[credInput.fieldName] - logger.info('Removed invalid credential from nested block', { - parentBlockId: credInput.blockId, - nestedBlockId: credInput.nestedBlockId, - field: credInput.fieldName, - invalidValue: credInput.value, - }) - } - } else if (op.params?.inputs?.[credInput.fieldName]) { - // Handle main block credential removal - delete op.params.inputs[credInput.fieldName] - logger.info('Removed invalid credential from operation', { - blockId: credInput.blockId, - field: credInput.fieldName, - invalidValue: credInput.value, - }) - } - - const warningInfo = validationResult.warning ? `. ${validationResult.warning}` : '' - const errorBlockId = credInput.nestedBlockId ?? 
credInput.blockId - errors.push({ - blockId: errorBlockId, - blockType: credInput.blockType, - field: credInput.fieldName, - value: credInput.value, - error: `Invalid credential ID "${credInput.value}" - credential does not exist or user doesn't have access${warningInfo}`, - }) - } - - logger.warn('Filtered out invalid credentials', { - invalidCount: invalidSet.size, - }) - } - } - - return { filteredOperations, errors } -} - -async function getCurrentWorkflowStateFromDb( - workflowId: string -): Promise<{ workflowState: any; subBlockValues: Record> }> { - const logger = createLogger('EditWorkflowServerTool') - const [workflowRecord] = await db - .select() - .from(workflowTable) - .where(eq(workflowTable.id, workflowId)) - .limit(1) - if (!workflowRecord) throw new Error(`Workflow ${workflowId} not found in database`) - const normalized = await loadWorkflowFromNormalizedTables(workflowId) - if (!normalized) throw new Error('Workflow has no normalized data') - - // Validate and fix blocks without types - const blocks = { ...normalized.blocks } - const invalidBlocks: string[] = [] - - Object.entries(blocks).forEach(([id, block]: [string, any]) => { - if (!block.type) { - logger.warn(`Block ${id} loaded without type from database`, { - blockKeys: Object.keys(block), - blockName: block.name, - }) - invalidBlocks.push(id) - } - }) - - // Remove invalid blocks - invalidBlocks.forEach((id) => delete blocks[id]) - - // Remove edges connected to invalid blocks - const edges = normalized.edges.filter( - (edge: any) => !invalidBlocks.includes(edge.source) && !invalidBlocks.includes(edge.target) - ) - - const workflowState: any = { - blocks, - edges, - loops: normalized.loops || {}, - parallels: normalized.parallels || {}, - } - const subBlockValues: Record> = {} - Object.entries(normalized.blocks).forEach(([blockId, block]) => { - subBlockValues[blockId] = {} - Object.entries((block as any).subBlocks || {}).forEach(([subId, sub]) => { - if ((sub as any).value !== undefined) 
subBlockValues[blockId][subId] = (sub as any).value - }) - }) - return { workflowState, subBlockValues } -} - -export const editWorkflowServerTool: BaseServerTool = { - name: 'edit_workflow', - async execute(params: EditWorkflowParams, context?: { userId: string }): Promise { - const logger = createLogger('EditWorkflowServerTool') - const { operations, workflowId, currentUserWorkflow } = params - if (!Array.isArray(operations) || operations.length === 0) { - throw new Error('operations are required and must be an array') - } - if (!workflowId) throw new Error('workflowId is required') - - logger.info('Executing edit_workflow', { - operationCount: operations.length, - workflowId, - hasCurrentUserWorkflow: !!currentUserWorkflow, - }) - - // Get current workflow state - let workflowState: any - if (currentUserWorkflow) { - try { - workflowState = JSON.parse(currentUserWorkflow) - } catch (error) { - logger.error('Failed to parse currentUserWorkflow', error) - throw new Error('Invalid currentUserWorkflow format') - } - } else { - const fromDb = await getCurrentWorkflowStateFromDb(workflowId) - workflowState = fromDb.workflowState - } - - // Get permission config for the user - const permissionConfig = context?.userId ? 
await getUserPermissionConfig(context.userId) : null - - // Pre-validate credential and apiKey inputs before applying operations - // This filters out invalid credentials and apiKeys for hosted models - let operationsToApply = operations - const credentialErrors: ValidationError[] = [] - if (context?.userId) { - const { filteredOperations, errors: credErrors } = await preValidateCredentialInputs( - operations, - { userId: context.userId }, - workflowState - ) - operationsToApply = filteredOperations - credentialErrors.push(...credErrors) - } - - // Apply operations directly to the workflow state - const { - state: modifiedWorkflowState, - validationErrors, - skippedItems, - } = applyOperationsToWorkflowState(workflowState, operationsToApply, permissionConfig) - - // Add credential validation errors - validationErrors.push(...credentialErrors) - - // Get workspaceId for selector validation - let workspaceId: string | undefined - try { - const [workflowRecord] = await db - .select({ workspaceId: workflowTable.workspaceId }) - .from(workflowTable) - .where(eq(workflowTable.id, workflowId)) - .limit(1) - workspaceId = workflowRecord?.workspaceId ?? undefined - } catch (error) { - logger.warn('Failed to get workspaceId for selector validation', { error, workflowId }) - } - - // Validate selector IDs exist in the database - if (context?.userId) { - try { - const selectorErrors = await validateWorkflowSelectorIds(modifiedWorkflowState, { - userId: context.userId, - workspaceId, - }) - validationErrors.push(...selectorErrors) - } catch (error) { - logger.warn('Selector ID validation failed', { - error: error instanceof Error ? 
error.message : String(error), - }) - } - } - - // Validate the workflow state - const validation = validateWorkflowState(modifiedWorkflowState, { sanitize: true }) - - if (!validation.valid) { - logger.error('Edited workflow state is invalid', { - errors: validation.errors, - warnings: validation.warnings, - }) - throw new Error(`Invalid edited workflow: ${validation.errors.join('; ')}`) - } - - if (validation.warnings.length > 0) { - logger.warn('Edited workflow validation warnings', { - warnings: validation.warnings, - }) - } - - // Extract and persist custom tools to database (reuse workspaceId from selector validation) - if (context?.userId && workspaceId) { - try { - const finalWorkflowState = validation.sanitizedState || modifiedWorkflowState - const { saved, errors } = await extractAndPersistCustomTools( - finalWorkflowState, - workspaceId, - context.userId - ) - - if (saved > 0) { - logger.info(`Persisted ${saved} custom tool(s) to database`, { workflowId }) - } - - if (errors.length > 0) { - logger.warn('Some custom tools failed to persist', { errors, workflowId }) - } - } catch (error) { - logger.error('Failed to persist custom tools', { error, workflowId }) - } - } else if (context?.userId && !workspaceId) { - logger.warn('Workflow has no workspaceId, skipping custom tools persistence', { - workflowId, - }) - } else { - logger.warn('No userId in context - skipping custom tools persistence', { workflowId }) - } - - logger.info('edit_workflow successfully applied operations', { - operationCount: operations.length, - blocksCount: Object.keys(modifiedWorkflowState.blocks).length, - edgesCount: modifiedWorkflowState.edges.length, - inputValidationErrors: validationErrors.length, - skippedItemsCount: skippedItems.length, - schemaValidationErrors: validation.errors.length, - validationWarnings: validation.warnings.length, - }) - - // Format validation errors for LLM feedback - const inputErrors = - validationErrors.length > 0 - ? 
validationErrors.map((e) => `Block "${e.blockId}" (${e.blockType}): ${e.error}`) - : undefined - - // Format skipped items for LLM feedback - const skippedMessages = - skippedItems.length > 0 ? skippedItems.map((item) => item.reason) : undefined - - // Persist the workflow state to the database - const finalWorkflowState = validation.sanitizedState || modifiedWorkflowState - - // Apply autolayout to position blocks properly - const layoutResult = applyAutoLayout(finalWorkflowState.blocks, finalWorkflowState.edges, { - horizontalSpacing: 250, - verticalSpacing: 100, - padding: { x: 100, y: 100 }, - }) - - const layoutedBlocks = - layoutResult.success && layoutResult.blocks ? layoutResult.blocks : finalWorkflowState.blocks - - if (!layoutResult.success) { - logger.warn('Autolayout failed, using default positions', { - workflowId, - error: layoutResult.error, - }) - } - - const workflowStateForDb = { - blocks: layoutedBlocks, - edges: finalWorkflowState.edges, - loops: generateLoopBlocks(layoutedBlocks as any), - parallels: generateParallelBlocks(layoutedBlocks as any), - lastSaved: Date.now(), - isDeployed: false, - } - - const saveResult = await saveWorkflowToNormalizedTables(workflowId, workflowStateForDb as any) - if (!saveResult.success) { - logger.error('Failed to persist workflow state to database', { - workflowId, - error: saveResult.error, - }) - throw new Error(`Failed to save workflow: ${saveResult.error}`) - } - - // Update workflow's lastSynced timestamp - await db - .update(workflowTable) - .set({ - lastSynced: new Date(), - updatedAt: new Date(), - }) - .where(eq(workflowTable.id, workflowId)) - - logger.info('Workflow state persisted to database', { workflowId }) - - // Return the modified workflow state with autolayout applied - return { - success: true, - workflowState: { ...finalWorkflowState, blocks: layoutedBlocks }, - // Include input validation errors so the LLM can see what was rejected - ...(inputErrors && { - inputValidationErrors: 
inputErrors, - inputValidationMessage: `${inputErrors.length} input(s) were rejected due to validation errors. The workflow was still updated with valid inputs only. Errors: ${inputErrors.join('; ')}`, - }), - // Include skipped items so the LLM can see what operations were skipped - ...(skippedMessages && { - skippedItems: skippedMessages, - skippedItemsMessage: `${skippedItems.length} operation(s) were skipped due to invalid references. Details: ${skippedMessages.join('; ')}`, - }), - } - }, -} diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/builders.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/builders.ts new file mode 100644 index 0000000000..7f46294f06 --- /dev/null +++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/builders.ts @@ -0,0 +1,633 @@ +import crypto from 'crypto' +import { createLogger } from '@sim/logger' +import type { PermissionGroupConfig } from '@/lib/permission-groups/types' +import { getBlockOutputs } from '@/lib/workflows/blocks/block-outputs' +import { buildCanonicalIndex, isCanonicalPair } from '@/lib/workflows/subblocks/visibility' +import { getAllBlocks } from '@/blocks/registry' +import type { BlockConfig } from '@/blocks/types' +import { TRIGGER_RUNTIME_SUBBLOCK_IDS } from '@/triggers/constants' +import type { EditWorkflowOperation, SkippedItem, ValidationError } from './types' +import { UUID_REGEX, logSkippedItem } from './types' +import { + validateInputsForBlock, + validateSourceHandleForBlock, + validateTargetHandle, +} from './validation' + +/** + * Helper to create a block state from operation params + */ +export function createBlockFromParams( + blockId: string, + params: any, + parentId?: string, + errorsCollector?: ValidationError[], + permissionConfig?: PermissionGroupConfig | null, + skippedItems?: SkippedItem[] +): any { + const blockConfig = getAllBlocks().find((b) => b.type === params.type) + + // Validate inputs against block configuration + let validatedInputs: Record | 
undefined + if (params.inputs) { + const result = validateInputsForBlock(params.type, params.inputs, blockId) + validatedInputs = result.validInputs + if (errorsCollector && result.errors.length > 0) { + errorsCollector.push(...result.errors) + } + } + + // Determine outputs based on trigger mode + const triggerMode = params.triggerMode || false + let outputs: Record + + if (params.outputs) { + outputs = params.outputs + } else if (blockConfig) { + const subBlocks: Record = {} + if (validatedInputs) { + Object.entries(validatedInputs).forEach(([key, value]) => { + // Skip runtime subblock IDs when computing outputs + if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) { + return + } + subBlocks[key] = { id: key, type: 'short-input', value: value } + }) + } + outputs = getBlockOutputs(params.type, subBlocks, triggerMode) + } else { + outputs = {} + } + + const blockState: any = { + id: blockId, + type: params.type, + name: params.name, + position: { x: 0, y: 0 }, + enabled: params.enabled !== undefined ? params.enabled : true, + horizontalHandles: true, + advancedMode: params.advancedMode || false, + height: 0, + triggerMode: triggerMode, + subBlocks: {}, + outputs: outputs, + data: parentId ? { parentId, extent: 'parent' as const } : {}, + locked: false, + } + + // Add validated inputs as subBlocks + if (validatedInputs) { + Object.entries(validatedInputs).forEach(([key, value]) => { + if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) { + return + } + + let sanitizedValue = value + + // Normalize array subblocks with id fields (inputFormat, table rows, etc.) + if (shouldNormalizeArrayIds(key)) { + sanitizedValue = normalizeArrayWithIds(value) + } + + // Special handling for tools - normalize and filter disallowed + if (key === 'tools' && Array.isArray(value)) { + sanitizedValue = filterDisallowedTools( + normalizeTools(value), + permissionConfig ?? null, + blockId, + skippedItems ?? 
[] + ) + } + + // Special handling for responseFormat - normalize to ensure consistent format + if (key === 'responseFormat' && value) { + sanitizedValue = normalizeResponseFormat(value) + } + + blockState.subBlocks[key] = { + id: key, + type: 'short-input', + value: sanitizedValue, + } + }) + } + + // Set up subBlocks from block configuration + if (blockConfig) { + blockConfig.subBlocks.forEach((subBlock) => { + if (!blockState.subBlocks[subBlock.id]) { + blockState.subBlocks[subBlock.id] = { + id: subBlock.id, + type: subBlock.type, + value: null, + } + } + }) + + if (validatedInputs) { + updateCanonicalModesForInputs(blockState, Object.keys(validatedInputs), blockConfig) + } + } + + return blockState +} + +export function updateCanonicalModesForInputs( + block: { data?: { canonicalModes?: Record } }, + inputKeys: string[], + blockConfig: BlockConfig +): void { + if (!blockConfig.subBlocks?.length) return + + const canonicalIndex = buildCanonicalIndex(blockConfig.subBlocks) + const canonicalModeUpdates: Record = {} + + for (const inputKey of inputKeys) { + const canonicalId = canonicalIndex.canonicalIdBySubBlockId[inputKey] + if (!canonicalId) continue + + const group = canonicalIndex.groupsById[canonicalId] + if (!group || !isCanonicalPair(group)) continue + + const isAdvanced = group.advancedIds.includes(inputKey) + const existingMode = canonicalModeUpdates[canonicalId] + + if (!existingMode || isAdvanced) { + canonicalModeUpdates[canonicalId] = isAdvanced ? 
'advanced' : 'basic' + } + } + + if (Object.keys(canonicalModeUpdates).length > 0) { + if (!block.data) block.data = {} + if (!block.data.canonicalModes) block.data.canonicalModes = {} + Object.assign(block.data.canonicalModes, canonicalModeUpdates) + } +} + +/** + * Normalize tools array by adding back fields that were sanitized for training + */ +export function normalizeTools(tools: any[]): any[] { + return tools.map((tool) => { + if (tool.type === 'custom-tool') { + // New reference format: minimal fields only + if (tool.customToolId && !tool.schema && !tool.code) { + return { + type: tool.type, + customToolId: tool.customToolId, + usageControl: tool.usageControl || 'auto', + isExpanded: tool.isExpanded ?? true, + } + } + + // Legacy inline format: include all fields + const normalized: any = { + ...tool, + params: tool.params || {}, + isExpanded: tool.isExpanded ?? true, + } + + // Ensure schema has proper structure (for inline format) + if (normalized.schema?.function) { + normalized.schema = { + type: 'function', + function: { + name: normalized.schema.function.name || tool.title, // Preserve name or derive from title + description: normalized.schema.function.description, + parameters: normalized.schema.function.parameters, + }, + } + } + + return normalized + } + + // For other tool types, just ensure isExpanded exists + return { + ...tool, + isExpanded: tool.isExpanded ?? true, + } + }) +} + +/** + * Subblock types that store arrays of objects with `id` fields. + * The LLM may generate arbitrary IDs which need to be converted to proper UUIDs. + */ +const ARRAY_WITH_ID_SUBBLOCK_TYPES = new Set([ + 'inputFormat', // input-format: Fields with id, name, type, value, collapsed + 'headers', // table: Rows with id, cells (used for HTTP headers) + 'params', // table: Rows with id, cells (used for query params) + 'variables', // table or variables-input: Rows/assignments with id + 'tagFilters', // knowledge-tag-filters: Filters with id, tagName, etc. 
+ 'documentTags', // document-tag-entry: Tags with id, tagName, etc. + 'metrics', // eval-input: Metrics with id, name, description, range +]) + +/** + * Normalizes array subblock values by ensuring each item has a valid UUID. + * The LLM may generate arbitrary IDs like "input-desc-001" or "row-1" which need + * to be converted to proper UUIDs for consistency with UI-created items. + */ +export function normalizeArrayWithIds(value: unknown): any[] { + if (!Array.isArray(value)) { + return [] + } + + return value.map((item: any) => { + if (!item || typeof item !== 'object') { + return item + } + + // Check if id is missing or not a valid UUID + const hasValidUUID = typeof item.id === 'string' && UUID_REGEX.test(item.id) + if (!hasValidUUID) { + return { ...item, id: crypto.randomUUID() } + } + + return item + }) +} + +/** + * Checks if a subblock key should have its array items normalized with UUIDs. + */ +export function shouldNormalizeArrayIds(key: string): boolean { + return ARRAY_WITH_ID_SUBBLOCK_TYPES.has(key) +} + +/** + * Normalize responseFormat to ensure consistent storage + * Handles both string (JSON) and object formats + * Returns pretty-printed JSON for better UI readability + */ +export function normalizeResponseFormat(value: any): string { + try { + let obj = value + + // If it's already a string, parse it first + if (typeof value === 'string') { + const trimmed = value.trim() + if (!trimmed) { + return '' + } + obj = JSON.parse(trimmed) + } + + // If it's an object, stringify it with consistent formatting + if (obj && typeof obj === 'object') { + // Sort keys recursively for consistent comparison + const sortKeys = (item: any): any => { + if (Array.isArray(item)) { + return item.map(sortKeys) + } + if (item !== null && typeof item === 'object') { + return Object.keys(item) + .sort() + .reduce((result: any, key: string) => { + result[key] = sortKeys(item[key]) + return result + }, {}) + } + return item + } + + // Return pretty-printed with 2-space 
indentation for UI readability + // The sanitizer will normalize it to minified format for comparison + return JSON.stringify(sortKeys(obj), null, 2) + } + + return String(value) + } catch { + // If parsing fails, return the original value as string + return String(value) + } +} + +/** + * Creates a validated edge between two blocks. + * Returns true if edge was created, false if skipped due to validation errors. + */ +export function createValidatedEdge( + modifiedState: any, + sourceBlockId: string, + targetBlockId: string, + sourceHandle: string, + targetHandle: string, + operationType: string, + logger: ReturnType, + skippedItems?: SkippedItem[] +): boolean { + if (!modifiedState.blocks[targetBlockId]) { + logger.warn(`Target block "${targetBlockId}" not found. Edge skipped.`, { + sourceBlockId, + targetBlockId, + sourceHandle, + }) + skippedItems?.push({ + type: 'invalid_edge_target', + operationType, + blockId: sourceBlockId, + reason: `Edge from "${sourceBlockId}" to "${targetBlockId}" skipped - target block does not exist`, + details: { sourceHandle, targetHandle, targetId: targetBlockId }, + }) + return false + } + + const sourceBlock = modifiedState.blocks[sourceBlockId] + if (!sourceBlock) { + logger.warn(`Source block "${sourceBlockId}" not found. Edge skipped.`, { + sourceBlockId, + targetBlockId, + }) + skippedItems?.push({ + type: 'invalid_edge_source', + operationType, + blockId: sourceBlockId, + reason: `Edge from "${sourceBlockId}" to "${targetBlockId}" skipped - source block does not exist`, + details: { sourceHandle, targetHandle, targetId: targetBlockId }, + }) + return false + } + + const sourceBlockType = sourceBlock.type + if (!sourceBlockType) { + logger.warn(`Source block "${sourceBlockId}" has no type. 
Edge skipped.`, { + sourceBlockId, + targetBlockId, + }) + skippedItems?.push({ + type: 'invalid_edge_source', + operationType, + blockId: sourceBlockId, + reason: `Edge from "${sourceBlockId}" to "${targetBlockId}" skipped - source block has no type`, + details: { sourceHandle, targetHandle, targetId: targetBlockId }, + }) + return false + } + + const sourceValidation = validateSourceHandleForBlock(sourceHandle, sourceBlockType, sourceBlock) + if (!sourceValidation.valid) { + logger.warn(`Invalid source handle. Edge skipped.`, { + sourceBlockId, + targetBlockId, + sourceHandle, + error: sourceValidation.error, + }) + skippedItems?.push({ + type: 'invalid_source_handle', + operationType, + blockId: sourceBlockId, + reason: sourceValidation.error || `Invalid source handle "${sourceHandle}"`, + details: { sourceHandle, targetHandle, targetId: targetBlockId }, + }) + return false + } + + const targetValidation = validateTargetHandle(targetHandle) + if (!targetValidation.valid) { + logger.warn(`Invalid target handle. Edge skipped.`, { + sourceBlockId, + targetBlockId, + targetHandle, + error: targetValidation.error, + }) + skippedItems?.push({ + type: 'invalid_target_handle', + operationType, + blockId: sourceBlockId, + reason: targetValidation.error || `Invalid target handle "${targetHandle}"`, + details: { sourceHandle, targetHandle, targetId: targetBlockId }, + }) + return false + } + + // Use normalized handle if available (e.g., 'if' -> 'condition-{uuid}') + const finalSourceHandle = sourceValidation.normalizedHandle || sourceHandle + + modifiedState.edges.push({ + id: crypto.randomUUID(), + source: sourceBlockId, + sourceHandle: finalSourceHandle, + target: targetBlockId, + targetHandle, + type: 'default', + }) + return true +} + +/** + * Adds connections as edges for a block. 
+ * Supports multiple target formats: + * - String: "target-block-id" + * - Object: { block: "target-block-id", handle?: "custom-target-handle" } + * - Array of strings or objects + */ +export function addConnectionsAsEdges( + modifiedState: any, + blockId: string, + connections: Record, + logger: ReturnType, + skippedItems?: SkippedItem[] +): void { + Object.entries(connections).forEach(([sourceHandle, targets]) => { + if (targets === null) return + + const addEdgeForTarget = (targetBlock: string, targetHandle?: string) => { + createValidatedEdge( + modifiedState, + blockId, + targetBlock, + sourceHandle, + targetHandle || 'target', + 'add_edge', + logger, + skippedItems + ) + } + + if (typeof targets === 'string') { + addEdgeForTarget(targets) + } else if (Array.isArray(targets)) { + targets.forEach((target: any) => { + if (typeof target === 'string') { + addEdgeForTarget(target) + } else if (target?.block) { + addEdgeForTarget(target.block, target.handle) + } + }) + } else if (typeof targets === 'object' && targets?.block) { + addEdgeForTarget(targets.block, targets.handle) + } + }) +} + +export function applyTriggerConfigToBlockSubblocks(block: any, triggerConfig: Record) { + if (!block?.subBlocks || !triggerConfig || typeof triggerConfig !== 'object') { + return + } + + Object.entries(triggerConfig).forEach(([configKey, configValue]) => { + const existingSubblock = block.subBlocks[configKey] + if (existingSubblock) { + const existingValue = existingSubblock.value + const valuesEqual = + typeof existingValue === 'object' || typeof configValue === 'object' + ? 
JSON.stringify(existingValue) === JSON.stringify(configValue) + : existingValue === configValue + + if (valuesEqual) { + return + } + + block.subBlocks[configKey] = { + ...existingSubblock, + value: configValue, + } + } else { + block.subBlocks[configKey] = { + id: configKey, + type: 'short-input', + value: configValue, + } + } + }) +} + +/** + * Filters out tools that are not allowed by the permission group config + * Returns both the allowed tools and any skipped tool items for logging + */ +export function filterDisallowedTools( + tools: any[], + permissionConfig: PermissionGroupConfig | null, + blockId: string, + skippedItems: SkippedItem[] +): any[] { + if (!permissionConfig) { + return tools + } + + const allowedTools: any[] = [] + + for (const tool of tools) { + if (tool.type === 'custom-tool' && permissionConfig.disableCustomTools) { + logSkippedItem(skippedItems, { + type: 'tool_not_allowed', + operationType: 'add', + blockId, + reason: `Custom tool "${tool.title || tool.customToolId || 'unknown'}" is not allowed by permission group - tool not added`, + details: { toolType: 'custom-tool', toolId: tool.customToolId }, + }) + continue + } + if (tool.type === 'mcp' && permissionConfig.disableMcpTools) { + logSkippedItem(skippedItems, { + type: 'tool_not_allowed', + operationType: 'add', + blockId, + reason: `MCP tool "${tool.title || 'unknown'}" is not allowed by permission group - tool not added`, + details: { toolType: 'mcp', serverId: tool.params?.serverId }, + }) + continue + } + allowedTools.push(tool) + } + + return allowedTools +} + +/** + * Normalizes block IDs in operations to ensure they are valid UUIDs. + * The LLM may generate human-readable IDs like "web_search" or "research_agent" + * which need to be converted to proper UUIDs for database compatibility. + * + * Returns the normalized operations and a mapping from old IDs to new UUIDs. 
+ */ +export function normalizeBlockIdsInOperations(operations: EditWorkflowOperation[]): { + normalizedOperations: EditWorkflowOperation[] + idMapping: Map +} { + const logger = createLogger('EditWorkflowServerTool') + const idMapping = new Map() + + // First pass: collect all non-UUID block_ids from add/insert operations + for (const op of operations) { + if (op.operation_type === 'add' || op.operation_type === 'insert_into_subflow') { + if (op.block_id && !UUID_REGEX.test(op.block_id)) { + const newId = crypto.randomUUID() + idMapping.set(op.block_id, newId) + logger.debug('Normalizing block ID', { oldId: op.block_id, newId }) + } + } + } + + if (idMapping.size === 0) { + return { normalizedOperations: operations, idMapping } + } + + logger.info('Normalizing block IDs in operations', { + normalizedCount: idMapping.size, + mappings: Object.fromEntries(idMapping), + }) + + // Helper to replace an ID if it's in the mapping + const replaceId = (id: string | undefined): string | undefined => { + if (!id) return id + return idMapping.get(id) ?? id + } + + // Second pass: update all references to use new UUIDs + const normalizedOperations = operations.map((op) => { + const normalized: EditWorkflowOperation = { + ...op, + block_id: replaceId(op.block_id) ?? 
op.block_id, + } + + if (op.params) { + normalized.params = { ...op.params } + + // Update subflowId references (for insert_into_subflow) + if (normalized.params.subflowId) { + normalized.params.subflowId = replaceId(normalized.params.subflowId) + } + + // Update connection references + if (normalized.params.connections) { + const normalizedConnections: Record = {} + for (const [handle, targets] of Object.entries(normalized.params.connections)) { + if (typeof targets === 'string') { + normalizedConnections[handle] = replaceId(targets) + } else if (Array.isArray(targets)) { + normalizedConnections[handle] = targets.map((t) => { + if (typeof t === 'string') return replaceId(t) + if (t && typeof t === 'object' && t.block) { + return { ...t, block: replaceId(t.block) } + } + return t + }) + } else if (targets && typeof targets === 'object' && (targets as any).block) { + normalizedConnections[handle] = { ...targets, block: replaceId((targets as any).block) } + } else { + normalizedConnections[handle] = targets + } + } + normalized.params.connections = normalizedConnections + } + + // Update nestedNodes block IDs + if (normalized.params.nestedNodes) { + const normalizedNestedNodes: Record = {} + for (const [childId, childBlock] of Object.entries(normalized.params.nestedNodes)) { + const newChildId = replaceId(childId) ?? 
childId + normalizedNestedNodes[newChildId] = childBlock + } + normalized.params.nestedNodes = normalizedNestedNodes + } + } + + return normalized + }) + + return { normalizedOperations, idMapping } +} diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/engine.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/engine.ts new file mode 100644 index 0000000000..6a5c47246b --- /dev/null +++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/engine.ts @@ -0,0 +1,274 @@ +import { createLogger } from '@sim/logger' +import type { PermissionGroupConfig } from '@/lib/permission-groups/types' +import { isValidKey } from '@/lib/workflows/sanitization/key-validation' +import { generateLoopBlocks, generateParallelBlocks } from '@/stores/workflows/workflow/utils' +import { addConnectionsAsEdges, normalizeBlockIdsInOperations } from './builders' +import { + handleAddOperation, + handleDeleteOperation, + handleEditOperation, + handleExtractFromSubflowOperation, + handleInsertIntoSubflowOperation, +} from './operations' +import type { + ApplyOperationsResult, + EditWorkflowOperation, + OperationContext, + ValidationError, +} from './types' +import { logSkippedItem, type SkippedItem } from './types' + +const logger = createLogger('EditWorkflowServerTool') + +type OperationHandler = (op: EditWorkflowOperation, ctx: OperationContext) => void + +const OPERATION_HANDLERS: Record = { + delete: handleDeleteOperation, + extract_from_subflow: handleExtractFromSubflowOperation, + add: handleAddOperation, + insert_into_subflow: handleInsertIntoSubflowOperation, + edit: handleEditOperation, +} + +/** + * Topologically sort insert operations to ensure parents are created before children + * Returns sorted array where parent inserts always come before child inserts + */ +export function topologicalSortInserts( + inserts: EditWorkflowOperation[], + adds: EditWorkflowOperation[] +): EditWorkflowOperation[] { + if (inserts.length === 0) return [] + + // Build a 
map of blockId -> operation for quick lookup + const insertMap = new Map() + inserts.forEach((op) => insertMap.set(op.block_id, op)) + + // Build a set of blocks being added (potential parents) + const addedBlocks = new Set(adds.map((op) => op.block_id)) + + // Build dependency graph: block -> blocks that depend on it + const dependents = new Map>() + const dependencies = new Map>() + + inserts.forEach((op) => { + const blockId = op.block_id + const parentId = op.params?.subflowId + + dependencies.set(blockId, new Set()) + + if (parentId) { + // Track dependency if parent is being inserted OR being added + // This ensures children wait for parents regardless of operation type + const parentBeingCreated = insertMap.has(parentId) || addedBlocks.has(parentId) + + if (parentBeingCreated) { + // Only add dependency if parent is also being inserted (not added) + // Because adds run before inserts, added parents are already created + if (insertMap.has(parentId)) { + dependencies.get(blockId)!.add(parentId) + if (!dependents.has(parentId)) { + dependents.set(parentId, new Set()) + } + dependents.get(parentId)!.add(blockId) + } + } + } + }) + + // Topological sort using Kahn's algorithm + const sorted: EditWorkflowOperation[] = [] + const queue: string[] = [] + + // Start with nodes that have no dependencies (or depend only on added blocks) + inserts.forEach((op) => { + const deps = dependencies.get(op.block_id)! + if (deps.size === 0) { + queue.push(op.block_id) + } + }) + + while (queue.length > 0) { + const blockId = queue.shift()! + const op = insertMap.get(blockId) + if (op) { + sorted.push(op) + } + + // Remove this node from dependencies of others + const children = dependents.get(blockId) + if (children) { + children.forEach((childId) => { + const childDeps = dependencies.get(childId)! 
+ childDeps.delete(blockId) + if (childDeps.size === 0) { + queue.push(childId) + } + }) + } + } + + // If sorted length doesn't match input, there's a cycle (shouldn't happen with valid operations) + // Just append remaining operations + if (sorted.length < inserts.length) { + inserts.forEach((op) => { + if (!sorted.includes(op)) { + sorted.push(op) + } + }) + } + + return sorted +} + +function orderOperations(operations: EditWorkflowOperation[]): EditWorkflowOperation[] { + /** + * Reorder operations to ensure correct execution sequence: + * 1. delete - Remove blocks first to free up IDs and clean state + * 2. extract_from_subflow - Extract blocks from subflows before modifications + * 3. add - Create new blocks (sorted by connection dependencies) + * 4. insert_into_subflow - Insert blocks into subflows (sorted by parent dependency) + * 5. edit - Edit existing blocks last, so connections to newly added blocks work + */ + const deletes = operations.filter((op) => op.operation_type === 'delete') + const extracts = operations.filter((op) => op.operation_type === 'extract_from_subflow') + const adds = operations.filter((op) => op.operation_type === 'add') + const inserts = operations.filter((op) => op.operation_type === 'insert_into_subflow') + const edits = operations.filter((op) => op.operation_type === 'edit') + + // Sort insert operations to ensure parents are inserted before children + const sortedInserts = topologicalSortInserts(inserts, adds) + + return [...deletes, ...extracts, ...adds, ...sortedInserts, ...edits] +} + +/** + * Apply operations directly to the workflow JSON state + */ +export function applyOperationsToWorkflowState( + workflowState: Record, + operations: EditWorkflowOperation[], + permissionConfig: PermissionGroupConfig | null = null +): ApplyOperationsResult { + // Deep clone the workflow state to avoid mutations + const modifiedState = JSON.parse(JSON.stringify(workflowState)) + + // Collect validation errors across all operations + const 
validationErrors: ValidationError[] = [] + + // Collect skipped items across all operations + const skippedItems: SkippedItem[] = [] + + // Normalize block IDs to UUIDs before processing + const { normalizedOperations } = normalizeBlockIdsInOperations(operations) + + // Order operations for deterministic application + const orderedOperations = orderOperations(normalizedOperations) + + logger.info('Applying operations to workflow:', { + totalOperations: orderedOperations.length, + operationTypes: orderedOperations.reduce((acc: Record, op) => { + acc[op.operation_type] = (acc[op.operation_type] || 0) + 1 + return acc + }, {}), + initialBlockCount: Object.keys((modifiedState as any).blocks || {}).length, + }) + + const ctx: OperationContext = { + modifiedState, + skippedItems, + validationErrors, + permissionConfig, + deferredConnections: [], + } + + for (const operation of orderedOperations) { + const { operation_type, block_id } = operation + + // CRITICAL: Validate block_id is a valid string and not "undefined" + // This prevents undefined keys from being set in the workflow state + if (!isValidKey(block_id)) { + logSkippedItem(skippedItems, { + type: 'missing_required_params', + operationType: operation_type, + blockId: String(block_id || 'invalid'), + reason: `Invalid block_id "${block_id}" (type: ${typeof block_id}) - operation skipped. Block IDs must be valid non-empty strings.`, + }) + logger.error('Invalid block_id detected in operation', { + operation_type, + block_id, + block_id_type: typeof block_id, + }) + continue + } + + const handler = OPERATION_HANDLERS[operation_type] + if (!handler) continue + + logger.debug(`Executing operation: ${operation_type} for block ${block_id}`, { + params: operation.params ? 
Object.keys(operation.params) : [], + currentBlockCount: Object.keys((modifiedState as any).blocks || {}).length, + }) + + handler(operation, ctx) + } + + // Pass 2: Add all deferred connections from add/insert operations + // Now all blocks exist, so connections can be safely created + if (ctx.deferredConnections.length > 0) { + logger.info('Processing deferred connections from add/insert operations', { + deferredConnectionCount: ctx.deferredConnections.length, + totalBlocks: Object.keys((modifiedState as any).blocks || {}).length, + }) + + for (const { blockId, connections } of ctx.deferredConnections) { + // Verify the source block still exists (it might have been deleted by a later operation) + if (!(modifiedState as any).blocks[blockId]) { + logger.warn('Source block no longer exists for deferred connection', { + blockId, + availableBlocks: Object.keys((modifiedState as any).blocks || {}), + }) + continue + } + + addConnectionsAsEdges(modifiedState, blockId, connections, logger, skippedItems) + } + + logger.info('Finished processing deferred connections', { + totalEdges: (modifiedState as any).edges?.length, + }) + } + + // Regenerate loops and parallels after modifications + ;(modifiedState as any).loops = generateLoopBlocks((modifiedState as any).blocks) + ;(modifiedState as any).parallels = generateParallelBlocks((modifiedState as any).blocks) + + // Validate all blocks have types before returning + const blocksWithoutType = Object.entries((modifiedState as any).blocks || {}) + .filter(([_, block]: [string, any]) => !block.type || block.type === undefined) + .map(([id, block]: [string, any]) => ({ id, block })) + + if (blocksWithoutType.length > 0) { + logger.error('Blocks without type after operations:', { + blocksWithoutType: blocksWithoutType.map(({ id, block }) => ({ + id, + type: block.type, + name: block.name, + keys: Object.keys(block), + })), + }) + + // Attempt to fix by removing type-less blocks + blocksWithoutType.forEach(({ id }) => { + delete 
(modifiedState as any).blocks[id] + }) + + // Remove edges connected to removed blocks + const removedIds = new Set(blocksWithoutType.map(({ id }) => id)) + ;(modifiedState as any).edges = ((modifiedState as any).edges || []).filter( + (edge: any) => !removedIds.has(edge.source) && !removedIds.has(edge.target) + ) + } + + return { state: modifiedState, validationErrors, skippedItems } +} diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/index.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/index.ts new file mode 100644 index 0000000000..4910094ae9 --- /dev/null +++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/index.ts @@ -0,0 +1,284 @@ +import { db } from '@sim/db' +import { workflow as workflowTable } from '@sim/db/schema' +import { createLogger } from '@sim/logger' +import { eq } from 'drizzle-orm' +import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' +import { getUserPermissionConfig } from '@/ee/access-control/utils/permission-check' +import { applyAutoLayout } from '@/lib/workflows/autolayout' +import { extractAndPersistCustomTools } from '@/lib/workflows/persistence/custom-tools-persistence' +import { + loadWorkflowFromNormalizedTables, + saveWorkflowToNormalizedTables, +} from '@/lib/workflows/persistence/utils' +import { validateWorkflowState } from '@/lib/workflows/sanitization/validation' +import { generateLoopBlocks, generateParallelBlocks } from '@/stores/workflows/workflow/utils' +import { applyOperationsToWorkflowState } from './engine' +import type { EditWorkflowParams, ValidationError } from './types' +import { preValidateCredentialInputs, validateWorkflowSelectorIds } from './validation' + +async function getCurrentWorkflowStateFromDb( + workflowId: string +): Promise<{ workflowState: any; subBlockValues: Record> }> { + const logger = createLogger('EditWorkflowServerTool') + const [workflowRecord] = await db + .select() + .from(workflowTable) + .where(eq(workflowTable.id, 
workflowId)) + .limit(1) + if (!workflowRecord) throw new Error(`Workflow ${workflowId} not found in database`) + const normalized = await loadWorkflowFromNormalizedTables(workflowId) + if (!normalized) throw new Error('Workflow has no normalized data') + + // Validate and fix blocks without types + const blocks = { ...normalized.blocks } + const invalidBlocks: string[] = [] + + Object.entries(blocks).forEach(([id, block]: [string, any]) => { + if (!block.type) { + logger.warn(`Block ${id} loaded without type from database`, { + blockKeys: Object.keys(block), + blockName: block.name, + }) + invalidBlocks.push(id) + } + }) + + // Remove invalid blocks + invalidBlocks.forEach((id) => delete blocks[id]) + + // Remove edges connected to invalid blocks + const edges = normalized.edges.filter( + (edge: any) => !invalidBlocks.includes(edge.source) && !invalidBlocks.includes(edge.target) + ) + + const workflowState: any = { + blocks, + edges, + loops: normalized.loops || {}, + parallels: normalized.parallels || {}, + } + const subBlockValues: Record> = {} + Object.entries(normalized.blocks).forEach(([blockId, block]) => { + subBlockValues[blockId] = {} + Object.entries((block as any).subBlocks || {}).forEach(([subId, sub]) => { + if ((sub as any).value !== undefined) subBlockValues[blockId][subId] = (sub as any).value + }) + }) + return { workflowState, subBlockValues } +} + +export const editWorkflowServerTool: BaseServerTool = { + name: 'edit_workflow', + async execute(params: EditWorkflowParams, context?: { userId: string }): Promise { + const logger = createLogger('EditWorkflowServerTool') + const { operations, workflowId, currentUserWorkflow } = params + if (!Array.isArray(operations) || operations.length === 0) { + throw new Error('operations are required and must be an array') + } + if (!workflowId) throw new Error('workflowId is required') + + logger.info('Executing edit_workflow', { + operationCount: operations.length, + workflowId, + hasCurrentUserWorkflow: 
!!currentUserWorkflow, + }) + + // Get current workflow state + let workflowState: any + if (currentUserWorkflow) { + try { + workflowState = JSON.parse(currentUserWorkflow) + } catch (error) { + logger.error('Failed to parse currentUserWorkflow', error) + throw new Error('Invalid currentUserWorkflow format') + } + } else { + const fromDb = await getCurrentWorkflowStateFromDb(workflowId) + workflowState = fromDb.workflowState + } + + // Get permission config for the user + const permissionConfig = context?.userId ? await getUserPermissionConfig(context.userId) : null + + // Pre-validate credential and apiKey inputs before applying operations + // This filters out invalid credentials and apiKeys for hosted models + let operationsToApply = operations + const credentialErrors: ValidationError[] = [] + if (context?.userId) { + const { filteredOperations, errors: credErrors } = await preValidateCredentialInputs( + operations, + { userId: context.userId }, + workflowState + ) + operationsToApply = filteredOperations + credentialErrors.push(...credErrors) + } + + // Apply operations directly to the workflow state + const { + state: modifiedWorkflowState, + validationErrors, + skippedItems, + } = applyOperationsToWorkflowState(workflowState, operationsToApply, permissionConfig) + + // Add credential validation errors + validationErrors.push(...credentialErrors) + + // Get workspaceId for selector validation + let workspaceId: string | undefined + try { + const [workflowRecord] = await db + .select({ workspaceId: workflowTable.workspaceId }) + .from(workflowTable) + .where(eq(workflowTable.id, workflowId)) + .limit(1) + workspaceId = workflowRecord?.workspaceId ?? 
undefined + } catch (error) { + logger.warn('Failed to get workspaceId for selector validation', { error, workflowId }) + } + + // Validate selector IDs exist in the database + if (context?.userId) { + try { + const selectorErrors = await validateWorkflowSelectorIds(modifiedWorkflowState, { + userId: context.userId, + workspaceId, + }) + validationErrors.push(...selectorErrors) + } catch (error) { + logger.warn('Selector ID validation failed', { + error: error instanceof Error ? error.message : String(error), + }) + } + } + + // Validate the workflow state + const validation = validateWorkflowState(modifiedWorkflowState, { sanitize: true }) + + if (!validation.valid) { + logger.error('Edited workflow state is invalid', { + errors: validation.errors, + warnings: validation.warnings, + }) + throw new Error(`Invalid edited workflow: ${validation.errors.join('; ')}`) + } + + if (validation.warnings.length > 0) { + logger.warn('Edited workflow validation warnings', { + warnings: validation.warnings, + }) + } + + // Extract and persist custom tools to database (reuse workspaceId from selector validation) + if (context?.userId && workspaceId) { + try { + const finalWorkflowState = validation.sanitizedState || modifiedWorkflowState + const { saved, errors } = await extractAndPersistCustomTools( + finalWorkflowState, + workspaceId, + context.userId + ) + + if (saved > 0) { + logger.info(`Persisted ${saved} custom tool(s) to database`, { workflowId }) + } + + if (errors.length > 0) { + logger.warn('Some custom tools failed to persist', { errors, workflowId }) + } + } catch (error) { + logger.error('Failed to persist custom tools', { error, workflowId }) + } + } else if (context?.userId && !workspaceId) { + logger.warn('Workflow has no workspaceId, skipping custom tools persistence', { + workflowId, + }) + } else { + logger.warn('No userId in context - skipping custom tools persistence', { workflowId }) + } + + logger.info('edit_workflow successfully applied operations', { + 
operationCount: operations.length, + blocksCount: Object.keys(modifiedWorkflowState.blocks).length, + edgesCount: modifiedWorkflowState.edges.length, + inputValidationErrors: validationErrors.length, + skippedItemsCount: skippedItems.length, + schemaValidationErrors: validation.errors.length, + validationWarnings: validation.warnings.length, + }) + + // Format validation errors for LLM feedback + const inputErrors = + validationErrors.length > 0 + ? validationErrors.map((e) => `Block "${e.blockId}" (${e.blockType}): ${e.error}`) + : undefined + + // Format skipped items for LLM feedback + const skippedMessages = skippedItems.length > 0 ? skippedItems.map((item) => item.reason) : undefined + + // Persist the workflow state to the database + const finalWorkflowState = validation.sanitizedState || modifiedWorkflowState + + // Apply autolayout to position blocks properly + const layoutResult = applyAutoLayout(finalWorkflowState.blocks, finalWorkflowState.edges, { + horizontalSpacing: 250, + verticalSpacing: 100, + padding: { x: 100, y: 100 }, + }) + + const layoutedBlocks = + layoutResult.success && layoutResult.blocks ? 
layoutResult.blocks : finalWorkflowState.blocks + + if (!layoutResult.success) { + logger.warn('Autolayout failed, using default positions', { + workflowId, + error: layoutResult.error, + }) + } + + const workflowStateForDb = { + blocks: layoutedBlocks, + edges: finalWorkflowState.edges, + loops: generateLoopBlocks(layoutedBlocks as any), + parallels: generateParallelBlocks(layoutedBlocks as any), + lastSaved: Date.now(), + isDeployed: false, + } + + const saveResult = await saveWorkflowToNormalizedTables(workflowId, workflowStateForDb as any) + if (!saveResult.success) { + logger.error('Failed to persist workflow state to database', { + workflowId, + error: saveResult.error, + }) + throw new Error(`Failed to save workflow: ${saveResult.error}`) + } + + // Update workflow's lastSynced timestamp + await db + .update(workflowTable) + .set({ + lastSynced: new Date(), + updatedAt: new Date(), + }) + .where(eq(workflowTable.id, workflowId)) + + logger.info('Workflow state persisted to database', { workflowId }) + + // Return the modified workflow state with autolayout applied + return { + success: true, + workflowState: { ...finalWorkflowState, blocks: layoutedBlocks }, + // Include input validation errors so the LLM can see what was rejected + ...(inputErrors && { + inputValidationErrors: inputErrors, + inputValidationMessage: `${inputErrors.length} input(s) were rejected due to validation errors. The workflow was still updated with valid inputs only. Errors: ${inputErrors.join('; ')}`, + }), + // Include skipped items so the LLM can see what operations were skipped + ...(skippedMessages && { + skippedItems: skippedMessages, + skippedItemsMessage: `${skippedItems.length} operation(s) were skipped due to invalid references. 
Details: ${skippedMessages.join('; ')}`, + }), + } + }, +} diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/operations.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/operations.ts new file mode 100644 index 0000000000..72155d1dd9 --- /dev/null +++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/operations.ts @@ -0,0 +1,996 @@ +import { createLogger } from '@sim/logger' +import { TriggerUtils } from '@/lib/workflows/triggers/triggers' +import { getBlock } from '@/blocks/registry' +import { isValidKey } from '@/lib/workflows/sanitization/key-validation' +import { RESERVED_BLOCK_NAMES, normalizeName } from '@/executor/constants' +import { TRIGGER_RUNTIME_SUBBLOCK_IDS } from '@/triggers/constants' +import { + addConnectionsAsEdges, + applyTriggerConfigToBlockSubblocks, + createBlockFromParams, + createValidatedEdge, + filterDisallowedTools, + normalizeArrayWithIds, + normalizeResponseFormat, + normalizeTools, + shouldNormalizeArrayIds, + updateCanonicalModesForInputs, +} from './builders' +import type { EditWorkflowOperation, OperationContext } from './types' +import { logSkippedItem } from './types' +import { + findBlockWithDuplicateNormalizedName, + isBlockTypeAllowed, + validateInputsForBlock, +} from './validation' + +const logger = createLogger('EditWorkflowServerTool') + +export function handleDeleteOperation(op: EditWorkflowOperation, ctx: OperationContext): void { + const { modifiedState, skippedItems } = ctx + const { block_id } = op + + if (!modifiedState.blocks[block_id]) { + logSkippedItem(skippedItems, { + type: 'block_not_found', + operationType: 'delete', + blockId: block_id, + reason: `Block "${block_id}" does not exist and cannot be deleted`, + }) + return + } + + // Check if block is locked or inside a locked container + const deleteBlock = modifiedState.blocks[block_id] + const deleteParentId = deleteBlock.data?.parentId as string | undefined + const deleteParentLocked = deleteParentId ? 
modifiedState.blocks[deleteParentId]?.locked : false + if (deleteBlock.locked || deleteParentLocked) { + logSkippedItem(skippedItems, { + type: 'block_locked', + operationType: 'delete', + blockId: block_id, + reason: deleteParentLocked + ? `Block "${block_id}" is inside locked container "${deleteParentId}" and cannot be deleted` + : `Block "${block_id}" is locked and cannot be deleted`, + }) + return + } + + // Find all child blocks to remove + const blocksToRemove = new Set([block_id]) + const findChildren = (parentId: string) => { + Object.entries(modifiedState.blocks).forEach(([childId, child]: [string, any]) => { + if (child.data?.parentId === parentId) { + blocksToRemove.add(childId) + findChildren(childId) + } + }) + } + findChildren(block_id) + + // Remove blocks + blocksToRemove.forEach((id) => delete modifiedState.blocks[id]) + + // Remove edges connected to deleted blocks + modifiedState.edges = modifiedState.edges.filter( + (edge: any) => !blocksToRemove.has(edge.source) && !blocksToRemove.has(edge.target) + ) +} + +export function handleEditOperation(op: EditWorkflowOperation, ctx: OperationContext): void { + const { modifiedState, skippedItems, validationErrors, permissionConfig } = ctx + const { block_id, params } = op + + if (!modifiedState.blocks[block_id]) { + logSkippedItem(skippedItems, { + type: 'block_not_found', + operationType: 'edit', + blockId: block_id, + reason: `Block "${block_id}" does not exist and cannot be edited`, + }) + return + } + + const block = modifiedState.blocks[block_id] + + // Check if block is locked or inside a locked container + const editParentId = block.data?.parentId as string | undefined + const editParentLocked = editParentId ? modifiedState.blocks[editParentId]?.locked : false + if (block.locked || editParentLocked) { + logSkippedItem(skippedItems, { + type: 'block_locked', + operationType: 'edit', + blockId: block_id, + reason: editParentLocked + ? 
`Block "${block_id}" is inside locked container "${editParentId}" and cannot be edited` + : `Block "${block_id}" is locked and cannot be edited`, + }) + return + } + + // Ensure block has essential properties + if (!block.type) { + logger.warn(`Block ${block_id} missing type property, skipping edit`, { + blockKeys: Object.keys(block), + blockData: JSON.stringify(block), + }) + logSkippedItem(skippedItems, { + type: 'block_not_found', + operationType: 'edit', + blockId: block_id, + reason: `Block "${block_id}" exists but has no type property`, + }) + return + } + + // Update inputs (convert to subBlocks format) + if (params?.inputs) { + if (!block.subBlocks) block.subBlocks = {} + + // Validate inputs against block configuration + const validationResult = validateInputsForBlock(block.type, params.inputs, block_id) + validationErrors.push(...validationResult.errors) + + Object.entries(validationResult.validInputs).forEach(([inputKey, value]) => { + // Normalize common field name variations (LLM may use plural/singular inconsistently) + let key = inputKey + if (key === 'credentials' && !block.subBlocks.credentials && block.subBlocks.credential) { + key = 'credential' + } + + if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) { + return + } + let sanitizedValue = value + + // Normalize array subblocks with id fields (inputFormat, table rows, etc.) 
+ if (shouldNormalizeArrayIds(key)) { + sanitizedValue = normalizeArrayWithIds(value) + } + + // Special handling for tools - normalize and filter disallowed + if (key === 'tools' && Array.isArray(value)) { + sanitizedValue = filterDisallowedTools( + normalizeTools(value), + permissionConfig, + block_id, + skippedItems + ) + } + + // Special handling for responseFormat - normalize to ensure consistent format + if (key === 'responseFormat' && value) { + sanitizedValue = normalizeResponseFormat(value) + } + + if (!block.subBlocks[key]) { + block.subBlocks[key] = { + id: key, + type: 'short-input', + value: sanitizedValue, + } + } else { + const existingValue = block.subBlocks[key].value + const valuesEqual = + typeof existingValue === 'object' || typeof sanitizedValue === 'object' + ? JSON.stringify(existingValue) === JSON.stringify(sanitizedValue) + : existingValue === sanitizedValue + + if (!valuesEqual) { + block.subBlocks[key].value = sanitizedValue + } + } + }) + + if ( + Object.hasOwn(params.inputs, 'triggerConfig') && + block.subBlocks.triggerConfig && + typeof block.subBlocks.triggerConfig.value === 'object' + ) { + applyTriggerConfigToBlockSubblocks(block, block.subBlocks.triggerConfig.value) + } + + // Update loop/parallel configuration in block.data (strict validation) + if (block.type === 'loop') { + block.data = block.data || {} + // loopType is always valid + if (params.inputs.loopType !== undefined) { + const validLoopTypes = ['for', 'forEach', 'while', 'doWhile'] + if (validLoopTypes.includes(params.inputs.loopType)) { + block.data.loopType = params.inputs.loopType + } + } + const effectiveLoopType = params.inputs.loopType ?? block.data.loopType ?? 
'for' + // iterations only valid for 'for' loopType + if (params.inputs.iterations !== undefined && effectiveLoopType === 'for') { + block.data.count = params.inputs.iterations + } + // collection only valid for 'forEach' loopType + if (params.inputs.collection !== undefined && effectiveLoopType === 'forEach') { + block.data.collection = params.inputs.collection + } + // condition only valid for 'while' or 'doWhile' loopType + if ( + params.inputs.condition !== undefined && + (effectiveLoopType === 'while' || effectiveLoopType === 'doWhile') + ) { + if (effectiveLoopType === 'doWhile') { + block.data.doWhileCondition = params.inputs.condition + } else { + block.data.whileCondition = params.inputs.condition + } + } + } else if (block.type === 'parallel') { + block.data = block.data || {} + // parallelType is always valid + if (params.inputs.parallelType !== undefined) { + const validParallelTypes = ['count', 'collection'] + if (validParallelTypes.includes(params.inputs.parallelType)) { + block.data.parallelType = params.inputs.parallelType + } + } + const effectiveParallelType = params.inputs.parallelType ?? block.data.parallelType ?? 
'count' + // count only valid for 'count' parallelType + if (params.inputs.count !== undefined && effectiveParallelType === 'count') { + block.data.count = params.inputs.count + } + // collection only valid for 'collection' parallelType + if (params.inputs.collection !== undefined && effectiveParallelType === 'collection') { + block.data.collection = params.inputs.collection + } + } + + const editBlockConfig = getBlock(block.type) + if (editBlockConfig) { + updateCanonicalModesForInputs(block, Object.keys(validationResult.validInputs), editBlockConfig) + } + } + + // Update basic properties + if (params?.type !== undefined) { + // Special container types (loop, parallel) are not in the block registry but are valid + const isContainerType = params.type === 'loop' || params.type === 'parallel' + + // Validate type before setting (skip validation for container types) + const blockConfig = getBlock(params.type) + if (!blockConfig && !isContainerType) { + logSkippedItem(skippedItems, { + type: 'invalid_block_type', + operationType: 'edit', + blockId: block_id, + reason: `Invalid block type "${params.type}" - type change skipped`, + details: { requestedType: params.type }, + }) + } else if (!isContainerType && !isBlockTypeAllowed(params.type, permissionConfig)) { + logSkippedItem(skippedItems, { + type: 'block_not_allowed', + operationType: 'edit', + blockId: block_id, + reason: `Block type "${params.type}" is not allowed by permission group - type change skipped`, + details: { requestedType: params.type }, + }) + } else { + block.type = params.type + } + } + if (params?.name !== undefined) { + const normalizedName = normalizeName(params.name) + if (!normalizedName) { + logSkippedItem(skippedItems, { + type: 'missing_required_params', + operationType: 'edit', + blockId: block_id, + reason: `Cannot rename to empty name`, + details: { requestedName: params.name }, + }) + } else if ((RESERVED_BLOCK_NAMES as readonly string[]).includes(normalizedName)) { + 
logSkippedItem(skippedItems, { + type: 'reserved_block_name', + operationType: 'edit', + blockId: block_id, + reason: `Cannot rename to "${params.name}" - this is a reserved name`, + details: { requestedName: params.name }, + }) + } else { + const conflictingBlock = findBlockWithDuplicateNormalizedName( + modifiedState.blocks, + params.name, + block_id + ) + + if (conflictingBlock) { + logSkippedItem(skippedItems, { + type: 'duplicate_block_name', + operationType: 'edit', + blockId: block_id, + reason: `Cannot rename to "${params.name}" - conflicts with "${conflictingBlock[1].name}"`, + details: { + requestedName: params.name, + conflictingBlockId: conflictingBlock[0], + conflictingBlockName: conflictingBlock[1].name, + }, + }) + } else { + block.name = params.name + } + } + } + + // Handle trigger mode toggle + if (typeof params?.triggerMode === 'boolean') { + block.triggerMode = params.triggerMode + + if (params.triggerMode === true) { + // Remove all incoming edges when enabling trigger mode + modifiedState.edges = modifiedState.edges.filter((edge: any) => edge.target !== block_id) + } + } + + // Handle advanced mode toggle + if (typeof params?.advancedMode === 'boolean') { + block.advancedMode = params.advancedMode + } + + // Handle nested nodes update (for loops/parallels) + if (params?.nestedNodes) { + // Remove all existing child blocks + const existingChildren = Object.keys(modifiedState.blocks).filter( + (id) => modifiedState.blocks[id].data?.parentId === block_id + ) + existingChildren.forEach((childId) => delete modifiedState.blocks[childId]) + + // Remove edges to/from removed children + modifiedState.edges = modifiedState.edges.filter( + (edge: any) => !existingChildren.includes(edge.source) && !existingChildren.includes(edge.target) + ) + + // Add new nested blocks + Object.entries(params.nestedNodes).forEach(([childId, childBlock]: [string, any]) => { + // Validate childId is a valid string + if (!isValidKey(childId)) { + logSkippedItem(skippedItems, 
{ + type: 'missing_required_params', + operationType: 'add_nested_node', + blockId: String(childId || 'invalid'), + reason: `Invalid childId "${childId}" in nestedNodes - child block skipped`, + }) + logger.error('Invalid childId detected in nestedNodes', { + parentBlockId: block_id, + childId, + childId_type: typeof childId, + }) + return + } + + if (childBlock.type === 'loop' || childBlock.type === 'parallel') { + logSkippedItem(skippedItems, { + type: 'nested_subflow_not_allowed', + operationType: 'edit_nested_node', + blockId: childId, + reason: `Cannot nest ${childBlock.type} inside ${block.type} - nested subflows are not supported`, + details: { parentType: block.type, childType: childBlock.type }, + }) + return + } + + const childBlockState = createBlockFromParams( + childId, + childBlock, + block_id, + validationErrors, + permissionConfig, + skippedItems + ) + modifiedState.blocks[childId] = childBlockState + + // Add connections for child block + if (childBlock.connections) { + addConnectionsAsEdges(modifiedState, childId, childBlock.connections, logger, skippedItems) + } + }) + + // Update loop/parallel configuration based on type (strict validation) + if (block.type === 'loop') { + block.data = block.data || {} + // loopType is always valid + if (params.inputs?.loopType) { + const validLoopTypes = ['for', 'forEach', 'while', 'doWhile'] + if (validLoopTypes.includes(params.inputs.loopType)) { + block.data.loopType = params.inputs.loopType + } + } + const effectiveLoopType = params.inputs?.loopType ?? block.data.loopType ?? 
'for' + // iterations only valid for 'for' loopType + if (params.inputs?.iterations && effectiveLoopType === 'for') { + block.data.count = params.inputs.iterations + } + // collection only valid for 'forEach' loopType + if (params.inputs?.collection && effectiveLoopType === 'forEach') { + block.data.collection = params.inputs.collection + } + // condition only valid for 'while' or 'doWhile' loopType + if ( + params.inputs?.condition && + (effectiveLoopType === 'while' || effectiveLoopType === 'doWhile') + ) { + if (effectiveLoopType === 'doWhile') { + block.data.doWhileCondition = params.inputs.condition + } else { + block.data.whileCondition = params.inputs.condition + } + } + } else if (block.type === 'parallel') { + block.data = block.data || {} + // parallelType is always valid + if (params.inputs?.parallelType) { + const validParallelTypes = ['count', 'collection'] + if (validParallelTypes.includes(params.inputs.parallelType)) { + block.data.parallelType = params.inputs.parallelType + } + } + const effectiveParallelType = params.inputs?.parallelType ?? block.data.parallelType ?? 
'count' + // count only valid for 'count' parallelType + if (params.inputs?.count && effectiveParallelType === 'count') { + block.data.count = params.inputs.count + } + // collection only valid for 'collection' parallelType + if (params.inputs?.collection && effectiveParallelType === 'collection') { + block.data.collection = params.inputs.collection + } + } + } + + // Handle connections update (convert to edges) + if (params?.connections) { + modifiedState.edges = modifiedState.edges.filter((edge: any) => edge.source !== block_id) + + Object.entries(params.connections).forEach(([connectionType, targets]) => { + if (targets === null) return + + const mapConnectionTypeToHandle = (type: string): string => { + if (type === 'success') return 'source' + if (type === 'error') return 'error' + return type + } + + const sourceHandle = mapConnectionTypeToHandle(connectionType) + + const addEdgeForTarget = (targetBlock: string, targetHandle?: string) => { + createValidatedEdge( + modifiedState, + block_id, + targetBlock, + sourceHandle, + targetHandle || 'target', + 'edit', + logger, + skippedItems + ) + } + + if (typeof targets === 'string') { + addEdgeForTarget(targets) + } else if (Array.isArray(targets)) { + targets.forEach((target: any) => { + if (typeof target === 'string') { + addEdgeForTarget(target) + } else if (target?.block) { + addEdgeForTarget(target.block, target.handle) + } + }) + } else if (typeof targets === 'object' && (targets as any)?.block) { + addEdgeForTarget((targets as any).block, (targets as any).handle) + } + }) + } + + // Handle edge removal + if (params?.removeEdges && Array.isArray(params.removeEdges)) { + params.removeEdges.forEach(({ targetBlockId, sourceHandle = 'source' }) => { + modifiedState.edges = modifiedState.edges.filter( + (edge: any) => + !(edge.source === block_id && edge.target === targetBlockId && edge.sourceHandle === sourceHandle) + ) + }) + } +} + +export function handleAddOperation(op: EditWorkflowOperation, ctx: 
OperationContext): void { + const { modifiedState, skippedItems, validationErrors, permissionConfig, deferredConnections } = ctx + const { block_id, params } = op + + const addNormalizedName = params?.name ? normalizeName(params.name) : '' + if (!params?.type || !params?.name || !addNormalizedName) { + logSkippedItem(skippedItems, { + type: 'missing_required_params', + operationType: 'add', + blockId: block_id, + reason: `Missing required params (type or name) for adding block "${block_id}"`, + details: { hasType: !!params?.type, hasName: !!params?.name }, + }) + return + } + + if ((RESERVED_BLOCK_NAMES as readonly string[]).includes(addNormalizedName)) { + logSkippedItem(skippedItems, { + type: 'reserved_block_name', + operationType: 'add', + blockId: block_id, + reason: `Block name "${params.name}" is a reserved name and cannot be used`, + details: { requestedName: params.name }, + }) + return + } + + const conflictingBlock = findBlockWithDuplicateNormalizedName(modifiedState.blocks, params.name, block_id) + + if (conflictingBlock) { + logSkippedItem(skippedItems, { + type: 'duplicate_block_name', + operationType: 'add', + blockId: block_id, + reason: `Block name "${params.name}" conflicts with existing block "${conflictingBlock[1].name}"`, + details: { + requestedName: params.name, + conflictingBlockId: conflictingBlock[0], + conflictingBlockName: conflictingBlock[1].name, + }, + }) + return + } + + // Special container types (loop, parallel) are not in the block registry but are valid + const isContainerType = params.type === 'loop' || params.type === 'parallel' + + // Validate block type before adding (skip validation for container types) + const addBlockConfig = getBlock(params.type) + if (!addBlockConfig && !isContainerType) { + logSkippedItem(skippedItems, { + type: 'invalid_block_type', + operationType: 'add', + blockId: block_id, + reason: `Invalid block type "${params.type}" - block not added`, + details: { requestedType: params.type }, + }) + return + } 
+ + // Check if block type is allowed by permission group + if (!isContainerType && !isBlockTypeAllowed(params.type, permissionConfig)) { + logSkippedItem(skippedItems, { + type: 'block_not_allowed', + operationType: 'add', + blockId: block_id, + reason: `Block type "${params.type}" is not allowed by permission group - block not added`, + details: { requestedType: params.type }, + }) + return + } + + const triggerIssue = TriggerUtils.getTriggerAdditionIssue(modifiedState.blocks, params.type) + if (triggerIssue) { + logSkippedItem(skippedItems, { + type: 'duplicate_trigger', + operationType: 'add', + blockId: block_id, + reason: `Cannot add ${triggerIssue.triggerName} - a workflow can only have one`, + details: { requestedType: params.type, issue: triggerIssue.issue }, + }) + return + } + + // Check single-instance block constraints (e.g., Response block) + const singleInstanceIssue = TriggerUtils.getSingleInstanceBlockIssue(modifiedState.blocks, params.type) + if (singleInstanceIssue) { + logSkippedItem(skippedItems, { + type: 'duplicate_single_instance_block', + operationType: 'add', + blockId: block_id, + reason: `Cannot add ${singleInstanceIssue.blockName} - a workflow can only have one`, + details: { requestedType: params.type }, + }) + return + } + + // Create new block with proper structure + const newBlock = createBlockFromParams( + block_id, + params, + undefined, + validationErrors, + permissionConfig, + skippedItems + ) + + // Set loop/parallel data on parent block BEFORE adding to blocks (strict validation) + if (params.nestedNodes) { + if (params.type === 'loop') { + const validLoopTypes = ['for', 'forEach', 'while', 'doWhile'] + const loopType = + params.inputs?.loopType && validLoopTypes.includes(params.inputs.loopType) + ? 
params.inputs.loopType + : 'for' + newBlock.data = { + ...newBlock.data, + loopType, + // Only include type-appropriate fields + ...(loopType === 'forEach' && params.inputs?.collection && { collection: params.inputs.collection }), + ...(loopType === 'for' && params.inputs?.iterations && { count: params.inputs.iterations }), + ...(loopType === 'while' && params.inputs?.condition && { whileCondition: params.inputs.condition }), + ...(loopType === 'doWhile' && + params.inputs?.condition && { doWhileCondition: params.inputs.condition }), + } + } else if (params.type === 'parallel') { + const validParallelTypes = ['count', 'collection'] + const parallelType = + params.inputs?.parallelType && validParallelTypes.includes(params.inputs.parallelType) + ? params.inputs.parallelType + : 'count' + newBlock.data = { + ...newBlock.data, + parallelType, + // Only include type-appropriate fields + ...(parallelType === 'collection' && + params.inputs?.collection && { collection: params.inputs.collection }), + ...(parallelType === 'count' && params.inputs?.count && { count: params.inputs.count }), + } + } + } + + // Add parent block FIRST before adding children + // This ensures children can reference valid parentId + modifiedState.blocks[block_id] = newBlock + + // Handle nested nodes (for loops/parallels created from scratch) + if (params.nestedNodes) { + // Defensive check: verify parent is not locked before adding children + // (Parent was just created with locked: false, but check for consistency) + const parentBlock = modifiedState.blocks[block_id] + if (parentBlock?.locked) { + logSkippedItem(skippedItems, { + type: 'block_locked', + operationType: 'add_nested_nodes', + blockId: block_id, + reason: `Container "${block_id}" is locked - cannot add nested nodes`, + }) + return + } + + Object.entries(params.nestedNodes).forEach(([childId, childBlock]: [string, any]) => { + // Validate childId is a valid string + if (!isValidKey(childId)) { + logSkippedItem(skippedItems, { + type: 
'missing_required_params', + operationType: 'add_nested_node', + blockId: String(childId || 'invalid'), + reason: `Invalid childId "${childId}" in nestedNodes - child block skipped`, + }) + logger.error('Invalid childId detected in nestedNodes', { + parentBlockId: block_id, + childId, + childId_type: typeof childId, + }) + return + } + + if (childBlock.type === 'loop' || childBlock.type === 'parallel') { + logSkippedItem(skippedItems, { + type: 'nested_subflow_not_allowed', + operationType: 'add_nested_node', + blockId: childId, + reason: `Cannot nest ${childBlock.type} inside ${params.type} - nested subflows are not supported`, + details: { parentType: params.type, childType: childBlock.type }, + }) + return + } + + const childBlockState = createBlockFromParams( + childId, + childBlock, + block_id, + validationErrors, + permissionConfig, + skippedItems + ) + modifiedState.blocks[childId] = childBlockState + + // Defer connection processing to ensure all blocks exist first + if (childBlock.connections) { + deferredConnections.push({ + blockId: childId, + connections: childBlock.connections, + }) + } + }) + } + + // Defer connection processing to ensure all blocks exist first (pass 2) + if (params.connections) { + deferredConnections.push({ + blockId: block_id, + connections: params.connections, + }) + } +} + +export function handleInsertIntoSubflowOperation( + op: EditWorkflowOperation, + ctx: OperationContext +): void { + const { modifiedState, skippedItems, validationErrors, permissionConfig, deferredConnections } = ctx + const { block_id, params } = op + + const subflowId = params?.subflowId + if (!subflowId || !params?.type || !params?.name) { + logSkippedItem(skippedItems, { + type: 'missing_required_params', + operationType: 'insert_into_subflow', + blockId: block_id, + reason: `Missing required params (subflowId, type, or name) for inserting block "${block_id}"`, + details: { + hasSubflowId: !!subflowId, + hasType: !!params?.type, + hasName: !!params?.name, 
+ }, + }) + return + } + + const subflowBlock = modifiedState.blocks[subflowId] + if (!subflowBlock) { + logSkippedItem(skippedItems, { + type: 'invalid_subflow_parent', + operationType: 'insert_into_subflow', + blockId: block_id, + reason: `Subflow block "${subflowId}" not found - block "${block_id}" not inserted`, + details: { subflowId }, + }) + return + } + + // Check if subflow is locked + if (subflowBlock.locked) { + logSkippedItem(skippedItems, { + type: 'block_locked', + operationType: 'insert_into_subflow', + blockId: block_id, + reason: `Subflow "${subflowId}" is locked - cannot insert block "${block_id}"`, + details: { subflowId }, + }) + return + } + + if (subflowBlock.type !== 'loop' && subflowBlock.type !== 'parallel') { + logger.error('Subflow block has invalid type', { + subflowId, + type: subflowBlock.type, + block_id, + }) + return + } + + if (params.type === 'loop' || params.type === 'parallel') { + logSkippedItem(skippedItems, { + type: 'nested_subflow_not_allowed', + operationType: 'insert_into_subflow', + blockId: block_id, + reason: `Cannot nest ${params.type} inside ${subflowBlock.type} - nested subflows are not supported`, + details: { parentType: subflowBlock.type, childType: params.type }, + }) + return + } + + // Check if block already exists (moving into subflow) or is new + const existingBlock = modifiedState.blocks[block_id] + + if (existingBlock) { + if (existingBlock.type === 'loop' || existingBlock.type === 'parallel') { + logSkippedItem(skippedItems, { + type: 'nested_subflow_not_allowed', + operationType: 'insert_into_subflow', + blockId: block_id, + reason: `Cannot move ${existingBlock.type} into ${subflowBlock.type} - nested subflows are not supported`, + details: { parentType: subflowBlock.type, childType: existingBlock.type }, + }) + return + } + + // Check if existing block is locked + if (existingBlock.locked) { + logSkippedItem(skippedItems, { + type: 'block_locked', + operationType: 'insert_into_subflow', + blockId: 
block_id, + reason: `Block "${block_id}" is locked and cannot be moved into a subflow`, + }) + return + } + + // Moving existing block into subflow - just update parent + existingBlock.data = { + ...existingBlock.data, + parentId: subflowId, + extent: 'parent' as const, + } + + // Update inputs if provided (with validation) + if (params.inputs) { + // Validate inputs against block configuration + const validationResult = validateInputsForBlock(existingBlock.type, params.inputs, block_id) + validationErrors.push(...validationResult.errors) + + Object.entries(validationResult.validInputs).forEach(([key, value]) => { + // Skip runtime subblock IDs (webhookId, triggerPath) + if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) { + return + } + + let sanitizedValue = value + + // Normalize array subblocks with id fields (inputFormat, table rows, etc.) + if (shouldNormalizeArrayIds(key)) { + sanitizedValue = normalizeArrayWithIds(value) + } + + // Special handling for tools - normalize and filter disallowed + if (key === 'tools' && Array.isArray(value)) { + sanitizedValue = filterDisallowedTools( + normalizeTools(value), + permissionConfig, + block_id, + skippedItems + ) + } + + // Special handling for responseFormat - normalize to ensure consistent format + if (key === 'responseFormat' && value) { + sanitizedValue = normalizeResponseFormat(value) + } + + if (!existingBlock.subBlocks[key]) { + existingBlock.subBlocks[key] = { + id: key, + type: 'short-input', + value: sanitizedValue, + } + } else { + existingBlock.subBlocks[key].value = sanitizedValue + } + }) + + const existingBlockConfig = getBlock(existingBlock.type) + if (existingBlockConfig) { + updateCanonicalModesForInputs( + existingBlock, + Object.keys(validationResult.validInputs), + existingBlockConfig + ) + } + } + } else { + // Special container types (loop, parallel) are not in the block registry but are valid + const isContainerType = params.type === 'loop' || params.type === 'parallel' + + // Validate block 
type before creating (skip validation for container types) + const insertBlockConfig = getBlock(params.type) + if (!insertBlockConfig && !isContainerType) { + logSkippedItem(skippedItems, { + type: 'invalid_block_type', + operationType: 'insert_into_subflow', + blockId: block_id, + reason: `Invalid block type "${params.type}" - block not inserted into subflow`, + details: { requestedType: params.type, subflowId }, + }) + return + } + + // Check if block type is allowed by permission group + if (!isContainerType && !isBlockTypeAllowed(params.type, permissionConfig)) { + logSkippedItem(skippedItems, { + type: 'block_not_allowed', + operationType: 'insert_into_subflow', + blockId: block_id, + reason: `Block type "${params.type}" is not allowed by permission group - block not inserted`, + details: { requestedType: params.type, subflowId }, + }) + return + } + + // Create new block as child of subflow + const newBlock = createBlockFromParams( + block_id, + params, + subflowId, + validationErrors, + permissionConfig, + skippedItems + ) + modifiedState.blocks[block_id] = newBlock + } + + // Defer connection processing to ensure all blocks exist first + // This is particularly important when multiple blocks are being inserted + // and they have connections to each other + if (params.connections) { + // Remove existing edges from this block first + modifiedState.edges = modifiedState.edges.filter((edge: any) => edge.source !== block_id) + + // Add to deferred connections list + deferredConnections.push({ + blockId: block_id, + connections: params.connections, + }) + } +} + +export function handleExtractFromSubflowOperation( + op: EditWorkflowOperation, + ctx: OperationContext +): void { + const { modifiedState, skippedItems } = ctx + const { block_id, params } = op + + const subflowId = params?.subflowId + if (!subflowId) { + logSkippedItem(skippedItems, { + type: 'missing_required_params', + operationType: 'extract_from_subflow', + blockId: block_id, + reason: `Missing 
subflowId for extracting block "${block_id}"`, + }) + return + } + + const block = modifiedState.blocks[block_id] + if (!block) { + logSkippedItem(skippedItems, { + type: 'block_not_found', + operationType: 'extract_from_subflow', + blockId: block_id, + reason: `Block "${block_id}" not found for extraction`, + }) + return + } + + // Check if block is locked + if (block.locked) { + logSkippedItem(skippedItems, { + type: 'block_locked', + operationType: 'extract_from_subflow', + blockId: block_id, + reason: `Block "${block_id}" is locked and cannot be extracted from subflow`, + }) + return + } + + // Check if parent subflow is locked + const parentSubflow = modifiedState.blocks[subflowId] + if (parentSubflow?.locked) { + logSkippedItem(skippedItems, { + type: 'block_locked', + operationType: 'extract_from_subflow', + blockId: block_id, + reason: `Subflow "${subflowId}" is locked - cannot extract block "${block_id}"`, + details: { subflowId }, + }) + return + } + + // Verify it's actually a child of this subflow + if (block.data?.parentId !== subflowId) { + logger.warn('Block is not a child of specified subflow', { + block_id, + actualParent: block.data?.parentId, + specifiedParent: subflowId, + }) + } + + // Remove parent relationship + if (block.data) { + block.data.parentId = undefined + block.data.extent = undefined + } + + // Note: We keep the block and its edges, just remove parent relationship + // The block becomes a root-level block +} diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/types.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/types.ts new file mode 100644 index 0000000000..09b766e069 --- /dev/null +++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/types.ts @@ -0,0 +1,134 @@ +import { createLogger } from '@sim/logger' +import type { PermissionGroupConfig } from '@/lib/permission-groups/types' + +/** Selector subblock types that can be validated */ +export const SELECTOR_TYPES = new Set([ + 
'oauth-input', + 'knowledge-base-selector', + 'document-selector', + 'file-selector', + 'project-selector', + 'channel-selector', + 'folder-selector', + 'mcp-server-selector', + 'mcp-tool-selector', + 'workflow-selector', +]) + +const validationLogger = createLogger('EditWorkflowValidation') + +/** UUID v4 regex pattern for validation */ +export const UUID_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i + +/** + * Validation error for a specific field + */ +export interface ValidationError { + blockId: string + blockType: string + field: string + value: any + error: string +} + +/** + * Types of items that can be skipped during operation application + */ +export type SkippedItemType = + | 'block_not_found' + | 'invalid_block_type' + | 'block_not_allowed' + | 'block_locked' + | 'tool_not_allowed' + | 'invalid_edge_target' + | 'invalid_edge_source' + | 'invalid_source_handle' + | 'invalid_target_handle' + | 'invalid_subblock_field' + | 'missing_required_params' + | 'invalid_subflow_parent' + | 'nested_subflow_not_allowed' + | 'duplicate_block_name' + | 'reserved_block_name' + | 'duplicate_trigger' + | 'duplicate_single_instance_block' + +/** + * Represents an item that was skipped during operation application + */ +export interface SkippedItem { + type: SkippedItemType + operationType: string + blockId: string + reason: string + details?: Record +} + +/** + * Logs and records a skipped item + */ +export function logSkippedItem(skippedItems: SkippedItem[], item: SkippedItem): void { + validationLogger.warn(`Skipped ${item.operationType} operation: ${item.reason}`, { + type: item.type, + operationType: item.operationType, + blockId: item.blockId, + ...(item.details && { details: item.details }), + }) + skippedItems.push(item) +} + +/** + * Result of input validation + */ +export interface ValidationResult { + validInputs: Record + errors: ValidationError[] +} + +/** + * Result of validating a single value + */ +export interface 
ValueValidationResult { + valid: boolean + value?: any + error?: ValidationError +} + +export interface EditWorkflowOperation { + operation_type: 'add' | 'edit' | 'delete' | 'insert_into_subflow' | 'extract_from_subflow' + block_id: string + params?: Record +} + +export interface EditWorkflowParams { + operations: EditWorkflowOperation[] + workflowId: string + currentUserWorkflow?: string +} + +export interface EdgeHandleValidationResult { + valid: boolean + error?: string + /** The normalized handle to use (e.g., simple 'if' normalized to 'condition-{uuid}') */ + normalizedHandle?: string +} + +/** + * Result of applying operations to workflow state + */ +export interface ApplyOperationsResult { + state: any + validationErrors: ValidationError[] + skippedItems: SkippedItem[] +} + +export interface OperationContext { + modifiedState: any + skippedItems: SkippedItem[] + validationErrors: ValidationError[] + permissionConfig: PermissionGroupConfig | null + deferredConnections: Array<{ + blockId: string + connections: Record + }> +} diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/validation.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/validation.ts new file mode 100644 index 0000000000..424be9d25d --- /dev/null +++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/validation.ts @@ -0,0 +1,1051 @@ +import { createLogger } from '@sim/logger' +import { validateSelectorIds } from '@/lib/copilot/validation/selector-validator' +import type { PermissionGroupConfig } from '@/lib/permission-groups/types' +import { getBlock } from '@/blocks/registry' +import type { SubBlockConfig } from '@/blocks/types' +import { EDGE, normalizeName } from '@/executor/constants' +import { TRIGGER_RUNTIME_SUBBLOCK_IDS } from '@/triggers/constants' +import type { + EdgeHandleValidationResult, + EditWorkflowOperation, + ValidationError, + ValidationResult, + ValueValidationResult, +} from './types' +import { SELECTOR_TYPES } from './types' + +const 
const validationLogger = createLogger('EditWorkflowValidation')

/**
 * Finds an existing block with the same normalized name.
 *
 * @param blocks - Map of blockId -> block state to search.
 * @param name - Candidate name whose normalized form is checked for collisions.
 * @param excludeBlockId - Block to ignore (typically the block being renamed/added).
 * @returns The first colliding [blockId, block] entry, or undefined if none.
 */
export function findBlockWithDuplicateNormalizedName(
  blocks: Record<string, any>,
  name: string,
  excludeBlockId: string
): [string, any] | undefined {
  const normalizedName = normalizeName(name)
  return Object.entries(blocks).find(
    ([blockId, block]: [string, any]) =>
      blockId !== excludeBlockId && normalizeName(block.name || '') === normalizedName
  )
}

/**
 * Validates and filters inputs against a block's subBlock configuration.
 * Returns valid inputs and any validation errors encountered.
 *
 * @param blockType - Registry type of the block being edited.
 * @param inputs - Raw field values supplied by the caller/LLM.
 * @param blockId - Block id, used only for error reporting.
 */
export function validateInputsForBlock(
  blockType: string,
  inputs: Record<string, any>,
  blockId: string
): ValidationResult {
  const errors: ValidationError[] = []
  const blockConfig = getBlock(blockType)

  if (!blockConfig) {
    // Unknown block type - return inputs as-is (let it fail later if invalid)
    validationLogger.warn(`Unknown block type: ${blockType}, skipping validation`)
    return { validInputs: inputs, errors: [] }
  }

  const validatedInputs: Record<string, any> = {}
  const subBlockMap = new Map<string, SubBlockConfig>()

  // Build map of subBlock id -> config for O(1) lookups below
  for (const subBlock of blockConfig.subBlocks) {
    subBlockMap.set(subBlock.id, subBlock)
  }

  for (const [key, value] of Object.entries(inputs)) {
    // Skip runtime subblock IDs (populated by the trigger runtime, not the user)
    if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) {
      continue
    }

    const subBlockConfig = subBlockMap.get(key)

    // If subBlock doesn't exist in config, skip it (unless it's a known dynamic field)
    if (!subBlockConfig) {
      // Some fields are valid but not in subBlocks (like loop/parallel config)
      // Allow these through for special block types
      if (blockType === 'loop' || blockType === 'parallel') {
        validatedInputs[key] = value
      } else {
        errors.push({
          blockId,
          blockType,
          field: key,
          value,
          error: `Unknown input field "${key}" for block type "${blockType}"`,
        })
      }
      continue
    }

    // Note: We do NOT check subBlockConfig.condition here.
    // Conditions are for UI display logic (show/hide fields in the editor).
    // For API/Copilot, any valid field in the block schema should be accepted.
    // The runtime will use the relevant fields based on the actual operation.

    // Validate value based on subBlock type
    const validationResult = validateValueForSubBlockType(
      subBlockConfig,
      value,
      key,
      blockType,
      blockId
    )
    if (validationResult.valid) {
      validatedInputs[key] = validationResult.value
    } else if (validationResult.error) {
      errors.push(validationResult.error)
    }
  }

  return { validInputs: validatedInputs, errors }
}

/**
 * Validates a value against its expected subBlock type.
 * Returns validation result with the (possibly coerced) value or an error.
 *
 * Coercions performed: sliders are clamped to [min, max] (and rounded when
 * integer); short/long-input/combobox values that are neither string nor
 * number are stringified. All other types either pass through or error.
 */
export function validateValueForSubBlockType(
  subBlockConfig: SubBlockConfig,
  value: any,
  fieldName: string,
  blockType: string,
  blockId: string
): ValueValidationResult {
  const { type } = subBlockConfig

  // Handle null/undefined - allow clearing fields regardless of type
  if (value === null || value === undefined) {
    return { valid: true, value }
  }

  switch (type) {
    case 'dropdown': {
      // Validate against allowed options (options may be a factory function)
      const options =
        typeof subBlockConfig.options === 'function'
          ? subBlockConfig.options()
          : subBlockConfig.options
      if (options && Array.isArray(options)) {
        const validIds = options.map((opt) => opt.id)
        if (!validIds.includes(value)) {
          return {
            valid: false,
            error: {
              blockId,
              blockType,
              field: fieldName,
              value,
              error: `Invalid dropdown value "${value}" for field "${fieldName}". Valid options: ${validIds.join(', ')}`,
            },
          }
        }
      }
      return { valid: true, value }
    }

    case 'slider': {
      // Validate numeric range
      const numValue = typeof value === 'number' ? value : Number(value)
      if (Number.isNaN(numValue)) {
        return {
          valid: false,
          error: {
            blockId,
            blockType,
            field: fieldName,
            value,
            error: `Invalid slider value "${value}" for field "${fieldName}" - must be a number`,
          },
        }
      }
      // Clamp to range (allow but warn) rather than rejecting out-of-range values
      let clampedValue = numValue
      if (subBlockConfig.min !== undefined && numValue < subBlockConfig.min) {
        clampedValue = subBlockConfig.min
      }
      if (subBlockConfig.max !== undefined && numValue > subBlockConfig.max) {
        clampedValue = subBlockConfig.max
      }
      return {
        valid: true,
        value: subBlockConfig.integer ? Math.round(clampedValue) : clampedValue,
      }
    }

    case 'switch': {
      // Must be boolean
      if (typeof value !== 'boolean') {
        return {
          valid: false,
          error: {
            blockId,
            blockType,
            field: fieldName,
            value,
            error: `Invalid switch value "${value}" for field "${fieldName}" - must be true or false`,
          },
        }
      }
      return { valid: true, value }
    }

    case 'file-upload': {
      // File upload should be an object with specific properties or null
      // NOTE(review): value === null is unreachable here — nulls return early
      // at the top of this function; kept for safety.
      if (value === null) return { valid: true, value: null }
      if (typeof value !== 'object') {
        return {
          valid: false,
          error: {
            blockId,
            blockType,
            field: fieldName,
            value,
            error: `Invalid file-upload value for field "${fieldName}" - expected object with name and path properties, or null`,
          },
        }
      }
      // Validate file object has required properties
      if (value && (!value.name || !value.path)) {
        return {
          valid: false,
          error: {
            blockId,
            blockType,
            field: fieldName,
            value,
            error: `Invalid file-upload object for field "${fieldName}" - must have "name" and "path" properties`,
          },
        }
      }
      return { valid: true, value }
    }

    case 'input-format':
    case 'table': {
      // Should be an array
      if (!Array.isArray(value)) {
        return {
          valid: false,
          error: {
            blockId,
            blockType,
            field: fieldName,
            value,
            error: `Invalid ${type} value for field "${fieldName}" - expected an array`,
          },
        }
      }
      return { valid: true, value }
    }

    case 'tool-input': {
      // Should be an array of tool objects
      if (!Array.isArray(value)) {
        return {
          valid: false,
          error: {
            blockId,
            blockType,
            field: fieldName,
            value,
            error: `Invalid tool-input value for field "${fieldName}" - expected an array of tool objects`,
          },
        }
      }
      return { valid: true, value }
    }

    case 'code': {
      // Code must be a string (content can be JS, Python, JSON, SQL, HTML, etc.)
      if (typeof value !== 'string') {
        return {
          valid: false,
          error: {
            blockId,
            blockType,
            field: fieldName,
            value,
            error: `Invalid code value for field "${fieldName}" - expected a string, got ${typeof value}`,
          },
        }
      }
      return { valid: true, value }
    }

    case 'response-format': {
      // Allow empty/null
      if (value === null || value === undefined || value === '') {
        return { valid: true, value }
      }
      // Allow objects (will be stringified later by normalizeResponseFormat)
      if (typeof value === 'object') {
        return { valid: true, value }
      }
      // If string, must be valid JSON
      if (typeof value === 'string') {
        try {
          JSON.parse(value)
          return { valid: true, value }
        } catch {
          return {
            valid: false,
            error: {
              blockId,
              blockType,
              field: fieldName,
              value,
              error: `Invalid response-format value for field "${fieldName}" - string must be valid JSON`,
            },
          }
        }
      }
      // Reject numbers, booleans, etc.
      return {
        valid: false,
        error: {
          blockId,
          blockType,
          field: fieldName,
          value,
          error: `Invalid response-format value for field "${fieldName}" - expected a JSON string or object`,
        },
      }
    }

    case 'short-input':
    case 'long-input':
    case 'combobox': {
      // Should be string (combobox allows custom values)
      // NOTE: numbers pass through unchanged; only other types are stringified.
      if (typeof value !== 'string' && typeof value !== 'number') {
        // Convert to string but don't error
        return { valid: true, value: String(value) }
      }
      return { valid: true, value }
    }

    // Selector types - allow strings (IDs) or, for multiSelect, arrays of strings
    case 'oauth-input':
    case 'knowledge-base-selector':
    case 'document-selector':
    case 'file-selector':
    case 'project-selector':
    case 'channel-selector':
    case 'folder-selector':
    case 'mcp-server-selector':
    case 'mcp-tool-selector':
    case 'workflow-selector': {
      if (subBlockConfig.multiSelect && Array.isArray(value)) {
        return { valid: true, value }
      }
      if (typeof value === 'string') {
        return { valid: true, value }
      }
      return {
        valid: false,
        error: {
          blockId,
          blockType,
          field: fieldName,
          value,
          error: `Invalid selector value for field "${fieldName}" - expected a string${subBlockConfig.multiSelect ? ' or array of strings' : ''}`,
        },
      }
    }

    default:
      // For unknown types, pass through
      return { valid: true, value }
  }
}

/**
 * Validates a source handle is valid for the given block type.
 * 'error' is universally accepted; loop/parallel/condition/router blocks have
 * their own handle vocabularies; everything else only accepts 'source'.
 */
export function validateSourceHandleForBlock(
  sourceHandle: string,
  sourceBlockType: string,
  sourceBlock: any
): EdgeHandleValidationResult {
  if (sourceHandle === 'error') {
    return { valid: true }
  }

  switch (sourceBlockType) {
    case 'loop':
      if (sourceHandle === 'loop-start-source' || sourceHandle === 'loop-end-source') {
        return { valid: true }
      }
      return {
        valid: false,
        error: `Invalid source handle "${sourceHandle}" for loop block. Valid handles: loop-start-source, loop-end-source, error`,
      }

    case 'parallel':
      if (sourceHandle === 'parallel-start-source' || sourceHandle === 'parallel-end-source') {
        return { valid: true }
      }
      return {
        valid: false,
        error: `Invalid source handle "${sourceHandle}" for parallel block. Valid handles: parallel-start-source, parallel-end-source, error`,
      }

    case 'condition': {
      const conditionsValue = sourceBlock?.subBlocks?.conditions?.value
      if (!conditionsValue) {
        return {
          valid: false,
          error: `Invalid condition handle "${sourceHandle}" - no conditions defined`,
        }
      }

      // validateConditionHandle accepts simple format (if, else-if-0, else),
      // legacy format (condition-{blockId}-if), and internal ID format (condition-{uuid})
      return validateConditionHandle(sourceHandle, sourceBlock.id, conditionsValue)
    }

    case 'router':
      if (sourceHandle === 'source' || sourceHandle.startsWith(EDGE.ROUTER_PREFIX)) {
        return { valid: true }
      }
      return {
        valid: false,
        error: `Invalid source handle "${sourceHandle}" for router block. Valid handles: source, ${EDGE.ROUTER_PREFIX}{targetId}, error`,
      }

    case 'router_v2': {
      const routesValue = sourceBlock?.subBlocks?.routes?.value
      if (!routesValue) {
        return {
          valid: false,
          error: `Invalid router handle "${sourceHandle}" - no routes defined`,
        }
      }

      // validateRouterHandle accepts simple format (route-0, route-1),
      // legacy format (router-{blockId}-route-1), and internal ID format (router-{uuid})
      return validateRouterHandle(sourceHandle, sourceBlock.id, routesValue)
    }

    default:
      if (sourceHandle === 'source') {
        return { valid: true }
      }
      return {
        valid: false,
        error: `Invalid source handle "${sourceHandle}" for ${sourceBlockType} block. Valid handles: source, error`,
      }
  }
}
+ * Accepts multiple formats: + * - Simple format: "if", "else-if-0", "else-if-1", "else" + * - Legacy semantic format: "condition-{blockId}-if", "condition-{blockId}-else-if" + * - Internal ID format: "condition-{conditionId}" + * + * Returns the normalized handle (condition-{conditionId}) for storage. + */ +export function validateConditionHandle( + sourceHandle: string, + blockId: string, + conditionsValue: string | any[] +): EdgeHandleValidationResult { + let conditions: any[] + if (typeof conditionsValue === 'string') { + try { + conditions = JSON.parse(conditionsValue) + } catch { + return { + valid: false, + error: `Cannot validate condition handle "${sourceHandle}" - conditions is not valid JSON`, + } + } + } else if (Array.isArray(conditionsValue)) { + conditions = conditionsValue + } else { + return { + valid: false, + error: `Cannot validate condition handle "${sourceHandle}" - conditions is not an array`, + } + } + + if (!Array.isArray(conditions) || conditions.length === 0) { + return { + valid: false, + error: `Invalid condition handle "${sourceHandle}" - no conditions defined`, + } + } + + // Build a map of all valid handle formats -> normalized handle (condition-{conditionId}) + const handleToNormalized = new Map() + const legacySemanticPrefix = `condition-${blockId}-` + let elseIfIndex = 0 + + for (const condition of conditions) { + if (!condition.id) continue + + const normalizedHandle = `condition-${condition.id}` + const title = condition.title?.toLowerCase() + + // Always accept internal ID format + handleToNormalized.set(normalizedHandle, normalizedHandle) + + if (title === 'if') { + // Simple format: "if" + handleToNormalized.set('if', normalizedHandle) + // Legacy format: "condition-{blockId}-if" + handleToNormalized.set(`${legacySemanticPrefix}if`, normalizedHandle) + } else if (title === 'else if') { + // Simple format: "else-if-0", "else-if-1", etc. 
(0-indexed) + handleToNormalized.set(`else-if-${elseIfIndex}`, normalizedHandle) + // Legacy format: "condition-{blockId}-else-if" for first, "condition-{blockId}-else-if-2" for second + if (elseIfIndex === 0) { + handleToNormalized.set(`${legacySemanticPrefix}else-if`, normalizedHandle) + } else { + handleToNormalized.set( + `${legacySemanticPrefix}else-if-${elseIfIndex + 1}`, + normalizedHandle + ) + } + elseIfIndex++ + } else if (title === 'else') { + // Simple format: "else" + handleToNormalized.set('else', normalizedHandle) + // Legacy format: "condition-{blockId}-else" + handleToNormalized.set(`${legacySemanticPrefix}else`, normalizedHandle) + } + } + + const normalizedHandle = handleToNormalized.get(sourceHandle) + if (normalizedHandle) { + return { valid: true, normalizedHandle } + } + + // Build list of valid simple format options for error message + const simpleOptions: string[] = [] + elseIfIndex = 0 + for (const condition of conditions) { + const title = condition.title?.toLowerCase() + if (title === 'if') { + simpleOptions.push('if') + } else if (title === 'else if') { + simpleOptions.push(`else-if-${elseIfIndex}`) + elseIfIndex++ + } else if (title === 'else') { + simpleOptions.push('else') + } + } + + return { + valid: false, + error: `Invalid condition handle "${sourceHandle}". Valid handles: ${simpleOptions.join(', ')}`, + } +} + +/** + * Validates router handle references a valid route in the block. + * Accepts multiple formats: + * - Simple format: "route-0", "route-1", "route-2" (0-indexed) + * - Legacy semantic format: "router-{blockId}-route-1" (1-indexed) + * - Internal ID format: "router-{routeId}" + * + * Returns the normalized handle (router-{routeId}) for storage. 
+ */ +export function validateRouterHandle( + sourceHandle: string, + blockId: string, + routesValue: string | any[] +): EdgeHandleValidationResult { + let routes: any[] + if (typeof routesValue === 'string') { + try { + routes = JSON.parse(routesValue) + } catch { + return { + valid: false, + error: `Cannot validate router handle "${sourceHandle}" - routes is not valid JSON`, + } + } + } else if (Array.isArray(routesValue)) { + routes = routesValue + } else { + return { + valid: false, + error: `Cannot validate router handle "${sourceHandle}" - routes is not an array`, + } + } + + if (!Array.isArray(routes) || routes.length === 0) { + return { + valid: false, + error: `Invalid router handle "${sourceHandle}" - no routes defined`, + } + } + + // Build a map of all valid handle formats -> normalized handle (router-{routeId}) + const handleToNormalized = new Map() + const legacySemanticPrefix = `router-${blockId}-` + + for (let i = 0; i < routes.length; i++) { + const route = routes[i] + if (!route.id) continue + + const normalizedHandle = `router-${route.id}` + + // Always accept internal ID format: router-{uuid} + handleToNormalized.set(normalizedHandle, normalizedHandle) + + // Simple format: route-0, route-1, etc. 
(0-indexed) + handleToNormalized.set(`route-${i}`, normalizedHandle) + + // Legacy 1-indexed route number format: router-{blockId}-route-1 + handleToNormalized.set(`${legacySemanticPrefix}route-${i + 1}`, normalizedHandle) + + // Accept normalized title format: router-{blockId}-{normalized-title} + if (route.title && typeof route.title === 'string') { + const normalizedTitle = route.title + .toLowerCase() + .replace(/\s+/g, '-') + .replace(/[^a-z0-9-]/g, '') + if (normalizedTitle) { + handleToNormalized.set(`${legacySemanticPrefix}${normalizedTitle}`, normalizedHandle) + } + } + } + + const normalizedHandle = handleToNormalized.get(sourceHandle) + if (normalizedHandle) { + return { valid: true, normalizedHandle } + } + + // Build list of valid simple format options for error message + const simpleOptions = routes.map((_, i) => `route-${i}`) + + return { + valid: false, + error: `Invalid router handle "${sourceHandle}". Valid handles: ${simpleOptions.join(', ')}`, + } +} + +/** + * Validates target handle is valid (must be 'target') + */ +export function validateTargetHandle(targetHandle: string): EdgeHandleValidationResult { + if (targetHandle === 'target') { + return { valid: true } + } + return { + valid: false, + error: `Invalid target handle "${targetHandle}". 
Expected "target"`, + } +} + +/** + * Checks if a block type is allowed by the permission group config + */ +export function isBlockTypeAllowed( + blockType: string, + permissionConfig: PermissionGroupConfig | null +): boolean { + if (!permissionConfig || permissionConfig.allowedIntegrations === null) { + return true + } + return permissionConfig.allowedIntegrations.includes(blockType) +} + +/** + * Validates selector IDs in the workflow state exist in the database + * Returns validation errors for any invalid selector IDs + */ +export async function validateWorkflowSelectorIds( + workflowState: any, + context: { userId: string; workspaceId?: string } +): Promise { + const logger = createLogger('EditWorkflowSelectorValidation') + const errors: ValidationError[] = [] + + // Collect all selector fields from all blocks + const selectorsToValidate: Array<{ + blockId: string + blockType: string + fieldName: string + selectorType: string + value: string | string[] + }> = [] + + for (const [blockId, block] of Object.entries(workflowState.blocks || {})) { + const blockData = block as any + const blockType = blockData.type + if (!blockType) continue + + const blockConfig = getBlock(blockType) + if (!blockConfig) continue + + // Check each subBlock for selector types + for (const subBlockConfig of blockConfig.subBlocks) { + if (!SELECTOR_TYPES.has(subBlockConfig.type)) continue + + // Skip oauth-input - credentials are pre-validated before edit application + // This allows existing collaborator credentials to remain untouched + if (subBlockConfig.type === 'oauth-input') continue + + const subBlockValue = blockData.subBlocks?.[subBlockConfig.id]?.value + if (!subBlockValue) continue + + // Handle comma-separated values for multi-select + let values: string | string[] = subBlockValue + if (typeof subBlockValue === 'string' && subBlockValue.includes(',')) { + values = subBlockValue + .split(',') + .map((v: string) => v.trim()) + .filter(Boolean) + } + + 
selectorsToValidate.push({ + blockId, + blockType, + fieldName: subBlockConfig.id, + selectorType: subBlockConfig.type, + value: values, + }) + } + } + + if (selectorsToValidate.length === 0) { + return errors + } + + logger.info('Validating selector IDs', { + selectorCount: selectorsToValidate.length, + userId: context.userId, + workspaceId: context.workspaceId, + }) + + // Validate each selector field + for (const selector of selectorsToValidate) { + const result = await validateSelectorIds(selector.selectorType, selector.value, context) + + if (result.invalid.length > 0) { + // Include warning info (like available credentials) in the error message for better LLM feedback + const warningInfo = result.warning ? `. ${result.warning}` : '' + errors.push({ + blockId: selector.blockId, + blockType: selector.blockType, + field: selector.fieldName, + value: selector.value, + error: `Invalid ${selector.selectorType} ID(s): ${result.invalid.join(', ')} - ID(s) do not exist or user doesn't have access${warningInfo}`, + }) + } else if (result.warning) { + // Log warnings that don't have errors (shouldn't happen for credentials but may for other selectors) + logger.warn(result.warning, { + blockId: selector.blockId, + fieldName: selector.fieldName, + }) + } + } + + if (errors.length > 0) { + logger.warn('Found invalid selector IDs', { + errorCount: errors.length, + errors: errors.map((e) => ({ blockId: e.blockId, field: e.field, error: e.error })), + }) + } + + return errors +} + +/** + * Pre-validates credential and apiKey inputs in operations before they are applied. + * - Validates oauth-input (credential) IDs belong to the user + * - Filters out apiKey inputs for hosted models when isHosted is true + * - Also validates credentials and apiKeys in nestedNodes (blocks inside loop/parallel) + * Returns validation errors for any removed inputs. 
/**
 * Pre-validates credential and apiKey inputs in operations before they are applied.
 * - Validates oauth-input (credential) IDs belong to the user
 * - Filters out apiKey inputs for hosted models when isHosted is true
 * - Also validates credentials and apiKeys in nestedNodes (blocks inside loop/parallel)
 *
 * The input operations array is never mutated: when anything must be removed,
 * a structuredClone is filtered and returned instead.
 *
 * @param operations - Edit operations to inspect and (possibly) filter.
 * @param context - Caller identity used to check credential ownership.
 * @param workflowState - Optional current state, used to resolve the model of
 *   an edited block when the edit does not change the model itself.
 * @returns The (possibly filtered) operations plus validation errors for any removed inputs.
 */
export async function preValidateCredentialInputs(
  operations: EditWorkflowOperation[],
  context: { userId: string },
  // NOTE(review): generic parameters were stripped in extraction; Record<string, unknown>
  // matches the casts used throughout this function — confirm against the repo.
  workflowState?: Record<string, unknown>
): Promise<{ filteredOperations: EditWorkflowOperation[]; errors: ValidationError[] }> {
  // Lazily imported to avoid pulling feature-flag/provider modules at module load.
  const { isHosted } = await import('@/lib/core/config/feature-flags')
  const { getHostedModels } = await import('@/providers/utils')

  const logger = createLogger('PreValidateCredentials')
  const errors: ValidationError[] = []

  // Collect credential and apiKey inputs that need validation/filtering.
  // operationIndex ties each finding back to its operation in the array.
  const credentialInputs: Array<{
    operationIndex: number
    blockId: string
    blockType: string
    fieldName: string
    value: string
    nestedBlockId?: string
  }> = []

  const hostedApiKeyInputs: Array<{
    operationIndex: number
    blockId: string
    blockType: string
    model: string
    nestedBlockId?: string
  }> = []

  // Only built when running hosted; null disables apiKey filtering entirely.
  const hostedModelsLower = isHosted ? new Set(getHostedModels().map((m) => m.toLowerCase())) : null

  /**
   * Collect credential inputs from a block's inputs based on its block config.
   */
  function collectCredentialInputs(
    blockConfig: ReturnType<typeof getBlock>,
    inputs: Record<string, unknown>,
    opIndex: number,
    blockId: string,
    blockType: string,
    nestedBlockId?: string
  ) {
    if (!blockConfig) return

    for (const subBlockConfig of blockConfig.subBlocks) {
      if (subBlockConfig.type !== 'oauth-input') continue

      const inputValue = inputs[subBlockConfig.id]
      // Only non-empty string credential IDs are candidates for validation.
      if (!inputValue || typeof inputValue !== 'string' || inputValue.trim() === '') continue

      credentialInputs.push({
        operationIndex: opIndex,
        blockId,
        blockType,
        fieldName: subBlockConfig.id,
        value: inputValue,
        nestedBlockId,
      })
    }
  }

  /**
   * Check if apiKey should be filtered for a block with the given model.
   */
  function collectHostedApiKeyInput(
    inputs: Record<string, unknown>,
    modelValue: string | undefined,
    opIndex: number,
    blockId: string,
    blockType: string,
    nestedBlockId?: string
  ) {
    if (!hostedModelsLower || !inputs.apiKey) return
    if (!modelValue || typeof modelValue !== 'string') return

    if (hostedModelsLower.has(modelValue.toLowerCase())) {
      hostedApiKeyInputs.push({
        operationIndex: opIndex,
        blockId,
        blockType,
        model: modelValue,
        nestedBlockId,
      })
    }
  }

  operations.forEach((op, opIndex) => {
    // Process main block inputs
    if (op.params?.inputs && op.params?.type) {
      const blockConfig = getBlock(op.params.type)
      if (blockConfig) {
        // Collect credentials from main block
        collectCredentialInputs(
          blockConfig,
          op.params.inputs as Record<string, unknown>,
          opIndex,
          op.block_id,
          op.params.type
        )

        // Check for apiKey inputs on hosted models
        let modelValue = (op.params.inputs as Record<string, unknown>).model as string | undefined

        // For edit operations, if model is not being changed, check existing block's model
        if (
          !modelValue &&
          op.operation_type === 'edit' &&
          (op.params.inputs as Record<string, unknown>).apiKey &&
          workflowState
        ) {
          const existingBlock = (workflowState.blocks as Record<string, unknown>)?.[op.block_id] as
            | Record<string, unknown>
            | undefined
          const existingSubBlocks = existingBlock?.subBlocks as Record<string, unknown> | undefined
          const existingModelSubBlock = existingSubBlocks?.model as
            | Record<string, unknown>
            | undefined
          modelValue = existingModelSubBlock?.value as string | undefined
        }

        collectHostedApiKeyInput(
          op.params.inputs as Record<string, unknown>,
          modelValue,
          opIndex,
          op.block_id,
          op.params.type
        )
      }
    }

    // Process nested nodes (blocks inside loop/parallel containers)
    const nestedNodes = op.params?.nestedNodes as
      | Record<string, Record<string, unknown>>
      | undefined
    if (nestedNodes) {
      Object.entries(nestedNodes).forEach(([childId, childBlock]) => {
        const childType = childBlock.type as string | undefined
        const childInputs = childBlock.inputs as Record<string, unknown> | undefined
        if (!childType || !childInputs) return

        const childBlockConfig = getBlock(childType)
        if (!childBlockConfig) return

        // Collect credentials from nested block
        collectCredentialInputs(
          childBlockConfig,
          childInputs,
          opIndex,
          op.block_id,
          childType,
          childId
        )

        // Check for apiKey inputs on hosted models in nested block
        const modelValue = childInputs.model as string | undefined
        collectHostedApiKeyInput(childInputs, modelValue, opIndex, op.block_id, childType, childId)
      })
    }
  })

  const hasCredentialsToValidate = credentialInputs.length > 0
  const hasHostedApiKeysToFilter = hostedApiKeyInputs.length > 0

  // Fast path: nothing to validate or filter, return the original array untouched.
  if (!hasCredentialsToValidate && !hasHostedApiKeysToFilter) {
    return { filteredOperations: operations, errors }
  }

  // Deep clone operations so we can modify them without mutating the caller's array.
  const filteredOperations = structuredClone(operations)

  // Filter out apiKey inputs for hosted models and add validation errors
  if (hasHostedApiKeysToFilter) {
    logger.info('Filtering apiKey inputs for hosted models', { count: hostedApiKeyInputs.length })

    for (const apiKeyInput of hostedApiKeyInputs) {
      const op = filteredOperations[apiKeyInput.operationIndex]

      // Handle nested block apiKey filtering
      if (apiKeyInput.nestedBlockId) {
        const nestedNodes = op.params?.nestedNodes as
          | Record<string, Record<string, unknown>>
          | undefined
        const nestedBlock = nestedNodes?.[apiKeyInput.nestedBlockId]
        const nestedInputs = nestedBlock?.inputs as Record<string, unknown> | undefined
        if (nestedInputs?.apiKey) {
          nestedInputs.apiKey = undefined
          logger.debug('Filtered apiKey for hosted model in nested block', {
            parentBlockId: apiKeyInput.blockId,
            nestedBlockId: apiKeyInput.nestedBlockId,
            model: apiKeyInput.model,
          })

          errors.push({
            blockId: apiKeyInput.nestedBlockId,
            blockType: apiKeyInput.blockType,
            field: 'apiKey',
            value: '[redacted]',
            error: `Cannot set API key for hosted model "${apiKeyInput.model}" - API keys are managed by the platform when using hosted models`,
          })
        }
      } else if (op.params?.inputs?.apiKey) {
        // Handle main block apiKey filtering
        op.params.inputs.apiKey = undefined
        logger.debug('Filtered apiKey for hosted model', {
          blockId: apiKeyInput.blockId,
          model: apiKeyInput.model,
        })

        errors.push({
          blockId: apiKeyInput.blockId,
          blockType: apiKeyInput.blockType,
          field: 'apiKey',
          value: '[redacted]',
          error: `Cannot set API key for hosted model "${apiKeyInput.model}" - API keys are managed by the platform when using hosted models`,
        })
      }
    }
  }

  // Validate credential inputs
  if (hasCredentialsToValidate) {
    logger.info('Pre-validating credential inputs', {
      credentialCount: credentialInputs.length,
      userId: context.userId,
    })

    // All IDs are validated in a single batched call; invalid ones are removed below.
    const allCredentialIds = credentialInputs.map((c) => c.value)
    const validationResult = await validateSelectorIds('oauth-input', allCredentialIds, context)
    const invalidSet = new Set(validationResult.invalid)

    if (invalidSet.size > 0) {
      for (const credInput of credentialInputs) {
        if (!invalidSet.has(credInput.value)) continue

        const op = filteredOperations[credInput.operationIndex]

        // Handle nested block credential removal
        if (credInput.nestedBlockId) {
          const nestedNodes = op.params?.nestedNodes as
            | Record<string, Record<string, unknown>>
            | undefined
          const nestedBlock = nestedNodes?.[credInput.nestedBlockId]
          const nestedInputs = nestedBlock?.inputs as Record<string, unknown> | undefined
          if (nestedInputs?.[credInput.fieldName]) {
            delete nestedInputs[credInput.fieldName]
            logger.info('Removed invalid credential from nested block', {
              parentBlockId: credInput.blockId,
              nestedBlockId: credInput.nestedBlockId,
              field: credInput.fieldName,
              invalidValue: credInput.value,
            })
          }
        } else if (op.params?.inputs?.[credInput.fieldName]) {
          // Handle main block credential removal
          delete op.params.inputs[credInput.fieldName]
          logger.info('Removed invalid credential from operation', {
            blockId: credInput.blockId,
            field: credInput.fieldName,
            invalidValue: credInput.value,
          })
        }

        const warningInfo = validationResult.warning ? `. ${validationResult.warning}` : ''
        const errorBlockId = credInput.nestedBlockId ?? credInput.blockId
        errors.push({
          blockId: errorBlockId,
          blockType: credInput.blockType,
          field: credInput.fieldName,
          value: credInput.value,
          error: `Invalid credential ID "${credInput.value}" - credential does not exist or user doesn't have access${warningInfo}`,
        })
      }

      logger.warn('Filtered out invalid credentials', {
        invalidCount: invalidSet.size,
      })
    }
  }

  return { filteredOperations, errors }
}
typed access to block configurations. + * + * It wraps the raw block registry and returns resolved, typed schemas + * that consumers can use without any type assertions. + */ +export class BlockSchemaResolver { + private cache = new Map() + + /** Resolve a single block by type */ + resolveBlock(type: string): ResolvedBlock | null { + const cached = this.cache.get(type) + if (cached) return cached + + const config = getBlock(type) + if (!config) return null + + const resolved = this.buildResolvedBlock(config) + this.cache.set(type, resolved) + return resolved + } + + /** Resolve all available blocks */ + resolveAllBlocks(options?: { includeHidden?: boolean }): ResolvedBlock[] { + const configs = getAllBlocks() + return configs + .filter((config) => options?.includeHidden || !config.hideFromToolbar) + .map((config) => this.resolveBlock(config.type)) + .filter((block): block is ResolvedBlock => block !== null) + } + + /** Clear the cache (call when block registry changes) */ + clearCache(): void { + this.cache.clear() + } + + private buildResolvedBlock(config: BlockConfig): ResolvedBlock { + return { + type: config.type, + name: config.name, + description: config.description, + category: config.category, + icon: config.icon as unknown as ResolvedBlock['icon'], + isTrigger: this.isTriggerBlock(config), + hideFromToolbar: config.hideFromToolbar ?? false, + subBlocks: config.subBlocks.map((subBlock) => this.resolveSubBlock(subBlock)), + outputs: this.resolveOutputs(config), + supportsTriggerMode: this.supportsTriggerMode(config), + hasAdvancedMode: config.subBlocks.some((subBlock) => subBlock.mode === 'advanced'), + raw: config, + } + } + + private resolveSubBlock(sb: SubBlockConfig): ResolvedSubBlock { + const resolved: ResolvedSubBlock = { + id: sb.id, + type: sb.type, + label: sb.title, + placeholder: sb.placeholder, + required: typeof sb.required === 'boolean' ? 
sb.required : undefined, + password: sb.password, + hasCondition: Boolean(sb.condition), + defaultValue: sb.defaultValue, + validation: { + min: sb.min, + max: sb.max, + pattern: this.resolvePattern(sb), + }, + } + + const condition = this.resolveCondition(sb) + if (condition) { + resolved.condition = condition + } + + const options = this.resolveOptions(sb) + if (options.length > 0) { + resolved.options = options + } + + if (!resolved.validation?.min && !resolved.validation?.max && !resolved.validation?.pattern) { + delete resolved.validation + } + + return resolved + } + + private resolveCondition(sb: SubBlockConfig): ResolvedSubBlock['condition'] | undefined { + try { + const condition = typeof sb.condition === 'function' ? sb.condition() : sb.condition + if (!condition || typeof condition !== 'object') { + return undefined + } + + return { + field: String(condition.field), + value: condition.value, + } + } catch (error) { + logger.warn('Failed to resolve sub-block condition', { + subBlockId: sb.id, + error: error instanceof Error ? error.message : String(error), + }) + return undefined + } + } + + private resolveOptions(sb: SubBlockConfig): ResolvedOption[] { + try { + if (Array.isArray(sb.options)) { + return sb.options.map((opt) => { + if (typeof opt === 'string') { + return { label: opt, value: opt } + } + + const label = String(opt.label || opt.id || '') + const value = String(opt.id || opt.label || '') + + return { + label, + value, + id: opt.id, + } + }) + } + + // For function-based or dynamic options, return empty. + // Consumers can evaluate these options if they need runtime resolution. + return [] + } catch (error) { + logger.warn('Failed to resolve sub-block options', { + subBlockId: sb.id, + error: error instanceof Error ? 
error.message : String(error), + }) + return [] + } + } + + private resolveOutputs(config: BlockConfig): ResolvedOutput[] { + try { + // eslint-disable-next-line @typescript-eslint/no-require-imports + const blockOutputs = require('@/lib/workflows/blocks/block-outputs') as { + getBlockOutputPaths: ( + blockType: string, + subBlocks?: Record, + triggerMode?: boolean + ) => string[] + } + + const paths = blockOutputs.getBlockOutputPaths(config.type, {}, false) + return paths.map((path) => ({ + name: path, + type: 'string', + })) + } catch (error) { + logger.warn('Failed to resolve block outputs, using fallback', { + blockType: config.type, + error: error instanceof Error ? error.message : String(error), + }) + return [{ name: 'result', type: 'string' }] + } + } + + private isTriggerBlock(config: BlockConfig): boolean { + try { + // eslint-disable-next-line @typescript-eslint/no-require-imports + const triggerUtils = require('@/lib/workflows/triggers/input-definition-triggers') as { + isInputDefinitionTrigger: (blockType: string) => boolean + } + return triggerUtils.isInputDefinitionTrigger(config.type) + } catch (error) { + logger.warn('Failed to detect trigger block, using fallback', { + blockType: config.type, + error: error instanceof Error ? error.message : String(error), + }) + return config.type === 'starter' + } + } + + private supportsTriggerMode(config: BlockConfig): boolean { + return Boolean( + config.triggerAllowed || + config.subBlocks.some((subBlock) => subBlock.id === 'triggerMode' || subBlock.mode === 'trigger') + ) + } + + private resolvePattern(sb: SubBlockConfig): string | undefined { + const maybePattern = (sb as SubBlockConfig & { pattern?: string }).pattern + return typeof maybePattern === 'string' ? 
maybePattern : undefined + } +} + +/** Singleton resolver instance */ +export const blockSchemaResolver = new BlockSchemaResolver() diff --git a/apps/sim/lib/workflows/blocks/schema-types.ts b/apps/sim/lib/workflows/blocks/schema-types.ts new file mode 100644 index 0000000000..068b3b9370 --- /dev/null +++ b/apps/sim/lib/workflows/blocks/schema-types.ts @@ -0,0 +1,75 @@ +import type { LucideIcon } from 'lucide-react' + +/** A fully resolved block schema with all sub-blocks expanded */ +export interface ResolvedBlock { + type: string + name: string + description?: string + category: string + icon?: LucideIcon + isTrigger: boolean + hideFromToolbar: boolean + + /** Resolved sub-blocks with options, conditions, and validation info */ + subBlocks: ResolvedSubBlock[] + + /** Block-level outputs */ + outputs: ResolvedOutput[] + + /** Whether this block supports trigger mode */ + supportsTriggerMode: boolean + + /** Whether this block has advanced mode */ + hasAdvancedMode: boolean + + /** Raw config reference for consumers that need it */ + raw: unknown +} + +/** A resolved sub-block with options and metadata */ +export interface ResolvedSubBlock { + id: string + type: string + label?: string + placeholder?: string + required?: boolean + password?: boolean + + /** Resolved options (for dropdowns/selectors, etc.) 
*/ + options?: ResolvedOption[] + + /** Whether this sub-block has a condition that controls visibility */ + hasCondition: boolean + + /** Condition details if present */ + condition?: { + field: string + value: unknown + /** Whether condition is currently met (if evaluable statically) */ + met?: boolean + } + + /** Validation constraints */ + validation?: { + min?: number + max?: number + pattern?: string + } + + /** Default value */ + defaultValue?: unknown +} + +/** A resolved option for dropdowns/selectors */ +export interface ResolvedOption { + label: string + value: string + id?: string +} + +/** A resolved output definition */ +export interface ResolvedOutput { + name: string + type: string + description?: string +} From fb4afeb9fcb371cbda381fc08a74f09c843557f7 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Thu, 5 Feb 2026 19:26:47 -0800 Subject: [PATCH 28/72] Fix - cursor makes me sad --- apps/sim/app/api/copilot/chat/route.ts | 4 -- apps/sim/app/api/mcp/copilot/route.ts | 2 - apps/sim/app/api/v1/copilot/chat/route.ts | 4 +- apps/sim/lib/copilot/chat-payload.ts | 39 ++-------------- .../tool-executor/workflow-tools/mutations.ts | 3 +- .../tools/server/blocks/get-block-options.ts | 1 + .../server/blocks/get-blocks-and-tools.ts | 2 +- .../server/blocks/get-blocks-metadata-tool.ts | 2 +- .../server/user/set-environment-variables.ts | 46 +++++-------------- .../workflows/executor/execute-workflow.ts | 4 +- 10 files changed, 26 insertions(+), 81 deletions(-) diff --git a/apps/sim/app/api/copilot/chat/route.ts b/apps/sim/app/api/copilot/chat/route.ts index a048e31d23..cd9ad74368 100644 --- a/apps/sim/app/api/copilot/chat/route.ts +++ b/apps/sim/app/api/copilot/chat/route.ts @@ -205,15 +205,11 @@ export async function POST(req: NextRequest) { userMessageId: userMessageIdToUse, mode, model: selectedModel, - stream, - conversationId: effectiveConversationId, conversationHistory, contexts: agentContexts, fileAttachments, commands, chatId: actualChatId, - 
prefetch, - userName: session?.user?.name || undefined, implicitFeedback, }, { diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index 9f8a0cf136..7e330ff015 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -384,8 +384,6 @@ async function handleBuildToolCall( message: requestText, workflowId: resolved.workflowId, userId, - stream: true, - streamToolCalls: true, model, mode: 'agent', commands: ['fast'], diff --git a/apps/sim/app/api/v1/copilot/chat/route.ts b/apps/sim/app/api/v1/copilot/chat/route.ts index 57def1fb56..d08234cff0 100644 --- a/apps/sim/app/api/v1/copilot/chat/route.ts +++ b/apps/sim/app/api/v1/copilot/chat/route.ts @@ -73,13 +73,11 @@ export async function POST(req: NextRequest) { message: parsed.message, workflowId: resolved.workflowId, userId: auth.userId, - stream: true, - streamToolCalls: true, model: selectedModel, mode: transportMode, messageId: crypto.randomUUID(), version: SIM_AGENT_VERSION, - headless: true, // Enable cross-workflow operations via workflowId params + headless: true, chatId, } diff --git a/apps/sim/lib/copilot/chat-payload.ts b/apps/sim/lib/copilot/chat-payload.ts index 7883f4234d..ab6fcad0bd 100644 --- a/apps/sim/lib/copilot/chat-payload.ts +++ b/apps/sim/lib/copilot/chat-payload.ts @@ -17,15 +17,11 @@ export interface BuildPayloadParams { userMessageId: string mode: string model: string - stream: boolean - conversationId?: string conversationHistory?: unknown[] contexts?: Array<{ type: string; content: string }> fileAttachments?: Array<{ id: string; key: string; size: number; [key: string]: unknown }> commands?: string[] chatId?: string - prefetch?: boolean - userName?: string implicitFeedback?: string } @@ -99,9 +95,9 @@ export async function buildCopilotRequestPayload( } ): Promise> { const { - message, workflowId, userId, userMessageId, mode, stream, - conversationId, conversationHistory = [], contexts, fileAttachments, - commands, 
chatId, prefetch, userName, implicitFeedback, + message, workflowId, userId, userMessageId, mode, + conversationHistory = [], contexts, fileAttachments, + commands, chatId, implicitFeedback, } = params const selectedModel = options.selectedModel @@ -146,29 +142,10 @@ export async function buildCopilotRequestPayload( } let integrationTools: ToolSchema[] = [] - let baseTools: ToolSchema[] = [] let credentials: CredentialsPayload | null = null if (effectiveMode === 'build') { - baseTools = [ - { - name: 'function_execute', - description: - 'Execute JavaScript code to perform calculations, data transformations, API calls, or any programmatic task. Code runs in a secure sandbox with fetch() available. Write plain statements (not wrapped in functions). Example: const res = await fetch(url); const data = await res.json(); return data;', - input_schema: { - type: 'object', - properties: { - code: { - type: 'string', - description: - 'Raw JavaScript statements to execute. Code is auto-wrapped in async context. Use fetch() for HTTP requests. Write like: const res = await fetch(url); return await res.json();', - }, - }, - required: ['code'], - }, - executeLocally: true, - }, - ] + // function_execute sandbox tool is now defined in Go — no need to send it try { const rawCredentials = await getCredentialsServerTool.execute({ workflowId }, { userId }) @@ -231,21 +208,15 @@ export async function buildCopilotRequestPayload( message, workflowId, userId, - stream, - streamToolCalls: true, model: selectedModel, mode: transportMode, messageId: userMessageId, version: SIM_AGENT_VERSION, ...(providerConfig ? { provider: providerConfig } : {}), - ...(conversationId ? { conversationId } : {}), - ...(typeof prefetch === 'boolean' ? { prefetch } : {}), - ...(userName ? { userName } : {}), ...(contexts && contexts.length > 0 ? { context: contexts } : {}), ...(chatId ? { chatId } : {}), ...(processedFileContents.length > 0 ? 
{ fileAttachments: processedFileContents } : {}), - ...(integrationTools.length > 0 ? { tools: integrationTools } : {}), - ...(baseTools.length > 0 ? { baseTools } : {}), + ...(integrationTools.length > 0 ? { integrationTools } : {}), ...(credentials ? { credentials } : {}), ...(commands && commands.length > 0 ? { commands } : {}), } diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts index 1489286945..0b5afbf122 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts @@ -156,7 +156,8 @@ export async function executeRunWorkflow( }, generateRequestId(), params.workflow_input || params.input || undefined, - context.userId + context.userId, + { enabled: true, useDraftState: true } ) return { diff --git a/apps/sim/lib/copilot/tools/server/blocks/get-block-options.ts b/apps/sim/lib/copilot/tools/server/blocks/get-block-options.ts index c93db8b8a1..49c0648b2a 100644 --- a/apps/sim/lib/copilot/tools/server/blocks/get-block-options.ts +++ b/apps/sim/lib/copilot/tools/server/blocks/get-block-options.ts @@ -1,6 +1,7 @@ import { createLogger } from '@sim/logger' import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' import { + GetBlockOptionsInput, type GetBlockOptionsInputType, GetBlockOptionsResult, type GetBlockOptionsResultType, diff --git a/apps/sim/lib/copilot/tools/server/blocks/get-blocks-and-tools.ts b/apps/sim/lib/copilot/tools/server/blocks/get-blocks-and-tools.ts index cf32eea70b..64a4db00ef 100644 --- a/apps/sim/lib/copilot/tools/server/blocks/get-blocks-and-tools.ts +++ b/apps/sim/lib/copilot/tools/server/blocks/get-blocks-and-tools.ts @@ -1,7 +1,7 @@ import { createLogger } from '@sim/logger' import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' import { - type GetBlocksAndToolsInput, + 
GetBlocksAndToolsInput, GetBlocksAndToolsResult, } from '@/lib/copilot/tools/shared/schemas' import { registry as blockRegistry } from '@/blocks/registry' diff --git a/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts b/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts index 374b47c0d3..0db7cebdc1 100644 --- a/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts +++ b/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts @@ -3,7 +3,7 @@ import { join } from 'path' import { createLogger } from '@sim/logger' import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' import { - type GetBlocksMetadataInput, + GetBlocksMetadataInput, GetBlocksMetadataResult, } from '@/lib/copilot/tools/shared/schemas' import { registry as blockRegistry } from '@/blocks/registry' diff --git a/apps/sim/lib/copilot/tools/server/user/set-environment-variables.ts b/apps/sim/lib/copilot/tools/server/user/set-environment-variables.ts index a4f7959b58..000cb65ab8 100644 --- a/apps/sim/lib/copilot/tools/server/user/set-environment-variables.ts +++ b/apps/sim/lib/copilot/tools/server/user/set-environment-variables.ts @@ -1,9 +1,8 @@ import { db } from '@sim/db' -import { workspaceEnvironment } from '@sim/db/schema' +import { environment } from '@sim/db/schema' import { createLogger } from '@sim/logger' import { eq } from 'drizzle-orm' import { z } from 'zod' -import { createPermissionError, verifyWorkflowAccess } from '@/lib/copilot/auth/permissions' import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' import { decryptSecret, encryptSecret } from '@/lib/core/security/encryption' @@ -50,35 +49,16 @@ export const setEnvironmentVariablesServerTool: BaseServerTool) || {} @@ -114,36 +94,34 @@ export const setEnvironmentVariablesServerTool: BaseServerTool Date: Thu, 5 Feb 2026 19:59:00 -0800 Subject: [PATCH 29/72] Fix mcp --- apps/sim/lib/copilot/orchestrator/subagent.ts | 2 +- 
apps/sim/lib/copilot/tools/mcp/definitions.ts | 11 ++++------- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/apps/sim/lib/copilot/orchestrator/subagent.ts b/apps/sim/lib/copilot/orchestrator/subagent.ts index cccf7a70b3..d997fcbb91 100644 --- a/apps/sim/lib/copilot/orchestrator/subagent.ts +++ b/apps/sim/lib/copilot/orchestrator/subagent.ts @@ -59,7 +59,7 @@ export async function orchestrateSubagentStream( 'Content-Type': 'application/json', ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), }, - body: JSON.stringify({ ...requestPayload, stream: true }), + body: JSON.stringify({ ...requestPayload, userId, stream: true }), }, context, execContext, diff --git a/apps/sim/lib/copilot/tools/mcp/definitions.ts b/apps/sim/lib/copilot/tools/mcp/definitions.ts index 4ce44089bb..6ef2857291 100644 --- a/apps/sim/lib/copilot/tools/mcp/definitions.ts +++ b/apps/sim/lib/copilot/tools/mcp/definitions.ts @@ -63,20 +63,17 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ }, { name: 'get_workflow', - toolId: 'get_workflow_from_name', - description: 'Get a workflow by name or ID. Returns the full workflow definition.', + toolId: 'get_user_workflow', + description: 'Get a workflow by ID. 
Returns the full workflow definition.', inputSchema: { type: 'object', properties: { - name: { - type: 'string', - description: 'Workflow name to search for.', - }, workflowId: { type: 'string', - description: 'Workflow ID to retrieve directly.', + description: 'Workflow ID to retrieve.', }, }, + required: ['workflowId'], }, }, { From 08a8e14c7e010009ad1c6445b2f6886de42aa721 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Thu, 5 Feb 2026 20:26:47 -0800 Subject: [PATCH 30/72] Clean up mcp --- apps/sim/app/api/mcp/copilot/route.ts | 154 ++---------- .../orchestrator/tool-executor/index.ts | 9 + .../orchestrator/tool-executor/param-types.ts | 17 ++ .../tool-executor/workflow-tools/mutations.ts | 85 +++++++ .../tool-executor/workflow-tools/queries.ts | 5 +- apps/sim/lib/copilot/tools/mcp/definitions.ts | 238 ++++++++++-------- 6 files changed, 274 insertions(+), 234 deletions(-) diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index 7e330ff015..bf7d09eadf 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -34,144 +34,40 @@ export const dynamic = 'force-dynamic' * the workflow lifecycle and best practices. */ const MCP_SERVER_INSTRUCTIONS = ` -## Sim Workflow Copilot - Usage Guide +## Sim Workflow Copilot -You are interacting with Sim's workflow automation platform. These tools orchestrate specialized AI agents that build workflows. Follow these guidelines carefully. +Sim is a workflow automation platform. Workflows are visual pipelines of connected blocks (Agent, Function, Condition, API, integrations, etc.). The Agent block is the core — an LLM with tools, memory, structured output, and knowledge bases. ---- +### Workflow Lifecycle (Happy Path) -## Platform Knowledge +1. \`list_workspaces\` → know where to work +2. \`create_workflow(name, workspaceId)\` → get a workflowId +3. \`copilot_build(request, workflowId)\` → plan and build in one pass +4. 
\`copilot_test(request, workflowId)\` → verify it works +5. \`copilot_deploy("deploy as api", workflowId)\` → make it accessible externally (optional) -Sim is a workflow automation platform. Workflows are visual pipelines of blocks. +For fine-grained control, use \`copilot_plan\` → \`copilot_edit\` instead of \`copilot_build\`. Pass the plan object from copilot_plan EXACTLY as-is to copilot_edit's context.plan field. -### Block Types +### Working with Existing Workflows -**Core Logic:** -- **Agent** - The heart of Sim (LLM block with tools, memory, structured output, knowledge bases) -- **Function** - JavaScript code execution -- **Condition** - If/else branching -- **Router** - AI-powered content-based routing -- **Loop** - While/do-while iteration -- **Parallel** - Simultaneous execution -- **API** - HTTP requests +When the user refers to a workflow by name or description ("the email one", "my Slack bot"): +1. Use \`copilot_discovery\` to find it by functionality +2. Or use \`list_workflows\` and match by name +3. Then pass the workflowId to other tools -**Integrations (3rd Party):** -- OAuth: Slack, Gmail, Google Calendar, Sheets, Outlook, Linear, GitHub, Notion -- API: Stripe, Twilio, SendGrid, any REST API +### Organization -### The Agent Block +- \`rename_workflow\` — rename a workflow +- \`move_workflow\` — move a workflow into a folder (or root with null) +- \`move_folder\` — nest a folder inside another (or root with null) +- \`create_folder(name, parentId)\` — create nested folder hierarchies -The Agent block is the core of intelligent workflows: -- **Tools** - Add integrations, custom tools, web search to give it capabilities -- **Memory** - Multi-turn conversations with persistent context -- **Structured Output** - JSON schema for reliable parsing -- **Knowledge Bases** - RAG-powered document retrieval +### Key Rules -**Design principle:** Put tools INSIDE agents rather than using standalone tool blocks. 
- -### Triggers - -| Type | Description | -|------|-------------| -| Manual/Chat | User sends message in UI (start block: input, files, conversationId) | -| API | REST endpoint with custom input schema | -| Webhook | External services POST to trigger URL | -| Schedule | Cron-based (hourly, daily, weekly) | - -### Deployments - -| Type | Trigger | Use Case | -|------|---------|----------| -| API | Start block | REST endpoint for programmatic access | -| Chat | Start block | Managed chat UI with auth options | -| MCP | Start block | Expose as MCP tool for AI agents | -| General | Schedule/Webhook | Activate triggers to run automatically | - -**Undeployed workflows only run in the builder UI.** - -### Variable Syntax - -Reference outputs from previous blocks: \`\` -Reference environment variables: \`{{ENV_VAR_NAME}}\` - -Rules: -- Block names must be lowercase, no spaces, no special characters -- Use dot notation for nested fields: \`\` - ---- - -## Workflow Lifecycle - -1. **Create**: For NEW workflows, FIRST call create_workflow to get a workflowId -2. **Plan**: Use copilot_plan with the workflowId to plan the workflow -3. **Edit**: Use copilot_edit with the workflowId AND the plan to build the workflow -4. **Deploy**: ALWAYS deploy after building using copilot_deploy before testing/running -5. **Test**: Use copilot_test to verify the workflow works correctly -6. **Share**: Provide the user with the workflow URL after completion - ---- - -## CRITICAL: Always Pass workflowId - -- For NEW workflows: Call create_workflow FIRST, then use the returned workflowId -- For EXISTING workflows: Pass the workflowId to all copilot tools -- copilot_plan, copilot_edit, copilot_deploy, copilot_test, copilot_debug all REQUIRE workflowId - ---- - -## CRITICAL: How to Handle Plans - -The copilot_plan tool returns a structured plan object. You MUST: - -1. **Do NOT modify the plan**: Pass the plan object EXACTLY as returned to copilot_edit -2. 
**Do NOT interpret or summarize the plan**: The edit agent needs the raw plan data -3. **Pass the plan in the context.plan field**: \`{ "context": { "plan": } }\` -4. **Include ALL plan data**: Block configurations, connections, credentials, everything - -Example flow: -\`\`\` -1. copilot_plan({ request: "build a workflow...", workflowId: "abc123" }) - -> Returns: { "plan": { "blocks": [...], "connections": [...], ... } } - -2. copilot_edit({ - workflowId: "abc123", - message: "Execute the plan", - context: { "plan": } - }) -\`\`\` - -**Why this matters**: The plan contains technical details (block IDs, field mappings, API schemas) that the edit agent needs verbatim. Summarizing or rephrasing loses critical information. - ---- - -## CRITICAL: Error Handling - -**If the user says "doesn't work", "broke", "failed", "error" → ALWAYS use copilot_debug FIRST.** - -Don't guess. Don't plan. Debug first to find the actual problem. - ---- - -## Important Rules - -- ALWAYS deploy a workflow before attempting to run or test it -- Workflows must be deployed to have an "active deployment" for execution -- After building, call copilot_deploy with the appropriate deployment type (api, chat, or mcp) -- Return the workflow URL to the user so they can access it in Sim - ---- - -## Quick Operations (use direct tools) -- list_workflows, list_workspaces, list_folders, get_workflow: Fast database queries -- create_workflow: Create new workflow and get workflowId (CALL THIS FIRST for new workflows) -- create_folder: Create new resources - -## Workflow Building (use copilot tools) -- copilot_plan: Plan workflow changes (REQUIRES workflowId) - returns a plan object -- copilot_edit: Execute the plan (REQUIRES workflowId AND plan from copilot_plan) -- copilot_deploy: Deploy workflows (REQUIRES workflowId) -- copilot_test: Test workflow execution (REQUIRES workflowId) -- copilot_debug: Diagnose errors (REQUIRES workflowId) - USE THIS FIRST for issues +- You can test workflows immediately 
after building — deployment is only needed for external access (API, chat, MCP). +- All copilot tools (build, plan, edit, deploy, test, debug) require workflowId. +- If the user reports errors → use \`copilot_debug\` first, don't guess. +- Variable syntax: \`\` for block outputs, \`{{ENV_VAR}}\` for env vars. ` function createResponse(id: RequestId, result: unknown): JSONRPCResponse { @@ -378,7 +274,6 @@ async function handleBuildToolCall( } const chatId = crypto.randomUUID() - const context = (args.context as Record) || {} const requestPayload = { message: requestText, @@ -391,7 +286,6 @@ async function handleBuildToolCall( version: SIM_AGENT_VERSION, headless: true, chatId, - context, } const result = await orchestrateCopilotStream(requestPayload, { diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts index dbd3a24a99..9ca5c97630 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts @@ -38,6 +38,9 @@ import type { ListFoldersParams, ListUserWorkflowsParams, ListWorkspaceMcpServersParams, + MoveFolderParams, + MoveWorkflowParams, + RenameWorkflowParams, RunWorkflowParams, SetGlobalWorkflowVariablesParams, } from './param-types' @@ -52,6 +55,9 @@ import { executeListFolders, executeListUserWorkflows, executeListUserWorkspaces, + executeMoveFolder, + executeMoveWorkflow, + executeRenameWorkflow, executeRunWorkflow, executeSetGlobalWorkflowVariables, } from './workflow-tools' @@ -85,6 +91,9 @@ const SIM_WORKFLOW_TOOL_HANDLERS: Record< list_folders: (p, c) => executeListFolders(p as ListFoldersParams, c), create_workflow: (p, c) => executeCreateWorkflow(p as CreateWorkflowParams, c), create_folder: (p, c) => executeCreateFolder(p as CreateFolderParams, c), + rename_workflow: (p, c) => executeRenameWorkflow(p as unknown as RenameWorkflowParams, c), + move_workflow: (p, c) => executeMoveWorkflow(p as unknown as 
MoveWorkflowParams, c), + move_folder: (p, c) => executeMoveFolder(p as unknown as MoveFolderParams, c), get_workflow_data: (p, c) => executeGetWorkflowData(p as GetWorkflowDataParams, c), get_block_outputs: (p, c) => executeGetBlockOutputs(p as GetBlockOutputsParams, c), get_block_upstream_references: (p, c) => diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/param-types.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/param-types.ts index 30d5190878..87c78ffbf6 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/param-types.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/param-types.ts @@ -125,3 +125,20 @@ export interface CreateWorkspaceMcpServerParams { isPublic?: boolean workflowIds?: string[] } + +// === Workflow Organization Params === + +export interface RenameWorkflowParams { + workflowId: string + name: string +} + +export interface MoveWorkflowParams { + workflowId: string + folderId: string | null +} + +export interface MoveFolderParams { + folderId: string + parentId: string | null +} diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts index 0b5afbf122..a3c3c0efc5 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts @@ -12,6 +12,9 @@ import { ensureWorkflowAccess, ensureWorkspaceAccess, getDefaultWorkspaceId } fr import type { CreateFolderParams, CreateWorkflowParams, + MoveFolderParams, + MoveWorkflowParams, + RenameWorkflowParams, RunWorkflowParams, SetGlobalWorkflowVariablesParams, VariableOperation, @@ -283,3 +286,85 @@ export async function executeSetGlobalWorkflowVariables( return { success: false, error: error instanceof Error ? 
error.message : String(error) } } } + +export async function executeRenameWorkflow( + params: RenameWorkflowParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const name = typeof params.name === 'string' ? params.name.trim() : '' + if (!name) { + return { success: false, error: 'name is required' } + } + if (name.length > 200) { + return { success: false, error: 'Workflow name must be 200 characters or less' } + } + + await ensureWorkflowAccess(workflowId, context.userId) + + await db + .update(workflow) + .set({ name, updatedAt: new Date() }) + .where(eq(workflow.id, workflowId)) + + return { success: true, output: { workflowId, name } } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeMoveWorkflow( + params: MoveWorkflowParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + + await ensureWorkflowAccess(workflowId, context.userId) + + const folderId = params.folderId || null + + await db + .update(workflow) + .set({ folderId, updatedAt: new Date() }) + .where(eq(workflow.id, workflowId)) + + return { success: true, output: { workflowId, folderId } } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeMoveFolder( + params: MoveFolderParams, + context: ExecutionContext +): Promise { + try { + const folderId = params.folderId + if (!folderId) { + return { success: false, error: 'folderId is required' } + } + + const parentId = params.parentId || null + + if (parentId === folderId) { + return { success: false, error: 'A folder cannot be moved into itself' } + } + + await db + .update(workflowFolder) + .set({ parentId, updatedAt: new Date() }) + .where(eq(workflowFolder.id, folderId)) + + return { success: true, output: { folderId, parentId } } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts index dd4231b975..cc8a724f09 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts @@ -3,7 +3,6 @@ import { customTools, permissions, workflow, workflowFolder, workspace } from '@ import { and, asc, desc, eq, isNull, or } from 'drizzle-orm' import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types' import { - extractWorkflowNames, formatNormalizedWorkflowForCopilot, normalizeWorkflowName, } from '@/lib/copilot/tools/shared/workflow-utils' @@ -114,8 +113,6 @@ export async function executeListUserWorkflows( const workflows = await getAccessibleWorkflowsForUser(context.userId, { workspaceId, folderId }) - const names = extractWorkflowNames(workflows) - const workflowList = workflows.map((w) => ({ workflowId: w.id, workflowName: w.name || '', @@ -123,7 +120,7 @@ export async function executeListUserWorkflows( folderId: w.folderId, })) - return { success: true, output: { workflow_names: names, workflows: workflowList } } + return { success: 
true, output: { workflows: workflowList } } } catch (error) { return { success: false, error: error instanceof Error ? error.message : String(error) } } diff --git a/apps/sim/lib/copilot/tools/mcp/definitions.ts b/apps/sim/lib/copilot/tools/mcp/definitions.ts index 6ef2857291..226662dbf2 100644 --- a/apps/sim/lib/copilot/tools/mcp/definitions.ts +++ b/apps/sim/lib/copilot/tools/mcp/definitions.ts @@ -17,11 +17,21 @@ export type SubagentToolDef = { * These are fast database queries that don't need AI reasoning. */ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ + { + name: 'list_workspaces', + toolId: 'list_user_workspaces', + description: + 'List all workspaces the user has access to. Returns workspace IDs, names, and roles. Use this first to determine which workspace to operate in.', + inputSchema: { + type: 'object', + properties: {}, + }, + }, { name: 'list_workflows', toolId: 'list_user_workflows', description: - 'List all workflows the user has access to. Returns workflow IDs, names, and workspace info.', + 'List all workflows the user has access to. Returns workflow IDs, names, workspace, and folder info. Use workspaceId/folderId to scope results.', inputSchema: { type: 'object', properties: { @@ -36,20 +46,11 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ }, }, }, - { - name: 'list_workspaces', - toolId: 'list_user_workspaces', - description: - 'List all workspaces the user has access to. Returns workspace IDs, names, and roles.', - inputSchema: { - type: 'object', - properties: {}, - }, - }, { name: 'list_folders', toolId: 'list_folders', - description: 'List all folders in a workspace.', + description: + 'List all folders in a workspace. Returns folder IDs, names, and parent relationships for organizing workflows.', inputSchema: { type: 'object', properties: { @@ -64,7 +65,8 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ { name: 'get_workflow', toolId: 'get_user_workflow', - description: 'Get a workflow by ID. 
Returns the full workflow definition.', + description: + 'Get a workflow by ID. Returns the full workflow definition including all blocks, connections, and configuration.', inputSchema: { type: 'object', properties: { @@ -79,7 +81,8 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ { name: 'create_workflow', toolId: 'create_workflow', - description: 'Create a new workflow. Returns the new workflow ID.', + description: + 'Create a new empty workflow. Returns the new workflow ID. Always call this FIRST before copilot_build for new workflows. Use workspaceId to place it in a specific workspace.', inputSchema: { type: 'object', properties: { @@ -106,7 +109,8 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ { name: 'create_folder', toolId: 'create_folder', - description: 'Create a new folder in a workspace.', + description: + 'Create a new folder for organizing workflows. Use parentId to create nested folder hierarchies.', inputSchema: { type: 'object', properties: { @@ -126,6 +130,65 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ required: ['name'], }, }, + { + name: 'rename_workflow', + toolId: 'rename_workflow', + description: 'Rename an existing workflow.', + inputSchema: { + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'The workflow ID to rename.', + }, + name: { + type: 'string', + description: 'The new name for the workflow.', + }, + }, + required: ['workflowId', 'name'], + }, + }, + { + name: 'move_workflow', + toolId: 'move_workflow', + description: + 'Move a workflow into a different folder. 
Set folderId to null to move to the workspace root.', + inputSchema: { + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'The workflow ID to move.', + }, + folderId: { + type: ['string', 'null'], + description: 'Target folder ID, or null to move to workspace root.', + }, + }, + required: ['workflowId', 'folderId'], + }, + }, + { + name: 'move_folder', + toolId: 'move_folder', + description: + 'Move a folder into another folder. Set parentId to null to move to the workspace root.', + inputSchema: { + type: 'object', + properties: { + folderId: { + type: 'string', + description: 'The folder ID to move.', + }, + parentId: { + type: ['string', 'null'], + description: 'Target parent folder ID, or null to move to workspace root.', + }, + }, + required: ['folderId', 'parentId'], + }, + }, ] export const SUBAGENT_TOOL_DEFS: SubagentToolDef[] = [ @@ -151,15 +214,15 @@ CAN DO: - Set environment variables and workflow variables CANNOT DO: -- Run or test workflows (use copilot_test separately after deploying) +- Run or test workflows (use copilot_test separately) - Deploy workflows (use copilot_deploy separately) WORKFLOW: 1. Call create_workflow to get a workflowId (for new workflows) 2. Call copilot_build with the request and workflowId 3. Build agent gathers info and builds in one pass -4. Call copilot_deploy to deploy the workflow -5. Optionally call copilot_test to verify it works`, +4. Call copilot_test to verify it works +5. Optionally call copilot_deploy to make it externally accessible`, inputSchema: { type: 'object', properties: { @@ -205,13 +268,11 @@ DO NOT USE (use direct tools instead): { name: 'copilot_plan', agentId: 'plan', - description: `Plan workflow changes by gathering required information. + description: `Plan workflow changes by gathering required information. For most cases, prefer copilot_build which combines planning and editing in one step. 
USE THIS WHEN: -- Building a new workflow -- Modifying an existing workflow -- You need to understand what blocks and integrations are available -- The workflow requires multiple blocks or connections +- You need fine-grained control over the build process +- You want to inspect the plan before executing it WORKFLOW ID (REQUIRED): - For NEW workflows: First call create_workflow to get a workflowId, then pass it here @@ -241,23 +302,16 @@ IMPORTANT: Pass the returned plan EXACTLY to copilot_edit - do not modify or sum { name: 'copilot_edit', agentId: 'edit', - description: `Execute a workflow plan and apply edits. - -USE THIS WHEN: -- You have a plan from copilot_plan that needs to be executed -- Building or modifying a workflow based on the plan -- Making changes to blocks, connections, or configurations + description: `Execute a workflow plan from copilot_plan. For most cases, prefer copilot_build which combines planning and editing in one step. WORKFLOW ID (REQUIRED): - You MUST provide the workflowId parameter -- For new workflows, get the workflowId from create_workflow first PLAN (REQUIRED): - Pass the EXACT plan object from copilot_plan in the context.plan field - Do NOT modify, summarize, or interpret the plan - pass it verbatim -- The plan contains technical details the edit agent needs exactly as-is -IMPORTANT: After copilot_edit completes, you MUST call copilot_deploy before the workflow can be run or tested.`, +After copilot_edit completes, you can test immediately with copilot_test, or deploy with copilot_deploy to make it accessible externally.`, inputSchema: { type: 'object', properties: { @@ -281,43 +335,15 @@ IMPORTANT: After copilot_edit completes, you MUST call copilot_deploy before the required: ['workflowId'], }, }, - { - name: 'copilot_debug', - agentId: 'debug', - description: `Diagnose errors or unexpected workflow behavior. 
- -WORKFLOW ID (REQUIRED): Always provide the workflowId of the workflow to debug.`, - inputSchema: { - type: 'object', - properties: { - error: { type: 'string', description: 'The error message or description of the issue.' }, - workflowId: { type: 'string', description: 'REQUIRED. The workflow ID to debug.' }, - context: { type: 'object' }, - }, - required: ['error', 'workflowId'], - }, - }, { name: 'copilot_deploy', agentId: 'deploy', - description: `Deploy or manage workflow deployments. - -CRITICAL: You MUST deploy a workflow after building before it can be run or tested. -Workflows without an active deployment will fail with "no active deployment" error. - -WORKFLOW ID (REQUIRED): -- Always provide the workflowId parameter -- This must match the workflow you built with copilot_edit - -USE THIS: -- After copilot_edit completes to activate the workflow -- To update deployment settings -- To redeploy after making changes + description: `Deploy a workflow to make it accessible externally. Workflows can be tested without deploying, but deployment is needed for API access, chat UIs, or MCP exposure. DEPLOYMENT TYPES: -- "deploy as api" - REST API endpoint -- "deploy as chat" - Chat interface -- "deploy as mcp" - MCP server`, +- "deploy as api" - REST API endpoint for programmatic access +- "deploy as chat" - Managed chat UI with auth options +- "deploy as mcp" - Expose as MCP tool for AI agents`, inputSchema: { type: 'object', properties: { @@ -334,10 +360,43 @@ DEPLOYMENT TYPES: required: ['request', 'workflowId'], }, }, + { + name: 'copilot_test', + agentId: 'test', + description: `Run a workflow and verify its outputs. Works on both deployed and undeployed (draft) workflows. Use after building to verify correctness.`, + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + workflowId: { + type: 'string', + description: 'REQUIRED. 
The workflow ID to test.', + }, + context: { type: 'object' }, + }, + required: ['request', 'workflowId'], + }, + }, + { + name: 'copilot_debug', + agentId: 'debug', + description: + 'Diagnose errors or unexpected workflow behavior. Provide the error message and workflowId. Returns root cause analysis and fix suggestions.', + inputSchema: { + type: 'object', + properties: { + error: { type: 'string', description: 'The error message or description of the issue.' }, + workflowId: { type: 'string', description: 'REQUIRED. The workflow ID to debug.' }, + context: { type: 'object' }, + }, + required: ['error', 'workflowId'], + }, + }, { name: 'copilot_auth', agentId: 'auth', - description: 'Handle OAuth connection flows.', + description: + 'Check OAuth connection status, list connected services, and initiate new OAuth connections. Use when a workflow needs third-party service access (Google, Slack, GitHub, etc.).', inputSchema: { type: 'object', properties: { @@ -350,7 +409,8 @@ DEPLOYMENT TYPES: { name: 'copilot_knowledge', agentId: 'knowledge', - description: 'Create and manage knowledge bases.', + description: + 'Manage knowledge bases for RAG-powered document retrieval. Supports listing, creating, updating, and deleting knowledge bases. Knowledge bases can be attached to agent blocks for context-aware responses.', inputSchema: { type: 'object', properties: { @@ -363,7 +423,8 @@ DEPLOYMENT TYPES: { name: 'copilot_custom_tool', agentId: 'custom_tool', - description: 'Create or manage custom tools.', + description: + 'Manage custom tools (reusable API integrations). Supports listing, creating, updating, and deleting custom tools. 
Custom tools can be added to agent blocks as callable functions.', inputSchema: { type: 'object', properties: { @@ -376,7 +437,8 @@ DEPLOYMENT TYPES: { name: 'copilot_info', agentId: 'info', - description: 'Inspect blocks, outputs, and workflow metadata.', + description: + 'Inspect a workflow\'s blocks, connections, outputs, variables, and metadata. Always provide workflowId to scope results to a specific workflow.', inputSchema: { type: 'object', properties: { @@ -390,7 +452,8 @@ DEPLOYMENT TYPES: { name: 'copilot_workflow', agentId: 'workflow', - description: 'Manage workflow environment and configuration.', + description: + 'Manage workflow-level configuration: environment variables, settings, scheduling, and deployment status.', inputSchema: { type: 'object', properties: { @@ -404,7 +467,8 @@ DEPLOYMENT TYPES: { name: 'copilot_research', agentId: 'research', - description: 'Research external APIs and documentation.', + description: + 'Research external APIs and documentation. Use when building workflows that integrate with third-party services and you need to understand their API, authentication, or data formats.', inputSchema: { type: 'object', properties: { @@ -417,7 +481,8 @@ DEPLOYMENT TYPES: { name: 'copilot_tour', agentId: 'tour', - description: 'Explain platform features and usage.', + description: + 'Explain platform features, concepts, and usage patterns. Use when the user asks "how does X work?" about the Sim platform, block types, triggers, deployments, or workflow concepts.', inputSchema: { type: 'object', properties: { @@ -427,38 +492,11 @@ DEPLOYMENT TYPES: required: ['request'], }, }, - { - name: 'copilot_test', - agentId: 'test', - description: `Run workflows and verify outputs. - -PREREQUISITE: The workflow MUST be deployed first using copilot_deploy. -Undeployed workflows will fail with "no active deployment" error. 
- -WORKFLOW ID (REQUIRED): -- Always provide the workflowId parameter - -USE THIS: -- After deploying to verify the workflow works correctly -- To test with sample inputs -- To validate workflow behavior before sharing with user`, - inputSchema: { - type: 'object', - properties: { - request: { type: 'string' }, - workflowId: { - type: 'string', - description: 'REQUIRED. The workflow ID to test.', - }, - context: { type: 'object' }, - }, - required: ['request', 'workflowId'], - }, - }, { name: 'copilot_superagent', agentId: 'superagent', - description: 'Execute direct external actions (email, Slack, etc.).', + description: + 'Execute direct actions NOW: send an email, post to Slack, make an API call, etc. Use when the user wants to DO something immediately rather than build a workflow for it.', inputSchema: { type: 'object', properties: { From fd1e61b02222ca95a6c5cf27b752fac4524829c3 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Thu, 5 Feb 2026 20:37:52 -0800 Subject: [PATCH 31/72] Updated mcp --- apps/sim/lib/copilot/tools/mcp/definitions.ts | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/apps/sim/lib/copilot/tools/mcp/definitions.ts b/apps/sim/lib/copilot/tools/mcp/definitions.ts index 226662dbf2..f54273ce6d 100644 --- a/apps/sim/lib/copilot/tools/mcp/definitions.ts +++ b/apps/sim/lib/copilot/tools/mcp/definitions.ts @@ -153,7 +153,7 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ name: 'move_workflow', toolId: 'move_workflow', description: - 'Move a workflow into a different folder. Set folderId to null to move to the workspace root.', + 'Move a workflow into a different folder. 
Omit folderId or pass empty string to move to workspace root.', inputSchema: { type: 'object', properties: { @@ -162,18 +162,18 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ description: 'The workflow ID to move.', }, folderId: { - type: ['string', 'null'], - description: 'Target folder ID, or null to move to workspace root.', + type: 'string', + description: 'Target folder ID. Omit or pass empty string to move to workspace root.', }, }, - required: ['workflowId', 'folderId'], + required: ['workflowId'], }, }, { name: 'move_folder', toolId: 'move_folder', description: - 'Move a folder into another folder. Set parentId to null to move to the workspace root.', + 'Move a folder into another folder. Omit parentId or pass empty string to move to workspace root.', inputSchema: { type: 'object', properties: { @@ -182,11 +182,11 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ description: 'The folder ID to move.', }, parentId: { - type: ['string', 'null'], - description: 'Target parent folder ID, or null to move to workspace root.', + type: 'string', + description: 'Target parent folder ID. 
Omit or pass empty string to move to workspace root.', }, }, - required: ['folderId', 'parentId'], + required: ['folderId'], }, }, ] From 69bdffad938b699f1b8249d9921e2a4c3502ab0c Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Thu, 5 Feb 2026 20:55:50 -0800 Subject: [PATCH 32/72] Add respond to subagents --- apps/sim/lib/copilot/orchestrator/config.ts | 6 ++++++ .../orchestrator/sse-handlers/handlers.ts | 20 +++++++++++++++---- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/apps/sim/lib/copilot/orchestrator/config.ts b/apps/sim/lib/copilot/orchestrator/config.ts index 6658ca6b93..76bf83bd4a 100644 --- a/apps/sim/lib/copilot/orchestrator/config.ts +++ b/apps/sim/lib/copilot/orchestrator/config.ts @@ -52,6 +52,12 @@ export const RESPOND_TOOL_NAMES = [ 'deploy_respond', 'superagent_respond', 'discovery_respond', + 'tour_respond', + 'auth_respond', + 'workflow_respond', + 'knowledge_respond', + 'custom_tool_respond', + 'test_respond', ] as const export const RESPOND_TOOL_SET = new Set(RESPOND_TOOL_NAMES) diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts index 84da658b9d..b641b10f8f 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts @@ -248,8 +248,14 @@ export const sseHandlers: Record = { context.currentThinkingBlock.content = `${context.currentThinkingBlock.content || ''}${chunk}` }, content: (event, context) => { - const d = asRecord(event.data) - const chunk = (d.content || d.data || event.content) as string | undefined + // Go backend sends content as a plain string in event.data, not wrapped in an object. 
+ let chunk: string | undefined + if (typeof event.data === 'string') { + chunk = event.data + } else { + const d = asRecord(event.data) + chunk = (d.content || d.data || event.content) as string | undefined + } if (!chunk) return context.accumulatedContent += chunk addContentBlock(context, { type: 'text', content: chunk }) @@ -281,8 +287,14 @@ export const subAgentHandlers: Record = { content: (event, context) => { const parentToolCallId = context.subAgentParentToolCallId if (!parentToolCallId || !event.data) return - const d = asRecord(event.data) - const chunk = (d.content || d.data || event.content) as string | undefined + // Go backend sends content as a plain string in event.data + let chunk: string | undefined + if (typeof event.data === 'string') { + chunk = event.data + } else { + const d = asRecord(event.data) + chunk = (d.content || d.data || event.content) as string | undefined + } if (!chunk) return context.subAgentContent[parentToolCallId] = (context.subAgentContent[parentToolCallId] || '') + chunk From 74f863a029208329c699dfa86760eb52a7ae2de2 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Thu, 5 Feb 2026 21:04:59 -0800 Subject: [PATCH 33/72] Fix definitions --- apps/sim/lib/copilot/tools/mcp/definitions.ts | 20 +++---------------- 1 file changed, 3 insertions(+), 17 deletions(-) diff --git a/apps/sim/lib/copilot/tools/mcp/definitions.ts b/apps/sim/lib/copilot/tools/mcp/definitions.ts index f54273ce6d..3ad8a1e44f 100644 --- a/apps/sim/lib/copilot/tools/mcp/definitions.ts +++ b/apps/sim/lib/copilot/tools/mcp/definitions.ts @@ -438,7 +438,7 @@ DEPLOYMENT TYPES: name: 'copilot_info', agentId: 'info', description: - 'Inspect a workflow\'s blocks, connections, outputs, variables, and metadata. Always provide workflowId to scope results to a specific workflow.', + 'Inspect a workflow\'s blocks, connections, outputs, variables, and metadata. 
Use for questions about the Sim platform itself — how blocks work, what integrations are available, platform concepts, etc. Always provide workflowId to scope results to a specific workflow.', inputSchema: { type: 'object', properties: { @@ -453,7 +453,7 @@ DEPLOYMENT TYPES: name: 'copilot_workflow', agentId: 'workflow', description: - 'Manage workflow-level configuration: environment variables, settings, scheduling, and deployment status.', + 'Manage workflow-level configuration: environment variables, settings, scheduling, and deployment status. Use for any data about a specific workflow — its settings, credentials, variables, or deployment state.', inputSchema: { type: 'object', properties: { @@ -468,21 +468,7 @@ DEPLOYMENT TYPES: name: 'copilot_research', agentId: 'research', description: - 'Research external APIs and documentation. Use when building workflows that integrate with third-party services and you need to understand their API, authentication, or data formats.', - inputSchema: { - type: 'object', - properties: { - request: { type: 'string' }, - context: { type: 'object' }, - }, - required: ['request'], - }, - }, - { - name: 'copilot_tour', - agentId: 'tour', - description: - 'Explain platform features, concepts, and usage patterns. Use when the user asks "how does X work?" about the Sim platform, block types, triggers, deployments, or workflow concepts.', + 'Research external APIs and documentation. Use when you need to understand third-party services, external APIs, authentication flows, or data formats OUTSIDE of Sim. 
For questions about Sim itself, use copilot_info instead.', inputSchema: { type: 'object', properties: { From 3dcd0087dd62e3b1024c9824622e588df42b7a6b Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Fri, 6 Feb 2026 11:44:27 -0800 Subject: [PATCH 34/72] Add tools --- apps/sim/executor/execution/engine.ts | 23 ++ apps/sim/executor/types.ts | 2 + apps/sim/lib/copilot/orchestrator/config.ts | 4 + .../orchestrator/tool-executor/index.ts | 17 ++ .../orchestrator/tool-executor/param-types.ts | 43 +++ .../tool-executor/workflow-tools/mutations.ts | 258 +++++++++++++++++- .../tool-executor/workflow-tools/queries.ts | 53 +++- apps/sim/lib/copilot/tools/mcp/definitions.ts | 152 +++++++++++ apps/sim/lib/logs/execution/logger.ts | 4 + .../sim/lib/logs/execution/logging-session.ts | 6 +- apps/sim/lib/logs/types.ts | 2 + .../workflows/executor/execute-workflow.ts | 11 +- .../lib/workflows/executor/execution-core.ts | 1 + .../lib/workflows/executor/execution-state.ts | 53 ++++ 14 files changed, 624 insertions(+), 5 deletions(-) create mode 100644 apps/sim/lib/workflows/executor/execution-state.ts diff --git a/apps/sim/executor/execution/engine.ts b/apps/sim/executor/execution/engine.ts index 47afd8b032..c0d015d5eb 100644 --- a/apps/sim/executor/execution/engine.ts +++ b/apps/sim/executor/execution/engine.ts @@ -4,6 +4,7 @@ import { BlockType } from '@/executor/constants' import type { DAG } from '@/executor/dag/builder' import type { EdgeManager } from '@/executor/execution/edge-manager' import { serializePauseSnapshot } from '@/executor/execution/snapshot-serializer' +import type { SerializableExecutionState } from '@/executor/execution/types' import type { NodeExecutionOrchestrator } from '@/executor/orchestrators/node' import type { ExecutionContext, @@ -135,6 +136,7 @@ export class ExecutionEngine { success: false, output: this.finalOutput, logs: this.context.blockLogs, + executionState: this.getSerializableExecutionState(), metadata: this.context.metadata, status: 
'cancelled', } @@ -144,6 +146,7 @@ export class ExecutionEngine { success: true, output: this.finalOutput, logs: this.context.blockLogs, + executionState: this.getSerializableExecutionState(), metadata: this.context.metadata, } } catch (error) { @@ -157,6 +160,7 @@ export class ExecutionEngine { success: false, output: this.finalOutput, logs: this.context.blockLogs, + executionState: this.getSerializableExecutionState(), metadata: this.context.metadata, status: 'cancelled', } @@ -459,6 +463,7 @@ export class ExecutionEngine { success: true, output: this.collectPauseResponses(), logs: this.context.blockLogs, + executionState: this.getSerializableExecutionState(snapshotSeed), metadata: this.context.metadata, status: 'paused', pausePoints, @@ -466,6 +471,24 @@ export class ExecutionEngine { } } + private getSerializableExecutionState( + snapshotSeed?: { snapshot: string } + ): SerializableExecutionState | undefined { + try { + const serializedSnapshot = + snapshotSeed?.snapshot ?? serializePauseSnapshot(this.context, [], this.dag).snapshot + const parsedSnapshot = JSON.parse(serializedSnapshot) as { + state?: SerializableExecutionState + } + return parsedSnapshot.state + } catch (error) { + logger.warn('Failed to serialize execution state', { + error: error instanceof Error ? 
error.message : String(error), + }) + return undefined + } + } + private collectPauseResponses(): NormalizedBlockOutput { const responses = Array.from(this.pausedBlocks.values()).map((pause) => pause.response) diff --git a/apps/sim/executor/types.ts b/apps/sim/executor/types.ts index 10c1996b35..b8bcb70f1e 100644 --- a/apps/sim/executor/types.ts +++ b/apps/sim/executor/types.ts @@ -1,6 +1,7 @@ import type { TraceSpan } from '@/lib/logs/types' import type { PermissionGroupConfig } from '@/lib/permission-groups/types' import type { BlockOutput } from '@/blocks/types' +import type { SerializableExecutionState } from '@/executor/execution/types' import type { RunFromBlockContext } from '@/executor/utils/run-from-block' import type { SerializedBlock, SerializedWorkflow } from '@/serializer/types' @@ -302,6 +303,7 @@ export interface ExecutionResult { output: NormalizedBlockOutput error?: string logs?: BlockLog[] + executionState?: SerializableExecutionState metadata?: ExecutionMetadata status?: 'completed' | 'paused' | 'cancelled' pausePoints?: PausePoint[] diff --git a/apps/sim/lib/copilot/orchestrator/config.ts b/apps/sim/lib/copilot/orchestrator/config.ts index 76bf83bd4a..9e3dc1a282 100644 --- a/apps/sim/lib/copilot/orchestrator/config.ts +++ b/apps/sim/lib/copilot/orchestrator/config.ts @@ -1,6 +1,9 @@ export const INTERRUPT_TOOL_NAMES = [ 'set_global_workflow_variables', 'run_workflow', + 'run_workflow_until_block', + 'run_from_block', + 'run_block', 'manage_mcp_tool', 'manage_custom_tool', 'deploy_mcp', @@ -12,6 +15,7 @@ export const INTERRUPT_TOOL_NAMES = [ 'oauth_request_access', 'navigate_ui', 'knowledge_base', + 'generate_api_key', ] as const export const INTERRUPT_TOOL_SET = new Set(INTERRUPT_TOOL_NAMES) diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts index 9ca5c97630..cb175f27ef 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts +++ 
b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts @@ -30,8 +30,10 @@ import type { DeployApiParams, DeployChatParams, DeployMcpParams, + GenerateApiKeyParams, GetBlockOutputsParams, GetBlockUpstreamReferencesParams, + GetDeployedWorkflowStateParams, GetUserWorkflowParams, GetWorkflowDataParams, GetWorkflowFromNameParams, @@ -41,14 +43,19 @@ import type { MoveFolderParams, MoveWorkflowParams, RenameWorkflowParams, + RunBlockParams, + RunFromBlockParams, RunWorkflowParams, + RunWorkflowUntilBlockParams, SetGlobalWorkflowVariablesParams, } from './param-types' import { executeCreateFolder, executeCreateWorkflow, + executeGenerateApiKey, executeGetBlockOutputs, executeGetBlockUpstreamReferences, + executeGetDeployedWorkflowState, executeGetUserWorkflow, executeGetWorkflowData, executeGetWorkflowFromName, @@ -58,7 +65,10 @@ import { executeMoveFolder, executeMoveWorkflow, executeRenameWorkflow, + executeRunBlock, + executeRunFromBlock, executeRunWorkflow, + executeRunWorkflowUntilBlock, executeSetGlobalWorkflowVariables, } from './workflow-tools' @@ -99,6 +109,13 @@ const SIM_WORKFLOW_TOOL_HANDLERS: Record< get_block_upstream_references: (p, c) => executeGetBlockUpstreamReferences(p as unknown as GetBlockUpstreamReferencesParams, c), run_workflow: (p, c) => executeRunWorkflow(p as RunWorkflowParams, c), + run_workflow_until_block: (p, c) => + executeRunWorkflowUntilBlock(p as unknown as RunWorkflowUntilBlockParams, c), + run_from_block: (p, c) => executeRunFromBlock(p as unknown as RunFromBlockParams, c), + run_block: (p, c) => executeRunBlock(p as unknown as RunBlockParams, c), + get_deployed_workflow_state: (p, c) => + executeGetDeployedWorkflowState(p as GetDeployedWorkflowStateParams, c), + generate_api_key: (p, c) => executeGenerateApiKey(p as unknown as GenerateApiKeyParams, c), set_global_workflow_variables: (p, c) => executeSetGlobalWorkflowVariables(p as SetGlobalWorkflowVariablesParams, c), deploy_api: (p, c) => executeDeployApi(p as DeployApiParams, 
c), diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/param-types.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/param-types.ts index 87c78ffbf6..1f49ab616e 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/param-types.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/param-types.ts @@ -57,6 +57,49 @@ export interface RunWorkflowParams { workflowId?: string workflow_input?: unknown input?: unknown + /** When true, runs the deployed version instead of the draft. Default: false (draft). */ + useDeployedState?: boolean +} + +export interface RunWorkflowUntilBlockParams { + workflowId?: string + workflow_input?: unknown + input?: unknown + /** The block ID to stop after. Execution halts once this block completes. */ + stopAfterBlockId: string + /** When true, runs the deployed version instead of the draft. Default: false (draft). */ + useDeployedState?: boolean +} + +export interface RunFromBlockParams { + workflowId?: string + /** The block ID to start execution from. */ + startBlockId: string + /** Optional execution ID to load the snapshot from. If omitted, uses the latest execution. */ + executionId?: string + workflow_input?: unknown + input?: unknown + useDeployedState?: boolean +} + +export interface RunBlockParams { + workflowId?: string + /** The block ID to run. Only this block executes using cached upstream outputs. */ + blockId: string + /** Optional execution ID to load the snapshot from. If omitted, uses the latest execution. 
*/ + executionId?: string + workflow_input?: unknown + input?: unknown + useDeployedState?: boolean +} + +export interface GetDeployedWorkflowStateParams { + workflowId?: string +} + +export interface GenerateApiKeyParams { + name: string + workspaceId?: string } export interface VariableOperation { diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts index a3c3c0efc5..80e1c0a234 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts @@ -1,21 +1,31 @@ import crypto from 'crypto' +import { nanoid } from 'nanoid' import { createLogger } from '@sim/logger' import { db } from '@sim/db' -import { workflow, workflowFolder } from '@sim/db/schema' +import { apiKey, workflow, workflowFolder } from '@sim/db/schema' import { and, eq, isNull, max } from 'drizzle-orm' import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types' +import { createApiKey } from '@/lib/api-key/auth' import { generateRequestId } from '@/lib/core/utils/request' import { buildDefaultWorkflowArtifacts } from '@/lib/workflows/defaults' import { executeWorkflow } from '@/lib/workflows/executor/execute-workflow' import { saveWorkflowToNormalizedTables } from '@/lib/workflows/persistence/utils' import { ensureWorkflowAccess, ensureWorkspaceAccess, getDefaultWorkspaceId } from '../access' +import { + getExecutionState, + getLatestExecutionState, +} from '@/lib/workflows/executor/execution-state' import type { CreateFolderParams, CreateWorkflowParams, + GenerateApiKeyParams, MoveFolderParams, MoveWorkflowParams, RenameWorkflowParams, + RunBlockParams, + RunFromBlockParams, RunWorkflowParams, + RunWorkflowUntilBlockParams, SetGlobalWorkflowVariablesParams, VariableOperation, } from '../param-types' @@ -150,6 +160,8 @@ export async function 
executeRunWorkflow( const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const useDraftState = !params.useDeployedState + const result = await executeWorkflow( { id: workflowRecord.id, @@ -160,7 +172,7 @@ export async function executeRunWorkflow( generateRequestId(), params.workflow_input || params.input || undefined, context.userId, - { enabled: true, useDraftState: true } + { enabled: true, useDraftState } ) return { @@ -368,3 +380,245 @@ export async function executeMoveFolder( return { success: false, error: error instanceof Error ? error.message : String(error) } } } + +export async function executeRunWorkflowUntilBlock( + params: RunWorkflowUntilBlockParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + if (!params.stopAfterBlockId) { + return { success: false, error: 'stopAfterBlockId is required' } + } + + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + + const useDraftState = !params.useDeployedState + + const result = await executeWorkflow( + { + id: workflowRecord.id, + userId: workflowRecord.userId, + workspaceId: workflowRecord.workspaceId, + variables: workflowRecord.variables || {}, + }, + generateRequestId(), + params.workflow_input || params.input || undefined, + context.userId, + { enabled: true, useDraftState, stopAfterBlockId: params.stopAfterBlockId } + ) + + return { + success: result.success, + output: { + executionId: result.metadata?.executionId, + success: result.success, + stoppedAfterBlockId: params.stopAfterBlockId, + output: result.output, + logs: result.logs, + }, + error: result.success ? undefined : result.error || 'Workflow execution failed', + } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeGenerateApiKey( + params: GenerateApiKeyParams, + context: ExecutionContext +): Promise { + try { + const name = typeof params.name === 'string' ? params.name.trim() : '' + if (!name) { + return { success: false, error: 'name is required' } + } + if (name.length > 200) { + return { success: false, error: 'API key name must be 200 characters or less' } + } + + const workspaceId = params.workspaceId || (await getDefaultWorkspaceId(context.userId)) + await ensureWorkspaceAccess(workspaceId, context.userId, true) + + const existingKey = await db + .select({ id: apiKey.id }) + .from(apiKey) + .where( + and( + eq(apiKey.workspaceId, workspaceId), + eq(apiKey.name, name), + eq(apiKey.type, 'workspace') + ) + ) + .limit(1) + + if (existingKey.length > 0) { + return { + success: false, + error: `A workspace API key named "${name}" already exists. Choose a different name.`, + } + } + + const { key: plainKey, encryptedKey } = await createApiKey(true) + if (!encryptedKey) { + return { success: false, error: 'Failed to encrypt API key for storage' } + } + + const [newKey] = await db + .insert(apiKey) + .values({ + id: nanoid(), + workspaceId, + userId: context.userId, + createdBy: context.userId, + name, + key: encryptedKey, + type: 'workspace', + createdAt: new Date(), + updatedAt: new Date(), + }) + .returning({ id: apiKey.id, name: apiKey.name, createdAt: apiKey.createdAt }) + + return { + success: true, + output: { + id: newKey.id, + name: newKey.name, + key: plainKey, + workspaceId, + message: + 'API key created successfully. Copy this key now — it will not be shown again. Use this key in the x-api-key header when calling workflow API endpoints.', + }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeRunFromBlock( + params: RunFromBlockParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + if (!params.startBlockId) { + return { success: false, error: 'startBlockId is required' } + } + + const snapshot = params.executionId + ? await getExecutionState(params.executionId) + : await getLatestExecutionState(workflowId) + + if (!snapshot) { + return { + success: false, + error: params.executionId + ? `No execution state found for execution ${params.executionId}. Run the full workflow first.` + : `No execution state found for workflow ${workflowId}. Run the full workflow first to create a snapshot.`, + } + } + + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const useDraftState = !params.useDeployedState + + const result = await executeWorkflow( + { + id: workflowRecord.id, + userId: workflowRecord.userId, + workspaceId: workflowRecord.workspaceId, + variables: workflowRecord.variables || {}, + }, + generateRequestId(), + params.workflow_input || params.input || undefined, + context.userId, + { + enabled: true, + useDraftState, + runFromBlock: { startBlockId: params.startBlockId, sourceSnapshot: snapshot }, + } + ) + + return { + success: result.success, + output: { + executionId: result.metadata?.executionId, + success: result.success, + startBlockId: params.startBlockId, + output: result.output, + logs: result.logs, + }, + error: result.success ? undefined : result.error || 'Workflow execution failed', + } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeRunBlock( + params: RunBlockParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + if (!params.blockId) { + return { success: false, error: 'blockId is required' } + } + + const snapshot = params.executionId + ? await getExecutionState(params.executionId) + : await getLatestExecutionState(workflowId) + + if (!snapshot) { + return { + success: false, + error: params.executionId + ? `No execution state found for execution ${params.executionId}. Run the full workflow first.` + : `No execution state found for workflow ${workflowId}. Run the full workflow first to create a snapshot.`, + } + } + + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const useDraftState = !params.useDeployedState + + const result = await executeWorkflow( + { + id: workflowRecord.id, + userId: workflowRecord.userId, + workspaceId: workflowRecord.workspaceId, + variables: workflowRecord.variables || {}, + }, + generateRequestId(), + params.workflow_input || params.input || undefined, + context.userId, + { + enabled: true, + useDraftState, + runFromBlock: { startBlockId: params.blockId, sourceSnapshot: snapshot }, + stopAfterBlockId: params.blockId, + } + ) + + return { + success: result.success, + output: { + executionId: result.metadata?.executionId, + success: result.success, + blockId: params.blockId, + output: result.output, + logs: result.logs, + }, + error: result.success ? undefined : result.error || 'Workflow execution failed', + } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts index cc8a724f09..645b083015 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts @@ -10,7 +10,10 @@ import { mcpService } from '@/lib/mcp/service' import { listWorkspaceFiles } from '@/lib/uploads/contexts/workspace' import { getBlockOutputPaths } from '@/lib/workflows/blocks/block-outputs' import { BlockPathCalculator } from '@/lib/workflows/blocks/block-path-calculator' -import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils' +import { + loadDeployedWorkflowState, + loadWorkflowFromNormalizedTables, +} from '@/lib/workflows/persistence/utils' import { isInputDefinitionTrigger } from '@/lib/workflows/triggers/input-definition-triggers' import type { Loop, Parallel } from '@/stores/workflows/workflow/types' import { normalizeName } from '@/executor/constants' @@ -23,6 +26,7 @@ import { import type { GetBlockOutputsParams, GetBlockUpstreamReferencesParams, + GetDeployedWorkflowStateParams, GetUserWorkflowParams, GetWorkflowDataParams, GetWorkflowFromNameParams, @@ -562,3 +566,50 @@ function formatOutputsWithPrefix(paths: string[], blockName: string): string[] { const normalizedName = normalizeName(blockName) return paths.map((path) => `${normalizedName}.${path}`) } + +export async function executeGetDeployedWorkflowState( + params: GetDeployedWorkflowStateParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + + try { + const deployedState = await loadDeployedWorkflowState(workflowId) 
+ const formatted = formatNormalizedWorkflowForCopilot({ + blocks: deployedState.blocks, + edges: deployedState.edges, + loops: deployedState.loops as Record, + parallels: deployedState.parallels as Record, + }) + + return { + success: true, + output: { + workflowId, + workflowName: workflowRecord.name || '', + isDeployed: true, + deploymentVersionId: deployedState.deploymentVersionId, + deployedState: formatted, + }, + } + } catch { + return { + success: true, + output: { + workflowId, + workflowName: workflowRecord.name || '', + isDeployed: false, + message: 'Workflow has not been deployed yet.', + }, + } + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} diff --git a/apps/sim/lib/copilot/tools/mcp/definitions.ts b/apps/sim/lib/copilot/tools/mcp/definitions.ts index 3ad8a1e44f..08fbe5b8ff 100644 --- a/apps/sim/lib/copilot/tools/mcp/definitions.ts +++ b/apps/sim/lib/copilot/tools/mcp/definitions.ts @@ -189,6 +189,158 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ required: ['folderId'], }, }, + { + name: 'run_workflow', + toolId: 'run_workflow', + description: + 'Run a workflow and return its output. Works on both draft and deployed states. By default runs the draft (live) state.', + inputSchema: { + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'REQUIRED. The workflow ID to run.', + }, + workflow_input: { + type: 'object', + description: 'JSON object with input values. Keys should match the workflow start block input field names.', + }, + useDeployedState: { + type: 'boolean', + description: 'When true, runs the deployed version instead of the draft. Default: false.', + }, + }, + required: ['workflowId'], + }, + }, + { + name: 'run_workflow_until_block', + toolId: 'run_workflow_until_block', + description: + 'Run a workflow and stop after a specific block completes. 
Useful for testing partial execution or debugging specific blocks.', + inputSchema: { + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'REQUIRED. The workflow ID to run.', + }, + stopAfterBlockId: { + type: 'string', + description: 'REQUIRED. The block ID to stop after. Execution halts once this block completes.', + }, + workflow_input: { + type: 'object', + description: 'JSON object with input values for the workflow.', + }, + useDeployedState: { + type: 'boolean', + description: 'When true, runs the deployed version instead of the draft. Default: false.', + }, + }, + required: ['workflowId', 'stopAfterBlockId'], + }, + }, + { + name: 'run_from_block', + toolId: 'run_from_block', + description: + 'Run a workflow starting from a specific block, using cached outputs from a prior execution for upstream blocks. The workflow must have been run at least once first.', + inputSchema: { + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'REQUIRED. The workflow ID to run.', + }, + startBlockId: { + type: 'string', + description: 'REQUIRED. The block ID to start execution from.', + }, + executionId: { + type: 'string', + description: 'Optional. Specific execution ID to load the snapshot from. Uses latest if omitted.', + }, + workflow_input: { + type: 'object', + description: 'Optional input values for the workflow.', + }, + useDeployedState: { + type: 'boolean', + description: 'When true, runs the deployed version instead of the draft. Default: false.', + }, + }, + required: ['workflowId', 'startBlockId'], + }, + }, + { + name: 'run_block', + toolId: 'run_block', + description: + 'Run a single block in isolation using cached outputs from a prior execution. Only the specified block executes — nothing upstream or downstream. The workflow must have been run at least once first.', + inputSchema: { + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'REQUIRED. 
The workflow ID.', + }, + blockId: { + type: 'string', + description: 'REQUIRED. The block ID to run in isolation.', + }, + executionId: { + type: 'string', + description: 'Optional. Specific execution ID to load the snapshot from. Uses latest if omitted.', + }, + workflow_input: { + type: 'object', + description: 'Optional input values for the workflow.', + }, + useDeployedState: { + type: 'boolean', + description: 'When true, runs the deployed version instead of the draft. Default: false.', + }, + }, + required: ['workflowId', 'blockId'], + }, + }, + { + name: 'get_deployed_workflow_state', + toolId: 'get_deployed_workflow_state', + description: + 'Get the deployed (production) state of a workflow. Returns the full workflow definition as deployed, or indicates if the workflow is not yet deployed.', + inputSchema: { + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'REQUIRED. The workflow ID to get the deployed state for.', + }, + }, + required: ['workflowId'], + }, + }, + { + name: 'generate_api_key', + toolId: 'generate_api_key', + description: + 'Generate a new workspace API key for calling workflow API endpoints. The key is only shown once — tell the user to save it immediately.', + inputSchema: { + type: 'object', + properties: { + name: { + type: 'string', + description: 'A descriptive name for the API key (e.g., "production-key", "dev-testing").', + }, + workspaceId: { + type: 'string', + description: 'Optional workspace ID. 
Defaults to user\'s default workspace.', + }, + }, + required: ['name'], + }, + }, ] export const SUBAGENT_TOOL_DEFS: SubagentToolDef[] = [ diff --git a/apps/sim/lib/logs/execution/logger.ts b/apps/sim/lib/logs/execution/logger.ts index 0fc47fa73b..033f3dc8e8 100644 --- a/apps/sim/lib/logs/execution/logger.ts +++ b/apps/sim/lib/logs/execution/logger.ts @@ -21,6 +21,7 @@ import { checkAndBillOverageThreshold } from '@/lib/billing/threshold-billing' import { isBillingEnabled } from '@/lib/core/config/feature-flags' import { redactApiKeys } from '@/lib/core/security/redaction' import { filterForDisplay } from '@/lib/core/utils/display-filters' +import type { SerializableExecutionState } from '@/executor/execution/types' import { emitWorkflowExecutionCompleted } from '@/lib/logs/events' import { snapshotService } from '@/lib/logs/execution/snapshot/service' import type { @@ -188,6 +189,7 @@ export class ExecutionLogger implements IExecutionLoggerService { finalOutput: BlockOutputData traceSpans?: TraceSpan[] workflowInput?: any + executionState?: SerializableExecutionState isResume?: boolean level?: 'info' | 'error' status?: 'completed' | 'failed' | 'cancelled' | 'pending' @@ -200,6 +202,7 @@ export class ExecutionLogger implements IExecutionLoggerService { finalOutput, traceSpans, workflowInput, + executionState, isResume, level: levelOverride, status: statusOverride, @@ -287,6 +290,7 @@ export class ExecutionLogger implements IExecutionLoggerService { total: executionCost.tokens.total, }, models: executionCost.models, + ...(executionState ? 
{ executionState } : {}), }, cost: executionCost, }) diff --git a/apps/sim/lib/logs/execution/logging-session.ts b/apps/sim/lib/logs/execution/logging-session.ts index be15156869..4245af6c19 100644 --- a/apps/sim/lib/logs/execution/logging-session.ts +++ b/apps/sim/lib/logs/execution/logging-session.ts @@ -3,6 +3,7 @@ import { workflowExecutionLogs } from '@sim/db/schema' import { createLogger } from '@sim/logger' import { eq, sql } from 'drizzle-orm' import { BASE_EXECUTION_CHARGE } from '@/lib/billing/constants' +import type { SerializableExecutionState } from '@/executor/execution/types' import { executionLogger } from '@/lib/logs/execution/logger' import { calculateCostSummary, @@ -35,6 +36,7 @@ export interface SessionCompleteParams { finalOutput?: any traceSpans?: TraceSpan[] workflowInput?: any + executionState?: SerializableExecutionState } export interface SessionErrorCompleteParams { @@ -269,7 +271,8 @@ export class LoggingSession { return } - const { endedAt, totalDurationMs, finalOutput, traceSpans, workflowInput } = params + const { endedAt, totalDurationMs, finalOutput, traceSpans, workflowInput, executionState } = + params try { const costSummary = calculateCostSummary(traceSpans || []) @@ -284,6 +287,7 @@ export class LoggingSession { finalOutput: finalOutput || {}, traceSpans: traceSpans || [], workflowInput, + executionState, isResume: this.isResume, }) diff --git a/apps/sim/lib/logs/types.ts b/apps/sim/lib/logs/types.ts index 8d65fbd555..9d160fd4a4 100644 --- a/apps/sim/lib/logs/types.ts +++ b/apps/sim/lib/logs/types.ts @@ -1,5 +1,6 @@ import type { Edge } from 'reactflow' import type { BlockLog, NormalizedBlockOutput } from '@/executor/types' +import type { SerializableExecutionState } from '@/executor/execution/types' import type { DeploymentStatus } from '@/stores/workflows/registry/types' import type { Loop, Parallel, WorkflowState } from '@/stores/workflows/workflow/types' @@ -111,6 +112,7 @@ export interface WorkflowExecutionLog { tokens?: 
{ input?: number; output?: number; total?: number } } > + executionState?: SerializableExecutionState finalOutput?: any errorDetails?: { blockId: string diff --git a/apps/sim/lib/workflows/executor/execute-workflow.ts b/apps/sim/lib/workflows/executor/execute-workflow.ts index 3313128ff0..82813ce768 100644 --- a/apps/sim/lib/workflows/executor/execute-workflow.ts +++ b/apps/sim/lib/workflows/executor/execute-workflow.ts @@ -4,7 +4,7 @@ import { LoggingSession } from '@/lib/logs/execution/logging-session' import { executeWorkflowCore } from '@/lib/workflows/executor/execution-core' import { PauseResumeManager } from '@/lib/workflows/executor/human-in-the-loop-manager' import { ExecutionSnapshot } from '@/executor/execution/snapshot' -import type { ExecutionMetadata } from '@/executor/execution/types' +import type { ExecutionMetadata, SerializableExecutionState } from '@/executor/execution/types' import type { ExecutionResult, StreamingExecution } from '@/executor/types' const logger = createLogger('WorkflowExecution') @@ -22,6 +22,13 @@ export interface ExecuteWorkflowOptions { abortSignal?: AbortSignal /** Use the live/draft workflow state instead of the deployed state. Used by copilot. */ useDraftState?: boolean + /** Stop execution after this block completes. Used for "run until block" feature. */ + stopAfterBlockId?: string + /** Run-from-block configuration using a prior execution snapshot. 
*/ + runFromBlock?: { + startBlockId: string + sourceSnapshot: SerializableExecutionState + } } export interface WorkflowInfo { @@ -86,6 +93,8 @@ export async function executeWorkflow( includeFileBase64: streamConfig?.includeFileBase64, base64MaxBytes: streamConfig?.base64MaxBytes, abortSignal: streamConfig?.abortSignal, + stopAfterBlockId: streamConfig?.stopAfterBlockId, + runFromBlock: streamConfig?.runFromBlock, }) if (result.status === 'paused') { diff --git a/apps/sim/lib/workflows/executor/execution-core.ts b/apps/sim/lib/workflows/executor/execution-core.ts index 60998d934f..56926d6271 100644 --- a/apps/sim/lib/workflows/executor/execution-core.ts +++ b/apps/sim/lib/workflows/executor/execution-core.ts @@ -400,6 +400,7 @@ export async function executeWorkflowCore( finalOutput: result.output || {}, traceSpans: traceSpans || [], workflowInput: processedInput, + executionState: result.executionState, }) await clearExecutionCancellation(executionId) diff --git a/apps/sim/lib/workflows/executor/execution-state.ts b/apps/sim/lib/workflows/executor/execution-state.ts new file mode 100644 index 0000000000..490895a891 --- /dev/null +++ b/apps/sim/lib/workflows/executor/execution-state.ts @@ -0,0 +1,53 @@ +import { db } from '@sim/db' +import { workflowExecutionLogs } from '@sim/db/schema' +import { and, desc, eq, sql } from 'drizzle-orm' +import type { SerializableExecutionState } from '@/executor/execution/types' + +function isSerializableExecutionState(value: unknown): value is SerializableExecutionState { + if (!value || typeof value !== 'object') return false + const state = value as Record + return ( + typeof state.blockStates === 'object' && + Array.isArray(state.executedBlocks) && + Array.isArray(state.blockLogs) && + typeof state.decisions === 'object' && + Array.isArray(state.completedLoops) && + Array.isArray(state.activeExecutionPath) + ) +} + +function extractExecutionState(executionData: unknown): SerializableExecutionState | null { + if (!executionData 
|| typeof executionData !== 'object') return null + const state = (executionData as Record).executionState + return isSerializableExecutionState(state) ? state : null +} + +export async function getExecutionState( + executionId: string +): Promise { + const [row] = await db + .select({ executionData: workflowExecutionLogs.executionData }) + .from(workflowExecutionLogs) + .where(eq(workflowExecutionLogs.executionId, executionId)) + .limit(1) + + return extractExecutionState(row?.executionData) +} + +export async function getLatestExecutionState( + workflowId: string +): Promise { + const [row] = await db + .select({ executionData: workflowExecutionLogs.executionData }) + .from(workflowExecutionLogs) + .where( + and( + eq(workflowExecutionLogs.workflowId, workflowId), + sql`${workflowExecutionLogs.executionData} -> 'executionState' IS NOT NULL` + ) + ) + .orderBy(desc(workflowExecutionLogs.startedAt)) + .limit(1) + + return extractExecutionState(row?.executionData) +} From 0c51ce5f57798c998afcc803af03adb4dee170c9 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Fri, 6 Feb 2026 12:06:09 -0800 Subject: [PATCH 35/72] Add tools --- apps/sim/app/api/copilot/chat/route.ts | 11 +- apps/sim/executor/execution/engine.ts | 6 +- apps/sim/hooks/use-undo-redo.ts | 25 +-- apps/sim/lib/copilot/api.ts | 4 +- apps/sim/lib/copilot/chat-context.ts | 5 +- apps/sim/lib/copilot/chat-payload.ts | 28 ++- .../lib/copilot/client-sse/content-blocks.ts | 11 +- apps/sim/lib/copilot/client-sse/handlers.ts | 47 +++-- apps/sim/lib/copilot/client-sse/index.ts | 4 +- .../copilot/client-sse/subagent-handlers.ts | 11 +- apps/sim/lib/copilot/client-sse/types.ts | 6 +- apps/sim/lib/copilot/messages/checkpoints.ts | 2 +- .../copilot/messages/credential-masking.ts | 5 +- apps/sim/lib/copilot/messages/index.ts | 4 +- .../orchestrator/sse-handlers/handlers.ts | 22 ++- .../sim/lib/copilot/orchestrator/sse-utils.ts | 10 +- .../lib/copilot/orchestrator/stream-core.ts | 6 +- 
.../orchestrator/tool-executor/index.ts | 9 +- .../tool-executor/platform-actions.ts | 117 ++++++++++++ .../tool-executor/workflow-tools/mutations.ts | 18 +- .../tool-executor/workflow-tools/queries.ts | 2 +- apps/sim/lib/copilot/process-contents.ts | 30 +-- apps/sim/lib/copilot/store-utils.ts | 10 +- apps/sim/lib/copilot/tools/mcp/definitions.ts | 54 +++++- .../server/blocks/get-blocks-and-tools.ts | 5 +- .../server/blocks/get-blocks-metadata-tool.ts | 13 +- .../tools/server/knowledge/knowledge-base.ts | 177 +++++++++++++++++- .../tools/server/other/search-online.ts | 12 +- .../tools/server/user/get-credentials.ts | 8 +- .../server/workflow/edit-workflow/builders.ts | 2 +- .../server/workflow/edit-workflow/engine.ts | 2 +- .../server/workflow/edit-workflow/index.ts | 5 +- .../workflow/edit-workflow/operations.ts | 45 +++-- .../server/workflow/get-workflow-console.ts | 4 +- apps/sim/lib/copilot/tools/shared/schemas.ts | 20 +- apps/sim/lib/logs/execution/logger.ts | 2 +- .../sim/lib/logs/execution/logging-session.ts | 2 +- apps/sim/lib/logs/types.ts | 2 +- apps/sim/lib/workflows/blocks/index.ts | 7 +- .../lib/workflows/blocks/schema-resolver.ts | 13 +- apps/sim/stores/panel/copilot/store.ts | 50 ++--- apps/sim/stores/workflow-diff/types.ts | 6 +- 42 files changed, 632 insertions(+), 190 deletions(-) create mode 100644 apps/sim/lib/copilot/orchestrator/tool-executor/platform-actions.ts diff --git a/apps/sim/app/api/copilot/chat/route.ts b/apps/sim/app/api/copilot/chat/route.ts index cd9ad74368..dbd97eccf6 100644 --- a/apps/sim/app/api/copilot/chat/route.ts +++ b/apps/sim/app/api/copilot/chat/route.ts @@ -5,10 +5,10 @@ import { and, desc, eq } from 'drizzle-orm' import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { getSession } from '@/lib/auth' -import { generateChatTitle } from '@/lib/copilot/chat-title' import { buildConversationHistory } from '@/lib/copilot/chat-context' import { resolveOrCreateChat } from 
'@/lib/copilot/chat-lifecycle' import { buildCopilotRequestPayload } from '@/lib/copilot/chat-payload' +import { generateChatTitle } from '@/lib/copilot/chat-title' import { getCopilotModel } from '@/lib/copilot/config' import { COPILOT_MODEL_IDS, COPILOT_REQUEST_MODES } from '@/lib/copilot/models' import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator' @@ -228,7 +228,9 @@ export async function POST(req: NextRequest) { hasTools: Array.isArray(requestPayload.tools), toolCount: Array.isArray(requestPayload.tools) ? requestPayload.tools.length : 0, hasBaseTools: Array.isArray(requestPayload.baseTools), - baseToolCount: Array.isArray(requestPayload.baseTools) ? requestPayload.baseTools.length : 0, + baseToolCount: Array.isArray(requestPayload.baseTools) + ? requestPayload.baseTools.length + : 0, hasCredentials: !!requestPayload.credentials, }) } catch {} @@ -370,7 +372,10 @@ export async function POST(req: NextRequest) { content: nonStreamingResult.content, toolCalls: nonStreamingResult.toolCalls, model: selectedModel, - provider: (requestPayload?.provider as Record)?.provider || env.COPILOT_PROVIDER || 'openai', + provider: + (requestPayload?.provider as Record)?.provider || + env.COPILOT_PROVIDER || + 'openai', } logger.info(`[${tracker.requestId}] Non-streaming response from orchestrator:`, { diff --git a/apps/sim/executor/execution/engine.ts b/apps/sim/executor/execution/engine.ts index c0d015d5eb..2f47912528 100644 --- a/apps/sim/executor/execution/engine.ts +++ b/apps/sim/executor/execution/engine.ts @@ -471,9 +471,9 @@ export class ExecutionEngine { } } - private getSerializableExecutionState( - snapshotSeed?: { snapshot: string } - ): SerializableExecutionState | undefined { + private getSerializableExecutionState(snapshotSeed?: { + snapshot: string + }): SerializableExecutionState | undefined { try { const serializedSnapshot = snapshotSeed?.snapshot ?? 
serializePauseSnapshot(this.context, [], this.dag).snapshot diff --git a/apps/sim/hooks/use-undo-redo.ts b/apps/sim/hooks/use-undo-redo.ts index 10873859e4..880af7c063 100644 --- a/apps/sim/hooks/use-undo-redo.ts +++ b/apps/sim/hooks/use-undo-redo.ts @@ -6,6 +6,7 @@ declare global { __skipDiffRecording?: boolean } } + import type { Edge } from 'reactflow' import { useSession } from '@/lib/auth/auth-client' import { enqueueReplaceWorkflowState } from '@/lib/workflows/operations/socket-operations' @@ -914,7 +915,7 @@ export function useUndoRedo() { // Set flag to skip recording during this operation - ;window.__skipDiffRecording = true + window.__skipDiffRecording = true try { // Restore baseline state and broadcast to everyone if (baselineSnapshot && activeWorkflowId) { @@ -951,7 +952,7 @@ export function useUndoRedo() { logger.info('Clearing diff UI state') useWorkflowDiffStore.getState().clearDiff({ restoreBaseline: false }) } finally { - ;window.__skipDiffRecording = false + window.__skipDiffRecording = false } logger.info('Undid apply-diff operation successfully') @@ -971,7 +972,7 @@ export function useUndoRedo() { // Set flag to skip recording during this operation - ;window.__skipDiffRecording = true + window.__skipDiffRecording = true try { // Apply the before-accept state (with markers for this user) useWorkflowStore.getState().replaceWorkflowState(beforeAccept) @@ -1010,7 +1011,7 @@ export function useUndoRedo() { diffAnalysis: diffAnalysis, }) } finally { - ;window.__skipDiffRecording = false + window.__skipDiffRecording = false } logger.info('Undid accept-diff operation - restored diff view') @@ -1024,7 +1025,7 @@ export function useUndoRedo() { const { useWorkflowStore } = await import('@/stores/workflows/workflow/store') const { useSubBlockStore } = await import('@/stores/workflows/subblock/store') - ;window.__skipDiffRecording = true + window.__skipDiffRecording = true try { // Apply the before-reject state (with markers for this user) 
useWorkflowStore.getState().replaceWorkflowState(beforeReject) @@ -1061,7 +1062,7 @@ export function useUndoRedo() { diffAnalysis: diffAnalysis, }) } finally { - ;window.__skipDiffRecording = false + window.__skipDiffRecording = false } logger.info('Undid reject-diff operation - restored diff view') @@ -1532,7 +1533,7 @@ export function useUndoRedo() { // Set flag to skip recording during this operation - ;window.__skipDiffRecording = true + window.__skipDiffRecording = true try { // Manually apply the proposed state and set up diff store (similar to setProposedChanges but with original baseline) const diffStore = useWorkflowDiffStore.getState() @@ -1573,7 +1574,7 @@ export function useUndoRedo() { diffAnalysis: diffAnalysis, }) } finally { - ;window.__skipDiffRecording = false + window.__skipDiffRecording = false } logger.info('Redid apply-diff operation') @@ -1589,7 +1590,7 @@ export function useUndoRedo() { // Set flag to skip recording during this operation - ;window.__skipDiffRecording = true + window.__skipDiffRecording = true try { // Clear diff state FIRST to prevent flash of colors (local UI only) // Use setState directly to ensure synchronous clearing @@ -1627,7 +1628,7 @@ export function useUndoRedo() { operationId: opId, }) } finally { - ;window.__skipDiffRecording = false + window.__skipDiffRecording = false } logger.info('Redid accept-diff operation - cleared diff view') @@ -1641,7 +1642,7 @@ export function useUndoRedo() { const { useWorkflowStore } = await import('@/stores/workflows/workflow/store') const { useSubBlockStore } = await import('@/stores/workflows/subblock/store') - ;window.__skipDiffRecording = true + window.__skipDiffRecording = true try { // Clear diff state FIRST to prevent flash of colors (local UI only) // Use setState directly to ensure synchronous clearing @@ -1679,7 +1680,7 @@ export function useUndoRedo() { operationId: opId, }) } finally { - ;window.__skipDiffRecording = false + window.__skipDiffRecording = false } 
logger.info('Redid reject-diff operation - cleared diff view') diff --git a/apps/sim/lib/copilot/api.ts b/apps/sim/lib/copilot/api.ts index 19d0f6f7bc..06ac46b324 100644 --- a/apps/sim/lib/copilot/api.ts +++ b/apps/sim/lib/copilot/api.ts @@ -141,7 +141,9 @@ export async function sendStreamingMessage( resumeFromEventId, }) } catch (error) { - logger.warn('Failed to log streaming message context preview', { error: error instanceof Error ? error.message : String(error) }) + logger.warn('Failed to log streaming message context preview', { + error: error instanceof Error ? error.message : String(error), + }) } const streamId = request.userMessageId diff --git a/apps/sim/lib/copilot/chat-context.ts b/apps/sim/lib/copilot/chat-context.ts index d1377eb4ac..b793f5f794 100644 --- a/apps/sim/lib/copilot/chat-context.ts +++ b/apps/sim/lib/copilot/chat-context.ts @@ -44,7 +44,10 @@ export async function processFileAttachments( const processedFileContents: FileContent[] = [] const requestId = `copilot-${userId}-${Date.now()}` - const processedAttachments = await CopilotFiles.processCopilotAttachments(fileAttachments as Parameters[0], requestId) + const processedAttachments = await CopilotFiles.processCopilotAttachments( + fileAttachments as Parameters[0], + requestId + ) for (const { buffer, attachment } of processedAttachments) { const fileContent = createFileContent(buffer, attachment.media_type) diff --git a/apps/sim/lib/copilot/chat-payload.ts b/apps/sim/lib/copilot/chat-payload.ts index ab6fcad0bd..f6eefbab66 100644 --- a/apps/sim/lib/copilot/chat-payload.ts +++ b/apps/sim/lib/copilot/chat-payload.ts @@ -1,12 +1,12 @@ import { createLogger } from '@sim/logger' -import { env } from '@/lib/core/config/env' +import { processFileAttachments } from '@/lib/copilot/chat-context' import { getCopilotModel } from '@/lib/copilot/config' import { SIM_AGENT_VERSION } from '@/lib/copilot/constants' import { getCredentialsServerTool } from '@/lib/copilot/tools/server/user/get-credentials' 
import type { CopilotProviderConfig } from '@/lib/copilot/types' +import { env } from '@/lib/core/config/env' import { tools } from '@/tools/registry' import { getLatestVersionTools, stripVersionSuffix } from '@/tools/utils' -import { type FileContent, processFileAttachments } from '@/lib/copilot/chat-context' const logger = createLogger('CopilotChatPayload') @@ -35,7 +35,10 @@ interface ToolSchema { } interface CredentialsPayload { - oauth: Record + oauth: Record< + string, + { accessToken: string; accountId: string; name: string; expiresAt?: string } + > apiKeys: string[] metadata?: { connectedOAuth: Array<{ provider: string; name: string; scopes?: string[] }> @@ -95,9 +98,17 @@ export async function buildCopilotRequestPayload( } ): Promise> { const { - message, workflowId, userId, userMessageId, mode, - conversationHistory = [], contexts, fileAttachments, - commands, chatId, implicitFeedback, + message, + workflowId, + userId, + userMessageId, + mode, + conversationHistory = [], + contexts, + fileAttachments, + commands, + chatId, + implicitFeedback, } = params const selectedModel = options.selectedModel @@ -115,7 +126,10 @@ export async function buildCopilotRequestPayload( const content: Array<{ type: string; text?: string; [key: string]: unknown }> = [ { type: 'text', text: msg.content as string }, ] - const processedHistoricalAttachments = await processFileAttachments(msgAttachments as BuildPayloadParams['fileAttachments'] ?? [], userId) + const processedHistoricalAttachments = await processFileAttachments( + (msgAttachments as BuildPayloadParams['fileAttachments']) ?? 
[], + userId + ) for (const fileContent of processedHistoricalAttachments) { content.push(fileContent) } diff --git a/apps/sim/lib/copilot/client-sse/content-blocks.ts b/apps/sim/lib/copilot/client-sse/content-blocks.ts index 1ce416bc65..0e77882524 100644 --- a/apps/sim/lib/copilot/client-sse/content-blocks.ts +++ b/apps/sim/lib/copilot/client-sse/content-blocks.ts @@ -24,9 +24,7 @@ export function createUserMessage( ...(contexts && contexts.length > 0 && { contexts }), ...(contexts && contexts.length > 0 && { - contentBlocks: [ - { type: 'contexts', contexts, timestamp: Date.now() }, - ], + contentBlocks: [{ type: 'contexts', contexts, timestamp: Date.now() }], }), } } @@ -125,7 +123,12 @@ export function stripContinueOptionFromBlocks(blocks: ClientContentBlock[]): Cli export function beginThinkingBlock(context: ClientStreamingContext) { if (!context.currentThinkingBlock) { - const newBlock: ClientContentBlock = { type: 'thinking', content: '', timestamp: Date.now(), startTime: Date.now() } + const newBlock: ClientContentBlock = { + type: 'thinking', + content: '', + timestamp: Date.now(), + startTime: Date.now(), + } context.currentThinkingBlock = newBlock context.contentBlocks.push(newBlock) } diff --git a/apps/sim/lib/copilot/client-sse/handlers.ts b/apps/sim/lib/copilot/client-sse/handlers.ts index bc8968fc09..27e84972c1 100644 --- a/apps/sim/lib/copilot/client-sse/handlers.ts +++ b/apps/sim/lib/copilot/client-sse/handlers.ts @@ -1,22 +1,18 @@ import { createLogger } from '@sim/logger' import { STREAM_STORAGE_KEY } from '@/lib/copilot/constants' -import type { SSEEvent } from '@/lib/copilot/orchestrator/types' import { asRecord } from '@/lib/copilot/orchestrator/sse-utils' -import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' +import type { SSEEvent } from '@/lib/copilot/orchestrator/types' import { isBackgroundState, isRejectedState, isReviewState, resolveToolDisplay, } from '@/lib/copilot/store-utils' -import { 
useWorkflowDiffStore } from '@/stores/workflow-diff/store' +import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' import type { CopilotStore, CopilotStreamInfo, CopilotToolCall } from '@/stores/panel/copilot/types' +import { useWorkflowDiffStore } from '@/stores/workflow-diff/store' import type { WorkflowState } from '@/stores/workflows/workflow/types' -import { - appendTextBlock, - beginThinkingBlock, - finalizeThinkingBlock, -} from './content-blocks' +import { appendTextBlock, beginThinkingBlock, finalizeThinkingBlock } from './content-blocks' import type { ClientContentBlock, ClientStreamingContext } from './types' const logger = createLogger('CopilotClientSseHandlers') @@ -92,7 +88,9 @@ export function flushStreamingUpdates(set: StoreSet) { ...msg, content: '', contentBlocks: - update.contentBlocks.length > 0 ? createOptimizedContentBlocks(update.contentBlocks) : [], + update.contentBlocks.length > 0 + ? createOptimizedContentBlocks(update.contentBlocks) + : [], } } return msg @@ -183,7 +181,12 @@ function appendThinkingContent(context: ClientStreamingContext, text: string) { if (context.currentThinkingBlock) { context.currentThinkingBlock.content += cleanedText } else { - const newBlock: ClientContentBlock = { type: 'thinking', content: cleanedText, timestamp: Date.now(), startTime: Date.now() } + const newBlock: ClientContentBlock = { + type: 'thinking', + content: cleanedText, + timestamp: Date.now(), + startTime: Date.now(), + } context.currentThinkingBlock = newBlock context.contentBlocks.push(newBlock) } @@ -218,7 +221,8 @@ export const sseHandlers: Record = { tool_result: (data, context, get, set) => { try { const eventData = asRecord(data?.data) - const toolCallId: string | undefined = data?.toolCallId || (eventData.id as string | undefined) + const toolCallId: string | undefined = + data?.toolCallId || (eventData.id as string | undefined) const success: boolean | undefined = data?.success const failedDependency: 
boolean = data?.failedDependency === true const resultObj = asRecord(data?.result) @@ -251,7 +255,9 @@ export const sseHandlers: Record = { try { const result = asRecord(data?.result) || asRecord(eventData.result) const input = asRecord(current.params || current.input) - const todoId = (input.id || input.todoId || result.id || result.todoId) as string | undefined + const todoId = (input.id || input.todoId || result.id || result.todoId) as + | string + | undefined if (todoId) { get().updatePlanTodoStatus(todoId, 'completed') } @@ -270,7 +276,9 @@ export const sseHandlers: Record = { try { const result = asRecord(data?.result) || asRecord(eventData.result) const input = asRecord(current.params || current.input) - const todoId = (input.id || input.todoId || result.id || result.todoId) as string | undefined + const todoId = (input.id || input.todoId || result.id || result.todoId) as + | string + | undefined if (todoId) { get().updatePlanTodoStatus(todoId, 'executing') } @@ -296,11 +304,13 @@ export const sseHandlers: Record = { }) if (hasWorkflowState) { const diffStore = useWorkflowDiffStore.getState() - diffStore.setProposedChanges(resultPayload.workflowState as WorkflowState).catch((err) => { - logger.error('[SSE] Failed to apply edit_workflow diff', { - error: err instanceof Error ? err.message : String(err), + diffStore + .setProposedChanges(resultPayload.workflowState as WorkflowState) + .catch((err) => { + logger.error('[SSE] Failed to apply edit_workflow diff', { + error: err instanceof Error ? 
err.message : String(err), + }) }) - }) } } catch (err) { logger.error('[SSE] edit_workflow result handling failed', { @@ -350,7 +360,8 @@ export const sseHandlers: Record = { tool_error: (data, context, get, set) => { try { const errorData = asRecord(data?.data) - const toolCallId: string | undefined = data?.toolCallId || (errorData.id as string | undefined) + const toolCallId: string | undefined = + data?.toolCallId || (errorData.id as string | undefined) const failedDependency: boolean = data?.failedDependency === true if (!toolCallId) return const { toolCallsById } = get() diff --git a/apps/sim/lib/copilot/client-sse/index.ts b/apps/sim/lib/copilot/client-sse/index.ts index a08f89593e..8c45d3ae1c 100644 --- a/apps/sim/lib/copilot/client-sse/index.ts +++ b/apps/sim/lib/copilot/client-sse/index.ts @@ -1,3 +1,3 @@ -export { sseHandlers } from './handlers' -export { subAgentSSEHandlers, applySseEvent } from './subagent-handlers' export type { SSEHandler } from './handlers' +export { sseHandlers } from './handlers' +export { applySseEvent, subAgentSSEHandlers } from './subagent-handlers' diff --git a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts index e68a552b60..d78360cad4 100644 --- a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts +++ b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts @@ -6,11 +6,11 @@ import { shouldSkipToolResultEvent, } from '@/lib/copilot/orchestrator/sse-utils' import type { SSEEvent } from '@/lib/copilot/orchestrator/types' -import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' import { resolveToolDisplay } from '@/lib/copilot/store-utils' +import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' import type { CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types' +import { type SSEHandler, sseHandlers, updateStreamingMessage } from './handlers' import type { ClientStreamingContext } from 
'./types' -import { sseHandlers, type SSEHandler, updateStreamingMessage } from './handlers' const logger = createLogger('CopilotClientSubagentHandlers') @@ -110,7 +110,7 @@ export const subAgentSSEHandlers: Record = { content: (data, context, get, set) => { const parentToolCallId = context.subAgentParentToolCallId - const contentStr = typeof data.data === 'string' ? data.data : (data.content || '') + const contentStr = typeof data.data === 'string' ? data.data : data.content || '' logger.info('[SubAgent] content event', { parentToolCallId, hasData: !!contentStr, @@ -159,8 +159,9 @@ export const subAgentSSEHandlers: Record = { if (!id || !name) return const isPartial = toolData.partial === true - let args: Record | undefined = - (toolData.arguments || toolData.input) as Record | undefined + let args: Record | undefined = (toolData.arguments || toolData.input) as + | Record + | undefined if (typeof args === 'string') { try { diff --git a/apps/sim/lib/copilot/client-sse/types.ts b/apps/sim/lib/copilot/client-sse/types.ts index 8a4616a84e..5f46f74920 100644 --- a/apps/sim/lib/copilot/client-sse/types.ts +++ b/apps/sim/lib/copilot/client-sse/types.ts @@ -1,4 +1,8 @@ -import type { ChatContext, CopilotToolCall, SubAgentContentBlock } from '@/stores/panel/copilot/types' +import type { + ChatContext, + CopilotToolCall, + SubAgentContentBlock, +} from '@/stores/panel/copilot/types' /** * A content block used in copilot messages and during streaming. 
diff --git a/apps/sim/lib/copilot/messages/checkpoints.ts b/apps/sim/lib/copilot/messages/checkpoints.ts index 29eca04c3b..3764adedc7 100644 --- a/apps/sim/lib/copilot/messages/checkpoints.ts +++ b/apps/sim/lib/copilot/messages/checkpoints.ts @@ -1,9 +1,9 @@ import { createLogger } from '@sim/logger' import { COPILOT_CHECKPOINTS_API_PATH } from '@/lib/copilot/constants' +import type { CopilotMessage, CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types' import { mergeSubblockState } from '@/stores/workflows/utils' import { useWorkflowStore } from '@/stores/workflows/workflow/store' import type { WorkflowState } from '@/stores/workflows/workflow/types' -import type { CopilotMessage, CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types' const logger = createLogger('CopilotMessageCheckpoints') diff --git a/apps/sim/lib/copilot/messages/credential-masking.ts b/apps/sim/lib/copilot/messages/credential-masking.ts index 33f1549491..c1eee9f0b0 100644 --- a/apps/sim/lib/copilot/messages/credential-masking.ts +++ b/apps/sim/lib/copilot/messages/credential-masking.ts @@ -19,10 +19,7 @@ export function maskCredentialIdsInValue(value: T, credentialIds: Set if (typeof value === 'object') { const masked: Record = {} for (const key of Object.keys(value as Record)) { - masked[key] = maskCredentialIdsInValue( - (value as Record)[key], - credentialIds - ) + masked[key] = maskCredentialIdsInValue((value as Record)[key], credentialIds) } return masked as T } diff --git a/apps/sim/lib/copilot/messages/index.ts b/apps/sim/lib/copilot/messages/index.ts index 2525a00792..901c60943d 100644 --- a/apps/sim/lib/copilot/messages/index.ts +++ b/apps/sim/lib/copilot/messages/index.ts @@ -1,4 +1,4 @@ -export * from './credential-masking' -export * from './serialization' export * from './checkpoints' +export * from './credential-masking' export * from './persist' +export * from './serialization' diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts 
b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts index b641b10f8f..9a061029e9 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts @@ -89,8 +89,12 @@ export const sseHandlers: Record = { }, tool_generating: (event, context) => { const data = getEventData(event) - const toolCallId = event.toolCallId || (data?.toolCallId as string | undefined) || (data?.id as string | undefined) - const toolName = event.toolName || (data?.toolName as string | undefined) || (data?.name as string | undefined) + const toolCallId = + event.toolCallId || + (data?.toolCallId as string | undefined) || + (data?.id as string | undefined) + const toolName = + event.toolName || (data?.toolName as string | undefined) || (data?.name as string | undefined) if (!toolCallId || !toolName) return if (!context.toolCalls.has(toolCallId)) { context.toolCalls.set(toolCallId, { @@ -107,7 +111,9 @@ export const sseHandlers: Record = { const toolName = (toolData.name as string | undefined) || event.toolName if (!toolCallId || !toolName) return - const args = (toolData.arguments || toolData.input || asRecord(event.data).input) as Record | undefined + const args = (toolData.arguments || toolData.input || asRecord(event.data).input) as + | Record + | undefined const isPartial = toolData.partial === true const existing = context.toolCalls.get(toolCallId) @@ -164,7 +170,11 @@ export const sseHandlers: Record = { const isInteractive = options.interactive === true if (isInterruptTool && isInteractive) { - const decision = await waitForToolDecision(toolCallId, options.timeout || STREAM_TIMEOUT_MS, options.abortSignal) + const decision = await waitForToolDecision( + toolCallId, + options.timeout || STREAM_TIMEOUT_MS, + options.abortSignal + ) if (decision?.status === 'accepted' || decision?.status === 'success') { await executeToolAndReport(toolCallId, context, execContext, options) return @@ -308,7 +318,9 @@ export const 
subAgentHandlers: Record = { const toolName = (toolData.name as string | undefined) || event.toolName if (!toolCallId || !toolName) return const isPartial = toolData.partial === true - const args = (toolData.arguments || toolData.input || asRecord(event.data).input) as Record | undefined + const args = (toolData.arguments || toolData.input || asRecord(event.data).input) as + | Record + | undefined const existing = context.toolCalls.get(toolCallId) // Ignore late/duplicate tool_call events once we already have a result. diff --git a/apps/sim/lib/copilot/orchestrator/sse-utils.ts b/apps/sim/lib/copilot/orchestrator/sse-utils.ts index 92f337e2a7..afcbf21115 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-utils.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-utils.ts @@ -58,7 +58,9 @@ export const getEventData = (event: SSEEvent): EventDataObject => { function getToolCallIdFromEvent(event: SSEEvent): string | undefined { const data = getEventData(event) - return event.toolCallId || (data?.id as string | undefined) || (data?.toolCallId as string | undefined) + return ( + event.toolCallId || (data?.id as string | undefined) || (data?.toolCallId as string | undefined) + ) } /** Normalizes SSE events so tool metadata is available at the top level. */ @@ -66,8 +68,10 @@ export function normalizeSseEvent(event: SSEEvent): SSEEvent { if (!event) return event const data = getEventData(event) if (!data) return event - const toolCallId = event.toolCallId || (data.id as string | undefined) || (data.toolCallId as string | undefined) - const toolName = event.toolName || (data.name as string | undefined) || (data.toolName as string | undefined) + const toolCallId = + event.toolCallId || (data.id as string | undefined) || (data.toolCallId as string | undefined) + const toolName = + event.toolName || (data.name as string | undefined) || (data.toolName as string | undefined) const success = event.success ?? (data.success as boolean | undefined) const result = event.result ?? 
data.result const normalizedData = typeof event.data === 'string' ? data : event.data diff --git a/apps/sim/lib/copilot/orchestrator/stream-core.ts b/apps/sim/lib/copilot/orchestrator/stream-core.ts index 14357c2045..e1dc2e2fc3 100644 --- a/apps/sim/lib/copilot/orchestrator/stream-core.ts +++ b/apps/sim/lib/copilot/orchestrator/stream-core.ts @@ -29,7 +29,7 @@ export interface StreamLoopOptions extends OrchestratorOptions { * Called for each normalized event BEFORE standard handler dispatch. * Return true to skip the default handler for this event. */ - onBeforeDispatch?: (event: SSEEvent, context: StreamingContext) => boolean | void + onBeforeDispatch?: (event: SSEEvent, context: StreamingContext) => boolean | undefined } /** @@ -78,7 +78,9 @@ export async function runStreamLoop( if (!response.ok) { const errorText = await response.text().catch(() => '') - throw new Error(`Copilot backend error (${response.status}): ${errorText || response.statusText}`) + throw new Error( + `Copilot backend error (${response.status}): ${errorText || response.statusText}` + ) } if (!response.body) { diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts index cb175f27ef..2bae9f05d3 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts @@ -49,6 +49,7 @@ import type { RunWorkflowUntilBlockParams, SetGlobalWorkflowVariablesParams, } from './param-types' +import { PLATFORM_ACTIONS_CONTENT } from './platform-actions' import { executeCreateFolder, executeCreateWorkflow, @@ -116,13 +117,19 @@ const SIM_WORKFLOW_TOOL_HANDLERS: Record< get_deployed_workflow_state: (p, c) => executeGetDeployedWorkflowState(p as GetDeployedWorkflowStateParams, c), generate_api_key: (p, c) => executeGenerateApiKey(p as unknown as GenerateApiKeyParams, c), + get_platform_actions: () => + Promise.resolve({ + success: true, + output: { content: 
PLATFORM_ACTIONS_CONTENT }, + }), set_global_workflow_variables: (p, c) => executeSetGlobalWorkflowVariables(p as SetGlobalWorkflowVariablesParams, c), deploy_api: (p, c) => executeDeployApi(p as DeployApiParams, c), deploy_chat: (p, c) => executeDeployChat(p as DeployChatParams, c), deploy_mcp: (p, c) => executeDeployMcp(p as DeployMcpParams, c), redeploy: (_p, c) => executeRedeploy(c), - check_deployment_status: (p, c) => executeCheckDeploymentStatus(p as CheckDeploymentStatusParams, c), + check_deployment_status: (p, c) => + executeCheckDeploymentStatus(p as CheckDeploymentStatusParams, c), list_workspace_mcp_servers: (p, c) => executeListWorkspaceMcpServers(p as ListWorkspaceMcpServersParams, c), create_workspace_mcp_server: (p, c) => diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/platform-actions.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/platform-actions.ts new file mode 100644 index 0000000000..6465e74a26 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/platform-actions.ts @@ -0,0 +1,117 @@ +/** + * Static content for the get_platform_actions tool. + * Contains the Sim platform quick reference and keyboard shortcuts. + */ +export const PLATFORM_ACTIONS_CONTENT = `# Sim Platform Quick Reference & Keyboard Shortcuts + +## Keyboard Shortcuts +**Mod** = Cmd (macOS) / Ctrl (Windows/Linux). Shortcuts work when canvas is focused. 
+ +### Workflow Actions +| Shortcut | Action | +|----------|--------| +| Mod+Enter | Run workflow (or cancel if running) | +| Mod+Z | Undo | +| Mod+Shift+Z | Redo | +| Mod+C | Copy selected blocks | +| Mod+V | Paste blocks | +| Delete/Backspace | Delete selected blocks or edges | +| Shift+L | Auto-layout canvas | +| Mod+Shift+F | Fit to view | +| Mod+Shift+Enter | Accept Copilot changes | + +### Panel Navigation +| Shortcut | Action | +|----------|--------| +| C | Focus Copilot tab | +| T | Focus Toolbar tab | +| E | Focus Editor tab | +| Mod+F | Focus Toolbar search | + +### Global Navigation +| Shortcut | Action | +|----------|--------| +| Mod+K | Open search | +| Mod+Shift+A | Add new agent workflow | +| Mod+Y | Go to templates | +| Mod+L | Go to logs | + +### Utility +| Shortcut | Action | +|----------|--------| +| Mod+D | Clear terminal console | +| Mod+E | Clear notifications | + +### Mouse Controls +| Action | Control | +|--------|---------| +| Pan/move canvas | Left-drag on empty space, scroll, or trackpad | +| Select multiple blocks | Right-drag to draw selection box | +| Drag block | Left-drag on block header | +| Add to selection | Mod+Click on blocks | + +## Quick Reference — Workspaces +| Action | How | +|--------|-----| +| Create workspace | Click workspace dropdown → New Workspace | +| Switch workspaces | Click workspace dropdown → Select workspace | +| Invite team members | Sidebar → Invite | +| Rename/Duplicate/Export/Delete workspace | Right-click workspace → action | + +## Quick Reference — Workflows +| Action | How | +|--------|-----| +| Create workflow | Click + button in sidebar | +| Reorder/move workflows | Drag workflow up/down or onto a folder | +| Import workflow | Click import button in sidebar → Select file | +| Multi-select workflows | Mod+Click or Shift+Click workflows in sidebar | +| Open in new tab | Right-click workflow → Open in New Tab | +| Rename/Color/Duplicate/Export/Delete | Right-click workflow → action | + +## Quick 
Reference — Blocks +| Action | How | +|--------|-----| +| Add a block | Drag from Toolbar panel, or right-click canvas → Add Block | +| Multi-select blocks | Mod+Click additional blocks, or shift-drag selection box | +| Copy/Paste blocks | Mod+C / Mod+V | +| Duplicate/Delete blocks | Right-click → action | +| Rename a block | Click block name in header | +| Enable/Disable block | Right-click → Enable/Disable | +| Lock/Unlock block | Hover block → Click lock icon (Admin only) | +| Toggle handle orientation | Right-click → Toggle Handles | +| Configure a block | Select block → use Editor panel on right | + +## Quick Reference — Connections +| Action | How | +|--------|-----| +| Create connection | Drag from output handle to input handle | +| Delete connection | Click edge to select → Delete key | +| Use output in another block | Drag connection tag into input field | + +## Quick Reference — Running & Testing +| Action | How | +|--------|-----| +| Run workflow | Click Run Workflow button or Mod+Enter | +| Stop workflow | Click Stop button or Mod+Enter while running | +| Test with chat | Use Chat panel on the right side | +| Run from block | Hover block → Click play button, or right-click → Run from block | +| Run until block | Right-click block → Run until block | +| View execution logs | Open terminal panel at bottom, or Mod+L | +| Filter/Search/Copy/Clear logs | Terminal panel controls | + +## Quick Reference — Deployment +| Action | How | +|--------|-----| +| Deploy workflow | Click Deploy button in panel | +| Update deployment | Click Update when changes are detected | +| Revert deployment | Previous versions in Deploy tab → Promote to live | +| Copy API endpoint | Deploy tab → API → Copy API cURL | + +## Quick Reference — Variables +| Action | How | +|--------|-----| +| Add/Edit/Delete workflow variable | Panel → Variables → Add Variable | +| Add environment variable | Settings → Environment Variables → Add | +| Reference workflow variable | Use syntax | +| 
Reference environment variable | Use {{ENV_VAR}} syntax | +` diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts index 80e1c0a234..2b9d5142cd 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts @@ -1,20 +1,20 @@ import crypto from 'crypto' -import { nanoid } from 'nanoid' -import { createLogger } from '@sim/logger' import { db } from '@sim/db' import { apiKey, workflow, workflowFolder } from '@sim/db/schema' +import { createLogger } from '@sim/logger' import { and, eq, isNull, max } from 'drizzle-orm' -import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types' +import { nanoid } from 'nanoid' import { createApiKey } from '@/lib/api-key/auth' +import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types' import { generateRequestId } from '@/lib/core/utils/request' import { buildDefaultWorkflowArtifacts } from '@/lib/workflows/defaults' import { executeWorkflow } from '@/lib/workflows/executor/execute-workflow' -import { saveWorkflowToNormalizedTables } from '@/lib/workflows/persistence/utils' -import { ensureWorkflowAccess, ensureWorkspaceAccess, getDefaultWorkspaceId } from '../access' import { getExecutionState, getLatestExecutionState, } from '@/lib/workflows/executor/execution-state' +import { saveWorkflowToNormalizedTables } from '@/lib/workflows/persistence/utils' +import { ensureWorkflowAccess, ensureWorkspaceAccess, getDefaultWorkspaceId } from '../access' import type { CreateFolderParams, CreateWorkflowParams, @@ -243,7 +243,9 @@ export async function executeSetGlobalWorkflowVariables( if (type === 'object' && parsed && typeof parsed === 'object' && !Array.isArray(parsed)) return parsed } catch (error) { - logger.warn('Failed to parse JSON value for variable coercion', { 
error: error instanceof Error ? error.message : String(error) }) + logger.warn('Failed to parse JSON value for variable coercion', { + error: error instanceof Error ? error.message : String(error), + }) } return value } @@ -284,9 +286,7 @@ export async function executeSetGlobalWorkflowVariables( } } - const nextVarsRecord = Object.fromEntries( - Object.values(byName).map((v) => [String(v.id), v]) - ) + const nextVarsRecord = Object.fromEntries(Object.values(byName).map((v) => [String(v.id), v])) await db .update(workflow) diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts index 645b083015..ea8ce31873 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts @@ -15,8 +15,8 @@ import { loadWorkflowFromNormalizedTables, } from '@/lib/workflows/persistence/utils' import { isInputDefinitionTrigger } from '@/lib/workflows/triggers/input-definition-triggers' -import type { Loop, Parallel } from '@/stores/workflows/workflow/types' import { normalizeName } from '@/executor/constants' +import type { Loop, Parallel } from '@/stores/workflows/workflow/types' import { ensureWorkflowAccess, ensureWorkspaceAccess, diff --git a/apps/sim/lib/copilot/process-contents.ts b/apps/sim/lib/copilot/process-contents.ts index 6e69e747e5..9e1eeb079b 100644 --- a/apps/sim/lib/copilot/process-contents.ts +++ b/apps/sim/lib/copilot/process-contents.ts @@ -45,25 +45,16 @@ export async function processContexts( ) } if (ctx.kind === 'knowledge' && ctx.knowledgeId) { - return await processKnowledgeFromDb( - ctx.knowledgeId, - ctx.label ? `@${ctx.label}` : '@' - ) + return await processKnowledgeFromDb(ctx.knowledgeId, ctx.label ? `@${ctx.label}` : '@') } if (ctx.kind === 'blocks' && ctx.blockIds?.length > 0) { return await processBlockMetadata(ctx.blockIds[0], ctx.label ? 
`@${ctx.label}` : '@') } if (ctx.kind === 'templates' && ctx.templateId) { - return await processTemplateFromDb( - ctx.templateId, - ctx.label ? `@${ctx.label}` : '@' - ) + return await processTemplateFromDb(ctx.templateId, ctx.label ? `@${ctx.label}` : '@') } if (ctx.kind === 'logs' && ctx.executionId) { - return await processExecutionLogFromDb( - ctx.executionId, - ctx.label ? `@${ctx.label}` : '@' - ) + return await processExecutionLogFromDb(ctx.executionId, ctx.label ? `@${ctx.label}` : '@') } if (ctx.kind === 'workflow_block' && ctx.workflowId && ctx.blockId) { return await processWorkflowBlockFromDb(ctx.workflowId, ctx.blockId, ctx.label) @@ -100,10 +91,7 @@ export async function processContextsServer( ) } if (ctx.kind === 'knowledge' && ctx.knowledgeId) { - return await processKnowledgeFromDb( - ctx.knowledgeId, - ctx.label ? `@${ctx.label}` : '@' - ) + return await processKnowledgeFromDb(ctx.knowledgeId, ctx.label ? `@${ctx.label}` : '@') } if (ctx.kind === 'blocks' && ctx.blockIds?.length > 0) { return await processBlockMetadata( @@ -113,16 +101,10 @@ export async function processContextsServer( ) } if (ctx.kind === 'templates' && ctx.templateId) { - return await processTemplateFromDb( - ctx.templateId, - ctx.label ? `@${ctx.label}` : '@' - ) + return await processTemplateFromDb(ctx.templateId, ctx.label ? `@${ctx.label}` : '@') } if (ctx.kind === 'logs' && ctx.executionId) { - return await processExecutionLogFromDb( - ctx.executionId, - ctx.label ? `@${ctx.label}` : '@' - ) + return await processExecutionLogFromDb(ctx.executionId, ctx.label ? 
`@${ctx.label}` : '@') } if (ctx.kind === 'workflow_block' && ctx.workflowId && ctx.blockId) { return await processWorkflowBlockFromDb(ctx.workflowId, ctx.blockId, ctx.label) diff --git a/apps/sim/lib/copilot/store-utils.ts b/apps/sim/lib/copilot/store-utils.ts index 86d3105033..cdf864fee8 100644 --- a/apps/sim/lib/copilot/store-utils.ts +++ b/apps/sim/lib/copilot/store-utils.ts @@ -86,10 +86,7 @@ export function isTerminalState(state: string): boolean { ) } -export function abortAllInProgressTools( - set: StoreSet, - get: () => CopilotStore -) { +export function abortAllInProgressTools(set: StoreSet, get: () => CopilotStore) { try { const { toolCallsById, messages } = get() const updatedMap = { ...toolCallsById } @@ -166,10 +163,7 @@ export function cleanupActiveState( set: (partial: Record) => void, get: () => Record ): void { - abortAllInProgressTools( - set as unknown as StoreSet, - get as unknown as () => CopilotStore - ) + abortAllInProgressTools(set as unknown as StoreSet, get as unknown as () => CopilotStore) try { const { useWorkflowDiffStore } = require('@/stores/workflow-diff/store') as { useWorkflowDiffStore: { diff --git a/apps/sim/lib/copilot/tools/mcp/definitions.ts b/apps/sim/lib/copilot/tools/mcp/definitions.ts index 08fbe5b8ff..ad0f968fc9 100644 --- a/apps/sim/lib/copilot/tools/mcp/definitions.ts +++ b/apps/sim/lib/copilot/tools/mcp/definitions.ts @@ -183,7 +183,8 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ }, parentId: { type: 'string', - description: 'Target parent folder ID. Omit or pass empty string to move to workspace root.', + description: + 'Target parent folder ID. Omit or pass empty string to move to workspace root.', }, }, required: ['folderId'], @@ -203,7 +204,8 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ }, workflow_input: { type: 'object', - description: 'JSON object with input values. Keys should match the workflow start block input field names.', + description: + 'JSON object with input values. 
Keys should match the workflow start block input field names.', }, useDeployedState: { type: 'boolean', @@ -227,7 +229,8 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ }, stopAfterBlockId: { type: 'string', - description: 'REQUIRED. The block ID to stop after. Execution halts once this block completes.', + description: + 'REQUIRED. The block ID to stop after. Execution halts once this block completes.', }, workflow_input: { type: 'object', @@ -259,7 +262,8 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ }, executionId: { type: 'string', - description: 'Optional. Specific execution ID to load the snapshot from. Uses latest if omitted.', + description: + 'Optional. Specific execution ID to load the snapshot from. Uses latest if omitted.', }, workflow_input: { type: 'object', @@ -291,7 +295,8 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ }, executionId: { type: 'string', - description: 'Optional. Specific execution ID to load the snapshot from. Uses latest if omitted.', + description: + 'Optional. Specific execution ID to load the snapshot from. Uses latest if omitted.', }, workflow_input: { type: 'object', @@ -331,11 +336,12 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ properties: { name: { type: 'string', - description: 'A descriptive name for the API key (e.g., "production-key", "dev-testing").', + description: + 'A descriptive name for the API key (e.g., "production-key", "dev-testing").', }, workspaceId: { type: 'string', - description: 'Optional workspace ID. Defaults to user\'s default workspace.', + description: "Optional workspace ID. 
Defaults to user's default workspace.", }, }, required: ['name'], @@ -495,7 +501,15 @@ After copilot_edit completes, you can test immediately with copilot_test, or dep DEPLOYMENT TYPES: - "deploy as api" - REST API endpoint for programmatic access - "deploy as chat" - Managed chat UI with auth options -- "deploy as mcp" - Expose as MCP tool for AI agents`, +- "deploy as mcp" - Expose as MCP tool on an MCP server for AI agents to call + +MCP DEPLOYMENT FLOW: +The deploy subagent will automatically: list available MCP servers → create one if needed → deploy the workflow as an MCP tool to that server. You can specify server name, tool name, and tool description. + +ALSO CAN: +- Get the deployed (production) state to compare with draft +- Generate workspace API keys for calling deployed workflows +- List and create MCP servers in the workspace`, inputSchema: { type: 'object', properties: { @@ -515,7 +529,13 @@ DEPLOYMENT TYPES: { name: 'copilot_test', agentId: 'test', - description: `Run a workflow and verify its outputs. Works on both deployed and undeployed (draft) workflows. Use after building to verify correctness.`, + description: `Run a workflow and verify its outputs. Works on both deployed and undeployed (draft) workflows. Use after building to verify correctness. + +Supports full and partial execution: +- Full run with test inputs +- Stop after a specific block (run_workflow_until_block) +- Run a single block in isolation (run_block) +- Resume from a specific block (run_from_block)`, inputSchema: { type: 'object', properties: { @@ -590,7 +610,7 @@ DEPLOYMENT TYPES: name: 'copilot_info', agentId: 'info', description: - 'Inspect a workflow\'s blocks, connections, outputs, variables, and metadata. Use for questions about the Sim platform itself — how blocks work, what integrations are available, platform concepts, etc. 
Always provide workflowId to scope results to a specific workflow.', + "Inspect a workflow's blocks, connections, outputs, variables, and metadata. Use for questions about the Sim platform itself — how blocks work, what integrations are available, platform concepts, etc. Always provide workflowId to scope results to a specific workflow.", inputSchema: { type: 'object', properties: { @@ -644,4 +664,18 @@ DEPLOYMENT TYPES: required: ['request'], }, }, + { + name: 'copilot_platform', + agentId: 'tour', + description: + 'Get help with Sim platform navigation, keyboard shortcuts, and UI actions. Use when the user asks "how do I..." about the Sim editor, wants keyboard shortcuts, or needs to know what actions are available in the UI.', + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, ] diff --git a/apps/sim/lib/copilot/tools/server/blocks/get-blocks-and-tools.ts b/apps/sim/lib/copilot/tools/server/blocks/get-blocks-and-tools.ts index 64a4db00ef..e695f270ec 100644 --- a/apps/sim/lib/copilot/tools/server/blocks/get-blocks-and-tools.ts +++ b/apps/sim/lib/copilot/tools/server/blocks/get-blocks-and-tools.ts @@ -1,9 +1,6 @@ import { createLogger } from '@sim/logger' import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' -import { - GetBlocksAndToolsInput, - GetBlocksAndToolsResult, -} from '@/lib/copilot/tools/shared/schemas' +import { GetBlocksAndToolsInput, GetBlocksAndToolsResult } from '@/lib/copilot/tools/shared/schemas' import { registry as blockRegistry } from '@/blocks/registry' import type { BlockConfig } from '@/blocks/types' import { getUserPermissionConfig } from '@/ee/access-control/utils/permission-check' diff --git a/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts b/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts index 0db7cebdc1..7bcff1fee3 100644 --- 
a/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts +++ b/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts @@ -2,10 +2,7 @@ import { existsSync, readFileSync } from 'fs' import { join } from 'path' import { createLogger } from '@sim/logger' import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' -import { - GetBlocksMetadataInput, - GetBlocksMetadataResult, -} from '@/lib/copilot/tools/shared/schemas' +import { GetBlocksMetadataInput, GetBlocksMetadataResult } from '@/lib/copilot/tools/shared/schemas' import { registry as blockRegistry } from '@/blocks/registry' import { AuthMode, type BlockConfig, isHiddenFromDisplay } from '@/blocks/types' import { getUserPermissionConfig } from '@/ee/access-control/utils/permission-check' @@ -291,7 +288,9 @@ export const getBlocksMetadataServerTool: BaseServerTool< metadata.yamlDocumentation = readFileSync(docPath, 'utf-8') } } catch (error) { - logger.warn('Failed to read YAML documentation file', { error: error instanceof Error ? error.message : String(error) }) + logger.warn('Failed to read YAML documentation file', { + error: error instanceof Error ? error.message : String(error), + }) } if (metadata) { @@ -957,7 +956,9 @@ function resolveToolIdForOperation(blockConfig: BlockConfig, opId: string): stri } } catch (error) { const toolLogger = createLogger('GetBlocksMetadataServerTool') - toolLogger.warn('Failed to resolve tool ID for operation', { error: error instanceof Error ? error.message : String(error) }) + toolLogger.warn('Failed to resolve tool ID for operation', { + error: error instanceof Error ? 
error.message : String(error), + }) } return undefined } diff --git a/apps/sim/lib/copilot/tools/server/knowledge/knowledge-base.ts b/apps/sim/lib/copilot/tools/server/knowledge/knowledge-base.ts index d6d5af7ba1..5b00c421d7 100644 --- a/apps/sim/lib/copilot/tools/server/knowledge/knowledge-base.ts +++ b/apps/sim/lib/copilot/tools/server/knowledge/knowledge-base.ts @@ -7,6 +7,14 @@ import { getKnowledgeBaseById, getKnowledgeBases, } from '@/lib/knowledge/service' +import { + createTagDefinition, + deleteTagDefinition, + getDocumentTagDefinitions, + getNextAvailableSlot, + getTagUsageStats, + updateTagDefinition, +} from '@/lib/knowledge/tags/service' import { getQueryStrategy, handleVectorOnlySearch } from '@/app/api/knowledge/search/utils' const logger = createLogger('KnowledgeBaseServerTool') @@ -213,10 +221,177 @@ export const knowledgeBaseServerTool: BaseServerTool ({ + id: td.id, + tagSlot: td.tagSlot, + displayName: td.displayName, + fieldType: td.fieldType, + createdAt: td.createdAt, + })), + } + } + + case 'create_tag': { + if (!args.knowledgeBaseId) { + return { + success: false, + message: 'Knowledge base ID is required for create_tag operation', + } + } + if (!args.tagDisplayName) { + return { + success: false, + message: 'tagDisplayName is required for create_tag operation', + } + } + const fieldType = args.tagFieldType || 'text' + + const tagSlot = await getNextAvailableSlot(args.knowledgeBaseId, fieldType) + if (!tagSlot) { + return { + success: false, + message: `No available slots for field type "${fieldType}". 
Maximum tags of this type reached.`, + } + } + + const requestId = crypto.randomUUID().slice(0, 8) + const newTag = await createTagDefinition( + { + knowledgeBaseId: args.knowledgeBaseId, + tagSlot, + displayName: args.tagDisplayName, + fieldType, + }, + requestId + ) + + logger.info('Tag definition created via copilot', { + knowledgeBaseId: args.knowledgeBaseId, + tagId: newTag.id, + displayName: newTag.displayName, + userId: context.userId, + }) + + return { + success: true, + message: `Tag "${newTag.displayName}" created successfully`, + data: { + id: newTag.id, + tagSlot: newTag.tagSlot, + displayName: newTag.displayName, + fieldType: newTag.fieldType, + }, + } + } + + case 'update_tag': { + if (!args.tagDefinitionId) { + return { + success: false, + message: 'tagDefinitionId is required for update_tag operation', + } + } + + const updateData: { displayName?: string; fieldType?: string } = {} + if (args.tagDisplayName) updateData.displayName = args.tagDisplayName + if (args.tagFieldType) updateData.fieldType = args.tagFieldType + + if (!updateData.displayName && !updateData.fieldType) { + return { + success: false, + message: 'At least one of tagDisplayName or tagFieldType is required for update_tag', + } + } + + const requestId = crypto.randomUUID().slice(0, 8) + const updatedTag = await updateTagDefinition(args.tagDefinitionId, updateData, requestId) + + logger.info('Tag definition updated via copilot', { + tagId: args.tagDefinitionId, + userId: context.userId, + }) + + return { + success: true, + message: `Tag "${updatedTag.displayName}" updated successfully`, + data: { + id: updatedTag.id, + tagSlot: updatedTag.tagSlot, + displayName: updatedTag.displayName, + fieldType: updatedTag.fieldType, + }, + } + } + + case 'delete_tag': { + if (!args.tagDefinitionId) { + return { + success: false, + message: 'tagDefinitionId is required for delete_tag operation', + } + } + + const requestId = crypto.randomUUID().slice(0, 8) + const deleted = await 
deleteTagDefinition(args.tagDefinitionId, requestId) + + logger.info('Tag definition deleted via copilot', { + tagId: args.tagDefinitionId, + tagSlot: deleted.tagSlot, + displayName: deleted.displayName, + userId: context.userId, + }) + + return { + success: true, + message: `Tag "${deleted.displayName}" deleted successfully. All document/chunk references cleared.`, + data: { + tagSlot: deleted.tagSlot, + displayName: deleted.displayName, + }, + } + } + + case 'get_tag_usage': { + if (!args.knowledgeBaseId) { + return { + success: false, + message: 'Knowledge base ID is required for get_tag_usage operation', + } + } + + const requestId = crypto.randomUUID().slice(0, 8) + const stats = await getTagUsageStats(args.knowledgeBaseId, requestId) + + return { + success: true, + message: `Retrieved usage stats for ${stats.length} tag(s)`, + data: stats, + } + } + default: return { success: false, - message: `Unknown operation: ${operation}. Supported operations: create, list, get, query`, + message: `Unknown operation: ${operation}. Supported operations: create, list, get, query, list_tags, create_tag, update_tag, delete_tag, get_tag_usage`, } } } catch (error) { diff --git a/apps/sim/lib/copilot/tools/server/other/search-online.ts b/apps/sim/lib/copilot/tools/server/other/search-online.ts index a839d345cd..75f59b507f 100644 --- a/apps/sim/lib/copilot/tools/server/other/search-online.ts +++ b/apps/sim/lib/copilot/tools/server/other/search-online.ts @@ -49,7 +49,17 @@ export const searchOnlineServerTool: BaseServerTool } | undefined + const output = exaResult.output as + | { + results?: Array<{ + title?: string + url?: string + text?: string + summary?: string + publishedDate?: string + }> + } + | undefined const exaResults = output?.results ?? 
[] if (exaResult.success && exaResults.length > 0) { diff --git a/apps/sim/lib/copilot/tools/server/user/get-credentials.ts b/apps/sim/lib/copilot/tools/server/user/get-credentials.ts index 78911bd80c..9f0c8b4119 100644 --- a/apps/sim/lib/copilot/tools/server/user/get-credentials.ts +++ b/apps/sim/lib/copilot/tools/server/user/get-credentials.ts @@ -90,7 +90,9 @@ export const getCredentialsServerTool: BaseServerTool const decoded = jwtDecode<{ email?: string; name?: string }>(acc.idToken) displayName = decoded.email || decoded.name || '' } catch (error) { - logger.warn('Failed to decode JWT id token', { error: error instanceof Error ? error.message : String(error) }) + logger.warn('Failed to decode JWT id token', { + error: error instanceof Error ? error.message : String(error), + }) } } if (!displayName && baseProvider === 'github') displayName = `${acc.accountId} (GitHub)` @@ -110,7 +112,9 @@ export const getCredentialsServerTool: BaseServerTool ) accessToken = refreshedToken || accessToken } catch (error) { - logger.warn('Failed to refresh OAuth access token', { error: error instanceof Error ? error.message : String(error) }) + logger.warn('Failed to refresh OAuth access token', { + error: error instanceof Error ? 
error.message : String(error), + }) } connectedCredentials.push({ id: acc.id, diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/builders.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/builders.ts index 7f46294f06..935e7bceee 100644 --- a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/builders.ts +++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/builders.ts @@ -7,7 +7,7 @@ import { getAllBlocks } from '@/blocks/registry' import type { BlockConfig } from '@/blocks/types' import { TRIGGER_RUNTIME_SUBBLOCK_IDS } from '@/triggers/constants' import type { EditWorkflowOperation, SkippedItem, ValidationError } from './types' -import { UUID_REGEX, logSkippedItem } from './types' +import { logSkippedItem, UUID_REGEX } from './types' import { validateInputsForBlock, validateSourceHandleForBlock, diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/engine.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/engine.ts index 6a5c47246b..7bb5d4c0d2 100644 --- a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/engine.ts +++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/engine.ts @@ -238,8 +238,8 @@ export function applyOperationsToWorkflowState( totalEdges: (modifiedState as any).edges?.length, }) } - // Regenerate loops and parallels after modifications + ;(modifiedState as any).loops = generateLoopBlocks((modifiedState as any).blocks) ;(modifiedState as any).parallels = generateParallelBlocks((modifiedState as any).blocks) diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/index.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/index.ts index 4910094ae9..5532c404a2 100644 --- a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/index.ts +++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/index.ts @@ -3,7 +3,6 @@ import { workflow as workflowTable } from '@sim/db/schema' import { createLogger } from '@sim/logger' import { eq } 
from 'drizzle-orm' import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' -import { getUserPermissionConfig } from '@/ee/access-control/utils/permission-check' import { applyAutoLayout } from '@/lib/workflows/autolayout' import { extractAndPersistCustomTools } from '@/lib/workflows/persistence/custom-tools-persistence' import { @@ -11,6 +10,7 @@ import { saveWorkflowToNormalizedTables, } from '@/lib/workflows/persistence/utils' import { validateWorkflowState } from '@/lib/workflows/sanitization/validation' +import { getUserPermissionConfig } from '@/ee/access-control/utils/permission-check' import { generateLoopBlocks, generateParallelBlocks } from '@/stores/workflows/workflow/utils' import { applyOperationsToWorkflowState } from './engine' import type { EditWorkflowParams, ValidationError } from './types' @@ -214,7 +214,8 @@ export const editWorkflowServerTool: BaseServerTool : undefined // Format skipped items for LLM feedback - const skippedMessages = skippedItems.length > 0 ? skippedItems.map((item) => item.reason) : undefined + const skippedMessages = + skippedItems.length > 0 ? 
skippedItems.map((item) => item.reason) : undefined // Persist the workflow state to the database const finalWorkflowState = validation.sanitizedState || modifiedWorkflowState diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/operations.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/operations.ts index 72155d1dd9..58b3b1ab50 100644 --- a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/operations.ts +++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/operations.ts @@ -1,8 +1,8 @@ import { createLogger } from '@sim/logger' +import { isValidKey } from '@/lib/workflows/sanitization/key-validation' import { TriggerUtils } from '@/lib/workflows/triggers/triggers' import { getBlock } from '@/blocks/registry' -import { isValidKey } from '@/lib/workflows/sanitization/key-validation' -import { RESERVED_BLOCK_NAMES, normalizeName } from '@/executor/constants' +import { normalizeName, RESERVED_BLOCK_NAMES } from '@/executor/constants' import { TRIGGER_RUNTIME_SUBBLOCK_IDS } from '@/triggers/constants' import { addConnectionsAsEdges, @@ -242,7 +242,11 @@ export function handleEditOperation(op: EditWorkflowOperation, ctx: OperationCon const editBlockConfig = getBlock(block.type) if (editBlockConfig) { - updateCanonicalModesForInputs(block, Object.keys(validationResult.validInputs), editBlockConfig) + updateCanonicalModesForInputs( + block, + Object.keys(validationResult.validInputs), + editBlockConfig + ) } } @@ -341,7 +345,8 @@ export function handleEditOperation(op: EditWorkflowOperation, ctx: OperationCon // Remove edges to/from removed children modifiedState.edges = modifiedState.edges.filter( - (edge: any) => !existingChildren.includes(edge.source) && !existingChildren.includes(edge.target) + (edge: any) => + !existingChildren.includes(edge.source) && !existingChildren.includes(edge.target) ) // Add new nested blocks @@ -428,7 +433,8 @@ export function handleEditOperation(op: EditWorkflowOperation, ctx: OperationCon 
block.data.parallelType = params.inputs.parallelType } } - const effectiveParallelType = params.inputs?.parallelType ?? block.data.parallelType ?? 'count' + const effectiveParallelType = + params.inputs?.parallelType ?? block.data.parallelType ?? 'count' // count only valid for 'count' parallelType if (params.inputs?.count && effectiveParallelType === 'count') { block.data.count = params.inputs.count @@ -489,14 +495,19 @@ export function handleEditOperation(op: EditWorkflowOperation, ctx: OperationCon params.removeEdges.forEach(({ targetBlockId, sourceHandle = 'source' }) => { modifiedState.edges = modifiedState.edges.filter( (edge: any) => - !(edge.source === block_id && edge.target === targetBlockId && edge.sourceHandle === sourceHandle) + !( + edge.source === block_id && + edge.target === targetBlockId && + edge.sourceHandle === sourceHandle + ) ) }) } } export function handleAddOperation(op: EditWorkflowOperation, ctx: OperationContext): void { - const { modifiedState, skippedItems, validationErrors, permissionConfig, deferredConnections } = ctx + const { modifiedState, skippedItems, validationErrors, permissionConfig, deferredConnections } = + ctx const { block_id, params } = op const addNormalizedName = params?.name ? 
normalizeName(params.name) : '' @@ -522,7 +533,11 @@ export function handleAddOperation(op: EditWorkflowOperation, ctx: OperationCont return } - const conflictingBlock = findBlockWithDuplicateNormalizedName(modifiedState.blocks, params.name, block_id) + const conflictingBlock = findBlockWithDuplicateNormalizedName( + modifiedState.blocks, + params.name, + block_id + ) if (conflictingBlock) { logSkippedItem(skippedItems, { @@ -580,7 +595,10 @@ export function handleAddOperation(op: EditWorkflowOperation, ctx: OperationCont } // Check single-instance block constraints (e.g., Response block) - const singleInstanceIssue = TriggerUtils.getSingleInstanceBlockIssue(modifiedState.blocks, params.type) + const singleInstanceIssue = TriggerUtils.getSingleInstanceBlockIssue( + modifiedState.blocks, + params.type + ) if (singleInstanceIssue) { logSkippedItem(skippedItems, { type: 'duplicate_single_instance_block', @@ -614,9 +632,11 @@ export function handleAddOperation(op: EditWorkflowOperation, ctx: OperationCont ...newBlock.data, loopType, // Only include type-appropriate fields - ...(loopType === 'forEach' && params.inputs?.collection && { collection: params.inputs.collection }), + ...(loopType === 'forEach' && + params.inputs?.collection && { collection: params.inputs.collection }), ...(loopType === 'for' && params.inputs?.iterations && { count: params.inputs.iterations }), - ...(loopType === 'while' && params.inputs?.condition && { whileCondition: params.inputs.condition }), + ...(loopType === 'while' && + params.inputs?.condition && { whileCondition: params.inputs.condition }), ...(loopType === 'doWhile' && params.inputs?.condition && { doWhileCondition: params.inputs.condition }), } @@ -717,7 +737,8 @@ export function handleInsertIntoSubflowOperation( op: EditWorkflowOperation, ctx: OperationContext ): void { - const { modifiedState, skippedItems, validationErrors, permissionConfig, deferredConnections } = ctx + const { modifiedState, skippedItems, validationErrors, 
permissionConfig, deferredConnections } = + ctx const { block_id, params } = op const subflowId = params?.subflowId diff --git a/apps/sim/lib/copilot/tools/server/workflow/get-workflow-console.ts b/apps/sim/lib/copilot/tools/server/workflow/get-workflow-console.ts index e71496cc40..080e339692 100644 --- a/apps/sim/lib/copilot/tools/server/workflow/get-workflow-console.ts +++ b/apps/sim/lib/copilot/tools/server/workflow/get-workflow-console.ts @@ -90,7 +90,9 @@ function normalizeErrorMessage(errorValue: unknown): string | undefined { try { return JSON.stringify(errorValue) } catch (error) { - logger.warn('Failed to stringify error value', { error: error instanceof Error ? error.message : String(error) }) + logger.warn('Failed to stringify error value', { + error: error instanceof Error ? error.message : String(error), + }) } } try { diff --git a/apps/sim/lib/copilot/tools/shared/schemas.ts b/apps/sim/lib/copilot/tools/shared/schemas.ts index 2377aecf79..804a1a48ab 100644 --- a/apps/sim/lib/copilot/tools/shared/schemas.ts +++ b/apps/sim/lib/copilot/tools/shared/schemas.ts @@ -72,7 +72,17 @@ export type GetBlockConfigResultType = z.infer // knowledge_base - shared schema used by client tool, server tool, and registry export const KnowledgeBaseArgsSchema = z.object({ - operation: z.enum(['create', 'list', 'get', 'query']), + operation: z.enum([ + 'create', + 'list', + 'get', + 'query', + 'list_tags', + 'create_tag', + 'update_tag', + 'delete_tag', + 'get_tag_usage', + ]), args: z .object({ /** Name of the knowledge base (required for create) */ @@ -81,7 +91,7 @@ export const KnowledgeBaseArgsSchema = z.object({ description: z.string().optional(), /** Workspace ID to associate with (required for create, optional for list) */ workspaceId: z.string().optional(), - /** Knowledge base ID (required for get, query) */ + /** Knowledge base ID (required for get, query, list_tags, create_tag, get_tag_usage) */ knowledgeBaseId: z.string().optional(), /** Search query text 
(required for query) */ query: z.string().optional(), @@ -95,6 +105,12 @@ export const KnowledgeBaseArgsSchema = z.object({ overlap: z.number().min(0).max(500).default(200), }) .optional(), + /** Tag definition ID (required for update_tag, delete_tag) */ + tagDefinitionId: z.string().optional(), + /** Tag display name (required for create_tag, optional for update_tag) */ + tagDisplayName: z.string().optional(), + /** Tag field type: text, number, date, boolean (optional for create_tag, defaults to text) */ + tagFieldType: z.enum(['text', 'number', 'date', 'boolean']).optional(), }) .optional(), }) diff --git a/apps/sim/lib/logs/execution/logger.ts b/apps/sim/lib/logs/execution/logger.ts index 033f3dc8e8..3c8fa42242 100644 --- a/apps/sim/lib/logs/execution/logger.ts +++ b/apps/sim/lib/logs/execution/logger.ts @@ -21,7 +21,6 @@ import { checkAndBillOverageThreshold } from '@/lib/billing/threshold-billing' import { isBillingEnabled } from '@/lib/core/config/feature-flags' import { redactApiKeys } from '@/lib/core/security/redaction' import { filterForDisplay } from '@/lib/core/utils/display-filters' -import type { SerializableExecutionState } from '@/executor/execution/types' import { emitWorkflowExecutionCompleted } from '@/lib/logs/events' import { snapshotService } from '@/lib/logs/execution/snapshot/service' import type { @@ -35,6 +34,7 @@ import type { WorkflowState, } from '@/lib/logs/types' import { getWorkspaceBilledAccountUserId } from '@/lib/workspaces/utils' +import type { SerializableExecutionState } from '@/executor/execution/types' export interface ToolCall { name: string diff --git a/apps/sim/lib/logs/execution/logging-session.ts b/apps/sim/lib/logs/execution/logging-session.ts index 4245af6c19..9ab710dc12 100644 --- a/apps/sim/lib/logs/execution/logging-session.ts +++ b/apps/sim/lib/logs/execution/logging-session.ts @@ -3,7 +3,6 @@ import { workflowExecutionLogs } from '@sim/db/schema' import { createLogger } from '@sim/logger' import { eq, sql } from 
'drizzle-orm' import { BASE_EXECUTION_CHARGE } from '@/lib/billing/constants' -import type { SerializableExecutionState } from '@/executor/execution/types' import { executionLogger } from '@/lib/logs/execution/logger' import { calculateCostSummary, @@ -18,6 +17,7 @@ import type { TraceSpan, WorkflowState, } from '@/lib/logs/types' +import type { SerializableExecutionState } from '@/executor/execution/types' const logger = createLogger('LoggingSession') diff --git a/apps/sim/lib/logs/types.ts b/apps/sim/lib/logs/types.ts index 9d160fd4a4..1b93e64e10 100644 --- a/apps/sim/lib/logs/types.ts +++ b/apps/sim/lib/logs/types.ts @@ -1,6 +1,6 @@ import type { Edge } from 'reactflow' -import type { BlockLog, NormalizedBlockOutput } from '@/executor/types' import type { SerializableExecutionState } from '@/executor/execution/types' +import type { BlockLog, NormalizedBlockOutput } from '@/executor/types' import type { DeploymentStatus } from '@/stores/workflows/registry/types' import type { Loop, Parallel, WorkflowState } from '@/stores/workflows/workflow/types' diff --git a/apps/sim/lib/workflows/blocks/index.ts b/apps/sim/lib/workflows/blocks/index.ts index 878f927478..7f9067cc0a 100644 --- a/apps/sim/lib/workflows/blocks/index.ts +++ b/apps/sim/lib/workflows/blocks/index.ts @@ -1,2 +1,7 @@ export { BlockSchemaResolver, blockSchemaResolver } from './schema-resolver' -export type { ResolvedBlock, ResolvedSubBlock, ResolvedOption, ResolvedOutput } from './schema-types' +export type { + ResolvedBlock, + ResolvedOption, + ResolvedOutput, + ResolvedSubBlock, +} from './schema-types' diff --git a/apps/sim/lib/workflows/blocks/schema-resolver.ts b/apps/sim/lib/workflows/blocks/schema-resolver.ts index 3a340a5ad5..cd02992e77 100644 --- a/apps/sim/lib/workflows/blocks/schema-resolver.ts +++ b/apps/sim/lib/workflows/blocks/schema-resolver.ts @@ -1,7 +1,12 @@ import { createLogger } from '@sim/logger' import { getAllBlocks, getBlock } from '@/blocks/registry' import type { BlockConfig, 
SubBlockConfig } from '@/blocks/types' -import type { ResolvedBlock, ResolvedOption, ResolvedOutput, ResolvedSubBlock } from './schema-types' +import type { + ResolvedBlock, + ResolvedOption, + ResolvedOutput, + ResolvedSubBlock, +} from './schema-types' const logger = createLogger('BlockSchemaResolver') @@ -86,7 +91,7 @@ export class BlockSchemaResolver { } if (!resolved.validation?.min && !resolved.validation?.max && !resolved.validation?.pattern) { - delete resolved.validation + resolved.validation = undefined } return resolved @@ -187,7 +192,9 @@ export class BlockSchemaResolver { private supportsTriggerMode(config: BlockConfig): boolean { return Boolean( config.triggerAllowed || - config.subBlocks.some((subBlock) => subBlock.id === 'triggerMode' || subBlock.mode === 'trigger') + config.subBlocks.some( + (subBlock) => subBlock.id === 'triggerMode' || subBlock.mode === 'trigger' + ) ) } diff --git a/apps/sim/stores/panel/copilot/store.ts b/apps/sim/stores/panel/copilot/store.ts index fda7733205..c3ade28057 100644 --- a/apps/sim/stores/panel/copilot/store.ts +++ b/apps/sim/stores/panel/copilot/store.ts @@ -42,15 +42,14 @@ import { } from '@/lib/copilot/messages' import type { CopilotTransportMode } from '@/lib/copilot/models' import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser' -import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' import { abortAllInProgressTools, cleanupActiveState, isRejectedState, - isTerminalState, resolveToolDisplay, stripTodoTags, } from '@/lib/copilot/store-utils' +import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' import { getQueryClient } from '@/app/_shell/providers/query-provider' import { subscriptionKeys } from '@/hooks/queries/subscription' import type { @@ -277,13 +276,7 @@ function prepareSendContext( isSendingMessage, abortController: activeAbortController, } = get() - const { - stream = true, - fileAttachments, - contexts, - messageId, - 
queueIfBusy = true, - } = options + const { stream = true, fileAttachments, contexts, messageId, queueIfBusy = true } = options if (!workflowId) return null @@ -381,9 +374,13 @@ function prepareSendContext( ? `${message.substring(0, OPTIMISTIC_TITLE_MAX_LENGTH - 3)}...` : message set((state) => ({ - currentChat: state.currentChat ? { ...state.currentChat, title: optimisticTitle } : state.currentChat, + currentChat: state.currentChat + ? { ...state.currentChat, title: optimisticTitle } + : state.currentChat, chats: state.currentChat - ? state.chats.map((c) => (c.id === state.currentChat!.id ? { ...c, title: optimisticTitle } : c)) + ? state.chats.map((c) => + c.id === state.currentChat!.id ? { ...c, title: optimisticTitle } : c + ) : state.chats, })) } @@ -416,7 +413,9 @@ async function initiateStream( kind: c?.kind, chatId: c?.kind === 'past_chat' ? c.chatId : undefined, workflowId: - c?.kind === 'workflow' || c?.kind === 'current_workflow' || c?.kind === 'workflow_block' + c?.kind === 'workflow' || + c?.kind === 'current_workflow' || + c?.kind === 'workflow_block' ? c.workflowId : undefined, label: c?.label, @@ -435,7 +434,8 @@ async function initiateStream( }) } - const apiMode: CopilotTransportMode = mode === 'ask' ? 'ask' : mode === 'plan' ? 'plan' : 'agent' + const apiMode: CopilotTransportMode = + mode === 'ask' ? 'ask' : mode === 'plan' ? 'plan' : 'agent' const uiToApiCommandMap: Record = { actions: 'superagent' } const commands = contexts ?.filter((c) => c.kind === 'slash_command' && 'command' in c) @@ -532,7 +532,9 @@ async function finalizeStream( const errorMessage = createErrorMessage(prepared.streamingMessage.id, errorContent, errorType) set((state) => ({ - messages: state.messages.map((m) => (m.id === prepared.streamingMessage.id ? errorMessage : m)), + messages: state.messages.map((m) => + m.id === prepared.streamingMessage.id ? 
errorMessage : m + ), error: errorContent, isSendingMessage: false, abortController: null, @@ -726,10 +728,7 @@ function finalizeResume( const hasContinueTag = (typeof m.content === 'string' && m.content.includes(CONTINUE_OPTIONS_TAG)) || (Array.isArray(m.contentBlocks) && - m.contentBlocks.some( - (b) => - b.type === 'text' && b.content?.includes(CONTINUE_OPTIONS_TAG) - )) + m.contentBlocks.some((b) => b.type === 'text' && b.content?.includes(CONTINUE_OPTIONS_TAG))) if (!hasContinueTag) return m cleanedExisting = true return { @@ -765,13 +764,16 @@ function finalizeResume( } else if (replay.bufferedContent || (replay.replayBlocks && replay.replayBlocks.length > 0)) { nextMessages = nextMessages.map((m) => { if (m.id !== replay.nextStream.assistantMessageId) return m - let nextBlocks = replay.replayBlocks && replay.replayBlocks.length > 0 ? replay.replayBlocks : null + let nextBlocks = + replay.replayBlocks && replay.replayBlocks.length > 0 ? replay.replayBlocks : null if (!nextBlocks) { const existingBlocks = Array.isArray(m.contentBlocks) ? m.contentBlocks : [] const existingText = extractTextFromBlocks(existingBlocks) if (existingText && replay.bufferedContent.startsWith(existingText)) { const delta = replay.bufferedContent.slice(existingText.length) - nextBlocks = delta ? appendTextToBlocks(existingBlocks, delta) : cloneContentBlocks(existingBlocks) + nextBlocks = delta + ? appendTextToBlocks(existingBlocks, delta) + : cloneContentBlocks(existingBlocks) } else if (!existingText && existingBlocks.length === 0) { nextBlocks = replay.bufferedContent ? 
[{ type: TEXT_BLOCK_TYPE, content: replay.bufferedContent, timestamp: Date.now() }] @@ -852,7 +854,10 @@ async function resumeFromLiveStream( set({ isSendingMessage: false, abortController: null }) } catch (error) { - if (error instanceof Error && (error.name === 'AbortError' || error.message.includes('aborted'))) { + if ( + error instanceof Error && + (error.name === 'AbortError' || error.message.includes('aborted')) + ) { logger.info('[Copilot] Resume stream aborted by user') set({ isSendingMessage: false, abortController: null }) return false @@ -1764,7 +1769,8 @@ export const useCopilotStore = create()( if (abortSignal?.aborted) { context.wasAborted = true const { suppressAbortContinueOption } = get() - context.suppressContinueOption = suppressAbortContinueOption === true || isPageUnloading() + context.suppressContinueOption = + suppressAbortContinueOption === true || isPageUnloading() if (suppressAbortContinueOption) { set({ suppressAbortContinueOption: false }) } diff --git a/apps/sim/stores/workflow-diff/types.ts b/apps/sim/stores/workflow-diff/types.ts index 8c412b97c1..5356046fe7 100644 --- a/apps/sim/stores/workflow-diff/types.ts +++ b/apps/sim/stores/workflow-diff/types.ts @@ -19,7 +19,11 @@ export interface DiffActionOptions { } export interface WorkflowDiffActions { - setProposedChanges: (workflowState: WorkflowState, diffAnalysis?: DiffAnalysis, options?: DiffActionOptions) => Promise + setProposedChanges: ( + workflowState: WorkflowState, + diffAnalysis?: DiffAnalysis, + options?: DiffActionOptions + ) => Promise clearDiff: (options?: { restoreBaseline?: boolean }) => void toggleDiffView: () => void acceptChanges: (options?: DiffActionOptions) => Promise From df3523e011dffaa3e8e91cb2d7e32e786f5daa84 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Fri, 6 Feb 2026 12:38:16 -0800 Subject: [PATCH 36/72] Add copilot mcp tracking --- apps/sim/app/api/billing/update-cost/route.ts | 16 +- apps/sim/app/api/mcp/copilot/route.ts | 67 +- 
apps/sim/lib/billing/core/usage-log.ts | 2 +- .../db/migrations/0153_complete_arclight.sql | 4 + .../db/migrations/meta/0153_snapshot.json | 11139 ++++++++++++++++ packages/db/migrations/meta/_journal.json | 9 +- packages/db/schema.ts | 11 +- 7 files changed, 11235 insertions(+), 13 deletions(-) create mode 100644 packages/db/migrations/0153_complete_arclight.sql create mode 100644 packages/db/migrations/meta/0153_snapshot.json diff --git a/apps/sim/app/api/billing/update-cost/route.ts b/apps/sim/app/api/billing/update-cost/route.ts index 3e8e0a289d..87b7f14f15 100644 --- a/apps/sim/app/api/billing/update-cost/route.ts +++ b/apps/sim/app/api/billing/update-cost/route.ts @@ -18,6 +18,7 @@ const UpdateCostSchema = z.object({ model: z.string().min(1, 'Model is required'), inputTokens: z.number().min(0).default(0), outputTokens: z.number().min(0).default(0), + source: z.enum(['copilot', 'mcp_copilot']).default('copilot'), }) /** @@ -75,12 +76,14 @@ export async function POST(req: NextRequest) { ) } - const { userId, cost, model, inputTokens, outputTokens } = validation.data + const { userId, cost, model, inputTokens, outputTokens, source } = validation.data + const isMcp = source === 'mcp_copilot' logger.info(`[${requestId}] Processing cost update`, { userId, cost, model, + source, }) // Check if user stats record exists (same as ExecutionLogger) @@ -96,7 +99,7 @@ export async function POST(req: NextRequest) { return NextResponse.json({ error: 'User stats record not found' }, { status: 500 }) } - const updateFields = { + const updateFields: Record = { totalCost: sql`total_cost + ${cost}`, currentPeriodCost: sql`current_period_cost + ${cost}`, totalCopilotCost: sql`total_copilot_cost + ${cost}`, @@ -105,17 +108,24 @@ export async function POST(req: NextRequest) { lastActive: new Date(), } + // Also increment MCP-specific counters when source is mcp_copilot + if (isMcp) { + updateFields.totalMcpCopilotCost = sql`total_mcp_copilot_cost + ${cost}` + 
updateFields.currentPeriodMcpCopilotCost = sql`current_period_mcp_copilot_cost + ${cost}` + } + await db.update(userStats).set(updateFields).where(eq(userStats.userId, userId)) logger.info(`[${requestId}] Updated user stats record`, { userId, addedCost: cost, + source, }) // Log usage for complete audit trail await logModelUsage({ userId, - source: 'copilot', + source: isMcp ? 'mcp_copilot' : 'copilot', model, inputTokens, outputTokens, diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index bf7d09eadf..caf095110f 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -10,9 +10,13 @@ import { type ListToolsResult, type RequestId, } from '@modelcontextprotocol/sdk/types.js' +import { db } from '@sim/db' +import { userStats } from '@sim/db/schema' import { createLogger } from '@sim/logger' +import { eq, sql } from 'drizzle-orm' import { type NextRequest, NextResponse } from 'next/server' -import { checkHybridAuth } from '@/lib/auth/hybrid' +import { authenticateApiKeyFromHeader, updateApiKeyLastUsed } from '@/lib/api-key/service' +import { checkServerSideUsageLimits } from '@/lib/billing/calculations/usage-monitor' import { getCopilotModel } from '@/lib/copilot/config' import { SIM_AGENT_VERSION } from '@/lib/copilot/constants' import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator' @@ -97,11 +101,28 @@ export async function GET() { export async function POST(request: NextRequest) { try { - const auth = await checkHybridAuth(request, { requireWorkflowId: false }) - if (!auth.success || !auth.userId) { - return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) + // API-key-only auth — MCP clients must provide x-api-key header + const apiKeyHeader = request.headers.get('x-api-key') + if (!apiKeyHeader) { + return NextResponse.json( + createError(0, -32000, 'API key required. 
Set the x-api-key header with a valid Sim API key.'), + { status: 401 } + ) + } + + const authResult = await authenticateApiKeyFromHeader(apiKeyHeader) + if (!authResult.success || !authResult.userId) { + return NextResponse.json( + createError(0, -32000, authResult.error || 'Invalid API key'), + { status: 401 } + ) } + // Fire-and-forget last-used update + updateApiKeyLastUsed(authResult.keyId!) + + const userId = authResult.userId + const body = (await request.json()) as JSONRPCMessage if (isJSONRPCNotification(body)) { @@ -117,6 +138,17 @@ export async function POST(request: NextRequest) { const { id, method, params } = body + // Pre-flight usage limit check for tool calls + if (method === 'tools/call') { + const usageCheck = await checkServerSideUsageLimits(userId) + if (usageCheck.isExceeded) { + return NextResponse.json( + createError(id, -32000, `Usage limit exceeded: ${usageCheck.message || 'Upgrade your plan.'}`), + { status: 402 } + ) + } + } + switch (method) { case 'initialize': { const result: InitializeResult = { @@ -131,12 +163,16 @@ export async function POST(request: NextRequest) { return NextResponse.json(createResponse(id, {})) case 'tools/list': return handleToolsList(id) - case 'tools/call': - return handleToolsCall( + case 'tools/call': { + const response = await handleToolsCall( id, params as { name: string; arguments?: Record }, - auth.userId + userId ) + // Track MCP copilot call (fire-and-forget) + trackMcpCopilotCall(userId) + return response + } default: return NextResponse.json( createError(id, ErrorCode.MethodNotFound, `Method not found: ${method}`), @@ -151,6 +187,22 @@ export async function POST(request: NextRequest) { } } +/** + * Increment MCP copilot call counter in userStats (fire-and-forget). 
+ */ +function trackMcpCopilotCall(userId: string): void { + db.update(userStats) + .set({ + totalMcpCopilotCalls: sql`total_mcp_copilot_calls + 1`, + lastActive: new Date(), + }) + .where(eq(userStats.userId, userId)) + .then(() => {}) + .catch((error) => { + logger.error('Failed to track MCP copilot call', { error, userId }) + }) +} + async function handleToolsList(id: RequestId): Promise { const directTools = DIRECT_TOOL_DEFS.map((tool) => ({ name: tool.name, @@ -351,6 +403,7 @@ async function handleSubagentToolCall( context, model, headless: true, + source: 'mcp_copilot', }, { userId, diff --git a/apps/sim/lib/billing/core/usage-log.ts b/apps/sim/lib/billing/core/usage-log.ts index a5c94393b1..9c4e6851c6 100644 --- a/apps/sim/lib/billing/core/usage-log.ts +++ b/apps/sim/lib/billing/core/usage-log.ts @@ -14,7 +14,7 @@ export type UsageLogCategory = 'model' | 'fixed' /** * Usage log source types */ -export type UsageLogSource = 'workflow' | 'wand' | 'copilot' +export type UsageLogSource = 'workflow' | 'wand' | 'copilot' | 'mcp_copilot' /** * Metadata for 'model' category charges diff --git a/packages/db/migrations/0153_complete_arclight.sql b/packages/db/migrations/0153_complete_arclight.sql new file mode 100644 index 0000000000..f8f4898626 --- /dev/null +++ b/packages/db/migrations/0153_complete_arclight.sql @@ -0,0 +1,4 @@ +ALTER TYPE "public"."usage_log_source" ADD VALUE 'mcp_copilot';--> statement-breakpoint +ALTER TABLE "user_stats" ADD COLUMN "total_mcp_copilot_calls" integer DEFAULT 0 NOT NULL;--> statement-breakpoint +ALTER TABLE "user_stats" ADD COLUMN "total_mcp_copilot_cost" numeric DEFAULT '0' NOT NULL;--> statement-breakpoint +ALTER TABLE "user_stats" ADD COLUMN "current_period_mcp_copilot_cost" numeric DEFAULT '0' NOT NULL; \ No newline at end of file diff --git a/packages/db/migrations/meta/0153_snapshot.json b/packages/db/migrations/meta/0153_snapshot.json new file mode 100644 index 0000000000..544b33eee7 --- /dev/null +++ 
b/packages/db/migrations/meta/0153_snapshot.json @@ -0,0 +1,11139 @@ +{ + "id": "2652353e-bc06-43fe-a8c6-4d03fe4dac93", + "prevId": "137c6e6a-44df-4e0a-93df-61265ae36c52", + "version": "7", + "dialect": "postgresql", + "tables": { + "public.a2a_agent": { + "name": "a2a_agent", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "version": { + "name": "version", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'1.0.0'" + }, + "capabilities": { + "name": "capabilities", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "skills": { + "name": "skills", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'[]'" + }, + "authentication": { + "name": "authentication", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "signatures": { + "name": "signatures", + "type": "jsonb", + "primaryKey": false, + "notNull": false, + "default": "'[]'" + }, + "is_published": { + "name": "is_published", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "published_at": { + "name": "published_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": 
"timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "a2a_agent_workspace_id_idx": { + "name": "a2a_agent_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "a2a_agent_workflow_id_idx": { + "name": "a2a_agent_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "a2a_agent_created_by_idx": { + "name": "a2a_agent_created_by_idx", + "columns": [ + { + "expression": "created_by", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "a2a_agent_workspace_workflow_unique": { + "name": "a2a_agent_workspace_workflow_unique", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "a2a_agent_workspace_id_workspace_id_fk": { + "name": "a2a_agent_workspace_id_workspace_id_fk", + "tableFrom": "a2a_agent", + "tableTo": "workspace", + "columnsFrom": [ + "workspace_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "a2a_agent_workflow_id_workflow_id_fk": { + "name": "a2a_agent_workflow_id_workflow_id_fk", + "tableFrom": "a2a_agent", + "tableTo": "workflow", + "columnsFrom": [ + "workflow_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "a2a_agent_created_by_user_id_fk": { + "name": "a2a_agent_created_by_user_id_fk", + "tableFrom": "a2a_agent", 
+ "tableTo": "user", + "columnsFrom": [ + "created_by" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.a2a_push_notification_config": { + "name": "a2a_push_notification_config", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "task_id": { + "name": "task_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "url": { + "name": "url", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "token": { + "name": "token", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "auth_schemes": { + "name": "auth_schemes", + "type": "jsonb", + "primaryKey": false, + "notNull": false, + "default": "'[]'" + }, + "auth_credentials": { + "name": "auth_credentials", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "is_active": { + "name": "is_active", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "a2a_push_notification_config_task_id_idx": { + "name": "a2a_push_notification_config_task_id_idx", + "columns": [ + { + "expression": "task_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "a2a_push_notification_config_task_unique": { + "name": "a2a_push_notification_config_task_unique", + "columns": [ + { + "expression": "task_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + 
"method": "btree", + "with": {} + } + }, + "foreignKeys": { + "a2a_push_notification_config_task_id_a2a_task_id_fk": { + "name": "a2a_push_notification_config_task_id_a2a_task_id_fk", + "tableFrom": "a2a_push_notification_config", + "tableTo": "a2a_task", + "columnsFrom": [ + "task_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.a2a_task": { + "name": "a2a_task", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "agent_id": { + "name": "agent_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "session_id": { + "name": "session_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "status": { + "name": "status", + "type": "a2a_task_status", + "typeSchema": "public", + "primaryKey": false, + "notNull": true, + "default": "'submitted'" + }, + "messages": { + "name": "messages", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'[]'" + }, + "artifacts": { + "name": "artifacts", + "type": "jsonb", + "primaryKey": false, + "notNull": false, + "default": "'[]'" + }, + "execution_id": { + "name": "execution_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "metadata": { + "name": "metadata", + "type": "jsonb", + "primaryKey": false, + "notNull": false, + "default": "'{}'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "completed_at": { + "name": "completed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "a2a_task_agent_id_idx": { + "name": "a2a_task_agent_id_idx", 
+ "columns": [ + { + "expression": "agent_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "a2a_task_session_id_idx": { + "name": "a2a_task_session_id_idx", + "columns": [ + { + "expression": "session_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "a2a_task_status_idx": { + "name": "a2a_task_status_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "a2a_task_execution_id_idx": { + "name": "a2a_task_execution_id_idx", + "columns": [ + { + "expression": "execution_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "a2a_task_created_at_idx": { + "name": "a2a_task_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "a2a_task_agent_id_a2a_agent_id_fk": { + "name": "a2a_task_agent_id_a2a_agent_id_fk", + "tableFrom": "a2a_task", + "tableTo": "a2a_agent", + "columnsFrom": [ + "agent_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.account": { + "name": "account", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "account_id": { + "name": "account_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "provider_id": { + "name": 
"provider_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "access_token": { + "name": "access_token", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "refresh_token": { + "name": "refresh_token", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "id_token": { + "name": "id_token", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "access_token_expires_at": { + "name": "access_token_expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "refresh_token_expires_at": { + "name": "refresh_token_expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "scope": { + "name": "scope", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "password": { + "name": "password", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + } + }, + "indexes": { + "account_user_id_idx": { + "name": "account_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "idx_account_on_account_id_provider_id": { + "name": "idx_account_on_account_id_provider_id", + "columns": [ + { + "expression": "account_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "provider_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "account_user_provider_unique": { + "name": "account_user_provider_unique", + "columns": [ + { + "expression": 
"user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "provider_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "account_user_id_user_id_fk": { + "name": "account_user_id_user_id_fk", + "tableFrom": "account", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.api_key": { + "name": "api_key", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "key": { + "name": "key", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "type": { + "name": "type", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'personal'" + }, + "last_used": { + "name": "last_used", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "api_key_workspace_type_idx": { + "name": 
"api_key_workspace_type_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "type", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "api_key_user_type_idx": { + "name": "api_key_user_type_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "type", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "api_key_user_id_user_id_fk": { + "name": "api_key_user_id_user_id_fk", + "tableFrom": "api_key", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "api_key_workspace_id_workspace_id_fk": { + "name": "api_key_workspace_id_workspace_id_fk", + "tableFrom": "api_key", + "tableTo": "workspace", + "columnsFrom": [ + "workspace_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "api_key_created_by_user_id_fk": { + "name": "api_key_created_by_user_id_fk", + "tableFrom": "api_key", + "tableTo": "user", + "columnsFrom": [ + "created_by" + ], + "columnsTo": [ + "id" + ], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "api_key_key_unique": { + "name": "api_key_key_unique", + "nullsNotDistinct": false, + "columns": [ + "key" + ] + } + }, + "policies": {}, + "checkConstraints": { + "workspace_type_check": { + "name": "workspace_type_check", + "value": "(type = 'workspace' AND workspace_id IS NOT NULL) OR (type = 'personal' AND workspace_id IS NULL)" + } + }, + "isRLSEnabled": false + }, + "public.async_jobs": { + "name": "async_jobs", + "schema": "", + "columns": { + "id": { + 
"name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "type": { + "name": "type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "payload": { + "name": "payload", + "type": "jsonb", + "primaryKey": false, + "notNull": true + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "started_at": { + "name": "started_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "completed_at": { + "name": "completed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "run_at": { + "name": "run_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "attempts": { + "name": "attempts", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "max_attempts": { + "name": "max_attempts", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 3 + }, + "error": { + "name": "error", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "output": { + "name": "output", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "metadata": { + "name": "metadata", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "async_jobs_status_started_at_idx": { + "name": "async_jobs_status_started_at_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "started_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + 
"async_jobs_status_completed_at_idx": { + "name": "async_jobs_status_completed_at_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "completed_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.chat": { + "name": "chat", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "identifier": { + "name": "identifier", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "title": { + "name": "title", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "is_active": { + "name": "is_active", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "customizations": { + "name": "customizations", + "type": "json", + "primaryKey": false, + "notNull": false, + "default": "'{}'" + }, + "auth_type": { + "name": "auth_type", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'public'" + }, + "password": { + "name": "password", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "allowed_emails": { + "name": "allowed_emails", + "type": "json", + "primaryKey": false, + "notNull": false, + "default": "'[]'" + }, + "output_configs": { + "name": "output_configs", + "type": "json", + "primaryKey": false, + "notNull": false, + "default": "'[]'" + }, + "created_at": { 
+ "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "identifier_idx": { + "name": "identifier_idx", + "columns": [ + { + "expression": "identifier", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "chat_workflow_id_workflow_id_fk": { + "name": "chat_workflow_id_workflow_id_fk", + "tableFrom": "chat", + "tableTo": "workflow", + "columnsFrom": [ + "workflow_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "chat_user_id_user_id_fk": { + "name": "chat_user_id_user_id_fk", + "tableFrom": "chat", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.copilot_chats": { + "name": "copilot_chats", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "uuid", + "primaryKey": true, + "notNull": true, + "default": "gen_random_uuid()" + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "title": { + "name": "title", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "messages": { + "name": "messages", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'[]'" + }, + "model": { + "name": "model", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'claude-3-7-sonnet-latest'" + }, + "conversation_id": { + "name": "conversation_id", + 
"type": "text", + "primaryKey": false, + "notNull": false + }, + "preview_yaml": { + "name": "preview_yaml", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "plan_artifact": { + "name": "plan_artifact", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "config": { + "name": "config", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "copilot_chats_user_id_idx": { + "name": "copilot_chats_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_chats_workflow_id_idx": { + "name": "copilot_chats_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_chats_user_workflow_idx": { + "name": "copilot_chats_user_workflow_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_chats_created_at_idx": { + "name": "copilot_chats_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_chats_updated_at_idx": { + "name": "copilot_chats_updated_at_idx", + "columns": [ + { + "expression": 
"updated_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "copilot_chats_user_id_user_id_fk": { + "name": "copilot_chats_user_id_user_id_fk", + "tableFrom": "copilot_chats", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "copilot_chats_workflow_id_workflow_id_fk": { + "name": "copilot_chats_workflow_id_workflow_id_fk", + "tableFrom": "copilot_chats", + "tableTo": "workflow", + "columnsFrom": [ + "workflow_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.copilot_feedback": { + "name": "copilot_feedback", + "schema": "", + "columns": { + "feedback_id": { + "name": "feedback_id", + "type": "uuid", + "primaryKey": true, + "notNull": true, + "default": "gen_random_uuid()" + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "chat_id": { + "name": "chat_id", + "type": "uuid", + "primaryKey": false, + "notNull": true + }, + "user_query": { + "name": "user_query", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "agent_response": { + "name": "agent_response", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "is_positive": { + "name": "is_positive", + "type": "boolean", + "primaryKey": false, + "notNull": true + }, + "feedback": { + "name": "feedback", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "workflow_yaml": { + "name": "workflow_yaml", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + 
"name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "copilot_feedback_user_id_idx": { + "name": "copilot_feedback_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_feedback_chat_id_idx": { + "name": "copilot_feedback_chat_id_idx", + "columns": [ + { + "expression": "chat_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_feedback_user_chat_idx": { + "name": "copilot_feedback_user_chat_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "chat_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_feedback_is_positive_idx": { + "name": "copilot_feedback_is_positive_idx", + "columns": [ + { + "expression": "is_positive", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_feedback_created_at_idx": { + "name": "copilot_feedback_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "copilot_feedback_user_id_user_id_fk": { + "name": "copilot_feedback_user_id_user_id_fk", + "tableFrom": "copilot_feedback", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "copilot_feedback_chat_id_copilot_chats_id_fk": { + "name": 
"copilot_feedback_chat_id_copilot_chats_id_fk", + "tableFrom": "copilot_feedback", + "tableTo": "copilot_chats", + "columnsFrom": [ + "chat_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.credential_set": { + "name": "credential_set", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "provider_id": { + "name": "provider_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "credential_set_organization_id_idx": { + "name": "credential_set_organization_id_idx", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "credential_set_created_by_idx": { + "name": "credential_set_created_by_idx", + "columns": [ + { + "expression": "created_by", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "credential_set_org_name_unique": { + "name": 
"credential_set_org_name_unique", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "name", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "credential_set_provider_id_idx": { + "name": "credential_set_provider_id_idx", + "columns": [ + { + "expression": "provider_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "credential_set_organization_id_organization_id_fk": { + "name": "credential_set_organization_id_organization_id_fk", + "tableFrom": "credential_set", + "tableTo": "organization", + "columnsFrom": [ + "organization_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "credential_set_created_by_user_id_fk": { + "name": "credential_set_created_by_user_id_fk", + "tableFrom": "credential_set", + "tableTo": "user", + "columnsFrom": [ + "created_by" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.credential_set_invitation": { + "name": "credential_set_invitation", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "credential_set_id": { + "name": "credential_set_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "token": { + "name": "token", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "invited_by": { + "name": "invited_by", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "status": { + "name": 
"status", + "type": "credential_set_invitation_status", + "typeSchema": "public", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "accepted_at": { + "name": "accepted_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "accepted_by_user_id": { + "name": "accepted_by_user_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "credential_set_invitation_set_id_idx": { + "name": "credential_set_invitation_set_id_idx", + "columns": [ + { + "expression": "credential_set_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "credential_set_invitation_token_idx": { + "name": "credential_set_invitation_token_idx", + "columns": [ + { + "expression": "token", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "credential_set_invitation_status_idx": { + "name": "credential_set_invitation_status_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "credential_set_invitation_expires_at_idx": { + "name": "credential_set_invitation_expires_at_idx", + "columns": [ + { + "expression": "expires_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "credential_set_invitation_credential_set_id_credential_set_id_fk": { + "name": 
"credential_set_invitation_credential_set_id_credential_set_id_fk", + "tableFrom": "credential_set_invitation", + "tableTo": "credential_set", + "columnsFrom": [ + "credential_set_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "credential_set_invitation_invited_by_user_id_fk": { + "name": "credential_set_invitation_invited_by_user_id_fk", + "tableFrom": "credential_set_invitation", + "tableTo": "user", + "columnsFrom": [ + "invited_by" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "credential_set_invitation_accepted_by_user_id_user_id_fk": { + "name": "credential_set_invitation_accepted_by_user_id_user_id_fk", + "tableFrom": "credential_set_invitation", + "tableTo": "user", + "columnsFrom": [ + "accepted_by_user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "credential_set_invitation_token_unique": { + "name": "credential_set_invitation_token_unique", + "nullsNotDistinct": false, + "columns": [ + "token" + ] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.credential_set_member": { + "name": "credential_set_member", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "credential_set_id": { + "name": "credential_set_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "status": { + "name": "status", + "type": "credential_set_member_status", + "typeSchema": "public", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "joined_at": { + "name": "joined_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "invited_by": { + "name": "invited_by", + "type": "text", + "primaryKey": false, + "notNull": 
false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "credential_set_member_set_id_idx": { + "name": "credential_set_member_set_id_idx", + "columns": [ + { + "expression": "credential_set_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "credential_set_member_user_id_idx": { + "name": "credential_set_member_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "credential_set_member_unique": { + "name": "credential_set_member_unique", + "columns": [ + { + "expression": "credential_set_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "credential_set_member_status_idx": { + "name": "credential_set_member_status_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "credential_set_member_credential_set_id_credential_set_id_fk": { + "name": "credential_set_member_credential_set_id_credential_set_id_fk", + "tableFrom": "credential_set_member", + "tableTo": "credential_set", + "columnsFrom": [ + "credential_set_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "credential_set_member_user_id_user_id_fk": { + "name": 
"credential_set_member_user_id_user_id_fk", + "tableFrom": "credential_set_member", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "credential_set_member_invited_by_user_id_fk": { + "name": "credential_set_member_invited_by_user_id_fk", + "tableFrom": "credential_set_member", + "tableTo": "user", + "columnsFrom": [ + "invited_by" + ], + "columnsTo": [ + "id" + ], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.custom_tools": { + "name": "custom_tools", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "title": { + "name": "title", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "schema": { + "name": "schema", + "type": "json", + "primaryKey": false, + "notNull": true + }, + "code": { + "name": "code", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "custom_tools_workspace_id_idx": { + "name": "custom_tools_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "custom_tools_workspace_title_unique": { + "name": "custom_tools_workspace_title_unique", + "columns": 
[ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "title", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "custom_tools_workspace_id_workspace_id_fk": { + "name": "custom_tools_workspace_id_workspace_id_fk", + "tableFrom": "custom_tools", + "tableTo": "workspace", + "columnsFrom": [ + "workspace_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "custom_tools_user_id_user_id_fk": { + "name": "custom_tools_user_id_user_id_fk", + "tableFrom": "custom_tools", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.docs_embeddings": { + "name": "docs_embeddings", + "schema": "", + "columns": { + "chunk_id": { + "name": "chunk_id", + "type": "uuid", + "primaryKey": true, + "notNull": true, + "default": "gen_random_uuid()" + }, + "chunk_text": { + "name": "chunk_text", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "source_document": { + "name": "source_document", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "source_link": { + "name": "source_link", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "header_text": { + "name": "header_text", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "header_level": { + "name": "header_level", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "token_count": { + "name": "token_count", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "embedding": { + "name": "embedding", + "type": "vector(1536)", + "primaryKey": false, + "notNull": true + }, + 
"embedding_model": { + "name": "embedding_model", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'text-embedding-3-small'" + }, + "metadata": { + "name": "metadata", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "chunk_text_tsv": { + "name": "chunk_text_tsv", + "type": "tsvector", + "primaryKey": false, + "notNull": false, + "generated": { + "as": "to_tsvector('english', \"docs_embeddings\".\"chunk_text\")", + "type": "stored" + } + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "docs_emb_source_document_idx": { + "name": "docs_emb_source_document_idx", + "columns": [ + { + "expression": "source_document", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "docs_emb_header_level_idx": { + "name": "docs_emb_header_level_idx", + "columns": [ + { + "expression": "header_level", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "docs_emb_source_header_idx": { + "name": "docs_emb_source_header_idx", + "columns": [ + { + "expression": "source_document", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "header_level", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "docs_emb_model_idx": { + "name": "docs_emb_model_idx", + "columns": [ + { + "expression": "embedding_model", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + 
"docs_emb_created_at_idx": { + "name": "docs_emb_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "docs_embedding_vector_hnsw_idx": { + "name": "docs_embedding_vector_hnsw_idx", + "columns": [ + { + "expression": "embedding", + "isExpression": false, + "asc": true, + "nulls": "last", + "opclass": "vector_cosine_ops" + } + ], + "isUnique": false, + "concurrently": false, + "method": "hnsw", + "with": { + "m": 16, + "ef_construction": 64 + } + }, + "docs_emb_metadata_gin_idx": { + "name": "docs_emb_metadata_gin_idx", + "columns": [ + { + "expression": "metadata", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "gin", + "with": {} + }, + "docs_emb_chunk_text_fts_idx": { + "name": "docs_emb_chunk_text_fts_idx", + "columns": [ + { + "expression": "chunk_text_tsv", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "gin", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": { + "docs_embedding_not_null_check": { + "name": "docs_embedding_not_null_check", + "value": "\"embedding\" IS NOT NULL" + }, + "docs_header_level_check": { + "name": "docs_header_level_check", + "value": "\"header_level\" >= 1 AND \"header_level\" <= 6" + } + }, + "isRLSEnabled": false + }, + "public.document": { + "name": "document", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "knowledge_base_id": { + "name": "knowledge_base_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "filename": { + "name": "filename", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "file_url": { + "name": 
"file_url", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "file_size": { + "name": "file_size", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "mime_type": { + "name": "mime_type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "chunk_count": { + "name": "chunk_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "token_count": { + "name": "token_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "character_count": { + "name": "character_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "processing_status": { + "name": "processing_status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "processing_started_at": { + "name": "processing_started_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "processing_completed_at": { + "name": "processing_completed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "processing_error": { + "name": "processing_error", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "enabled": { + "name": "enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "deleted_at": { + "name": "deleted_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "tag1": { + "name": "tag1", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag2": { + "name": "tag2", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag3": { + "name": "tag3", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag4": { + "name": "tag4", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag5": { + "name": "tag5", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag6": { + "name": "tag6", + "type": "text", + "primaryKey": false, + 
"notNull": false + }, + "tag7": { + "name": "tag7", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "number1": { + "name": "number1", + "type": "double precision", + "primaryKey": false, + "notNull": false + }, + "number2": { + "name": "number2", + "type": "double precision", + "primaryKey": false, + "notNull": false + }, + "number3": { + "name": "number3", + "type": "double precision", + "primaryKey": false, + "notNull": false + }, + "number4": { + "name": "number4", + "type": "double precision", + "primaryKey": false, + "notNull": false + }, + "number5": { + "name": "number5", + "type": "double precision", + "primaryKey": false, + "notNull": false + }, + "date1": { + "name": "date1", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "date2": { + "name": "date2", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "boolean1": { + "name": "boolean1", + "type": "boolean", + "primaryKey": false, + "notNull": false + }, + "boolean2": { + "name": "boolean2", + "type": "boolean", + "primaryKey": false, + "notNull": false + }, + "boolean3": { + "name": "boolean3", + "type": "boolean", + "primaryKey": false, + "notNull": false + }, + "uploaded_at": { + "name": "uploaded_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "doc_kb_id_idx": { + "name": "doc_kb_id_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_filename_idx": { + "name": "doc_filename_idx", + "columns": [ + { + "expression": "filename", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_processing_status_idx": { + "name": "doc_processing_status_idx", + "columns": [ + { + "expression": "knowledge_base_id", + 
"isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "processing_status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag1_idx": { + "name": "doc_tag1_idx", + "columns": [ + { + "expression": "tag1", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag2_idx": { + "name": "doc_tag2_idx", + "columns": [ + { + "expression": "tag2", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag3_idx": { + "name": "doc_tag3_idx", + "columns": [ + { + "expression": "tag3", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag4_idx": { + "name": "doc_tag4_idx", + "columns": [ + { + "expression": "tag4", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag5_idx": { + "name": "doc_tag5_idx", + "columns": [ + { + "expression": "tag5", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag6_idx": { + "name": "doc_tag6_idx", + "columns": [ + { + "expression": "tag6", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag7_idx": { + "name": "doc_tag7_idx", + "columns": [ + { + "expression": "tag7", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_number1_idx": { + "name": "doc_number1_idx", + 
"columns": [ + { + "expression": "number1", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_number2_idx": { + "name": "doc_number2_idx", + "columns": [ + { + "expression": "number2", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_number3_idx": { + "name": "doc_number3_idx", + "columns": [ + { + "expression": "number3", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_number4_idx": { + "name": "doc_number4_idx", + "columns": [ + { + "expression": "number4", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_number5_idx": { + "name": "doc_number5_idx", + "columns": [ + { + "expression": "number5", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_date1_idx": { + "name": "doc_date1_idx", + "columns": [ + { + "expression": "date1", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_date2_idx": { + "name": "doc_date2_idx", + "columns": [ + { + "expression": "date2", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_boolean1_idx": { + "name": "doc_boolean1_idx", + "columns": [ + { + "expression": "boolean1", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_boolean2_idx": { + "name": "doc_boolean2_idx", + 
"columns": [ + { + "expression": "boolean2", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_boolean3_idx": { + "name": "doc_boolean3_idx", + "columns": [ + { + "expression": "boolean3", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "document_knowledge_base_id_knowledge_base_id_fk": { + "name": "document_knowledge_base_id_knowledge_base_id_fk", + "tableFrom": "document", + "tableTo": "knowledge_base", + "columnsFrom": [ + "knowledge_base_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.embedding": { + "name": "embedding", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "knowledge_base_id": { + "name": "knowledge_base_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "document_id": { + "name": "document_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "chunk_index": { + "name": "chunk_index", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "chunk_hash": { + "name": "chunk_hash", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "content": { + "name": "content", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "content_length": { + "name": "content_length", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "token_count": { + "name": "token_count", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "embedding": { + "name": "embedding", + "type": "vector(1536)", + "primaryKey": false, + "notNull": false + }, + "embedding_model": { + "name": 
"embedding_model", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'text-embedding-3-small'" + }, + "start_offset": { + "name": "start_offset", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "end_offset": { + "name": "end_offset", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "tag1": { + "name": "tag1", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag2": { + "name": "tag2", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag3": { + "name": "tag3", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag4": { + "name": "tag4", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag5": { + "name": "tag5", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag6": { + "name": "tag6", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag7": { + "name": "tag7", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "number1": { + "name": "number1", + "type": "double precision", + "primaryKey": false, + "notNull": false + }, + "number2": { + "name": "number2", + "type": "double precision", + "primaryKey": false, + "notNull": false + }, + "number3": { + "name": "number3", + "type": "double precision", + "primaryKey": false, + "notNull": false + }, + "number4": { + "name": "number4", + "type": "double precision", + "primaryKey": false, + "notNull": false + }, + "number5": { + "name": "number5", + "type": "double precision", + "primaryKey": false, + "notNull": false + }, + "date1": { + "name": "date1", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "date2": { + "name": "date2", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "boolean1": { + "name": "boolean1", + "type": "boolean", + "primaryKey": false, + "notNull": false + }, + "boolean2": { + "name": "boolean2", + "type": "boolean", + "primaryKey": false, + "notNull": false + }, + 
"boolean3": { + "name": "boolean3", + "type": "boolean", + "primaryKey": false, + "notNull": false + }, + "enabled": { + "name": "enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "content_tsv": { + "name": "content_tsv", + "type": "tsvector", + "primaryKey": false, + "notNull": false, + "generated": { + "as": "to_tsvector('english', \"embedding\".\"content\")", + "type": "stored" + } + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "emb_kb_id_idx": { + "name": "emb_kb_id_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_doc_id_idx": { + "name": "emb_doc_id_idx", + "columns": [ + { + "expression": "document_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_doc_chunk_idx": { + "name": "emb_doc_chunk_idx", + "columns": [ + { + "expression": "document_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "chunk_index", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_kb_model_idx": { + "name": "emb_kb_model_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "embedding_model", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_kb_enabled_idx": { + "name": 
"emb_kb_enabled_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "enabled", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_doc_enabled_idx": { + "name": "emb_doc_enabled_idx", + "columns": [ + { + "expression": "document_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "enabled", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "embedding_vector_hnsw_idx": { + "name": "embedding_vector_hnsw_idx", + "columns": [ + { + "expression": "embedding", + "isExpression": false, + "asc": true, + "nulls": "last", + "opclass": "vector_cosine_ops" + } + ], + "isUnique": false, + "concurrently": false, + "method": "hnsw", + "with": { + "m": 16, + "ef_construction": 64 + } + }, + "emb_tag1_idx": { + "name": "emb_tag1_idx", + "columns": [ + { + "expression": "tag1", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_tag2_idx": { + "name": "emb_tag2_idx", + "columns": [ + { + "expression": "tag2", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_tag3_idx": { + "name": "emb_tag3_idx", + "columns": [ + { + "expression": "tag3", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_tag4_idx": { + "name": "emb_tag4_idx", + "columns": [ + { + "expression": "tag4", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + 
"emb_tag5_idx": { + "name": "emb_tag5_idx", + "columns": [ + { + "expression": "tag5", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_tag6_idx": { + "name": "emb_tag6_idx", + "columns": [ + { + "expression": "tag6", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_tag7_idx": { + "name": "emb_tag7_idx", + "columns": [ + { + "expression": "tag7", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_number1_idx": { + "name": "emb_number1_idx", + "columns": [ + { + "expression": "number1", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_number2_idx": { + "name": "emb_number2_idx", + "columns": [ + { + "expression": "number2", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_number3_idx": { + "name": "emb_number3_idx", + "columns": [ + { + "expression": "number3", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_number4_idx": { + "name": "emb_number4_idx", + "columns": [ + { + "expression": "number4", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_number5_idx": { + "name": "emb_number5_idx", + "columns": [ + { + "expression": "number5", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_date1_idx": { + 
"name": "emb_date1_idx", + "columns": [ + { + "expression": "date1", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_date2_idx": { + "name": "emb_date2_idx", + "columns": [ + { + "expression": "date2", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_boolean1_idx": { + "name": "emb_boolean1_idx", + "columns": [ + { + "expression": "boolean1", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_boolean2_idx": { + "name": "emb_boolean2_idx", + "columns": [ + { + "expression": "boolean2", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_boolean3_idx": { + "name": "emb_boolean3_idx", + "columns": [ + { + "expression": "boolean3", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_content_fts_idx": { + "name": "emb_content_fts_idx", + "columns": [ + { + "expression": "content_tsv", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "gin", + "with": {} + } + }, + "foreignKeys": { + "embedding_knowledge_base_id_knowledge_base_id_fk": { + "name": "embedding_knowledge_base_id_knowledge_base_id_fk", + "tableFrom": "embedding", + "tableTo": "knowledge_base", + "columnsFrom": [ + "knowledge_base_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "embedding_document_id_document_id_fk": { + "name": "embedding_document_id_document_id_fk", + "tableFrom": "embedding", + "tableTo": "document", + "columnsFrom": [ + "document_id" + 
], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": { + "embedding_not_null_check": { + "name": "embedding_not_null_check", + "value": "\"embedding\" IS NOT NULL" + } + }, + "isRLSEnabled": false + }, + "public.environment": { + "name": "environment", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "variables": { + "name": "variables", + "type": "json", + "primaryKey": false, + "notNull": true + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": { + "environment_user_id_user_id_fk": { + "name": "environment_user_id_user_id_fk", + "tableFrom": "environment", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "environment_user_id_unique": { + "name": "environment_user_id_unique", + "nullsNotDistinct": false, + "columns": [ + "user_id" + ] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.form": { + "name": "form", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "identifier": { + "name": "identifier", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "title": { + "name": "title", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": 
{ + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "is_active": { + "name": "is_active", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "customizations": { + "name": "customizations", + "type": "json", + "primaryKey": false, + "notNull": false, + "default": "'{}'" + }, + "auth_type": { + "name": "auth_type", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'public'" + }, + "password": { + "name": "password", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "allowed_emails": { + "name": "allowed_emails", + "type": "json", + "primaryKey": false, + "notNull": false, + "default": "'[]'" + }, + "show_branding": { + "name": "show_branding", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "form_identifier_idx": { + "name": "form_identifier_idx", + "columns": [ + { + "expression": "identifier", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "form_workflow_id_idx": { + "name": "form_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "form_user_id_idx": { + "name": "form_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "form_workflow_id_workflow_id_fk": { + "name": 
"form_workflow_id_workflow_id_fk", + "tableFrom": "form", + "tableTo": "workflow", + "columnsFrom": [ + "workflow_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "form_user_id_user_id_fk": { + "name": "form_user_id_user_id_fk", + "tableFrom": "form", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.idempotency_key": { + "name": "idempotency_key", + "schema": "", + "columns": { + "key": { + "name": "key", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "result": { + "name": "result", + "type": "json", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "idempotency_key_created_at_idx": { + "name": "idempotency_key_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.invitation": { + "name": "invitation", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "inviter_id": { + "name": "inviter_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "role": { + "name": "role", + "type": "text", + "primaryKey": 
false, + "notNull": true + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "invitation_email_idx": { + "name": "invitation_email_idx", + "columns": [ + { + "expression": "email", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "invitation_organization_id_idx": { + "name": "invitation_organization_id_idx", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "invitation_inviter_id_user_id_fk": { + "name": "invitation_inviter_id_user_id_fk", + "tableFrom": "invitation", + "tableTo": "user", + "columnsFrom": [ + "inviter_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "invitation_organization_id_organization_id_fk": { + "name": "invitation_organization_id_organization_id_fk", + "tableFrom": "invitation", + "tableTo": "organization", + "columnsFrom": [ + "organization_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.knowledge_base": { + "name": "knowledge_base", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + 
"primaryKey": false, + "notNull": false + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "token_count": { + "name": "token_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "embedding_model": { + "name": "embedding_model", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'text-embedding-3-small'" + }, + "embedding_dimension": { + "name": "embedding_dimension", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 1536 + }, + "chunking_config": { + "name": "chunking_config", + "type": "json", + "primaryKey": false, + "notNull": true, + "default": "'{\"maxSize\": 1024, \"minSize\": 1, \"overlap\": 200}'" + }, + "deleted_at": { + "name": "deleted_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "kb_user_id_idx": { + "name": "kb_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "kb_workspace_id_idx": { + "name": "kb_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "kb_user_workspace_idx": { + "name": "kb_user_workspace_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "workspace_id", + 
"isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "kb_deleted_at_idx": { + "name": "kb_deleted_at_idx", + "columns": [ + { + "expression": "deleted_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "knowledge_base_user_id_user_id_fk": { + "name": "knowledge_base_user_id_user_id_fk", + "tableFrom": "knowledge_base", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "knowledge_base_workspace_id_workspace_id_fk": { + "name": "knowledge_base_workspace_id_workspace_id_fk", + "tableFrom": "knowledge_base", + "tableTo": "workspace", + "columnsFrom": [ + "workspace_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "no action", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.knowledge_base_tag_definitions": { + "name": "knowledge_base_tag_definitions", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "knowledge_base_id": { + "name": "knowledge_base_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "tag_slot": { + "name": "tag_slot", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "display_name": { + "name": "display_name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "field_type": { + "name": "field_type", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'text'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + 
"primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "kb_tag_definitions_kb_slot_idx": { + "name": "kb_tag_definitions_kb_slot_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "tag_slot", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "kb_tag_definitions_kb_display_name_idx": { + "name": "kb_tag_definitions_kb_display_name_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "display_name", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "kb_tag_definitions_kb_id_idx": { + "name": "kb_tag_definitions_kb_id_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "knowledge_base_tag_definitions_knowledge_base_id_knowledge_base_id_fk": { + "name": "knowledge_base_tag_definitions_knowledge_base_id_knowledge_base_id_fk", + "tableFrom": "knowledge_base_tag_definitions", + "tableTo": "knowledge_base", + "columnsFrom": [ + "knowledge_base_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.mcp_servers": { + "name": "mcp_servers", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_by": { + "name": 
"created_by", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "transport": { + "name": "transport", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "url": { + "name": "url", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "headers": { + "name": "headers", + "type": "json", + "primaryKey": false, + "notNull": false, + "default": "'{}'" + }, + "timeout": { + "name": "timeout", + "type": "integer", + "primaryKey": false, + "notNull": false, + "default": 30000 + }, + "retries": { + "name": "retries", + "type": "integer", + "primaryKey": false, + "notNull": false, + "default": 3 + }, + "enabled": { + "name": "enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "last_connected": { + "name": "last_connected", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "connection_status": { + "name": "connection_status", + "type": "text", + "primaryKey": false, + "notNull": false, + "default": "'disconnected'" + }, + "last_error": { + "name": "last_error", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "status_config": { + "name": "status_config", + "type": "jsonb", + "primaryKey": false, + "notNull": false, + "default": "'{}'" + }, + "tool_count": { + "name": "tool_count", + "type": "integer", + "primaryKey": false, + "notNull": false, + "default": 0 + }, + "last_tools_refresh": { + "name": "last_tools_refresh", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "total_requests": { + "name": "total_requests", + "type": "integer", + "primaryKey": false, + "notNull": false, + "default": 0 + }, + "last_used": { + "name": "last_used", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "deleted_at": { + 
"name": "deleted_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "mcp_servers_workspace_enabled_idx": { + "name": "mcp_servers_workspace_enabled_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "enabled", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "mcp_servers_workspace_deleted_idx": { + "name": "mcp_servers_workspace_deleted_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "deleted_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "mcp_servers_workspace_id_workspace_id_fk": { + "name": "mcp_servers_workspace_id_workspace_id_fk", + "tableFrom": "mcp_servers", + "tableTo": "workspace", + "columnsFrom": [ + "workspace_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "mcp_servers_created_by_user_id_fk": { + "name": "mcp_servers_created_by_user_id_fk", + "tableFrom": "mcp_servers", + "tableTo": "user", + "columnsFrom": [ + "created_by" + ], + "columnsTo": [ + "id" + ], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.member": { + "name": "member", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + 
"notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "role": { + "name": "role", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "member_user_id_unique": { + "name": "member_user_id_unique", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "member_organization_id_idx": { + "name": "member_organization_id_idx", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "member_user_id_user_id_fk": { + "name": "member_user_id_user_id_fk", + "tableFrom": "member", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "member_organization_id_organization_id_fk": { + "name": "member_organization_id_organization_id_fk", + "tableFrom": "member", + "tableTo": "organization", + "columnsFrom": [ + "organization_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.memory": { + "name": "memory", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "key": { + "name": 
"key", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "data": { + "name": "data", + "type": "jsonb", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "deleted_at": { + "name": "deleted_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "memory_key_idx": { + "name": "memory_key_idx", + "columns": [ + { + "expression": "key", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "memory_workspace_idx": { + "name": "memory_workspace_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "memory_workspace_key_idx": { + "name": "memory_workspace_key_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "key", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "memory_workspace_id_workspace_id_fk": { + "name": "memory_workspace_id_workspace_id_fk", + "tableFrom": "memory", + "tableTo": "workspace", + "columnsFrom": [ + "workspace_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.organization": { + "name": "organization", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + 
"primaryKey": true, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "slug": { + "name": "slug", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "logo": { + "name": "logo", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "metadata": { + "name": "metadata", + "type": "json", + "primaryKey": false, + "notNull": false + }, + "org_usage_limit": { + "name": "org_usage_limit", + "type": "numeric", + "primaryKey": false, + "notNull": false + }, + "storage_used_bytes": { + "name": "storage_used_bytes", + "type": "bigint", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "departed_member_usage": { + "name": "departed_member_usage", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "credit_balance": { + "name": "credit_balance", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.paused_executions": { + "name": "paused_executions", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "execution_id": { + "name": "execution_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "execution_snapshot": { + "name": "execution_snapshot", + "type": "jsonb", + "primaryKey": false, + "notNull": true + }, + "pause_points": { + "name": "pause_points", + "type": 
"jsonb", + "primaryKey": false, + "notNull": true + }, + "total_pause_count": { + "name": "total_pause_count", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "resumed_count": { + "name": "resumed_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'paused'" + }, + "metadata": { + "name": "metadata", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'::jsonb" + }, + "paused_at": { + "name": "paused_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "paused_executions_workflow_id_idx": { + "name": "paused_executions_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "paused_executions_status_idx": { + "name": "paused_executions_status_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "paused_executions_execution_id_unique": { + "name": "paused_executions_execution_id_unique", + "columns": [ + { + "expression": "execution_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "paused_executions_workflow_id_workflow_id_fk": { + "name": "paused_executions_workflow_id_workflow_id_fk", + "tableFrom": "paused_executions", + 
"tableTo": "workflow", + "columnsFrom": [ + "workflow_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.permission_group": { + "name": "permission_group", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "config": { + "name": "config", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "auto_add_new_members": { + "name": "auto_add_new_members", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + } + }, + "indexes": { + "permission_group_organization_id_idx": { + "name": "permission_group_organization_id_idx", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permission_group_created_by_idx": { + "name": "permission_group_created_by_idx", + "columns": [ + { + "expression": "created_by", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": 
"btree", + "with": {} + }, + "permission_group_org_name_unique": { + "name": "permission_group_org_name_unique", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "name", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permission_group_org_auto_add_unique": { + "name": "permission_group_org_auto_add_unique", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "where": "auto_add_new_members = true", + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "permission_group_organization_id_organization_id_fk": { + "name": "permission_group_organization_id_organization_id_fk", + "tableFrom": "permission_group", + "tableTo": "organization", + "columnsFrom": [ + "organization_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "permission_group_created_by_user_id_fk": { + "name": "permission_group_created_by_user_id_fk", + "tableFrom": "permission_group", + "tableTo": "user", + "columnsFrom": [ + "created_by" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.permission_group_member": { + "name": "permission_group_member", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "permission_group_id": { + "name": "permission_group_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "assigned_by": { + "name": "assigned_by", + "type": "text", + "primaryKey": 
false, + "notNull": false + }, + "assigned_at": { + "name": "assigned_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "permission_group_member_group_id_idx": { + "name": "permission_group_member_group_id_idx", + "columns": [ + { + "expression": "permission_group_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permission_group_member_user_id_unique": { + "name": "permission_group_member_user_id_unique", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "permission_group_member_permission_group_id_permission_group_id_fk": { + "name": "permission_group_member_permission_group_id_permission_group_id_fk", + "tableFrom": "permission_group_member", + "tableTo": "permission_group", + "columnsFrom": [ + "permission_group_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "permission_group_member_user_id_user_id_fk": { + "name": "permission_group_member_user_id_user_id_fk", + "tableFrom": "permission_group_member", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "permission_group_member_assigned_by_user_id_fk": { + "name": "permission_group_member_assigned_by_user_id_fk", + "tableFrom": "permission_group_member", + "tableTo": "user", + "columnsFrom": [ + "assigned_by" + ], + "columnsTo": [ + "id" + ], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.permissions": { + "name": "permissions", + "schema": "", + "columns": { + "id": { + "name": 
"id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "entity_type": { + "name": "entity_type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "entity_id": { + "name": "entity_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "permission_type": { + "name": "permission_type", + "type": "permission_type", + "typeSchema": "public", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "permissions_user_id_idx": { + "name": "permissions_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permissions_entity_idx": { + "name": "permissions_entity_idx", + "columns": [ + { + "expression": "entity_type", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permissions_user_entity_type_idx": { + "name": "permissions_user_entity_type_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_type", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permissions_user_entity_permission_idx": { + "name": "permissions_user_entity_permission_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, 
+ "asc": true, + "nulls": "last" + }, + { + "expression": "entity_type", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "permission_type", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permissions_user_entity_idx": { + "name": "permissions_user_entity_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_type", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permissions_unique_constraint": { + "name": "permissions_unique_constraint", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_type", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "permissions_user_id_user_id_fk": { + "name": "permissions_user_id_user_id_fk", + "tableFrom": "permissions", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.rate_limit_bucket": { + "name": "rate_limit_bucket", + "schema": "", + "columns": { + "key": { + "name": "key", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "tokens": { + "name": "tokens", + "type": "numeric", + "primaryKey": false, + "notNull": true + }, + "last_refill_at": { + "name": 
"last_refill_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.resume_queue": { + "name": "resume_queue", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "paused_execution_id": { + "name": "paused_execution_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "parent_execution_id": { + "name": "parent_execution_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "new_execution_id": { + "name": "new_execution_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "context_id": { + "name": "context_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "resume_input": { + "name": "resume_input", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "queued_at": { + "name": "queued_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "claimed_at": { + "name": "claimed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "completed_at": { + "name": "completed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "failure_reason": { + "name": "failure_reason", + "type": "text", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "resume_queue_parent_status_idx": { + "name": "resume_queue_parent_status_idx", + "columns": [ + { + "expression": "parent_execution_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "status", 
+ "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "queued_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "resume_queue_new_execution_idx": { + "name": "resume_queue_new_execution_idx", + "columns": [ + { + "expression": "new_execution_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "resume_queue_paused_execution_id_paused_executions_id_fk": { + "name": "resume_queue_paused_execution_id_paused_executions_id_fk", + "tableFrom": "resume_queue", + "tableTo": "paused_executions", + "columnsFrom": [ + "paused_execution_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.session": { + "name": "session", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "token": { + "name": "token", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "ip_address": { + "name": "ip_address", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_agent": { + "name": "user_agent", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "active_organization_id": { + "name": "active_organization_id", + 
"type": "text", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "session_user_id_idx": { + "name": "session_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "session_token_idx": { + "name": "session_token_idx", + "columns": [ + { + "expression": "token", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "session_user_id_user_id_fk": { + "name": "session_user_id_user_id_fk", + "tableFrom": "session", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "session_active_organization_id_organization_id_fk": { + "name": "session_active_organization_id_organization_id_fk", + "tableFrom": "session", + "tableTo": "organization", + "columnsFrom": [ + "active_organization_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "session_token_unique": { + "name": "session_token_unique", + "nullsNotDistinct": false, + "columns": [ + "token" + ] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.settings": { + "name": "settings", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "theme": { + "name": "theme", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'dark'" + }, + "auto_connect": { + "name": "auto_connect", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "telemetry_enabled": { + "name": 
"telemetry_enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "email_preferences": { + "name": "email_preferences", + "type": "json", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "billing_usage_notifications_enabled": { + "name": "billing_usage_notifications_enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "show_training_controls": { + "name": "show_training_controls", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "super_user_mode_enabled": { + "name": "super_user_mode_enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "error_notifications_enabled": { + "name": "error_notifications_enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "snap_to_grid_size": { + "name": "snap_to_grid_size", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "show_action_bar": { + "name": "show_action_bar", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "copilot_enabled_models": { + "name": "copilot_enabled_models", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "copilot_auto_allowed_tools": { + "name": "copilot_auto_allowed_tools", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'[]'" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": { + "settings_user_id_user_id_fk": { + "name": "settings_user_id_user_id_fk", + "tableFrom": "settings", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "settings_user_id_unique": { + 
"name": "settings_user_id_unique", + "nullsNotDistinct": false, + "columns": [ + "user_id" + ] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.skill": { + "name": "skill", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "content": { + "name": "content", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "skill_workspace_id_idx": { + "name": "skill_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "skill_workspace_name_unique": { + "name": "skill_workspace_name_unique", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "name", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "skill_workspace_id_workspace_id_fk": { + "name": "skill_workspace_id_workspace_id_fk", + "tableFrom": "skill", + "tableTo": "workspace", + "columnsFrom": [ + "workspace_id" + ], + "columnsTo": [ + 
"id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "skill_user_id_user_id_fk": { + "name": "skill_user_id_user_id_fk", + "tableFrom": "skill", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.sso_provider": { + "name": "sso_provider", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "issuer": { + "name": "issuer", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "domain": { + "name": "domain", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "oidc_config": { + "name": "oidc_config", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "saml_config": { + "name": "saml_config", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "provider_id": { + "name": "provider_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "sso_provider_provider_id_idx": { + "name": "sso_provider_provider_id_idx", + "columns": [ + { + "expression": "provider_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "sso_provider_domain_idx": { + "name": "sso_provider_domain_idx", + "columns": [ + { + "expression": "domain", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "sso_provider_user_id_idx": { + "name": "sso_provider_user_id_idx", + "columns": [ + 
{ + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "sso_provider_organization_id_idx": { + "name": "sso_provider_organization_id_idx", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "sso_provider_user_id_user_id_fk": { + "name": "sso_provider_user_id_user_id_fk", + "tableFrom": "sso_provider", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "sso_provider_organization_id_organization_id_fk": { + "name": "sso_provider_organization_id_organization_id_fk", + "tableFrom": "sso_provider", + "tableTo": "organization", + "columnsFrom": [ + "organization_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.subscription": { + "name": "subscription", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "plan": { + "name": "plan", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "reference_id": { + "name": "reference_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "stripe_customer_id": { + "name": "stripe_customer_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "stripe_subscription_id": { + "name": "stripe_subscription_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "period_start": { + "name": "period_start", + "type": "timestamp", + 
"primaryKey": false, + "notNull": false + }, + "period_end": { + "name": "period_end", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "cancel_at_period_end": { + "name": "cancel_at_period_end", + "type": "boolean", + "primaryKey": false, + "notNull": false + }, + "seats": { + "name": "seats", + "type": "integer", + "primaryKey": false, + "notNull": false + }, + "trial_start": { + "name": "trial_start", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "trial_end": { + "name": "trial_end", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "metadata": { + "name": "metadata", + "type": "json", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "subscription_reference_status_idx": { + "name": "subscription_reference_status_idx", + "columns": [ + { + "expression": "reference_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": { + "check_enterprise_metadata": { + "name": "check_enterprise_metadata", + "value": "plan != 'enterprise' OR metadata IS NOT NULL" + } + }, + "isRLSEnabled": false + }, + "public.template_creators": { + "name": "template_creators", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "reference_type": { + "name": "reference_type", + "type": "template_creator_type", + "typeSchema": "public", + "primaryKey": false, + "notNull": true + }, + "reference_id": { + "name": "reference_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "profile_image_url": { + "name": 
"profile_image_url", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "details": { + "name": "details", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "verified": { + "name": "verified", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "template_creators_reference_idx": { + "name": "template_creators_reference_idx", + "columns": [ + { + "expression": "reference_type", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "reference_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_creators_reference_id_idx": { + "name": "template_creators_reference_id_idx", + "columns": [ + { + "expression": "reference_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_creators_created_by_idx": { + "name": "template_creators_created_by_idx", + "columns": [ + { + "expression": "created_by", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "template_creators_created_by_user_id_fk": { + "name": "template_creators_created_by_user_id_fk", + "tableFrom": "template_creators", + "tableTo": "user", + "columnsFrom": [ + "created_by" + ], + "columnsTo": [ + "id" + ], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + 
"compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.template_stars": { + "name": "template_stars", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "template_id": { + "name": "template_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "starred_at": { + "name": "starred_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "template_stars_user_id_idx": { + "name": "template_stars_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_stars_template_id_idx": { + "name": "template_stars_template_id_idx", + "columns": [ + { + "expression": "template_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_stars_user_template_idx": { + "name": "template_stars_user_template_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "template_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_stars_template_user_idx": { + "name": "template_stars_template_user_idx", + "columns": [ + { + "expression": "template_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "user_id", + "isExpression": false, + "asc": 
true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_stars_starred_at_idx": { + "name": "template_stars_starred_at_idx", + "columns": [ + { + "expression": "starred_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_stars_template_starred_at_idx": { + "name": "template_stars_template_starred_at_idx", + "columns": [ + { + "expression": "template_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "starred_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_stars_user_template_unique": { + "name": "template_stars_user_template_unique", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "template_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "template_stars_user_id_user_id_fk": { + "name": "template_stars_user_id_user_id_fk", + "tableFrom": "template_stars", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "template_stars_template_id_templates_id_fk": { + "name": "template_stars_template_id_templates_id_fk", + "tableFrom": "template_stars", + "tableTo": "templates", + "columnsFrom": [ + "template_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.templates": { + "name": "templates", + "schema": "", + "columns": { + "id": { + "name": "id", + 
"type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "details": { + "name": "details", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "creator_id": { + "name": "creator_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "views": { + "name": "views", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "stars": { + "name": "stars", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "status": { + "name": "status", + "type": "template_status", + "typeSchema": "public", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "tags": { + "name": "tags", + "type": "text[]", + "primaryKey": false, + "notNull": true, + "default": "'{}'::text[]" + }, + "required_credentials": { + "name": "required_credentials", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'[]'" + }, + "state": { + "name": "state", + "type": "jsonb", + "primaryKey": false, + "notNull": true + }, + "og_image_url": { + "name": "og_image_url", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "templates_status_idx": { + "name": "templates_status_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_creator_id_idx": { + "name": "templates_creator_id_idx", + "columns": [ + { + "expression": 
"creator_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_views_idx": { + "name": "templates_views_idx", + "columns": [ + { + "expression": "views", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_stars_idx": { + "name": "templates_stars_idx", + "columns": [ + { + "expression": "stars", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_status_views_idx": { + "name": "templates_status_views_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "views", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_status_stars_idx": { + "name": "templates_status_stars_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "stars", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_created_at_idx": { + "name": "templates_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_updated_at_idx": { + "name": "templates_updated_at_idx", + "columns": [ + { + "expression": "updated_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + 
"templates_workflow_id_workflow_id_fk": { + "name": "templates_workflow_id_workflow_id_fk", + "tableFrom": "templates", + "tableTo": "workflow", + "columnsFrom": [ + "workflow_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "set null", + "onUpdate": "no action" + }, + "templates_creator_id_template_creators_id_fk": { + "name": "templates_creator_id_template_creators_id_fk", + "tableFrom": "templates", + "tableTo": "template_creators", + "columnsFrom": [ + "creator_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.usage_log": { + "name": "usage_log", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "category": { + "name": "category", + "type": "usage_log_category", + "typeSchema": "public", + "primaryKey": false, + "notNull": true + }, + "source": { + "name": "source", + "type": "usage_log_source", + "typeSchema": "public", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "metadata": { + "name": "metadata", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "cost": { + "name": "cost", + "type": "numeric", + "primaryKey": false, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "execution_id": { + "name": "execution_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + 
"default": "now()" + } + }, + "indexes": { + "usage_log_user_created_at_idx": { + "name": "usage_log_user_created_at_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "usage_log_source_idx": { + "name": "usage_log_source_idx", + "columns": [ + { + "expression": "source", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "usage_log_workspace_id_idx": { + "name": "usage_log_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "usage_log_workflow_id_idx": { + "name": "usage_log_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "usage_log_user_id_user_id_fk": { + "name": "usage_log_user_id_user_id_fk", + "tableFrom": "usage_log", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "usage_log_workspace_id_workspace_id_fk": { + "name": "usage_log_workspace_id_workspace_id_fk", + "tableFrom": "usage_log", + "tableTo": "workspace", + "columnsFrom": [ + "workspace_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "set null", + "onUpdate": "no action" + }, + "usage_log_workflow_id_workflow_id_fk": { + "name": "usage_log_workflow_id_workflow_id_fk", + "tableFrom": "usage_log", + "tableTo": "workflow", + "columnsFrom": [ + "workflow_id" + ], + "columnsTo": [ + "id" + ], + 
"onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.user": { + "name": "user", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "email_verified": { + "name": "email_verified", + "type": "boolean", + "primaryKey": false, + "notNull": true + }, + "image": { + "name": "image", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "stripe_customer_id": { + "name": "stripe_customer_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "is_super_user": { + "name": "is_super_user", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + } + }, + "indexes": {}, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "user_email_unique": { + "name": "user_email_unique", + "nullsNotDistinct": false, + "columns": [ + "email" + ] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.user_stats": { + "name": "user_stats", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "total_manual_executions": { + "name": "total_manual_executions", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_api_calls": { + "name": "total_api_calls", + 
"type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_webhook_triggers": { + "name": "total_webhook_triggers", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_scheduled_executions": { + "name": "total_scheduled_executions", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_chat_executions": { + "name": "total_chat_executions", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_mcp_executions": { + "name": "total_mcp_executions", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_a2a_executions": { + "name": "total_a2a_executions", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_tokens_used": { + "name": "total_tokens_used", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_cost": { + "name": "total_cost", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "current_usage_limit": { + "name": "current_usage_limit", + "type": "numeric", + "primaryKey": false, + "notNull": false, + "default": "'20'" + }, + "usage_limit_updated_at": { + "name": "usage_limit_updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false, + "default": "now()" + }, + "current_period_cost": { + "name": "current_period_cost", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "last_period_cost": { + "name": "last_period_cost", + "type": "numeric", + "primaryKey": false, + "notNull": false, + "default": "'0'" + }, + "billed_overage_this_period": { + "name": "billed_overage_this_period", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "pro_period_cost_snapshot": { + "name": "pro_period_cost_snapshot", + "type": "numeric", + "primaryKey": false, + "notNull": false, + "default": 
"'0'" + }, + "credit_balance": { + "name": "credit_balance", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "total_copilot_cost": { + "name": "total_copilot_cost", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "current_period_copilot_cost": { + "name": "current_period_copilot_cost", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "last_period_copilot_cost": { + "name": "last_period_copilot_cost", + "type": "numeric", + "primaryKey": false, + "notNull": false, + "default": "'0'" + }, + "total_copilot_tokens": { + "name": "total_copilot_tokens", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_copilot_calls": { + "name": "total_copilot_calls", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_mcp_copilot_calls": { + "name": "total_mcp_copilot_calls", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_mcp_copilot_cost": { + "name": "total_mcp_copilot_cost", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "current_period_mcp_copilot_cost": { + "name": "current_period_mcp_copilot_cost", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "storage_used_bytes": { + "name": "storage_used_bytes", + "type": "bigint", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "last_active": { + "name": "last_active", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "billing_blocked": { + "name": "billing_blocked", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "billing_blocked_reason": { + "name": "billing_blocked_reason", + "type": "billing_blocked_reason", + "typeSchema": "public", + "primaryKey": false, + "notNull": false + } + }, + "indexes": {}, + 
"foreignKeys": { + "user_stats_user_id_user_id_fk": { + "name": "user_stats_user_id_user_id_fk", + "tableFrom": "user_stats", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "user_stats_user_id_unique": { + "name": "user_stats_user_id_unique", + "nullsNotDistinct": false, + "columns": [ + "user_id" + ] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.verification": { + "name": "verification", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "identifier": { + "name": "identifier", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "value": { + "name": "value", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "verification_identifier_idx": { + "name": "verification_identifier_idx", + "columns": [ + { + "expression": "identifier", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "verification_expires_at_idx": { + "name": "verification_expires_at_idx", + "columns": [ + { + "expression": "expires_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.waitlist": 
{ + "name": "waitlist", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "waitlist_email_unique": { + "name": "waitlist_email_unique", + "nullsNotDistinct": false, + "columns": [ + "email" + ] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.webhook": { + "name": "webhook", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "deployment_version_id": { + "name": "deployment_version_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "block_id": { + "name": "block_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "path": { + "name": "path", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "provider": { + "name": "provider", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "provider_config": { + "name": "provider_config", + "type": "json", + "primaryKey": false, + "notNull": false + }, + "is_active": { + "name": "is_active", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "failed_count": { + "name": "failed_count", + "type": "integer", + "primaryKey": false, + "notNull": false, + "default": 0 
+ }, + "last_failed_at": { + "name": "last_failed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "credential_set_id": { + "name": "credential_set_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "path_deployment_unique": { + "name": "path_deployment_unique", + "columns": [ + { + "expression": "path", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "deployment_version_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "idx_webhook_on_workflow_id_block_id": { + "name": "idx_webhook_on_workflow_id_block_id", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "block_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "webhook_workflow_deployment_idx": { + "name": "webhook_workflow_deployment_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "deployment_version_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "webhook_credential_set_id_idx": { + "name": "webhook_credential_set_id_idx", + "columns": [ + { + "expression": "credential_set_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + 
"webhook_workflow_id_workflow_id_fk": { + "name": "webhook_workflow_id_workflow_id_fk", + "tableFrom": "webhook", + "tableTo": "workflow", + "columnsFrom": [ + "workflow_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "webhook_deployment_version_id_workflow_deployment_version_id_fk": { + "name": "webhook_deployment_version_id_workflow_deployment_version_id_fk", + "tableFrom": "webhook", + "tableTo": "workflow_deployment_version", + "columnsFrom": [ + "deployment_version_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "webhook_credential_set_id_credential_set_id_fk": { + "name": "webhook_credential_set_id_credential_set_id_fk", + "tableFrom": "webhook", + "tableTo": "credential_set", + "columnsFrom": [ + "credential_set_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow": { + "name": "workflow", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "folder_id": { + "name": "folder_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "sort_order": { + "name": "sort_order", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "color": { + "name": "color", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'#3972F6'" + }, + 
"last_synced": { + "name": "last_synced", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "is_deployed": { + "name": "is_deployed", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "deployed_at": { + "name": "deployed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "run_count": { + "name": "run_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "last_run_at": { + "name": "last_run_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "variables": { + "name": "variables", + "type": "json", + "primaryKey": false, + "notNull": false, + "default": "'{}'" + } + }, + "indexes": { + "workflow_user_id_idx": { + "name": "workflow_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_workspace_id_idx": { + "name": "workflow_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_user_workspace_idx": { + "name": "workflow_user_workspace_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_folder_sort_idx": { + "name": "workflow_folder_sort_idx", + "columns": [ + { + "expression": "folder_id", + 
"isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "sort_order", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_user_id_user_id_fk": { + "name": "workflow_user_id_user_id_fk", + "tableFrom": "workflow", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_workspace_id_workspace_id_fk": { + "name": "workflow_workspace_id_workspace_id_fk", + "tableFrom": "workflow", + "tableTo": "workspace", + "columnsFrom": [ + "workspace_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_folder_id_workflow_folder_id_fk": { + "name": "workflow_folder_id_workflow_folder_id_fk", + "tableFrom": "workflow", + "tableTo": "workflow_folder", + "columnsFrom": [ + "folder_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_blocks": { + "name": "workflow_blocks", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "type": { + "name": "type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "position_x": { + "name": "position_x", + "type": "numeric", + "primaryKey": false, + "notNull": true + }, + "position_y": { + "name": "position_y", + "type": "numeric", + "primaryKey": false, + "notNull": true + }, + "enabled": { + "name": "enabled", + "type": "boolean", + "primaryKey": false, + "notNull": 
true, + "default": true + }, + "horizontal_handles": { + "name": "horizontal_handles", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "is_wide": { + "name": "is_wide", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "advanced_mode": { + "name": "advanced_mode", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "trigger_mode": { + "name": "trigger_mode", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "locked": { + "name": "locked", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "height": { + "name": "height", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "sub_blocks": { + "name": "sub_blocks", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "outputs": { + "name": "outputs", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "data": { + "name": "data", + "type": "jsonb", + "primaryKey": false, + "notNull": false, + "default": "'{}'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_blocks_workflow_id_idx": { + "name": "workflow_blocks_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_blocks_type_idx": { + "name": "workflow_blocks_type_idx", + "columns": [ + { + "expression": "type", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + 
"with": {} + } + }, + "foreignKeys": { + "workflow_blocks_workflow_id_workflow_id_fk": { + "name": "workflow_blocks_workflow_id_workflow_id_fk", + "tableFrom": "workflow_blocks", + "tableTo": "workflow", + "columnsFrom": [ + "workflow_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_checkpoints": { + "name": "workflow_checkpoints", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "uuid", + "primaryKey": true, + "notNull": true, + "default": "gen_random_uuid()" + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "chat_id": { + "name": "chat_id", + "type": "uuid", + "primaryKey": false, + "notNull": true + }, + "message_id": { + "name": "message_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "workflow_state": { + "name": "workflow_state", + "type": "json", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_checkpoints_user_id_idx": { + "name": "workflow_checkpoints_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_workflow_id_idx": { + "name": "workflow_checkpoints_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + 
"isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_chat_id_idx": { + "name": "workflow_checkpoints_chat_id_idx", + "columns": [ + { + "expression": "chat_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_message_id_idx": { + "name": "workflow_checkpoints_message_id_idx", + "columns": [ + { + "expression": "message_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_user_workflow_idx": { + "name": "workflow_checkpoints_user_workflow_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_workflow_chat_idx": { + "name": "workflow_checkpoints_workflow_chat_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "chat_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_created_at_idx": { + "name": "workflow_checkpoints_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_chat_created_at_idx": { + "name": "workflow_checkpoints_chat_created_at_idx", + "columns": [ + { + "expression": "chat_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "created_at", + 
"isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_checkpoints_user_id_user_id_fk": { + "name": "workflow_checkpoints_user_id_user_id_fk", + "tableFrom": "workflow_checkpoints", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_checkpoints_workflow_id_workflow_id_fk": { + "name": "workflow_checkpoints_workflow_id_workflow_id_fk", + "tableFrom": "workflow_checkpoints", + "tableTo": "workflow", + "columnsFrom": [ + "workflow_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_checkpoints_chat_id_copilot_chats_id_fk": { + "name": "workflow_checkpoints_chat_id_copilot_chats_id_fk", + "tableFrom": "workflow_checkpoints", + "tableTo": "copilot_chats", + "columnsFrom": [ + "chat_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_deployment_version": { + "name": "workflow_deployment_version", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "version": { + "name": "version", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "state": { + "name": "state", + "type": "json", + "primaryKey": false, + "notNull": true + }, + "is_active": { + "name": "is_active", + "type": "boolean", + "primaryKey": false, 
+ "notNull": true, + "default": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "workflow_deployment_version_workflow_version_unique": { + "name": "workflow_deployment_version_workflow_version_unique", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "version", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_deployment_version_workflow_active_idx": { + "name": "workflow_deployment_version_workflow_active_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "is_active", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_deployment_version_created_at_idx": { + "name": "workflow_deployment_version_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_deployment_version_workflow_id_workflow_id_fk": { + "name": "workflow_deployment_version_workflow_id_workflow_id_fk", + "tableFrom": "workflow_deployment_version", + "tableTo": "workflow", + "columnsFrom": [ + "workflow_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_edges": { + "name": "workflow_edges", + "schema": "", + 
"columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "source_block_id": { + "name": "source_block_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "target_block_id": { + "name": "target_block_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "source_handle": { + "name": "source_handle", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "target_handle": { + "name": "target_handle", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_edges_workflow_id_idx": { + "name": "workflow_edges_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_edges_workflow_source_idx": { + "name": "workflow_edges_workflow_source_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "source_block_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_edges_workflow_target_idx": { + "name": "workflow_edges_workflow_target_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "target_block_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_edges_workflow_id_workflow_id_fk": { + "name": 
"workflow_edges_workflow_id_workflow_id_fk", + "tableFrom": "workflow_edges", + "tableTo": "workflow", + "columnsFrom": [ + "workflow_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_edges_source_block_id_workflow_blocks_id_fk": { + "name": "workflow_edges_source_block_id_workflow_blocks_id_fk", + "tableFrom": "workflow_edges", + "tableTo": "workflow_blocks", + "columnsFrom": [ + "source_block_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_edges_target_block_id_workflow_blocks_id_fk": { + "name": "workflow_edges_target_block_id_workflow_blocks_id_fk", + "tableFrom": "workflow_edges", + "tableTo": "workflow_blocks", + "columnsFrom": [ + "target_block_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_execution_logs": { + "name": "workflow_execution_logs", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "execution_id": { + "name": "execution_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "state_snapshot_id": { + "name": "state_snapshot_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "deployment_version_id": { + "name": "deployment_version_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "level": { + "name": "level", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'running'" + }, + 
"trigger": { + "name": "trigger", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "started_at": { + "name": "started_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "ended_at": { + "name": "ended_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "total_duration_ms": { + "name": "total_duration_ms", + "type": "integer", + "primaryKey": false, + "notNull": false + }, + "execution_data": { + "name": "execution_data", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "cost": { + "name": "cost", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "files": { + "name": "files", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_execution_logs_workflow_id_idx": { + "name": "workflow_execution_logs_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_state_snapshot_id_idx": { + "name": "workflow_execution_logs_state_snapshot_id_idx", + "columns": [ + { + "expression": "state_snapshot_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_deployment_version_id_idx": { + "name": "workflow_execution_logs_deployment_version_id_idx", + "columns": [ + { + "expression": "deployment_version_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_trigger_idx": { + "name": "workflow_execution_logs_trigger_idx", + "columns": [ + { + 
"expression": "trigger", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_level_idx": { + "name": "workflow_execution_logs_level_idx", + "columns": [ + { + "expression": "level", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_started_at_idx": { + "name": "workflow_execution_logs_started_at_idx", + "columns": [ + { + "expression": "started_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_execution_id_unique": { + "name": "workflow_execution_logs_execution_id_unique", + "columns": [ + { + "expression": "execution_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_workflow_started_at_idx": { + "name": "workflow_execution_logs_workflow_started_at_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "started_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_workspace_started_at_idx": { + "name": "workflow_execution_logs_workspace_started_at_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "started_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_execution_logs_workflow_id_workflow_id_fk": { + "name": 
"workflow_execution_logs_workflow_id_workflow_id_fk", + "tableFrom": "workflow_execution_logs", + "tableTo": "workflow", + "columnsFrom": [ + "workflow_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "set null", + "onUpdate": "no action" + }, + "workflow_execution_logs_workspace_id_workspace_id_fk": { + "name": "workflow_execution_logs_workspace_id_workspace_id_fk", + "tableFrom": "workflow_execution_logs", + "tableTo": "workspace", + "columnsFrom": [ + "workspace_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_execution_logs_state_snapshot_id_workflow_execution_snapshots_id_fk": { + "name": "workflow_execution_logs_state_snapshot_id_workflow_execution_snapshots_id_fk", + "tableFrom": "workflow_execution_logs", + "tableTo": "workflow_execution_snapshots", + "columnsFrom": [ + "state_snapshot_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "no action", + "onUpdate": "no action" + }, + "workflow_execution_logs_deployment_version_id_workflow_deployment_version_id_fk": { + "name": "workflow_execution_logs_deployment_version_id_workflow_deployment_version_id_fk", + "tableFrom": "workflow_execution_logs", + "tableTo": "workflow_deployment_version", + "columnsFrom": [ + "deployment_version_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_execution_snapshots": { + "name": "workflow_execution_snapshots", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "state_hash": { + "name": "state_hash", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "state_data": { + "name": "state_data", + "type": "jsonb", + "primaryKey": 
false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_snapshots_workflow_id_idx": { + "name": "workflow_snapshots_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_snapshots_hash_idx": { + "name": "workflow_snapshots_hash_idx", + "columns": [ + { + "expression": "state_hash", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_snapshots_workflow_hash_idx": { + "name": "workflow_snapshots_workflow_hash_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "state_hash", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_snapshots_created_at_idx": { + "name": "workflow_snapshots_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_execution_snapshots_workflow_id_workflow_id_fk": { + "name": "workflow_execution_snapshots_workflow_id_workflow_id_fk", + "tableFrom": "workflow_execution_snapshots", + "tableTo": "workflow", + "columnsFrom": [ + "workflow_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_folder": { + "name": "workflow_folder", + "schema": "", + 
"columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "parent_id": { + "name": "parent_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "color": { + "name": "color", + "type": "text", + "primaryKey": false, + "notNull": false, + "default": "'#6B7280'" + }, + "is_expanded": { + "name": "is_expanded", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "sort_order": { + "name": "sort_order", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_folder_user_idx": { + "name": "workflow_folder_user_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_folder_workspace_parent_idx": { + "name": "workflow_folder_workspace_parent_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "parent_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_folder_parent_sort_idx": { + "name": "workflow_folder_parent_sort_idx", + "columns": [ + { + "expression": "parent_id", + "isExpression": false, + 
"asc": true, + "nulls": "last" + }, + { + "expression": "sort_order", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_folder_user_id_user_id_fk": { + "name": "workflow_folder_user_id_user_id_fk", + "tableFrom": "workflow_folder", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_folder_workspace_id_workspace_id_fk": { + "name": "workflow_folder_workspace_id_workspace_id_fk", + "tableFrom": "workflow_folder", + "tableTo": "workspace", + "columnsFrom": [ + "workspace_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_mcp_server": { + "name": "workflow_mcp_server", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "is_public": { + "name": "is_public", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + 
"workflow_mcp_server_workspace_id_idx": { + "name": "workflow_mcp_server_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_mcp_server_created_by_idx": { + "name": "workflow_mcp_server_created_by_idx", + "columns": [ + { + "expression": "created_by", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_mcp_server_workspace_id_workspace_id_fk": { + "name": "workflow_mcp_server_workspace_id_workspace_id_fk", + "tableFrom": "workflow_mcp_server", + "tableTo": "workspace", + "columnsFrom": [ + "workspace_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_mcp_server_created_by_user_id_fk": { + "name": "workflow_mcp_server_created_by_user_id_fk", + "tableFrom": "workflow_mcp_server", + "tableTo": "user", + "columnsFrom": [ + "created_by" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_mcp_tool": { + "name": "workflow_mcp_tool", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "server_id": { + "name": "server_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "tool_name": { + "name": "tool_name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "tool_description": { + "name": "tool_description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "parameter_schema": { + "name": 
"parameter_schema", + "type": "json", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_mcp_tool_server_id_idx": { + "name": "workflow_mcp_tool_server_id_idx", + "columns": [ + { + "expression": "server_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_mcp_tool_workflow_id_idx": { + "name": "workflow_mcp_tool_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_mcp_tool_server_workflow_unique": { + "name": "workflow_mcp_tool_server_workflow_unique", + "columns": [ + { + "expression": "server_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_mcp_tool_server_id_workflow_mcp_server_id_fk": { + "name": "workflow_mcp_tool_server_id_workflow_mcp_server_id_fk", + "tableFrom": "workflow_mcp_tool", + "tableTo": "workflow_mcp_server", + "columnsFrom": [ + "server_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_mcp_tool_workflow_id_workflow_id_fk": { + "name": "workflow_mcp_tool_workflow_id_workflow_id_fk", + "tableFrom": "workflow_mcp_tool", + "tableTo": "workflow", + "columnsFrom": [ + "workflow_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" 
+ } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_schedule": { + "name": "workflow_schedule", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "deployment_version_id": { + "name": "deployment_version_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "block_id": { + "name": "block_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "cron_expression": { + "name": "cron_expression", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "next_run_at": { + "name": "next_run_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "last_ran_at": { + "name": "last_ran_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "last_queued_at": { + "name": "last_queued_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "trigger_type": { + "name": "trigger_type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "timezone": { + "name": "timezone", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'UTC'" + }, + "failed_count": { + "name": "failed_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'active'" + }, + "last_failed_at": { + "name": "last_failed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + 
}, + "indexes": { + "workflow_schedule_workflow_block_deployment_unique": { + "name": "workflow_schedule_workflow_block_deployment_unique", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "block_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "deployment_version_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_schedule_workflow_deployment_idx": { + "name": "workflow_schedule_workflow_deployment_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "deployment_version_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_schedule_workflow_id_workflow_id_fk": { + "name": "workflow_schedule_workflow_id_workflow_id_fk", + "tableFrom": "workflow_schedule", + "tableTo": "workflow", + "columnsFrom": [ + "workflow_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_schedule_deployment_version_id_workflow_deployment_version_id_fk": { + "name": "workflow_schedule_deployment_version_id_workflow_deployment_version_id_fk", + "tableFrom": "workflow_schedule", + "tableTo": "workflow_deployment_version", + "columnsFrom": [ + "deployment_version_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_subflows": { + "name": "workflow_subflows", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + 
"name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "type": { + "name": "type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "config": { + "name": "config", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_subflows_workflow_id_idx": { + "name": "workflow_subflows_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_subflows_workflow_type_idx": { + "name": "workflow_subflows_workflow_type_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "type", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_subflows_workflow_id_workflow_id_fk": { + "name": "workflow_subflows_workflow_id_workflow_id_fk", + "tableFrom": "workflow_subflows", + "tableTo": "workflow", + "columnsFrom": [ + "workflow_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace": { + "name": "workspace", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "owner_id": { + 
"name": "owner_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "billed_account_user_id": { + "name": "billed_account_user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "allow_personal_api_keys": { + "name": "allow_personal_api_keys", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": { + "workspace_owner_id_user_id_fk": { + "name": "workspace_owner_id_user_id_fk", + "tableFrom": "workspace", + "tableTo": "user", + "columnsFrom": [ + "owner_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workspace_billed_account_user_id_user_id_fk": { + "name": "workspace_billed_account_user_id_user_id_fk", + "tableFrom": "workspace", + "tableTo": "user", + "columnsFrom": [ + "billed_account_user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "no action", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_byok_keys": { + "name": "workspace_byok_keys", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "provider_id": { + "name": "provider_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "encrypted_api_key": { + "name": "encrypted_api_key", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": false + }, + 
"created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workspace_byok_provider_unique": { + "name": "workspace_byok_provider_unique", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "provider_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_byok_workspace_idx": { + "name": "workspace_byok_workspace_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workspace_byok_keys_workspace_id_workspace_id_fk": { + "name": "workspace_byok_keys_workspace_id_workspace_id_fk", + "tableFrom": "workspace_byok_keys", + "tableTo": "workspace", + "columnsFrom": [ + "workspace_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workspace_byok_keys_created_by_user_id_fk": { + "name": "workspace_byok_keys_created_by_user_id_fk", + "tableFrom": "workspace_byok_keys", + "tableTo": "user", + "columnsFrom": [ + "created_by" + ], + "columnsTo": [ + "id" + ], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_environment": { + "name": "workspace_environment", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, 
+ "variables": { + "name": "variables", + "type": "json", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workspace_environment_workspace_unique": { + "name": "workspace_environment_workspace_unique", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workspace_environment_workspace_id_workspace_id_fk": { + "name": "workspace_environment_workspace_id_workspace_id_fk", + "tableFrom": "workspace_environment", + "tableTo": "workspace", + "columnsFrom": [ + "workspace_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_file": { + "name": "workspace_file", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "key": { + "name": "key", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "size": { + "name": "size", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "type": { + "name": "type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "uploaded_by": { + "name": "uploaded_by", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "uploaded_at": { + "name": "uploaded_at", + 
"type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workspace_file_workspace_id_idx": { + "name": "workspace_file_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_file_key_idx": { + "name": "workspace_file_key_idx", + "columns": [ + { + "expression": "key", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workspace_file_workspace_id_workspace_id_fk": { + "name": "workspace_file_workspace_id_workspace_id_fk", + "tableFrom": "workspace_file", + "tableTo": "workspace", + "columnsFrom": [ + "workspace_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workspace_file_uploaded_by_user_id_fk": { + "name": "workspace_file_uploaded_by_user_id_fk", + "tableFrom": "workspace_file", + "tableTo": "user", + "columnsFrom": [ + "uploaded_by" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "workspace_file_key_unique": { + "name": "workspace_file_key_unique", + "nullsNotDistinct": false, + "columns": [ + "key" + ] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_files": { + "name": "workspace_files", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "key": { + "name": "key", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + 
"context": { + "name": "context", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "original_name": { + "name": "original_name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "content_type": { + "name": "content_type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "size": { + "name": "size", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "uploaded_at": { + "name": "uploaded_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workspace_files_key_idx": { + "name": "workspace_files_key_idx", + "columns": [ + { + "expression": "key", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_files_user_id_idx": { + "name": "workspace_files_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_files_workspace_id_idx": { + "name": "workspace_files_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_files_context_idx": { + "name": "workspace_files_context_idx", + "columns": [ + { + "expression": "context", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workspace_files_user_id_user_id_fk": { + "name": "workspace_files_user_id_user_id_fk", + "tableFrom": "workspace_files", + "tableTo": "user", + "columnsFrom": [ + "user_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + 
"workspace_files_workspace_id_workspace_id_fk": { + "name": "workspace_files_workspace_id_workspace_id_fk", + "tableFrom": "workspace_files", + "tableTo": "workspace", + "columnsFrom": [ + "workspace_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "workspace_files_key_unique": { + "name": "workspace_files_key_unique", + "nullsNotDistinct": false, + "columns": [ + "key" + ] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_invitation": { + "name": "workspace_invitation", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "inviter_id": { + "name": "inviter_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "role": { + "name": "role", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'member'" + }, + "status": { + "name": "status", + "type": "workspace_invitation_status", + "typeSchema": "public", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "token": { + "name": "token", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "permissions": { + "name": "permissions", + "type": "permission_type", + "typeSchema": "public", + "primaryKey": false, + "notNull": true, + "default": "'admin'" + }, + "org_invitation_id": { + "name": "org_invitation_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + 
"updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": { + "workspace_invitation_workspace_id_workspace_id_fk": { + "name": "workspace_invitation_workspace_id_workspace_id_fk", + "tableFrom": "workspace_invitation", + "tableTo": "workspace", + "columnsFrom": [ + "workspace_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workspace_invitation_inviter_id_user_id_fk": { + "name": "workspace_invitation_inviter_id_user_id_fk", + "tableFrom": "workspace_invitation", + "tableTo": "user", + "columnsFrom": [ + "inviter_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "workspace_invitation_token_unique": { + "name": "workspace_invitation_token_unique", + "nullsNotDistinct": false, + "columns": [ + "token" + ] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_notification_delivery": { + "name": "workspace_notification_delivery", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "subscription_id": { + "name": "subscription_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "execution_id": { + "name": "execution_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "status": { + "name": "status", + "type": "notification_delivery_status", + "typeSchema": "public", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "attempts": { + "name": "attempts", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "last_attempt_at": { + "name": "last_attempt_at", + "type": "timestamp", + "primaryKey": false, + 
"notNull": false + }, + "next_attempt_at": { + "name": "next_attempt_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "response_status": { + "name": "response_status", + "type": "integer", + "primaryKey": false, + "notNull": false + }, + "response_body": { + "name": "response_body", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "error_message": { + "name": "error_message", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workspace_notification_delivery_subscription_id_idx": { + "name": "workspace_notification_delivery_subscription_id_idx", + "columns": [ + { + "expression": "subscription_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_notification_delivery_execution_id_idx": { + "name": "workspace_notification_delivery_execution_id_idx", + "columns": [ + { + "expression": "execution_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_notification_delivery_status_idx": { + "name": "workspace_notification_delivery_status_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_notification_delivery_next_attempt_idx": { + "name": "workspace_notification_delivery_next_attempt_idx", + "columns": [ + { + "expression": "next_attempt_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + 
"concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workspace_notification_delivery_subscription_id_workspace_notification_subscription_id_fk": { + "name": "workspace_notification_delivery_subscription_id_workspace_notification_subscription_id_fk", + "tableFrom": "workspace_notification_delivery", + "tableTo": "workspace_notification_subscription", + "columnsFrom": [ + "subscription_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workspace_notification_delivery_workflow_id_workflow_id_fk": { + "name": "workspace_notification_delivery_workflow_id_workflow_id_fk", + "tableFrom": "workspace_notification_delivery", + "tableTo": "workflow", + "columnsFrom": [ + "workflow_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_notification_subscription": { + "name": "workspace_notification_subscription", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "notification_type": { + "name": "notification_type", + "type": "notification_type", + "typeSchema": "public", + "primaryKey": false, + "notNull": true + }, + "workflow_ids": { + "name": "workflow_ids", + "type": "text[]", + "primaryKey": false, + "notNull": true, + "default": "'{}'::text[]" + }, + "all_workflows": { + "name": "all_workflows", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "level_filter": { + "name": "level_filter", + "type": "text[]", + "primaryKey": false, + "notNull": true, + "default": "ARRAY['info', 'error']::text[]" + }, + "trigger_filter": { + "name": "trigger_filter", + "type": "text[]", + "primaryKey": 
false, + "notNull": true, + "default": "ARRAY['api', 'webhook', 'schedule', 'manual', 'chat']::text[]" + }, + "include_final_output": { + "name": "include_final_output", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "include_trace_spans": { + "name": "include_trace_spans", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "include_rate_limits": { + "name": "include_rate_limits", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "include_usage_data": { + "name": "include_usage_data", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "webhook_config": { + "name": "webhook_config", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "email_recipients": { + "name": "email_recipients", + "type": "text[]", + "primaryKey": false, + "notNull": false + }, + "slack_config": { + "name": "slack_config", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "alert_config": { + "name": "alert_config", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "last_alert_at": { + "name": "last_alert_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "active": { + "name": "active", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workspace_notification_workspace_id_idx": { + "name": "workspace_notification_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } 
+ ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_notification_active_idx": { + "name": "workspace_notification_active_idx", + "columns": [ + { + "expression": "active", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_notification_type_idx": { + "name": "workspace_notification_type_idx", + "columns": [ + { + "expression": "notification_type", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workspace_notification_subscription_workspace_id_workspace_id_fk": { + "name": "workspace_notification_subscription_workspace_id_workspace_id_fk", + "tableFrom": "workspace_notification_subscription", + "tableTo": "workspace", + "columnsFrom": [ + "workspace_id" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workspace_notification_subscription_created_by_user_id_fk": { + "name": "workspace_notification_subscription_created_by_user_id_fk", + "tableFrom": "workspace_notification_subscription", + "tableTo": "user", + "columnsFrom": [ + "created_by" + ], + "columnsTo": [ + "id" + ], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + } + }, + "enums": { + "public.a2a_task_status": { + "name": "a2a_task_status", + "schema": "public", + "values": [ + "submitted", + "working", + "input-required", + "completed", + "failed", + "canceled", + "rejected", + "auth-required", + "unknown" + ] + }, + "public.billing_blocked_reason": { + "name": "billing_blocked_reason", + "schema": "public", + "values": [ + "payment_failed", + "dispute" + ] + }, + "public.credential_set_invitation_status": { + "name": 
"credential_set_invitation_status", + "schema": "public", + "values": [ + "pending", + "accepted", + "expired", + "cancelled" + ] + }, + "public.credential_set_member_status": { + "name": "credential_set_member_status", + "schema": "public", + "values": [ + "active", + "pending", + "revoked" + ] + }, + "public.notification_delivery_status": { + "name": "notification_delivery_status", + "schema": "public", + "values": [ + "pending", + "in_progress", + "success", + "failed" + ] + }, + "public.notification_type": { + "name": "notification_type", + "schema": "public", + "values": [ + "webhook", + "email", + "slack" + ] + }, + "public.permission_type": { + "name": "permission_type", + "schema": "public", + "values": [ + "admin", + "write", + "read" + ] + }, + "public.template_creator_type": { + "name": "template_creator_type", + "schema": "public", + "values": [ + "user", + "organization" + ] + }, + "public.template_status": { + "name": "template_status", + "schema": "public", + "values": [ + "pending", + "approved", + "rejected" + ] + }, + "public.usage_log_category": { + "name": "usage_log_category", + "schema": "public", + "values": [ + "model", + "fixed" + ] + }, + "public.usage_log_source": { + "name": "usage_log_source", + "schema": "public", + "values": [ + "workflow", + "wand", + "copilot", + "mcp_copilot" + ] + }, + "public.workspace_invitation_status": { + "name": "workspace_invitation_status", + "schema": "public", + "values": [ + "pending", + "accepted", + "rejected", + "cancelled" + ] + } + }, + "schemas": {}, + "sequences": {}, + "roles": {}, + "policies": {}, + "views": {}, + "_meta": { + "columns": {}, + "schemas": {}, + "tables": {} + } +} \ No newline at end of file diff --git a/packages/db/migrations/meta/_journal.json b/packages/db/migrations/meta/_journal.json index 9a48393199..d920a091da 100644 --- a/packages/db/migrations/meta/_journal.json +++ b/packages/db/migrations/meta/_journal.json @@ -1065,6 +1065,13 @@ "when": 1770336289511, "tag": 
"0152_parallel_frog_thor", "breakpoints": true + }, + { + "idx": 153, + "version": "7", + "when": 1770410282842, + "tag": "0153_complete_arclight", + "breakpoints": true } ] -} +} \ No newline at end of file diff --git a/packages/db/schema.ts b/packages/db/schema.ts index 6fee4786a3..d145c57962 100644 --- a/packages/db/schema.ts +++ b/packages/db/schema.ts @@ -715,6 +715,10 @@ export const userStats = pgTable('user_stats', { lastPeriodCopilotCost: decimal('last_period_copilot_cost').default('0'), totalCopilotTokens: integer('total_copilot_tokens').notNull().default(0), totalCopilotCalls: integer('total_copilot_calls').notNull().default(0), + // MCP Copilot usage tracking + totalMcpCopilotCalls: integer('total_mcp_copilot_calls').notNull().default(0), + totalMcpCopilotCost: decimal('total_mcp_copilot_cost').notNull().default('0'), + currentPeriodMcpCopilotCost: decimal('current_period_mcp_copilot_cost').notNull().default('0'), // Storage tracking (for free/pro users) storageUsedBytes: bigint('storage_used_bytes', { mode: 'number' }).notNull().default(0), lastActive: timestamp('last_active').notNull().defaultNow(), @@ -1968,7 +1972,12 @@ export const a2aPushNotificationConfig = pgTable( ) export const usageLogCategoryEnum = pgEnum('usage_log_category', ['model', 'fixed']) -export const usageLogSourceEnum = pgEnum('usage_log_source', ['workflow', 'wand', 'copilot']) +export const usageLogSourceEnum = pgEnum('usage_log_source', [ + 'workflow', + 'wand', + 'copilot', + 'mcp_copilot', +]) export const usageLog = pgTable( 'usage_log', From 6cb112e03f8970cf6ca8bff3758b0e7afd23491a Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Fri, 6 Feb 2026 12:38:31 -0800 Subject: [PATCH 37/72] Fix lint --- apps/sim/app/api/mcp/copilot/route.ts | 19 +- .../db/migrations/meta/0153_snapshot.json | 977 +++++------------- packages/db/migrations/meta/_journal.json | 2 +- 3 files changed, 253 insertions(+), 745 deletions(-) diff --git a/apps/sim/app/api/mcp/copilot/route.ts 
b/apps/sim/app/api/mcp/copilot/route.ts index caf095110f..aa5fb7de40 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -105,17 +105,20 @@ export async function POST(request: NextRequest) { const apiKeyHeader = request.headers.get('x-api-key') if (!apiKeyHeader) { return NextResponse.json( - createError(0, -32000, 'API key required. Set the x-api-key header with a valid Sim API key.'), + createError( + 0, + -32000, + 'API key required. Set the x-api-key header with a valid Sim API key.' + ), { status: 401 } ) } const authResult = await authenticateApiKeyFromHeader(apiKeyHeader) if (!authResult.success || !authResult.userId) { - return NextResponse.json( - createError(0, -32000, authResult.error || 'Invalid API key'), - { status: 401 } - ) + return NextResponse.json(createError(0, -32000, authResult.error || 'Invalid API key'), { + status: 401, + }) } // Fire-and-forget last-used update @@ -143,7 +146,11 @@ export async function POST(request: NextRequest) { const usageCheck = await checkServerSideUsageLimits(userId) if (usageCheck.isExceeded) { return NextResponse.json( - createError(id, -32000, `Usage limit exceeded: ${usageCheck.message || 'Upgrade your plan.'}`), + createError( + id, + -32000, + `Usage limit exceeded: ${usageCheck.message || 'Upgrade your plan.'}` + ), { status: 402 } ) } diff --git a/packages/db/migrations/meta/0153_snapshot.json b/packages/db/migrations/meta/0153_snapshot.json index 544b33eee7..82e45d5ee7 100644 --- a/packages/db/migrations/meta/0153_snapshot.json +++ b/packages/db/migrations/meta/0153_snapshot.json @@ -180,12 +180,8 @@ "name": "a2a_agent_workspace_id_workspace_id_fk", "tableFrom": "a2a_agent", "tableTo": "workspace", - "columnsFrom": [ - "workspace_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -193,12 +189,8 @@ "name": "a2a_agent_workflow_id_workflow_id_fk", "tableFrom": 
"a2a_agent", "tableTo": "workflow", - "columnsFrom": [ - "workflow_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -206,12 +198,8 @@ "name": "a2a_agent_created_by_user_id_fk", "tableFrom": "a2a_agent", "tableTo": "user", - "columnsFrom": [ - "created_by" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["created_by"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -322,12 +310,8 @@ "name": "a2a_push_notification_config_task_id_a2a_task_id_fk", "tableFrom": "a2a_push_notification_config", "tableTo": "a2a_task", - "columnsFrom": [ - "task_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["task_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -498,12 +482,8 @@ "name": "a2a_task_agent_id_a2a_agent_id_fk", "tableFrom": "a2a_task", "tableTo": "a2a_agent", - "columnsFrom": [ - "agent_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["agent_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -661,12 +641,8 @@ "name": "account_user_id_user_id_fk", "tableFrom": "account", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -800,12 +776,8 @@ "name": "api_key_user_id_user_id_fk", "tableFrom": "api_key", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -813,12 +785,8 @@ "name": "api_key_workspace_id_workspace_id_fk", "tableFrom": "api_key", "tableTo": "workspace", - "columnsFrom": [ - "workspace_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -826,12 +794,8 @@ "name": "api_key_created_by_user_id_fk", 
"tableFrom": "api_key", "tableTo": "user", - "columnsFrom": [ - "created_by" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["created_by"], + "columnsTo": ["id"], "onDelete": "set null", "onUpdate": "no action" } @@ -841,9 +805,7 @@ "api_key_key_unique": { "name": "api_key_key_unique", "nullsNotDistinct": false, - "columns": [ - "key" - ] + "columns": ["key"] } }, "policies": {}, @@ -1119,12 +1081,8 @@ "name": "chat_workflow_id_workflow_id_fk", "tableFrom": "chat", "tableTo": "workflow", - "columnsFrom": [ - "workflow_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -1132,12 +1090,8 @@ "name": "chat_user_id_user_id_fk", "tableFrom": "chat", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -1318,12 +1272,8 @@ "name": "copilot_chats_user_id_user_id_fk", "tableFrom": "copilot_chats", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -1331,12 +1281,8 @@ "name": "copilot_chats_workflow_id_workflow_id_fk", "tableFrom": "copilot_chats", "tableTo": "workflow", - "columnsFrom": [ - "workflow_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -1503,12 +1449,8 @@ "name": "copilot_feedback_user_id_user_id_fk", "tableFrom": "copilot_feedback", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -1516,12 +1458,8 @@ "name": "copilot_feedback_chat_id_copilot_chats_id_fk", "tableFrom": "copilot_feedback", "tableTo": "copilot_chats", - "columnsFrom": [ - "chat_id" - ], - 
"columnsTo": [ - "id" - ], + "columnsFrom": ["chat_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -1660,12 +1598,8 @@ "name": "credential_set_organization_id_organization_id_fk", "tableFrom": "credential_set", "tableTo": "organization", - "columnsFrom": [ - "organization_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["organization_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -1673,12 +1607,8 @@ "name": "credential_set_created_by_user_id_fk", "tableFrom": "credential_set", "tableTo": "user", - "columnsFrom": [ - "created_by" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["created_by"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -1824,12 +1754,8 @@ "name": "credential_set_invitation_credential_set_id_credential_set_id_fk", "tableFrom": "credential_set_invitation", "tableTo": "credential_set", - "columnsFrom": [ - "credential_set_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["credential_set_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -1837,12 +1763,8 @@ "name": "credential_set_invitation_invited_by_user_id_fk", "tableFrom": "credential_set_invitation", "tableTo": "user", - "columnsFrom": [ - "invited_by" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["invited_by"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -1850,12 +1772,8 @@ "name": "credential_set_invitation_accepted_by_user_id_user_id_fk", "tableFrom": "credential_set_invitation", "tableTo": "user", - "columnsFrom": [ - "accepted_by_user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["accepted_by_user_id"], + "columnsTo": ["id"], "onDelete": "set null", "onUpdate": "no action" } @@ -1865,9 +1783,7 @@ "credential_set_invitation_token_unique": { "name": "credential_set_invitation_token_unique", "nullsNotDistinct": false, - "columns": [ - "token" - ] + "columns": ["token"] } }, "policies": {}, @@ -2004,12 
+1920,8 @@ "name": "credential_set_member_credential_set_id_credential_set_id_fk", "tableFrom": "credential_set_member", "tableTo": "credential_set", - "columnsFrom": [ - "credential_set_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["credential_set_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -2017,12 +1929,8 @@ "name": "credential_set_member_user_id_user_id_fk", "tableFrom": "credential_set_member", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -2030,12 +1938,8 @@ "name": "credential_set_member_invited_by_user_id_fk", "tableFrom": "credential_set_member", "tableTo": "user", - "columnsFrom": [ - "invited_by" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["invited_by"], + "columnsTo": ["id"], "onDelete": "set null", "onUpdate": "no action" } @@ -2144,12 +2048,8 @@ "name": "custom_tools_workspace_id_workspace_id_fk", "tableFrom": "custom_tools", "tableTo": "workspace", - "columnsFrom": [ - "workspace_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -2157,12 +2057,8 @@ "name": "custom_tools_user_id_user_id_fk", "tableFrom": "custom_tools", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "set null", "onUpdate": "no action" } @@ -2935,12 +2831,8 @@ "name": "document_knowledge_base_id_knowledge_base_id_fk", "tableFrom": "document", "tableTo": "knowledge_base", - "columnsFrom": [ - "knowledge_base_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["knowledge_base_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -3572,12 +3464,8 @@ "name": "embedding_knowledge_base_id_knowledge_base_id_fk", "tableFrom": "embedding", "tableTo": "knowledge_base", - 
"columnsFrom": [ - "knowledge_base_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["knowledge_base_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -3585,12 +3473,8 @@ "name": "embedding_document_id_document_id_fk", "tableFrom": "embedding", "tableTo": "document", - "columnsFrom": [ - "document_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["document_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -3642,12 +3526,8 @@ "name": "environment_user_id_user_id_fk", "tableFrom": "environment", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -3657,9 +3537,7 @@ "environment_user_id_unique": { "name": "environment_user_id_unique", "nullsNotDistinct": false, - "columns": [ - "user_id" - ] + "columns": ["user_id"] } }, "policies": {}, @@ -3814,12 +3692,8 @@ "name": "form_workflow_id_workflow_id_fk", "tableFrom": "form", "tableTo": "workflow", - "columnsFrom": [ - "workflow_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -3827,12 +3701,8 @@ "name": "form_user_id_user_id_fk", "tableFrom": "form", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -3982,12 +3852,8 @@ "name": "invitation_inviter_id_user_id_fk", "tableFrom": "invitation", "tableTo": "user", - "columnsFrom": [ - "inviter_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["inviter_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -3995,12 +3861,8 @@ "name": "invitation_organization_id_organization_id_fk", "tableFrom": "invitation", "tableTo": "organization", - "columnsFrom": [ - "organization_id" - ], - "columnsTo": [ - "id" - 
], + "columnsFrom": ["organization_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -4167,12 +4029,8 @@ "name": "knowledge_base_user_id_user_id_fk", "tableFrom": "knowledge_base", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -4180,12 +4038,8 @@ "name": "knowledge_base_workspace_id_workspace_id_fk", "tableFrom": "knowledge_base", "tableTo": "workspace", - "columnsFrom": [ - "workspace_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], "onDelete": "no action", "onUpdate": "no action" } @@ -4310,12 +4164,8 @@ "name": "knowledge_base_tag_definitions_knowledge_base_id_knowledge_base_id_fk", "tableFrom": "knowledge_base_tag_definitions", "tableTo": "knowledge_base", - "columnsFrom": [ - "knowledge_base_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["knowledge_base_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -4522,12 +4372,8 @@ "name": "mcp_servers_workspace_id_workspace_id_fk", "tableFrom": "mcp_servers", "tableTo": "workspace", - "columnsFrom": [ - "workspace_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -4535,12 +4381,8 @@ "name": "mcp_servers_created_by_user_id_fk", "tableFrom": "mcp_servers", "tableTo": "user", - "columnsFrom": [ - "created_by" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["created_by"], + "columnsTo": ["id"], "onDelete": "set null", "onUpdate": "no action" } @@ -4624,12 +4466,8 @@ "name": "member_user_id_user_id_fk", "tableFrom": "member", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -4637,12 +4475,8 @@ "name": 
"member_organization_id_organization_id_fk", "tableFrom": "member", "tableTo": "organization", - "columnsFrom": [ - "organization_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["organization_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -4760,12 +4594,8 @@ "name": "memory_workspace_id_workspace_id_fk", "tableFrom": "memory", "tableTo": "workspace", - "columnsFrom": [ - "workspace_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -4994,12 +4824,8 @@ "name": "paused_executions_workflow_id_workflow_id_fk", "tableFrom": "paused_executions", "tableTo": "workflow", - "columnsFrom": [ - "workflow_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -5147,12 +4973,8 @@ "name": "permission_group_organization_id_organization_id_fk", "tableFrom": "permission_group", "tableTo": "organization", - "columnsFrom": [ - "organization_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["organization_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -5160,12 +4982,8 @@ "name": "permission_group_created_by_user_id_fk", "tableFrom": "permission_group", "tableTo": "user", - "columnsFrom": [ - "created_by" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["created_by"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -5249,12 +5067,8 @@ "name": "permission_group_member_permission_group_id_permission_group_id_fk", "tableFrom": "permission_group_member", "tableTo": "permission_group", - "columnsFrom": [ - "permission_group_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["permission_group_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -5262,12 +5076,8 @@ "name": "permission_group_member_user_id_user_id_fk", "tableFrom": "permission_group_member", 
"tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -5275,12 +5085,8 @@ "name": "permission_group_member_assigned_by_user_id_fk", "tableFrom": "permission_group_member", "tableTo": "user", - "columnsFrom": [ - "assigned_by" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["assigned_by"], + "columnsTo": ["id"], "onDelete": "set null", "onUpdate": "no action" } @@ -5486,12 +5292,8 @@ "name": "permissions_user_id_user_id_fk", "tableFrom": "permissions", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -5662,12 +5464,8 @@ "name": "resume_queue_paused_execution_id_paused_executions_id_fk", "tableFrom": "resume_queue", "tableTo": "paused_executions", - "columnsFrom": [ - "paused_execution_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["paused_execution_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -5774,12 +5572,8 @@ "name": "session_user_id_user_id_fk", "tableFrom": "session", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -5787,12 +5581,8 @@ "name": "session_active_organization_id_organization_id_fk", "tableFrom": "session", "tableTo": "organization", - "columnsFrom": [ - "active_organization_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["active_organization_id"], + "columnsTo": ["id"], "onDelete": "set null", "onUpdate": "no action" } @@ -5802,9 +5592,7 @@ "session_token_unique": { "name": "session_token_unique", "nullsNotDistinct": false, - "columns": [ - "token" - ] + "columns": ["token"] } }, "policies": {}, @@ -5925,12 +5713,8 @@ "name": "settings_user_id_user_id_fk", "tableFrom": "settings", 
"tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -5940,9 +5724,7 @@ "settings_user_id_unique": { "name": "settings_user_id_unique", "nullsNotDistinct": false, - "columns": [ - "user_id" - ] + "columns": ["user_id"] } }, "policies": {}, @@ -6047,12 +5829,8 @@ "name": "skill_workspace_id_workspace_id_fk", "tableFrom": "skill", "tableTo": "workspace", - "columnsFrom": [ - "workspace_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -6060,12 +5838,8 @@ "name": "skill_user_id_user_id_fk", "tableFrom": "skill", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "set null", "onUpdate": "no action" } @@ -6196,12 +5970,8 @@ "name": "sso_provider_user_id_user_id_fk", "tableFrom": "sso_provider", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -6209,12 +5979,8 @@ "name": "sso_provider_organization_id_organization_id_fk", "tableFrom": "sso_provider", "tableTo": "organization", - "columnsFrom": [ - "organization_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["organization_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -6470,12 +6236,8 @@ "name": "template_creators_created_by_user_id_fk", "tableFrom": "template_creators", "tableTo": "user", - "columnsFrom": [ - "created_by" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["created_by"], + "columnsTo": ["id"], "onDelete": "set null", "onUpdate": "no action" } @@ -6659,12 +6421,8 @@ "name": "template_stars_user_id_user_id_fk", "tableFrom": "template_stars", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - 
"columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -6672,12 +6430,8 @@ "name": "template_stars_template_id_templates_id_fk", "tableFrom": "template_stars", "tableTo": "templates", - "columnsFrom": [ - "template_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["template_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -6924,12 +6678,8 @@ "name": "templates_workflow_id_workflow_id_fk", "tableFrom": "templates", "tableTo": "workflow", - "columnsFrom": [ - "workflow_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], "onDelete": "set null", "onUpdate": "no action" }, @@ -6937,12 +6687,8 @@ "name": "templates_creator_id_template_creators_id_fk", "tableFrom": "templates", "tableTo": "template_creators", - "columnsFrom": [ - "creator_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["creator_id"], + "columnsTo": ["id"], "onDelete": "set null", "onUpdate": "no action" } @@ -7100,12 +6846,8 @@ "name": "usage_log_user_id_user_id_fk", "tableFrom": "usage_log", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -7113,12 +6855,8 @@ "name": "usage_log_workspace_id_workspace_id_fk", "tableFrom": "usage_log", "tableTo": "workspace", - "columnsFrom": [ - "workspace_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], "onDelete": "set null", "onUpdate": "no action" }, @@ -7126,12 +6864,8 @@ "name": "usage_log_workflow_id_workflow_id_fk", "tableFrom": "usage_log", "tableTo": "workflow", - "columnsFrom": [ - "workflow_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], "onDelete": "set null", "onUpdate": "no action" } @@ -7209,9 +6943,7 @@ "user_email_unique": { "name": "user_email_unique", 
"nullsNotDistinct": false, - "columns": [ - "email" - ] + "columns": ["email"] } }, "policies": {}, @@ -7437,12 +7169,8 @@ "name": "user_stats_user_id_user_id_fk", "tableFrom": "user_stats", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -7452,9 +7180,7 @@ "user_stats_user_id_unique": { "name": "user_stats_user_id_unique", "nullsNotDistinct": false, - "columns": [ - "user_id" - ] + "columns": ["user_id"] } }, "policies": {}, @@ -7586,9 +7312,7 @@ "waitlist_email_unique": { "name": "waitlist_email_unique", "nullsNotDistinct": false, - "columns": [ - "email" - ] + "columns": ["email"] } }, "policies": {}, @@ -7767,12 +7491,8 @@ "name": "webhook_workflow_id_workflow_id_fk", "tableFrom": "webhook", "tableTo": "workflow", - "columnsFrom": [ - "workflow_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -7780,12 +7500,8 @@ "name": "webhook_deployment_version_id_workflow_deployment_version_id_fk", "tableFrom": "webhook", "tableTo": "workflow_deployment_version", - "columnsFrom": [ - "deployment_version_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["deployment_version_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -7793,12 +7509,8 @@ "name": "webhook_credential_set_id_credential_set_id_fk", "tableFrom": "webhook", "tableTo": "credential_set", - "columnsFrom": [ - "credential_set_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["credential_set_id"], + "columnsTo": ["id"], "onDelete": "set null", "onUpdate": "no action" } @@ -7994,12 +7706,8 @@ "name": "workflow_user_id_user_id_fk", "tableFrom": "workflow", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" 
}, @@ -8007,12 +7715,8 @@ "name": "workflow_workspace_id_workspace_id_fk", "tableFrom": "workflow", "tableTo": "workspace", - "columnsFrom": [ - "workspace_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -8020,12 +7724,8 @@ "name": "workflow_folder_id_workflow_folder_id_fk", "tableFrom": "workflow", "tableTo": "workflow_folder", - "columnsFrom": [ - "folder_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["folder_id"], + "columnsTo": ["id"], "onDelete": "set null", "onUpdate": "no action" } @@ -8198,12 +7898,8 @@ "name": "workflow_blocks_workflow_id_workflow_id_fk", "tableFrom": "workflow_blocks", "tableTo": "workflow", - "columnsFrom": [ - "workflow_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -8415,12 +8111,8 @@ "name": "workflow_checkpoints_user_id_user_id_fk", "tableFrom": "workflow_checkpoints", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -8428,12 +8120,8 @@ "name": "workflow_checkpoints_workflow_id_workflow_id_fk", "tableFrom": "workflow_checkpoints", "tableTo": "workflow", - "columnsFrom": [ - "workflow_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -8441,12 +8129,8 @@ "name": "workflow_checkpoints_chat_id_copilot_chats_id_fk", "tableFrom": "workflow_checkpoints", "tableTo": "copilot_chats", - "columnsFrom": [ - "chat_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["chat_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -8582,12 +8266,8 @@ "name": "workflow_deployment_version_workflow_id_workflow_id_fk", "tableFrom": "workflow_deployment_version", "tableTo": "workflow", - 
"columnsFrom": [ - "workflow_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -8710,12 +8390,8 @@ "name": "workflow_edges_workflow_id_workflow_id_fk", "tableFrom": "workflow_edges", "tableTo": "workflow", - "columnsFrom": [ - "workflow_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -8723,12 +8399,8 @@ "name": "workflow_edges_source_block_id_workflow_blocks_id_fk", "tableFrom": "workflow_edges", "tableTo": "workflow_blocks", - "columnsFrom": [ - "source_block_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["source_block_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -8736,12 +8408,8 @@ "name": "workflow_edges_target_block_id_workflow_blocks_id_fk", "tableFrom": "workflow_edges", "tableTo": "workflow_blocks", - "columnsFrom": [ - "target_block_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["target_block_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -9010,12 +8678,8 @@ "name": "workflow_execution_logs_workflow_id_workflow_id_fk", "tableFrom": "workflow_execution_logs", "tableTo": "workflow", - "columnsFrom": [ - "workflow_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], "onDelete": "set null", "onUpdate": "no action" }, @@ -9023,12 +8687,8 @@ "name": "workflow_execution_logs_workspace_id_workspace_id_fk", "tableFrom": "workflow_execution_logs", "tableTo": "workspace", - "columnsFrom": [ - "workspace_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -9036,12 +8696,8 @@ "name": "workflow_execution_logs_state_snapshot_id_workflow_execution_snapshots_id_fk", "tableFrom": "workflow_execution_logs", "tableTo": "workflow_execution_snapshots", - 
"columnsFrom": [ - "state_snapshot_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["state_snapshot_id"], + "columnsTo": ["id"], "onDelete": "no action", "onUpdate": "no action" }, @@ -9049,12 +8705,8 @@ "name": "workflow_execution_logs_deployment_version_id_workflow_deployment_version_id_fk", "tableFrom": "workflow_execution_logs", "tableTo": "workflow_deployment_version", - "columnsFrom": [ - "deployment_version_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["deployment_version_id"], + "columnsTo": ["id"], "onDelete": "set null", "onUpdate": "no action" } @@ -9174,12 +8826,8 @@ "name": "workflow_execution_snapshots_workflow_id_workflow_id_fk", "tableFrom": "workflow_execution_snapshots", "tableTo": "workflow", - "columnsFrom": [ - "workflow_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], "onDelete": "set null", "onUpdate": "no action" } @@ -9324,12 +8972,8 @@ "name": "workflow_folder_user_id_user_id_fk", "tableFrom": "workflow_folder", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -9337,12 +8981,8 @@ "name": "workflow_folder_workspace_id_workspace_id_fk", "tableFrom": "workflow_folder", "tableTo": "workspace", - "columnsFrom": [ - "workspace_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -9446,12 +9086,8 @@ "name": "workflow_mcp_server_workspace_id_workspace_id_fk", "tableFrom": "workflow_mcp_server", "tableTo": "workspace", - "columnsFrom": [ - "workspace_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -9459,12 +9095,8 @@ "name": "workflow_mcp_server_created_by_user_id_fk", "tableFrom": "workflow_mcp_server", "tableTo": "user", - "columnsFrom": [ - "created_by" 
- ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["created_by"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -9589,12 +9221,8 @@ "name": "workflow_mcp_tool_server_id_workflow_mcp_server_id_fk", "tableFrom": "workflow_mcp_tool", "tableTo": "workflow_mcp_server", - "columnsFrom": [ - "server_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["server_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -9602,12 +9230,8 @@ "name": "workflow_mcp_tool_workflow_id_workflow_id_fk", "tableFrom": "workflow_mcp_tool", "tableTo": "workflow", - "columnsFrom": [ - "workflow_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -9773,12 +9397,8 @@ "name": "workflow_schedule_workflow_id_workflow_id_fk", "tableFrom": "workflow_schedule", "tableTo": "workflow", - "columnsFrom": [ - "workflow_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -9786,12 +9406,8 @@ "name": "workflow_schedule_deployment_version_id_workflow_deployment_version_id_fk", "tableFrom": "workflow_schedule", "tableTo": "workflow_deployment_version", - "columnsFrom": [ - "deployment_version_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["deployment_version_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -9889,12 +9505,8 @@ "name": "workflow_subflows_workflow_id_workflow_id_fk", "tableFrom": "workflow_subflows", "tableTo": "workflow", - "columnsFrom": [ - "workflow_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -9961,12 +9573,8 @@ "name": "workspace_owner_id_user_id_fk", "tableFrom": "workspace", "tableTo": "user", - "columnsFrom": [ - "owner_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["owner_id"], + 
"columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -9974,12 +9582,8 @@ "name": "workspace_billed_account_user_id_user_id_fk", "tableFrom": "workspace", "tableTo": "user", - "columnsFrom": [ - "billed_account_user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["billed_account_user_id"], + "columnsTo": ["id"], "onDelete": "no action", "onUpdate": "no action" } @@ -10082,12 +9686,8 @@ "name": "workspace_byok_keys_workspace_id_workspace_id_fk", "tableFrom": "workspace_byok_keys", "tableTo": "workspace", - "columnsFrom": [ - "workspace_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -10095,12 +9695,8 @@ "name": "workspace_byok_keys_created_by_user_id_fk", "tableFrom": "workspace_byok_keys", "tableTo": "user", - "columnsFrom": [ - "created_by" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["created_by"], + "columnsTo": ["id"], "onDelete": "set null", "onUpdate": "no action" } @@ -10171,12 +9767,8 @@ "name": "workspace_environment_workspace_id_workspace_id_fk", "tableFrom": "workspace_environment", "tableTo": "workspace", - "columnsFrom": [ - "workspace_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -10278,12 +9870,8 @@ "name": "workspace_file_workspace_id_workspace_id_fk", "tableFrom": "workspace_file", "tableTo": "workspace", - "columnsFrom": [ - "workspace_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -10291,12 +9879,8 @@ "name": "workspace_file_uploaded_by_user_id_fk", "tableFrom": "workspace_file", "tableTo": "user", - "columnsFrom": [ - "uploaded_by" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["uploaded_by"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -10306,9 +9890,7 @@ 
"workspace_file_key_unique": { "name": "workspace_file_key_unique", "nullsNotDistinct": false, - "columns": [ - "key" - ] + "columns": ["key"] } }, "policies": {}, @@ -10442,12 +10024,8 @@ "name": "workspace_files_user_id_user_id_fk", "tableFrom": "workspace_files", "tableTo": "user", - "columnsFrom": [ - "user_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["user_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -10455,12 +10033,8 @@ "name": "workspace_files_workspace_id_workspace_id_fk", "tableFrom": "workspace_files", "tableTo": "workspace", - "columnsFrom": [ - "workspace_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -10470,9 +10044,7 @@ "workspace_files_key_unique": { "name": "workspace_files_key_unique", "nullsNotDistinct": false, - "columns": [ - "key" - ] + "columns": ["key"] } }, "policies": {}, @@ -10569,12 +10141,8 @@ "name": "workspace_invitation_workspace_id_workspace_id_fk", "tableFrom": "workspace_invitation", "tableTo": "workspace", - "columnsFrom": [ - "workspace_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -10582,12 +10150,8 @@ "name": "workspace_invitation_inviter_id_user_id_fk", "tableFrom": "workspace_invitation", "tableTo": "user", - "columnsFrom": [ - "inviter_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["inviter_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -10597,9 +10161,7 @@ "workspace_invitation_token_unique": { "name": "workspace_invitation_token_unique", "nullsNotDistinct": false, - "columns": [ - "token" - ] + "columns": ["token"] } }, "policies": {}, @@ -10761,12 +10323,8 @@ "name": "workspace_notification_delivery_subscription_id_workspace_notification_subscription_id_fk", "tableFrom": "workspace_notification_delivery", "tableTo": 
"workspace_notification_subscription", - "columnsFrom": [ - "subscription_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["subscription_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -10774,12 +10332,8 @@ "name": "workspace_notification_delivery_workflow_id_workflow_id_fk", "tableFrom": "workspace_notification_delivery", "tableTo": "workflow", - "columnsFrom": [ - "workflow_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -10979,12 +10533,8 @@ "name": "workspace_notification_subscription_workspace_id_workspace_id_fk", "tableFrom": "workspace_notification_subscription", "tableTo": "workspace", - "columnsFrom": [ - "workspace_id" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" }, @@ -10992,12 +10542,8 @@ "name": "workspace_notification_subscription_created_by_user_id_fk", "tableFrom": "workspace_notification_subscription", "tableTo": "user", - "columnsFrom": [ - "created_by" - ], - "columnsTo": [ - "id" - ], + "columnsFrom": ["created_by"], + "columnsTo": ["id"], "onDelete": "cascade", "onUpdate": "no action" } @@ -11028,102 +10574,57 @@ "public.billing_blocked_reason": { "name": "billing_blocked_reason", "schema": "public", - "values": [ - "payment_failed", - "dispute" - ] + "values": ["payment_failed", "dispute"] }, "public.credential_set_invitation_status": { "name": "credential_set_invitation_status", "schema": "public", - "values": [ - "pending", - "accepted", - "expired", - "cancelled" - ] + "values": ["pending", "accepted", "expired", "cancelled"] }, "public.credential_set_member_status": { "name": "credential_set_member_status", "schema": "public", - "values": [ - "active", - "pending", - "revoked" - ] + "values": ["active", "pending", "revoked"] }, "public.notification_delivery_status": { "name": "notification_delivery_status", 
"schema": "public", - "values": [ - "pending", - "in_progress", - "success", - "failed" - ] + "values": ["pending", "in_progress", "success", "failed"] }, "public.notification_type": { "name": "notification_type", "schema": "public", - "values": [ - "webhook", - "email", - "slack" - ] + "values": ["webhook", "email", "slack"] }, "public.permission_type": { "name": "permission_type", "schema": "public", - "values": [ - "admin", - "write", - "read" - ] + "values": ["admin", "write", "read"] }, "public.template_creator_type": { "name": "template_creator_type", "schema": "public", - "values": [ - "user", - "organization" - ] + "values": ["user", "organization"] }, "public.template_status": { "name": "template_status", "schema": "public", - "values": [ - "pending", - "approved", - "rejected" - ] + "values": ["pending", "approved", "rejected"] }, "public.usage_log_category": { "name": "usage_log_category", "schema": "public", - "values": [ - "model", - "fixed" - ] + "values": ["model", "fixed"] }, "public.usage_log_source": { "name": "usage_log_source", "schema": "public", - "values": [ - "workflow", - "wand", - "copilot", - "mcp_copilot" - ] + "values": ["workflow", "wand", "copilot", "mcp_copilot"] }, "public.workspace_invitation_status": { "name": "workspace_invitation_status", "schema": "public", - "values": [ - "pending", - "accepted", - "rejected", - "cancelled" - ] + "values": ["pending", "accepted", "rejected", "cancelled"] } }, "schemas": {}, @@ -11136,4 +10637,4 @@ "schemas": {}, "tables": {} } -} \ No newline at end of file +} diff --git a/packages/db/migrations/meta/_journal.json b/packages/db/migrations/meta/_journal.json index d920a091da..2fa880f2a2 100644 --- a/packages/db/migrations/meta/_journal.json +++ b/packages/db/migrations/meta/_journal.json @@ -1074,4 +1074,4 @@ "breakpoints": true } ] -} \ No newline at end of file +} From 18e493e14fcb3bca579a892060c1dcbd804be643 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Fri, 6 Feb 2026 13:45:52 
-0800 Subject: [PATCH 38/72] Fix mcp --- .../[...issuer]/route.ts | 6 + .../api/mcp/copilot/route.ts | 6 + .../oauth-authorization-server/route.ts | 6 + .../api/mcp/copilot/route.ts | 6 + .../oauth-protected-resource/route.ts | 6 + .../oauth-authorization-server/route.ts | 6 + .../oauth-protected-resource/route.ts | 6 + apps/sim/app/api/mcp/copilot/route.ts | 484 +++++++++++------- apps/sim/lib/copilot/config.ts | 4 +- apps/sim/lib/mcp/oauth-discovery.ts | 59 +++ apps/sim/proxy.ts | 12 +- 11 files changed, 424 insertions(+), 177 deletions(-) create mode 100644 apps/sim/app/.well-known/oauth-authorization-server/[...issuer]/route.ts create mode 100644 apps/sim/app/.well-known/oauth-authorization-server/api/mcp/copilot/route.ts create mode 100644 apps/sim/app/.well-known/oauth-authorization-server/route.ts create mode 100644 apps/sim/app/.well-known/oauth-protected-resource/api/mcp/copilot/route.ts create mode 100644 apps/sim/app/.well-known/oauth-protected-resource/route.ts create mode 100644 apps/sim/app/api/mcp/copilot/.well-known/oauth-authorization-server/route.ts create mode 100644 apps/sim/app/api/mcp/copilot/.well-known/oauth-protected-resource/route.ts create mode 100644 apps/sim/lib/mcp/oauth-discovery.ts diff --git a/apps/sim/app/.well-known/oauth-authorization-server/[...issuer]/route.ts b/apps/sim/app/.well-known/oauth-authorization-server/[...issuer]/route.ts new file mode 100644 index 0000000000..fb83dcfbee --- /dev/null +++ b/apps/sim/app/.well-known/oauth-authorization-server/[...issuer]/route.ts @@ -0,0 +1,6 @@ +import { type NextRequest, NextResponse } from 'next/server' +import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery' + +export async function GET(request: NextRequest): Promise { + return createMcpAuthorizationServerMetadataResponse(request) +} diff --git a/apps/sim/app/.well-known/oauth-authorization-server/api/mcp/copilot/route.ts 
b/apps/sim/app/.well-known/oauth-authorization-server/api/mcp/copilot/route.ts new file mode 100644 index 0000000000..fb83dcfbee --- /dev/null +++ b/apps/sim/app/.well-known/oauth-authorization-server/api/mcp/copilot/route.ts @@ -0,0 +1,6 @@ +import { type NextRequest, NextResponse } from 'next/server' +import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery' + +export async function GET(request: NextRequest): Promise { + return createMcpAuthorizationServerMetadataResponse(request) +} diff --git a/apps/sim/app/.well-known/oauth-authorization-server/route.ts b/apps/sim/app/.well-known/oauth-authorization-server/route.ts new file mode 100644 index 0000000000..fb83dcfbee --- /dev/null +++ b/apps/sim/app/.well-known/oauth-authorization-server/route.ts @@ -0,0 +1,6 @@ +import { type NextRequest, NextResponse } from 'next/server' +import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery' + +export async function GET(request: NextRequest): Promise { + return createMcpAuthorizationServerMetadataResponse(request) +} diff --git a/apps/sim/app/.well-known/oauth-protected-resource/api/mcp/copilot/route.ts b/apps/sim/app/.well-known/oauth-protected-resource/api/mcp/copilot/route.ts new file mode 100644 index 0000000000..2ab9b52be6 --- /dev/null +++ b/apps/sim/app/.well-known/oauth-protected-resource/api/mcp/copilot/route.ts @@ -0,0 +1,6 @@ +import { type NextRequest, NextResponse } from 'next/server' +import { createMcpProtectedResourceMetadataResponse } from '@/lib/mcp/oauth-discovery' + +export async function GET(request: NextRequest): Promise { + return createMcpProtectedResourceMetadataResponse(request) +} diff --git a/apps/sim/app/.well-known/oauth-protected-resource/route.ts b/apps/sim/app/.well-known/oauth-protected-resource/route.ts new file mode 100644 index 0000000000..2ab9b52be6 --- /dev/null +++ b/apps/sim/app/.well-known/oauth-protected-resource/route.ts @@ -0,0 +1,6 @@ +import { type NextRequest, 
NextResponse } from 'next/server' +import { createMcpProtectedResourceMetadataResponse } from '@/lib/mcp/oauth-discovery' + +export async function GET(request: NextRequest): Promise { + return createMcpProtectedResourceMetadataResponse(request) +} diff --git a/apps/sim/app/api/mcp/copilot/.well-known/oauth-authorization-server/route.ts b/apps/sim/app/api/mcp/copilot/.well-known/oauth-authorization-server/route.ts new file mode 100644 index 0000000000..fb83dcfbee --- /dev/null +++ b/apps/sim/app/api/mcp/copilot/.well-known/oauth-authorization-server/route.ts @@ -0,0 +1,6 @@ +import { type NextRequest, NextResponse } from 'next/server' +import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery' + +export async function GET(request: NextRequest): Promise { + return createMcpAuthorizationServerMetadataResponse(request) +} diff --git a/apps/sim/app/api/mcp/copilot/.well-known/oauth-protected-resource/route.ts b/apps/sim/app/api/mcp/copilot/.well-known/oauth-protected-resource/route.ts new file mode 100644 index 0000000000..2ab9b52be6 --- /dev/null +++ b/apps/sim/app/api/mcp/copilot/.well-known/oauth-protected-resource/route.ts @@ -0,0 +1,6 @@ +import { type NextRequest, NextResponse } from 'next/server' +import { createMcpProtectedResourceMetadataResponse } from '@/lib/mcp/oauth-discovery' + +export async function GET(request: NextRequest): Promise { + return createMcpProtectedResourceMetadataResponse(request) +} diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index aa5fb7de40..5bc19858e9 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -1,13 +1,17 @@ +import { Server } from '@modelcontextprotocol/sdk/server/index.js' +import type { Transport } from '@modelcontextprotocol/sdk/shared/transport.js' import { + CallToolRequestSchema, type CallToolResult, ErrorCode, - type InitializeResult, isJSONRPCNotification, isJSONRPCRequest, type JSONRPCError, type 
JSONRPCMessage, - type JSONRPCResponse, type ListToolsResult, + ListToolsRequestSchema, + McpError, + type MessageExtraInfo, type RequestId, } from '@modelcontextprotocol/sdk/types.js' import { db } from '@sim/db' @@ -74,11 +78,75 @@ When the user refers to a workflow by name or description ("the email one", "my - Variable syntax: \`\` for block outputs, \`{{ENV_VAR}}\` for env vars. ` -function createResponse(id: RequestId, result: unknown): JSONRPCResponse { - return { - jsonrpc: '2.0', - id, - result: result as JSONRPCResponse['result'], +class SingleRequestTransport implements Transport { + private started = false + private outgoing: JSONRPCMessage[] = [] + private waitingResolvers: Array<(message: JSONRPCMessage) => void> = [] + + onclose?: () => void + onerror?: (error: Error) => void + onmessage?: (message: JSONRPCMessage, extra?: MessageExtraInfo) => void + sessionId?: string + + async start(): Promise { + if (this.started) { + throw new Error('Transport already started') + } + this.started = true + } + + async send(message: JSONRPCMessage): Promise { + this.outgoing.push(message) + const resolver = this.waitingResolvers.shift() + if (resolver) { + resolver(message) + } + } + + async close(): Promise { + this.onclose?.() + } + + async dispatch(message: JSONRPCMessage, extra?: MessageExtraInfo): Promise { + if (!this.onmessage) { + throw new Error('Transport is not connected to an MCP server') + } + + await Promise.resolve(this.onmessage(message, extra)) + } + + consumeResponse(): JSONRPCMessage | null { + if (this.outgoing.length === 0) { + return null + } + + const [firstResponse] = this.outgoing + this.outgoing = [] + return firstResponse + } + + async waitForResponse(timeoutMs = 5000): Promise { + const immediate = this.consumeResponse() + if (immediate) { + return immediate + } + + return new Promise((resolve) => { + const timeout = setTimeout(() => { + const index = this.waitingResolvers.indexOf(resolver) + if (index >= 0) { + 
this.waitingResolvers.splice(index, 1) + } + resolve(null) + }, timeoutMs) + + const resolver = (message: JSONRPCMessage) => { + clearTimeout(timeout) + resolve(message) + } + + this.waitingResolvers.push(resolver) + }) } } @@ -90,6 +158,81 @@ function createError(id: RequestId, code: ErrorCode | number, message: string): } } +function buildMcpServer(userId?: string): Server { + const server = new Server( + { + name: 'sim-copilot', + version: '1.0.0', + }, + { + capabilities: { tools: {} }, + instructions: MCP_SERVER_INSTRUCTIONS, + } + ) + + server.setRequestHandler(ListToolsRequestSchema, async () => { + const directTools = DIRECT_TOOL_DEFS.map((tool) => ({ + name: tool.name, + description: tool.description, + inputSchema: tool.inputSchema, + })) + + const subagentTools = SUBAGENT_TOOL_DEFS.map((tool) => ({ + name: tool.name, + description: tool.description, + inputSchema: tool.inputSchema, + })) + + const result: ListToolsResult = { + tools: [...directTools, ...subagentTools], + } + + return result + }) + + server.setRequestHandler(CallToolRequestSchema, async (request) => { + if (!userId) { + throw new McpError( + ErrorCode.InvalidRequest, + 'API key required. Set the x-api-key header with a valid Sim API key.' 
+ ) + } + + const params = request.params as { name?: string; arguments?: Record } | undefined + if (!params?.name) { + throw new McpError(ErrorCode.InvalidParams, 'Tool name required') + } + + return handleToolsCall( + { + name: params.name, + arguments: params.arguments, + }, + userId + ) + }) + + return server +} + +async function handleMcpRequestWithSdk( + message: JSONRPCMessage, + userId?: string +): Promise { + const server = buildMcpServer(userId) + const transport = new SingleRequestTransport() + + await server.connect(transport) + + try { + await transport.dispatch(message) + return transport.waitForResponse() + } finally { + await server.close().catch(() => {}) + await transport.close().catch(() => {}) + } +} + export async function GET() { return NextResponse.json({ name: 'copilot-subagents', @@ -100,34 +243,19 @@ export async function GET() { } export async function POST(request: NextRequest) { + let requestId: RequestId = 0 + try { - // API-key-only auth — MCP clients must provide x-api-key header - const apiKeyHeader = request.headers.get('x-api-key') - if (!apiKeyHeader) { - return NextResponse.json( - createError( - 0, - -32000, - 'API key required. Set the x-api-key header with a valid Sim API key.' - ), - { status: 401 } - ) - } + let body: JSONRPCMessage - const authResult = await authenticateApiKeyFromHeader(apiKeyHeader) - if (!authResult.success || !authResult.userId) { - return NextResponse.json(createError(0, -32000, authResult.error || 'Invalid API key'), { - status: 401, + try { + body = (await request.json()) as JSONRPCMessage + } catch { + return NextResponse.json(createError(0, ErrorCode.ParseError, 'Invalid JSON body'), { + status: 400, }) } - // Fire-and-forget last-used update - updateApiKeyLastUsed(authResult.keyId!) 
- - const userId = authResult.userId - - const body = (await request.json()) as JSONRPCMessage - if (isJSONRPCNotification(body)) { return new NextResponse(null, { status: 202 }) } @@ -139,15 +267,52 @@ export async function POST(request: NextRequest) { ) } - const { id, method, params } = body + requestId = body.id + + let userId: string | undefined + + if (body.method === 'tools/call') { + const apiKeyHeader = request.headers.get('x-api-key') + if (!apiKeyHeader) { + return NextResponse.json( + createError( + requestId, + -32000, + 'API key required. Set the x-api-key header with a valid Sim API key.' + ), + { status: 401 } + ) + } + + const authResult = await authenticateApiKeyFromHeader(apiKeyHeader) + if (!authResult.success || !authResult.userId) { + logger.warn('MCP auth failed', { + error: authResult.error, + method: body.method, + }) + + return NextResponse.json( + createError(requestId, -32000, authResult.error || 'Invalid API key'), + { status: 401 } + ) + } + + userId = authResult.userId + + if (authResult.keyId) { + updateApiKeyLastUsed(authResult.keyId).catch((error) => { + logger.warn('Failed to update API key last-used timestamp', { + keyId: authResult.keyId, + error: error instanceof Error ? 
error.message : String(error), + }) + }) + } - // Pre-flight usage limit check for tool calls - if (method === 'tools/call') { const usageCheck = await checkServerSideUsageLimits(userId) if (usageCheck.isExceeded) { return NextResponse.json( createError( - id, + requestId, -32000, `Usage limit exceeded: ${usageCheck.message || 'Upgrade your plan.'}` ), @@ -156,39 +321,20 @@ export async function POST(request: NextRequest) { } } - switch (method) { - case 'initialize': { - const result: InitializeResult = { - protocolVersion: '2024-11-05', - capabilities: { tools: {} }, - serverInfo: { name: 'sim-copilot', version: '1.0.0' }, - instructions: MCP_SERVER_INSTRUCTIONS, - } - return NextResponse.json(createResponse(id, result)) - } - case 'ping': - return NextResponse.json(createResponse(id, {})) - case 'tools/list': - return handleToolsList(id) - case 'tools/call': { - const response = await handleToolsCall( - id, - params as { name: string; arguments?: Record }, - userId - ) - // Track MCP copilot call (fire-and-forget) - trackMcpCopilotCall(userId) - return response - } - default: - return NextResponse.json( - createError(id, ErrorCode.MethodNotFound, `Method not found: ${method}`), - { status: 404 } - ) + const responseMessage = await handleMcpRequestWithSdk(body, userId) + + if (body.method === 'tools/call' && userId) { + trackMcpCopilotCall(userId) + } + + if (!responseMessage) { + return new NextResponse(null, { status: 202 }) } + + return NextResponse.json(responseMessage) } catch (error) { logger.error('Error handling MCP request', { error }) - return NextResponse.json(createError(0, ErrorCode.InternalError, 'Internal error'), { + return NextResponse.json(createError(requestId, ErrorCode.InternalError, 'Internal error'), { status: 500, }) } @@ -210,57 +356,30 @@ function trackMcpCopilotCall(userId: string): void { }) } -async function handleToolsList(id: RequestId): Promise { - const directTools = DIRECT_TOOL_DEFS.map((tool) => ({ - name: tool.name, - 
description: tool.description, - inputSchema: tool.inputSchema, - })) - - const subagentTools = SUBAGENT_TOOL_DEFS.map((tool) => ({ - name: tool.name, - description: tool.description, - inputSchema: tool.inputSchema, - })) - - const result: ListToolsResult = { - tools: [...directTools, ...subagentTools], - } - - return NextResponse.json(createResponse(id, result)) -} - async function handleToolsCall( - id: RequestId, params: { name: string; arguments?: Record }, userId: string -): Promise { +): Promise { const args = params.arguments || {} - // Check if this is a direct tool (fast, no LLM) const directTool = DIRECT_TOOL_DEFS.find((tool) => tool.name === params.name) if (directTool) { - return handleDirectToolCall(id, directTool, args, userId) + return handleDirectToolCall(directTool, args, userId) } - // Check if this is a subagent tool (uses LLM orchestration) const subagentTool = SUBAGENT_TOOL_DEFS.find((tool) => tool.name === params.name) if (subagentTool) { - return handleSubagentToolCall(id, subagentTool, args, userId) + return handleSubagentToolCall(subagentTool, args, userId) } - return NextResponse.json( - createError(id, ErrorCode.MethodNotFound, `Tool not found: ${params.name}`), - { status: 404 } - ) + throw new McpError(ErrorCode.MethodNotFound, `Tool not found: ${params.name}`) } async function handleDirectToolCall( - id: RequestId, toolDef: (typeof DIRECT_TOOL_DEFS)[number], args: Record, userId: string -): Promise { +): Promise { try { const execContext = await prepareExecutionContext(userId, (args.workflowId as string) || '') @@ -274,7 +393,7 @@ async function handleDirectToolCall( const result = await executeToolServerSide(toolCall, execContext) - const response: CallToolResult = { + return { content: [ { type: 'text', @@ -283,14 +402,17 @@ async function handleDirectToolCall( ], isError: !result.success, } - - return NextResponse.json(createResponse(id, response)) } catch (error) { logger.error('Direct tool execution failed', { tool: toolDef.name, 
error }) - return NextResponse.json( - createError(id, ErrorCode.InternalError, `Tool execution failed: ${error}`), - { status: 500 } - ) + return { + content: [ + { + type: 'text', + text: `Tool execution failed: ${error instanceof Error ? error.message : String(error)}`, + }, + ], + isError: true, + } } } @@ -301,10 +423,9 @@ async function handleDirectToolCall( * executes all tools directly. */ async function handleBuildToolCall( - id: RequestId, args: Record, userId: string -): Promise { +): Promise { try { const requestText = (args.request as string) || JSON.stringify(args) const { model } = getCopilotModel('chat') @@ -313,7 +434,7 @@ async function handleBuildToolCall( const resolved = workflowId ? { workflowId } : await resolveWorkflowIdForUser(userId) if (!resolved?.workflowId) { - const response: CallToolResult = { + return { content: [ { type: 'text', @@ -329,7 +450,6 @@ async function handleBuildToolCall( ], isError: true, } - return NextResponse.json(createResponse(id, response)) } const chatId = crypto.randomUUID() @@ -345,6 +465,7 @@ async function handleBuildToolCall( version: SIM_AGENT_VERSION, headless: true, chatId, + source: 'mcp', } const result = await orchestrateCopilotStream(requestPayload, { @@ -363,92 +484,111 @@ async function handleBuildToolCall( error: result.error, } - const response: CallToolResult = { + return { content: [{ type: 'text', text: JSON.stringify(responseData, null, 2) }], isError: !result.success, } - - return NextResponse.json(createResponse(id, response)) } catch (error) { logger.error('Build tool call failed', { error }) - return NextResponse.json(createError(id, ErrorCode.InternalError, `Build failed: ${error}`), { - status: 500, - }) + return { + content: [ + { + type: 'text', + text: `Build failed: ${error instanceof Error ? 
error.message : String(error)}`, + }, + ], + isError: true, + } } } async function handleSubagentToolCall( - id: RequestId, toolDef: (typeof SUBAGENT_TOOL_DEFS)[number], args: Record, userId: string -): Promise { - // Build mode uses the main chat endpoint, not the subagent endpoint +): Promise { if (toolDef.agentId === 'build') { - return handleBuildToolCall(id, args, userId) + return handleBuildToolCall(args, userId) } - const requestText = - (args.request as string) || - (args.message as string) || - (args.error as string) || - JSON.stringify(args) + try { + const requestText = + (args.request as string) || + (args.message as string) || + (args.error as string) || + JSON.stringify(args) + + const context = (args.context as Record) || {} + if (args.plan && !context.plan) { + context.plan = args.plan + } - const context = (args.context as Record) || {} - if (args.plan && !context.plan) { - context.plan = args.plan - } + const { model } = getCopilotModel('chat') + + const result = await orchestrateSubagentStream( + toolDef.agentId, + { + message: requestText, + workflowId: args.workflowId, + workspaceId: args.workspaceId, + context, + model, + headless: true, + source: 'mcp', + }, + { + userId, + workflowId: args.workflowId as string | undefined, + workspaceId: args.workspaceId as string | undefined, + } + ) - const { model } = getCopilotModel('chat') + let responseData: unknown - const result = await orchestrateSubagentStream( - toolDef.agentId, - { - message: requestText, - workflowId: args.workflowId, - workspaceId: args.workspaceId, - context, - model, - headless: true, - source: 'mcp_copilot', - }, - { - userId, - workflowId: args.workflowId as string | undefined, - workspaceId: args.workspaceId as string | undefined, + if (result.structuredResult) { + responseData = { + success: result.structuredResult.success ?? 
result.success, + type: result.structuredResult.type, + summary: result.structuredResult.summary, + data: result.structuredResult.data, + } + } else if (result.error) { + responseData = { + success: false, + error: result.error, + errors: result.errors, + } + } else { + responseData = { + success: result.success, + content: result.content, + } } - ) - let responseData: unknown - if (result.structuredResult) { - responseData = { - success: result.structuredResult.success ?? result.success, - type: result.structuredResult.type, - summary: result.structuredResult.summary, - data: result.structuredResult.data, - } - } else if (result.error) { - responseData = { - success: false, - error: result.error, - errors: result.errors, - } - } else { - responseData = { - success: result.success, - content: result.content, + return { + content: [ + { + type: 'text', + text: JSON.stringify(responseData, null, 2), + }, + ], + isError: !result.success, } - } + } catch (error) { + logger.error('Subagent tool call failed', { + tool: toolDef.name, + agentId: toolDef.agentId, + error, + }) - const response: CallToolResult = { - content: [ - { - type: 'text', - text: JSON.stringify(responseData, null, 2), - }, - ], - isError: !result.success, + return { + content: [ + { + type: 'text', + text: `Subagent call failed: ${error instanceof Error ? 
error.message : String(error)}`, + }, + ], + isError: true, + } } - - return NextResponse.json(createResponse(id, response)) } diff --git a/apps/sim/lib/copilot/config.ts b/apps/sim/lib/copilot/config.ts index 5700e99300..5d7be48093 100644 --- a/apps/sim/lib/copilot/config.ts +++ b/apps/sim/lib/copilot/config.ts @@ -109,14 +109,14 @@ function parseBooleanEnv(value: string | undefined): boolean | null { export const DEFAULT_COPILOT_CONFIG: CopilotConfig = { chat: { defaultProvider: 'anthropic', - defaultModel: 'claude-3-7-sonnet-latest', + defaultModel: 'claude-4.5-opus', temperature: 0.1, maxTokens: 8192, systemPrompt: AGENT_MODE_SYSTEM_PROMPT, }, rag: { defaultProvider: 'anthropic', - defaultModel: 'claude-3-7-sonnet-latest', + defaultModel: 'claude-4.5-opus', temperature: 0.1, maxTokens: 2000, embeddingModel: 'text-embedding-3-small', diff --git a/apps/sim/lib/mcp/oauth-discovery.ts b/apps/sim/lib/mcp/oauth-discovery.ts new file mode 100644 index 0000000000..445d4e51da --- /dev/null +++ b/apps/sim/lib/mcp/oauth-discovery.ts @@ -0,0 +1,59 @@ +import { type NextRequest, NextResponse } from 'next/server' + +function getOrigin(request: NextRequest): string { + return request.nextUrl.origin +} + +export function createMcpAuthorizationServerMetadataResponse(request: NextRequest): NextResponse { + const origin = getOrigin(request) + const resource = `${origin}/api/mcp/copilot` + + return NextResponse.json( + { + issuer: resource, + token_endpoint: `${origin}/api/auth/oauth/token`, + token_endpoint_auth_methods_supported: ['none'], + grant_types_supported: ['authorization_code', 'refresh_token'], + response_types_supported: ['code'], + code_challenge_methods_supported: ['S256'], + scopes_supported: ['mcp:tools'], + resource, + // Non-standard extension for API-key-only clients. 
+ x_sim_auth: { + type: 'api_key', + header: 'x-api-key', + }, + }, + { + headers: { + 'Cache-Control': 'no-store', + }, + } + ) +} + +export function createMcpProtectedResourceMetadataResponse(request: NextRequest): NextResponse { + const origin = getOrigin(request) + const resource = `${origin}/api/mcp/copilot` + const authorizationServerIssuer = `${origin}/api/mcp/copilot` + + return NextResponse.json( + { + resource, + // RFC 9728 expects issuer identifiers here, not metadata URLs. + authorization_servers: [authorizationServerIssuer], + bearer_methods_supported: ['header'], + scopes_supported: ['mcp:tools'], + // Non-standard extension for API-key-only clients. + x_sim_auth: { + type: 'api_key', + header: 'x-api-key', + }, + }, + { + headers: { + 'Cache-Control': 'no-store', + }, + } + ) +} diff --git a/apps/sim/proxy.ts b/apps/sim/proxy.ts index 773700a754..c90df2eec7 100644 --- a/apps/sim/proxy.ts +++ b/apps/sim/proxy.ts @@ -100,11 +100,17 @@ function handleWorkspaceInvitationAPI( */ function handleSecurityFiltering(request: NextRequest): NextResponse | null { const userAgent = request.headers.get('user-agent') || '' - const isWebhookEndpoint = request.nextUrl.pathname.startsWith('/api/webhooks/trigger/') + const { pathname } = request.nextUrl + const isWebhookEndpoint = pathname.startsWith('/api/webhooks/trigger/') + const isMcpEndpoint = pathname.startsWith('/api/mcp/') + const isMcpOauthDiscoveryEndpoint = + pathname.startsWith('/.well-known/oauth-authorization-server') || + pathname.startsWith('/.well-known/oauth-protected-resource') const isSuspicious = SUSPICIOUS_UA_PATTERNS.some((pattern) => pattern.test(userAgent)) - // Block suspicious requests, but exempt webhook endpoints from User-Agent validation - if (isSuspicious && !isWebhookEndpoint) { + // Block suspicious requests, but exempt machine-to-machine endpoints that may + // legitimately omit User-Agent headers (webhooks and MCP protocol discovery/calls). 
+ if (isSuspicious && !isWebhookEndpoint && !isMcpEndpoint && !isMcpOauthDiscoveryEndpoint) { logger.warn('Blocked suspicious request', { userAgent, ip: request.headers.get('x-forwarded-for') || 'unknown', From a73e3516823054e16e9a46870d4509d160554d17 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Fri, 6 Feb 2026 13:54:08 -0800 Subject: [PATCH 39/72] Fix --- apps/sim/app/api/mcp/copilot/route.ts | 334 ++++++++++++++------------ 1 file changed, 181 insertions(+), 153 deletions(-) diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index 5bc19858e9..fdad4af43f 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -1,17 +1,13 @@ import { Server } from '@modelcontextprotocol/sdk/server/index.js' -import type { Transport } from '@modelcontextprotocol/sdk/shared/transport.js' +import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js' import { CallToolRequestSchema, type CallToolResult, ErrorCode, - isJSONRPCNotification, - isJSONRPCRequest, type JSONRPCError, - type JSONRPCMessage, type ListToolsResult, ListToolsRequestSchema, McpError, - type MessageExtraInfo, type RequestId, } from '@modelcontextprotocol/sdk/types.js' import { db } from '@sim/db' @@ -35,6 +31,7 @@ import { resolveWorkflowIdForUser } from '@/lib/workflows/utils' const logger = createLogger('CopilotMcpAPI') export const dynamic = 'force-dynamic' +export const runtime = 'nodejs' /** * MCP Server instructions that guide LLMs on how to use the Sim copilot tools. @@ -78,87 +75,150 @@ When the user refers to a workflow by name or description ("the email one", "my - Variable syntax: \`\` for block outputs, \`{{ENV_VAR}}\` for env vars. 
` -class SingleRequestTransport implements Transport { - private started = false - private outgoing: JSONRPCMessage[] = [] - private waitingResolvers: Array<(message: JSONRPCMessage) => void> = [] +type HeaderMap = Record - onclose?: () => void - onerror?: (error: Error) => void - onmessage?: (message: JSONRPCMessage, extra?: MessageExtraInfo) => void - sessionId?: string +function createError(id: RequestId, code: ErrorCode | number, message: string): JSONRPCError { + return { + jsonrpc: '2.0', + id, + error: { code, message }, + } +} - async start(): Promise { - if (this.started) { - throw new Error('Transport already started') - } - this.started = true +function normalizeRequestHeaders(request: NextRequest): HeaderMap { + const headers: HeaderMap = {} + + request.headers.forEach((value, key) => { + headers[key.toLowerCase()] = value + }) + + return headers +} + +function readHeader(headers: HeaderMap | undefined, name: string): string | undefined { + if (!headers) return undefined + const value = headers[name.toLowerCase()] + if (Array.isArray(value)) { + return value[0] + } + return value +} + +class NextResponseCapture { + private _status = 200 + private _headers = new Headers() + private _chunks: Buffer[] = [] + private _closeHandlers: Array<() => void> = [] + private _errorHandlers: Array<(error: Error) => void> = [] + private _ended = false + private _endedPromise: Promise + private _resolveEnded: (() => void) | null = null + + constructor() { + this._endedPromise = new Promise((resolve) => { + this._resolveEnded = resolve + }) } - async send(message: JSONRPCMessage): Promise { - this.outgoing.push(message) - const resolver = this.waitingResolvers.shift() - if (resolver) { - resolver(message) + writeHead(status: number, headers?: Record): this { + this._status = status + + if (headers) { + Object.entries(headers).forEach(([key, value]) => { + if (Array.isArray(value)) { + this._headers.set(key, value.join(', ')) + } else { + this._headers.set(key, 
String(value)) + } + }) } + + return this } - async close(): Promise { - this.onclose?.() + flushHeaders(): this { + return this } - async dispatch(message: JSONRPCMessage, extra?: MessageExtraInfo): Promise { - if (!this.onmessage) { - throw new Error('Transport is not connected to an MCP server') + write(chunk: unknown): boolean { + if (typeof chunk === 'string') { + this._chunks.push(Buffer.from(chunk)) + return true } - await Promise.resolve(this.onmessage(message, extra)) - } + if (chunk instanceof Uint8Array) { + this._chunks.push(Buffer.from(chunk)) + return true + } - consumeResponse(): JSONRPCMessage | null { - if (this.outgoing.length === 0) { - return null + if (chunk !== undefined && chunk !== null) { + this._chunks.push(Buffer.from(String(chunk))) } - const [firstResponse] = this.outgoing - this.outgoing = [] - return firstResponse + return true } - async waitForResponse(timeoutMs = 5000): Promise { - const immediate = this.consumeResponse() - if (immediate) { - return immediate + end(chunk?: unknown): this { + if (chunk !== undefined) { + this.write(chunk) } - return new Promise((resolve) => { - const timeout = setTimeout(() => { - const index = this.waitingResolvers.indexOf(resolver) - if (index >= 0) { - this.waitingResolvers.splice(index, 1) - } - resolve(null) - }, timeoutMs) + this._ended = true + this._resolveEnded?.() - const resolver = (message: JSONRPCMessage) => { - clearTimeout(timeout) - resolve(message) + this._closeHandlers.forEach((handler) => { + try { + handler() + } catch (error) { + this._errorHandlers.forEach((errorHandler) => { + errorHandler(error instanceof Error ? 
error : new Error(String(error))) + }) } - - this.waitingResolvers.push(resolver) }) + + return this } -} -function createError(id: RequestId, code: ErrorCode | number, message: string): JSONRPCError { - return { - jsonrpc: '2.0', - id, - error: { code, message }, + async waitForEnd(timeoutMs = 30000): Promise { + if (this._ended) return + + await Promise.race([ + this._endedPromise, + new Promise((resolve) => { + setTimeout(resolve, timeoutMs) + }), + ]) + } + + on(event: 'close' | 'error', handler: (() => void) | ((error: Error) => void)): this { + if (event === 'close') { + this._closeHandlers.push(handler as () => void) + } + + if (event === 'error') { + this._errorHandlers.push(handler as (error: Error) => void) + } + + return this + } + + toNextResponse(): NextResponse { + if (this._chunks.length === 0) { + return new NextResponse(null, { + status: this._status, + headers: this._headers, + }) + } + + const body = Buffer.concat(this._chunks) + return new NextResponse(body, { + status: this._status, + headers: this._headers, + }) } } -function buildMcpServer(userId?: string): Server { +function buildMcpServer(): Server { const server = new Server( { name: 'sim-copilot', @@ -190,43 +250,88 @@ function buildMcpServer(userId?: string): Server { return result }) - server.setRequestHandler(CallToolRequestSchema, async (request) => { - if (!userId) { + server.setRequestHandler(CallToolRequestSchema, async (request, extra) => { + const headers = (extra.requestInfo?.headers || {}) as HeaderMap + const apiKeyHeader = readHeader(headers, 'x-api-key') + + if (!apiKeyHeader) { throw new McpError( - ErrorCode.InvalidRequest, + -32000, 'API key required. Set the x-api-key header with a valid Sim API key.' 
) } + const authResult = await authenticateApiKeyFromHeader(apiKeyHeader) + if (!authResult.success || !authResult.userId) { + logger.warn('MCP auth failed', { + error: authResult.error, + method: request.method, + }) + + throw new McpError(-32000, authResult.error || 'Invalid API key') + } + + if (authResult.keyId) { + updateApiKeyLastUsed(authResult.keyId).catch((error) => { + logger.warn('Failed to update API key last-used timestamp', { + keyId: authResult.keyId, + error: error instanceof Error ? error.message : String(error), + }) + }) + } + + const usageCheck = await checkServerSideUsageLimits(authResult.userId) + if (usageCheck.isExceeded) { + throw new McpError( + -32000, + `Usage limit exceeded: ${usageCheck.message || 'Upgrade your plan.'}` + ) + } + const params = request.params as { name?: string; arguments?: Record } | undefined if (!params?.name) { throw new McpError(ErrorCode.InvalidParams, 'Tool name required') } - return handleToolsCall( + const result = await handleToolsCall( { name: params.name, arguments: params.arguments, }, - userId + authResult.userId ) + + trackMcpCopilotCall(authResult.userId) + + return result }) return server } async function handleMcpRequestWithSdk( - message: JSONRPCMessage, - userId?: string -): Promise { - const server = buildMcpServer(userId) - const transport = new SingleRequestTransport() + request: NextRequest, + parsedBody: unknown +): Promise { + const server = buildMcpServer() + const transport = new StreamableHTTPServerTransport({ + sessionIdGenerator: undefined, + enableJsonResponse: true, + }) + + const responseCapture = new NextResponseCapture() + + const requestAdapter = { + method: request.method, + headers: normalizeRequestHeaders(request), + } await server.connect(transport) try { - await transport.dispatch(message) - return transport.waitForResponse() + await transport.handleRequest(requestAdapter as any, responseCapture as any, parsedBody) + await responseCapture.waitForEnd() + return 
responseCapture.toNextResponse() } finally { await server.close().catch(() => {}) await transport.close().catch(() => {}) @@ -243,98 +348,21 @@ export async function GET() { } export async function POST(request: NextRequest) { - let requestId: RequestId = 0 - try { - let body: JSONRPCMessage + let parsedBody: unknown try { - body = (await request.json()) as JSONRPCMessage + parsedBody = await request.json() } catch { return NextResponse.json(createError(0, ErrorCode.ParseError, 'Invalid JSON body'), { status: 400, }) } - if (isJSONRPCNotification(body)) { - return new NextResponse(null, { status: 202 }) - } - - if (!isJSONRPCRequest(body)) { - return NextResponse.json( - createError(0, ErrorCode.InvalidRequest, 'Invalid JSON-RPC message'), - { status: 400 } - ) - } - - requestId = body.id - - let userId: string | undefined - - if (body.method === 'tools/call') { - const apiKeyHeader = request.headers.get('x-api-key') - if (!apiKeyHeader) { - return NextResponse.json( - createError( - requestId, - -32000, - 'API key required. Set the x-api-key header with a valid Sim API key.' - ), - { status: 401 } - ) - } - - const authResult = await authenticateApiKeyFromHeader(apiKeyHeader) - if (!authResult.success || !authResult.userId) { - logger.warn('MCP auth failed', { - error: authResult.error, - method: body.method, - }) - - return NextResponse.json( - createError(requestId, -32000, authResult.error || 'Invalid API key'), - { status: 401 } - ) - } - - userId = authResult.userId - - if (authResult.keyId) { - updateApiKeyLastUsed(authResult.keyId).catch((error) => { - logger.warn('Failed to update API key last-used timestamp', { - keyId: authResult.keyId, - error: error instanceof Error ? 
error.message : String(error), - }) - }) - } - - const usageCheck = await checkServerSideUsageLimits(userId) - if (usageCheck.isExceeded) { - return NextResponse.json( - createError( - requestId, - -32000, - `Usage limit exceeded: ${usageCheck.message || 'Upgrade your plan.'}` - ), - { status: 402 } - ) - } - } - - const responseMessage = await handleMcpRequestWithSdk(body, userId) - - if (body.method === 'tools/call' && userId) { - trackMcpCopilotCall(userId) - } - - if (!responseMessage) { - return new NextResponse(null, { status: 202 }) - } - - return NextResponse.json(responseMessage) + return await handleMcpRequestWithSdk(request, parsedBody) } catch (error) { logger.error('Error handling MCP request', { error }) - return NextResponse.json(createError(requestId, ErrorCode.InternalError, 'Internal error'), { + return NextResponse.json(createError(0, ErrorCode.InternalError, 'Internal error'), { status: 500, }) } From 67c22716aa099f09802a821b4eb63f647d6a460c Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Fri, 6 Feb 2026 14:25:51 -0800 Subject: [PATCH 40/72] Updates --- apps/sim/app/api/mcp/copilot/route.ts | 142 ++++++++++++++++++++------ 1 file changed, 109 insertions(+), 33 deletions(-) diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index fdad4af43f..04f21e84b0 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -13,6 +13,7 @@ import { import { db } from '@sim/db' import { userStats } from '@sim/db/schema' import { createLogger } from '@sim/logger' +import { randomUUID } from 'node:crypto' import { eq, sql } from 'drizzle-orm' import { type NextRequest, NextResponse } from 'next/server' import { authenticateApiKeyFromHeader, updateApiKeyLastUsed } from '@/lib/api-key/service' @@ -107,17 +108,81 @@ function readHeader(headers: HeaderMap | undefined, name: string): string | unde class NextResponseCapture { private _status = 200 private _headers = new Headers() - 
private _chunks: Buffer[] = [] + private _controller: ReadableStreamDefaultController | null = null + private _pendingChunks: Uint8Array[] = [] private _closeHandlers: Array<() => void> = [] private _errorHandlers: Array<(error: Error) => void> = [] + private _headersWritten = false private _ended = false + private _headersPromise: Promise + private _resolveHeaders: (() => void) | null = null private _endedPromise: Promise private _resolveEnded: (() => void) | null = null + readonly readable: ReadableStream constructor() { + this._headersPromise = new Promise((resolve) => { + this._resolveHeaders = resolve + }) + this._endedPromise = new Promise((resolve) => { this._resolveEnded = resolve }) + + this.readable = new ReadableStream({ + start: (controller) => { + this._controller = controller + if (this._pendingChunks.length > 0) { + for (const chunk of this._pendingChunks) { + controller.enqueue(chunk) + } + this._pendingChunks = [] + } + }, + cancel: () => { + this._ended = true + this._resolveEnded?.() + this.triggerCloseHandlers() + }, + }) + } + + private markHeadersWritten(): void { + if (this._headersWritten) return + this._headersWritten = true + this._resolveHeaders?.() + } + + private triggerCloseHandlers(): void { + for (const handler of this._closeHandlers) { + try { + handler() + } catch (error) { + this.triggerErrorHandlers(error instanceof Error ? 
error : new Error(String(error))) + } + } + } + + private triggerErrorHandlers(error: Error): void { + for (const errorHandler of this._errorHandlers) { + errorHandler(error) + } + } + + private normalizeChunk(chunk: unknown): Uint8Array | null { + if (typeof chunk === 'string') { + return new TextEncoder().encode(chunk) + } + + if (chunk instanceof Uint8Array) { + return chunk + } + + if (chunk === undefined || chunk === null) { + return null + } + + return new TextEncoder().encode(String(chunk)) } writeHead(status: number, headers?: Record): this { @@ -133,52 +198,66 @@ class NextResponseCapture { }) } + this.markHeadersWritten() return this } flushHeaders(): this { + this.markHeadersWritten() return this } write(chunk: unknown): boolean { - if (typeof chunk === 'string') { - this._chunks.push(Buffer.from(chunk)) - return true - } + const normalized = this.normalizeChunk(chunk) + if (!normalized) return true - if (chunk instanceof Uint8Array) { - this._chunks.push(Buffer.from(chunk)) - return true - } + this.markHeadersWritten() - if (chunk !== undefined && chunk !== null) { - this._chunks.push(Buffer.from(String(chunk))) + if (this._controller) { + try { + this._controller.enqueue(normalized) + } catch (error) { + this.triggerErrorHandlers(error instanceof Error ? error : new Error(String(error))) + } + } else { + this._pendingChunks.push(normalized) } return true } end(chunk?: unknown): this { - if (chunk !== undefined) { - this.write(chunk) - } + if (chunk !== undefined) this.write(chunk) + this.markHeadersWritten() + if (this._ended) return this this._ended = true this._resolveEnded?.() - this._closeHandlers.forEach((handler) => { + if (this._controller) { try { - handler() + this._controller.close() } catch (error) { - this._errorHandlers.forEach((errorHandler) => { - errorHandler(error instanceof Error ? error : new Error(String(error))) - }) + this.triggerErrorHandlers(error instanceof Error ? 
error : new Error(String(error))) } - }) + } + + this.triggerCloseHandlers() return this } + async waitForHeaders(timeoutMs = 30000): Promise { + if (this._headersWritten) return + + await Promise.race([ + this._headersPromise, + new Promise((resolve) => { + setTimeout(resolve, timeoutMs) + }), + ]) + } + async waitForEnd(timeoutMs = 30000): Promise { if (this._ended) return @@ -203,15 +282,7 @@ class NextResponseCapture { } toNextResponse(): NextResponse { - if (this._chunks.length === 0) { - return new NextResponse(null, { - status: this._status, - headers: this._headers, - }) - } - - const body = Buffer.concat(this._chunks) - return new NextResponse(body, { + return new NextResponse(this.readable, { status: this._status, headers: this._headers, }) @@ -320,7 +391,6 @@ async function handleMcpRequestWithSdk( }) const responseCapture = new NextResponseCapture() - const requestAdapter = { method: request.method, headers: normalizeRequestHeaders(request), @@ -330,6 +400,7 @@ async function handleMcpRequestWithSdk( try { await transport.handleRequest(requestAdapter as any, responseCapture as any, parsedBody) + await responseCapture.waitForHeaders() await responseCapture.waitForEnd() return responseCapture.toNextResponse() } finally { @@ -368,6 +439,11 @@ export async function POST(request: NextRequest) { } } +export async function DELETE(request: NextRequest) { + void request + return NextResponse.json(createError(0, -32000, 'Method not allowed.'), { status: 405 }) +} + /** * Increment MCP copilot call counter in userStats (fire-and-forget). 
*/ @@ -412,7 +488,7 @@ async function handleDirectToolCall( const execContext = await prepareExecutionContext(userId, (args.workflowId as string) || '') const toolCall = { - id: crypto.randomUUID(), + id: randomUUID(), name: toolDef.toolId, status: 'pending' as const, params: args as Record, @@ -480,7 +556,7 @@ async function handleBuildToolCall( } } - const chatId = crypto.randomUUID() + const chatId = randomUUID() const requestPayload = { message: requestText, @@ -489,7 +565,7 @@ async function handleBuildToolCall( model, mode: 'agent', commands: ['fast'], - messageId: crypto.randomUUID(), + messageId: randomUUID(), version: SIM_AGENT_VERSION, headless: true, chatId, From a22045587bfff22ab2d9835536dacc90c5f05524 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Fri, 6 Feb 2026 16:11:02 -0800 Subject: [PATCH 41/72] Clean up mcp --- apps/sim/app/api/mcp/copilot/route.ts | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index 04f21e84b0..5ed6087d19 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -410,12 +410,10 @@ async function handleMcpRequestWithSdk( } export async function GET() { - return NextResponse.json({ - name: 'copilot-subagents', - version: '1.0.0', - protocolVersion: '2024-11-05', - capabilities: { tools: {} }, - }) + // Return 405 to signal that server-initiated SSE notifications are not + // supported. Without this, clients like mcp-remote will repeatedly + // reconnect trying to open an SSE stream, flooding the logs with GETs. 
+ return new NextResponse(null, { status: 405 }) } export async function POST(request: NextRequest) { From 6735eaafe90126e1fbb9e69cd6531b54d72d3cf8 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Fri, 6 Feb 2026 16:37:19 -0800 Subject: [PATCH 42/72] Fix copilot mcp tool names to be sim prefixed --- apps/sim/app/api/mcp/copilot/route.ts | 12 ++-- apps/sim/lib/copilot/tools/mcp/definitions.ts | 56 +++++++++---------- 2 files changed, 34 insertions(+), 34 deletions(-) diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index 5ed6087d19..67f985942c 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -48,16 +48,16 @@ Sim is a workflow automation platform. Workflows are visual pipelines of connect 1. \`list_workspaces\` → know where to work 2. \`create_workflow(name, workspaceId)\` → get a workflowId -3. \`copilot_build(request, workflowId)\` → plan and build in one pass -4. \`copilot_test(request, workflowId)\` → verify it works -5. \`copilot_deploy("deploy as api", workflowId)\` → make it accessible externally (optional) +3. \`sim_build(request, workflowId)\` → plan and build in one pass +4. \`sim_test(request, workflowId)\` → verify it works +5. \`sim_deploy("deploy as api", workflowId)\` → make it accessible externally (optional) -For fine-grained control, use \`copilot_plan\` → \`copilot_edit\` instead of \`copilot_build\`. Pass the plan object from copilot_plan EXACTLY as-is to copilot_edit's context.plan field. +For fine-grained control, use \`sim_plan\` → \`sim_edit\` instead of \`sim_build\`. Pass the plan object from sim_plan EXACTLY as-is to sim_edit's context.plan field. ### Working with Existing Workflows When the user refers to a workflow by name or description ("the email one", "my Slack bot"): -1. Use \`copilot_discovery\` to find it by functionality +1. Use \`sim_discovery\` to find it by functionality 2. Or use \`list_workflows\` and match by name 3. 
Then pass the workflowId to other tools @@ -72,7 +72,7 @@ When the user refers to a workflow by name or description ("the email one", "my - You can test workflows immediately after building — deployment is only needed for external access (API, chat, MCP). - All copilot tools (build, plan, edit, deploy, test, debug) require workflowId. -- If the user reports errors → use \`copilot_debug\` first, don't guess. +- If the user reports errors → use \`sim_debug\` first, don't guess. - Variable syntax: \`\` for block outputs, \`{{ENV_VAR}}\` for env vars. ` diff --git a/apps/sim/lib/copilot/tools/mcp/definitions.ts b/apps/sim/lib/copilot/tools/mcp/definitions.ts index ad0f968fc9..a2876a56a8 100644 --- a/apps/sim/lib/copilot/tools/mcp/definitions.ts +++ b/apps/sim/lib/copilot/tools/mcp/definitions.ts @@ -82,7 +82,7 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ name: 'create_workflow', toolId: 'create_workflow', description: - 'Create a new empty workflow. Returns the new workflow ID. Always call this FIRST before copilot_build for new workflows. Use workspaceId to place it in a specific workspace.', + 'Create a new empty workflow. Returns the new workflow ID. Always call this FIRST before sim_build for new workflows. Use workspaceId to place it in a specific workspace.', inputSchema: { type: 'object', properties: { @@ -351,7 +351,7 @@ export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ export const SUBAGENT_TOOL_DEFS: SubagentToolDef[] = [ { - name: 'copilot_build', + name: 'sim_build', agentId: 'build', description: `Build a workflow end-to-end in a single step. This is the fast mode equivalent for headless/MCP usage. @@ -372,15 +372,15 @@ CAN DO: - Set environment variables and workflow variables CANNOT DO: -- Run or test workflows (use copilot_test separately) -- Deploy workflows (use copilot_deploy separately) +- Run or test workflows (use sim_test separately) +- Deploy workflows (use sim_deploy separately) WORKFLOW: 1. 
Call create_workflow to get a workflowId (for new workflows) -2. Call copilot_build with the request and workflowId +2. Call sim_build with the request and workflowId 3. Build agent gathers info and builds in one pass -4. Call copilot_test to verify it works -5. Optionally call copilot_deploy to make it externally accessible`, +4. Call sim_test to verify it works +5. Optionally call sim_deploy to make it externally accessible`, inputSchema: { type: 'object', properties: { @@ -399,7 +399,7 @@ WORKFLOW: }, }, { - name: 'copilot_discovery', + name: 'sim_discovery', agentId: 'discovery', description: `Find workflows by their contents or functionality when the user doesn't know the exact name or ID. @@ -424,9 +424,9 @@ DO NOT USE (use direct tools instead): }, }, { - name: 'copilot_plan', + name: 'sim_plan', agentId: 'plan', - description: `Plan workflow changes by gathering required information. For most cases, prefer copilot_build which combines planning and editing in one step. + description: `Plan workflow changes by gathering required information. For most cases, prefer sim_build which combines planning and editing in one step. USE THIS WHEN: - You need fine-grained control over the build process @@ -439,7 +439,7 @@ WORKFLOW ID (REQUIRED): This tool gathers information about available blocks, credentials, and the current workflow state. RETURNS: A plan object containing block configurations, connections, and technical details. -IMPORTANT: Pass the returned plan EXACTLY to copilot_edit - do not modify or summarize it.`, +IMPORTANT: Pass the returned plan EXACTLY to sim_edit - do not modify or summarize it.`, inputSchema: { type: 'object', properties: { @@ -458,18 +458,18 @@ IMPORTANT: Pass the returned plan EXACTLY to copilot_edit - do not modify or sum }, }, { - name: 'copilot_edit', + name: 'sim_edit', agentId: 'edit', - description: `Execute a workflow plan from copilot_plan. For most cases, prefer copilot_build which combines planning and editing in one step. 
+ description: `Execute a workflow plan from sim_plan. For most cases, prefer sim_build which combines planning and editing in one step. WORKFLOW ID (REQUIRED): - You MUST provide the workflowId parameter PLAN (REQUIRED): -- Pass the EXACT plan object from copilot_plan in the context.plan field +- Pass the EXACT plan object from sim_plan in the context.plan field - Do NOT modify, summarize, or interpret the plan - pass it verbatim -After copilot_edit completes, you can test immediately with copilot_test, or deploy with copilot_deploy to make it accessible externally.`, +After sim_edit completes, you can test immediately with sim_test, or deploy with sim_deploy to make it accessible externally.`, inputSchema: { type: 'object', properties: { @@ -482,7 +482,7 @@ After copilot_edit completes, you can test immediately with copilot_test, or dep plan: { type: 'object', description: - 'The plan object from copilot_plan. Pass it EXACTLY as returned, do not modify.', + 'The plan object from sim_plan. Pass it EXACTLY as returned, do not modify.', }, context: { type: 'object', @@ -494,7 +494,7 @@ After copilot_edit completes, you can test immediately with copilot_test, or dep }, }, { - name: 'copilot_deploy', + name: 'sim_deploy', agentId: 'deploy', description: `Deploy a workflow to make it accessible externally. Workflows can be tested without deploying, but deployment is needed for API access, chat UIs, or MCP exposure. @@ -527,7 +527,7 @@ ALSO CAN: }, }, { - name: 'copilot_test', + name: 'sim_test', agentId: 'test', description: `Run a workflow and verify its outputs. Works on both deployed and undeployed (draft) workflows. Use after building to verify correctness. @@ -550,7 +550,7 @@ Supports full and partial execution: }, }, { - name: 'copilot_debug', + name: 'sim_debug', agentId: 'debug', description: 'Diagnose errors or unexpected workflow behavior. Provide the error message and workflowId. 
Returns root cause analysis and fix suggestions.', @@ -565,7 +565,7 @@ Supports full and partial execution: }, }, { - name: 'copilot_auth', + name: 'sim_auth', agentId: 'auth', description: 'Check OAuth connection status, list connected services, and initiate new OAuth connections. Use when a workflow needs third-party service access (Google, Slack, GitHub, etc.).', @@ -579,7 +579,7 @@ Supports full and partial execution: }, }, { - name: 'copilot_knowledge', + name: 'sim_knowledge', agentId: 'knowledge', description: 'Manage knowledge bases for RAG-powered document retrieval. Supports listing, creating, updating, and deleting knowledge bases. Knowledge bases can be attached to agent blocks for context-aware responses.', @@ -593,7 +593,7 @@ Supports full and partial execution: }, }, { - name: 'copilot_custom_tool', + name: 'sim_custom_tool', agentId: 'custom_tool', description: 'Manage custom tools (reusable API integrations). Supports listing, creating, updating, and deleting custom tools. Custom tools can be added to agent blocks as callable functions.', @@ -607,7 +607,7 @@ Supports full and partial execution: }, }, { - name: 'copilot_info', + name: 'sim_info', agentId: 'info', description: "Inspect a workflow's blocks, connections, outputs, variables, and metadata. Use for questions about the Sim platform itself — how blocks work, what integrations are available, platform concepts, etc. Always provide workflowId to scope results to a specific workflow.", @@ -622,7 +622,7 @@ Supports full and partial execution: }, }, { - name: 'copilot_workflow', + name: 'sim_workflow', agentId: 'workflow', description: 'Manage workflow-level configuration: environment variables, settings, scheduling, and deployment status. 
Use for any data about a specific workflow — its settings, credentials, variables, or deployment state.', @@ -637,10 +637,10 @@ Supports full and partial execution: }, }, { - name: 'copilot_research', + name: 'sim_research', agentId: 'research', description: - 'Research external APIs and documentation. Use when you need to understand third-party services, external APIs, authentication flows, or data formats OUTSIDE of Sim. For questions about Sim itself, use copilot_info instead.', + 'Research external APIs and documentation. Use when you need to understand third-party services, external APIs, authentication flows, or data formats OUTSIDE of Sim. For questions about Sim itself, use sim_info instead.', inputSchema: { type: 'object', properties: { @@ -651,7 +651,7 @@ Supports full and partial execution: }, }, { - name: 'copilot_superagent', + name: 'sim_superagent', agentId: 'superagent', description: 'Execute direct actions NOW: send an email, post to Slack, make an API call, etc. Use when the user wants to DO something immediately rather than build a workflow for it.', @@ -665,7 +665,7 @@ Supports full and partial execution: }, }, { - name: 'copilot_platform', + name: 'sim_platform', agentId: 'tour', description: 'Get help with Sim platform navigation, keyboard shortcuts, and UI actions. Use when the user asks "how do I..." 
about the Sim editor, wants keyboard shortcuts, or needs to know what actions are available in the UI.', From 4d4d00252cb5d8738b836949814c6e4baa59fc13 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Fri, 6 Feb 2026 17:31:52 -0800 Subject: [PATCH 43/72] Add opus 4.6 --- apps/docs/content/docs/en/copilot/index.mdx | 2 +- apps/sim/app/api/copilot/chat/route.ts | 2 +- apps/sim/app/api/copilot/user-models/route.ts | 1 + apps/sim/app/api/mcp/copilot/route.ts | 136 ++++++++++++++---- .../components/user-input/constants.ts | 1 + apps/sim/lib/copilot/config.ts | 4 +- apps/sim/lib/copilot/models.ts | 1 + apps/sim/lib/core/config/feature-flags.ts | 6 +- apps/sim/stores/panel/copilot/store.ts | 2 +- 9 files changed, 119 insertions(+), 36 deletions(-) diff --git a/apps/docs/content/docs/en/copilot/index.mdx b/apps/docs/content/docs/en/copilot/index.mdx index e222d8e552..3bdb0a579f 100644 --- a/apps/docs/content/docs/en/copilot/index.mdx +++ b/apps/docs/content/docs/en/copilot/index.mdx @@ -56,7 +56,7 @@ Switch between modes using the mode selector at the bottom of the input area. Select your preferred AI model using the model selector at the bottom right of the input area. 
**Available Models:** -- Claude 4.5 Opus, Sonnet (default), Haiku +- Claude 4.6 Opus (default), 4.5 Opus, Sonnet, Haiku - GPT 5.2 Codex, Pro - Gemini 3 Pro diff --git a/apps/sim/app/api/copilot/chat/route.ts b/apps/sim/app/api/copilot/chat/route.ts index dbd97eccf6..248298348c 100644 --- a/apps/sim/app/api/copilot/chat/route.ts +++ b/apps/sim/app/api/copilot/chat/route.ts @@ -43,7 +43,7 @@ const ChatMessageSchema = z.object({ chatId: z.string().optional(), workflowId: z.string().optional(), workflowName: z.string().optional(), - model: z.enum(COPILOT_MODEL_IDS).optional().default('claude-4.5-opus'), + model: z.enum(COPILOT_MODEL_IDS).optional().default('claude-4.6-opus'), mode: z.enum(COPILOT_REQUEST_MODES).optional().default('agent'), prefetch: z.boolean().optional(), createNewChat: z.boolean().optional().default(false), diff --git a/apps/sim/app/api/copilot/user-models/route.ts b/apps/sim/app/api/copilot/user-models/route.ts index ead14a5e9d..86e31c747f 100644 --- a/apps/sim/app/api/copilot/user-models/route.ts +++ b/apps/sim/app/api/copilot/user-models/route.ts @@ -28,6 +28,7 @@ const DEFAULT_ENABLED_MODELS: Record = { 'claude-4-sonnet': false, 'claude-4.5-haiku': true, 'claude-4.5-sonnet': true, + 'claude-4.6-opus': true, 'claude-4.5-opus': true, 'claude-4.1-opus': false, 'gemini-3-pro': true, diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index 67f985942c..7692ef5327 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -16,10 +16,11 @@ import { createLogger } from '@sim/logger' import { randomUUID } from 'node:crypto' import { eq, sql } from 'drizzle-orm' import { type NextRequest, NextResponse } from 'next/server' -import { authenticateApiKeyFromHeader, updateApiKeyLastUsed } from '@/lib/api-key/service' -import { checkServerSideUsageLimits } from '@/lib/billing/calculations/usage-monitor' +import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription' 
import { getCopilotModel } from '@/lib/copilot/config' -import { SIM_AGENT_VERSION } from '@/lib/copilot/constants' +import { SIM_AGENT_API_URL, SIM_AGENT_VERSION } from '@/lib/copilot/constants' +import { RateLimiter } from '@/lib/core/rate-limiter' +import { env } from '@/lib/core/config/env' import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator' import { orchestrateSubagentStream } from '@/lib/copilot/orchestrator/subagent' import { @@ -30,10 +31,78 @@ import { DIRECT_TOOL_DEFS, SUBAGENT_TOOL_DEFS } from '@/lib/copilot/tools/mcp/de import { resolveWorkflowIdForUser } from '@/lib/workflows/utils' const logger = createLogger('CopilotMcpAPI') +const mcpRateLimiter = new RateLimiter() export const dynamic = 'force-dynamic' export const runtime = 'nodejs' +interface CopilotKeyAuthResult { + success: boolean + userId?: string + error?: string +} + +/** + * Validates a copilot API key by forwarding it to the Go copilot service's + * `/api/validate-key` endpoint. Returns the associated userId on success. + */ +async function authenticateCopilotApiKey(apiKey: string): Promise { + try { + const internalSecret = env.INTERNAL_API_SECRET + if (!internalSecret) { + logger.error('INTERNAL_API_SECRET not configured') + return { success: false, error: 'Server configuration error' } + } + + const res = await fetch(`${SIM_AGENT_API_URL}/api/validate-key`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-api-key': internalSecret, + }, + body: JSON.stringify({ targetApiKey: apiKey }), + signal: AbortSignal.timeout(10_000), + }) + + if (!res.ok) { + const body = await res.json().catch(() => null) + const upstream = (body as Record)?.message + const status = res.status + + if (status === 401 || status === 403) { + return { + success: false, + error: `Invalid Copilot API key. 
Generate a new key in Settings → Copilot and set it in the x-api-key header.`, + } + } + if (status === 402) { + return { + success: false, + error: `Usage limit exceeded for this Copilot API key. Upgrade your plan or wait for your quota to reset.`, + } + } + + return { success: false, error: String(upstream ?? 'Copilot API key validation failed') } + } + + const data = (await res.json()) as { ok?: boolean; userId?: string } + if (!data.ok || !data.userId) { + return { + success: false, + error: 'Invalid Copilot API key. Generate a new key in Settings → Copilot.', + } + } + + return { success: true, userId: data.userId } + } catch (error) { + logger.error('Copilot API key validation failed', { error }) + return { + success: false, + error: 'Could not validate Copilot API key — the authentication service is temporarily unreachable. This is NOT a problem with the API key itself; please retry shortly.', + } + } +} + /** * MCP Server instructions that guide LLMs on how to use the Sim copilot tools. * This is included in the initialize response to help external LLMs understand @@ -326,37 +395,48 @@ function buildMcpServer(): Server { const apiKeyHeader = readHeader(headers, 'x-api-key') if (!apiKeyHeader) { - throw new McpError( - -32000, - 'API key required. Set the x-api-key header with a valid Sim API key.' - ) + return { + content: [ + { + type: 'text' as const, + text: 'AUTHENTICATION ERROR: No Copilot API key provided. The user must set their Copilot API key in the x-api-key header. They can generate one in the Sim app under Settings → Copilot. 
Do NOT retry — this will fail until the key is configured.', + }, + ], + isError: true, + } } - const authResult = await authenticateApiKeyFromHeader(apiKeyHeader) + const authResult = await authenticateCopilotApiKey(apiKeyHeader) if (!authResult.success || !authResult.userId) { - logger.warn('MCP auth failed', { - error: authResult.error, - method: request.method, - }) - - throw new McpError(-32000, authResult.error || 'Invalid API key') + logger.warn('MCP copilot key auth failed', { method: request.method }) + return { + content: [ + { + type: 'text' as const, + text: `AUTHENTICATION ERROR: ${authResult.error} Do NOT retry — this will fail until the user fixes their Copilot API key.`, + }, + ], + isError: true, + } } - if (authResult.keyId) { - updateApiKeyLastUsed(authResult.keyId).catch((error) => { - logger.warn('Failed to update API key last-used timestamp', { - keyId: authResult.keyId, - error: error instanceof Error ? error.message : String(error), - }) - }) - } + const rateLimitResult = await mcpRateLimiter.checkRateLimitWithSubscription( + authResult.userId, + await getHighestPrioritySubscription(authResult.userId), + 'api-endpoint', + false + ) - const usageCheck = await checkServerSideUsageLimits(authResult.userId) - if (usageCheck.isExceeded) { - throw new McpError( - -32000, - `Usage limit exceeded: ${usageCheck.message || 'Upgrade your plan.'}` - ) + if (!rateLimitResult.allowed) { + return { + content: [ + { + type: 'text' as const, + text: `RATE LIMIT: Too many requests. 
Please wait and retry after ${rateLimitResult.resetAt.toISOString()}.`, + }, + ], + isError: true, + } } const params = request.params as { name?: string; arguments?: Record } | undefined diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/constants.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/constants.ts index b98af5dd21..faff318f9f 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/constants.ts +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/constants.ts @@ -246,6 +246,7 @@ export function getCommandDisplayLabel(commandId: string): string { * Model configuration options */ export const MODEL_OPTIONS = [ + { value: 'claude-4.6-opus', label: 'Claude 4.6 Opus' }, { value: 'claude-4.5-opus', label: 'Claude 4.5 Opus' }, { value: 'claude-4.5-sonnet', label: 'Claude 4.5 Sonnet' }, { value: 'claude-4.5-haiku', label: 'Claude 4.5 Haiku' }, diff --git a/apps/sim/lib/copilot/config.ts b/apps/sim/lib/copilot/config.ts index 5d7be48093..d82a630129 100644 --- a/apps/sim/lib/copilot/config.ts +++ b/apps/sim/lib/copilot/config.ts @@ -109,14 +109,14 @@ function parseBooleanEnv(value: string | undefined): boolean | null { export const DEFAULT_COPILOT_CONFIG: CopilotConfig = { chat: { defaultProvider: 'anthropic', - defaultModel: 'claude-4.5-opus', + defaultModel: 'claude-4.6-opus', temperature: 0.1, maxTokens: 8192, systemPrompt: AGENT_MODE_SYSTEM_PROMPT, }, rag: { defaultProvider: 'anthropic', - defaultModel: 'claude-4.5-opus', + defaultModel: 'claude-4.6-opus', temperature: 0.1, maxTokens: 2000, embeddingModel: 'text-embedding-3-small', diff --git a/apps/sim/lib/copilot/models.ts b/apps/sim/lib/copilot/models.ts index 83a90169be..90d43f1b08 100644 --- a/apps/sim/lib/copilot/models.ts +++ 
b/apps/sim/lib/copilot/models.ts @@ -18,6 +18,7 @@ export const COPILOT_MODEL_IDS = [ 'claude-4-sonnet', 'claude-4.5-haiku', 'claude-4.5-sonnet', + 'claude-4.6-opus', 'claude-4.5-opus', 'claude-4.1-opus', 'gemini-3-pro', diff --git a/apps/sim/lib/core/config/feature-flags.ts b/apps/sim/lib/core/config/feature-flags.ts index 9f746c5b12..6e65bebd4e 100644 --- a/apps/sim/lib/core/config/feature-flags.ts +++ b/apps/sim/lib/core/config/feature-flags.ts @@ -21,9 +21,9 @@ export const isTest = env.NODE_ENV === 'test' /** * Is this the hosted version of the application */ -export const isHosted = - getEnv('NEXT_PUBLIC_APP_URL') === 'https://www.sim.ai' || - getEnv('NEXT_PUBLIC_APP_URL') === 'https://www.staging.sim.ai' +export const isHosted = true + // getEnv('NEXT_PUBLIC_APP_URL') === 'https://www.sim.ai' || + // getEnv('NEXT_PUBLIC_APP_URL') === 'https://www.staging.sim.ai' /** * Is billing enforcement enabled diff --git a/apps/sim/stores/panel/copilot/store.ts b/apps/sim/stores/panel/copilot/store.ts index c3ade28057..6c4c867a98 100644 --- a/apps/sim/stores/panel/copilot/store.ts +++ b/apps/sim/stores/panel/copilot/store.ts @@ -873,7 +873,7 @@ async function resumeFromLiveStream( // Initial state (subset required for UI/streaming) const initialState = { mode: 'build' as const, - selectedModel: 'claude-4.5-opus' as CopilotStore['selectedModel'], + selectedModel: 'claude-4.6-opus' as CopilotStore['selectedModel'], agentPrefetch: false, enabledModels: null as string[] | null, // Null means not loaded yet, empty array means all disabled isCollapsed: false, From b07b812c546a1f9644ff775d5daa513ef12886da Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Sat, 7 Feb 2026 11:37:26 -0800 Subject: [PATCH 44/72] Fix discovery tool --- apps/sim/app/api/mcp/copilot/route.ts | 34 ++++++++---- .../components/tool-call/tool-call.tsx | 8 +-- .../orchestrator/sse-handlers/handlers.ts | 1 + .../sse-handlers/tool-execution.ts | 25 +++++++-- .../orchestrator/tool-executor/index.ts | 53 
++++++++++++------- apps/sim/lib/copilot/store-utils.ts | 4 ++ .../tools/client/tool-display-registry.ts | 13 +++++ 7 files changed, 99 insertions(+), 39 deletions(-) diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index 7692ef5327..a492889944 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -18,7 +18,11 @@ import { eq, sql } from 'drizzle-orm' import { type NextRequest, NextResponse } from 'next/server' import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription' import { getCopilotModel } from '@/lib/copilot/config' -import { SIM_AGENT_API_URL, SIM_AGENT_VERSION } from '@/lib/copilot/constants' +import { + ORCHESTRATION_TIMEOUT_MS, + SIM_AGENT_API_URL, + SIM_AGENT_VERSION, +} from '@/lib/copilot/constants' import { RateLimiter } from '@/lib/core/rate-limiter' import { env } from '@/lib/core/config/env' import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator' @@ -35,6 +39,7 @@ const mcpRateLimiter = new RateLimiter() export const dynamic = 'force-dynamic' export const runtime = 'nodejs' +export const maxDuration = 300 interface CopilotKeyAuthResult { success: boolean @@ -358,7 +363,7 @@ class NextResponseCapture { } } -function buildMcpServer(): Server { +function buildMcpServer(abortSignal?: AbortSignal): Server { const server = new Server( { name: 'sim-copilot', @@ -449,7 +454,8 @@ function buildMcpServer(): Server { name: params.name, arguments: params.arguments, }, - authResult.userId + authResult.userId, + abortSignal ) trackMcpCopilotCall(authResult.userId) @@ -464,7 +470,7 @@ async function handleMcpRequestWithSdk( request: NextRequest, parsedBody: unknown ): Promise { - const server = buildMcpServer() + const server = buildMcpServer(request.signal) const transport = new StreamableHTTPServerTransport({ sessionIdGenerator: undefined, enableJsonResponse: true, @@ -481,7 +487,10 @@ async function handleMcpRequestWithSdk( try { await 
transport.handleRequest(requestAdapter as any, responseCapture as any, parsedBody) await responseCapture.waitForHeaders() - await responseCapture.waitForEnd() + // Must exceed the longest possible tool execution (build = 5 min). + // Using ORCHESTRATION_TIMEOUT_MS + 60 s buffer so the orchestrator can + // finish or time-out on its own before the transport is torn down. + await responseCapture.waitForEnd(ORCHESTRATION_TIMEOUT_MS + 60_000) return responseCapture.toNextResponse() } finally { await server.close().catch(() => {}) @@ -540,7 +549,8 @@ function trackMcpCopilotCall(userId: string): void { async function handleToolsCall( params: { name: string; arguments?: Record }, - userId: string + userId: string, + abortSignal?: AbortSignal ): Promise { const args = params.arguments || {} @@ -551,7 +561,7 @@ async function handleToolsCall( const subagentTool = SUBAGENT_TOOL_DEFS.find((tool) => tool.name === params.name) if (subagentTool) { - return handleSubagentToolCall(subagentTool, args, userId) + return handleSubagentToolCall(subagentTool, args, userId, abortSignal) } throw new McpError(ErrorCode.MethodNotFound, `Tool not found: ${params.name}`) @@ -606,7 +616,8 @@ async function handleDirectToolCall( */ async function handleBuildToolCall( args: Record, - userId: string + userId: string, + abortSignal?: AbortSignal ): Promise { try { const requestText = (args.request as string) || JSON.stringify(args) @@ -657,6 +668,7 @@ async function handleBuildToolCall( autoExecuteTools: true, timeout: 300000, interactive: false, + abortSignal, }) const responseData = { @@ -687,10 +699,11 @@ async function handleBuildToolCall( async function handleSubagentToolCall( toolDef: (typeof SUBAGENT_TOOL_DEFS)[number], args: Record, - userId: string + userId: string, + abortSignal?: AbortSignal ): Promise { if (toolDef.agentId === 'build') { - return handleBuildToolCall(args, userId) + return handleBuildToolCall(args, userId, abortSignal) } try { @@ -722,6 +735,7 @@ async function 
handleSubagentToolCall( userId, workflowId: args.workflowId as string | undefined, workspaceId: args.workspaceId as string | undefined, + abortSignal, } ) diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx index 2d644c91e6..0791b4a03a 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx @@ -1444,13 +1444,7 @@ export function ToolCall({ toolCall.name === 'mark_todo_in_progress' || toolCall.name === 'tool_search_tool_regex' || toolCall.name === 'user_memory' || - toolCall.name === 'edit_respond' || - toolCall.name === 'debug_respond' || - toolCall.name === 'plan_respond' || - toolCall.name === 'research_respond' || - toolCall.name === 'info_respond' || - toolCall.name === 'deploy_respond' || - toolCall.name === 'superagent_respond' + toolCall.name.endsWith('_respond') ) return null diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts index 9a061029e9..373fe00330 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts @@ -109,6 +109,7 @@ export const sseHandlers: Record = { const toolData = getEventData(event) || ({} as Record) const toolCallId = (toolData.id as string | undefined) || event.toolCallId const toolName = (toolData.name as string | undefined) || event.toolName + if (!toolCallId || !toolName) return const args = (toolData.arguments || toolData.input || asRecord(event.data).input) as diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts 
b/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts index 1c707c5708..80c4c60361 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts @@ -62,13 +62,25 @@ export async function executeToolAndReport( markToolResultSeen(toolCall.id) - await markToolComplete( + // Fire-and-forget: notify the copilot backend that the tool completed. + // IMPORTANT: We must NOT await this — the Go backend may block on the + // mark-complete handler until it can write back on the SSE stream, but + // the SSE reader (our for-await loop) is paused while we're in this + // handler. Awaiting here would deadlock: sim waits for Go's response, + // Go waits for sim to drain the SSE stream. + markToolComplete( toolCall.id, toolCall.name, result.success ? 200 : 500, result.error || (result.success ? 'Tool completed' : 'Tool failed'), result.output - ) + ).catch((err) => { + logger.error('markToolComplete fire-and-forget failed', { + toolCallId: toolCall.id, + toolName: toolCall.name, + error: err instanceof Error ? err.message : String(err), + }) + }) const resultEvent: SSEEvent = { type: 'tool_result', @@ -91,7 +103,14 @@ export async function executeToolAndReport( markToolResultSeen(toolCall.id) - await markToolComplete(toolCall.id, toolCall.name, 500, toolCall.error) + // Fire-and-forget (same reasoning as above). + markToolComplete(toolCall.id, toolCall.name, 500, toolCall.error).catch((err) => { + logger.error('markToolComplete fire-and-forget failed', { + toolCallId: toolCall.id, + toolName: toolCall.name, + error: err instanceof Error ? 
err.message : String(err), + }) + }) const errorEvent: SSEEvent = { type: 'tool_error', diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts index 2bae9f05d3..12dbbf5987 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts @@ -206,6 +206,9 @@ async function executeSimWorkflowTool( return handler(params, context) } +/** Timeout for the mark-complete POST to the copilot backend (30 s). */ +const MARK_COMPLETE_TIMEOUT_MS = 30_000 + /** * Notify the copilot backend that a tool has completed. */ @@ -217,30 +220,42 @@ export async function markToolComplete( data?: unknown ): Promise { try { - const response = await fetch(`${SIM_AGENT_API_URL}/api/tools/mark-complete`, { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), - }, - body: JSON.stringify({ - id: toolCallId, - name: toolName, - status, - message, - data, - }), - }) + const controller = new AbortController() + const timeoutId = setTimeout(() => controller.abort(), MARK_COMPLETE_TIMEOUT_MS) - if (!response.ok) { - logger.warn('Mark-complete call failed', { toolCallId, status: response.status }) - return false - } + try { + const response = await fetch(`${SIM_AGENT_API_URL}/api/tools/mark-complete`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + ...(env.COPILOT_API_KEY ? 
{ 'x-api-key': env.COPILOT_API_KEY } : {}), + }, + body: JSON.stringify({ + id: toolCallId, + name: toolName, + status, + message, + data, + }), + signal: controller.signal, + }) - return true + if (!response.ok) { + logger.warn('Mark-complete call failed', { toolCallId, toolName, status: response.status }) + return false + } + + return true + } finally { + clearTimeout(timeoutId) + } } catch (error) { + const isTimeout = + error instanceof DOMException && error.name === 'AbortError' logger.error('Mark-complete call failed', { toolCallId, + toolName, + timedOut: isTimeout, error: error instanceof Error ? error.message : String(error), }) return false diff --git a/apps/sim/lib/copilot/store-utils.ts b/apps/sim/lib/copilot/store-utils.ts index cdf864fee8..9a124850d8 100644 --- a/apps/sim/lib/copilot/store-utils.ts +++ b/apps/sim/lib/copilot/store-utils.ts @@ -13,6 +13,9 @@ type StoreSet = ( partial: Partial | ((state: CopilotStore) => Partial) ) => void +/** Respond tools are internal to copilot subagents and should never be shown in the UI */ +const HIDDEN_TOOL_SUFFIX = '_respond' + export function resolveToolDisplay( toolName: string | undefined, state: ClientToolCallState, @@ -20,6 +23,7 @@ export function resolveToolDisplay( params?: Record ): ClientToolDisplay | undefined { if (!toolName) return undefined + if (toolName.endsWith(HIDDEN_TOOL_SUFFIX)) return undefined const entry = TOOL_DISPLAY_REGISTRY[toolName] if (!entry) return humanizedFallback(toolName, state) diff --git a/apps/sim/lib/copilot/tools/client/tool-display-registry.ts b/apps/sim/lib/copilot/tools/client/tool-display-registry.ts index 45eb2f0f5c..b106ea59f1 100644 --- a/apps/sim/lib/copilot/tools/client/tool-display-registry.ts +++ b/apps/sim/lib/copilot/tools/client/tool-display-registry.ts @@ -862,6 +862,18 @@ const META_get_operations_examples: ToolMetadata = { }, } +const META_get_platform_actions: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Viewing 
platform actions', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Viewing platform actions', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Viewing platform actions', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Viewed platform actions', icon: Navigation }, + [ClientToolCallState.error]: { text: 'Failed to view platform actions', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped platform actions', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted platform actions', icon: MinusCircle }, + }, +} + const META_get_page_contents: ToolMetadata = { displayNames: { [ClientToolCallState.generating]: { text: 'Getting page contents', icon: Loader2 }, @@ -2259,6 +2271,7 @@ const TOOL_METADATA_BY_ID: Record = { get_examples_rag: META_get_examples_rag, get_operations_examples: META_get_operations_examples, get_page_contents: META_get_page_contents, + get_platform_actions: META_get_platform_actions, get_trigger_blocks: META_get_trigger_blocks, get_trigger_examples: META_get_trigger_examples, get_user_workflow: META_get_user_workflow, From 220a54078cf7876f37dd84b6fed0b8fb5387cbfd Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Sat, 7 Feb 2026 11:58:09 -0800 Subject: [PATCH 45/72] Fix --- apps/sim/app/api/mcp/copilot/route.ts | 21 ++++++++- .../orchestrator/sse-handlers/handlers.ts | 47 +++++++++++++++++-- .../sse-handlers/tool-execution.ts | 46 +++++++++++++++--- .../sim/lib/copilot/orchestrator/sse-utils.ts | 13 ++++- .../lib/copilot/orchestrator/stream-core.ts | 37 ++++++++++++++- .../orchestrator/tool-executor/index.ts | 42 +++++++++++++---- 6 files changed, 181 insertions(+), 25 deletions(-) diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index a492889944..8dae511d5c 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -620,6 +620,11 @@ async function handleBuildToolCall( abortSignal?: AbortSignal ): 
Promise { try { + logger.info('[MCP-BUILD] handleBuildToolCall ENTER', { + hasAbortSignal: !!abortSignal, + abortSignalAborted: abortSignal?.aborted, + argsKeys: Object.keys(args), + }) const requestText = (args.request as string) || JSON.stringify(args) const { model } = getCopilotModel('chat') const workflowId = args.workflowId as string | undefined @@ -661,6 +666,12 @@ async function handleBuildToolCall( source: 'mcp', } + logger.info('[MCP-BUILD] Calling orchestrateCopilotStream', { + workflowId: resolved.workflowId, + chatId, + hasAbortSignal: !!abortSignal, + }) + const result = await orchestrateCopilotStream(requestPayload, { userId, workflowId: resolved.workflowId, @@ -671,6 +682,14 @@ async function handleBuildToolCall( abortSignal, }) + logger.info('[MCP-BUILD] orchestrateCopilotStream returned', { + success: result.success, + contentLength: result.content?.length, + toolCallCount: result.toolCalls?.length, + error: result.error, + errors: result.errors, + }) + const responseData = { success: result.success, content: result.content, @@ -683,7 +702,7 @@ async function handleBuildToolCall( isError: !result.success, } } catch (error) { - logger.error('Build tool call failed', { error }) + logger.error('[MCP-BUILD] Build tool call THREW', { error }) return { content: [ { diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts index 373fe00330..612eb6b298 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts @@ -110,7 +110,12 @@ export const sseHandlers: Record = { const toolCallId = (toolData.id as string | undefined) || event.toolCallId const toolName = (toolData.name as string | undefined) || event.toolName - if (!toolCallId || !toolName) return + logger.info('[TOOL_CALL] Received', { toolCallId, toolName, hasToolData: !!toolData }) + + if (!toolCallId || !toolName) { + logger.warn('[TOOL_CALL] 
Missing toolCallId or toolName, returning early', { toolCallId, toolName }) + return + } const args = (toolData.arguments || toolData.input || asRecord(event.data).input) as | Record @@ -118,12 +123,23 @@ export const sseHandlers: Record = { const isPartial = toolData.partial === true const existing = context.toolCalls.get(toolCallId) + logger.info('[TOOL_CALL] State check', { + toolCallId, + toolName, + isPartial, + hasExisting: !!existing, + existingStatus: existing?.status, + existingEndTime: existing?.endTime, + hasArgs: !!args, + }) + // If we've already completed this tool call, ignore late/duplicate tool_call events // to avoid resetting UI/state back to pending and re-executing. if ( existing?.endTime || (existing && existing.status !== 'pending' && existing.status !== 'executing') ) { + logger.info('[TOOL_CALL] Already completed, skipping', { toolCallId, toolName, status: existing?.status }) if (!existing.params && args) { existing.params = args } @@ -144,20 +160,31 @@ export const sseHandlers: Record = { addContentBlock(context, { type: 'tool_call', toolCall: created }) } - if (isPartial) return - if (wasToolResultSeen(toolCallId)) return + if (isPartial) { + logger.info('[TOOL_CALL] Partial event, returning early', { toolCallId, toolName }) + return + } + if (wasToolResultSeen(toolCallId)) { + logger.info('[TOOL_CALL] Result already seen (dedup), returning early', { toolCallId, toolName }) + return + } const toolCall = context.toolCalls.get(toolCallId) - if (!toolCall) return + if (!toolCall) { + logger.warn('[TOOL_CALL] toolCall not found in map after set, returning early', { toolCallId }) + return + } // Subagent tools are executed by the copilot backend, not sim side. if (SUBAGENT_TOOL_SET.has(toolName)) { + logger.info('[TOOL_CALL] Subagent tool, skipping local execution', { toolCallId, toolName }) return } // Respond tools are internal to copilot's subagent system - skip execution. 
// The copilot backend handles these internally to signal subagent completion. if (RESPOND_TOOL_SET.has(toolName)) { + logger.info('[TOOL_CALL] Respond tool, skipping', { toolCallId, toolName }) toolCall.status = 'success' toolCall.endTime = Date.now() toolCall.result = { @@ -170,6 +197,14 @@ export const sseHandlers: Record = { const isInterruptTool = isInterruptToolName(toolName) const isInteractive = options.interactive === true + logger.info('[TOOL_CALL] Pre-execute check', { + toolCallId, + toolName, + isInterruptTool, + isInteractive, + autoExecuteTools: options.autoExecuteTools, + }) + if (isInterruptTool && isInteractive) { const decision = await waitForToolDecision( toolCallId, @@ -231,7 +266,11 @@ export const sseHandlers: Record = { } if (options.autoExecuteTools !== false) { + logger.info('[TOOL_CALL] Calling executeToolAndReport', { toolCallId, toolName }) await executeToolAndReport(toolCallId, context, execContext, options) + logger.info('[TOOL_CALL] executeToolAndReport returned', { toolCallId, toolName }) + } else { + logger.info('[TOOL_CALL] autoExecuteTools is false, skipping execution', { toolCallId, toolName }) } }, reasoning: (event, context) => { diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts index 80c4c60361..aef6c86012 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts @@ -32,14 +32,31 @@ export async function executeToolAndReport( options?: OrchestratorOptions ): Promise { const toolCall = context.toolCalls.get(toolCallId) - if (!toolCall) return + if (!toolCall) { + logger.warn('[EXEC] toolCall not found in context map', { toolCallId }) + return + } - if (toolCall.status === 'executing') return - if (wasToolResultSeen(toolCall.id)) return + if (toolCall.status === 'executing') { + logger.warn('[EXEC] toolCall already executing, skipping', { 
toolCallId, toolName: toolCall.name }) + return + } + if (wasToolResultSeen(toolCall.id)) { + logger.warn('[EXEC] toolCall result already seen (dedup), skipping', { toolCallId, toolName: toolCall.name }) + return + } + logger.info('[EXEC] Starting tool execution', { toolCallId, toolName: toolCall.name, params: toolCall.params ? Object.keys(toolCall.params) : [] }) toolCall.status = 'executing' try { const result = await executeToolServerSide(toolCall, execContext) + logger.info('[EXEC] executeToolServerSide returned', { + toolCallId, + toolName: toolCall.name, + success: result.success, + hasOutput: !!result.output, + error: result.error, + }) toolCall.status = result.success ? 'success' : 'error' toolCall.result = result toolCall.error = result.error @@ -68,14 +85,21 @@ export async function executeToolAndReport( // the SSE reader (our for-await loop) is paused while we're in this // handler. Awaiting here would deadlock: sim waits for Go's response, // Go waits for sim to drain the SSE stream. + logger.info('[EXEC] Firing markToolComplete (fire-and-forget)', { + toolCallId: toolCall.id, + toolName: toolCall.name, + status: result.success ? 200 : 500, + }) markToolComplete( toolCall.id, toolCall.name, result.success ? 200 : 500, result.error || (result.success ? 'Tool completed' : 'Tool failed'), result.output - ).catch((err) => { - logger.error('markToolComplete fire-and-forget failed', { + ).then((ok) => { + logger.info('[EXEC] markToolComplete resolved', { toolCallId: toolCall.id, toolName: toolCall.name, ok }) + }).catch((err) => { + logger.error('[EXEC] markToolComplete fire-and-forget FAILED', { toolCallId: toolCall.id, toolName: toolCall.name, error: err instanceof Error ? 
err.message : String(err), @@ -96,7 +120,13 @@ export async function executeToolAndReport( }, } await options?.onEvent?.(resultEvent) + logger.info('[EXEC] executeToolAndReport complete', { toolCallId, toolName: toolCall.name }) } catch (error) { + logger.error('[EXEC] executeToolAndReport CAUGHT ERROR', { + toolCallId, + toolName: toolCall.name, + error: error instanceof Error ? error.message : String(error), + }) toolCall.status = 'error' toolCall.error = error instanceof Error ? error.message : String(error) toolCall.endTime = Date.now() @@ -104,8 +134,10 @@ export async function executeToolAndReport( markToolResultSeen(toolCall.id) // Fire-and-forget (same reasoning as above). - markToolComplete(toolCall.id, toolCall.name, 500, toolCall.error).catch((err) => { - logger.error('markToolComplete fire-and-forget failed', { + markToolComplete(toolCall.id, toolCall.name, 500, toolCall.error).then((ok) => { + logger.info('[EXEC] markToolComplete (error path) resolved', { toolCallId: toolCall.id, ok }) + }).catch((err) => { + logger.error('[EXEC] markToolComplete (error path) FAILED', { toolCallId: toolCall.id, toolName: toolCall.name, error: err instanceof Error ? err.message : String(err), diff --git a/apps/sim/lib/copilot/orchestrator/sse-utils.ts b/apps/sim/lib/copilot/orchestrator/sse-utils.ts index afcbf21115..832823d08e 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-utils.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-utils.ts @@ -1,6 +1,9 @@ +import { createLogger } from '@sim/logger' import { STREAM_BUFFER_MAX_DEDUP_ENTRIES } from '@/lib/copilot/constants' import type { SSEEvent } from '@/lib/copilot/orchestrator/types' +const logger = createLogger('CopilotSseUtils') + type EventDataObject = Record | undefined /** Safely cast event.data to a record for property access. 
*/ @@ -107,7 +110,10 @@ export function shouldSkipToolCallEvent(event: SSEEvent): boolean { if (!toolCallId) return false const eventData = getEventData(event) if (eventData?.partial === true) return false - if (wasToolResultSeen(toolCallId) || wasToolCallSeen(toolCallId)) { + const resultSeen = wasToolResultSeen(toolCallId) + const callSeen = wasToolCallSeen(toolCallId) + if (resultSeen || callSeen) { + logger.info('[DEDUP] Skipping tool_call event', { toolCallId, resultSeen, callSeen, seenToolCallsSize: seenToolCalls.size, seenToolResultsSize: seenToolResults.size }) return true } markToolCallSeen(toolCallId) @@ -118,7 +124,10 @@ export function shouldSkipToolResultEvent(event: SSEEvent): boolean { if (event.type !== 'tool_result') return false const toolCallId = getToolCallIdFromEvent(event) if (!toolCallId) return false - if (wasToolResultSeen(toolCallId)) return true + if (wasToolResultSeen(toolCallId)) { + logger.info('[DEDUP] Skipping tool_result event', { toolCallId, seenToolResultsSize: seenToolResults.size }) + return true + } markToolResultSeen(toolCallId) return false } diff --git a/apps/sim/lib/copilot/orchestrator/stream-core.ts b/apps/sim/lib/copilot/orchestrator/stream-core.ts index e1dc2e2fc3..25034ef36a 100644 --- a/apps/sim/lib/copilot/orchestrator/stream-core.ts +++ b/apps/sim/lib/copilot/orchestrator/stream-core.ts @@ -89,8 +89,15 @@ export async function runStreamLoop( const reader = response.body.getReader() const decoder = new TextDecoder() + let eventCount = 0 + + logger.info('[STREAM] SSE stream connected, starting event loop', { + timeout, + hasAbortSignal: !!abortSignal, + }) const timeoutId = setTimeout(() => { + logger.warn('[STREAM] Timeout fired, cancelling reader', { timeout, eventCount }) context.errors.push('Request timed out') context.streamComplete = true reader.cancel().catch(() => {}) @@ -98,17 +105,37 @@ export async function runStreamLoop( try { for await (const event of parseSSEStream(reader, decoder, abortSignal)) { + 
eventCount++ + if (abortSignal?.aborted) { + logger.warn('[STREAM] AbortSignal aborted, breaking', { eventCount }) context.wasAborted = true break } const normalizedEvent = normalizeSseEvent(event) + logger.info('[STREAM] Event received', { + eventNum: eventCount, + type: normalizedEvent.type, + toolCallId: normalizedEvent.toolCallId, + toolName: normalizedEvent.toolName, + hasSubagent: !!normalizedEvent.subagent, + }) + // Skip duplicate tool events. const shouldSkipToolCall = shouldSkipToolCallEvent(normalizedEvent) const shouldSkipToolResult = shouldSkipToolResultEvent(normalizedEvent) + if (shouldSkipToolCall || shouldSkipToolResult) { + logger.info('[STREAM] Skipping duplicate event', { + type: normalizedEvent.type, + toolCallId: normalizedEvent.toolCallId, + skipToolCall: shouldSkipToolCall, + skipToolResult: shouldSkipToolResult, + }) + } + if (!shouldSkipToolCall && !shouldSkipToolResult) { try { await options.onEvent?.(normalizedEvent) @@ -156,10 +183,18 @@ export async function runStreamLoop( // Main event handler dispatch. 
const handler = sseHandlers[normalizedEvent.type] if (handler) { + logger.info('[STREAM] Dispatching to handler', { type: normalizedEvent.type, toolCallId: normalizedEvent.toolCallId }) await handler(normalizedEvent, context, execContext, options) + logger.info('[STREAM] Handler returned', { type: normalizedEvent.type, toolCallId: normalizedEvent.toolCallId, streamComplete: context.streamComplete }) + } else { + logger.info('[STREAM] No handler for event type', { type: normalizedEvent.type }) + } + if (context.streamComplete) { + logger.info('[STREAM] Stream marked complete, breaking', { eventCount, errors: context.errors }) + break } - if (context.streamComplete) break } + logger.info('[STREAM] Event loop ended', { eventCount, streamComplete: context.streamComplete, wasAborted: context.wasAborted, errors: context.errors }) } finally { clearTimeout(timeoutId) } diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts index 12dbbf5987..f4bcf4e9d4 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts @@ -219,29 +219,50 @@ export async function markToolComplete( message?: unknown, data?: unknown ): Promise { + const url = `${SIM_AGENT_API_URL}/api/tools/mark-complete` + logger.info('[MARK-COMPLETE] Starting', { + toolCallId, + toolName, + status, + url, + hasData: !!data, + hasCopilotApiKey: !!env.COPILOT_API_KEY, + }) + try { const controller = new AbortController() const timeoutId = setTimeout(() => controller.abort(), MARK_COMPLETE_TIMEOUT_MS) try { - const response = await fetch(`${SIM_AGENT_API_URL}/api/tools/mark-complete`, { + const body = JSON.stringify({ + id: toolCallId, + name: toolName, + status, + message, + data, + }) + logger.info('[MARK-COMPLETE] Sending POST', { toolCallId, toolName, bodyLength: body.length }) + + const response = await fetch(url, { method: 'POST', headers: { 'Content-Type': 
'application/json', ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), }, - body: JSON.stringify({ - id: toolCallId, - name: toolName, - status, - message, - data, - }), + body, signal: controller.signal, }) + logger.info('[MARK-COMPLETE] Response received', { + toolCallId, + toolName, + httpStatus: response.status, + ok: response.ok, + }) + if (!response.ok) { - logger.warn('Mark-complete call failed', { toolCallId, toolName, status: response.status }) + const responseText = await response.text().catch(() => '') + logger.warn('[MARK-COMPLETE] Non-OK response', { toolCallId, toolName, httpStatus: response.status, responseText }) return false } @@ -252,11 +273,12 @@ export async function markToolComplete( } catch (error) { const isTimeout = error instanceof DOMException && error.name === 'AbortError' - logger.error('Mark-complete call failed', { + logger.error('[MARK-COMPLETE] FAILED', { toolCallId, toolName, timedOut: isTimeout, error: error instanceof Error ? error.message : String(error), + errorName: error instanceof Error ? 
error.name : undefined, }) return false } From ebf4e90e2b5ed6c2e950baba6297a1a69f481359 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Sat, 7 Feb 2026 12:01:50 -0800 Subject: [PATCH 46/72] Remove logs --- apps/sim/app/api/mcp/copilot/route.ts | 21 +------- .../orchestrator/sse-handlers/handlers.ts | 48 ++----------------- .../sse-handlers/tool-execution.ts | 46 +++--------------- .../sim/lib/copilot/orchestrator/sse-utils.ts | 13 +---- .../lib/copilot/orchestrator/stream-core.ts | 37 +------------- .../orchestrator/tool-executor/index.ts | 42 ++++------------ 6 files changed, 25 insertions(+), 182 deletions(-) diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index 8dae511d5c..a492889944 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -620,11 +620,6 @@ async function handleBuildToolCall( abortSignal?: AbortSignal ): Promise { try { - logger.info('[MCP-BUILD] handleBuildToolCall ENTER', { - hasAbortSignal: !!abortSignal, - abortSignalAborted: abortSignal?.aborted, - argsKeys: Object.keys(args), - }) const requestText = (args.request as string) || JSON.stringify(args) const { model } = getCopilotModel('chat') const workflowId = args.workflowId as string | undefined @@ -666,12 +661,6 @@ async function handleBuildToolCall( source: 'mcp', } - logger.info('[MCP-BUILD] Calling orchestrateCopilotStream', { - workflowId: resolved.workflowId, - chatId, - hasAbortSignal: !!abortSignal, - }) - const result = await orchestrateCopilotStream(requestPayload, { userId, workflowId: resolved.workflowId, @@ -682,14 +671,6 @@ async function handleBuildToolCall( abortSignal, }) - logger.info('[MCP-BUILD] orchestrateCopilotStream returned', { - success: result.success, - contentLength: result.content?.length, - toolCallCount: result.toolCalls?.length, - error: result.error, - errors: result.errors, - }) - const responseData = { success: result.success, content: result.content, @@ -702,7 
+683,7 @@ async function handleBuildToolCall( isError: !result.success, } } catch (error) { - logger.error('[MCP-BUILD] Build tool call THREW', { error }) + logger.error('Build tool call failed', { error }) return { content: [ { diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts index 612eb6b298..9a061029e9 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts @@ -109,13 +109,7 @@ export const sseHandlers: Record = { const toolData = getEventData(event) || ({} as Record) const toolCallId = (toolData.id as string | undefined) || event.toolCallId const toolName = (toolData.name as string | undefined) || event.toolName - - logger.info('[TOOL_CALL] Received', { toolCallId, toolName, hasToolData: !!toolData }) - - if (!toolCallId || !toolName) { - logger.warn('[TOOL_CALL] Missing toolCallId or toolName, returning early', { toolCallId, toolName }) - return - } + if (!toolCallId || !toolName) return const args = (toolData.arguments || toolData.input || asRecord(event.data).input) as | Record @@ -123,23 +117,12 @@ export const sseHandlers: Record = { const isPartial = toolData.partial === true const existing = context.toolCalls.get(toolCallId) - logger.info('[TOOL_CALL] State check', { - toolCallId, - toolName, - isPartial, - hasExisting: !!existing, - existingStatus: existing?.status, - existingEndTime: existing?.endTime, - hasArgs: !!args, - }) - // If we've already completed this tool call, ignore late/duplicate tool_call events // to avoid resetting UI/state back to pending and re-executing. 
if ( existing?.endTime || (existing && existing.status !== 'pending' && existing.status !== 'executing') ) { - logger.info('[TOOL_CALL] Already completed, skipping', { toolCallId, toolName, status: existing?.status }) if (!existing.params && args) { existing.params = args } @@ -160,31 +143,20 @@ export const sseHandlers: Record = { addContentBlock(context, { type: 'tool_call', toolCall: created }) } - if (isPartial) { - logger.info('[TOOL_CALL] Partial event, returning early', { toolCallId, toolName }) - return - } - if (wasToolResultSeen(toolCallId)) { - logger.info('[TOOL_CALL] Result already seen (dedup), returning early', { toolCallId, toolName }) - return - } + if (isPartial) return + if (wasToolResultSeen(toolCallId)) return const toolCall = context.toolCalls.get(toolCallId) - if (!toolCall) { - logger.warn('[TOOL_CALL] toolCall not found in map after set, returning early', { toolCallId }) - return - } + if (!toolCall) return // Subagent tools are executed by the copilot backend, not sim side. if (SUBAGENT_TOOL_SET.has(toolName)) { - logger.info('[TOOL_CALL] Subagent tool, skipping local execution', { toolCallId, toolName }) return } // Respond tools are internal to copilot's subagent system - skip execution. // The copilot backend handles these internally to signal subagent completion. 
if (RESPOND_TOOL_SET.has(toolName)) { - logger.info('[TOOL_CALL] Respond tool, skipping', { toolCallId, toolName }) toolCall.status = 'success' toolCall.endTime = Date.now() toolCall.result = { @@ -197,14 +169,6 @@ export const sseHandlers: Record = { const isInterruptTool = isInterruptToolName(toolName) const isInteractive = options.interactive === true - logger.info('[TOOL_CALL] Pre-execute check', { - toolCallId, - toolName, - isInterruptTool, - isInteractive, - autoExecuteTools: options.autoExecuteTools, - }) - if (isInterruptTool && isInteractive) { const decision = await waitForToolDecision( toolCallId, @@ -266,11 +230,7 @@ export const sseHandlers: Record = { } if (options.autoExecuteTools !== false) { - logger.info('[TOOL_CALL] Calling executeToolAndReport', { toolCallId, toolName }) await executeToolAndReport(toolCallId, context, execContext, options) - logger.info('[TOOL_CALL] executeToolAndReport returned', { toolCallId, toolName }) - } else { - logger.info('[TOOL_CALL] autoExecuteTools is false, skipping execution', { toolCallId, toolName }) } }, reasoning: (event, context) => { diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts index aef6c86012..80c4c60361 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts @@ -32,31 +32,14 @@ export async function executeToolAndReport( options?: OrchestratorOptions ): Promise { const toolCall = context.toolCalls.get(toolCallId) - if (!toolCall) { - logger.warn('[EXEC] toolCall not found in context map', { toolCallId }) - return - } + if (!toolCall) return - if (toolCall.status === 'executing') { - logger.warn('[EXEC] toolCall already executing, skipping', { toolCallId, toolName: toolCall.name }) - return - } - if (wasToolResultSeen(toolCall.id)) { - logger.warn('[EXEC] toolCall result already seen (dedup), skipping', { toolCallId, 
toolName: toolCall.name }) - return - } + if (toolCall.status === 'executing') return + if (wasToolResultSeen(toolCall.id)) return - logger.info('[EXEC] Starting tool execution', { toolCallId, toolName: toolCall.name, params: toolCall.params ? Object.keys(toolCall.params) : [] }) toolCall.status = 'executing' try { const result = await executeToolServerSide(toolCall, execContext) - logger.info('[EXEC] executeToolServerSide returned', { - toolCallId, - toolName: toolCall.name, - success: result.success, - hasOutput: !!result.output, - error: result.error, - }) toolCall.status = result.success ? 'success' : 'error' toolCall.result = result toolCall.error = result.error @@ -85,21 +68,14 @@ export async function executeToolAndReport( // the SSE reader (our for-await loop) is paused while we're in this // handler. Awaiting here would deadlock: sim waits for Go's response, // Go waits for sim to drain the SSE stream. - logger.info('[EXEC] Firing markToolComplete (fire-and-forget)', { - toolCallId: toolCall.id, - toolName: toolCall.name, - status: result.success ? 200 : 500, - }) markToolComplete( toolCall.id, toolCall.name, result.success ? 200 : 500, result.error || (result.success ? 'Tool completed' : 'Tool failed'), result.output - ).then((ok) => { - logger.info('[EXEC] markToolComplete resolved', { toolCallId: toolCall.id, toolName: toolCall.name, ok }) - }).catch((err) => { - logger.error('[EXEC] markToolComplete fire-and-forget FAILED', { + ).catch((err) => { + logger.error('markToolComplete fire-and-forget failed', { toolCallId: toolCall.id, toolName: toolCall.name, error: err instanceof Error ? 
err.message : String(err), @@ -120,13 +96,7 @@ export async function executeToolAndReport( }, } await options?.onEvent?.(resultEvent) - logger.info('[EXEC] executeToolAndReport complete', { toolCallId, toolName: toolCall.name }) } catch (error) { - logger.error('[EXEC] executeToolAndReport CAUGHT ERROR', { - toolCallId, - toolName: toolCall.name, - error: error instanceof Error ? error.message : String(error), - }) toolCall.status = 'error' toolCall.error = error instanceof Error ? error.message : String(error) toolCall.endTime = Date.now() @@ -134,10 +104,8 @@ export async function executeToolAndReport( markToolResultSeen(toolCall.id) // Fire-and-forget (same reasoning as above). - markToolComplete(toolCall.id, toolCall.name, 500, toolCall.error).then((ok) => { - logger.info('[EXEC] markToolComplete (error path) resolved', { toolCallId: toolCall.id, ok }) - }).catch((err) => { - logger.error('[EXEC] markToolComplete (error path) FAILED', { + markToolComplete(toolCall.id, toolCall.name, 500, toolCall.error).catch((err) => { + logger.error('markToolComplete fire-and-forget failed', { toolCallId: toolCall.id, toolName: toolCall.name, error: err instanceof Error ? err.message : String(err), diff --git a/apps/sim/lib/copilot/orchestrator/sse-utils.ts b/apps/sim/lib/copilot/orchestrator/sse-utils.ts index 832823d08e..afcbf21115 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-utils.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-utils.ts @@ -1,9 +1,6 @@ -import { createLogger } from '@sim/logger' import { STREAM_BUFFER_MAX_DEDUP_ENTRIES } from '@/lib/copilot/constants' import type { SSEEvent } from '@/lib/copilot/orchestrator/types' -const logger = createLogger('CopilotSseUtils') - type EventDataObject = Record | undefined /** Safely cast event.data to a record for property access. 
*/ @@ -110,10 +107,7 @@ export function shouldSkipToolCallEvent(event: SSEEvent): boolean { if (!toolCallId) return false const eventData = getEventData(event) if (eventData?.partial === true) return false - const resultSeen = wasToolResultSeen(toolCallId) - const callSeen = wasToolCallSeen(toolCallId) - if (resultSeen || callSeen) { - logger.info('[DEDUP] Skipping tool_call event', { toolCallId, resultSeen, callSeen, seenToolCallsSize: seenToolCalls.size, seenToolResultsSize: seenToolResults.size }) + if (wasToolResultSeen(toolCallId) || wasToolCallSeen(toolCallId)) { return true } markToolCallSeen(toolCallId) @@ -124,10 +118,7 @@ export function shouldSkipToolResultEvent(event: SSEEvent): boolean { if (event.type !== 'tool_result') return false const toolCallId = getToolCallIdFromEvent(event) if (!toolCallId) return false - if (wasToolResultSeen(toolCallId)) { - logger.info('[DEDUP] Skipping tool_result event', { toolCallId, seenToolResultsSize: seenToolResults.size }) - return true - } + if (wasToolResultSeen(toolCallId)) return true markToolResultSeen(toolCallId) return false } diff --git a/apps/sim/lib/copilot/orchestrator/stream-core.ts b/apps/sim/lib/copilot/orchestrator/stream-core.ts index 25034ef36a..e1dc2e2fc3 100644 --- a/apps/sim/lib/copilot/orchestrator/stream-core.ts +++ b/apps/sim/lib/copilot/orchestrator/stream-core.ts @@ -89,15 +89,8 @@ export async function runStreamLoop( const reader = response.body.getReader() const decoder = new TextDecoder() - let eventCount = 0 - - logger.info('[STREAM] SSE stream connected, starting event loop', { - timeout, - hasAbortSignal: !!abortSignal, - }) const timeoutId = setTimeout(() => { - logger.warn('[STREAM] Timeout fired, cancelling reader', { timeout, eventCount }) context.errors.push('Request timed out') context.streamComplete = true reader.cancel().catch(() => {}) @@ -105,37 +98,17 @@ export async function runStreamLoop( try { for await (const event of parseSSEStream(reader, decoder, abortSignal)) { - 
eventCount++ - if (abortSignal?.aborted) { - logger.warn('[STREAM] AbortSignal aborted, breaking', { eventCount }) context.wasAborted = true break } const normalizedEvent = normalizeSseEvent(event) - logger.info('[STREAM] Event received', { - eventNum: eventCount, - type: normalizedEvent.type, - toolCallId: normalizedEvent.toolCallId, - toolName: normalizedEvent.toolName, - hasSubagent: !!normalizedEvent.subagent, - }) - // Skip duplicate tool events. const shouldSkipToolCall = shouldSkipToolCallEvent(normalizedEvent) const shouldSkipToolResult = shouldSkipToolResultEvent(normalizedEvent) - if (shouldSkipToolCall || shouldSkipToolResult) { - logger.info('[STREAM] Skipping duplicate event', { - type: normalizedEvent.type, - toolCallId: normalizedEvent.toolCallId, - skipToolCall: shouldSkipToolCall, - skipToolResult: shouldSkipToolResult, - }) - } - if (!shouldSkipToolCall && !shouldSkipToolResult) { try { await options.onEvent?.(normalizedEvent) @@ -183,18 +156,10 @@ export async function runStreamLoop( // Main event handler dispatch. 
const handler = sseHandlers[normalizedEvent.type] if (handler) { - logger.info('[STREAM] Dispatching to handler', { type: normalizedEvent.type, toolCallId: normalizedEvent.toolCallId }) await handler(normalizedEvent, context, execContext, options) - logger.info('[STREAM] Handler returned', { type: normalizedEvent.type, toolCallId: normalizedEvent.toolCallId, streamComplete: context.streamComplete }) - } else { - logger.info('[STREAM] No handler for event type', { type: normalizedEvent.type }) - } - if (context.streamComplete) { - logger.info('[STREAM] Stream marked complete, breaking', { eventCount, errors: context.errors }) - break } + if (context.streamComplete) break } - logger.info('[STREAM] Event loop ended', { eventCount, streamComplete: context.streamComplete, wasAborted: context.wasAborted, errors: context.errors }) } finally { clearTimeout(timeoutId) } diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts index f4bcf4e9d4..12dbbf5987 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts @@ -219,50 +219,29 @@ export async function markToolComplete( message?: unknown, data?: unknown ): Promise { - const url = `${SIM_AGENT_API_URL}/api/tools/mark-complete` - logger.info('[MARK-COMPLETE] Starting', { - toolCallId, - toolName, - status, - url, - hasData: !!data, - hasCopilotApiKey: !!env.COPILOT_API_KEY, - }) - try { const controller = new AbortController() const timeoutId = setTimeout(() => controller.abort(), MARK_COMPLETE_TIMEOUT_MS) try { - const body = JSON.stringify({ - id: toolCallId, - name: toolName, - status, - message, - data, - }) - logger.info('[MARK-COMPLETE] Sending POST', { toolCallId, toolName, bodyLength: body.length }) - - const response = await fetch(url, { + const response = await fetch(`${SIM_AGENT_API_URL}/api/tools/mark-complete`, { method: 'POST', headers: { 'Content-Type': 
'application/json', ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), }, - body, + body: JSON.stringify({ + id: toolCallId, + name: toolName, + status, + message, + data, + }), signal: controller.signal, }) - logger.info('[MARK-COMPLETE] Response received', { - toolCallId, - toolName, - httpStatus: response.status, - ok: response.ok, - }) - if (!response.ok) { - const responseText = await response.text().catch(() => '') - logger.warn('[MARK-COMPLETE] Non-OK response', { toolCallId, toolName, httpStatus: response.status, responseText }) + logger.warn('Mark-complete call failed', { toolCallId, toolName, status: response.status }) return false } @@ -273,12 +252,11 @@ export async function markToolComplete( } catch (error) { const isTimeout = error instanceof DOMException && error.name === 'AbortError' - logger.error('[MARK-COMPLETE] FAILED', { + logger.error('Mark-complete call failed', { toolCallId, toolName, timedOut: isTimeout, error: error instanceof Error ? error.message : String(error), - errorName: error instanceof Error ? 
error.name : undefined, }) return false } From 7e592e87ba32533484f9a7f8cf34c3552b7c6d8d Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Sat, 7 Feb 2026 12:18:17 -0800 Subject: [PATCH 47/72] Fix go side tool rendering --- .../lib/copilot/client-sse/subagent-handlers.ts | 13 ++++++++++++- .../copilot/orchestrator/sse-handlers/handlers.ts | 14 +++++++++++++- .../copilot/orchestrator/tool-executor/index.ts | 15 +++++++++++++++ 3 files changed, 40 insertions(+), 2 deletions(-) diff --git a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts index d78360cad4..98abb45e47 100644 --- a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts +++ b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts @@ -227,7 +227,18 @@ export const subAgentSSEHandlers: Record = { const resultData = asRecord(data?.data) const toolCallId: string | undefined = data?.toolCallId || (resultData.id as string | undefined) - const success: boolean | undefined = data?.success !== false + // Determine success: explicit `success` field takes priority; otherwise + // infer from presence of result data vs error (same logic as server-side + // inferToolSuccess). The Go backend uses `*bool` with omitempty so + // `success` is present when explicitly set, and absent for non-tool events. + const hasExplicitSuccess = + data?.success !== undefined || resultData.success !== undefined + const explicitSuccess = data?.success ?? resultData.success + const hasResultData = data?.result !== undefined || resultData.result !== undefined + const hasError = !!data?.error || !!resultData.error + const success: boolean = hasExplicitSuccess + ? 
!!explicitSuccess + : hasResultData && !hasError if (!toolCallId) return if (!context.subAgentToolCalls[parentToolCallId]) return diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts index 9a061029e9..8b5025897c 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts @@ -7,7 +7,10 @@ import { markToolResultSeen, wasToolResultSeen, } from '@/lib/copilot/orchestrator/sse-utils' -import { markToolComplete } from '@/lib/copilot/orchestrator/tool-executor' +import { + isToolAvailableOnSimSide, + markToolComplete, +} from '@/lib/copilot/orchestrator/tool-executor' import type { ContentBlock, ExecutionContext, @@ -360,6 +363,15 @@ export const subAgentHandlers: Record = { return } + // Tools that only exist on the Go backend (e.g. search_patterns, + // search_errors, remember_debug) should NOT be re-executed on the Sim side. + // The Go backend already executed them and will send its own tool_result + // SSE event with the real outcome. Trying to execute them here would fail + // with "Tool not found" and incorrectly mark the tool as failed. + if (!isToolAvailableOnSimSide(toolName)) { + return + } + if (options.autoExecuteTools !== false) { await executeToolAndReport(toolCallId, context, execContext, options) } diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts index 12dbbf5987..b56abd4366 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts @@ -136,6 +136,21 @@ const SIM_WORKFLOW_TOOL_HANDLERS: Record< executeCreateWorkspaceMcpServer(p as CreateWorkspaceMcpServerParams, c), } +/** + * Check whether a tool can be executed on the Sim (TypeScript) side. + * + * Tools that are only available on the Go backend (e.g. 
search_patterns, + * search_errors, remember_debug) will return false. The subagent tool_call + * handler uses this to decide whether to execute a tool locally or let the + * Go backend's own tool_result SSE event handle it. + */ +export function isToolAvailableOnSimSide(toolName: string): boolean { + if (SERVER_TOOLS.has(toolName)) return true + if (toolName in SIM_WORKFLOW_TOOL_HANDLERS) return true + const resolvedToolName = resolveToolId(toolName) + return !!getTool(resolvedToolName) +} + /** * Execute a tool server-side without calling internal routes. */ From 25d255a6b2a67fdc966f23b1e4d8ca0203393644 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 10:15:24 -0800 Subject: [PATCH 48/72] Update docs --- apps/docs/content/docs/en/copilot/index.mdx | 96 +++++++++++++++++++++ 1 file changed, 96 insertions(+) diff --git a/apps/docs/content/docs/en/copilot/index.mdx b/apps/docs/content/docs/en/copilot/index.mdx index 3bdb0a579f..9b5500aacc 100644 --- a/apps/docs/content/docs/en/copilot/index.mdx +++ b/apps/docs/content/docs/en/copilot/index.mdx @@ -190,3 +190,99 @@ Copilot usage is billed per token from the underlying LLM. If you reach your usa See the [Cost Calculation page](/execution/costs) for billing details. +## Copilot MCP + +You can use Copilot as an MCP server in your favorite editor or AI client. This lets you build, test, deploy, and manage Sim workflows directly from tools like Cursor, Claude Code, Claude Desktop, and VS Code. + +### Generating a Copilot API Key + +To connect to the Copilot MCP server, you need a **Copilot API key**: + +1. Go to [sim.ai](https://sim.ai) and sign in +2. Navigate to **Settings** → **Copilot** +3. Click **Generate API Key** +4. Copy the key — it is only shown once + +The key will look like `sk-sim-copilot-...`. You will use this in the configuration below. 
+ +### Cursor + +Add the following to your `.cursor/mcp.json` (project-level) or global Cursor MCP settings: + +```json +{ + "mcpServers": { + "sim-copilot": { + "url": "https://www.sim.ai/api/mcp/copilot", + "headers": { + "X-API-Key": "YOUR_COPILOT_API_KEY" + } + } + } +} +``` + +Replace `YOUR_COPILOT_API_KEY` with the key you generated above. + +### Claude Code + +Run the following command to add the Copilot MCP server: + +```bash +claude mcp add sim-copilot \ + --transport http \ + https://www.sim.ai/api/mcp/copilot \ + --header "X-API-Key: YOUR_COPILOT_API_KEY" +``` + +Replace `YOUR_COPILOT_API_KEY` with your key. + +### Claude Desktop + +Claude Desktop requires [`mcp-remote`](https://www.npmjs.com/package/mcp-remote) to connect to HTTP-based MCP servers. Add the following to your Claude Desktop config file (`~/Library/Application Support/Claude/claude_desktop_config.json` on macOS): + +```json +{ + "mcpServers": { + "sim-copilot": { + "command": "npx", + "args": [ + "-y", + "mcp-remote", + "https://www.sim.ai/api/mcp/copilot", + "--header", + "X-API-Key: YOUR_COPILOT_API_KEY" + ] + } + } +} +``` + +Replace `YOUR_COPILOT_API_KEY` with your key. + +### VS Code + +Add the following to your VS Code `settings.json` or workspace `.vscode/settings.json`: + +```json +{ + "mcp": { + "servers": { + "sim-copilot": { + "type": "http", + "url": "https://www.sim.ai/api/mcp/copilot", + "headers": { + "X-API-Key": "YOUR_COPILOT_API_KEY" + } + } + } + } +} +``` + +Replace `YOUR_COPILOT_API_KEY` with your key. + + + For self-hosted deployments, replace `https://www.sim.ai` with your self-hosted Sim URL. 
+ + From b4361b8585590ba9b38af4b88e5fe19297fa66e8 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 10:30:02 -0800 Subject: [PATCH 49/72] Fix hydration --- apps/sim/lib/copilot/client-sse/handlers.ts | 98 +++++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/apps/sim/lib/copilot/client-sse/handlers.ts b/apps/sim/lib/copilot/client-sse/handlers.ts index 27e84972c1..ddfa69fb30 100644 --- a/apps/sim/lib/copilot/client-sse/handlers.ts +++ b/apps/sim/lib/copilot/client-sse/handlers.ts @@ -10,7 +10,10 @@ import { } from '@/lib/copilot/store-utils' import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' import type { CopilotStore, CopilotStreamInfo, CopilotToolCall } from '@/stores/panel/copilot/types' +import { useEnvironmentStore } from '@/stores/settings/environment/store' +import { useVariablesStore } from '@/stores/panel/variables/store' import { useWorkflowDiffStore } from '@/stores/workflow-diff/store' +import { useWorkflowRegistry } from '@/stores/workflows/registry/store' import type { WorkflowState } from '@/stores/workflows/workflow/types' import { appendTextBlock, beginThinkingBlock, finalizeThinkingBlock } from './content-blocks' import type { ClientContentBlock, ClientStreamingContext } from './types' @@ -318,6 +321,76 @@ export const sseHandlers: Record = { }) } } + + // Deploy tools: update deployment status in workflow registry + if ( + targetState === ClientToolCallState.success && + (current.name === 'deploy_api' || + current.name === 'deploy_chat' || + current.name === 'deploy_mcp' || + current.name === 'redeploy') + ) { + try { + const resultPayload = asRecord( + data?.result || eventData.result || eventData.data || data?.data + ) + const input = asRecord(current.params) + const workflowId = + (resultPayload?.workflowId as string) || + (input?.workflowId as string) || + useWorkflowRegistry.getState().activeWorkflowId + const isDeployed = resultPayload?.isDeployed !== false + if 
(workflowId) { + useWorkflowRegistry + .getState() + .setDeploymentStatus(workflowId, isDeployed, isDeployed ? new Date() : undefined) + logger.info('[SSE] Updated deployment status from tool result', { + toolName: current.name, + workflowId, + isDeployed, + }) + } + } catch (err) { + logger.warn('[SSE] Failed to hydrate deployment status', { + error: err instanceof Error ? err.message : String(err), + }) + } + } + + // Environment variables: reload store after successful set + if ( + targetState === ClientToolCallState.success && + current.name === 'set_environment_variables' + ) { + try { + useEnvironmentStore.getState().loadEnvironmentVariables() + logger.info('[SSE] Triggered environment variables reload') + } catch (err) { + logger.warn('[SSE] Failed to reload environment variables', { + error: err instanceof Error ? err.message : String(err), + }) + } + } + + // Workflow variables: reload store after successful set + if ( + targetState === ClientToolCallState.success && + current.name === 'set_global_workflow_variables' + ) { + try { + const input = asRecord(current.params) + const workflowId = + (input?.workflowId as string) || useWorkflowRegistry.getState().activeWorkflowId + if (workflowId) { + useVariablesStore.getState().loadForWorkflow(workflowId) + logger.info('[SSE] Triggered workflow variables reload', { workflowId }) + } + } catch (err) { + logger.warn('[SSE] Failed to reload workflow variables', { + error: err instanceof Error ? 
err.message : String(err), + }) + } + } } for (let i = 0; i < context.contentBlocks.length; i++) { @@ -476,6 +549,31 @@ export const sseHandlers: Record = { return } + // OAuth: dispatch event to open the OAuth connect modal + if (name === 'oauth_request_access' && args && typeof window !== 'undefined') { + try { + window.dispatchEvent( + new CustomEvent('open-oauth-connect', { + detail: { + providerName: (args.providerName || args.provider_name || '') as string, + serviceId: (args.serviceId || args.service_id || '') as string, + providerId: (args.providerId || args.provider_id || '') as string, + requiredScopes: (args.requiredScopes || args.required_scopes || []) as string[], + newScopes: (args.newScopes || args.new_scopes || []) as string[], + }, + }) + ) + logger.info('[SSE] Dispatched OAuth connect event', { + providerId: args.providerId || args.provider_id, + providerName: args.providerName || args.provider_name, + }) + } catch (err) { + logger.warn('[SSE] Failed to dispatch OAuth connect event', { + error: err instanceof Error ? 
err.message : String(err), + }) + } + } + return }, reasoning: (data, context, _get, set) => { From d2c028f7cd8a58e0a512f796dc54c7e2cea57243 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 10:53:45 -0800 Subject: [PATCH 50/72] Fix tool call resolution --- .../components/tool-call/tool-call.tsx | 17 ++++++++- apps/sim/lib/copilot/client-sse/handlers.ts | 37 ++++++++++++++----- .../copilot/client-sse/subagent-handlers.ts | 26 +++++++++---- .../sim/lib/copilot/messages/serialization.ts | 28 ++++++++++++++ 4 files changed, 90 insertions(+), 18 deletions(-) diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx index 0791b4a03a..3b7ade1855 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx @@ -1221,13 +1221,26 @@ function isIntegrationTool(toolName: string): boolean { } function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean { + if (!toolCall.name || toolCall.name === 'unknown_tool') { + return false + } + + if (toolCall.state !== ClientToolCallState.pending) { + return false + } + + // Never show buttons for tools the user has marked as always-allowed + if (useCopilotStore.getState().autoAllowedTools.includes(toolCall.name)) { + return false + } + const hasInterrupt = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.interrupt === true - if (hasInterrupt && toolCall.state === 'pending') { + if (hasInterrupt) { return true } const mode = useCopilotStore.getState().mode - if (mode === 'build' && isIntegrationTool(toolCall.name) && toolCall.state === 'pending') { + if (mode === 'build' && isIntegrationTool(toolCall.name)) { 
return true } diff --git a/apps/sim/lib/copilot/client-sse/handlers.ts b/apps/sim/lib/copilot/client-sse/handlers.ts index ddfa69fb30..4dcd2dc2b4 100644 --- a/apps/sim/lib/copilot/client-sse/handlers.ts +++ b/apps/sim/lib/copilot/client-sse/handlers.ts @@ -10,8 +10,8 @@ import { } from '@/lib/copilot/store-utils' import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' import type { CopilotStore, CopilotStreamInfo, CopilotToolCall } from '@/stores/panel/copilot/types' -import { useEnvironmentStore } from '@/stores/settings/environment/store' import { useVariablesStore } from '@/stores/panel/variables/store' +import { useEnvironmentStore } from '@/stores/settings/environment/store' import { useWorkflowDiffStore } from '@/stores/workflow-diff/store' import { useWorkflowRegistry } from '@/stores/workflows/registry/store' import type { WorkflowState } from '@/stores/workflows/workflow/types' @@ -499,7 +499,10 @@ export const sseHandlers: Record = { const { toolCallsById } = get() if (!toolCallsById[toolCallId]) { - const initialState = ClientToolCallState.pending + const isAutoAllowed = get().autoAllowedTools.includes(toolName) + const initialState = isAutoAllowed + ? ClientToolCallState.executing + : ClientToolCallState.pending const tc: CopilotToolCall = { id: toolCallId, name: toolName, @@ -524,23 +527,39 @@ export const sseHandlers: Record = { const { toolCallsById } = get() const existing = toolCallsById[id] + const toolName = name || existing?.name || 'unknown_tool' + const autoAllowedTools = get().autoAllowedTools + const isAutoAllowed = + autoAllowedTools.includes(toolName) || + (existing?.name ? autoAllowedTools.includes(existing.name) : false) + let initialState = isAutoAllowed ? ClientToolCallState.executing : ClientToolCallState.pending + + // Avoid flickering back to pending on partial/duplicate events once a tool is executing. 
+ if ( + existing?.state === ClientToolCallState.executing && + initialState === ClientToolCallState.pending + ) { + initialState = ClientToolCallState.executing + } + const next: CopilotToolCall = existing ? { ...existing, - state: ClientToolCallState.pending, + name: toolName, + state: initialState, ...(args ? { params: args } : {}), - display: resolveToolDisplay(name, ClientToolCallState.pending, id, args), + display: resolveToolDisplay(toolName, initialState, id, args || existing.params), } : { id, - name: name || 'unknown_tool', - state: ClientToolCallState.pending, + name: toolName, + state: initialState, ...(args ? { params: args } : {}), - display: resolveToolDisplay(name, ClientToolCallState.pending, id, args), + display: resolveToolDisplay(toolName, initialState, id, args), } const updated = { ...toolCallsById, [id]: next } set({ toolCallsById: updated }) - logger.info('[toolCallsById] → pending', { id, name, params: args }) + logger.info(`[toolCallsById] → ${initialState}`, { id, name: toolName, params: args }) upsertToolCallBlock(context, next) updateStreamingMessage(set, context) @@ -550,7 +569,7 @@ export const sseHandlers: Record = { } // OAuth: dispatch event to open the OAuth connect modal - if (name === 'oauth_request_access' && args && typeof window !== 'undefined') { + if (toolName === 'oauth_request_access' && args && typeof window !== 'undefined') { try { window.dispatchEvent( new CustomEvent('open-oauth-connect', { diff --git a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts index 98abb45e47..a974ab4c87 100644 --- a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts +++ b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts @@ -190,12 +190,27 @@ export const subAgentSSEHandlers: Record = { const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex( (tc: CopilotToolCall) => tc.id === id ) + const existingToolCall = + existingIndex >= 0 ? 
context.subAgentToolCalls[parentToolCallId][existingIndex] : undefined + + // Auto-allowed tools skip pending state to avoid flashing interrupt buttons + const isAutoAllowed = get().autoAllowedTools.includes(name) + let initialState = isAutoAllowed ? ClientToolCallState.executing : ClientToolCallState.pending + + // Avoid flickering back to pending on partial/duplicate events once a tool is executing. + if ( + existingToolCall?.state === ClientToolCallState.executing && + initialState === ClientToolCallState.pending + ) { + initialState = ClientToolCallState.executing + } + const subAgentToolCall: CopilotToolCall = { id, name, - state: ClientToolCallState.pending, + state: initialState, ...(args ? { params: args } : {}), - display: resolveToolDisplay(name, ClientToolCallState.pending, id, args), + display: resolveToolDisplay(name, initialState, id, args), } if (existingIndex >= 0) { @@ -231,14 +246,11 @@ export const subAgentSSEHandlers: Record = { // infer from presence of result data vs error (same logic as server-side // inferToolSuccess). The Go backend uses `*bool` with omitempty so // `success` is present when explicitly set, and absent for non-tool events. - const hasExplicitSuccess = - data?.success !== undefined || resultData.success !== undefined + const hasExplicitSuccess = data?.success !== undefined || resultData.success !== undefined const explicitSuccess = data?.success ?? resultData.success const hasResultData = data?.result !== undefined || resultData.result !== undefined const hasError = !!data?.error || !!resultData.error - const success: boolean = hasExplicitSuccess - ? !!explicitSuccess - : hasResultData && !hasError + const success: boolean = hasExplicitSuccess ? 
!!explicitSuccess : hasResultData && !hasError if (!toolCallId) return if (!context.subAgentToolCalls[parentToolCallId]) return diff --git a/apps/sim/lib/copilot/messages/serialization.ts b/apps/sim/lib/copilot/messages/serialization.ts index bcc58e0cf8..29686f6bcf 100644 --- a/apps/sim/lib/copilot/messages/serialization.ts +++ b/apps/sim/lib/copilot/messages/serialization.ts @@ -1,14 +1,42 @@ import { createLogger } from '@sim/logger' +import { resolveToolDisplay } from '@/lib/copilot/store-utils' +import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' import type { CopilotMessage, CopilotToolCall } from '@/stores/panel/copilot/types' import { maskCredentialIdsInValue } from './credential-masking' const logger = createLogger('CopilotMessageSerialization') +const TERMINAL_STATES = new Set([ + ClientToolCallState.success, + ClientToolCallState.error, + ClientToolCallState.rejected, + ClientToolCallState.aborted, + ClientToolCallState.review, + ClientToolCallState.background, +]) + +/** + * Clears streaming flags and normalizes non-terminal tool call states to 'aborted'. + * This ensures that tool calls loaded from DB after a refresh/abort don't render + * as in-progress with shimmer animations or interrupt buttons. + */ export function clearStreamingFlags(toolCall: CopilotToolCall): void { if (!toolCall) return toolCall.subAgentStreaming = false + // Normalize non-terminal states when loading from DB. + // 'executing' → 'success': the server was running it, assume it completed. + // 'pending'/'generating' → 'aborted': never reached execution. + if (toolCall.state && !TERMINAL_STATES.has(toolCall.state)) { + const normalized = + toolCall.state === ClientToolCallState.executing + ? 
ClientToolCallState.success + : ClientToolCallState.aborted + toolCall.state = normalized + toolCall.display = resolveToolDisplay(toolCall.name, normalized, toolCall.id, toolCall.params) + } + if (Array.isArray(toolCall.subAgentBlocks)) { for (const block of toolCall.subAgentBlocks) { if (block?.type === 'subagent_tool_call' && block.toolCall) { From ed613c3a4f91689a2a9b55079c45e79e88d2c8d5 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 11:23:38 -0800 Subject: [PATCH 51/72] Fix --- .../components/tool-call/tool-call.tsx | 8 +- apps/sim/lib/copilot/client-sse/handlers.ts | 7 +- .../copilot/client-sse/subagent-handlers.ts | 2 +- apps/sim/lib/copilot/store-utils.ts | 29 ++-- apps/sim/stores/panel/copilot/store.ts | 128 +++++++++++++----- apps/sim/stores/panel/copilot/types.ts | 1 + 6 files changed, 125 insertions(+), 50 deletions(-) diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx index 3b7ade1855..728277c31b 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx @@ -1230,7 +1230,7 @@ function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean { } // Never show buttons for tools the user has marked as always-allowed - if (useCopilotStore.getState().autoAllowedTools.includes(toolCall.name)) { + if (useCopilotStore.getState().isToolAutoAllowed(toolCall.name)) { return false } @@ -1438,10 +1438,10 @@ export function ToolCall({ const paramsRef = useRef(params) // Check if this integration tool is auto-allowed - // Subscribe to autoAllowedTools so we re-render when it changes - const autoAllowedTools = useCopilotStore((s) => 
s.autoAllowedTools) const { removeAutoAllowedTool, setToolCallState } = useCopilotStore() - const isAutoAllowed = isIntegrationTool(toolCall.name) && autoAllowedTools.includes(toolCall.name) + const isAutoAllowed = useCopilotStore( + (s) => isIntegrationTool(toolCall.name) && s.isToolAutoAllowed(toolCall.name) + ) // Update edited params when toolCall params change (deep comparison to avoid resetting user edits on ref change) useEffect(() => { diff --git a/apps/sim/lib/copilot/client-sse/handlers.ts b/apps/sim/lib/copilot/client-sse/handlers.ts index 4dcd2dc2b4..ac9f40fb48 100644 --- a/apps/sim/lib/copilot/client-sse/handlers.ts +++ b/apps/sim/lib/copilot/client-sse/handlers.ts @@ -499,7 +499,7 @@ export const sseHandlers: Record = { const { toolCallsById } = get() if (!toolCallsById[toolCallId]) { - const isAutoAllowed = get().autoAllowedTools.includes(toolName) + const isAutoAllowed = get().isToolAutoAllowed(toolName) const initialState = isAutoAllowed ? ClientToolCallState.executing : ClientToolCallState.pending @@ -528,10 +528,7 @@ export const sseHandlers: Record = { const existing = toolCallsById[id] const toolName = name || existing?.name || 'unknown_tool' - const autoAllowedTools = get().autoAllowedTools - const isAutoAllowed = - autoAllowedTools.includes(toolName) || - (existing?.name ? autoAllowedTools.includes(existing.name) : false) + const isAutoAllowed = get().isToolAutoAllowed(toolName) let initialState = isAutoAllowed ? ClientToolCallState.executing : ClientToolCallState.pending // Avoid flickering back to pending on partial/duplicate events once a tool is executing. diff --git a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts index a974ab4c87..2cdac76e29 100644 --- a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts +++ b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts @@ -194,7 +194,7 @@ export const subAgentSSEHandlers: Record = { existingIndex >= 0 ? 
context.subAgentToolCalls[parentToolCallId][existingIndex] : undefined // Auto-allowed tools skip pending state to avoid flashing interrupt buttons - const isAutoAllowed = get().autoAllowedTools.includes(name) + const isAutoAllowed = get().isToolAutoAllowed(name) let initialState = isAutoAllowed ? ClientToolCallState.executing : ClientToolCallState.pending // Avoid flickering back to pending on partial/duplicate events once a tool is executing. diff --git a/apps/sim/lib/copilot/store-utils.ts b/apps/sim/lib/copilot/store-utils.ts index 9a124850d8..f99c42f165 100644 --- a/apps/sim/lib/copilot/store-utils.ts +++ b/apps/sim/lib/copilot/store-utils.ts @@ -90,11 +90,22 @@ export function isTerminalState(state: string): boolean { ) } +/** + * Resolves the appropriate terminal state for a non-terminal tool call. + * 'executing' → 'success': the server was running it, assume it completed. + * Everything else → 'aborted': never reached execution. + */ +function resolveAbortState(currentState: string): ClientToolCallState { + return currentState === ClientToolCallState.executing + ? 
ClientToolCallState.success + : ClientToolCallState.aborted +} + export function abortAllInProgressTools(set: StoreSet, get: () => CopilotStore) { try { const { toolCallsById, messages } = get() const updatedMap = { ...toolCallsById } - const abortedIds = new Set() + const resolvedIds = new Map() let hasUpdates = false for (const [id, tc] of Object.entries(toolCallsById)) { const st = tc.state @@ -104,12 +115,13 @@ export function abortAllInProgressTools(set: StoreSet, get: () => CopilotStore) st === ClientToolCallState.rejected || st === ClientToolCallState.aborted if (!isTerminal || isReviewState(st)) { - abortedIds.add(id) + const resolved = resolveAbortState(st) + resolvedIds.set(id, resolved) updatedMap[id] = { ...tc, - state: ClientToolCallState.aborted, + state: resolved, subAgentStreaming: false, - display: resolveToolDisplay(tc.name, ClientToolCallState.aborted, id, tc.params), + display: resolveToolDisplay(tc.name, resolved, id, tc.params), } hasUpdates = true } else if (tc.subAgentStreaming) { @@ -120,7 +132,7 @@ export function abortAllInProgressTools(set: StoreSet, get: () => CopilotStore) hasUpdates = true } } - if (abortedIds.size > 0 || hasUpdates) { + if (resolvedIds.size > 0 || hasUpdates) { set({ toolCallsById: updatedMap }) set((s: CopilotStore) => { const msgs = [...s.messages] @@ -129,17 +141,18 @@ export function abortAllInProgressTools(set: StoreSet, get: () => CopilotStore) if (m.role !== 'assistant' || !Array.isArray(m.contentBlocks)) continue let changed = false const blocks = m.contentBlocks.map((b: any) => { - if (b?.type === 'tool_call' && b.toolCall?.id && abortedIds.has(b.toolCall.id)) { + if (b?.type === 'tool_call' && b.toolCall?.id && resolvedIds.has(b.toolCall.id)) { changed = true const prev = b.toolCall + const resolved = resolvedIds.get(b.toolCall.id)! 
return { ...b, toolCall: { ...prev, - state: ClientToolCallState.aborted, + state: resolved, display: resolveToolDisplay( prev?.name, - ClientToolCallState.aborted, + resolved, prev?.id, prev?.params ), diff --git a/apps/sim/stores/panel/copilot/store.ts b/apps/sim/stores/panel/copilot/store.ts index 6c4c867a98..e0283aa01c 100644 --- a/apps/sim/stores/panel/copilot/store.ts +++ b/apps/sim/stores/panel/copilot/store.ts @@ -138,6 +138,41 @@ function updateActiveStreamEventId( writeActiveStreamToStorage(next) } +const AUTO_ALLOWED_TOOLS_STORAGE_KEY = 'copilot_auto_allowed_tools' + +function readAutoAllowedToolsFromStorage(): string[] | null { + if (typeof window === 'undefined') return null + try { + const raw = window.localStorage.getItem(AUTO_ALLOWED_TOOLS_STORAGE_KEY) + if (!raw) return null + const parsed = JSON.parse(raw) + if (!Array.isArray(parsed)) return null + return parsed.filter((item): item is string => typeof item === 'string') + } catch (error) { + logger.warn('[AutoAllowedTools] Failed to read local cache', { + error: error instanceof Error ? error.message : String(error), + }) + return null + } +} + +function writeAutoAllowedToolsToStorage(tools: string[]): void { + if (typeof window === 'undefined') return + try { + window.localStorage.setItem(AUTO_ALLOWED_TOOLS_STORAGE_KEY, JSON.stringify(tools)) + } catch (error) { + logger.warn('[AutoAllowedTools] Failed to write local cache', { + error: error instanceof Error ? error.message : String(error), + }) + } +} + +function isToolAutoAllowedByList(toolId: string, autoAllowedTools: string[]): boolean { + if (!toolId) return false + const normalizedTarget = toolId.trim() + return autoAllowedTools.some((allowed) => allowed?.trim() === normalizedTarget) +} + /** * Clear any lingering diff preview from a previous session. * Called lazily when the store is first activated (setWorkflowId). 
@@ -870,6 +905,8 @@ async function resumeFromLiveStream( return false } +const cachedAutoAllowedTools = readAutoAllowedToolsFromStorage() + // Initial state (subset required for UI/streaming) const initialState = { mode: 'build' as const, @@ -903,7 +940,8 @@ const initialState = { streamingPlanContent: '', toolCallsById: {} as Record, suppressAutoSelect: false, - autoAllowedTools: [] as string[], + autoAllowedTools: cachedAutoAllowedTools ?? ([] as string[]), + autoAllowedToolsLoaded: cachedAutoAllowedTools !== null, activeStream: null as CopilotStreamInfo | null, messageQueue: [] as import('./types').QueuedMessage[], suppressAbortContinueOption: false, @@ -940,6 +978,9 @@ export const useCopilotStore = create()( mode: get().mode, selectedModel: get().selectedModel, agentPrefetch: get().agentPrefetch, + enabledModels: get().enabledModels, + autoAllowedTools: get().autoAllowedTools, + autoAllowedToolsLoaded: get().autoAllowedToolsLoaded, }) }, @@ -1245,6 +1286,16 @@ export const useCopilotStore = create()( // Send a message (streaming only) sendMessage: async (message: string, options = {}) => { + if (!get().autoAllowedToolsLoaded) { + try { + await get().loadAutoAllowedTools() + } catch (error) { + logger.warn('[Copilot] Failed to preload auto-allowed tools before send', { + error: error instanceof Error ? error.message : String(error), + }) + } + } + const prepared = prepareSendContext(get, set, message, options as SendMessageOptionsInput) if (!prepared) return @@ -1848,6 +1899,8 @@ export const useCopilotStore = create()( context.wasAborted && !context.suppressContinueOption ? appendContinueOption(finalContent) : finalContentStripped + // Step 1: Update messages in state but keep isSendingMessage: true. + // This prevents loadChats from overwriting with stale DB data during persist. 
set((state) => { const snapshotId = state.currentUserMessageId const nextSnapshots = @@ -1868,9 +1921,7 @@ export const useCopilotStore = create()( } : msg ), - isSendingMessage: false, isAborting: false, - abortController: null, currentUserMessageId: null, messageSnapshots: nextSnapshots, } @@ -1887,31 +1938,9 @@ export const useCopilotStore = create()( await get().handleNewChatCreation(context.newChatId) } - // Process next message in queue if any - const nextInQueue = get().messageQueue[0] - if (nextInQueue) { - // Use originalMessageId if available (from edit/resend), otherwise use queue entry id - const messageIdToUse = nextInQueue.originalMessageId || nextInQueue.id - logger.debug('[Queue] Processing next queued message', { - id: nextInQueue.id, - originalMessageId: nextInQueue.originalMessageId, - messageIdToUse, - queueLength: get().messageQueue.length, - }) - // Remove from queue and send - get().removeFromQueue(nextInQueue.id) - // Use setTimeout to avoid blocking the current execution - setTimeout(() => { - get().sendMessage(nextInQueue.content, { - stream: true, - fileAttachments: nextInQueue.fileAttachments, - contexts: nextInQueue.contexts, - messageId: messageIdToUse, - }) - }, QUEUE_PROCESS_DELAY_MS) - } - - // Persist full message state (including contentBlocks), plan artifact, and config to database + // Step 2: Persist messages to DB BEFORE marking stream as done. + // loadChats checks isSendingMessage — while true it preserves in-memory messages. + // Persisting first ensures the DB is up-to-date before we allow overwrites. const { currentChat, streamingPlanContent, mode, selectedModel } = get() if (currentChat) { try { @@ -1964,6 +1993,34 @@ export const useCopilotStore = create()( } } + // Step 3: NOW mark stream as done. DB is up-to-date, so if loadChats + // overwrites messages it will use the persisted (correct) data. 
+ set({ isSendingMessage: false, abortController: null }) + + // Process next message in queue if any + const nextInQueue = get().messageQueue[0] + if (nextInQueue) { + // Use originalMessageId if available (from edit/resend), otherwise use queue entry id + const messageIdToUse = nextInQueue.originalMessageId || nextInQueue.id + logger.debug('[Queue] Processing next queued message', { + id: nextInQueue.id, + originalMessageId: nextInQueue.originalMessageId, + messageIdToUse, + queueLength: get().messageQueue.length, + }) + // Remove from queue and send + get().removeFromQueue(nextInQueue.id) + // Use setTimeout to avoid blocking the current execution + setTimeout(() => { + get().sendMessage(nextInQueue.content, { + stream: true, + fileAttachments: nextInQueue.fileAttachments, + contexts: nextInQueue.contexts, + messageId: messageIdToUse, + }) + }, QUEUE_PROCESS_DELAY_MS) + } + // Invalidate subscription queries to update usage setTimeout(() => { const queryClient = getQueryClient() @@ -2142,12 +2199,15 @@ export const useCopilotStore = create()( if (res.ok) { const data = await res.json() const tools = data.autoAllowedTools ?? [] - set({ autoAllowedTools: tools }) + set({ autoAllowedTools: tools, autoAllowedToolsLoaded: true }) + writeAutoAllowedToolsToStorage(tools) logger.debug('[AutoAllowedTools] Loaded successfully', { count: tools.length, tools }) } else { + set({ autoAllowedToolsLoaded: true }) logger.warn('[AutoAllowedTools] Load failed with status', { status: res.status }) } } catch (err) { + set({ autoAllowedToolsLoaded: true }) logger.error('[AutoAllowedTools] Failed to load', { error: err }) } }, @@ -2164,7 +2224,9 @@ export const useCopilotStore = create()( if (res.ok) { const data = await res.json() logger.debug('[AutoAllowedTools] API returned', { toolId, tools: data.autoAllowedTools }) - set({ autoAllowedTools: data.autoAllowedTools ?? [] }) + const tools = data.autoAllowedTools ?? 
[] + set({ autoAllowedTools: tools, autoAllowedToolsLoaded: true }) + writeAutoAllowedToolsToStorage(tools) logger.debug('[AutoAllowedTools] Added tool to store', { toolId }) } } catch (err) { @@ -2182,7 +2244,9 @@ export const useCopilotStore = create()( ) if (res.ok) { const data = await res.json() - set({ autoAllowedTools: data.autoAllowedTools ?? [] }) + const tools = data.autoAllowedTools ?? [] + set({ autoAllowedTools: tools, autoAllowedToolsLoaded: true }) + writeAutoAllowedToolsToStorage(tools) logger.debug('[AutoAllowedTools] Removed tool', { toolId }) } } catch (err) { @@ -2192,7 +2256,7 @@ export const useCopilotStore = create()( isToolAutoAllowed: (toolId: string) => { const { autoAllowedTools } = get() - return autoAllowedTools.includes(toolId) + return isToolAutoAllowedByList(toolId, autoAllowedTools) }, // Credential masking diff --git a/apps/sim/stores/panel/copilot/types.ts b/apps/sim/stores/panel/copilot/types.ts index e03c07f9dc..06b7532321 100644 --- a/apps/sim/stores/panel/copilot/types.ts +++ b/apps/sim/stores/panel/copilot/types.ts @@ -167,6 +167,7 @@ export interface CopilotState { // Auto-allowed integration tools (tools that can run without confirmation) autoAllowedTools: string[] + autoAllowedToolsLoaded: boolean // Active stream metadata for reconnect/replay activeStream: CopilotStreamInfo | null From 4698f731a95b219d65e373a5e6660c433dffcd09 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 11:24:23 -0800 Subject: [PATCH 52/72] Fix lint --- .../[...issuer]/route.ts | 2 +- .../api/mcp/copilot/route.ts | 2 +- .../oauth-authorization-server/route.ts | 2 +- .../api/mcp/copilot/route.ts | 2 +- .../.well-known/oauth-protected-resource/route.ts | 2 +- .../oauth-authorization-server/route.ts | 2 +- .../.well-known/oauth-protected-resource/route.ts | 2 +- apps/sim/app/api/mcp/copilot/route.ts | 15 +++++++++------ .../copilot/orchestrator/tool-executor/index.ts | 3 +-- apps/sim/lib/copilot/store-utils.ts | 7 +------ 
apps/sim/lib/copilot/tools/mcp/definitions.ts | 3 +-- apps/sim/lib/core/config/feature-flags.ts | 6 +++--- 12 files changed, 22 insertions(+), 26 deletions(-) diff --git a/apps/sim/app/.well-known/oauth-authorization-server/[...issuer]/route.ts b/apps/sim/app/.well-known/oauth-authorization-server/[...issuer]/route.ts index fb83dcfbee..9dd3d6bd4c 100644 --- a/apps/sim/app/.well-known/oauth-authorization-server/[...issuer]/route.ts +++ b/apps/sim/app/.well-known/oauth-authorization-server/[...issuer]/route.ts @@ -1,4 +1,4 @@ -import { type NextRequest, NextResponse } from 'next/server' +import type { NextRequest, NextResponse } from 'next/server' import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery' export async function GET(request: NextRequest): Promise { diff --git a/apps/sim/app/.well-known/oauth-authorization-server/api/mcp/copilot/route.ts b/apps/sim/app/.well-known/oauth-authorization-server/api/mcp/copilot/route.ts index fb83dcfbee..9dd3d6bd4c 100644 --- a/apps/sim/app/.well-known/oauth-authorization-server/api/mcp/copilot/route.ts +++ b/apps/sim/app/.well-known/oauth-authorization-server/api/mcp/copilot/route.ts @@ -1,4 +1,4 @@ -import { type NextRequest, NextResponse } from 'next/server' +import type { NextRequest, NextResponse } from 'next/server' import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery' export async function GET(request: NextRequest): Promise { diff --git a/apps/sim/app/.well-known/oauth-authorization-server/route.ts b/apps/sim/app/.well-known/oauth-authorization-server/route.ts index fb83dcfbee..9dd3d6bd4c 100644 --- a/apps/sim/app/.well-known/oauth-authorization-server/route.ts +++ b/apps/sim/app/.well-known/oauth-authorization-server/route.ts @@ -1,4 +1,4 @@ -import { type NextRequest, NextResponse } from 'next/server' +import type { NextRequest, NextResponse } from 'next/server' import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery' 
export async function GET(request: NextRequest): Promise { diff --git a/apps/sim/app/.well-known/oauth-protected-resource/api/mcp/copilot/route.ts b/apps/sim/app/.well-known/oauth-protected-resource/api/mcp/copilot/route.ts index 2ab9b52be6..d1136b555c 100644 --- a/apps/sim/app/.well-known/oauth-protected-resource/api/mcp/copilot/route.ts +++ b/apps/sim/app/.well-known/oauth-protected-resource/api/mcp/copilot/route.ts @@ -1,4 +1,4 @@ -import { type NextRequest, NextResponse } from 'next/server' +import type { NextRequest, NextResponse } from 'next/server' import { createMcpProtectedResourceMetadataResponse } from '@/lib/mcp/oauth-discovery' export async function GET(request: NextRequest): Promise { diff --git a/apps/sim/app/.well-known/oauth-protected-resource/route.ts b/apps/sim/app/.well-known/oauth-protected-resource/route.ts index 2ab9b52be6..d1136b555c 100644 --- a/apps/sim/app/.well-known/oauth-protected-resource/route.ts +++ b/apps/sim/app/.well-known/oauth-protected-resource/route.ts @@ -1,4 +1,4 @@ -import { type NextRequest, NextResponse } from 'next/server' +import type { NextRequest, NextResponse } from 'next/server' import { createMcpProtectedResourceMetadataResponse } from '@/lib/mcp/oauth-discovery' export async function GET(request: NextRequest): Promise { diff --git a/apps/sim/app/api/mcp/copilot/.well-known/oauth-authorization-server/route.ts b/apps/sim/app/api/mcp/copilot/.well-known/oauth-authorization-server/route.ts index fb83dcfbee..9dd3d6bd4c 100644 --- a/apps/sim/app/api/mcp/copilot/.well-known/oauth-authorization-server/route.ts +++ b/apps/sim/app/api/mcp/copilot/.well-known/oauth-authorization-server/route.ts @@ -1,4 +1,4 @@ -import { type NextRequest, NextResponse } from 'next/server' +import type { NextRequest, NextResponse } from 'next/server' import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery' export async function GET(request: NextRequest): Promise { diff --git 
a/apps/sim/app/api/mcp/copilot/.well-known/oauth-protected-resource/route.ts b/apps/sim/app/api/mcp/copilot/.well-known/oauth-protected-resource/route.ts index 2ab9b52be6..d1136b555c 100644 --- a/apps/sim/app/api/mcp/copilot/.well-known/oauth-protected-resource/route.ts +++ b/apps/sim/app/api/mcp/copilot/.well-known/oauth-protected-resource/route.ts @@ -1,4 +1,4 @@ -import { type NextRequest, NextResponse } from 'next/server' +import type { NextRequest, NextResponse } from 'next/server' import { createMcpProtectedResourceMetadataResponse } from '@/lib/mcp/oauth-discovery' export async function GET(request: NextRequest): Promise { diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts index a492889944..4d02ab122f 100644 --- a/apps/sim/app/api/mcp/copilot/route.ts +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -1,3 +1,4 @@ +import { randomUUID } from 'node:crypto' import { Server } from '@modelcontextprotocol/sdk/server/index.js' import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js' import { @@ -5,15 +6,14 @@ import { type CallToolResult, ErrorCode, type JSONRPCError, - type ListToolsResult, ListToolsRequestSchema, + type ListToolsResult, McpError, type RequestId, } from '@modelcontextprotocol/sdk/types.js' import { db } from '@sim/db' import { userStats } from '@sim/db/schema' import { createLogger } from '@sim/logger' -import { randomUUID } from 'node:crypto' import { eq, sql } from 'drizzle-orm' import { type NextRequest, NextResponse } from 'next/server' import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription' @@ -23,8 +23,6 @@ import { SIM_AGENT_API_URL, SIM_AGENT_VERSION, } from '@/lib/copilot/constants' -import { RateLimiter } from '@/lib/core/rate-limiter' -import { env } from '@/lib/core/config/env' import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator' import { orchestrateSubagentStream } from '@/lib/copilot/orchestrator/subagent' import { 
@@ -32,6 +30,8 @@ import { prepareExecutionContext, } from '@/lib/copilot/orchestrator/tool-executor' import { DIRECT_TOOL_DEFS, SUBAGENT_TOOL_DEFS } from '@/lib/copilot/tools/mcp/definitions' +import { env } from '@/lib/core/config/env' +import { RateLimiter } from '@/lib/core/rate-limiter' import { resolveWorkflowIdForUser } from '@/lib/workflows/utils' const logger = createLogger('CopilotMcpAPI') @@ -103,7 +103,8 @@ async function authenticateCopilotApiKey(apiKey: string): Promise } | undefined + const params = request.params as + | { name?: string; arguments?: Record } + | undefined if (!params?.name) { throw new McpError(ErrorCode.InvalidParams, 'Tool name required') } diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts index b56abd4366..cb59feb4cd 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts @@ -265,8 +265,7 @@ export async function markToolComplete( clearTimeout(timeoutId) } } catch (error) { - const isTimeout = - error instanceof DOMException && error.name === 'AbortError' + const isTimeout = error instanceof DOMException && error.name === 'AbortError' logger.error('Mark-complete call failed', { toolCallId, toolName, diff --git a/apps/sim/lib/copilot/store-utils.ts b/apps/sim/lib/copilot/store-utils.ts index f99c42f165..267d368c2c 100644 --- a/apps/sim/lib/copilot/store-utils.ts +++ b/apps/sim/lib/copilot/store-utils.ts @@ -150,12 +150,7 @@ export function abortAllInProgressTools(set: StoreSet, get: () => CopilotStore) toolCall: { ...prev, state: resolved, - display: resolveToolDisplay( - prev?.name, - resolved, - prev?.id, - prev?.params - ), + display: resolveToolDisplay(prev?.name, resolved, prev?.id, prev?.params), }, } } diff --git a/apps/sim/lib/copilot/tools/mcp/definitions.ts b/apps/sim/lib/copilot/tools/mcp/definitions.ts index a2876a56a8..859c23405c 100644 --- 
a/apps/sim/lib/copilot/tools/mcp/definitions.ts +++ b/apps/sim/lib/copilot/tools/mcp/definitions.ts @@ -481,8 +481,7 @@ After sim_edit completes, you can test immediately with sim_test, or deploy with }, plan: { type: 'object', - description: - 'The plan object from sim_plan. Pass it EXACTLY as returned, do not modify.', + description: 'The plan object from sim_plan. Pass it EXACTLY as returned, do not modify.', }, context: { type: 'object', diff --git a/apps/sim/lib/core/config/feature-flags.ts b/apps/sim/lib/core/config/feature-flags.ts index 6e65bebd4e..81bc4398f9 100644 --- a/apps/sim/lib/core/config/feature-flags.ts +++ b/apps/sim/lib/core/config/feature-flags.ts @@ -1,7 +1,7 @@ /** * Environment utility functions for consistent environment detection across the application */ -import { env, getEnv, isFalsy, isTruthy } from './env' +import { env, isFalsy, isTruthy } from './env' /** * Is the application running in production mode @@ -22,8 +22,8 @@ export const isTest = env.NODE_ENV === 'test' * Is this the hosted version of the application */ export const isHosted = true - // getEnv('NEXT_PUBLIC_APP_URL') === 'https://www.sim.ai' || - // getEnv('NEXT_PUBLIC_APP_URL') === 'https://www.staging.sim.ai' +// getEnv('NEXT_PUBLIC_APP_URL') === 'https://www.sim.ai' || +// getEnv('NEXT_PUBLIC_APP_URL') === 'https://www.staging.sim.ai' /** * Is billing enforcement enabled From 79af30326574d0d83eb799bc56cf76263df64498 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 11:57:27 -0800 Subject: [PATCH 53/72] Fix superagent and autoallow integrations --- .../components/tool-call/tool-call.tsx | 4 +- apps/sim/lib/copilot/chat-payload.ts | 39 ++++++----- apps/sim/lib/copilot/client-sse/handlers.ts | 25 ++++++- .../copilot/client-sse/subagent-handlers.ts | 13 +++- .../orchestrator/sse-handlers/handlers.ts | 65 ++++++++++++++++++- .../orchestrator/tool-executor/index.ts | 13 ++++ apps/sim/tools/params.ts | 1 + 7 files changed, 139 insertions(+), 21 
deletions(-) diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx index 728277c31b..9c5ca7b78d 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx @@ -1239,8 +1239,8 @@ function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean { return true } - const mode = useCopilotStore.getState().mode - if (mode === 'build' && isIntegrationTool(toolCall.name)) { + // Integration tools (user-installed) always require approval + if (isIntegrationTool(toolCall.name)) { return true } diff --git a/apps/sim/lib/copilot/chat-payload.ts b/apps/sim/lib/copilot/chat-payload.ts index f6eefbab66..9f12f3730f 100644 --- a/apps/sim/lib/copilot/chat-payload.ts +++ b/apps/sim/lib/copilot/chat-payload.ts @@ -155,7 +155,7 @@ export async function buildCopilotRequestPayload( messages.push({ role: 'user', content: message }) } - let integrationTools: ToolSchema[] = [] + const integrationTools: ToolSchema[] = [] let credentials: CredentialsPayload | null = null if (effectiveMode === 'build') { @@ -195,22 +195,29 @@ export async function buildCopilotRequestPayload( const { createUserToolSchema } = await import('@/tools/params') const latestTools = getLatestVersionTools(tools) - integrationTools = Object.entries(latestTools).map(([toolId, toolConfig]) => { - const userSchema = createUserToolSchema(toolConfig) - const strippedName = stripVersionSuffix(toolId) - return { - name: strippedName, - description: toolConfig.description || toolConfig.name || strippedName, - input_schema: userSchema as unknown as Record, - defer_loading: true, - ...(toolConfig.oauth?.required && { - 
oauth: { - required: true, - provider: toolConfig.oauth.provider, - }, - }), + for (const [toolId, toolConfig] of Object.entries(latestTools)) { + try { + const userSchema = createUserToolSchema(toolConfig) + const strippedName = stripVersionSuffix(toolId) + integrationTools.push({ + name: strippedName, + description: toolConfig.description || toolConfig.name || strippedName, + input_schema: userSchema as unknown as Record, + defer_loading: true, + ...(toolConfig.oauth?.required && { + oauth: { + required: true, + provider: toolConfig.oauth.provider, + }, + }), + }) + } catch (toolError) { + logger.warn('Failed to build schema for tool, skipping', { + toolId, + error: toolError instanceof Error ? toolError.message : String(toolError), + }) } - }) + } } catch (error) { logger.warn('Failed to build tool schemas for payload', { error: error instanceof Error ? error.message : String(error), diff --git a/apps/sim/lib/copilot/client-sse/handlers.ts b/apps/sim/lib/copilot/client-sse/handlers.ts index ac9f40fb48..cc294fb602 100644 --- a/apps/sim/lib/copilot/client-sse/handlers.ts +++ b/apps/sim/lib/copilot/client-sse/handlers.ts @@ -1,5 +1,5 @@ import { createLogger } from '@sim/logger' -import { STREAM_STORAGE_KEY } from '@/lib/copilot/constants' +import { COPILOT_CONFIRM_API_PATH, STREAM_STORAGE_KEY } from '@/lib/copilot/constants' import { asRecord } from '@/lib/copilot/orchestrator/sse-utils' import type { SSEEvent } from '@/lib/copilot/orchestrator/types' import { @@ -24,6 +24,23 @@ const MAX_BATCH_INTERVAL = 50 const MIN_BATCH_INTERVAL = 16 const MAX_QUEUE_SIZE = 5 +/** + * Send an auto-accept confirmation to the server for auto-allowed tools. + * The server-side orchestrator polls Redis for this decision. 
+ */ +export function sendAutoAcceptConfirmation(toolCallId: string): void { + fetch(COPILOT_CONFIRM_API_PATH, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ toolCallId, status: 'accepted' }), + }).catch((error) => { + logger.warn('Failed to send auto-accept confirmation', { + toolCallId, + error: error instanceof Error ? error.message : String(error), + }) + }) +} + function writeActiveStreamToStorage(info: CopilotStreamInfo | null): void { if (typeof window === 'undefined') return try { @@ -565,6 +582,12 @@ export const sseHandlers: Record = { return } + // Auto-allowed tools: send confirmation to the server so it can proceed + // without waiting for the user to click "Allow". + if (isAutoAllowed) { + sendAutoAcceptConfirmation(id) + } + // OAuth: dispatch event to open the OAuth connect modal if (toolName === 'oauth_request_access' && args && typeof window !== 'undefined') { try { diff --git a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts index 2cdac76e29..aa07b21d35 100644 --- a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts +++ b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts @@ -9,7 +9,12 @@ import type { SSEEvent } from '@/lib/copilot/orchestrator/types' import { resolveToolDisplay } from '@/lib/copilot/store-utils' import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' import type { CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types' -import { type SSEHandler, sseHandlers, updateStreamingMessage } from './handlers' +import { + type SSEHandler, + sendAutoAcceptConfirmation, + sseHandlers, + updateStreamingMessage, +} from './handlers' import type { ClientStreamingContext } from './types' const logger = createLogger('CopilotClientSubagentHandlers') @@ -234,6 +239,12 @@ export const subAgentSSEHandlers: Record = { if (isPartial) { return } + + // Auto-allowed tools: send confirmation to the 
server so it can proceed + // without waiting for the user to click "Allow". + if (isAutoAllowed) { + sendAutoAcceptConfirmation(id) + } }, tool_result: (data, context, get, set) => { diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts index 8b5025897c..111fe20477 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts @@ -8,6 +8,7 @@ import { wasToolResultSeen, } from '@/lib/copilot/orchestrator/sse-utils' import { + isIntegrationTool, isToolAvailableOnSimSide, markToolComplete, } from '@/lib/copilot/orchestrator/tool-executor' @@ -171,8 +172,10 @@ export const sseHandlers: Record = { const isInterruptTool = isInterruptToolName(toolName) const isInteractive = options.interactive === true + // Integration tools (user-installed) also require approval in interactive mode + const needsApproval = isInterruptTool || isIntegrationTool(toolName) - if (isInterruptTool && isInteractive) { + if (needsApproval && isInteractive) { const decision = await waitForToolDecision( toolCallId, options.timeout || STREAM_TIMEOUT_MS, @@ -372,6 +375,66 @@ export const subAgentHandlers: Record = { return } + // Integration tools (user-installed) require approval in interactive mode, + // same as top-level interrupt tools. 
+ if (options.interactive === true && isIntegrationTool(toolName)) { + const decision = await waitForToolDecision( + toolCallId, + options.timeout || STREAM_TIMEOUT_MS, + options.abortSignal + ) + if (decision?.status === 'accepted' || decision?.status === 'success') { + await executeToolAndReport(toolCallId, context, execContext, options) + return + } + if (decision?.status === 'rejected' || decision?.status === 'error') { + toolCall.status = 'rejected' + toolCall.endTime = Date.now() + await markToolComplete( + toolCall.id, + toolCall.name, + 400, + decision.message || 'Tool execution rejected', + { skipped: true, reason: 'user_rejected' } + ) + markToolResultSeen(toolCall.id) + await options?.onEvent?.({ + type: 'tool_result', + toolCallId: toolCall.id, + data: { + id: toolCall.id, + name: toolCall.name, + success: false, + result: { skipped: true, reason: 'user_rejected' }, + }, + }) + return + } + if (decision?.status === 'background') { + toolCall.status = 'skipped' + toolCall.endTime = Date.now() + await markToolComplete( + toolCall.id, + toolCall.name, + 202, + decision.message || 'Tool execution moved to background', + { background: true } + ) + markToolResultSeen(toolCall.id) + await options?.onEvent?.({ + type: 'tool_result', + toolCallId: toolCall.id, + data: { + id: toolCall.id, + name: toolCall.name, + success: true, + result: { background: true }, + }, + }) + return + } + } + if (options.autoExecuteTools !== false) { await executeToolAndReport(toolCallId, context, execContext, options) } diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts index cb59feb4cd..ed16a16e51 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts @@ -151,6 +151,19 @@ export function isToolAvailableOnSimSide(toolName: string): boolean { return !!getTool(resolvedToolName) } +/** + * Check whether a tool is a user-installed 
integration tool (e.g. Gmail, Slack). + * These tools exist in the tool registry but are NOT copilot server tools or + * known workflow manipulation tools. They should require user approval in + * interactive mode. + */ +export function isIntegrationTool(toolName: string): boolean { + if (SERVER_TOOLS.has(toolName)) return false + if (toolName in SIM_WORKFLOW_TOOL_HANDLERS) return false + const resolvedToolName = resolveToolId(toolName) + return !!getTool(resolvedToolName) +} + /** * Execute a tool server-side without calling internal routes. */ diff --git a/apps/sim/tools/params.ts b/apps/sim/tools/params.ts index 9ac5a9788b..e1bb8fe7b5 100644 --- a/apps/sim/tools/params.ts +++ b/apps/sim/tools/params.ts @@ -401,6 +401,7 @@ export function createUserToolSchema(toolConfig: ToolConfig): ToolSchema { } for (const [paramId, param] of Object.entries(toolConfig.params)) { + if (!param) continue const visibility = param.visibility ?? 'user-or-llm' if (visibility === 'hidden') { continue From c086912781746e3cdb3163cfa615d5ec5b0c46eb Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 12:11:53 -0800 Subject: [PATCH 54/72] Fix always allow --- .../components/copilot/components/tool-call/tool-call.tsx | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx index 9c5ca7b78d..98fd19a7e6 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx @@ -1362,9 +1362,7 @@ function RunSkipButtons({ setButtonsHidden(true) try { await addAutoAllowedTool(toolCall.name) - if (!isIntegrationTool(toolCall.name)) 
{ - await handleRun(toolCall, setToolCallState, onStateChange, editedParams) - } + await handleRun(toolCall, setToolCallState, onStateChange, editedParams) } finally { setIsProcessing(false) actionInProgressRef.current = false From 9a47033f2299b496ccffa6bef59d42a7b6f88667 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 13:07:39 -0800 Subject: [PATCH 55/72] Update block --- .../tool-executor/deployment-tools/deploy.ts | 37 +- .../orchestrator/tool-executor/index.ts | 27 + apps/sim/lib/copilot/tools/mcp/definitions.ts | 2 +- bun.lock | 512 +++++++++--------- 4 files changed, 320 insertions(+), 258 deletions(-) diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/deploy.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/deploy.ts index e876ed19d7..9d8f2b7839 100644 --- a/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/deploy.ts +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/deploy.ts @@ -3,6 +3,7 @@ import { db } from '@sim/db' import { chat, workflowMcpTool } from '@sim/db/schema' import { and, eq } from 'drizzle-orm' import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types' +import { getBaseUrl } from '@/lib/core/utils/urls' import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema' import { deployWorkflow, undeployWorkflow } from '@/lib/workflows/persistence/utils' import { checkChatAccess, checkWorkflowAccessForChatCreation } from '@/app/api/chat/utils' @@ -38,6 +39,7 @@ export async function executeDeployApi( return { success: false, error: result.error || 'Failed to deploy workflow' } } + const baseUrl = getBaseUrl() return { success: true, output: { @@ -45,6 +47,8 @@ export async function executeDeployApi( isDeployed: true, deployedAt: result.deployedAt, version: result.version, + apiEndpoint: `${baseUrl}/api/workflows/${workflowId}/run`, + baseUrl, }, } } catch (error) { @@ -177,9 +181,18 @@ export async function 
executeDeployChat( }) } + const baseUrl = getBaseUrl() return { success: true, - output: { success: true, action: 'deploy', isDeployed: true, identifier }, + output: { + success: true, + action: 'deploy', + isDeployed: true, + identifier, + chatUrl: `${baseUrl}/chat/${identifier}`, + apiEndpoint: `${baseUrl}/api/workflows/${workflowId}/run`, + baseUrl, + }, } } catch (error) { return { success: false, error: error instanceof Error ? error.message : String(error) } @@ -234,6 +247,9 @@ export async function executeDeployMcp( `Execute ${workflowRecord.name} workflow` const parameterSchema = params.parameterSchema || {} + const baseUrl = getBaseUrl() + const mcpServerUrl = `${baseUrl}/api/mcp/serve/${serverId}` + if (existingTool.length > 0) { const toolId = existingTool[0].id await db @@ -245,7 +261,10 @@ export async function executeDeployMcp( updatedAt: new Date(), }) .where(eq(workflowMcpTool.id, toolId)) - return { success: true, output: { toolId, toolName, toolDescription, updated: true } } + return { + success: true, + output: { toolId, toolName, toolDescription, updated: true, mcpServerUrl, baseUrl }, + } } const toolId = crypto.randomUUID() @@ -260,7 +279,10 @@ export async function executeDeployMcp( updatedAt: new Date(), }) - return { success: true, output: { toolId, toolName, toolDescription, updated: false } } + return { + success: true, + output: { toolId, toolName, toolDescription, updated: false, mcpServerUrl, baseUrl }, + } } catch (error) { return { success: false, error: error instanceof Error ? 
error.message : String(error) } } @@ -278,9 +300,16 @@ export async function executeRedeploy(context: ExecutionContext): Promise executeCreateWorkspaceMcpServer(p as CreateWorkspaceMcpServerParams, c), + oauth_get_auth_link: async (p, _c) => { + const providerName = (p.providerName || p.provider_name || 'the provider') as string + try { + const baseUrl = getBaseUrl() + const settingsUrl = `${baseUrl}/workspace` + return { + success: true, + output: { + message: `To connect ${providerName}, the user must authorize via their browser.`, + oauth_url: settingsUrl, + instructions: `Open ${settingsUrl} in a browser and go to the workflow editor to connect ${providerName} credentials.`, + provider: providerName, + baseUrl, + }, + } + } catch { + return { + success: true, + output: { + message: `To connect ${providerName}, the user must authorize via their browser.`, + instructions: `Open the Sim workspace in a browser and go to the workflow editor to connect ${providerName} credentials.`, + provider: providerName, + }, + } + } + }, } /** diff --git a/apps/sim/lib/copilot/tools/mcp/definitions.ts b/apps/sim/lib/copilot/tools/mcp/definitions.ts index 859c23405c..0dc26951b2 100644 --- a/apps/sim/lib/copilot/tools/mcp/definitions.ts +++ b/apps/sim/lib/copilot/tools/mcp/definitions.ts @@ -567,7 +567,7 @@ Supports full and partial execution: name: 'sim_auth', agentId: 'auth', description: - 'Check OAuth connection status, list connected services, and initiate new OAuth connections. Use when a workflow needs third-party service access (Google, Slack, GitHub, etc.).', + 'Check OAuth connection status, list connected services, and initiate new OAuth connections. Use when a workflow needs third-party service access (Google, Slack, GitHub, etc.). 
In MCP/headless mode, returns an authorization URL the user must open in their browser to complete the OAuth flow.', inputSchema: { type: 'object', properties: { diff --git a/bun.lock b/bun.lock index 4b18dedf8a..55ae41dde4 100644 --- a/bun.lock +++ b/bun.lock @@ -329,19 +329,19 @@ "@adobe/css-tools": ["@adobe/css-tools@4.4.4", "", {}, "sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg=="], - "@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.57", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-DREpYqW2pylgaj69gZ+K8u92bo9DaMgFdictYnY+IwYeY3bawQ4zI7l/o1VkDsBDljAx8iYz5lPURwVZNu+Xpg=="], + "@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.60", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-hpabbvnTHIP7y85TeFwkDHPveOxsMaCWTRRd1vb9My2EtJBKXGBG4eZhcR+DU98z1lXOlPRu1oGZhVNPttDW8g=="], "@ai-sdk/azure": ["@ai-sdk/azure@2.0.91", "", { "dependencies": { "@ai-sdk/openai": "2.0.89", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-9tznVSs6LGQNKKxb8pKd7CkBV9yk+a/ENpFicHCj2CmBUKefxzwJ9JbUqrlK3VF6dGZw3LXq0dWxt7/Yekaj1w=="], - "@ai-sdk/cerebras": ["@ai-sdk/cerebras@1.0.35", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.31", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-JrNdMYptrOUjNthibgBeAcBjZ/H+fXb49sSrWhOx5Aq8eUcrYvwQ2DtSAi8VraHssZu78NAnBMrgFWSUOTXFxw=="], + "@ai-sdk/cerebras": ["@ai-sdk/cerebras@1.0.36", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.32", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, 
"sha512-zoJYL33+ieyd86FSP0Whm86D79d1lKPR7wUzh1SZ1oTxwYmsGyvIrmMf2Ll0JA9Ds2Es6qik4VaFCrjwGYRTIQ=="], "@ai-sdk/deepseek": ["@ai-sdk/deepseek@1.0.33", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-NiKjvqXI/96e/7SjZGgQH141PBqggsF7fNbjGTv4RgVWayMXp9mj0Ou2NjAUGwwxJwj/qseY0gXiDCYaHWFBkw=="], - "@ai-sdk/gateway": ["@ai-sdk/gateway@2.0.29", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20", "@vercel/oidc": "3.1.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-1b7E9F/B5gex/1uCkhs+sGIbH0KsZOItHnNz3iY5ir+nc4ZUA6WOU5Cu2w1USlc+3UVbhf+H+iNLlxVjLe4VvQ=="], + "@ai-sdk/gateway": ["@ai-sdk/gateway@2.0.35", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20", "@vercel/oidc": "3.1.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-fMzhC9artgY2s2GgXEWB+cECRJEHHoFJKzDpzsuneguNQ656vydPHhvDdoMjbWW+UtLc4nGf3VwlqG0t4FeQ/w=="], "@ai-sdk/google": ["@ai-sdk/google@2.0.52", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-2XUnGi3f7TV4ujoAhA+Fg3idUoG/+Y2xjCRg70a1/m0DH1KSQqYaCboJ1C19y6ZHGdf5KNT20eJdswP6TvrY2g=="], - "@ai-sdk/google-vertex": ["@ai-sdk/google-vertex@3.0.97", "", { "dependencies": { "@ai-sdk/anthropic": "2.0.57", "@ai-sdk/google": "2.0.52", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20", "google-auth-library": "^10.5.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-s4tI7Z15i6FlbtCvS4SBRal8wRfkOXJzKxlS6cU4mJW/QfUfoVy4b22836NVNJwDvkG/HkDSfzwm/X8mn46MhA=="], + "@ai-sdk/google-vertex": ["@ai-sdk/google-vertex@3.0.100", "", { "dependencies": { "@ai-sdk/anthropic": "2.0.60", "@ai-sdk/google": "2.0.52", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20", "google-auth-library": "^10.5.0" }, "peerDependencies": { "zod": 
"^3.25.76 || ^4.1.8" } }, "sha512-4iqwr5mRdUanNbCP0S+7IDxgNtMLR/4oj5UaFwzfw6jR5yq9wKujfuKD4TCbgOExVIfP+ASQ8tS6RXtBTyuDXA=="], "@ai-sdk/groq": ["@ai-sdk/groq@2.0.34", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-wfCYkVgmVjxNA32T57KbLabVnv9aFUflJ4urJ7eWgTwbnmGQHElCTu+rJ3ydxkXSqxOkXPwMOttDm7XNrvPjmg=="], @@ -349,7 +349,7 @@ "@ai-sdk/openai": ["@ai-sdk/openai@2.0.89", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-4+qWkBCbL9HPKbgrUO/F2uXZ8GqrYxHa8SWEYIzxEJ9zvWw3ISr3t1/27O1i8MGSym+PzEyHBT48EV4LAwWaEw=="], - "@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.31", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-znBvaVHM0M6yWNerIEy3hR+O8ZK2sPcE7e2cxfb6kYLEX3k//JH5VDnRnajseVofg7LXtTCFFdjsB7WLf1BdeQ=="], + "@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.32", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-YspqqyJPzHjqWrjt4y/Wgc2aJgCcQj5uIJgZpq2Ar/lH30cEVhgE+keePDbjKpetD9UwNggCj7u6kO3unS23OQ=="], "@ai-sdk/perplexity": ["@ai-sdk/perplexity@2.0.23", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-aiaRvnc6mhQZKhTTSXPCjPH8Iqr5D/PfCN1hgVP/3RGTBbJtsd9HemIBSABeSdAKbsMH/PwJxgnqH75HEamcBA=="], @@ -357,9 +357,9 @@ "@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, 
"sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="], - "@ai-sdk/togetherai": ["@ai-sdk/togetherai@1.0.33", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.31", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-zb9sEr94tLGNtuqyMfVhLJqyfBPsfa4E21PIAo+Bm/tgw7xPqSBpQp8iDj7ydetl2wzQfw20zE97UgZv6mR/OQ=="], + "@ai-sdk/togetherai": ["@ai-sdk/togetherai@1.0.34", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.32", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-jjJmJms6kdEc4nC3MDGFJfhV8F1ifY4nolV2dbnT7BM4ab+Wkskc0GwCsJ7G7WdRMk7xDbFh4he3DPL8KJ/cyA=="], - "@ai-sdk/xai": ["@ai-sdk/xai@2.0.55", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.31", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-rzecWIMUJvja41kbH3d1CyKYxbuRRCM8J316I7HLrNxwztFQXwMKYllu4oOyQc33tlOASytKRpY4WF5QyoUkQQ=="], + "@ai-sdk/xai": ["@ai-sdk/xai@2.0.57", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.32", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-qeEPqKtE+bFfeY/60+wBQYwea8ULeY0KEFh+Hr4BaMzzOvAUM4vsKbeccSu0nekVC+gH5WNb/vTfVOP6m8XeIg=="], "@alloc/quick-lru": ["@alloc/quick-lru@5.2.0", "", {}, "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw=="], @@ -395,9 +395,9 @@ "@aws-sdk/client-rds-data": ["@aws-sdk/client-rds-data@3.940.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "3.940.0", "@aws-sdk/credential-provider-node": "3.940.0", "@aws-sdk/middleware-host-header": "3.936.0", "@aws-sdk/middleware-logger": "3.936.0", "@aws-sdk/middleware-recursion-detection": "3.936.0", 
"@aws-sdk/middleware-user-agent": "3.940.0", "@aws-sdk/region-config-resolver": "3.936.0", "@aws-sdk/types": "3.936.0", "@aws-sdk/util-endpoints": "3.936.0", "@aws-sdk/util-user-agent-browser": "3.936.0", "@aws-sdk/util-user-agent-node": "3.940.0", "@smithy/config-resolver": "^4.4.3", "@smithy/core": "^3.18.5", "@smithy/fetch-http-handler": "^5.3.6", "@smithy/hash-node": "^4.2.5", "@smithy/invalid-dependency": "^4.2.5", "@smithy/middleware-content-length": "^4.2.5", "@smithy/middleware-endpoint": "^4.3.12", "@smithy/middleware-retry": "^4.4.12", "@smithy/middleware-serde": "^4.2.6", "@smithy/middleware-stack": "^4.2.5", "@smithy/node-config-provider": "^4.3.5", "@smithy/node-http-handler": "^4.4.5", "@smithy/protocol-http": "^5.3.5", "@smithy/smithy-client": "^4.9.8", "@smithy/types": "^4.9.0", "@smithy/url-parser": "^4.2.5", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.11", "@smithy/util-defaults-mode-node": "^4.2.14", "@smithy/util-endpoints": "^3.2.5", "@smithy/util-middleware": "^4.2.5", "@smithy/util-retry": "^4.2.5", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-68NH61MvS48CVPfzBNCPdCG4KnNjM+Uj/3DSw7rT9PJvdML9ARS4M2Uqco9POPw+Aj20KBumsEUd6FMVcYBXAA=="], - "@aws-sdk/client-s3": ["@aws-sdk/client-s3@3.978.0", "", { "dependencies": { "@aws-crypto/sha1-browser": "5.2.0", "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.4", "@aws-sdk/credential-provider-node": "^3.972.2", "@aws-sdk/middleware-bucket-endpoint": "^3.972.2", "@aws-sdk/middleware-expect-continue": "^3.972.2", "@aws-sdk/middleware-flexible-checksums": "^3.972.2", "@aws-sdk/middleware-host-header": "^3.972.2", "@aws-sdk/middleware-location-constraint": "^3.972.2", "@aws-sdk/middleware-logger": "^3.972.2", "@aws-sdk/middleware-recursion-detection": "^3.972.2", "@aws-sdk/middleware-sdk-s3": "^3.972.4", 
"@aws-sdk/middleware-ssec": "^3.972.2", "@aws-sdk/middleware-user-agent": "^3.972.4", "@aws-sdk/region-config-resolver": "^3.972.2", "@aws-sdk/signature-v4-multi-region": "3.972.0", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.2", "@aws-sdk/util-user-agent-node": "^3.972.2", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.0", "@smithy/eventstream-serde-browser": "^4.2.8", "@smithy/eventstream-serde-config-resolver": "^4.3.8", "@smithy/eventstream-serde-node": "^4.2.8", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-blob-browser": "^4.2.9", "@smithy/hash-node": "^4.2.8", "@smithy/hash-stream-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/md5-js": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.12", "@smithy/middleware-retry": "^4.4.29", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.28", "@smithy/util-defaults-mode-node": "^4.2.31", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-stream": "^4.5.10", "@smithy/util-utf8": "^4.2.0", "@smithy/util-waiter": "^4.2.8", "tslib": "^2.6.2" } }, "sha512-2chs05VbfgRNb5ZEYIwooeHCaL+DjwvrW3ElkslI71ltEqVNdeWvB7hbkLWPPKazV3kjY3H90pLDY8mMqsET+A=="], + "@aws-sdk/client-s3": ["@aws-sdk/client-s3@3.985.0", "", { "dependencies": { "@aws-crypto/sha1-browser": "5.2.0", "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/credential-provider-node": "^3.972.6", 
"@aws-sdk/middleware-bucket-endpoint": "^3.972.3", "@aws-sdk/middleware-expect-continue": "^3.972.3", "@aws-sdk/middleware-flexible-checksums": "^3.972.5", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-location-constraint": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-sdk-s3": "^3.972.7", "@aws-sdk/middleware-ssec": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/signature-v4-multi-region": "3.985.0", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/eventstream-serde-browser": "^4.2.8", "@smithy/eventstream-serde-config-resolver": "^4.3.8", "@smithy/eventstream-serde-node": "^4.2.8", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-blob-browser": "^4.2.9", "@smithy/hash-node": "^4.2.8", "@smithy/hash-stream-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/md5-js": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-stream": "^4.5.11", "@smithy/util-utf8": "^4.2.0", "@smithy/util-waiter": 
"^4.2.8", "tslib": "^2.6.2" } }, "sha512-S9TqjzzZEEIKBnC7yFpvqM7CG9ALpY5qhQ5BnDBJtdG20NoGpjKLGUUfD2wmZItuhbrcM4Z8c6m6Fg0XYIOVvw=="], - "@aws-sdk/client-sesv2": ["@aws-sdk/client-sesv2@3.978.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.4", "@aws-sdk/credential-provider-node": "^3.972.2", "@aws-sdk/middleware-host-header": "^3.972.2", "@aws-sdk/middleware-logger": "^3.972.2", "@aws-sdk/middleware-recursion-detection": "^3.972.2", "@aws-sdk/middleware-user-agent": "^3.972.4", "@aws-sdk/region-config-resolver": "^3.972.2", "@aws-sdk/signature-v4-multi-region": "3.972.0", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.2", "@aws-sdk/util-user-agent-node": "^3.972.2", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.0", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.12", "@smithy/middleware-retry": "^4.4.29", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.28", "@smithy/util-defaults-mode-node": "^4.2.31", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-x0jQtqJADxbqam7JJTmAlkRPptb7rA2NmDuv0UG2ImBtllB+wF+Ar8uq569V4ylFtEsfZS9yiNK5+CdmTc6+Wg=="], + "@aws-sdk/client-sesv2": ["@aws-sdk/client-sesv2@3.985.0", "", { "dependencies": { "@aws-crypto/sha256-browser": 
"5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/credential-provider-node": "^3.972.6", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/signature-v4-multi-region": "3.985.0", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-RqeSpVUFeg/fI874lNNdJP5nZ+3mUY5qRDDHYiOta3+2esOC/RAG1XcfYnupFR8wDDiIYsi6gHakRUYgiIW13w=="], "@aws-sdk/client-sqs": ["@aws-sdk/client-sqs@3.947.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "3.947.0", "@aws-sdk/credential-provider-node": "3.947.0", "@aws-sdk/middleware-host-header": "3.936.0", "@aws-sdk/middleware-logger": "3.936.0", "@aws-sdk/middleware-recursion-detection": 
"3.936.0", "@aws-sdk/middleware-sdk-sqs": "3.946.0", "@aws-sdk/middleware-user-agent": "3.947.0", "@aws-sdk/region-config-resolver": "3.936.0", "@aws-sdk/types": "3.936.0", "@aws-sdk/util-endpoints": "3.936.0", "@aws-sdk/util-user-agent-browser": "3.936.0", "@aws-sdk/util-user-agent-node": "3.947.0", "@smithy/config-resolver": "^4.4.3", "@smithy/core": "^3.18.7", "@smithy/fetch-http-handler": "^5.3.6", "@smithy/hash-node": "^4.2.5", "@smithy/invalid-dependency": "^4.2.5", "@smithy/md5-js": "^4.2.5", "@smithy/middleware-content-length": "^4.2.5", "@smithy/middleware-endpoint": "^4.3.14", "@smithy/middleware-retry": "^4.4.14", "@smithy/middleware-serde": "^4.2.6", "@smithy/middleware-stack": "^4.2.5", "@smithy/node-config-provider": "^4.3.5", "@smithy/node-http-handler": "^4.4.5", "@smithy/protocol-http": "^5.3.5", "@smithy/smithy-client": "^4.9.10", "@smithy/types": "^4.9.0", "@smithy/url-parser": "^4.2.5", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.13", "@smithy/util-defaults-mode-node": "^4.2.16", "@smithy/util-endpoints": "^3.2.5", "@smithy/util-middleware": "^4.2.5", "@smithy/util-retry": "^4.2.5", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-8tzFyYGAAnQg+G9eB5zAe0oEo+MJMZ3YEk+8EL4uf2zG5wKxJvTBJZr6U9I1CEXYUde374OyLMyKng+sWyN+wg=="], @@ -429,29 +429,29 @@ "@aws-sdk/lib-dynamodb": ["@aws-sdk/lib-dynamodb@3.940.0", "", { "dependencies": { "@aws-sdk/core": "3.940.0", "@aws-sdk/util-dynamodb": "3.940.0", "@smithy/core": "^3.18.5", "@smithy/smithy-client": "^4.9.8", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" }, "peerDependencies": { "@aws-sdk/client-dynamodb": "^3.940.0" } }, "sha512-5ApYAix2wvJuMszj1lrpg8lm4ipoZMFO8crxtzsdAvxM8TV5bKSRQQ2GA3CMIODrBuSzpXvWueHHrfkx05ZAQw=="], - "@aws-sdk/middleware-bucket-endpoint": ["@aws-sdk/middleware-bucket-endpoint@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", 
"@aws-sdk/util-arn-parser": "^3.972.2", "@smithy/node-config-provider": "^4.3.8", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "@smithy/util-config-provider": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-ofuXBnitp9j8t05O4NQVrpMZDECPtUhRIWdLzR35baR5njOIPY7YqNtJE+yELVpSn2m4jt2sV1ezYMBY4/Lo+w=="], + "@aws-sdk/middleware-bucket-endpoint": ["@aws-sdk/middleware-bucket-endpoint@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-arn-parser": "^3.972.2", "@smithy/node-config-provider": "^4.3.8", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "@smithy/util-config-provider": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-fmbgWYirF67YF1GfD7cg5N6HHQ96EyRNx/rDIrTF277/zTWVuPI2qS/ZHgofwR1NZPe/NWvoppflQY01LrbVLg=="], "@aws-sdk/middleware-endpoint-discovery": ["@aws-sdk/middleware-endpoint-discovery@3.936.0", "", { "dependencies": { "@aws-sdk/endpoint-cache": "3.893.0", "@aws-sdk/types": "3.936.0", "@smithy/node-config-provider": "^4.3.5", "@smithy/protocol-http": "^5.3.5", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-wNJZ8PDw0eQK2x4z1q8JqiDvw9l9xd36EoklVT2CIBt8FnqGdrMGjAx93RRbH3G6Fmvwoe+D3VJXbWHBlhD0Bw=="], "@aws-sdk/middleware-eventstream": ["@aws-sdk/middleware-eventstream@3.936.0", "", { "dependencies": { "@aws-sdk/types": "3.936.0", "@smithy/protocol-http": "^5.3.5", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-XQSH8gzLkk8CDUDxyt4Rdm9owTpRIPdtg2yw9Y2Wl5iSI55YQSiC3x8nM3c4Y4WqReJprunFPK225ZUDoYCfZA=="], - "@aws-sdk/middleware-expect-continue": ["@aws-sdk/middleware-expect-continue@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-d9bBQlGk1T5j5rWfof20M2tErddOSoSLDauP2/yyuXfeOfQRCSBUZNrApSxjJ9Hw+/RDGR/XL+LEOqmXxSlV3A=="], + "@aws-sdk/middleware-expect-continue": ["@aws-sdk/middleware-expect-continue@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/protocol-http": "^5.3.8", 
"@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-4msC33RZsXQpUKR5QR4HnvBSNCPLGHmB55oDiROqqgyOc+TOfVu2xgi5goA7ms6MdZLeEh2905UfWMnMMF4mRg=="], - "@aws-sdk/middleware-flexible-checksums": ["@aws-sdk/middleware-flexible-checksums@3.972.2", "", { "dependencies": { "@aws-crypto/crc32": "5.2.0", "@aws-crypto/crc32c": "5.2.0", "@aws-crypto/util": "5.2.0", "@aws-sdk/core": "^3.973.2", "@aws-sdk/crc64-nvme": "3.972.0", "@aws-sdk/types": "^3.973.1", "@smithy/is-array-buffer": "^4.2.0", "@smithy/node-config-provider": "^4.3.8", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-stream": "^4.5.10", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-GgWVZJdzXzqhXxzNAYB3TnZCj7d5rZNdovqSIV91e97nowHVaExRoyaZ3H/Ydqot7veHGPTl8nBp464zZeLDTQ=="], + "@aws-sdk/middleware-flexible-checksums": ["@aws-sdk/middleware-flexible-checksums@3.972.5", "", { "dependencies": { "@aws-crypto/crc32": "5.2.0", "@aws-crypto/crc32c": "5.2.0", "@aws-crypto/util": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/crc64-nvme": "3.972.0", "@aws-sdk/types": "^3.973.1", "@smithy/is-array-buffer": "^4.2.0", "@smithy/node-config-provider": "^4.3.8", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-stream": "^4.5.11", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-SF/1MYWx67OyCrLA4icIpWUfCkdlOi8Y1KecQ9xYxkL10GMjVdPTGPnYhAg0dw5U43Y9PVUWhAV2ezOaG+0BLg=="], "@aws-sdk/middleware-host-header": ["@aws-sdk/middleware-host-header@3.936.0", "", { "dependencies": { "@aws-sdk/types": "3.936.0", "@smithy/protocol-http": "^5.3.5", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-tAaObaAnsP1XnLGndfkGWFuzrJYuk9W0b/nLvol66t8FZExIAf/WdkT2NNAWOYxljVs++oHnyHBCxIlaHrzSiw=="], - "@aws-sdk/middleware-location-constraint": ["@aws-sdk/middleware-location-constraint@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "tslib": 
"^2.6.2" } }, "sha512-pyayzpq+VQiG1o9pEUyr6BXEJ2g2t4JIPdNxDkIHp2AhR63Gy/10WQkXTBOgRnfQ7/aLPLOnjRIWwOPp0CfUlA=="], + "@aws-sdk/middleware-location-constraint": ["@aws-sdk/middleware-location-constraint@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-nIg64CVrsXp67vbK0U1/Is8rik3huS3QkRHn2DRDx4NldrEFMgdkZGI/+cZMKD9k4YOS110Dfu21KZLHrFA/1g=="], "@aws-sdk/middleware-logger": ["@aws-sdk/middleware-logger@3.936.0", "", { "dependencies": { "@aws-sdk/types": "3.936.0", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-aPSJ12d3a3Ea5nyEnLbijCaaYJT2QjQ9iW+zGh5QcZYXmOGWbKVyPSxmVOboZQG+c1M8t6d2O7tqrwzIq8L8qw=="], "@aws-sdk/middleware-recursion-detection": ["@aws-sdk/middleware-recursion-detection@3.936.0", "", { "dependencies": { "@aws-sdk/types": "3.936.0", "@aws/lambda-invoke-store": "^0.2.0", "@smithy/protocol-http": "^5.3.5", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-l4aGbHpXM45YNgXggIux1HgsCVAvvBoqHPkqLnqMl9QVapfuSTjJHfDYDsx1Xxct6/m7qSMUzanBALhiaGO2fA=="], - "@aws-sdk/middleware-sdk-s3": ["@aws-sdk/middleware-sdk-s3@3.972.4", "", { "dependencies": { "@aws-sdk/core": "^3.973.4", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-arn-parser": "^3.972.2", "@smithy/core": "^3.22.0", "@smithy/node-config-provider": "^4.3.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/util-config-provider": "^4.2.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-stream": "^4.5.10", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-lradfn72Td7lswhZKi86VKRNkDtmQR7bq9shX1kaPK1itjThxfcx7ogXSvMm/0cuqoYGic8UUXQOaK4kpU933g=="], + "@aws-sdk/middleware-sdk-s3": ["@aws-sdk/middleware-sdk-s3@3.972.7", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-arn-parser": "^3.972.2", "@smithy/core": "^3.22.1", "@smithy/node-config-provider": "^4.3.8", 
"@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/util-config-provider": "^4.2.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-stream": "^4.5.11", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-VtZ7tMIw18VzjG+I6D6rh2eLkJfTtByiFoCIauGDtTTPBEUMQUiGaJ/zZrPlCY6BsvLLeFKz3+E5mntgiOWmIg=="], "@aws-sdk/middleware-sdk-sqs": ["@aws-sdk/middleware-sdk-sqs@3.946.0", "", { "dependencies": { "@aws-sdk/types": "3.936.0", "@smithy/smithy-client": "^4.9.10", "@smithy/types": "^4.9.0", "@smithy/util-hex-encoding": "^4.2.0", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-+KedlcXUqA1Bdafvw264SWvwyHYvFxn47y831tEKc85fp5VF5LGE9uMlU13hsWySftLmDd/ZFwSQI6RN2zSpAg=="], - "@aws-sdk/middleware-ssec": ["@aws-sdk/middleware-ssec@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-HJ3OmQnlQ1es6esrDWnx3nVPhBAN89WaFCzsDcb6oT7TMjBPUfZ5+1BpI7B0Hnme8cc6kp7qc4cgo2plrlROJA=="], + "@aws-sdk/middleware-ssec": ["@aws-sdk/middleware-ssec@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-dU6kDuULN3o3jEHcjm0c4zWJlY1zWVkjG9NPe9qxYLLpcbdj5kRYBS2DdWYD+1B9f910DezRuws7xDEqKkHQIg=="], "@aws-sdk/middleware-user-agent": ["@aws-sdk/middleware-user-agent@3.940.0", "", { "dependencies": { "@aws-sdk/core": "3.940.0", "@aws-sdk/types": "3.936.0", "@aws-sdk/util-endpoints": "3.936.0", "@smithy/core": "^3.18.5", "@smithy/protocol-http": "^5.3.5", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-nJbLrUj6fY+l2W2rIB9P4Qvpiy0tnTdg/dmixRxrU1z3e8wBdspJlyE+AZN4fuVbeL6rrRrO/zxQC1bB3cw5IA=="], @@ -461,9 +461,9 @@ "@aws-sdk/region-config-resolver": ["@aws-sdk/region-config-resolver@3.936.0", "", { "dependencies": { "@aws-sdk/types": "3.936.0", "@smithy/config-resolver": "^4.4.3", "@smithy/node-config-provider": "^4.3.5", "@smithy/types": "^4.9.0", "tslib": 
"^2.6.2" } }, "sha512-wOKhzzWsshXGduxO4pqSiNyL9oUtk4BEvjWm9aaq6Hmfdoydq6v6t0rAGHWPjFwy9z2haovGRi3C8IxdMB4muw=="], - "@aws-sdk/s3-request-presigner": ["@aws-sdk/s3-request-presigner@3.978.0", "", { "dependencies": { "@aws-sdk/signature-v4-multi-region": "3.972.0", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-format-url": "^3.972.2", "@smithy/middleware-endpoint": "^4.4.12", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-P+SZrny/BT/x9w4BBM9IUr17jjDL7Rg/FjXKqh9viV81i/68Eu6gHBtS/JzvNF+rpG5gdZcMnBSANZqbnEbDmA=="], + "@aws-sdk/s3-request-presigner": ["@aws-sdk/s3-request-presigner@3.985.0", "", { "dependencies": { "@aws-sdk/signature-v4-multi-region": "3.985.0", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-format-url": "^3.972.3", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-lPnf977GFM4cMLJ7X+ThktKMe/0CXIfX+wz1z+sUT7yagPL2IRyiNUPFZ0VTEGBo1gRhHEDPWy6yzk8WWRFsvg=="], - "@aws-sdk/signature-v4-multi-region": ["@aws-sdk/signature-v4-multi-region@3.972.0", "", { "dependencies": { "@aws-sdk/middleware-sdk-s3": "3.972.0", "@aws-sdk/types": "3.972.0", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-2udiRijmjpN81Pvajje4TsjbXDZNP6K9bYUanBYH8hXa/tZG5qfGCySD+TyX0sgDxCQmEDMg3LaQdfjNHBDEgQ=="], + "@aws-sdk/signature-v4-multi-region": ["@aws-sdk/signature-v4-multi-region@3.985.0", "", { "dependencies": { "@aws-sdk/middleware-sdk-s3": "^3.972.7", "@aws-sdk/types": "^3.973.1", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-W6hTSOPiSbh4IdTYVxN7xHjpCh0qvfQU1GKGBzGQm0ZEIOaMmWqiDEvFfyGYKmfBvumT8vHKxQRTX0av9omtIg=="], "@aws-sdk/token-providers": ["@aws-sdk/token-providers@3.940.0", "", { "dependencies": { "@aws-sdk/core": 
"3.940.0", "@aws-sdk/nested-clients": "3.940.0", "@aws-sdk/types": "3.936.0", "@smithy/property-provider": "^4.2.5", "@smithy/shared-ini-file-loader": "^4.4.0", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-k5qbRe/ZFjW9oWEdzLIa2twRVIEx7p/9rutofyrRysrtEnYh3HAWCngAnwbgKMoiwa806UzcTRx0TjyEpnKcCg=="], @@ -475,7 +475,7 @@ "@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.936.0", "", { "dependencies": { "@aws-sdk/types": "3.936.0", "@smithy/types": "^4.9.0", "@smithy/url-parser": "^4.2.5", "@smithy/util-endpoints": "^3.2.5", "tslib": "^2.6.2" } }, "sha512-0Zx3Ntdpu+z9Wlm7JKUBOzS9EunwKAb4KdGUQQxDqh5Lc3ta5uBoub+FgmVuzwnmBu9U1Os8UuwVTH0Lgu+P5w=="], - "@aws-sdk/util-format-url": ["@aws-sdk/util-format-url@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/querystring-builder": "^4.2.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-RCd8eur5wzDLgFBvbBhoFQ1bw1wxHJiN88MQ82IiJBs6OGXTWaf0oFgLbK06qJvnVUqL13t3jEnlYPHPNdgBWw=="], + "@aws-sdk/util-format-url": ["@aws-sdk/util-format-url@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/querystring-builder": "^4.2.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-n7F2ycckcKFXa01vAsT/SJdjFHfKH9s96QHcs5gn8AaaigASICeME8WdUL9uBp8XV/OVwEt8+6gzn6KFUgQa8g=="], "@aws-sdk/util-locate-window": ["@aws-sdk/util-locate-window@3.965.4", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-H1onv5SkgPBK2P6JR2MjGgbOnttoNzSPIRoeZTNPZYyaplwGg50zS3amXvXqF0/qfXpWEC9rLWU564QTB9bSog=="], @@ -499,7 +499,7 @@ "@azure/core-client": ["@azure/core-client@1.10.1", "", { "dependencies": { "@azure/abort-controller": "^2.1.2", "@azure/core-auth": "^1.10.0", "@azure/core-rest-pipeline": "^1.22.0", "@azure/core-tracing": "^1.3.0", "@azure/core-util": "^1.13.0", "@azure/logger": "^1.3.0", "tslib": "^2.6.2" } }, "sha512-Nh5PhEOeY6PrnxNPsEHRr9eimxLwgLlpmguQaHKBinFYA/RU9+kOYVOQqOrTsCL+KSxrLLl1gD8Dk5BFW/7l/w=="], - "@azure/core-http-compat": ["@azure/core-http-compat@2.3.1", "", { 
"dependencies": { "@azure/abort-controller": "^2.1.2", "@azure/core-client": "^1.10.0", "@azure/core-rest-pipeline": "^1.22.0" } }, "sha512-az9BkXND3/d5VgdRRQVkiJb2gOmDU8Qcq4GvjtBmDICNiQ9udFmDk4ZpSB5Qq1OmtDJGlQAfBaS4palFsazQ5g=="], + "@azure/core-http-compat": ["@azure/core-http-compat@2.3.2", "", { "dependencies": { "@azure/abort-controller": "^2.1.2" }, "peerDependencies": { "@azure/core-client": "^1.10.0", "@azure/core-rest-pipeline": "^1.22.0" } }, "sha512-Tf6ltdKzOJEgxZeWLCjMxrxbodB/ZeCbzzA1A2qHbhzAjzjHoBVSUeSl/baT/oHAxhc4qdqVaDKnc2+iE932gw=="], "@azure/core-lro": ["@azure/core-lro@2.7.2", "", { "dependencies": { "@azure/abort-controller": "^2.0.0", "@azure/core-util": "^1.2.0", "@azure/logger": "^1.0.0", "tslib": "^2.6.2" } }, "sha512-0YIpccoX8m/k00O7mDDMdJpbr6mf1yWo2dfmxt5A8XVZVVMz2SSKaEbMCeJRvgQ0IaSlqhjT47p4hVIRRy90xw=="], @@ -517,13 +517,13 @@ "@azure/storage-blob": ["@azure/storage-blob@12.27.0", "", { "dependencies": { "@azure/abort-controller": "^2.1.2", "@azure/core-auth": "^1.4.0", "@azure/core-client": "^1.6.2", "@azure/core-http-compat": "^2.0.0", "@azure/core-lro": "^2.2.0", "@azure/core-paging": "^1.1.1", "@azure/core-rest-pipeline": "^1.10.1", "@azure/core-tracing": "^1.1.2", "@azure/core-util": "^1.6.1", "@azure/core-xml": "^1.4.3", "@azure/logger": "^1.0.0", "events": "^3.0.0", "tslib": "^2.2.0" } }, "sha512-IQjj9RIzAKatmNca3D6bT0qJ+Pkox1WZGOg2esJF2YLHb45pQKOwGPIAV+w3rfgkj7zV3RMxpn/c6iftzSOZJQ=="], - "@babel/code-frame": ["@babel/code-frame@7.28.6", "", { "dependencies": { "@babel/helper-validator-identifier": "^7.28.5", "js-tokens": "^4.0.0", "picocolors": "^1.1.1" } }, "sha512-JYgintcMjRiCvS8mMECzaEn+m3PfoQiyqukOMCCVQtoJGYJw8j/8LBJEiqkHLkfwCcs74E3pbAUFNg7d9VNJ+Q=="], + "@babel/code-frame": ["@babel/code-frame@7.29.0", "", { "dependencies": { "@babel/helper-validator-identifier": "^7.28.5", "js-tokens": "^4.0.0", "picocolors": "^1.1.1" } }, "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw=="], - 
"@babel/compat-data": ["@babel/compat-data@7.28.6", "", {}, "sha512-2lfu57JtzctfIrcGMz992hyLlByuzgIk58+hhGCxjKZ3rWI82NnVLjXcaTqkI2NvlcvOskZaiZ5kjUALo3Lpxg=="], + "@babel/compat-data": ["@babel/compat-data@7.29.0", "", {}, "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg=="], - "@babel/core": ["@babel/core@7.28.6", "", { "dependencies": { "@babel/code-frame": "^7.28.6", "@babel/generator": "^7.28.6", "@babel/helper-compilation-targets": "^7.28.6", "@babel/helper-module-transforms": "^7.28.6", "@babel/helpers": "^7.28.6", "@babel/parser": "^7.28.6", "@babel/template": "^7.28.6", "@babel/traverse": "^7.28.6", "@babel/types": "^7.28.6", "@jridgewell/remapping": "^2.3.5", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", "json5": "^2.2.3", "semver": "^6.3.1" } }, "sha512-H3mcG6ZDLTlYfaSNi0iOKkigqMFvkTKlGUYlD8GW7nNOYRrevuA46iTypPyv+06V3fEmvvazfntkBU34L0azAw=="], + "@babel/core": ["@babel/core@7.29.0", "", { "dependencies": { "@babel/code-frame": "^7.29.0", "@babel/generator": "^7.29.0", "@babel/helper-compilation-targets": "^7.28.6", "@babel/helper-module-transforms": "^7.28.6", "@babel/helpers": "^7.28.6", "@babel/parser": "^7.29.0", "@babel/template": "^7.28.6", "@babel/traverse": "^7.29.0", "@babel/types": "^7.29.0", "@jridgewell/remapping": "^2.3.5", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", "json5": "^2.2.3", "semver": "^6.3.1" } }, "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA=="], - "@babel/generator": ["@babel/generator@7.28.6", "", { "dependencies": { "@babel/parser": "^7.28.6", "@babel/types": "^7.28.6", "@jridgewell/gen-mapping": "^0.3.12", "@jridgewell/trace-mapping": "^0.3.28", "jsesc": "^3.0.2" } }, "sha512-lOoVRwADj8hjf7al89tvQ2a1lf53Z+7tiXMgpZJL3maQPDxh0DgLMN62B2MKUOFcoodBHLMbDM6WAbKgNy5Suw=="], + "@babel/generator": ["@babel/generator@7.29.1", "", { "dependencies": { "@babel/parser": "^7.29.0", 
"@babel/types": "^7.29.0", "@jridgewell/gen-mapping": "^0.3.12", "@jridgewell/trace-mapping": "^0.3.28", "jsesc": "^3.0.2" } }, "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw=="], "@babel/helper-compilation-targets": ["@babel/helper-compilation-targets@7.28.6", "", { "dependencies": { "@babel/compat-data": "^7.28.6", "@babel/helper-validator-option": "^7.27.1", "browserslist": "^4.24.0", "lru-cache": "^5.1.1", "semver": "^6.3.1" } }, "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA=="], @@ -543,7 +543,7 @@ "@babel/helpers": ["@babel/helpers@7.28.6", "", { "dependencies": { "@babel/template": "^7.28.6", "@babel/types": "^7.28.6" } }, "sha512-xOBvwq86HHdB7WUDTfKfT/Vuxh7gElQ+Sfti2Cy6yIWNW05P8iUslOVcZ4/sKbE+/jQaukQAdz/gf3724kYdqw=="], - "@babel/parser": ["@babel/parser@7.28.6", "", { "dependencies": { "@babel/types": "^7.28.6" }, "bin": "./bin/babel-parser.js" }, "sha512-TeR9zWR18BvbfPmGbLampPMW+uW1NZnJlRuuHso8i87QZNq2JRF9i6RgxRqtEq+wQGsS19NNTWr2duhnE49mfQ=="], + "@babel/parser": ["@babel/parser@7.29.0", "", { "dependencies": { "@babel/types": "^7.29.0" }, "bin": "./bin/babel-parser.js" }, "sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww=="], "@babel/plugin-transform-react-jsx-self": ["@babel/plugin-transform-react-jsx-self@7.27.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw=="], @@ -553,9 +553,9 @@ "@babel/template": ["@babel/template@7.28.6", "", { "dependencies": { "@babel/code-frame": "^7.28.6", "@babel/parser": "^7.28.6", "@babel/types": "^7.28.6" } }, "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ=="], - "@babel/traverse": ["@babel/traverse@7.28.6", "", { "dependencies": { "@babel/code-frame": "^7.28.6", 
"@babel/generator": "^7.28.6", "@babel/helper-globals": "^7.28.0", "@babel/parser": "^7.28.6", "@babel/template": "^7.28.6", "@babel/types": "^7.28.6", "debug": "^4.3.1" } }, "sha512-fgWX62k02qtjqdSNTAGxmKYY/7FSL9WAS1o2Hu5+I5m9T0yxZzr4cnrfXQ/MX0rIifthCSs6FKTlzYbJcPtMNg=="], + "@babel/traverse": ["@babel/traverse@7.29.0", "", { "dependencies": { "@babel/code-frame": "^7.29.0", "@babel/generator": "^7.29.0", "@babel/helper-globals": "^7.28.0", "@babel/parser": "^7.29.0", "@babel/template": "^7.28.6", "@babel/types": "^7.29.0", "debug": "^4.3.1" } }, "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA=="], - "@babel/types": ["@babel/types@7.28.6", "", { "dependencies": { "@babel/helper-string-parser": "^7.27.1", "@babel/helper-validator-identifier": "^7.28.5" } }, "sha512-0ZrskXVEHSWIqZM/sQZ4EV3jZJXRkio/WCxaqKZP1g//CEWEPSfeZFcms4XeKBCHU0ZKnIkdJeU/kF+eRp5lBg=="], + "@babel/types": ["@babel/types@7.29.0", "", { "dependencies": { "@babel/helper-string-parser": "^7.27.1", "@babel/helper-validator-identifier": "^7.28.5" } }, "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A=="], "@bcoe/v8-coverage": ["@bcoe/v8-coverage@1.0.2", "", {}, "sha512-6zABk/ECA/QYSCQ1NGiVwwbQerUCZ+TQbp64Q3AgmfNvurHH0j8TtXa1qbShXA6qqkpAj4V5W8pP6mLe1mcMqA=="], @@ -759,9 +759,9 @@ "@isaacs/balanced-match": ["@isaacs/balanced-match@4.0.1", "", {}, "sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ=="], - "@isaacs/brace-expansion": ["@isaacs/brace-expansion@5.0.0", "", { "dependencies": { "@isaacs/balanced-match": "^4.0.1" } }, "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA=="], + "@isaacs/brace-expansion": ["@isaacs/brace-expansion@5.0.1", "", { "dependencies": { "@isaacs/balanced-match": "^4.0.1" } }, "sha512-WMz71T1JS624nWj2n2fnYAuPovhv7EUhk69R6i9dsVyzxt5eM3bjwvgk9L+APE1TRscGysAVMANkB0jh0LQZrQ=="], - "@isaacs/cliui": 
["@isaacs/cliui@8.0.2", "", { "dependencies": { "string-width": "^5.1.2", "string-width-cjs": "npm:string-width@^4.2.0", "strip-ansi": "^7.0.1", "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", "wrap-ansi": "^8.1.0", "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" } }, "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA=="], + "@isaacs/cliui": ["@isaacs/cliui@9.0.0", "", {}, "sha512-AokJm4tuBHillT+FpMtxQ60n8ObyXBatq7jD2/JA9dxbDDokKQm8KMht5ibGzLVU9IJDIKK4TPKgMHEYMn3lMg=="], "@isaacs/fs-minipass": ["@isaacs/fs-minipass@4.0.1", "", { "dependencies": { "minipass": "^7.0.4" } }, "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w=="], @@ -799,29 +799,29 @@ "@mongodb-js/saslprep": ["@mongodb-js/saslprep@1.4.5", "", { "dependencies": { "sparse-bitfield": "^3.0.3" } }, "sha512-k64Lbyb7ycCSXHSLzxVdb2xsKGPMvYZfCICXvDsI8Z65CeWQzTEKS4YmGbnqw+U9RBvLPTsB6UCmwkgsDTGWIw=="], - "@napi-rs/canvas": ["@napi-rs/canvas@0.1.89", "", { "optionalDependencies": { "@napi-rs/canvas-android-arm64": "0.1.89", "@napi-rs/canvas-darwin-arm64": "0.1.89", "@napi-rs/canvas-darwin-x64": "0.1.89", "@napi-rs/canvas-linux-arm-gnueabihf": "0.1.89", "@napi-rs/canvas-linux-arm64-gnu": "0.1.89", "@napi-rs/canvas-linux-arm64-musl": "0.1.89", "@napi-rs/canvas-linux-riscv64-gnu": "0.1.89", "@napi-rs/canvas-linux-x64-gnu": "0.1.89", "@napi-rs/canvas-linux-x64-musl": "0.1.89", "@napi-rs/canvas-win32-arm64-msvc": "0.1.89", "@napi-rs/canvas-win32-x64-msvc": "0.1.89" } }, "sha512-7GjmkMirJHejeALCqUnZY3QwID7bbumOiLrqq2LKgxrdjdmxWQBTc6rcASa2u8wuWrH7qo4/4n/VNrOwCoKlKg=="], + "@napi-rs/canvas": ["@napi-rs/canvas@0.1.91", "", { "optionalDependencies": { "@napi-rs/canvas-android-arm64": "0.1.91", "@napi-rs/canvas-darwin-arm64": "0.1.91", "@napi-rs/canvas-darwin-x64": "0.1.91", "@napi-rs/canvas-linux-arm-gnueabihf": "0.1.91", "@napi-rs/canvas-linux-arm64-gnu": "0.1.91", "@napi-rs/canvas-linux-arm64-musl": "0.1.91", "@napi-rs/canvas-linux-riscv64-gnu": 
"0.1.91", "@napi-rs/canvas-linux-x64-gnu": "0.1.91", "@napi-rs/canvas-linux-x64-musl": "0.1.91", "@napi-rs/canvas-win32-arm64-msvc": "0.1.91", "@napi-rs/canvas-win32-x64-msvc": "0.1.91" } }, "sha512-eeIe1GoB74P1B0Nkw6pV8BCQ3hfCfvyYr4BntzlCsnFXzVJiPMDnLeIx3gVB0xQMblHYnjK/0nCLvirEhOjr5g=="], - "@napi-rs/canvas-android-arm64": ["@napi-rs/canvas-android-arm64@0.1.89", "", { "os": "android", "cpu": "arm64" }, "sha512-CXxQTXsjtQqKGENS8Ejv9pZOFJhOPIl2goenS+aU8dY4DygvkyagDhy/I07D1YLqrDtPvLEX5zZHt8qUdnuIpQ=="], + "@napi-rs/canvas-android-arm64": ["@napi-rs/canvas-android-arm64@0.1.91", "", { "os": "android", "cpu": "arm64" }, "sha512-SLLzXXgSnfct4zy/BVAfweZQkYkPJsNsJ2e5DOE8DFEHC6PufyUrwb12yqeu2So2IOIDpWJJaDAxKY/xpy6MYQ=="], - "@napi-rs/canvas-darwin-arm64": ["@napi-rs/canvas-darwin-arm64@0.1.89", "", { "os": "darwin", "cpu": "arm64" }, "sha512-k29cR/Zl20WLYM7M8YePevRu2VQRaKcRedYr1V/8FFHkyIQ8kShEV+MPoPGi+znvmd17Eqjy2Pk2F2kpM2umVg=="], + "@napi-rs/canvas-darwin-arm64": ["@napi-rs/canvas-darwin-arm64@0.1.91", "", { "os": "darwin", "cpu": "arm64" }, "sha512-bzdbCjIjw3iRuVFL+uxdSoMra/l09ydGNX9gsBxO/zg+5nlppscIpj6gg+nL6VNG85zwUarDleIrUJ+FWHvmuA=="], - "@napi-rs/canvas-darwin-x64": ["@napi-rs/canvas-darwin-x64@0.1.89", "", { "os": "darwin", "cpu": "x64" }, "sha512-iUragqhBrA5FqU13pkhYBDbUD1WEAIlT8R2+fj6xHICY2nemzwMUI8OENDhRh7zuL06YDcRwENbjAVxOmaX9jg=="], + "@napi-rs/canvas-darwin-x64": ["@napi-rs/canvas-darwin-x64@0.1.91", "", { "os": "darwin", "cpu": "x64" }, "sha512-q3qpkpw0IsG9fAS/dmcGIhCVoNxj8ojbexZKWwz3HwxlEWsLncEQRl4arnxrwbpLc2nTNTyj4WwDn7QR5NDAaA=="], - "@napi-rs/canvas-linux-arm-gnueabihf": ["@napi-rs/canvas-linux-arm-gnueabihf@0.1.89", "", { "os": "linux", "cpu": "arm" }, "sha512-y3SM9sfDWasY58ftoaI09YBFm35Ig8tosZqgahLJ2WGqawCusGNPV9P0/4PsrLOCZqGg629WxexQMY25n7zcvA=="], + "@napi-rs/canvas-linux-arm-gnueabihf": ["@napi-rs/canvas-linux-arm-gnueabihf@0.1.91", "", { "os": "linux", "cpu": "arm" }, 
"sha512-Io3g8wJZVhK8G+Fpg1363BE90pIPqg+ZbeehYNxPWDSzbgwU3xV0l8r/JBzODwC7XHi1RpFEk+xyUTMa2POj6w=="], - "@napi-rs/canvas-linux-arm64-gnu": ["@napi-rs/canvas-linux-arm64-gnu@0.1.89", "", { "os": "linux", "cpu": "arm64" }, "sha512-NEoF9y8xq5fX8HG8aZunBom1ILdTwt7ayBzSBIwrmitk7snj4W6Fz/yN/ZOmlM1iyzHDNX5Xn0n+VgWCF8BEdA=="], + "@napi-rs/canvas-linux-arm64-gnu": ["@napi-rs/canvas-linux-arm64-gnu@0.1.91", "", { "os": "linux", "cpu": "arm64" }, "sha512-HBnto+0rxx1bQSl8bCWA9PyBKtlk2z/AI32r3cu4kcNO+M/5SD4b0v1MWBWZyqMQyxFjWgy3ECyDjDKMC6tY1A=="], - "@napi-rs/canvas-linux-arm64-musl": ["@napi-rs/canvas-linux-arm64-musl@0.1.89", "", { "os": "linux", "cpu": "arm64" }, "sha512-UQQkIEzV12/l60j1ziMjZ+mtodICNUbrd205uAhbyTw0t60CrC/EsKb5/aJWGq1wM0agvcgZV72JJCKfLS6+4w=="], + "@napi-rs/canvas-linux-arm64-musl": ["@napi-rs/canvas-linux-arm64-musl@0.1.91", "", { "os": "linux", "cpu": "arm64" }, "sha512-/eJtVe2Xw9A86I4kwXpxxoNagdGclu12/NSMsfoL8q05QmeRCbfjhg1PJS7ENAuAvaiUiALGrbVfeY1KU1gztQ=="], - "@napi-rs/canvas-linux-riscv64-gnu": ["@napi-rs/canvas-linux-riscv64-gnu@0.1.89", "", { "os": "linux", "cpu": "none" }, "sha512-1/VmEoFaIO6ONeeEMGoWF17wOYZOl5hxDC1ios2Bkz/oQjbJJ8DY/X22vWTmvuUKWWhBVlo63pxLGZbjJU/heA=="], + "@napi-rs/canvas-linux-riscv64-gnu": ["@napi-rs/canvas-linux-riscv64-gnu@0.1.91", "", { "os": "linux", "cpu": "none" }, "sha512-floNK9wQuRWevUhhXRcuis7h0zirdytVxPgkonWO+kQlbvxV7gEUHGUFQyq4n55UHYFwgck1SAfJ1HuXv/+ppQ=="], - "@napi-rs/canvas-linux-x64-gnu": ["@napi-rs/canvas-linux-x64-gnu@0.1.89", "", { "os": "linux", "cpu": "x64" }, "sha512-ebLuqkCuaPIkKgKH9q4+pqWi1tkPOfiTk5PM1LKR1tB9iO9sFNVSIgwEp+SJreTSbA2DK5rW8lQXiN78SjtcvA=="], + "@napi-rs/canvas-linux-x64-gnu": ["@napi-rs/canvas-linux-x64-gnu@0.1.91", "", { "os": "linux", "cpu": "x64" }, "sha512-c3YDqBdf7KETuZy2AxsHFMsBBX1dWT43yFfWUq+j1IELdgesWtxf/6N7csi3VPf6VA3PmnT9EhMyb+M1wfGtqw=="], - "@napi-rs/canvas-linux-x64-musl": ["@napi-rs/canvas-linux-x64-musl@0.1.89", "", { "os": "linux", "cpu": "x64" }, 
"sha512-w+5qxHzplvA4BkHhCaizNMLLXiI+CfP84YhpHm/PqMub4u8J0uOAv+aaGv40rYEYra5hHRWr9LUd6cfW32o9/A=="], + "@napi-rs/canvas-linux-x64-musl": ["@napi-rs/canvas-linux-x64-musl@0.1.91", "", { "os": "linux", "cpu": "x64" }, "sha512-RpZ3RPIwgEcNBHSHSX98adm+4VP8SMT5FN6250s5jQbWpX/XNUX5aLMfAVJS/YnDjS1QlsCgQxFOPU0aCCWgag=="], - "@napi-rs/canvas-win32-arm64-msvc": ["@napi-rs/canvas-win32-arm64-msvc@0.1.89", "", { "os": "win32", "cpu": "arm64" }, "sha512-DmyXa5lJHcjOsDC78BM3bnEECqbK3xASVMrKfvtT/7S7Z8NGQOugvu+L7b41V6cexCd34mBWgMOsjoEBceeB1Q=="], + "@napi-rs/canvas-win32-arm64-msvc": ["@napi-rs/canvas-win32-arm64-msvc@0.1.91", "", { "os": "win32", "cpu": "arm64" }, "sha512-gF8MBp4X134AgVurxqlCdDA2qO0WaDdi9o6Sd5rWRVXRhWhYQ6wkdEzXNLIrmmros0Tsp2J0hQzx4ej/9O8trQ=="], - "@napi-rs/canvas-win32-x64-msvc": ["@napi-rs/canvas-win32-x64-msvc@0.1.89", "", { "os": "win32", "cpu": "x64" }, "sha512-WMej0LZrIqIncQcx0JHaMXlnAG7sncwJh7obs/GBgp0xF9qABjwoRwIooMWCZkSansapKGNUHhamY6qEnFN7gA=="], + "@napi-rs/canvas-win32-x64-msvc": ["@napi-rs/canvas-win32-x64-msvc@0.1.91", "", { "os": "win32", "cpu": "x64" }, "sha512-++gtW9EV/neKI8TshD8WFxzBYALSPag2kFRahIJV+LYsyt5kBn21b1dBhEUDHf7O+wiZmuFCeUa7QKGHnYRZBA=="], "@next/env": ["@next/env@16.1.0-canary.21", "", {}, "sha512-J5inWwxC8EpAr/a2GApmQK1KkftG7K2nM6SuzNvciNaPt9Z0AHFeazvFuQxbvXn024p+akBHRlo8P7ZJRoU7kA=="], @@ -1163,75 +1163,75 @@ "@rolldown/pluginutils": ["@rolldown/pluginutils@1.0.0-beta.27", "", {}, "sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA=="], - "@rollup/rollup-android-arm-eabi": ["@rollup/rollup-android-arm-eabi@4.57.0", "", { "os": "android", "cpu": "arm" }, "sha512-tPgXB6cDTndIe1ah7u6amCI1T0SsnlOuKgg10Xh3uizJk4e5M1JGaUMk7J4ciuAUcFpbOiNhm2XIjP9ON0dUqA=="], + "@rollup/rollup-android-arm-eabi": ["@rollup/rollup-android-arm-eabi@4.57.1", "", { "os": "android", "cpu": "arm" }, "sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg=="], - 
"@rollup/rollup-android-arm64": ["@rollup/rollup-android-arm64@4.57.0", "", { "os": "android", "cpu": "arm64" }, "sha512-sa4LyseLLXr1onr97StkU1Nb7fWcg6niokTwEVNOO7awaKaoRObQ54+V/hrF/BP1noMEaaAW6Fg2d/CfLiq3Mg=="], + "@rollup/rollup-android-arm64": ["@rollup/rollup-android-arm64@4.57.1", "", { "os": "android", "cpu": "arm64" }, "sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w=="], - "@rollup/rollup-darwin-arm64": ["@rollup/rollup-darwin-arm64@4.57.0", "", { "os": "darwin", "cpu": "arm64" }, "sha512-/NNIj9A7yLjKdmkx5dC2XQ9DmjIECpGpwHoGmA5E1AhU0fuICSqSWScPhN1yLCkEdkCwJIDu2xIeLPs60MNIVg=="], + "@rollup/rollup-darwin-arm64": ["@rollup/rollup-darwin-arm64@4.57.1", "", { "os": "darwin", "cpu": "arm64" }, "sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg=="], - "@rollup/rollup-darwin-x64": ["@rollup/rollup-darwin-x64@4.57.0", "", { "os": "darwin", "cpu": "x64" }, "sha512-xoh8abqgPrPYPr7pTYipqnUi1V3em56JzE/HgDgitTqZBZ3yKCWI+7KUkceM6tNweyUKYru1UMi7FC060RyKwA=="], + "@rollup/rollup-darwin-x64": ["@rollup/rollup-darwin-x64@4.57.1", "", { "os": "darwin", "cpu": "x64" }, "sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w=="], - "@rollup/rollup-freebsd-arm64": ["@rollup/rollup-freebsd-arm64@4.57.0", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-PCkMh7fNahWSbA0OTUQ2OpYHpjZZr0hPr8lId8twD7a7SeWrvT3xJVyza+dQwXSSq4yEQTMoXgNOfMCsn8584g=="], + "@rollup/rollup-freebsd-arm64": ["@rollup/rollup-freebsd-arm64@4.57.1", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug=="], - "@rollup/rollup-freebsd-x64": ["@rollup/rollup-freebsd-x64@4.57.0", "", { "os": "freebsd", "cpu": "x64" }, "sha512-1j3stGx+qbhXql4OCDZhnK7b01s6rBKNybfsX+TNrEe9JNq4DLi1yGiR1xW+nL+FNVvI4D02PUnl6gJ/2y6WJA=="], + "@rollup/rollup-freebsd-x64": ["@rollup/rollup-freebsd-x64@4.57.1", "", { "os": "freebsd", 
"cpu": "x64" }, "sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q=="], - "@rollup/rollup-linux-arm-gnueabihf": ["@rollup/rollup-linux-arm-gnueabihf@4.57.0", "", { "os": "linux", "cpu": "arm" }, "sha512-eyrr5W08Ms9uM0mLcKfM/Uzx7hjhz2bcjv8P2uynfj0yU8GGPdz8iYrBPhiLOZqahoAMB8ZiolRZPbbU2MAi6Q=="], + "@rollup/rollup-linux-arm-gnueabihf": ["@rollup/rollup-linux-arm-gnueabihf@4.57.1", "", { "os": "linux", "cpu": "arm" }, "sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw=="], - "@rollup/rollup-linux-arm-musleabihf": ["@rollup/rollup-linux-arm-musleabihf@4.57.0", "", { "os": "linux", "cpu": "arm" }, "sha512-Xds90ITXJCNyX9pDhqf85MKWUI4lqjiPAipJ8OLp8xqI2Ehk+TCVhF9rvOoN8xTbcafow3QOThkNnrM33uCFQA=="], + "@rollup/rollup-linux-arm-musleabihf": ["@rollup/rollup-linux-arm-musleabihf@4.57.1", "", { "os": "linux", "cpu": "arm" }, "sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw=="], - "@rollup/rollup-linux-arm64-gnu": ["@rollup/rollup-linux-arm64-gnu@4.57.0", "", { "os": "linux", "cpu": "arm64" }, "sha512-Xws2KA4CLvZmXjy46SQaXSejuKPhwVdaNinldoYfqruZBaJHqVo6hnRa8SDo9z7PBW5x84SH64+izmldCgbezw=="], + "@rollup/rollup-linux-arm64-gnu": ["@rollup/rollup-linux-arm64-gnu@4.57.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g=="], - "@rollup/rollup-linux-arm64-musl": ["@rollup/rollup-linux-arm64-musl@4.57.0", "", { "os": "linux", "cpu": "arm64" }, "sha512-hrKXKbX5FdaRJj7lTMusmvKbhMJSGWJ+w++4KmjiDhpTgNlhYobMvKfDoIWecy4O60K6yA4SnztGuNTQF+Lplw=="], + "@rollup/rollup-linux-arm64-musl": ["@rollup/rollup-linux-arm64-musl@4.57.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q=="], - "@rollup/rollup-linux-loong64-gnu": ["@rollup/rollup-linux-loong64-gnu@4.57.0", "", { "os": "linux", "cpu": 
"none" }, "sha512-6A+nccfSDGKsPm00d3xKcrsBcbqzCTAukjwWK6rbuAnB2bHaL3r9720HBVZ/no7+FhZLz/U3GwwZZEh6tOSI8Q=="], + "@rollup/rollup-linux-loong64-gnu": ["@rollup/rollup-linux-loong64-gnu@4.57.1", "", { "os": "linux", "cpu": "none" }, "sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA=="], - "@rollup/rollup-linux-loong64-musl": ["@rollup/rollup-linux-loong64-musl@4.57.0", "", { "os": "linux", "cpu": "none" }, "sha512-4P1VyYUe6XAJtQH1Hh99THxr0GKMMwIXsRNOceLrJnaHTDgk1FTcTimDgneRJPvB3LqDQxUmroBclQ1S0cIJwQ=="], + "@rollup/rollup-linux-loong64-musl": ["@rollup/rollup-linux-loong64-musl@4.57.1", "", { "os": "linux", "cpu": "none" }, "sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw=="], - "@rollup/rollup-linux-ppc64-gnu": ["@rollup/rollup-linux-ppc64-gnu@4.57.0", "", { "os": "linux", "cpu": "ppc64" }, "sha512-8Vv6pLuIZCMcgXre6c3nOPhE0gjz1+nZP6T+hwWjr7sVH8k0jRkH+XnfjjOTglyMBdSKBPPz54/y1gToSKwrSQ=="], + "@rollup/rollup-linux-ppc64-gnu": ["@rollup/rollup-linux-ppc64-gnu@4.57.1", "", { "os": "linux", "cpu": "ppc64" }, "sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w=="], - "@rollup/rollup-linux-ppc64-musl": ["@rollup/rollup-linux-ppc64-musl@4.57.0", "", { "os": "linux", "cpu": "ppc64" }, "sha512-r1te1M0Sm2TBVD/RxBPC6RZVwNqUTwJTA7w+C/IW5v9Ssu6xmxWEi+iJQlpBhtUiT1raJ5b48pI8tBvEjEFnFA=="], + "@rollup/rollup-linux-ppc64-musl": ["@rollup/rollup-linux-ppc64-musl@4.57.1", "", { "os": "linux", "cpu": "ppc64" }, "sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw=="], - "@rollup/rollup-linux-riscv64-gnu": ["@rollup/rollup-linux-riscv64-gnu@4.57.0", "", { "os": "linux", "cpu": "none" }, "sha512-say0uMU/RaPm3CDQLxUUTF2oNWL8ysvHkAjcCzV2znxBr23kFfaxocS9qJm+NdkRhF8wtdEEAJuYcLPhSPbjuQ=="], + "@rollup/rollup-linux-riscv64-gnu": ["@rollup/rollup-linux-riscv64-gnu@4.57.1", "", { "os": "linux", "cpu": "none" }, 
"sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A=="], - "@rollup/rollup-linux-riscv64-musl": ["@rollup/rollup-linux-riscv64-musl@4.57.0", "", { "os": "linux", "cpu": "none" }, "sha512-/MU7/HizQGsnBREtRpcSbSV1zfkoxSTR7wLsRmBPQ8FwUj5sykrP1MyJTvsxP5KBq9SyE6kH8UQQQwa0ASeoQQ=="], + "@rollup/rollup-linux-riscv64-musl": ["@rollup/rollup-linux-riscv64-musl@4.57.1", "", { "os": "linux", "cpu": "none" }, "sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw=="], - "@rollup/rollup-linux-s390x-gnu": ["@rollup/rollup-linux-s390x-gnu@4.57.0", "", { "os": "linux", "cpu": "s390x" }, "sha512-Q9eh+gUGILIHEaJf66aF6a414jQbDnn29zeu0eX3dHMuysnhTvsUvZTCAyZ6tJhUjnvzBKE4FtuaYxutxRZpOg=="], + "@rollup/rollup-linux-s390x-gnu": ["@rollup/rollup-linux-s390x-gnu@4.57.1", "", { "os": "linux", "cpu": "s390x" }, "sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg=="], - "@rollup/rollup-linux-x64-gnu": ["@rollup/rollup-linux-x64-gnu@4.57.0", "", { "os": "linux", "cpu": "x64" }, "sha512-OR5p5yG5OKSxHReWmwvM0P+VTPMwoBS45PXTMYaskKQqybkS3Kmugq1W+YbNWArF8/s7jQScgzXUhArzEQ7x0A=="], + "@rollup/rollup-linux-x64-gnu": ["@rollup/rollup-linux-x64-gnu@4.57.1", "", { "os": "linux", "cpu": "x64" }, "sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg=="], - "@rollup/rollup-linux-x64-musl": ["@rollup/rollup-linux-x64-musl@4.57.0", "", { "os": "linux", "cpu": "x64" }, "sha512-XeatKzo4lHDsVEbm1XDHZlhYZZSQYym6dg2X/Ko0kSFgio+KXLsxwJQprnR48GvdIKDOpqWqssC3iBCjoMcMpw=="], + "@rollup/rollup-linux-x64-musl": ["@rollup/rollup-linux-x64-musl@4.57.1", "", { "os": "linux", "cpu": "x64" }, "sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw=="], - "@rollup/rollup-openbsd-x64": ["@rollup/rollup-openbsd-x64@4.57.0", "", { "os": "openbsd", "cpu": "x64" }, 
"sha512-Lu71y78F5qOfYmubYLHPcJm74GZLU6UJ4THkf/a1K7Tz2ycwC2VUbsqbJAXaR6Bx70SRdlVrt2+n5l7F0agTUw=="], + "@rollup/rollup-openbsd-x64": ["@rollup/rollup-openbsd-x64@4.57.1", "", { "os": "openbsd", "cpu": "x64" }, "sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw=="], - "@rollup/rollup-openharmony-arm64": ["@rollup/rollup-openharmony-arm64@4.57.0", "", { "os": "none", "cpu": "arm64" }, "sha512-v5xwKDWcu7qhAEcsUubiav7r+48Uk/ENWdr82MBZZRIm7zThSxCIVDfb3ZeRRq9yqk+oIzMdDo6fCcA5DHfMyA=="], + "@rollup/rollup-openharmony-arm64": ["@rollup/rollup-openharmony-arm64@4.57.1", "", { "os": "none", "cpu": "arm64" }, "sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ=="], - "@rollup/rollup-win32-arm64-msvc": ["@rollup/rollup-win32-arm64-msvc@4.57.0", "", { "os": "win32", "cpu": "arm64" }, "sha512-XnaaaSMGSI6Wk8F4KK3QP7GfuuhjGchElsVerCplUuxRIzdvZ7hRBpLR0omCmw+kI2RFJB80nenhOoGXlJ5TfQ=="], + "@rollup/rollup-win32-arm64-msvc": ["@rollup/rollup-win32-arm64-msvc@4.57.1", "", { "os": "win32", "cpu": "arm64" }, "sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ=="], - "@rollup/rollup-win32-ia32-msvc": ["@rollup/rollup-win32-ia32-msvc@4.57.0", "", { "os": "win32", "cpu": "ia32" }, "sha512-3K1lP+3BXY4t4VihLw5MEg6IZD3ojSYzqzBG571W3kNQe4G4CcFpSUQVgurYgib5d+YaCjeFow8QivWp8vuSvA=="], + "@rollup/rollup-win32-ia32-msvc": ["@rollup/rollup-win32-ia32-msvc@4.57.1", "", { "os": "win32", "cpu": "ia32" }, "sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew=="], - "@rollup/rollup-win32-x64-gnu": ["@rollup/rollup-win32-x64-gnu@4.57.0", "", { "os": "win32", "cpu": "x64" }, "sha512-MDk610P/vJGc5L5ImE4k5s+GZT3en0KoK1MKPXCRgzmksAMk79j4h3k1IerxTNqwDLxsGxStEZVBqG0gIqZqoA=="], + "@rollup/rollup-win32-x64-gnu": ["@rollup/rollup-win32-x64-gnu@4.57.1", "", { "os": "win32", "cpu": "x64" }, 
"sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ=="], - "@rollup/rollup-win32-x64-msvc": ["@rollup/rollup-win32-x64-msvc@4.57.0", "", { "os": "win32", "cpu": "x64" }, "sha512-Zv7v6q6aV+VslnpwzqKAmrk5JdVkLUzok2208ZXGipjb+msxBr/fJPZyeEXiFgH7k62Ak0SLIfxQRZQvTuf7rQ=="], + "@rollup/rollup-win32-x64-msvc": ["@rollup/rollup-win32-x64-msvc@4.57.1", "", { "os": "win32", "cpu": "x64" }, "sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA=="], "@s2-dev/streamstore": ["@s2-dev/streamstore@0.17.3", "", { "dependencies": { "@protobuf-ts/runtime": "^2.11.1" }, "peerDependencies": { "typescript": "^5.9.3" } }, "sha512-UeXL5+MgZQfNkbhCgEDVm7PrV5B3bxh6Zp4C5pUzQQwaoA+iGh2QiiIptRZynWgayzRv4vh0PYfnKpTzJEXegQ=="], "@selderee/plugin-htmlparser2": ["@selderee/plugin-htmlparser2@0.11.0", "", { "dependencies": { "domhandler": "^5.0.3", "selderee": "^0.11.0" } }, "sha512-P33hHGdldxGabLFjPPpaTxVolMrzrcegejx+0GxjrIb9Zv48D8yAIA/QTDR2dFl7Uz7urX8aX6+5bCZslr+gWQ=="], - "@shikijs/core": ["@shikijs/core@3.21.0", "", { "dependencies": { "@shikijs/types": "3.21.0", "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4", "hast-util-to-html": "^9.0.5" } }, "sha512-AXSQu/2n1UIQekY8euBJlvFYZIw0PHY63jUzGbrOma4wPxzznJXTXkri+QcHeBNaFxiiOljKxxJkVSoB3PjbyA=="], + "@shikijs/core": ["@shikijs/core@3.22.0", "", { "dependencies": { "@shikijs/types": "3.22.0", "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4", "hast-util-to-html": "^9.0.5" } }, "sha512-iAlTtSDDbJiRpvgL5ugKEATDtHdUVkqgHDm/gbD2ZS9c88mx7G1zSYjjOxp5Qa0eaW0MAQosFRmJSk354PRoQA=="], - "@shikijs/engine-javascript": ["@shikijs/engine-javascript@3.21.0", "", { "dependencies": { "@shikijs/types": "3.21.0", "@shikijs/vscode-textmate": "^10.0.2", "oniguruma-to-es": "^4.3.4" } }, "sha512-ATwv86xlbmfD9n9gKRiwuPpWgPENAWCLwYCGz9ugTJlsO2kOzhOkvoyV/UD+tJ0uT7YRyD530x6ugNSffmvIiQ=="], + "@shikijs/engine-javascript": ["@shikijs/engine-javascript@3.22.0", "", { 
"dependencies": { "@shikijs/types": "3.22.0", "@shikijs/vscode-textmate": "^10.0.2", "oniguruma-to-es": "^4.3.4" } }, "sha512-jdKhfgW9CRtj3Tor0L7+yPwdG3CgP7W+ZEqSsojrMzCjD1e0IxIbwUMDDpYlVBlC08TACg4puwFGkZfLS+56Tw=="], - "@shikijs/engine-oniguruma": ["@shikijs/engine-oniguruma@3.21.0", "", { "dependencies": { "@shikijs/types": "3.21.0", "@shikijs/vscode-textmate": "^10.0.2" } }, "sha512-OYknTCct6qiwpQDqDdf3iedRdzj6hFlOPv5hMvI+hkWfCKs5mlJ4TXziBG9nyabLwGulrUjHiCq3xCspSzErYQ=="], + "@shikijs/engine-oniguruma": ["@shikijs/engine-oniguruma@3.22.0", "", { "dependencies": { "@shikijs/types": "3.22.0", "@shikijs/vscode-textmate": "^10.0.2" } }, "sha512-DyXsOG0vGtNtl7ygvabHd7Mt5EY8gCNqR9Y7Lpbbd/PbJvgWrqaKzH1JW6H6qFkuUa8aCxoiYVv8/YfFljiQxA=="], - "@shikijs/langs": ["@shikijs/langs@3.21.0", "", { "dependencies": { "@shikijs/types": "3.21.0" } }, "sha512-g6mn5m+Y6GBJ4wxmBYqalK9Sp0CFkUqfNzUy2pJglUginz6ZpWbaWjDB4fbQ/8SHzFjYbtU6Ddlp1pc+PPNDVA=="], + "@shikijs/langs": ["@shikijs/langs@3.22.0", "", { "dependencies": { "@shikijs/types": "3.22.0" } }, "sha512-x/42TfhWmp6H00T6uwVrdTJGKgNdFbrEdhaDwSR5fd5zhQ1Q46bHq9EO61SCEWJR0HY7z2HNDMaBZp8JRmKiIA=="], - "@shikijs/rehype": ["@shikijs/rehype@3.21.0", "", { "dependencies": { "@shikijs/types": "3.21.0", "@types/hast": "^3.0.4", "hast-util-to-string": "^3.0.1", "shiki": "3.21.0", "unified": "^11.0.5", "unist-util-visit": "^5.0.0" } }, "sha512-fTQvwsZL67QdosMFdTgQ5SNjW3nxaPplRy//312hqOctRbIwviTV0nAbhv3NfnztHXvFli2zLYNKsTz/f9tbpQ=="], + "@shikijs/rehype": ["@shikijs/rehype@3.22.0", "", { "dependencies": { "@shikijs/types": "3.22.0", "@types/hast": "^3.0.4", "hast-util-to-string": "^3.0.1", "shiki": "3.22.0", "unified": "^11.0.5", "unist-util-visit": "^5.1.0" } }, "sha512-69b2VPc6XBy/VmAJlpBU5By+bJSBdE2nvgRCZXav7zujbrjXuT0F60DIrjKuutjPqNufuizE+E8tIZr2Yn8Z+g=="], - "@shikijs/themes": ["@shikijs/themes@3.21.0", "", { "dependencies": { "@shikijs/types": "3.21.0" } }, 
"sha512-BAE4cr9EDiZyYzwIHEk7JTBJ9CzlPuM4PchfcA5ao1dWXb25nv6hYsoDiBq2aZK9E3dlt3WB78uI96UESD+8Mw=="], + "@shikijs/themes": ["@shikijs/themes@3.22.0", "", { "dependencies": { "@shikijs/types": "3.22.0" } }, "sha512-o+tlOKqsr6FE4+mYJG08tfCFDS+3CG20HbldXeVoyP+cYSUxDhrFf3GPjE60U55iOkkjbpY2uC3It/eeja35/g=="], - "@shikijs/transformers": ["@shikijs/transformers@3.21.0", "", { "dependencies": { "@shikijs/core": "3.21.0", "@shikijs/types": "3.21.0" } }, "sha512-CZwvCWWIiRRiFk9/JKzdEooakAP8mQDtBOQ1TKiCaS2E1bYtyBCOkUzS8akO34/7ufICQ29oeSfkb3tT5KtrhA=="], + "@shikijs/transformers": ["@shikijs/transformers@3.22.0", "", { "dependencies": { "@shikijs/core": "3.22.0", "@shikijs/types": "3.22.0" } }, "sha512-E7eRV7mwDBjueLF6852n2oYeJYxBq3NSsDk+uyruYAXONv4U8holGmIrT+mPRJQ1J1SNOH6L8G19KRzmBawrFw=="], - "@shikijs/types": ["@shikijs/types@3.21.0", "", { "dependencies": { "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4" } }, "sha512-zGrWOxZ0/+0ovPY7PvBU2gIS9tmhSUUt30jAcNV0Bq0gb2S98gwfjIs1vxlmH5zM7/4YxLamT6ChlqqAJmPPjA=="], + "@shikijs/types": ["@shikijs/types@3.22.0", "", { "dependencies": { "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4" } }, "sha512-491iAekgKDBFE67z70Ok5a8KBMsQ2IJwOWw3us/7ffQkIBCyOQfm/aNwVMBUriP02QshIfgHCBSIYAl3u2eWjg=="], "@shikijs/vscode-textmate": ["@shikijs/vscode-textmate@10.0.2", "", {}, "sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg=="], @@ -1257,7 +1257,7 @@ "@smithy/config-resolver": ["@smithy/config-resolver@4.4.6", "", { "dependencies": { "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "@smithy/util-config-provider": "^4.2.0", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "tslib": "^2.6.2" } }, "sha512-qJpzYC64kaj3S0fueiu3kXm8xPrR3PcXDPEgnaNMRn0EjNSZFoFjvbUp0YUDsRhN1CB90EnHJtbxWKevnH99UQ=="], - "@smithy/core": ["@smithy/core@3.22.0", "", { "dependencies": { "@smithy/middleware-serde": "^4.2.9", "@smithy/protocol-http": "^5.3.8", 
"@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-stream": "^4.5.10", "@smithy/util-utf8": "^4.2.0", "@smithy/uuid": "^1.1.0", "tslib": "^2.6.2" } }, "sha512-6vjCHD6vaY8KubeNw2Fg3EK0KLGQYdldG4fYgQmA0xSW0dJ8G2xFhSOdrlUakWVoP5JuWHtFODg3PNd/DN3FDA=="], + "@smithy/core": ["@smithy/core@3.22.1", "", { "dependencies": { "@smithy/middleware-serde": "^4.2.9", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-stream": "^4.5.11", "@smithy/util-utf8": "^4.2.0", "@smithy/uuid": "^1.1.0", "tslib": "^2.6.2" } }, "sha512-x3ie6Crr58MWrm4viHqqy2Du2rHYZjwu8BekasrQx4ca+Y24dzVAwq3yErdqIbc2G3I0kLQA13PQ+/rde+u65g=="], "@smithy/credential-provider-imds": ["@smithy/credential-provider-imds@4.2.8", "", { "dependencies": { "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "tslib": "^2.6.2" } }, "sha512-FNT0xHS1c/CPN8upqbMFP83+ul5YgdisfCfkZ86Jh2NSmnqw/AJ6x5pEogVCTVvSm7j9MopRU89bmDelxuDMYw=="], @@ -1287,9 +1287,9 @@ "@smithy/middleware-content-length": ["@smithy/middleware-content-length@4.2.8", "", { "dependencies": { "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-RO0jeoaYAB1qBRhfVyq0pMgBoUK34YEJxVxyjOWYZiOKOq2yMZ4MnVXMZCUDenpozHue207+9P5ilTV1zeda0A=="], - "@smithy/middleware-endpoint": ["@smithy/middleware-endpoint@4.4.12", "", { "dependencies": { "@smithy/core": "^3.22.0", "@smithy/middleware-serde": "^4.2.9", "@smithy/node-config-provider": "^4.3.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-middleware": "^4.2.8", "tslib": "^2.6.2" } }, 
"sha512-9JMKHVJtW9RysTNjcBZQHDwB0p3iTP6B1IfQV4m+uCevkVd/VuLgwfqk5cnI4RHcp4cPwoIvxQqN4B1sxeHo8Q=="], + "@smithy/middleware-endpoint": ["@smithy/middleware-endpoint@4.4.13", "", { "dependencies": { "@smithy/core": "^3.22.1", "@smithy/middleware-serde": "^4.2.9", "@smithy/node-config-provider": "^4.3.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-middleware": "^4.2.8", "tslib": "^2.6.2" } }, "sha512-x6vn0PjYmGdNuKh/juUJJewZh7MoQ46jYaJ2mvekF4EesMuFfrl4LaW/k97Zjf8PTCPQmPgMvwewg7eNoH9n5w=="], - "@smithy/middleware-retry": ["@smithy/middleware-retry@4.4.29", "", { "dependencies": { "@smithy/node-config-provider": "^4.3.8", "@smithy/protocol-http": "^5.3.8", "@smithy/service-error-classification": "^4.2.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/uuid": "^1.1.0", "tslib": "^2.6.2" } }, "sha512-bmTn75a4tmKRkC5w61yYQLb3DmxNzB8qSVu9SbTYqW6GAL0WXO2bDZuMAn/GJSbOdHEdjZvWxe+9Kk015bw6Cg=="], + "@smithy/middleware-retry": ["@smithy/middleware-retry@4.4.30", "", { "dependencies": { "@smithy/node-config-provider": "^4.3.8", "@smithy/protocol-http": "^5.3.8", "@smithy/service-error-classification": "^4.2.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/uuid": "^1.1.0", "tslib": "^2.6.2" } }, "sha512-CBGyFvN0f8hlnqKH/jckRDz78Snrp345+PVk8Ux7pnkUCW97Iinse59lY78hBt04h1GZ6hjBN94BRwZy1xC8Bg=="], "@smithy/middleware-serde": ["@smithy/middleware-serde@4.2.9", "", { "dependencies": { "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-eMNiej0u/snzDvlqRGSN3Vl0ESn3838+nKyVfF2FKNXFbi4SERYT6PR392D39iczngbqqGG0Jl1DlCnp7tBbXQ=="], @@ -1297,7 +1297,7 @@ "@smithy/node-config-provider": ["@smithy/node-config-provider@4.3.8", "", { "dependencies": { "@smithy/property-provider": "^4.2.8", 
"@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-aFP1ai4lrbVlWjfpAfRSL8KFcnJQYfTl5QxLJXY32vghJrDuFyPZ6LtUL+JEGYiFRG1PfPLHLoxj107ulncLIg=="], - "@smithy/node-http-handler": ["@smithy/node-http-handler@4.4.8", "", { "dependencies": { "@smithy/abort-controller": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/querystring-builder": "^4.2.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-q9u+MSbJVIJ1QmJ4+1u+cERXkrhuILCBDsJUBAW1MPE6sFonbCNaegFuwW9ll8kh5UdyY3jOkoOGlc7BesoLpg=="], + "@smithy/node-http-handler": ["@smithy/node-http-handler@4.4.9", "", { "dependencies": { "@smithy/abort-controller": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/querystring-builder": "^4.2.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-KX5Wml5mF+luxm1szW4QDz32e3NObgJ4Fyw+irhph4I/2geXwUy4jkIMUs5ZPGflRBeR6BUkC2wqIab4Llgm3w=="], "@smithy/property-provider": ["@smithy/property-provider@4.2.8", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-EtCTbyIveCKeOXDSWSdze3k612yCPq1YbXsbqX3UHhkOSW8zKsM9NOJG5gTIya0vbY2DIaieG8pKo1rITHYL0w=="], @@ -1313,7 +1313,7 @@ "@smithy/signature-v4": ["@smithy/signature-v4@5.3.8", "", { "dependencies": { "@smithy/is-array-buffer": "^4.2.0", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "@smithy/util-hex-encoding": "^4.2.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-uri-escape": "^4.2.0", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-6A4vdGj7qKNRF16UIcO8HhHjKW27thsxYci+5r/uVRkdcBEkOEiY8OMPuydLX4QHSrJqGHPJzPRwwVTqbLZJhg=="], - "@smithy/smithy-client": ["@smithy/smithy-client@4.11.1", "", { "dependencies": { "@smithy/core": "^3.22.0", "@smithy/middleware-endpoint": "^4.4.12", "@smithy/middleware-stack": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "@smithy/util-stream": "^4.5.10", "tslib": "^2.6.2" } }, 
"sha512-SERgNg5Z1U+jfR6/2xPYjSEHY1t3pyTHC/Ma3YQl6qWtmiL42bvNId3W/oMUWIwu7ekL2FMPdqAmwbQegM7HeQ=="], + "@smithy/smithy-client": ["@smithy/smithy-client@4.11.2", "", { "dependencies": { "@smithy/core": "^3.22.1", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-stack": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "@smithy/util-stream": "^4.5.11", "tslib": "^2.6.2" } }, "sha512-SCkGmFak/xC1n7hKRsUr6wOnBTJ3L22Qd4e8H1fQIuKTAjntwgU8lrdMe7uHdiT2mJAOWA/60qaW9tiMu69n1A=="], "@smithy/types": ["@smithy/types@4.12.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-9YcuJVTOBDjg9LWo23Qp0lTQ3D7fQsQtwle0jVfpbUHy9qBwCEgKuVH4FqFB3VYu0nwdHKiEMA+oXz7oV8X1kw=="], @@ -1329,9 +1329,9 @@ "@smithy/util-config-provider": ["@smithy/util-config-provider@4.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-YEjpl6XJ36FTKmD+kRJJWYvrHeUvm5ykaUS5xK+6oXffQPHeEM4/nXlZPe+Wu0lsgRUcNZiliYNh/y7q9c2y6Q=="], - "@smithy/util-defaults-mode-browser": ["@smithy/util-defaults-mode-browser@4.3.28", "", { "dependencies": { "@smithy/property-provider": "^4.2.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-/9zcatsCao9h6g18p/9vH9NIi5PSqhCkxQ/tb7pMgRFnqYp9XUOyOlGPDMHzr8n5ih6yYgwJEY2MLEobUgi47w=="], + "@smithy/util-defaults-mode-browser": ["@smithy/util-defaults-mode-browser@4.3.29", "", { "dependencies": { "@smithy/property-provider": "^4.2.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-nIGy3DNRmOjaYaaKcQDzmWsro9uxlaqUOhZDHQed9MW/GmkBZPtnU70Pu1+GT9IBmUXwRdDuiyaeiy9Xtpn3+Q=="], - "@smithy/util-defaults-mode-node": ["@smithy/util-defaults-mode-node@4.2.31", "", { "dependencies": { "@smithy/config-resolver": "^4.4.6", "@smithy/credential-provider-imds": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, 
"sha512-JTvoApUXA5kbpceI2vuqQzRjeTbLpx1eoa5R/YEZbTgtxvIB7AQZxFJ0SEyfCpgPCyVV9IT7we+ytSeIB3CyWA=="], + "@smithy/util-defaults-mode-node": ["@smithy/util-defaults-mode-node@4.2.32", "", { "dependencies": { "@smithy/config-resolver": "^4.4.6", "@smithy/credential-provider-imds": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-7dtFff6pu5fsjqrVve0YMhrnzJtccCWDacNKOkiZjJ++fmjGExmmSu341x+WU6Oc1IccL7lDuaUj7SfrHpWc5Q=="], "@smithy/util-endpoints": ["@smithy/util-endpoints@3.2.8", "", { "dependencies": { "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-8JaVTn3pBDkhZgHQ8R0epwWt+BqPSLCjdjXXusK1onwJlRuN69fbvSK66aIKKO7SwVFM6x2J2ox5X8pOaWcUEw=="], @@ -1341,7 +1341,7 @@ "@smithy/util-retry": ["@smithy/util-retry@4.2.8", "", { "dependencies": { "@smithy/service-error-classification": "^4.2.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-CfJqwvoRY0kTGe5AkQokpURNCT1u/MkRzMTASWMPPo2hNSnKtF1D45dQl3DE2LKLr4m+PW9mCeBMJr5mCAVThg=="], - "@smithy/util-stream": ["@smithy/util-stream@4.5.10", "", { "dependencies": { "@smithy/fetch-http-handler": "^5.3.9", "@smithy/node-http-handler": "^4.4.8", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-buffer-from": "^4.2.0", "@smithy/util-hex-encoding": "^4.2.0", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-jbqemy51UFSZSp2y0ZmRfckmrzuKww95zT9BYMmuJ8v3altGcqjwoV1tzpOwuHaKrwQrCjIzOib499ymr2f98g=="], + "@smithy/util-stream": ["@smithy/util-stream@4.5.11", "", { "dependencies": { "@smithy/fetch-http-handler": "^5.3.9", "@smithy/node-http-handler": "^4.4.9", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-buffer-from": "^4.2.0", "@smithy/util-hex-encoding": "^4.2.0", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, 
"sha512-lKmZ0S/3Qj2OF5H1+VzvDLb6kRxGzZHq6f3rAsoSu5cTLGsn3v3VQBA8czkNNXlLjoFEtVu3OQT2jEeOtOE2CA=="], "@smithy/util-uri-escape": ["@smithy/util-uri-escape@4.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-igZpCKV9+E/Mzrpq6YacdTQ0qTiLm85gD6N/IrmyDvQFA4UnU3d5g3m8tMT/6zG/vVkWSU+VxeUyGonL62DuxA=="], @@ -1529,7 +1529,7 @@ "@types/ms": ["@types/ms@2.1.0", "", {}, "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA=="], - "@types/node": ["@types/node@22.19.7", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-MciR4AKGHWl7xwxkBa6xUGxQJ4VBOmPTF7sL+iGzuahOFaO0jHCsuEfS80pan1ef4gWId1oWOweIhrDEYLuaOw=="], + "@types/node": ["@types/node@22.19.10", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-tF5VOugLS/EuDlTBijk0MqABfP8UxgYazTLo3uIn3b4yJgg26QRbVYJYsDtHrjdDUIRfP70+VfhTTc+CE1yskw=="], "@types/node-fetch": ["@types/node-fetch@2.6.13", "", { "dependencies": { "@types/node": "*", "form-data": "^4.0.4" } }, "sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw=="], @@ -1539,7 +1539,7 @@ "@types/prismjs": ["@types/prismjs@1.26.5", "", {}, "sha512-AUZTa7hQ2KY5L7AmtSiqxlhWxb4ina0yd8hNbl4TWuqnv/pFP0nDMb3YrfSBf4hJVGLh2YEIBfKaBW/9UEl6IQ=="], - "@types/react": ["@types/react@19.2.10", "", { "dependencies": { "csstype": "^3.2.2" } }, "sha512-WPigyYuGhgZ/cTPRXB2EwUw+XvsRA3GqHlsP4qteqrnnjDrApbS7MxcGr/hke5iUoeB7E/gQtrs9I37zAJ0Vjw=="], + "@types/react": ["@types/react@19.2.13", "", { "dependencies": { "csstype": "^3.2.2" } }, "sha512-KkiJeU6VbYbUOp5ITMIc7kBfqlYkKA5KhEHVrGMmUUMt7NeaZg65ojdPk+FtNrBAOXNVM5QM72jnADjM+XVRAQ=="], "@types/react-dom": ["@types/react-dom@19.2.3", "", { "peerDependencies": { "@types/react": "^19.2.0" } }, "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ=="], @@ -1573,7 +1573,7 @@ "@types/yauzl": ["@types/yauzl@2.10.3", "", { "dependencies": { "@types/node": "*" } }, 
"sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q=="], - "@typespec/ts-http-runtime": ["@typespec/ts-http-runtime@0.3.2", "", { "dependencies": { "http-proxy-agent": "^7.0.0", "https-proxy-agent": "^7.0.0", "tslib": "^2.6.2" } }, "sha512-IlqQ/Gv22xUC1r/WQm4StLkYQmaaTsXAhUVsNE0+xiyf0yRFiH5++q78U3bw6bLKDCTmh0uqKB9eG9+Bt75Dkg=="], + "@typespec/ts-http-runtime": ["@typespec/ts-http-runtime@0.3.3", "", { "dependencies": { "http-proxy-agent": "^7.0.0", "https-proxy-agent": "^7.0.0", "tslib": "^2.6.2" } }, "sha512-91fp6CAAJSRtH5ja95T1FHSKa8aPW9/Zw6cta81jlZTUw/+Vq8jM/AfF/14h2b71wwR84JUTW/3Y8QPhDAawFA=="], "@ungap/structured-clone": ["@ungap/structured-clone@1.3.0", "", {}, "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g=="], @@ -1623,7 +1623,7 @@ "agentkeepalive": ["agentkeepalive@4.6.0", "", { "dependencies": { "humanize-ms": "^1.2.1" } }, "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ=="], - "ai": ["ai@5.0.123", "", { "dependencies": { "@ai-sdk/gateway": "2.0.29", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-V3Imb0tg0pHCa6a/VsoW/FZpT07mwUw/4Hj6nexJC1Nvf1eyKQJyaYVkl+YTLnA8cKQSUkoarKhXWbFy4CSgjw=="], + "ai": ["ai@5.0.129", "", { "dependencies": { "@ai-sdk/gateway": "2.0.35", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-IARdFetNTedDfqpByNMm9p0oHj7JS+SpOrbgLdQdyCiDe70Xk07wnKP4Lub1ckCrxkhAxY3yxOHllGEjbpXgpQ=="], "ajv": ["ajv@6.12.6", "", { "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" } }, "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g=="], @@ -1661,7 +1661,7 @@ 
"ast-types": ["ast-types@0.13.4", "", { "dependencies": { "tslib": "^2.0.1" } }, "sha512-x1FCFnFifvYDDzTaLII71vG5uvDwgtmDTEVWAxrgeiR8VjMONcCXJx7E+USjDtHlwFmt9MysbqgF9b9Vjr6w+w=="], - "ast-v8-to-istanbul": ["ast-v8-to-istanbul@0.3.10", "", { "dependencies": { "@jridgewell/trace-mapping": "^0.3.31", "estree-walker": "^3.0.3", "js-tokens": "^9.0.1" } }, "sha512-p4K7vMz2ZSk3wN8l5o3y2bJAoZXT3VuJI5OLTATY/01CYWumWvwkUw0SqDBnNq6IiTO3qDa1eSQDibAV8g7XOQ=="], + "ast-v8-to-istanbul": ["ast-v8-to-istanbul@0.3.11", "", { "dependencies": { "@jridgewell/trace-mapping": "^0.3.31", "estree-walker": "^3.0.3", "js-tokens": "^10.0.0" } }, "sha512-Qya9fkoofMjCBNVdWINMjB5KZvkYfaO9/anwkWnjxibpWUxo5iHl2sOdP7/uAqaRuUYuoo8rDwnbaaKVFxoUvw=="], "astring": ["astring@1.9.0", "", { "bin": { "astring": "bin/astring" } }, "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg=="], @@ -1675,7 +1675,7 @@ "aws-ssl-profiles": ["aws-ssl-profiles@1.1.2", "", {}, "sha512-NZKeq9AfyQvEeNlN0zSYAaWrmBffJh3IELMZfRpJVWgrpEbtEpnjvzqBPf+mxoI287JohRDoa+/nsfqqiZmF6g=="], - "axios": ["axios@1.13.4", "", { "dependencies": { "follow-redirects": "^1.15.6", "form-data": "^4.0.4", "proxy-from-env": "^1.1.0" } }, "sha512-1wVkUaAO6WyaYtCkcYCOx12ZgpGf9Zif+qXa4n+oYzK558YryKqiL6UWwd5DqiH3VRW0GYhTZQ/vlgJrCoNQlg=="], + "axios": ["axios@1.13.5", "", { "dependencies": { "follow-redirects": "^1.15.11", "form-data": "^4.0.5", "proxy-from-env": "^1.1.0" } }, "sha512-cz4ur7Vb0xS4/KUN0tPWe44eqxrIu31me+fbang3ijiNscE129POzipJJA6zniq2C/Z6sJCjMimjS8Lc/GAs8Q=="], "b4a": ["b4a@1.7.3", "", { "peerDependencies": { "react-native-b4a": "*" }, "optionalPeers": ["react-native-b4a"] }, "sha512-5Q2mfq2WfGuFp3uS//0s6baOJLMoVduPYVeNmDYxu5OUA1/cBfvr2RIS7vi62LdNj/urk1hfmj867I3qt6uZ7Q=="], @@ -1727,7 +1727,7 @@ "boolbase": ["boolbase@1.0.0", "", {}, "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww=="], - "bowser": ["bowser@2.13.1", "", {}, 
"sha512-OHawaAbjwx6rqICCKgSG0SAnT05bzd7ppyKLVUITZpANBaaMFBAsaNkto3LoQ31tyFP5kNujE8Cdx85G9VzOkw=="], + "bowser": ["bowser@2.14.1", "", {}, "sha512-tzPjzCxygAKWFOJP011oxFHs57HzIhOEracIgAePE4pqB3LikALKnSzUyU4MGs9/iCEUuHlAJTjTc5M+u7YEGg=="], "brace-expansion": ["brace-expansion@2.0.2", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ=="], @@ -1769,7 +1769,7 @@ "camelize": ["camelize@1.0.1", "", {}, "sha512-dU+Tx2fsypxTgtLoE36npi3UqcjSSMNYfkqgmoEhtZrraP5VWq0K7FkWVTYa8eMPtnU/G2txVsfdCJTn9uzpuQ=="], - "caniuse-lite": ["caniuse-lite@1.0.30001766", "", {}, "sha512-4C0lfJ0/YPjJQHagaE9x2Elb69CIqEPZeG0anQt9SIvIoOH4a4uaRl73IavyO+0qZh6MDLH//DrXThEYKHkmYA=="], + "caniuse-lite": ["caniuse-lite@1.0.30001769", "", {}, "sha512-BCfFL1sHijQlBGWBMuJyhZUhzo7wer5sVj9hqekB/7xn0Ypy+pER/edCYQm4exbXj4WiySGp40P8UuTh6w1srg=="], "caseless": ["caseless@0.12.0", "", {}, "sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw=="], @@ -2023,7 +2023,7 @@ "dotenv-expand": ["dotenv-expand@10.0.0", "", {}, "sha512-GopVGCpVS1UKH75VKHGuQFqS1Gusej0z4FyQkPdwjil2gNIv+LNsqBlboOzpJFZKVT95GkCyWJbBSdFEFUWI2A=="], - "drizzle-kit": ["drizzle-kit@0.31.8", "", { "dependencies": { "@drizzle-team/brocli": "^0.10.2", "@esbuild-kit/esm-loader": "^2.5.5", "esbuild": "^0.25.4", "esbuild-register": "^3.5.0" }, "bin": { "drizzle-kit": "bin.cjs" } }, "sha512-O9EC/miwdnRDY10qRxM8P3Pg8hXe3LyU4ZipReKOgTwn4OqANmftj8XJz1UPUAS6NMHf0E2htjsbQujUTkncCg=="], + "drizzle-kit": ["drizzle-kit@0.31.9", "", { "dependencies": { "@drizzle-team/brocli": "^0.10.2", "@esbuild-kit/esm-loader": "^2.5.5", "esbuild": "^0.25.4", "esbuild-register": "^3.5.0" }, "bin": { "drizzle-kit": "bin.cjs" } }, "sha512-GViD3IgsXn7trFyBUUHyTFBpH/FsHTxYJ66qdbVggxef4UBPHRYxQaRzYLTuekYnk9i5FIEL9pbBIwMqX/Uwrg=="], "drizzle-orm": ["drizzle-orm@0.44.7", "", { "peerDependencies": { "@aws-sdk/client-rds-data": ">=3", 
"@cloudflare/workers-types": ">=4", "@electric-sql/pglite": ">=0.2.0", "@libsql/client": ">=0.10.0", "@libsql/client-wasm": ">=0.10.0", "@neondatabase/serverless": ">=0.10.0", "@op-engineering/op-sqlite": ">=2", "@opentelemetry/api": "^1.4.1", "@planetscale/database": ">=1.13", "@prisma/client": "*", "@tidbcloud/serverless": "*", "@types/better-sqlite3": "*", "@types/pg": "*", "@types/sql.js": "*", "@upstash/redis": ">=1.34.7", "@vercel/postgres": ">=0.8.0", "@xata.io/client": "*", "better-sqlite3": ">=7", "bun-types": "*", "expo-sqlite": ">=14.0.0", "gel": ">=2", "knex": "*", "kysely": "*", "mysql2": ">=2", "pg": ">=8", "postgres": ">=3", "sql.js": ">=1", "sqlite3": ">=5" }, "optionalPeers": ["@aws-sdk/client-rds-data", "@cloudflare/workers-types", "@electric-sql/pglite", "@libsql/client", "@libsql/client-wasm", "@neondatabase/serverless", "@op-engineering/op-sqlite", "@opentelemetry/api", "@planetscale/database", "@prisma/client", "@tidbcloud/serverless", "@types/better-sqlite3", "@types/pg", "@types/sql.js", "@upstash/redis", "@vercel/postgres", "@xata.io/client", "better-sqlite3", "bun-types", "expo-sqlite", "gel", "knex", "kysely", "mysql2", "pg", "postgres", "sql.js", "sqlite3"] }, "sha512-quIpnYznjU9lHshEOAYLoZ9s3jweleHlZIAWR/jX9gAWNg/JhQ1wj0KGRf7/Zm+obRrYd9GjPVJg790QY9N5AQ=="], @@ -2031,7 +2031,7 @@ "dunder-proto": ["dunder-proto@1.0.1", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-errors": "^1.3.0", "gopd": "^1.2.0" } }, "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A=="], - "e2b": ["e2b@2.12.0", "", { "dependencies": { "@bufbuild/protobuf": "^2.6.2", "@connectrpc/connect": "2.0.0-rc.3", "@connectrpc/connect-web": "2.0.0-rc.3", "chalk": "^5.3.0", "compare-versions": "^6.1.0", "dockerfile-ast": "^0.7.1", "glob": "^11.1.0", "openapi-fetch": "^0.14.1", "platform": "^1.3.6", "tar": "^7.5.4" } }, "sha512-uzMEg11JQ6o90ODBUgPaQXKJ3tQNiQMAYi5yU5jK60Y0l+CSs7U8qoQcgTiSCemkIEyrmIDFub/ega8dv5vMCw=="], 
+ "e2b": ["e2b@2.12.1", "", { "dependencies": { "@bufbuild/protobuf": "^2.6.2", "@connectrpc/connect": "2.0.0-rc.3", "@connectrpc/connect-web": "2.0.0-rc.3", "chalk": "^5.3.0", "compare-versions": "^6.1.0", "dockerfile-ast": "^0.7.1", "glob": "^11.1.0", "openapi-fetch": "^0.14.1", "platform": "^1.3.6", "tar": "^7.5.4" } }, "sha512-qKYwS0VSZqvtWAT4OrCtOwRhhMlcd359zyFRGAZZ1wpYHHjr9zR872UCoDb/d5jFVUsREcUgktURc47XxfznPg=="], "eastasianwidth": ["eastasianwidth@0.2.0", "", {}, "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA=="], @@ -2041,7 +2041,7 @@ "effect": ["effect@3.18.4", "", { "dependencies": { "@standard-schema/spec": "^1.0.0", "fast-check": "^3.23.1" } }, "sha512-b1LXQJLe9D11wfnOKAk3PKxuqYshQ0Heez+y5pnkd3jLj1yx9QhM72zZ9uUrOQyNvrs2GZZd/3maL0ZV18YuDA=="], - "electron-to-chromium": ["electron-to-chromium@1.5.282", "", {}, "sha512-FCPkJtpst28UmFzd903iU7PdeVTfY0KAeJy+Lk0GLZRwgwYHn/irRcaCbQQOmr5Vytc/7rcavsYLvTM8RiHYhQ=="], + "electron-to-chromium": ["electron-to-chromium@1.5.286", "", {}, "sha512-9tfDXhJ4RKFNerfjdCcZfufu49vg620741MNs26a9+bhLThdB+plgMeou98CAaHu/WATj2iHOOHTp1hWtABj2A=="], "emoji-regex": ["emoji-regex@10.6.0", "", {}, "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A=="], @@ -2063,7 +2063,7 @@ "engine.io-parser": ["engine.io-parser@5.2.3", "", {}, "sha512-HqD3yTBfnBxIrbnM1DoD6Pcq8NECnh8d4As1Qgh0z5Gg3jRRIqijury0CL3ghu/edArpUYiYqQiDUQBIs4np3Q=="], - "enhanced-resolve": ["enhanced-resolve@5.18.4", "", { "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.2.0" } }, "sha512-LgQMM4WXU3QI+SYgEc2liRgznaD5ojbmY3sb8LxyguVkIg5FxdpTkvk72te2R38/TGKxH634oLxXRGY6d7AP+Q=="], + "enhanced-resolve": ["enhanced-resolve@5.19.0", "", { "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.3.0" } }, "sha512-phv3E1Xl4tQOShqSte26C7Fl84EwUdZsyOuSSk9qtAGyyQs2s3jJzComh+Abf4g187lUUAvH+H26omrqia2aGg=="], "entities": ["entities@6.0.1", "", {}, 
"sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g=="], @@ -2171,7 +2171,7 @@ "fast-safe-stringify": ["fast-safe-stringify@2.1.1", "", {}, "sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA=="], - "fast-xml-parser": ["fast-xml-parser@5.3.3", "", { "dependencies": { "strnum": "^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-2O3dkPAAC6JavuMm8+4+pgTk+5hoAs+CjZ+sWcQLkX9+/tHRuTkQh/Oaifr8qDmZ8iEHb771Ea6G8CdwkrgvYA=="], + "fast-xml-parser": ["fast-xml-parser@5.3.5", "", { "dependencies": { "strnum": "^2.1.2" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-JeaA2Vm9ffQKp9VjvfzObuMCjUYAp5WDYhRYL5LrBPY/jUDlUtOvDfot0vKSkB9tuX885BDHjtw4fZadD95wnA=="], "fastq": ["fastq@1.20.1", "", { "dependencies": { "reusify": "^1.0.4" } }, "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw=="], @@ -2215,7 +2215,7 @@ "fraction.js": ["fraction.js@4.3.7", "", {}, "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew=="], - "framer-motion": ["framer-motion@12.29.2", "", { "dependencies": { "motion-dom": "^12.29.2", "motion-utils": "^12.29.2", "tslib": "^2.4.0" }, "peerDependencies": { "@emotion/is-prop-valid": "*", "react": "^18.0.0 || ^19.0.0", "react-dom": "^18.0.0 || ^19.0.0" }, "optionalPeers": ["@emotion/is-prop-valid", "react", "react-dom"] }, "sha512-lSNRzBJk4wuIy0emYQ/nfZ7eWhqud2umPKw2QAQki6uKhZPKm2hRQHeQoHTG9MIvfobb+A/LbEWPJU794ZUKrg=="], + "framer-motion": ["framer-motion@12.34.0", "", { "dependencies": { "motion-dom": "^12.34.0", "motion-utils": "^12.29.2", "tslib": "^2.4.0" }, "peerDependencies": { "@emotion/is-prop-valid": "*", "react": "^18.0.0 || ^19.0.0", "react-dom": "^18.0.0 || ^19.0.0" }, "optionalPeers": ["@emotion/is-prop-valid", "react", "react-dom"] }, "sha512-+/H49owhzkzQyxtn7nZeF4kdH++I2FWrESQ184Zbcw5cEqNHYkE5yxWxcTLSj5lNx3NWdbIRy5FHqUvetD8FWg=="], "fresh": ["fresh@2.0.0", "", {}, 
"sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A=="], @@ -2251,7 +2251,7 @@ "get-stream": ["get-stream@8.0.1", "", {}, "sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA=="], - "get-tsconfig": ["get-tsconfig@4.13.0", "", { "dependencies": { "resolve-pkg-maps": "^1.0.0" } }, "sha512-1VKTZJCwBrvbd+Wn3AOgQP/2Av+TfTCOlE4AcRJE72W1ksZXbAx8PPBR9RzgTeSPzlPMHrbANMH3LbltH73wxQ=="], + "get-tsconfig": ["get-tsconfig@4.13.6", "", { "dependencies": { "resolve-pkg-maps": "^1.0.0" } }, "sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw=="], "get-uri": ["get-uri@6.0.5", "", { "dependencies": { "basic-ftp": "^5.0.2", "data-uri-to-buffer": "^6.0.2", "debug": "^4.3.4" } }, "sha512-b1O07XYq8eRuVzBNgJLstU6FYc1tS6wnMtF1I1D9lE8LxZSOGZ7LhxN54yPP6mGw5f2CkXY2BQUL9Fx41qvcIg=="], @@ -2435,7 +2435,7 @@ "istanbul-reports": ["istanbul-reports@3.2.0", "", { "dependencies": { "html-escaper": "^2.0.0", "istanbul-lib-report": "^3.0.0" } }, "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA=="], - "jackspeak": ["jackspeak@4.1.1", "", { "dependencies": { "@isaacs/cliui": "^8.0.2" } }, "sha512-zptv57P3GpL+O0I7VdMJNBZCu+BPHVQUk55Ft8/QCJjTVxrnJHuVuX/0Bl2A6/+2oyR/ZMEuFKwmzqqZ/U5nPQ=="], + "jackspeak": ["jackspeak@4.2.3", "", { "dependencies": { "@isaacs/cliui": "^9.0.0" } }, "sha512-ykkVRwrYvFm1nb2AJfKKYPr0emF6IiXDYUaFx4Zn9ZuIH7MrzEZ3sD5RlqGXNRpHtvUHJyOnCEFxOlNDtGo7wg=="], "jaeger-client": ["jaeger-client@3.19.0", "", { "dependencies": { "node-int64": "^0.4.0", "opentracing": "^0.14.4", "thriftrw": "^3.5.0", "uuid": "^8.3.2", "xorshift": "^1.1.1" } }, "sha512-M0c7cKHmdyEUtjemnJyx/y9uX16XHocL46yQvyqDlPdvAcwPDbHrIbKjQdBqtiE4apQ/9dmr+ZLJYYPGnurgpw=="], @@ -2447,7 +2447,7 @@ "js-tiktoken": ["js-tiktoken@1.0.21", "", { "dependencies": { "base64-js": "^1.5.1" } }, 
"sha512-biOj/6M5qdgx5TKjDnFT1ymSpM5tbd3ylwDtrQvFQSu0Z7bBYko2dF+W/aUkXUPuk6IVpRxk/3Q2sHOzGlS36g=="], - "js-tokens": ["js-tokens@9.0.1", "", {}, "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ=="], + "js-tokens": ["js-tokens@10.0.0", "", {}, "sha512-lM/UBzQmfJRo9ABXbPWemivdCW8V2G8FHaHdypQaIy523snUjog0W71ayWXTjiR+ixeMyVHN2XcpnTd/liPg/Q=="], "js-yaml": ["js-yaml@4.1.0", "", { "dependencies": { "argparse": "^2.0.1" }, "bin": { "js-yaml": "bin/js-yaml.js" } }, "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA=="], @@ -2479,7 +2479,7 @@ "kleur": ["kleur@3.0.3", "", {}, "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w=="], - "kysely": ["kysely@0.28.10", "", {}, "sha512-ksNxfzIW77OcZ+QWSAPC7yDqUSaIVwkTWnTPNiIy//vifNbwsSgQ57OkkncHxxpcBHM3LRfLAZVEh7kjq5twVA=="], + "kysely": ["kysely@0.28.11", "", {}, "sha512-zpGIFg0HuoC893rIjYX1BETkVWdDnzTzF5e0kWXJFg5lE0k1/LfNWBejrcnOFu8Q2Rfq/hTDTU7XLUM8QOrpzg=="], "langsmith": ["langsmith@0.3.87", "", { "dependencies": { "@types/uuid": "^10.0.0", "chalk": "^4.1.2", "console-table-printer": "^2.12.1", "p-queue": "^6.6.2", "semver": "^7.6.3", "uuid": "^10.0.0" }, "peerDependencies": { "@opentelemetry/api": "*", "@opentelemetry/exporter-trace-otlp-proto": "*", "@opentelemetry/sdk-trace-base": "*", "openai": "*" }, "optionalPeers": ["@opentelemetry/api", "@opentelemetry/exporter-trace-otlp-proto", "@opentelemetry/sdk-trace-base", "openai"] }, "sha512-XXR1+9INH8YX96FKWc5tie0QixWz6tOqAsAKfcJyPkE0xPep+NDz0IQLR32q4bn10QK3LqD2HN6T3n6z1YLW7Q=="], @@ -2567,7 +2567,7 @@ "lru-cache": ["lru-cache@11.2.5", "", {}, "sha512-vFrFJkWtJvJnD5hg+hJvVE8Lh/TcMzKnTgCWmtBipwI5yLX/iX+5UB2tfuyODF5E7k9xEzMdYgGqaSb1c0c5Yw=="], - "lru.min": ["lru.min@1.1.3", "", {}, "sha512-Lkk/vx6ak3rYkRR0Nhu4lFUT2VDnQSxBe8Hbl7f36358p6ow8Bnvr8lrLt98H8J1aGxfhbX4Fs5tYg2+FTwr5Q=="], + "lru.min": ["lru.min@1.1.4", "", {}, 
"sha512-DqC6n3QQ77zdFpCMASA1a3Jlb64Hv2N2DciFGkO/4L9+q/IpIAuRlKOvCXabtRW6cQf8usbmM6BE/TOPysCdIA=="], "lucide-react": ["lucide-react@0.511.0", "", { "peerDependencies": { "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "sha512-VK5a2ydJ7xm8GvBeKLS9mu1pVK6ucef9780JVUjw6bAjJL/QXnd4Y0p7SPeOUMC27YhzNCZvm5d/QX0Tp3rc0w=="], @@ -2727,7 +2727,7 @@ "minimal-polyfills": ["minimal-polyfills@2.2.3", "", {}, "sha512-oxdmJ9cL+xV72h0xYxp4tP2d5/fTBpP45H8DIOn9pASuF8a3IYTf+25fMGDYGiWW+MFsuog6KD6nfmhZJQ+uUw=="], - "minimatch": ["minimatch@10.1.1", "", { "dependencies": { "@isaacs/brace-expansion": "^5.0.0" } }, "sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ=="], + "minimatch": ["minimatch@10.1.2", "", { "dependencies": { "@isaacs/brace-expansion": "^5.0.1" } }, "sha512-fu656aJ0n2kcXwsnwnv9g24tkU5uSmOlTjd6WyyaKm2Z+h1qmY6bAjrcaIxF/BslFqbZ8UBtbJi7KgQOZD2PTw=="], "minimist": ["minimist@1.2.8", "", {}, "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA=="], @@ -2749,7 +2749,7 @@ "mongodb-connection-string-url": ["mongodb-connection-string-url@3.0.2", "", { "dependencies": { "@types/whatwg-url": "^11.0.2", "whatwg-url": "^14.1.0 || ^13.0.0" } }, "sha512-rMO7CGo/9BFwyZABcKAWL8UJwH/Kc2x0g72uhDWzG48URRax5TCIcJ7Rc3RZqffZzO/Gwff/jyKwCU9TN8gehA=="], - "motion-dom": ["motion-dom@12.29.2", "", { "dependencies": { "motion-utils": "^12.29.2" } }, "sha512-/k+NuycVV8pykxyiTCoFzIVLA95Nb1BFIVvfSu9L50/6K6qNeAYtkxXILy/LRutt7AzaYDc2myj0wkCVVYAPPA=="], + "motion-dom": ["motion-dom@12.34.0", "", { "dependencies": { "motion-utils": "^12.29.2" } }, "sha512-Lql3NuEcScRDxTAO6GgUsRHBZOWI/3fnMlkMcH5NftzcN37zJta+bpbMAV9px4Nj057TuvRooMK7QrzMCgtz6Q=="], "motion-utils": ["motion-utils@12.29.2", "", {}, "sha512-G3kc34H2cX2gI63RqU+cZq+zWRRPSsNIOjpdl9TN4AQwC4sgwYPl/Q/Obf/d53nOm569T0fYK+tcoSV50BWx8A=="], @@ -2809,6 +2809,8 @@ "node-int64": ["node-int64@0.4.0", "", {}, 
"sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw=="], + "node-readable-to-web-readable-stream": ["node-readable-to-web-readable-stream@0.4.2", "", {}, "sha512-/cMZNI34v//jUTrI+UIo4ieHAB5EZRY/+7OmXZgBxaWBMcW2tGdceIw06RFxWxrKZ5Jp3sI2i5TsRo+CBhtVLQ=="], + "node-releases": ["node-releases@2.0.27", "", {}, "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA=="], "node-rsa": ["node-rsa@1.1.1", "", { "dependencies": { "asn1": "^0.2.4" } }, "sha512-Jd4cvbJMryN21r5HgxQOpMEqv+ooke/korixNNK3mGqfGJmy0M77WDDzo/05969+OkMy3XW1UuZsSmW9KQm7Fw=="], @@ -2925,7 +2927,7 @@ "pdf-lib": ["pdf-lib@1.17.1", "", { "dependencies": { "@pdf-lib/standard-fonts": "^1.0.0", "@pdf-lib/upng": "^1.0.1", "pako": "^1.0.11", "tslib": "^1.11.1" } }, "sha512-V/mpyJAoTsN4cnP31vc0wfNA1+p20evqqnap0KLoRUN0Yk/p3wN52DOEsL4oBFcLdb76hlpKPtzJIgo67j/XLw=="], - "pdfjs-dist": ["pdfjs-dist@5.4.530", "", { "optionalDependencies": { "@napi-rs/canvas": "^0.1.84" } }, "sha512-r1hWsSIGGmyYUAHR26zSXkxYWLXLMd6AwqcaFYG9YUZ0GBf5GvcjJSeo512tabM4GYFhxhl5pMCmPr7Q72Rq2Q=="], + "pdfjs-dist": ["pdfjs-dist@5.4.624", "", { "optionalDependencies": { "@napi-rs/canvas": "^0.1.88", "node-readable-to-web-readable-stream": "^0.4.2" } }, "sha512-sm6TxKTtWv1Oh6n3C6J6a8odejb5uO4A4zo/2dgkHuC0iu8ZMAXOezEODkVaoVp8nX1Xzr+0WxFJJmUr45hQzg=="], "peberminta": ["peberminta@0.9.0", "", {}, "sha512-XIxfHpEuSJbITd1H3EeQwpcZbTLHc+VVr8ANI9t5sit565tsI4/xK3KWTUFE2e6QiangUkh3B0jihzmGnNrRsQ=="], @@ -2959,9 +2961,9 @@ "platform": ["platform@1.3.6", "", {}, "sha512-fnWVljUchTro6RiCFvCXBbNhJc2NijN7oIQxbwsyL0buWJPG85v81ehlHI9fXrJsMNgTofEoWIQeClKpgxFLrg=="], - "playwright": ["playwright@1.58.0", "", { "dependencies": { "playwright-core": "1.58.0" }, "optionalDependencies": { "fsevents": "2.3.2" }, "bin": { "playwright": "cli.js" } }, "sha512-2SVA0sbPktiIY/MCOPX8e86ehA/e+tDNq+e5Y8qjKYti2Z/JG7xnronT/TXTIkKbYGWlCbuucZ6dziEgkoEjQQ=="], + "playwright": ["playwright@1.58.2", "", { 
"dependencies": { "playwright-core": "1.58.2" }, "optionalDependencies": { "fsevents": "2.3.2" }, "bin": { "playwright": "cli.js" } }, "sha512-vA30H8Nvkq/cPBnNw4Q8TWz1EJyqgpuinBcHET0YVJVFldr8JDNiU9LaWAE1KqSkRYazuaBhTpB5ZzShOezQ6A=="], - "playwright-core": ["playwright-core@1.58.0", "", { "bin": { "playwright-core": "cli.js" } }, "sha512-aaoB1RWrdNi3//rOeKuMiS65UCcgOVljU46At6eFcOFPFHWtd2weHRRow6z/n+Lec0Lvu0k9ZPKJSjPugikirw=="], + "playwright-core": ["playwright-core@1.58.2", "", { "bin": { "playwright-core": "cli.js" } }, "sha512-yZkEtftgwS8CsfYo7nm0KE8jsvm6i/PTgVtB8DL726wNf6H2IMsDuxCpJj59KDaxCtSnrWan2AeDqM7JBaultg=="], "postcss": ["postcss@8.5.6", "", { "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", "source-map-js": "^1.2.1" } }, "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg=="], @@ -2985,7 +2987,7 @@ "posthog-node": ["posthog-node@5.9.2", "", { "dependencies": { "@posthog/core": "1.2.2" } }, "sha512-oU7FbFcH5cn40nhP04cBeT67zE76EiGWjKKzDvm6IOm5P83sqM0Ij0wMJQSHp+QI6ZN7MLzb+4xfMPUEZ4q6CA=="], - "preact": ["preact@10.28.2", "", {}, "sha512-lbteaWGzGHdlIuiJ0l2Jq454m6kcpI1zNje6d8MlGAFlYvP2GO4ibnat7P74Esfz4sPTdM6UxtTwh/d3pwM9JA=="], + "preact": ["preact@10.28.3", "", {}, "sha512-tCmoRkPQLpBeWzpmbhryairGnhW9tKV6c6gr/w+RhoRoKEJwsjzipwp//1oCpGPOchvSLaAPlpcJi9MwMmoPyA=="], "prebuild-install": ["prebuild-install@7.1.3", "", { "dependencies": { "detect-libc": "^2.0.0", "expand-template": "^2.0.3", "github-from-package": "0.0.0", "minimist": "^1.2.3", "mkdirp-classic": "^0.5.3", "napi-build-utils": "^2.0.0", "node-abi": "^3.3.0", "pump": "^3.0.0", "rc": "^1.2.7", "simple-get": "^4.0.0", "tar-fs": "^2.0.0", "tunnel-agent": "^0.6.0" }, "bin": { "prebuild-install": "bin.js" } }, "sha512-8Mf2cbV7x1cXPUILADGI3wuhfqWvtiLA1iclTDbFRZkgRQS0NqsPZphna9V+HyTEadheuPmjaJMsbzKQFOzLug=="], @@ -3145,7 +3147,7 @@ "rimraf": ["rimraf@5.0.10", "", { "dependencies": { "glob": "^10.3.7" }, "bin": { "rimraf": "dist/esm/bin.mjs" } }, 
"sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ=="], - "rollup": ["rollup@4.57.0", "", { "dependencies": { "@types/estree": "1.0.8" }, "optionalDependencies": { "@rollup/rollup-android-arm-eabi": "4.57.0", "@rollup/rollup-android-arm64": "4.57.0", "@rollup/rollup-darwin-arm64": "4.57.0", "@rollup/rollup-darwin-x64": "4.57.0", "@rollup/rollup-freebsd-arm64": "4.57.0", "@rollup/rollup-freebsd-x64": "4.57.0", "@rollup/rollup-linux-arm-gnueabihf": "4.57.0", "@rollup/rollup-linux-arm-musleabihf": "4.57.0", "@rollup/rollup-linux-arm64-gnu": "4.57.0", "@rollup/rollup-linux-arm64-musl": "4.57.0", "@rollup/rollup-linux-loong64-gnu": "4.57.0", "@rollup/rollup-linux-loong64-musl": "4.57.0", "@rollup/rollup-linux-ppc64-gnu": "4.57.0", "@rollup/rollup-linux-ppc64-musl": "4.57.0", "@rollup/rollup-linux-riscv64-gnu": "4.57.0", "@rollup/rollup-linux-riscv64-musl": "4.57.0", "@rollup/rollup-linux-s390x-gnu": "4.57.0", "@rollup/rollup-linux-x64-gnu": "4.57.0", "@rollup/rollup-linux-x64-musl": "4.57.0", "@rollup/rollup-openbsd-x64": "4.57.0", "@rollup/rollup-openharmony-arm64": "4.57.0", "@rollup/rollup-win32-arm64-msvc": "4.57.0", "@rollup/rollup-win32-ia32-msvc": "4.57.0", "@rollup/rollup-win32-x64-gnu": "4.57.0", "@rollup/rollup-win32-x64-msvc": "4.57.0", "fsevents": "~2.3.2" }, "bin": { "rollup": "dist/bin/rollup" } }, "sha512-e5lPJi/aui4TO1LpAXIRLySmwXSE8k3b9zoGfd42p67wzxog4WHjiZF3M2uheQih4DGyc25QEV4yRBbpueNiUA=="], + "rollup": ["rollup@4.57.1", "", { "dependencies": { "@types/estree": "1.0.8" }, "optionalDependencies": { "@rollup/rollup-android-arm-eabi": "4.57.1", "@rollup/rollup-android-arm64": "4.57.1", "@rollup/rollup-darwin-arm64": "4.57.1", "@rollup/rollup-darwin-x64": "4.57.1", "@rollup/rollup-freebsd-arm64": "4.57.1", "@rollup/rollup-freebsd-x64": "4.57.1", "@rollup/rollup-linux-arm-gnueabihf": "4.57.1", "@rollup/rollup-linux-arm-musleabihf": "4.57.1", "@rollup/rollup-linux-arm64-gnu": "4.57.1", 
"@rollup/rollup-linux-arm64-musl": "4.57.1", "@rollup/rollup-linux-loong64-gnu": "4.57.1", "@rollup/rollup-linux-loong64-musl": "4.57.1", "@rollup/rollup-linux-ppc64-gnu": "4.57.1", "@rollup/rollup-linux-ppc64-musl": "4.57.1", "@rollup/rollup-linux-riscv64-gnu": "4.57.1", "@rollup/rollup-linux-riscv64-musl": "4.57.1", "@rollup/rollup-linux-s390x-gnu": "4.57.1", "@rollup/rollup-linux-x64-gnu": "4.57.1", "@rollup/rollup-linux-x64-musl": "4.57.1", "@rollup/rollup-openbsd-x64": "4.57.1", "@rollup/rollup-openharmony-arm64": "4.57.1", "@rollup/rollup-win32-arm64-msvc": "4.57.1", "@rollup/rollup-win32-ia32-msvc": "4.57.1", "@rollup/rollup-win32-x64-gnu": "4.57.1", "@rollup/rollup-win32-x64-msvc": "4.57.1", "fsevents": "~2.3.2" }, "bin": { "rollup": "dist/bin/rollup" } }, "sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A=="], "rou3": ["rou3@0.5.1", "", {}, "sha512-OXMmJ3zRk2xeXFGfA3K+EOPHC5u7RDFG7lIOx0X1pdnhUkI8MdVrbV+sNsD80ElpUZ+MRHdyxPnFthq9VHs8uQ=="], @@ -3189,7 +3191,7 @@ "selderee": ["selderee@0.11.0", "", { "dependencies": { "parseley": "^0.12.0" } }, "sha512-5TF+l7p4+OsnP8BCCvSyZiSPc4x4//p5uPwK8TCnVPJYRmU2aYKMpOXvw8zM5a5JvuuCGN1jmsMwuU2W02ukfA=="], - "semver": ["semver@7.7.3", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q=="], + "semver": ["semver@7.7.4", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA=="], "send": ["send@1.2.1", "", { "dependencies": { "debug": "^4.4.3", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "etag": "^1.8.1", "fresh": "^2.0.0", "http-errors": "^2.0.1", "mime-types": "^3.0.2", "ms": "^2.1.3", "on-finished": "^2.4.1", "range-parser": "^1.2.1", "statuses": "^2.0.2" } }, "sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ=="], @@ -3211,7 +3213,7 @@ "shell-quote": 
["shell-quote@1.8.3", "", {}, "sha512-ObmnIF4hXNg1BqhnHmgbDETF8dLPCggZWBjkQfhZpbszZnYur5DUljTcCHii5LC3J5E0yeO/1LIMyH+UvHQgyw=="], - "shiki": ["shiki@3.21.0", "", { "dependencies": { "@shikijs/core": "3.21.0", "@shikijs/engine-javascript": "3.21.0", "@shikijs/engine-oniguruma": "3.21.0", "@shikijs/langs": "3.21.0", "@shikijs/themes": "3.21.0", "@shikijs/types": "3.21.0", "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4" } }, "sha512-N65B/3bqL/TI2crrXr+4UivctrAGEjmsib5rPMMPpFp1xAx/w03v8WZ9RDDFYteXoEgY7qZ4HGgl5KBIu1153w=="], + "shiki": ["shiki@3.22.0", "", { "dependencies": { "@shikijs/core": "3.22.0", "@shikijs/engine-javascript": "3.22.0", "@shikijs/engine-oniguruma": "3.22.0", "@shikijs/langs": "3.22.0", "@shikijs/themes": "3.22.0", "@shikijs/types": "3.22.0", "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4" } }, "sha512-LBnhsoYEe0Eou4e1VgJACes+O6S6QC0w71fCSp5Oya79inkwkm15gQ1UF6VtQ8j/taMDh79hAB49WUk8ALQW3g=="], "shimmer": ["shimmer@1.2.1", "", {}, "sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw=="], @@ -3467,7 +3469,7 @@ "underscore": ["underscore@1.13.7", "", {}, "sha512-GMXzWtsc57XAtguZgaQViUOzs0KTkk8ojr3/xAxXLITqf/3EMwxC0inyETfDFjH/Krbhuep0HNbbjI9i/q3F3g=="], - "undici": ["undici@7.19.2", "", {}, "sha512-4VQSpGEGsWzk0VYxyB/wVX/Q7qf9t5znLRgs0dzszr9w9Fej/8RVNQ+S20vdXSAyra/bJ7ZQfGv6ZMj7UEbzSg=="], + "undici": ["undici@7.21.0", "", {}, "sha512-Hn2tCQpoDt1wv23a68Ctc8Cr/BHpUSfaPYrkajTXOS9IKpxVRx/X5m1K2YkbK2ipgZgxXSgsUinl3x+2YdSSfg=="], "undici-types": ["undici-types@6.21.0", "", {}, "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ=="], @@ -3639,49 +3641,49 @@ "@aws-crypto/util/@smithy/util-utf8": ["@smithy/util-utf8@2.3.0", "", { "dependencies": { "@smithy/util-buffer-from": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A=="], - "@aws-sdk/client-s3/@aws-sdk/core": 
["@aws-sdk/core@3.973.4", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/xml-builder": "^3.972.2", "@smithy/core": "^3.22.0", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-8Rk+kPP74YiR47x54bxYlKZswsaSh0a4XvvRUMLvyS/koNawhsGu/+qSZxREqUeTO+GkKpFvSQIsAZR+deUP+g=="], + "@aws-sdk/client-s3/@aws-sdk/core": ["@aws-sdk/core@3.973.7", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/xml-builder": "^3.972.4", "@smithy/core": "^3.22.1", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-wNZZQQNlJ+hzD49cKdo+PY6rsTDElO8yDImnrI69p2PLBa7QomeUKAJWYp9xnaR38nlHqWhMHZuYLCQ3oSX+xg=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node": ["@aws-sdk/credential-provider-node@3.972.3", "", { "dependencies": { "@aws-sdk/credential-provider-env": "^3.972.2", "@aws-sdk/credential-provider-http": "^3.972.4", "@aws-sdk/credential-provider-ini": "^3.972.2", "@aws-sdk/credential-provider-process": "^3.972.2", "@aws-sdk/credential-provider-sso": "^3.972.2", "@aws-sdk/credential-provider-web-identity": "^3.972.2", "@aws-sdk/types": "^3.973.1", "@smithy/credential-provider-imds": "^4.2.8", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-iu+JwWHM7tHowKqE+8wNmI3sM6mPEiI9Egscz2BEV7adyKmV95oR9tBO4VIOl72FGDi7X9mXg19VtqIpSkEEsA=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node": 
["@aws-sdk/credential-provider-node@3.972.6", "", { "dependencies": { "@aws-sdk/credential-provider-env": "^3.972.5", "@aws-sdk/credential-provider-http": "^3.972.7", "@aws-sdk/credential-provider-ini": "^3.972.5", "@aws-sdk/credential-provider-process": "^3.972.5", "@aws-sdk/credential-provider-sso": "^3.972.5", "@aws-sdk/credential-provider-web-identity": "^3.972.5", "@aws-sdk/types": "^3.973.1", "@smithy/credential-provider-imds": "^4.2.8", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DZ3CnAAtSVtVz+G+ogqecaErMLgzph4JH5nYbHoBMgBkwTUV+SUcjsjOJwdBJTHu3Dm6l5LBYekZoU2nDqQk2A=="], - "@aws-sdk/client-s3/@aws-sdk/middleware-host-header": ["@aws-sdk/middleware-host-header@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-42hZ8jEXT2uR6YybCzNq9OomqHPw43YIfRfz17biZjMQA4jKSQUaHIl6VvqO2Ddl5904pXg2Yd/ku78S0Ikgog=="], + "@aws-sdk/client-s3/@aws-sdk/middleware-host-header": ["@aws-sdk/middleware-host-header@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-aknPTb2M+G3s+0qLCx4Li/qGZH8IIYjugHMv15JTYMe6mgZO8VBpYgeGYsNMGCqCZOcWzuf900jFBG5bopfzmA=="], - "@aws-sdk/client-s3/@aws-sdk/middleware-logger": ["@aws-sdk/middleware-logger@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-iUzdXKOgi4JVDDEG/VvoNw50FryRCEm0qAudw12DcZoiNJWl0rN6SYVLcL1xwugMfQncCXieK5UBlG6mhH7iYA=="], + "@aws-sdk/client-s3/@aws-sdk/middleware-logger": ["@aws-sdk/middleware-logger@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-Ftg09xNNRqaz9QNzlfdQWfpqMCJbsQdnZVJP55jfhbKi1+FTWxGuvfPoBhDHIovqWKjqbuiew3HuhxbJ0+OjgA=="], - 
"@aws-sdk/client-s3/@aws-sdk/middleware-recursion-detection": ["@aws-sdk/middleware-recursion-detection@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws/lambda-invoke-store": "^0.2.2", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-/mzlyzJDtngNFd/rAYvqx29a2d0VuiYKN84Y/Mu9mGw7cfMOCyRK+896tb9wV6MoPRHUX7IXuKCIL8nzz2Pz5A=="], + "@aws-sdk/client-s3/@aws-sdk/middleware-recursion-detection": ["@aws-sdk/middleware-recursion-detection@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws/lambda-invoke-store": "^0.2.2", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-PY57QhzNuXHnwbJgbWYTrqIDHYSeOlhfYERTAuc16LKZpTZRJUjzBFokp9hF7u1fuGeE3D70ERXzdbMBOqQz7Q=="], - "@aws-sdk/client-s3/@aws-sdk/middleware-user-agent": ["@aws-sdk/middleware-user-agent@3.972.4", "", { "dependencies": { "@aws-sdk/core": "^3.973.4", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.972.0", "@smithy/core": "^3.22.0", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-6sU8jrSJvY/lqSnU6IYsa8SrCKwOZ4Enl6O4xVJo8RCq9Bdr5Giuw2eUaJAk9GPcpr4OFcmSFv3JOLhpKGeRZA=="], + "@aws-sdk/client-s3/@aws-sdk/middleware-user-agent": ["@aws-sdk/middleware-user-agent@3.972.7", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@smithy/core": "^3.22.1", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-HUD+geASjXSCyL/DHPQc/Ua7JhldTcIglVAoCV8kiVm99IaFSlAbTvEnyhZwdE6bdFyTL+uIaWLaCFSRsglZBQ=="], - "@aws-sdk/client-s3/@aws-sdk/region-config-resolver": ["@aws-sdk/region-config-resolver@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/config-resolver": "^4.4.6", "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, 
"sha512-/7vRBsfmiOlg2X67EdKrzzQGw5/SbkXb7ALHQmlQLkZh8qNgvS2G2dDC6NtF3hzFlpP3j2k+KIEtql/6VrI6JA=="], + "@aws-sdk/client-s3/@aws-sdk/region-config-resolver": ["@aws-sdk/region-config-resolver@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/config-resolver": "^4.4.6", "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-v4J8qYAWfOMcZ4MJUyatntOicTzEMaU7j3OpkRCGGFSL2NgXQ5VbxauIyORA+pxdKZ0qQG2tCQjQjZDlXEC3Ow=="], "@aws-sdk/client-s3/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], - "@aws-sdk/client-s3/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.972.0", "", { "dependencies": { "@aws-sdk/types": "3.972.0", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-6JHsl1V/a1ZW8D8AFfd4R52fwZPnZ5H4U6DS8m/bWT8qad72NvbOFAC7U2cDtFs2TShqUO3TEiX/EJibtY3ijg=="], + "@aws-sdk/client-s3/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], - "@aws-sdk/client-s3/@aws-sdk/util-user-agent-browser": ["@aws-sdk/util-user-agent-browser@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "bowser": "^2.11.0", "tslib": "^2.6.2" } }, "sha512-gz76bUyebPZRxIsBHJUd/v+yiyFzm9adHbr8NykP2nm+z/rFyvQneOHajrUejtmnc5tTBeaDPL4X25TnagRk4A=="], + "@aws-sdk/client-s3/@aws-sdk/util-user-agent-browser": ["@aws-sdk/util-user-agent-browser@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "bowser": "^2.11.0", "tslib": "^2.6.2" } }, 
"sha512-JurOwkRUcXD/5MTDBcqdyQ9eVedtAsZgw5rBwktsPTN7QtPiS2Ld1jkJepNgYoCufz1Wcut9iup7GJDoIHp8Fw=="], - "@aws-sdk/client-s3/@aws-sdk/util-user-agent-node": ["@aws-sdk/util-user-agent-node@3.972.2", "", { "dependencies": { "@aws-sdk/middleware-user-agent": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" }, "peerDependencies": { "aws-crt": ">=1.0.0" }, "optionalPeers": ["aws-crt"] }, "sha512-vnxOc4C6AR7hVbwyFo1YuH0GB6dgJlWt8nIOOJpnzJAWJPkUMPJ9Zv2lnKsSU7TTZbhP2hEO8OZ4PYH59XFv8Q=="], + "@aws-sdk/client-s3/@aws-sdk/util-user-agent-node": ["@aws-sdk/util-user-agent-node@3.972.5", "", { "dependencies": { "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/types": "^3.973.1", "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" }, "peerDependencies": { "aws-crt": ">=1.0.0" }, "optionalPeers": ["aws-crt"] }, "sha512-GsUDF+rXyxDZkkJxUsDxnA67FG+kc5W1dnloCFLl6fWzceevsCYzJpASBzT+BPjwUgREE6FngfJYYYMQUY5fZQ=="], - "@aws-sdk/client-sesv2/@aws-sdk/core": ["@aws-sdk/core@3.973.4", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/xml-builder": "^3.972.2", "@smithy/core": "^3.22.0", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-8Rk+kPP74YiR47x54bxYlKZswsaSh0a4XvvRUMLvyS/koNawhsGu/+qSZxREqUeTO+GkKpFvSQIsAZR+deUP+g=="], + "@aws-sdk/client-sesv2/@aws-sdk/core": ["@aws-sdk/core@3.973.7", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/xml-builder": "^3.972.4", "@smithy/core": "^3.22.1", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", 
"@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-wNZZQQNlJ+hzD49cKdo+PY6rsTDElO8yDImnrI69p2PLBa7QomeUKAJWYp9xnaR38nlHqWhMHZuYLCQ3oSX+xg=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node": ["@aws-sdk/credential-provider-node@3.972.3", "", { "dependencies": { "@aws-sdk/credential-provider-env": "^3.972.2", "@aws-sdk/credential-provider-http": "^3.972.4", "@aws-sdk/credential-provider-ini": "^3.972.2", "@aws-sdk/credential-provider-process": "^3.972.2", "@aws-sdk/credential-provider-sso": "^3.972.2", "@aws-sdk/credential-provider-web-identity": "^3.972.2", "@aws-sdk/types": "^3.973.1", "@smithy/credential-provider-imds": "^4.2.8", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-iu+JwWHM7tHowKqE+8wNmI3sM6mPEiI9Egscz2BEV7adyKmV95oR9tBO4VIOl72FGDi7X9mXg19VtqIpSkEEsA=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node": ["@aws-sdk/credential-provider-node@3.972.6", "", { "dependencies": { "@aws-sdk/credential-provider-env": "^3.972.5", "@aws-sdk/credential-provider-http": "^3.972.7", "@aws-sdk/credential-provider-ini": "^3.972.5", "@aws-sdk/credential-provider-process": "^3.972.5", "@aws-sdk/credential-provider-sso": "^3.972.5", "@aws-sdk/credential-provider-web-identity": "^3.972.5", "@aws-sdk/types": "^3.973.1", "@smithy/credential-provider-imds": "^4.2.8", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DZ3CnAAtSVtVz+G+ogqecaErMLgzph4JH5nYbHoBMgBkwTUV+SUcjsjOJwdBJTHu3Dm6l5LBYekZoU2nDqQk2A=="], - "@aws-sdk/client-sesv2/@aws-sdk/middleware-host-header": ["@aws-sdk/middleware-host-header@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", 
"tslib": "^2.6.2" } }, "sha512-42hZ8jEXT2uR6YybCzNq9OomqHPw43YIfRfz17biZjMQA4jKSQUaHIl6VvqO2Ddl5904pXg2Yd/ku78S0Ikgog=="], + "@aws-sdk/client-sesv2/@aws-sdk/middleware-host-header": ["@aws-sdk/middleware-host-header@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-aknPTb2M+G3s+0qLCx4Li/qGZH8IIYjugHMv15JTYMe6mgZO8VBpYgeGYsNMGCqCZOcWzuf900jFBG5bopfzmA=="], - "@aws-sdk/client-sesv2/@aws-sdk/middleware-logger": ["@aws-sdk/middleware-logger@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-iUzdXKOgi4JVDDEG/VvoNw50FryRCEm0qAudw12DcZoiNJWl0rN6SYVLcL1xwugMfQncCXieK5UBlG6mhH7iYA=="], + "@aws-sdk/client-sesv2/@aws-sdk/middleware-logger": ["@aws-sdk/middleware-logger@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-Ftg09xNNRqaz9QNzlfdQWfpqMCJbsQdnZVJP55jfhbKi1+FTWxGuvfPoBhDHIovqWKjqbuiew3HuhxbJ0+OjgA=="], - "@aws-sdk/client-sesv2/@aws-sdk/middleware-recursion-detection": ["@aws-sdk/middleware-recursion-detection@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws/lambda-invoke-store": "^0.2.2", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-/mzlyzJDtngNFd/rAYvqx29a2d0VuiYKN84Y/Mu9mGw7cfMOCyRK+896tb9wV6MoPRHUX7IXuKCIL8nzz2Pz5A=="], + "@aws-sdk/client-sesv2/@aws-sdk/middleware-recursion-detection": ["@aws-sdk/middleware-recursion-detection@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws/lambda-invoke-store": "^0.2.2", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-PY57QhzNuXHnwbJgbWYTrqIDHYSeOlhfYERTAuc16LKZpTZRJUjzBFokp9hF7u1fuGeE3D70ERXzdbMBOqQz7Q=="], - "@aws-sdk/client-sesv2/@aws-sdk/middleware-user-agent": ["@aws-sdk/middleware-user-agent@3.972.4", "", { "dependencies": { "@aws-sdk/core": 
"^3.973.4", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.972.0", "@smithy/core": "^3.22.0", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-6sU8jrSJvY/lqSnU6IYsa8SrCKwOZ4Enl6O4xVJo8RCq9Bdr5Giuw2eUaJAk9GPcpr4OFcmSFv3JOLhpKGeRZA=="], + "@aws-sdk/client-sesv2/@aws-sdk/middleware-user-agent": ["@aws-sdk/middleware-user-agent@3.972.7", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@smithy/core": "^3.22.1", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-HUD+geASjXSCyL/DHPQc/Ua7JhldTcIglVAoCV8kiVm99IaFSlAbTvEnyhZwdE6bdFyTL+uIaWLaCFSRsglZBQ=="], - "@aws-sdk/client-sesv2/@aws-sdk/region-config-resolver": ["@aws-sdk/region-config-resolver@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/config-resolver": "^4.4.6", "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-/7vRBsfmiOlg2X67EdKrzzQGw5/SbkXb7ALHQmlQLkZh8qNgvS2G2dDC6NtF3hzFlpP3j2k+KIEtql/6VrI6JA=="], + "@aws-sdk/client-sesv2/@aws-sdk/region-config-resolver": ["@aws-sdk/region-config-resolver@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/config-resolver": "^4.4.6", "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-v4J8qYAWfOMcZ4MJUyatntOicTzEMaU7j3OpkRCGGFSL2NgXQ5VbxauIyORA+pxdKZ0qQG2tCQjQjZDlXEC3Ow=="], "@aws-sdk/client-sesv2/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], - "@aws-sdk/client-sesv2/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.972.0", "", { "dependencies": { "@aws-sdk/types": "3.972.0", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } 
}, "sha512-6JHsl1V/a1ZW8D8AFfd4R52fwZPnZ5H4U6DS8m/bWT8qad72NvbOFAC7U2cDtFs2TShqUO3TEiX/EJibtY3ijg=="], + "@aws-sdk/client-sesv2/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], - "@aws-sdk/client-sesv2/@aws-sdk/util-user-agent-browser": ["@aws-sdk/util-user-agent-browser@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "bowser": "^2.11.0", "tslib": "^2.6.2" } }, "sha512-gz76bUyebPZRxIsBHJUd/v+yiyFzm9adHbr8NykP2nm+z/rFyvQneOHajrUejtmnc5tTBeaDPL4X25TnagRk4A=="], + "@aws-sdk/client-sesv2/@aws-sdk/util-user-agent-browser": ["@aws-sdk/util-user-agent-browser@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "bowser": "^2.11.0", "tslib": "^2.6.2" } }, "sha512-JurOwkRUcXD/5MTDBcqdyQ9eVedtAsZgw5rBwktsPTN7QtPiS2Ld1jkJepNgYoCufz1Wcut9iup7GJDoIHp8Fw=="], - "@aws-sdk/client-sesv2/@aws-sdk/util-user-agent-node": ["@aws-sdk/util-user-agent-node@3.972.2", "", { "dependencies": { "@aws-sdk/middleware-user-agent": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" }, "peerDependencies": { "aws-crt": ">=1.0.0" }, "optionalPeers": ["aws-crt"] }, "sha512-vnxOc4C6AR7hVbwyFo1YuH0GB6dgJlWt8nIOOJpnzJAWJPkUMPJ9Zv2lnKsSU7TTZbhP2hEO8OZ4PYH59XFv8Q=="], + "@aws-sdk/client-sesv2/@aws-sdk/util-user-agent-node": ["@aws-sdk/util-user-agent-node@3.972.5", "", { "dependencies": { "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/types": "^3.973.1", "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" }, "peerDependencies": { "aws-crt": ">=1.0.0" }, "optionalPeers": ["aws-crt"] }, 
"sha512-GsUDF+rXyxDZkkJxUsDxnA67FG+kc5W1dnloCFLl6fWzceevsCYzJpASBzT+BPjwUgREE6FngfJYYYMQUY5fZQ=="], "@aws-sdk/client-sqs/@aws-sdk/core": ["@aws-sdk/core@3.947.0", "", { "dependencies": { "@aws-sdk/types": "3.936.0", "@aws-sdk/xml-builder": "3.930.0", "@smithy/core": "^3.18.7", "@smithy/node-config-provider": "^4.3.5", "@smithy/property-provider": "^4.2.5", "@smithy/protocol-http": "^5.3.5", "@smithy/signature-v4": "^5.3.5", "@smithy/smithy-client": "^4.9.10", "@smithy/types": "^4.9.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.5", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-Khq4zHhuAkvCFuFbgcy3GrZTzfSX7ZIjIcW1zRDxXRLZKRtuhnZdonqTUfaWi5K42/4OmxkYNpsO7X7trQOeHw=="], @@ -3695,13 +3697,13 @@ "@aws-sdk/middleware-expect-continue/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], - "@aws-sdk/middleware-flexible-checksums/@aws-sdk/core": ["@aws-sdk/core@3.973.4", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/xml-builder": "^3.972.2", "@smithy/core": "^3.22.0", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-8Rk+kPP74YiR47x54bxYlKZswsaSh0a4XvvRUMLvyS/koNawhsGu/+qSZxREqUeTO+GkKpFvSQIsAZR+deUP+g=="], + "@aws-sdk/middleware-flexible-checksums/@aws-sdk/core": ["@aws-sdk/core@3.973.7", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/xml-builder": "^3.972.4", "@smithy/core": "^3.22.1", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", 
"@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-wNZZQQNlJ+hzD49cKdo+PY6rsTDElO8yDImnrI69p2PLBa7QomeUKAJWYp9xnaR38nlHqWhMHZuYLCQ3oSX+xg=="], "@aws-sdk/middleware-flexible-checksums/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], "@aws-sdk/middleware-location-constraint/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], - "@aws-sdk/middleware-sdk-s3/@aws-sdk/core": ["@aws-sdk/core@3.973.4", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/xml-builder": "^3.972.2", "@smithy/core": "^3.22.0", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-8Rk+kPP74YiR47x54bxYlKZswsaSh0a4XvvRUMLvyS/koNawhsGu/+qSZxREqUeTO+GkKpFvSQIsAZR+deUP+g=="], + "@aws-sdk/middleware-sdk-s3/@aws-sdk/core": ["@aws-sdk/core@3.973.7", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/xml-builder": "^3.972.4", "@smithy/core": "^3.22.1", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, 
"sha512-wNZZQQNlJ+hzD49cKdo+PY6rsTDElO8yDImnrI69p2PLBa7QomeUKAJWYp9xnaR38nlHqWhMHZuYLCQ3oSX+xg=="], "@aws-sdk/middleware-sdk-s3/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], @@ -3711,9 +3713,7 @@ "@aws-sdk/s3-request-presigner/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], - "@aws-sdk/signature-v4-multi-region/@aws-sdk/middleware-sdk-s3": ["@aws-sdk/middleware-sdk-s3@3.972.0", "", { "dependencies": { "@aws-sdk/core": "3.972.0", "@aws-sdk/types": "3.972.0", "@aws-sdk/util-arn-parser": "3.972.0", "@smithy/core": "^3.20.6", "@smithy/node-config-provider": "^4.3.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.10.8", "@smithy/types": "^4.12.0", "@smithy/util-config-provider": "^4.2.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-stream": "^4.5.10", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-0bcKFXWx+NZ7tIlOo7KjQ+O2rydiHdIQahrq+fN6k9Osky29v17guy68urUKfhTobR6iY6KvxkroFWaFtTgS5w=="], - - "@aws-sdk/signature-v4-multi-region/@aws-sdk/types": ["@aws-sdk/types@3.972.0", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-U7xBIbLSetONxb2bNzHyDgND3oKGoIfmknrEVnoEU4GUSs+0augUOIn9DIWGUO2ETcRFdsRUnmx9KhPT9Ojbug=="], + "@aws-sdk/signature-v4-multi-region/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], "@aws-sdk/util-format-url/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, 
"sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], @@ -3751,12 +3751,6 @@ "@inquirer/external-editor/iconv-lite": ["iconv-lite@0.7.1", "", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "sha512-2Tth85cXwGFHfvRgZWszZSvdo+0Xsqmw8k8ZwxScfcBneNUraK+dxRxRm24nszx80Y0TVio8kKLt5sLE7ZCLlw=="], - "@isaacs/cliui/string-width": ["string-width@5.1.2", "", { "dependencies": { "eastasianwidth": "^0.2.0", "emoji-regex": "^9.2.2", "strip-ansi": "^7.0.1" } }, "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA=="], - - "@isaacs/cliui/strip-ansi": ["strip-ansi@7.1.2", "", { "dependencies": { "ansi-regex": "^6.0.1" } }, "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA=="], - - "@isaacs/cliui/wrap-ansi": ["wrap-ansi@8.1.0", "", { "dependencies": { "ansi-styles": "^6.1.0", "string-width": "^5.0.1", "strip-ansi": "^7.0.1" } }, "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ=="], - "@langchain/core/ansi-styles": ["ansi-styles@5.2.0", "", {}, "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA=="], "@langchain/core/uuid": ["uuid@10.0.0", "", { "bin": { "uuid": "dist/bin/uuid" } }, "sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ=="], @@ -3967,7 +3961,7 @@ "c12/chokidar": ["chokidar@4.0.3", "", { "dependencies": { "readdirp": "^4.0.1" } }, "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA=="], - "c12/confbox": ["confbox@0.2.2", "", {}, "sha512-1NB+BKqhtNipMsov4xI/NnhCKp9XG9NamYp5PVm9klAT0fsrNPjaFICsCFhNhwZJKNh7zB/3q8qXz0E9oaMNtQ=="], + "c12/confbox": ["confbox@0.2.4", "", {}, "sha512-ysOGlgTFbN2/Y6Cg3Iye8YKulHw+R2fNXHrgSmXISQdMnomY6eNDprVdW9R5xBguEqI954+S6709UyiO7B+6OQ=="], "c12/pkg-types": ["pkg-types@2.3.0", "", { "dependencies": { "confbox": "^0.2.2", "exsolve": 
"^1.0.7", "pathe": "^2.0.3" } }, "sha512-SIqCzDRg0s9npO5XQ3tNZioRY1uK06lA41ynBC1YmFTmnY6FjUjVt6s4LoADmwoig1qqD0oK8h1p/8mlMx8Oig=="], @@ -4021,7 +4015,7 @@ "form-data/mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="], - "fumadocs-mdx/esbuild": ["esbuild@0.27.2", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.2", "@esbuild/android-arm": "0.27.2", "@esbuild/android-arm64": "0.27.2", "@esbuild/android-x64": "0.27.2", "@esbuild/darwin-arm64": "0.27.2", "@esbuild/darwin-x64": "0.27.2", "@esbuild/freebsd-arm64": "0.27.2", "@esbuild/freebsd-x64": "0.27.2", "@esbuild/linux-arm": "0.27.2", "@esbuild/linux-arm64": "0.27.2", "@esbuild/linux-ia32": "0.27.2", "@esbuild/linux-loong64": "0.27.2", "@esbuild/linux-mips64el": "0.27.2", "@esbuild/linux-ppc64": "0.27.2", "@esbuild/linux-riscv64": "0.27.2", "@esbuild/linux-s390x": "0.27.2", "@esbuild/linux-x64": "0.27.2", "@esbuild/netbsd-arm64": "0.27.2", "@esbuild/netbsd-x64": "0.27.2", "@esbuild/openbsd-arm64": "0.27.2", "@esbuild/openbsd-x64": "0.27.2", "@esbuild/openharmony-arm64": "0.27.2", "@esbuild/sunos-x64": "0.27.2", "@esbuild/win32-arm64": "0.27.2", "@esbuild/win32-ia32": "0.27.2", "@esbuild/win32-x64": "0.27.2" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw=="], + "fumadocs-mdx/esbuild": ["esbuild@0.27.3", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.3", "@esbuild/android-arm": "0.27.3", "@esbuild/android-arm64": "0.27.3", "@esbuild/android-x64": "0.27.3", "@esbuild/darwin-arm64": "0.27.3", "@esbuild/darwin-x64": "0.27.3", "@esbuild/freebsd-arm64": "0.27.3", "@esbuild/freebsd-x64": "0.27.3", "@esbuild/linux-arm": "0.27.3", "@esbuild/linux-arm64": "0.27.3", "@esbuild/linux-ia32": "0.27.3", "@esbuild/linux-loong64": "0.27.3", "@esbuild/linux-mips64el": "0.27.3", 
"@esbuild/linux-ppc64": "0.27.3", "@esbuild/linux-riscv64": "0.27.3", "@esbuild/linux-s390x": "0.27.3", "@esbuild/linux-x64": "0.27.3", "@esbuild/netbsd-arm64": "0.27.3", "@esbuild/netbsd-x64": "0.27.3", "@esbuild/openbsd-arm64": "0.27.3", "@esbuild/openbsd-x64": "0.27.3", "@esbuild/openharmony-arm64": "0.27.3", "@esbuild/sunos-x64": "0.27.3", "@esbuild/win32-arm64": "0.27.3", "@esbuild/win32-ia32": "0.27.3", "@esbuild/win32-x64": "0.27.3" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg=="], "fumadocs-mdx/js-yaml": ["js-yaml@4.1.1", "", { "dependencies": { "argparse": "^2.0.1" }, "bin": { "js-yaml": "bin/js-yaml.js" } }, "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA=="], @@ -4185,15 +4179,15 @@ "sim/nanoid": ["nanoid@3.3.11", "", { "bin": { "nanoid": "bin/nanoid.cjs" } }, "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w=="], - "sim/tailwind-merge": ["tailwind-merge@2.6.0", "", {}, "sha512-P+Vu1qXfzediirmHOC3xKGAYeZtPcV9g76X+xg2FD4tYgR71ewMA35Y3sCz3zhiN/dwefRpJX0yBcgwi1fXNQA=="], + "sim/tailwind-merge": ["tailwind-merge@2.6.1", "", {}, "sha512-Oo6tHdpZsGpkKG88HJ8RR1rg/RdnEkQEfMoEk2x1XRI3F1AxeU+ijRXpiVUF4UbLfcxxRGw6TbUINKYdWVsQTQ=="], "sim/tailwindcss": ["tailwindcss@3.4.19", "", { "dependencies": { "@alloc/quick-lru": "^5.2.0", "arg": "^5.0.2", "chokidar": "^3.6.0", "didyoumean": "^1.2.2", "dlv": "^1.1.3", "fast-glob": "^3.3.2", "glob-parent": "^6.0.2", "is-glob": "^4.0.3", "jiti": "^1.21.7", "lilconfig": "^3.1.3", "micromatch": "^4.0.8", "normalize-path": "^3.0.0", "object-hash": "^3.0.0", "picocolors": "^1.1.1", "postcss": "^8.4.47", "postcss-import": "^15.1.0", "postcss-js": "^4.0.1", "postcss-load-config": "^4.0.2 || ^5.0 || ^6.0", "postcss-nested": "^6.2.0", "postcss-selector-parser": "^6.1.2", "resolve": "^1.22.8", "sucrase": "^3.35.0" }, "bin": { "tailwind": "lib/cli.js", 
"tailwindcss": "lib/cli.js" } }, "sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ=="], - "simstudio/@types/node": ["@types/node@20.19.30", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-WJtwWJu7UdlvzEAUm484QNg5eAoq5QR08KDNx7g45Usrs2NtOPiX8ugDqmKdXkyL03rBqU5dYNYVQetEpBHq2g=="], + "simstudio/@types/node": ["@types/node@20.19.33", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-Rs1bVAIdBs5gbTIKza/tgpMuG1k3U/UMJLWecIMxNdJFDMzcM5LOiLVRYh3PilWEYDIeUDv7bpiHPLPsbydGcw=="], "simstudio/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - "simstudio-ts-sdk/@types/node": ["@types/node@20.19.30", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-WJtwWJu7UdlvzEAUm484QNg5eAoq5QR08KDNx7g45Usrs2NtOPiX8ugDqmKdXkyL03rBqU5dYNYVQetEpBHq2g=="], + "simstudio-ts-sdk/@types/node": ["@types/node@20.19.33", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-Rs1bVAIdBs5gbTIKza/tgpMuG1k3U/UMJLWecIMxNdJFDMzcM5LOiLVRYh3PilWEYDIeUDv7bpiHPLPsbydGcw=="], "slice-ansi/ansi-styles": ["ansi-styles@6.2.3", "", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="], @@ -4211,6 +4205,8 @@ "string_decoder/safe-buffer": ["safe-buffer@5.1.2", "", {}, "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="], + "strip-literal/js-tokens": ["js-tokens@9.0.1", "", {}, "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ=="], + "sucrase/commander": ["commander@4.1.1", "", {}, "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA=="], "tar-fs/chownr": ["chownr@1.1.4", "", {}, "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg=="], @@ -4239,7 +4235,7 
@@ "unist-util-remove/unist-util-visit-parents": ["unist-util-visit-parents@5.1.3", "", { "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^5.0.0" } }, "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg=="], - "vite/esbuild": ["esbuild@0.27.2", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.2", "@esbuild/android-arm": "0.27.2", "@esbuild/android-arm64": "0.27.2", "@esbuild/android-x64": "0.27.2", "@esbuild/darwin-arm64": "0.27.2", "@esbuild/darwin-x64": "0.27.2", "@esbuild/freebsd-arm64": "0.27.2", "@esbuild/freebsd-x64": "0.27.2", "@esbuild/linux-arm": "0.27.2", "@esbuild/linux-arm64": "0.27.2", "@esbuild/linux-ia32": "0.27.2", "@esbuild/linux-loong64": "0.27.2", "@esbuild/linux-mips64el": "0.27.2", "@esbuild/linux-ppc64": "0.27.2", "@esbuild/linux-riscv64": "0.27.2", "@esbuild/linux-s390x": "0.27.2", "@esbuild/linux-x64": "0.27.2", "@esbuild/netbsd-arm64": "0.27.2", "@esbuild/netbsd-x64": "0.27.2", "@esbuild/openbsd-arm64": "0.27.2", "@esbuild/openbsd-x64": "0.27.2", "@esbuild/openharmony-arm64": "0.27.2", "@esbuild/sunos-x64": "0.27.2", "@esbuild/win32-arm64": "0.27.2", "@esbuild/win32-ia32": "0.27.2", "@esbuild/win32-x64": "0.27.2" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw=="], + "vite/esbuild": ["esbuild@0.27.3", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.3", "@esbuild/android-arm": "0.27.3", "@esbuild/android-arm64": "0.27.3", "@esbuild/android-x64": "0.27.3", "@esbuild/darwin-arm64": "0.27.3", "@esbuild/darwin-x64": "0.27.3", "@esbuild/freebsd-arm64": "0.27.3", "@esbuild/freebsd-x64": "0.27.3", "@esbuild/linux-arm": "0.27.3", "@esbuild/linux-arm64": "0.27.3", "@esbuild/linux-ia32": "0.27.3", "@esbuild/linux-loong64": "0.27.3", "@esbuild/linux-mips64el": "0.27.3", "@esbuild/linux-ppc64": "0.27.3", "@esbuild/linux-riscv64": "0.27.3", "@esbuild/linux-s390x": "0.27.3", 
"@esbuild/linux-x64": "0.27.3", "@esbuild/netbsd-arm64": "0.27.3", "@esbuild/netbsd-x64": "0.27.3", "@esbuild/openbsd-arm64": "0.27.3", "@esbuild/openbsd-x64": "0.27.3", "@esbuild/openharmony-arm64": "0.27.3", "@esbuild/sunos-x64": "0.27.3", "@esbuild/win32-arm64": "0.27.3", "@esbuild/win32-ia32": "0.27.3", "@esbuild/win32-x64": "0.27.3" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg=="], "xml-crypto/xpath": ["xpath@0.0.33", "", {}, "sha512-NNXnzrkDrAzalLhIUc01jO2mOzXGXh1JwPgkihcLLzw98c0WgYDmmjSh1Kl3wzaxSVWMuA+fe0WTWOBDWCBmNA=="], @@ -4251,37 +4247,33 @@ "@aws-crypto/util/@smithy/util-utf8/@smithy/util-buffer-from": ["@smithy/util-buffer-from@2.2.0", "", { "dependencies": { "@smithy/is-array-buffer": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-IJdWBbTcMQ6DA0gdNhh/BwrLkDR+ADW5Kr1aZmd4k3DIF6ezMV4R2NIAmT08wQJ3yUK82thHWmC/TnK/wpMMIA=="], - "@aws-sdk/client-s3/@aws-sdk/core/@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.2", "", { "dependencies": { "@smithy/types": "^4.12.0", "fast-xml-parser": "5.2.5", "tslib": "^2.6.2" } }, "sha512-jGOOV/bV1DhkkUhHiZ3/1GZ67cZyOXaDb7d1rYD6ZiXf5V9tBNOcgqXwRRPvrCbYaFRa1pPMFb3ZjqjWpR3YfA=="], - - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-env": ["@aws-sdk/credential-provider-env@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-wzH1EdrZsytG1xN9UHaK12J9+kfrnd2+c8y0LVoS4O4laEjPoie1qVK3k8/rZe7KOtvULzyMnO3FT4Krr9Z0Dg=="], - - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-http": ["@aws-sdk/credential-provider-http@3.972.4", "", { "dependencies": { "@aws-sdk/core": "^3.973.4", "@aws-sdk/types": "^3.973.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/node-http-handler": "^4.4.8", "@smithy/property-provider": "^4.2.8", 
"@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/util-stream": "^4.5.10", "tslib": "^2.6.2" } }, "sha512-OC7F3ipXV12QfDEWybQGHLzoeHBlAdx/nLzPfHP0Wsabu3JBffu5nlzSaJNf7to9HGtOW8Bpu8NX0ugmDrCbtw=="], + "@aws-sdk/client-s3/@aws-sdk/core/@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.4", "", { "dependencies": { "@smithy/types": "^4.12.0", "fast-xml-parser": "5.3.4", "tslib": "^2.6.2" } }, "sha512-0zJ05ANfYqI6+rGqj8samZBFod0dPPousBjLEqg8WdxSgbMAkRgLyn81lP215Do0rFJ/17LIXwr7q0yK24mP6Q=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini": ["@aws-sdk/credential-provider-ini@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", "@aws-sdk/credential-provider-env": "^3.972.2", "@aws-sdk/credential-provider-http": "^3.972.3", "@aws-sdk/credential-provider-login": "^3.972.2", "@aws-sdk/credential-provider-process": "^3.972.2", "@aws-sdk/credential-provider-sso": "^3.972.2", "@aws-sdk/credential-provider-web-identity": "^3.972.2", "@aws-sdk/nested-clients": "3.975.0", "@aws-sdk/types": "^3.973.1", "@smithy/credential-provider-imds": "^4.2.8", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-Jrb8sLm6k8+L7520irBrvCtdLxNtrG7arIxe9TCeMJt/HxqMGJdbIjw8wILzkEHLMIi4MecF2FbXCln7OT1Tag=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-env": ["@aws-sdk/credential-provider-env@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-LxJ9PEO4gKPXzkufvIESUysykPIdrV7+Ocb9yAhbhJLE4TiAYqbCVUE+VuKP1leGR1bBfjWjYgSV5MxprlX3mQ=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-process": ["@aws-sdk/credential-provider-process@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", 
"@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-NLKLTT7jnUe9GpQAVkPTJO+cs2FjlQDt5fArIYS7h/Iw/CvamzgGYGFRVD2SE05nOHCMwafUSi42If8esGFV+g=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-http": ["@aws-sdk/credential-provider-http@3.972.7", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/types": "^3.973.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/node-http-handler": "^4.4.9", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/util-stream": "^4.5.11", "tslib": "^2.6.2" } }, "sha512-L2uOGtvp2x3bTcxFTpSM+GkwFIPd8pHfGWO1764icMbo7e5xJh0nfhx1UwkXLnwvocTNEf8A7jISZLYjUSNaTg=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso": ["@aws-sdk/credential-provider-sso@3.972.2", "", { "dependencies": { "@aws-sdk/client-sso": "3.975.0", "@aws-sdk/core": "^3.973.2", "@aws-sdk/token-providers": "3.975.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-YpwDn8g3gCGUl61cCV0sRxP2pFIwg+ZsMfWQ/GalSyjXtRkctCMFA+u0yPb/Q4uTfNEiya1Y4nm0C5rIHyPW5Q=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini": ["@aws-sdk/credential-provider-ini@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/credential-provider-env": "^3.972.5", "@aws-sdk/credential-provider-http": "^3.972.7", "@aws-sdk/credential-provider-login": "^3.972.5", "@aws-sdk/credential-provider-process": "^3.972.5", "@aws-sdk/credential-provider-sso": "^3.972.5", "@aws-sdk/credential-provider-web-identity": "^3.972.5", "@aws-sdk/nested-clients": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/credential-provider-imds": "^4.2.8", 
"@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-SdDTYE6jkARzOeL7+kudMIM4DaFnP5dZVeatzw849k4bSXDdErDS188bgeNzc/RA2WGrlEpsqHUKP6G7sVXhZg=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity": ["@aws-sdk/credential-provider-web-identity@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", "@aws-sdk/nested-clients": "3.975.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-x9DAiN9Qz+NjJ99ltDiVQ8d511M/tuF/9MFbe2jUgo7HZhD6+x4S3iT1YcP07ndwDUjmzKGmeOEgE24k4qvfdg=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-process": ["@aws-sdk/credential-provider-process@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-HDKF3mVbLnuqGg6dMnzBf1VUOywE12/N286msI9YaK9mEIzdsGCtLTvrDhe3Up0R9/hGFbB+9l21/TwF5L1C6g=="], - "@aws-sdk/client-s3/@aws-sdk/util-endpoints/@aws-sdk/types": ["@aws-sdk/types@3.972.0", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-U7xBIbLSetONxb2bNzHyDgND3oKGoIfmknrEVnoEU4GUSs+0augUOIn9DIWGUO2ETcRFdsRUnmx9KhPT9Ojbug=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso": ["@aws-sdk/credential-provider-sso@3.972.5", "", { "dependencies": { "@aws-sdk/client-sso": "3.985.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/token-providers": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-8urj3AoeNeQisjMmMBhFeiY2gxt6/7wQQbEGun0YV/OaOOiXrIudTIEYF8ZfD+NQI6X1FY5AkRsx6O/CaGiybA=="], - 
"@aws-sdk/client-sesv2/@aws-sdk/core/@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.2", "", { "dependencies": { "@smithy/types": "^4.12.0", "fast-xml-parser": "5.2.5", "tslib": "^2.6.2" } }, "sha512-jGOOV/bV1DhkkUhHiZ3/1GZ67cZyOXaDb7d1rYD6ZiXf5V9tBNOcgqXwRRPvrCbYaFRa1pPMFb3ZjqjWpR3YfA=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity": ["@aws-sdk/credential-provider-web-identity@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/nested-clients": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-OK3cULuJl6c+RcDZfPpaK5o3deTOnKZbxm7pzhFNGA3fI2hF9yDih17fGRazJzGGWaDVlR9ejZrpDef4DJCEsw=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-env": ["@aws-sdk/credential-provider-env@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-wzH1EdrZsytG1xN9UHaK12J9+kfrnd2+c8y0LVoS4O4laEjPoie1qVK3k8/rZe7KOtvULzyMnO3FT4Krr9Z0Dg=="], + "@aws-sdk/client-sesv2/@aws-sdk/core/@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.4", "", { "dependencies": { "@smithy/types": "^4.12.0", "fast-xml-parser": "5.3.4", "tslib": "^2.6.2" } }, "sha512-0zJ05ANfYqI6+rGqj8samZBFod0dPPousBjLEqg8WdxSgbMAkRgLyn81lP215Do0rFJ/17LIXwr7q0yK24mP6Q=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-http": ["@aws-sdk/credential-provider-http@3.972.4", "", { "dependencies": { "@aws-sdk/core": "^3.973.4", "@aws-sdk/types": "^3.973.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/node-http-handler": "^4.4.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/util-stream": "^4.5.10", "tslib": 
"^2.6.2" } }, "sha512-OC7F3ipXV12QfDEWybQGHLzoeHBlAdx/nLzPfHP0Wsabu3JBffu5nlzSaJNf7to9HGtOW8Bpu8NX0ugmDrCbtw=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-env": ["@aws-sdk/credential-provider-env@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-LxJ9PEO4gKPXzkufvIESUysykPIdrV7+Ocb9yAhbhJLE4TiAYqbCVUE+VuKP1leGR1bBfjWjYgSV5MxprlX3mQ=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini": ["@aws-sdk/credential-provider-ini@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", "@aws-sdk/credential-provider-env": "^3.972.2", "@aws-sdk/credential-provider-http": "^3.972.3", "@aws-sdk/credential-provider-login": "^3.972.2", "@aws-sdk/credential-provider-process": "^3.972.2", "@aws-sdk/credential-provider-sso": "^3.972.2", "@aws-sdk/credential-provider-web-identity": "^3.972.2", "@aws-sdk/nested-clients": "3.975.0", "@aws-sdk/types": "^3.973.1", "@smithy/credential-provider-imds": "^4.2.8", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-Jrb8sLm6k8+L7520irBrvCtdLxNtrG7arIxe9TCeMJt/HxqMGJdbIjw8wILzkEHLMIi4MecF2FbXCln7OT1Tag=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-http": ["@aws-sdk/credential-provider-http@3.972.7", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/types": "^3.973.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/node-http-handler": "^4.4.9", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/util-stream": "^4.5.11", "tslib": "^2.6.2" } }, "sha512-L2uOGtvp2x3bTcxFTpSM+GkwFIPd8pHfGWO1764icMbo7e5xJh0nfhx1UwkXLnwvocTNEf8A7jISZLYjUSNaTg=="], - 
"@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-process": ["@aws-sdk/credential-provider-process@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-NLKLTT7jnUe9GpQAVkPTJO+cs2FjlQDt5fArIYS7h/Iw/CvamzgGYGFRVD2SE05nOHCMwafUSi42If8esGFV+g=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini": ["@aws-sdk/credential-provider-ini@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/credential-provider-env": "^3.972.5", "@aws-sdk/credential-provider-http": "^3.972.7", "@aws-sdk/credential-provider-login": "^3.972.5", "@aws-sdk/credential-provider-process": "^3.972.5", "@aws-sdk/credential-provider-sso": "^3.972.5", "@aws-sdk/credential-provider-web-identity": "^3.972.5", "@aws-sdk/nested-clients": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/credential-provider-imds": "^4.2.8", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-SdDTYE6jkARzOeL7+kudMIM4DaFnP5dZVeatzw849k4bSXDdErDS188bgeNzc/RA2WGrlEpsqHUKP6G7sVXhZg=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso": ["@aws-sdk/credential-provider-sso@3.972.2", "", { "dependencies": { "@aws-sdk/client-sso": "3.975.0", "@aws-sdk/core": "^3.973.2", "@aws-sdk/token-providers": "3.975.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-YpwDn8g3gCGUl61cCV0sRxP2pFIwg+ZsMfWQ/GalSyjXtRkctCMFA+u0yPb/Q4uTfNEiya1Y4nm0C5rIHyPW5Q=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-process": ["@aws-sdk/credential-provider-process@3.972.5", "", { 
"dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-HDKF3mVbLnuqGg6dMnzBf1VUOywE12/N286msI9YaK9mEIzdsGCtLTvrDhe3Up0R9/hGFbB+9l21/TwF5L1C6g=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity": ["@aws-sdk/credential-provider-web-identity@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", "@aws-sdk/nested-clients": "3.975.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-x9DAiN9Qz+NjJ99ltDiVQ8d511M/tuF/9MFbe2jUgo7HZhD6+x4S3iT1YcP07ndwDUjmzKGmeOEgE24k4qvfdg=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso": ["@aws-sdk/credential-provider-sso@3.972.5", "", { "dependencies": { "@aws-sdk/client-sso": "3.985.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/token-providers": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-8urj3AoeNeQisjMmMBhFeiY2gxt6/7wQQbEGun0YV/OaOOiXrIudTIEYF8ZfD+NQI6X1FY5AkRsx6O/CaGiybA=="], - "@aws-sdk/client-sesv2/@aws-sdk/util-endpoints/@aws-sdk/types": ["@aws-sdk/types@3.972.0", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-U7xBIbLSetONxb2bNzHyDgND3oKGoIfmknrEVnoEU4GUSs+0augUOIn9DIWGUO2ETcRFdsRUnmx9KhPT9Ojbug=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity": ["@aws-sdk/credential-provider-web-identity@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/nested-clients": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": 
"^4.12.0", "tslib": "^2.6.2" } }, "sha512-OK3cULuJl6c+RcDZfPpaK5o3deTOnKZbxm7pzhFNGA3fI2hF9yDih17fGRazJzGGWaDVlR9ejZrpDef4DJCEsw=="], "@aws-sdk/client-sqs/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-env": ["@aws-sdk/credential-provider-env@3.947.0", "", { "dependencies": { "@aws-sdk/core": "3.947.0", "@aws-sdk/types": "3.936.0", "@smithy/property-provider": "^4.2.5", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-VR2V6dRELmzwAsCpK4GqxUi6UW5WNhAXS9F9AzWi5jvijwJo3nH92YNJUP4quMpgFZxJHEWyXLWgPjh9u0zYOA=="], @@ -4295,13 +4287,9 @@ "@aws-sdk/client-sqs/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity": ["@aws-sdk/credential-provider-web-identity@3.947.0", "", { "dependencies": { "@aws-sdk/core": "3.947.0", "@aws-sdk/nested-clients": "3.947.0", "@aws-sdk/types": "3.936.0", "@smithy/property-provider": "^4.2.5", "@smithy/shared-ini-file-loader": "^4.4.0", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-gokm/e/YHiHLrZgLq4j8tNAn8RJDPbIcglFRKgy08q8DmAqHQ8MXAKW3eS0QjAuRXU9mcMmUo1NrX6FRNBCCPw=="], - "@aws-sdk/middleware-flexible-checksums/@aws-sdk/core/@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.2", "", { "dependencies": { "@smithy/types": "^4.12.0", "fast-xml-parser": "5.2.5", "tslib": "^2.6.2" } }, "sha512-jGOOV/bV1DhkkUhHiZ3/1GZ67cZyOXaDb7d1rYD6ZiXf5V9tBNOcgqXwRRPvrCbYaFRa1pPMFb3ZjqjWpR3YfA=="], + "@aws-sdk/middleware-flexible-checksums/@aws-sdk/core/@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.4", "", { "dependencies": { "@smithy/types": "^4.12.0", "fast-xml-parser": "5.3.4", "tslib": "^2.6.2" } }, "sha512-0zJ05ANfYqI6+rGqj8samZBFod0dPPousBjLEqg8WdxSgbMAkRgLyn81lP215Do0rFJ/17LIXwr7q0yK24mP6Q=="], - "@aws-sdk/middleware-sdk-s3/@aws-sdk/core/@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.2", "", { "dependencies": { "@smithy/types": "^4.12.0", "fast-xml-parser": "5.2.5", "tslib": "^2.6.2" } }, "sha512-jGOOV/bV1DhkkUhHiZ3/1GZ67cZyOXaDb7d1rYD6ZiXf5V9tBNOcgqXwRRPvrCbYaFRa1pPMFb3ZjqjWpR3YfA=="], 
- - "@aws-sdk/signature-v4-multi-region/@aws-sdk/middleware-sdk-s3/@aws-sdk/core": ["@aws-sdk/core@3.972.0", "", { "dependencies": { "@aws-sdk/types": "3.972.0", "@aws-sdk/xml-builder": "3.972.0", "@smithy/core": "^3.20.6", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.10.8", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-nEeUW2M9F+xdIaD98F5MBcQ4ITtykj3yKbgFZ6J0JtL3bq+Z90szQ6Yy8H/BLPYXTs3V4n9ifnBo8cprRDiE6A=="], - - "@aws-sdk/signature-v4-multi-region/@aws-sdk/middleware-sdk-s3/@aws-sdk/util-arn-parser": ["@aws-sdk/util-arn-parser@3.972.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-RM5Mmo/KJ593iMSrALlHEOcc9YOIyOsDmS5x2NLOMdEmzv1o00fcpAkCQ02IGu1eFneBFT7uX0Mpag0HI+Cz2g=="], + "@aws-sdk/middleware-sdk-s3/@aws-sdk/core/@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.4", "", { "dependencies": { "@smithy/types": "^4.12.0", "fast-xml-parser": "5.3.4", "tslib": "^2.6.2" } }, "sha512-0zJ05ANfYqI6+rGqj8samZBFod0dPPousBjLEqg8WdxSgbMAkRgLyn81lP215Do0rFJ/17LIXwr7q0yK24mP6Q=="], "@babel/helper-compilation-targets/lru-cache/yallist": ["yallist@3.1.1", "", {}, "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g=="], @@ -4361,12 +4349,6 @@ "@esbuild-kit/core-utils/esbuild/@esbuild/win32-x64": ["@esbuild/win32-x64@0.18.20", "", { "os": "win32", "cpu": "x64" }, "sha512-kTdfRcSiDfQca/y9QIkng02avJ+NCaQvrMejlsB3RRv5sE9rRoeBPISaZpKxHELzRxZyLvNts1P27W3wV+8geQ=="], - "@isaacs/cliui/string-width/emoji-regex": ["emoji-regex@9.2.2", "", {}, "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="], - - "@isaacs/cliui/strip-ansi/ansi-regex": ["ansi-regex@6.2.2", "", {}, 
"sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], - - "@isaacs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@6.2.3", "", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="], - "@octokit/plugin-paginate-rest/@octokit/types/@octokit/openapi-types": ["@octokit/openapi-types@24.2.0", "", {}, "sha512-9sIH3nSUttelJSXUrmGzl7QUBFul0/mB8HRYl3fOlgHbIWG+WnYDXU3v/2zMtAvuzZ/ed00Ei6on975FhBfzrg=="], "@octokit/plugin-rest-endpoint-methods/@octokit/types/@octokit/openapi-types": ["@octokit/openapi-types@24.2.0", "", {}, "sha512-9sIH3nSUttelJSXUrmGzl7QUBFul0/mB8HRYl3fOlgHbIWG+WnYDXU3v/2zMtAvuzZ/ed00Ei6on975FhBfzrg=="], @@ -4451,63 +4433,63 @@ "engine.io/@types/node/undici-types": ["undici-types@7.10.0", "", {}, "sha512-t5Fy/nfn+14LuOc2KNYg75vZqClpAiqscVvMygNnlsHBFpSXdJaYtXMcdNLpl/Qvc3P2cB3s6lOV51nqsFq4ag=="], - "fetch-cookie/tough-cookie/tldts": ["tldts@7.0.19", "", { "dependencies": { "tldts-core": "^7.0.19" }, "bin": { "tldts": "bin/cli.js" } }, "sha512-8PWx8tvC4jDB39BQw1m4x8y5MH1BcQ5xHeL2n7UVFulMPH/3Q0uiamahFJ3lXA0zO2SUyRXuVVbWSDmstlt9YA=="], + "fetch-cookie/tough-cookie/tldts": ["tldts@7.0.23", "", { "dependencies": { "tldts-core": "^7.0.23" }, "bin": { "tldts": "bin/cli.js" } }, "sha512-ASdhgQIBSay0R/eXggAkQ53G4nTJqTXqC2kbaBbdDwM7SkjyZyO0OaaN1/FH7U/yCeqOHDwFO5j8+Os/IS1dXw=="], "ffmpeg-static/https-proxy-agent/agent-base": ["agent-base@6.0.2", "", { "dependencies": { "debug": "4" } }, "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ=="], "form-data/mime-types/mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="], - "fumadocs-mdx/esbuild/@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.2", "", { "os": "aix", "cpu": "ppc64" }, "sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw=="], + 
"fumadocs-mdx/esbuild/@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.3", "", { "os": "aix", "cpu": "ppc64" }, "sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg=="], - "fumadocs-mdx/esbuild/@esbuild/android-arm": ["@esbuild/android-arm@0.27.2", "", { "os": "android", "cpu": "arm" }, "sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA=="], + "fumadocs-mdx/esbuild/@esbuild/android-arm": ["@esbuild/android-arm@0.27.3", "", { "os": "android", "cpu": "arm" }, "sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA=="], - "fumadocs-mdx/esbuild/@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.2", "", { "os": "android", "cpu": "arm64" }, "sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA=="], + "fumadocs-mdx/esbuild/@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.3", "", { "os": "android", "cpu": "arm64" }, "sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg=="], - "fumadocs-mdx/esbuild/@esbuild/android-x64": ["@esbuild/android-x64@0.27.2", "", { "os": "android", "cpu": "x64" }, "sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A=="], + "fumadocs-mdx/esbuild/@esbuild/android-x64": ["@esbuild/android-x64@0.27.3", "", { "os": "android", "cpu": "x64" }, "sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ=="], - "fumadocs-mdx/esbuild/@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.2", "", { "os": "darwin", "cpu": "arm64" }, "sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg=="], + "fumadocs-mdx/esbuild/@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.3", "", { "os": "darwin", "cpu": "arm64" }, "sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg=="], - 
"fumadocs-mdx/esbuild/@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.2", "", { "os": "darwin", "cpu": "x64" }, "sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA=="], + "fumadocs-mdx/esbuild/@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.3", "", { "os": "darwin", "cpu": "x64" }, "sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg=="], - "fumadocs-mdx/esbuild/@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.2", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g=="], + "fumadocs-mdx/esbuild/@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.3", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w=="], - "fumadocs-mdx/esbuild/@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.2", "", { "os": "freebsd", "cpu": "x64" }, "sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA=="], + "fumadocs-mdx/esbuild/@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.3", "", { "os": "freebsd", "cpu": "x64" }, "sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA=="], - "fumadocs-mdx/esbuild/@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.2", "", { "os": "linux", "cpu": "arm" }, "sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw=="], + "fumadocs-mdx/esbuild/@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.3", "", { "os": "linux", "cpu": "arm" }, "sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw=="], - "fumadocs-mdx/esbuild/@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.2", "", { "os": "linux", "cpu": "arm64" }, "sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw=="], + 
"fumadocs-mdx/esbuild/@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.3", "", { "os": "linux", "cpu": "arm64" }, "sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg=="], - "fumadocs-mdx/esbuild/@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.2", "", { "os": "linux", "cpu": "ia32" }, "sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w=="], + "fumadocs-mdx/esbuild/@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.3", "", { "os": "linux", "cpu": "ia32" }, "sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg=="], - "fumadocs-mdx/esbuild/@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.2", "", { "os": "linux", "cpu": "none" }, "sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg=="], + "fumadocs-mdx/esbuild/@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA=="], - "fumadocs-mdx/esbuild/@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.2", "", { "os": "linux", "cpu": "none" }, "sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw=="], + "fumadocs-mdx/esbuild/@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw=="], - "fumadocs-mdx/esbuild/@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.2", "", { "os": "linux", "cpu": "ppc64" }, "sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ=="], + "fumadocs-mdx/esbuild/@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.3", "", { "os": "linux", "cpu": "ppc64" }, "sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA=="], - 
"fumadocs-mdx/esbuild/@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.2", "", { "os": "linux", "cpu": "none" }, "sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA=="], + "fumadocs-mdx/esbuild/@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ=="], - "fumadocs-mdx/esbuild/@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.2", "", { "os": "linux", "cpu": "s390x" }, "sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w=="], + "fumadocs-mdx/esbuild/@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.3", "", { "os": "linux", "cpu": "s390x" }, "sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw=="], - "fumadocs-mdx/esbuild/@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.2", "", { "os": "linux", "cpu": "x64" }, "sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA=="], + "fumadocs-mdx/esbuild/@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.3", "", { "os": "linux", "cpu": "x64" }, "sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA=="], - "fumadocs-mdx/esbuild/@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.2", "", { "os": "none", "cpu": "arm64" }, "sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw=="], + "fumadocs-mdx/esbuild/@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.3", "", { "os": "none", "cpu": "arm64" }, "sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA=="], - "fumadocs-mdx/esbuild/@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.2", "", { "os": "none", "cpu": "x64" }, "sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA=="], + "fumadocs-mdx/esbuild/@esbuild/netbsd-x64": 
["@esbuild/netbsd-x64@0.27.3", "", { "os": "none", "cpu": "x64" }, "sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA=="], - "fumadocs-mdx/esbuild/@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.2", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA=="], + "fumadocs-mdx/esbuild/@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.3", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw=="], - "fumadocs-mdx/esbuild/@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.2", "", { "os": "openbsd", "cpu": "x64" }, "sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg=="], + "fumadocs-mdx/esbuild/@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.3", "", { "os": "openbsd", "cpu": "x64" }, "sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ=="], - "fumadocs-mdx/esbuild/@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.2", "", { "os": "none", "cpu": "arm64" }, "sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag=="], + "fumadocs-mdx/esbuild/@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.3", "", { "os": "none", "cpu": "arm64" }, "sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g=="], - "fumadocs-mdx/esbuild/@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.2", "", { "os": "sunos", "cpu": "x64" }, "sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg=="], + "fumadocs-mdx/esbuild/@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.3", "", { "os": "sunos", "cpu": "x64" }, "sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA=="], - "fumadocs-mdx/esbuild/@esbuild/win32-arm64": 
["@esbuild/win32-arm64@0.27.2", "", { "os": "win32", "cpu": "arm64" }, "sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg=="], + "fumadocs-mdx/esbuild/@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.27.3", "", { "os": "win32", "cpu": "arm64" }, "sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA=="], - "fumadocs-mdx/esbuild/@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.2", "", { "os": "win32", "cpu": "ia32" }, "sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ=="], + "fumadocs-mdx/esbuild/@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.3", "", { "os": "win32", "cpu": "ia32" }, "sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q=="], - "fumadocs-mdx/esbuild/@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.2", "", { "os": "win32", "cpu": "x64" }, "sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ=="], + "fumadocs-mdx/esbuild/@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.3", "", { "os": "win32", "cpu": "x64" }, "sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA=="], "gray-matter/js-yaml/argparse": ["argparse@1.0.10", "", { "dependencies": { "sprintf-js": "~1.0.2" } }, "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg=="], @@ -4597,7 +4579,7 @@ "next/sharp/@img/sharp-win32-x64": ["@img/sharp-win32-x64@0.34.5", "", { "os": "win32", "cpu": "x64" }, "sha512-+29YMsqY2/9eFEiW93eqWnuLcWcufowXewwSNIT6UwZdUUCrM3oFjMWH/Z6/TMmb4hlFenmfAVbpWeup2jryCw=="], - "nypm/pkg-types/confbox": ["confbox@0.2.2", "", {}, "sha512-1NB+BKqhtNipMsov4xI/NnhCKp9XG9NamYp5PVm9klAT0fsrNPjaFICsCFhNhwZJKNh7zB/3q8qXz0E9oaMNtQ=="], + "nypm/pkg-types/confbox": ["confbox@0.2.4", "", {}, "sha512-ysOGlgTFbN2/Y6Cg3Iye8YKulHw+R2fNXHrgSmXISQdMnomY6eNDprVdW9R5xBguEqI954+S6709UyiO7B+6OQ=="], 
"oauth2-mock-server/express/body-parser": ["body-parser@1.20.4", "", { "dependencies": { "bytes": "~3.1.2", "content-type": "~1.0.5", "debug": "2.6.9", "depd": "2.0.0", "destroy": "~1.2.0", "http-errors": "~2.0.1", "iconv-lite": "~0.4.24", "on-finished": "~2.4.1", "qs": "~6.14.0", "raw-body": "~2.5.3", "type-is": "~1.6.18", "unpipe": "~1.0.0" } }, "sha512-ZTgYYLMOXY9qKU/57FAo8F+HA2dGX7bqGc71txDRC1rS4frdFI5R7NhluHxH6M0YItAP0sHB4uqAOcYKxO6uGA=="], @@ -4673,57 +4655,57 @@ "twilio/https-proxy-agent/agent-base": ["agent-base@6.0.2", "", { "dependencies": { "debug": "4" } }, "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ=="], - "vite/esbuild/@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.2", "", { "os": "aix", "cpu": "ppc64" }, "sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw=="], + "vite/esbuild/@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.3", "", { "os": "aix", "cpu": "ppc64" }, "sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg=="], - "vite/esbuild/@esbuild/android-arm": ["@esbuild/android-arm@0.27.2", "", { "os": "android", "cpu": "arm" }, "sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA=="], + "vite/esbuild/@esbuild/android-arm": ["@esbuild/android-arm@0.27.3", "", { "os": "android", "cpu": "arm" }, "sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA=="], - "vite/esbuild/@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.2", "", { "os": "android", "cpu": "arm64" }, "sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA=="], + "vite/esbuild/@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.3", "", { "os": "android", "cpu": "arm64" }, "sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg=="], - "vite/esbuild/@esbuild/android-x64": 
["@esbuild/android-x64@0.27.2", "", { "os": "android", "cpu": "x64" }, "sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A=="], + "vite/esbuild/@esbuild/android-x64": ["@esbuild/android-x64@0.27.3", "", { "os": "android", "cpu": "x64" }, "sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ=="], - "vite/esbuild/@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.2", "", { "os": "darwin", "cpu": "arm64" }, "sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg=="], + "vite/esbuild/@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.3", "", { "os": "darwin", "cpu": "arm64" }, "sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg=="], - "vite/esbuild/@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.2", "", { "os": "darwin", "cpu": "x64" }, "sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA=="], + "vite/esbuild/@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.3", "", { "os": "darwin", "cpu": "x64" }, "sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg=="], - "vite/esbuild/@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.2", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g=="], + "vite/esbuild/@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.3", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w=="], - "vite/esbuild/@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.2", "", { "os": "freebsd", "cpu": "x64" }, "sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA=="], + "vite/esbuild/@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.3", "", { "os": "freebsd", "cpu": "x64" }, 
"sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA=="], - "vite/esbuild/@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.2", "", { "os": "linux", "cpu": "arm" }, "sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw=="], + "vite/esbuild/@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.3", "", { "os": "linux", "cpu": "arm" }, "sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw=="], - "vite/esbuild/@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.2", "", { "os": "linux", "cpu": "arm64" }, "sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw=="], + "vite/esbuild/@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.3", "", { "os": "linux", "cpu": "arm64" }, "sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg=="], - "vite/esbuild/@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.2", "", { "os": "linux", "cpu": "ia32" }, "sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w=="], + "vite/esbuild/@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.3", "", { "os": "linux", "cpu": "ia32" }, "sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg=="], - "vite/esbuild/@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.2", "", { "os": "linux", "cpu": "none" }, "sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg=="], + "vite/esbuild/@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA=="], - "vite/esbuild/@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.2", "", { "os": "linux", "cpu": "none" }, "sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw=="], + 
"vite/esbuild/@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw=="], - "vite/esbuild/@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.2", "", { "os": "linux", "cpu": "ppc64" }, "sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ=="], + "vite/esbuild/@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.3", "", { "os": "linux", "cpu": "ppc64" }, "sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA=="], - "vite/esbuild/@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.2", "", { "os": "linux", "cpu": "none" }, "sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA=="], + "vite/esbuild/@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ=="], - "vite/esbuild/@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.2", "", { "os": "linux", "cpu": "s390x" }, "sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w=="], + "vite/esbuild/@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.3", "", { "os": "linux", "cpu": "s390x" }, "sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw=="], - "vite/esbuild/@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.2", "", { "os": "linux", "cpu": "x64" }, "sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA=="], + "vite/esbuild/@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.3", "", { "os": "linux", "cpu": "x64" }, "sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA=="], - "vite/esbuild/@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.2", "", { "os": "none", "cpu": "arm64" }, 
"sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw=="], + "vite/esbuild/@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.3", "", { "os": "none", "cpu": "arm64" }, "sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA=="], - "vite/esbuild/@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.2", "", { "os": "none", "cpu": "x64" }, "sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA=="], + "vite/esbuild/@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.3", "", { "os": "none", "cpu": "x64" }, "sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA=="], - "vite/esbuild/@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.2", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA=="], + "vite/esbuild/@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.3", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw=="], - "vite/esbuild/@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.2", "", { "os": "openbsd", "cpu": "x64" }, "sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg=="], + "vite/esbuild/@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.3", "", { "os": "openbsd", "cpu": "x64" }, "sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ=="], - "vite/esbuild/@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.2", "", { "os": "none", "cpu": "arm64" }, "sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag=="], + "vite/esbuild/@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.3", "", { "os": "none", "cpu": "arm64" }, 
"sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g=="], - "vite/esbuild/@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.2", "", { "os": "sunos", "cpu": "x64" }, "sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg=="], + "vite/esbuild/@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.3", "", { "os": "sunos", "cpu": "x64" }, "sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA=="], - "vite/esbuild/@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.27.2", "", { "os": "win32", "cpu": "arm64" }, "sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg=="], + "vite/esbuild/@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.27.3", "", { "os": "win32", "cpu": "arm64" }, "sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA=="], - "vite/esbuild/@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.2", "", { "os": "win32", "cpu": "ia32" }, "sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ=="], + "vite/esbuild/@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.3", "", { "os": "win32", "cpu": "ia32" }, "sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q=="], - "vite/esbuild/@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.2", "", { "os": "win32", "cpu": "x64" }, "sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ=="], + "vite/esbuild/@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.3", "", { "os": "win32", "cpu": "x64" }, "sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA=="], "@aws-crypto/sha1-browser/@smithy/util-utf8/@smithy/util-buffer-from/@smithy/is-array-buffer": ["@smithy/is-array-buffer@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, 
"sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA=="], @@ -4731,29 +4713,29 @@ "@aws-crypto/util/@smithy/util-utf8/@smithy/util-buffer-from/@smithy/is-array-buffer": ["@smithy/is-array-buffer@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA=="], - "@aws-sdk/client-s3/@aws-sdk/core/@aws-sdk/xml-builder/fast-xml-parser": ["fast-xml-parser@5.2.5", "", { "dependencies": { "strnum": "^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-pfX9uG9Ki0yekDHx2SiuRIyFdyAr1kMIMitPvb0YBo8SUfKvia7w7FIyd/l6av85pFYRhZscS75MwMnbvY+hcQ=="], + "@aws-sdk/client-s3/@aws-sdk/core/@aws-sdk/xml-builder/fast-xml-parser": ["fast-xml-parser@5.3.4", "", { "dependencies": { "strnum": "^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-EFd6afGmXlCx8H8WTZHhAoDaWaGyuIBoZJ2mknrNxug+aZKjkp0a0dlars9Izl+jF+7Gu1/5f/2h68cQpe0IiA=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/credential-provider-login": ["@aws-sdk/credential-provider-login@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", "@aws-sdk/nested-clients": "3.975.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-mlaw2aiI3DrimW85ZMn3g7qrtHueidS58IGytZ+mbFpsYLK5wMjCAKZQtt7VatLMtSBG/dn/EY4njbnYXIDKeQ=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/credential-provider-login": ["@aws-sdk/credential-provider-login@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/nested-clients": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, 
"sha512-uYq1ILyTSI6ZDCMY5+vUsRM0SOCVI7kaW4wBrehVVkhAxC6y+e9rvGtnoZqCOWL1gKjTMouvsf4Ilhc5NCg1Aw=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.975.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.1", "@aws-sdk/middleware-host-header": "^3.972.1", "@aws-sdk/middleware-logger": "^3.972.1", "@aws-sdk/middleware-recursion-detection": "^3.972.1", "@aws-sdk/middleware-user-agent": "^3.972.2", "@aws-sdk/region-config-resolver": "^3.972.1", "@aws-sdk/types": "^3.973.0", "@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.1", "@aws-sdk/util-user-agent-node": "^3.972.1", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.21.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.11", "@smithy/middleware-retry": "^4.4.27", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.10.12", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.26", "@smithy/util-defaults-mode-node": "^4.2.29", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-OkeFHPlQj2c/Y5bQGkX14pxhDWUGUFt3LRHhjcDKsSCw6lrxKcxN3WFZN0qbJwKNydP+knL5nxvfgKiCLpTLRA=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.985.0", "", { 
"dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-TsWwKzb/2WHafAY0CE7uXgLj0FmnkBTgfioG9HO+7z/zCPcl1+YU+i7dW4o0y+aFxFgxTMG+ExBQpqT/k2ao8g=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/client-sso": ["@aws-sdk/client-sso@3.975.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.1", "@aws-sdk/middleware-host-header": "^3.972.1", "@aws-sdk/middleware-logger": "^3.972.1", "@aws-sdk/middleware-recursion-detection": "^3.972.1", 
"@aws-sdk/middleware-user-agent": "^3.972.2", "@aws-sdk/region-config-resolver": "^3.972.1", "@aws-sdk/types": "^3.973.0", "@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.1", "@aws-sdk/util-user-agent-node": "^3.972.1", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.21.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.11", "@smithy/middleware-retry": "^4.4.27", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.10.12", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.26", "@smithy/util-defaults-mode-node": "^4.2.29", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-HpgJuleH7P6uILxzJKQOmlHdwaCY+xYC6VgRDzlwVEqU/HXjo4m2gOAyjUbpXlBOCWfGgMUzfBlNJ9z3MboqEQ=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/client-sso": ["@aws-sdk/client-sso@3.985.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", 
"@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-81J8iE8MuXhdbMfIz4sWFj64Pe41bFi/uqqmqOC5SlGv+kwoyLsyKS/rH2tW2t5buih4vTUxskRjxlqikTD4oQ=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers": ["@aws-sdk/token-providers@3.975.0", "", { "dependencies": { "@aws-sdk/core": "^3.973.1", "@aws-sdk/nested-clients": "3.975.0", "@aws-sdk/types": "^3.973.0", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-AWQt64hkVbDQ+CmM09wnvSk2mVyH4iRROkmYkr3/lmUtFNbE2L/fnw26sckZnUcFCsHPqbkQrcsZAnTcBLbH4w=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers": ["@aws-sdk/token-providers@3.985.0", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/nested-clients": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, 
"sha512-+hwpHZyEq8k+9JL2PkE60V93v2kNhUIv7STFt+EAez1UJsJOQDhc5LpzEX66pNjclI5OTwBROs/DhJjC/BtMjQ=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.975.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.1", "@aws-sdk/middleware-host-header": "^3.972.1", "@aws-sdk/middleware-logger": "^3.972.1", "@aws-sdk/middleware-recursion-detection": "^3.972.1", "@aws-sdk/middleware-user-agent": "^3.972.2", "@aws-sdk/region-config-resolver": "^3.972.1", "@aws-sdk/types": "^3.973.0", "@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.1", "@aws-sdk/util-user-agent-node": "^3.972.1", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.21.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.11", "@smithy/middleware-retry": "^4.4.27", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.10.12", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.26", "@smithy/util-defaults-mode-node": "^4.2.29", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-OkeFHPlQj2c/Y5bQGkX14pxhDWUGUFt3LRHhjcDKsSCw6lrxKcxN3WFZN0qbJwKNydP+knL5nxvfgKiCLpTLRA=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity/@aws-sdk/nested-clients": 
["@aws-sdk/nested-clients@3.985.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-TsWwKzb/2WHafAY0CE7uXgLj0FmnkBTgfioG9HO+7z/zCPcl1+YU+i7dW4o0y+aFxFgxTMG+ExBQpqT/k2ao8g=="], - "@aws-sdk/client-sesv2/@aws-sdk/core/@aws-sdk/xml-builder/fast-xml-parser": ["fast-xml-parser@5.2.5", "", { "dependencies": { "strnum": "^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-pfX9uG9Ki0yekDHx2SiuRIyFdyAr1kMIMitPvb0YBo8SUfKvia7w7FIyd/l6av85pFYRhZscS75MwMnbvY+hcQ=="], + "@aws-sdk/client-sesv2/@aws-sdk/core/@aws-sdk/xml-builder/fast-xml-parser": 
["fast-xml-parser@5.3.4", "", { "dependencies": { "strnum": "^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-EFd6afGmXlCx8H8WTZHhAoDaWaGyuIBoZJ2mknrNxug+aZKjkp0a0dlars9Izl+jF+7Gu1/5f/2h68cQpe0IiA=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/credential-provider-login": ["@aws-sdk/credential-provider-login@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", "@aws-sdk/nested-clients": "3.975.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-mlaw2aiI3DrimW85ZMn3g7qrtHueidS58IGytZ+mbFpsYLK5wMjCAKZQtt7VatLMtSBG/dn/EY4njbnYXIDKeQ=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/credential-provider-login": ["@aws-sdk/credential-provider-login@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/nested-clients": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-uYq1ILyTSI6ZDCMY5+vUsRM0SOCVI7kaW4wBrehVVkhAxC6y+e9rvGtnoZqCOWL1gKjTMouvsf4Ilhc5NCg1Aw=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.975.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.1", "@aws-sdk/middleware-host-header": "^3.972.1", "@aws-sdk/middleware-logger": "^3.972.1", "@aws-sdk/middleware-recursion-detection": "^3.972.1", "@aws-sdk/middleware-user-agent": "^3.972.2", "@aws-sdk/region-config-resolver": "^3.972.1", "@aws-sdk/types": "^3.973.0", "@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.1", 
"@aws-sdk/util-user-agent-node": "^3.972.1", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.21.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.11", "@smithy/middleware-retry": "^4.4.27", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.10.12", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.26", "@smithy/util-defaults-mode-node": "^4.2.29", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-OkeFHPlQj2c/Y5bQGkX14pxhDWUGUFt3LRHhjcDKsSCw6lrxKcxN3WFZN0qbJwKNydP+knL5nxvfgKiCLpTLRA=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.985.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", 
"@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-TsWwKzb/2WHafAY0CE7uXgLj0FmnkBTgfioG9HO+7z/zCPcl1+YU+i7dW4o0y+aFxFgxTMG+ExBQpqT/k2ao8g=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/client-sso": ["@aws-sdk/client-sso@3.975.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.1", "@aws-sdk/middleware-host-header": "^3.972.1", "@aws-sdk/middleware-logger": "^3.972.1", "@aws-sdk/middleware-recursion-detection": "^3.972.1", "@aws-sdk/middleware-user-agent": "^3.972.2", "@aws-sdk/region-config-resolver": "^3.972.1", "@aws-sdk/types": "^3.973.0", "@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.1", "@aws-sdk/util-user-agent-node": "^3.972.1", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.21.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.11", "@smithy/middleware-retry": "^4.4.27", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", 
"@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.10.12", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.26", "@smithy/util-defaults-mode-node": "^4.2.29", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-HpgJuleH7P6uILxzJKQOmlHdwaCY+xYC6VgRDzlwVEqU/HXjo4m2gOAyjUbpXlBOCWfGgMUzfBlNJ9z3MboqEQ=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/client-sso": ["@aws-sdk/client-sso@3.985.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", 
"@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-81J8iE8MuXhdbMfIz4sWFj64Pe41bFi/uqqmqOC5SlGv+kwoyLsyKS/rH2tW2t5buih4vTUxskRjxlqikTD4oQ=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers": ["@aws-sdk/token-providers@3.975.0", "", { "dependencies": { "@aws-sdk/core": "^3.973.1", "@aws-sdk/nested-clients": "3.975.0", "@aws-sdk/types": "^3.973.0", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-AWQt64hkVbDQ+CmM09wnvSk2mVyH4iRROkmYkr3/lmUtFNbE2L/fnw26sckZnUcFCsHPqbkQrcsZAnTcBLbH4w=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers": ["@aws-sdk/token-providers@3.985.0", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/nested-clients": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-+hwpHZyEq8k+9JL2PkE60V93v2kNhUIv7STFt+EAez1UJsJOQDhc5LpzEX66pNjclI5OTwBROs/DhJjC/BtMjQ=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.975.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.1", "@aws-sdk/middleware-host-header": "^3.972.1", "@aws-sdk/middleware-logger": "^3.972.1", "@aws-sdk/middleware-recursion-detection": "^3.972.1", "@aws-sdk/middleware-user-agent": "^3.972.2", "@aws-sdk/region-config-resolver": "^3.972.1", "@aws-sdk/types": "^3.973.0", 
"@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.1", "@aws-sdk/util-user-agent-node": "^3.972.1", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.21.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.11", "@smithy/middleware-retry": "^4.4.27", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.10.12", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.26", "@smithy/util-defaults-mode-node": "^4.2.29", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-OkeFHPlQj2c/Y5bQGkX14pxhDWUGUFt3LRHhjcDKsSCw6lrxKcxN3WFZN0qbJwKNydP+knL5nxvfgKiCLpTLRA=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.985.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", 
"@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-TsWwKzb/2WHafAY0CE7uXgLj0FmnkBTgfioG9HO+7z/zCPcl1+YU+i7dW4o0y+aFxFgxTMG+ExBQpqT/k2ao8g=="], "@aws-sdk/client-sqs/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/credential-provider-login": ["@aws-sdk/credential-provider-login@3.947.0", "", { "dependencies": { "@aws-sdk/core": "3.947.0", "@aws-sdk/nested-clients": "3.947.0", "@aws-sdk/types": "3.936.0", "@smithy/property-provider": "^4.2.5", "@smithy/protocol-http": "^5.3.5", "@smithy/shared-ini-file-loader": "^4.4.0", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-u7M3hazcB7aJiVwosNdJRbIJDzbwQ861NTtl6S0HmvWpixaVb7iyhJZWg8/plyUznboZGBm7JVEdxtxv3u0bTA=="], @@ -4765,11 +4747,9 @@ "@aws-sdk/client-sqs/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.947.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "3.947.0", "@aws-sdk/middleware-host-header": "3.936.0", "@aws-sdk/middleware-logger": "3.936.0", "@aws-sdk/middleware-recursion-detection": "3.936.0", 
"@aws-sdk/middleware-user-agent": "3.947.0", "@aws-sdk/region-config-resolver": "3.936.0", "@aws-sdk/types": "3.936.0", "@aws-sdk/util-endpoints": "3.936.0", "@aws-sdk/util-user-agent-browser": "3.936.0", "@aws-sdk/util-user-agent-node": "3.947.0", "@smithy/config-resolver": "^4.4.3", "@smithy/core": "^3.18.7", "@smithy/fetch-http-handler": "^5.3.6", "@smithy/hash-node": "^4.2.5", "@smithy/invalid-dependency": "^4.2.5", "@smithy/middleware-content-length": "^4.2.5", "@smithy/middleware-endpoint": "^4.3.14", "@smithy/middleware-retry": "^4.4.14", "@smithy/middleware-serde": "^4.2.6", "@smithy/middleware-stack": "^4.2.5", "@smithy/node-config-provider": "^4.3.5", "@smithy/node-http-handler": "^4.4.5", "@smithy/protocol-http": "^5.3.5", "@smithy/smithy-client": "^4.9.10", "@smithy/types": "^4.9.0", "@smithy/url-parser": "^4.2.5", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.13", "@smithy/util-defaults-mode-node": "^4.2.16", "@smithy/util-endpoints": "^3.2.5", "@smithy/util-middleware": "^4.2.5", "@smithy/util-retry": "^4.2.5", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-DjRJEYNnHUTu9kGPPQDTSXquwSEd6myKR4ssI4FaYLFhdT3ldWpj73yYt807H3tdmhS7vPmdVqchSJnjurUQAw=="], - "@aws-sdk/middleware-flexible-checksums/@aws-sdk/core/@aws-sdk/xml-builder/fast-xml-parser": ["fast-xml-parser@5.2.5", "", { "dependencies": { "strnum": "^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-pfX9uG9Ki0yekDHx2SiuRIyFdyAr1kMIMitPvb0YBo8SUfKvia7w7FIyd/l6av85pFYRhZscS75MwMnbvY+hcQ=="], - - "@aws-sdk/middleware-sdk-s3/@aws-sdk/core/@aws-sdk/xml-builder/fast-xml-parser": ["fast-xml-parser@5.2.5", "", { "dependencies": { "strnum": "^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-pfX9uG9Ki0yekDHx2SiuRIyFdyAr1kMIMitPvb0YBo8SUfKvia7w7FIyd/l6av85pFYRhZscS75MwMnbvY+hcQ=="], + 
"@aws-sdk/middleware-flexible-checksums/@aws-sdk/core/@aws-sdk/xml-builder/fast-xml-parser": ["fast-xml-parser@5.3.4", "", { "dependencies": { "strnum": "^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-EFd6afGmXlCx8H8WTZHhAoDaWaGyuIBoZJ2mknrNxug+aZKjkp0a0dlars9Izl+jF+7Gu1/5f/2h68cQpe0IiA=="], - "@aws-sdk/signature-v4-multi-region/@aws-sdk/middleware-sdk-s3/@aws-sdk/core/@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.0", "", { "dependencies": { "@smithy/types": "^4.12.0", "fast-xml-parser": "5.2.5", "tslib": "^2.6.2" } }, "sha512-POaGMcXnozzqBUyJM3HLUZ9GR6OKJWPGJEmhtTnxZXt8B6JcJ/6K3xRJ5H/j8oovVLz8Wg6vFxAHv8lvuASxMg=="], + "@aws-sdk/middleware-sdk-s3/@aws-sdk/core/@aws-sdk/xml-builder/fast-xml-parser": ["fast-xml-parser@5.3.4", "", { "dependencies": { "strnum": "^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-EFd6afGmXlCx8H8WTZHhAoDaWaGyuIBoZJ2mknrNxug+aZKjkp0a0dlars9Izl+jF+7Gu1/5f/2h68cQpe0IiA=="], "@browserbasehq/sdk/node-fetch/whatwg-url/tr46": ["tr46@0.0.3", "", {}, "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="], @@ -4799,7 +4779,7 @@ "cli-truncate/string-width/strip-ansi/ansi-regex": ["ansi-regex@6.2.2", "", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], - "fetch-cookie/tough-cookie/tldts/tldts-core": ["tldts-core@7.0.19", "", {}, "sha512-lJX2dEWx0SGH4O6p+7FPwYmJ/bu1JbcGJ8RLaG9b7liIgZ85itUVEPbMtWRVrde/0fnDPEPHW10ZsKW3kVsE9A=="], + "fetch-cookie/tough-cookie/tldts/tldts-core": ["tldts-core@7.0.23", "", {}, "sha512-0g9vrtDQLrNIiCj22HSe9d4mLVG3g5ph5DZ8zCKBr4OtrspmNB6ss7hVyzArAeE88ceZocIEGkyW1Ime7fxPtQ=="], "groq-sdk/node-fetch/whatwg-url/tr46": ["tr46@0.0.3", "", {}, "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="], @@ -4811,7 +4791,7 @@ "lint-staged/listr2/cli-truncate/string-width": ["string-width@7.2.0", "", { "dependencies": { "emoji-regex": "^10.3.0", 
"get-east-asian-width": "^1.0.0", "strip-ansi": "^7.1.0" } }, "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ=="], - "lint-staged/listr2/log-update/ansi-escapes": ["ansi-escapes@7.2.0", "", { "dependencies": { "environment": "^1.0.0" } }, "sha512-g6LhBsl+GBPRWGWsBtutpzBYuIIdBkLEvad5C/va/74Db018+5TZiyA26cZJAr3Rft5lprVqOIPxf5Vid6tqAw=="], + "lint-staged/listr2/log-update/ansi-escapes": ["ansi-escapes@7.3.0", "", { "dependencies": { "environment": "^1.0.0" } }, "sha512-BvU8nYgGQBxcmMuEeUEmNTvrMVjJNSH7RgW24vXexN4Ven6qCvy4TntnvlnwnMLTVlcRQQdbRY8NKnaIoeWDNg=="], "lint-staged/listr2/log-update/cli-cursor": ["cli-cursor@5.0.0", "", { "dependencies": { "restore-cursor": "^5.0.0" } }, "sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw=="], @@ -4857,22 +4837,24 @@ "posthog-js/@opentelemetry/exporter-logs-otlp-http/@opentelemetry/otlp-transformer/@opentelemetry/sdk-trace-base": ["@opentelemetry/sdk-trace-base@2.2.0", "", { "dependencies": { "@opentelemetry/core": "2.2.0", "@opentelemetry/resources": "2.2.0", "@opentelemetry/semantic-conventions": "^1.29.0" }, "peerDependencies": { "@opentelemetry/api": ">=1.3.0 <1.10.0" } }, "sha512-xWQgL0Bmctsalg6PaXExmzdedSp3gyKV8mQBwK/j9VGdCDu2fmXIb2gAehBKbkXCpJ4HPkgv3QfoJWRT4dHWbw=="], + "rimraf/glob/jackspeak/@isaacs/cliui": ["@isaacs/cliui@8.0.2", "", { "dependencies": { "string-width": "^5.1.2", "string-width-cjs": "npm:string-width@^4.2.0", "strip-ansi": "^7.0.1", "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", "wrap-ansi": "^8.1.0", "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" } }, "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA=="], + "rimraf/glob/path-scurry/lru-cache": ["lru-cache@10.4.3", "", {}, "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="], "sim/tailwindcss/chokidar/glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, 
"sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="], "sim/tailwindcss/chokidar/readdirp": ["readdirp@3.6.0", "", { "dependencies": { "picomatch": "^2.2.1" } }, "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA=="], + "test-exclude/glob/jackspeak/@isaacs/cliui": ["@isaacs/cliui@8.0.2", "", { "dependencies": { "string-width": "^5.1.2", "string-width-cjs": "npm:string-width@^4.2.0", "strip-ansi": "^7.0.1", "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", "wrap-ansi": "^8.1.0", "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" } }, "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA=="], + "test-exclude/glob/path-scurry/lru-cache": ["lru-cache@10.4.3", "", {}, "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.975.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.1", "@aws-sdk/middleware-host-header": "^3.972.1", "@aws-sdk/middleware-logger": "^3.972.1", "@aws-sdk/middleware-recursion-detection": "^3.972.1", "@aws-sdk/middleware-user-agent": "^3.972.2", "@aws-sdk/region-config-resolver": "^3.972.1", "@aws-sdk/types": "^3.973.0", "@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.1", "@aws-sdk/util-user-agent-node": "^3.972.1", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.21.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.11", "@smithy/middleware-retry": "^4.4.27", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": 
"^4.3.8", "@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.10.12", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.26", "@smithy/util-defaults-mode-node": "^4.2.29", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-OkeFHPlQj2c/Y5bQGkX14pxhDWUGUFt3LRHhjcDKsSCw6lrxKcxN3WFZN0qbJwKNydP+knL5nxvfgKiCLpTLRA=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.985.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", 
"@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-TsWwKzb/2WHafAY0CE7uXgLj0FmnkBTgfioG9HO+7z/zCPcl1+YU+i7dW4o0y+aFxFgxTMG+ExBQpqT/k2ao8g=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.975.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.1", "@aws-sdk/middleware-host-header": "^3.972.1", "@aws-sdk/middleware-logger": "^3.972.1", "@aws-sdk/middleware-recursion-detection": "^3.972.1", "@aws-sdk/middleware-user-agent": "^3.972.2", "@aws-sdk/region-config-resolver": "^3.972.1", "@aws-sdk/types": "^3.973.0", "@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.1", "@aws-sdk/util-user-agent-node": "^3.972.1", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.21.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.11", "@smithy/middleware-retry": "^4.4.27", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.10.12", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.26", "@smithy/util-defaults-mode-node": "^4.2.29", "@smithy/util-endpoints": "^3.2.8", 
"@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-OkeFHPlQj2c/Y5bQGkX14pxhDWUGUFt3LRHhjcDKsSCw6lrxKcxN3WFZN0qbJwKNydP+knL5nxvfgKiCLpTLRA=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.985.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-TsWwKzb/2WHafAY0CE7uXgLj0FmnkBTgfioG9HO+7z/zCPcl1+YU+i7dW4o0y+aFxFgxTMG+ExBQpqT/k2ao8g=="], 
"@aws-sdk/client-sqs/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.947.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "3.947.0", "@aws-sdk/middleware-host-header": "3.936.0", "@aws-sdk/middleware-logger": "3.936.0", "@aws-sdk/middleware-recursion-detection": "3.936.0", "@aws-sdk/middleware-user-agent": "3.947.0", "@aws-sdk/region-config-resolver": "3.936.0", "@aws-sdk/types": "3.936.0", "@aws-sdk/util-endpoints": "3.936.0", "@aws-sdk/util-user-agent-browser": "3.936.0", "@aws-sdk/util-user-agent-node": "3.947.0", "@smithy/config-resolver": "^4.4.3", "@smithy/core": "^3.18.7", "@smithy/fetch-http-handler": "^5.3.6", "@smithy/hash-node": "^4.2.5", "@smithy/invalid-dependency": "^4.2.5", "@smithy/middleware-content-length": "^4.2.5", "@smithy/middleware-endpoint": "^4.3.14", "@smithy/middleware-retry": "^4.4.14", "@smithy/middleware-serde": "^4.2.6", "@smithy/middleware-stack": "^4.2.5", "@smithy/node-config-provider": "^4.3.5", "@smithy/node-http-handler": "^4.4.5", "@smithy/protocol-http": "^5.3.5", "@smithy/smithy-client": "^4.9.10", "@smithy/types": "^4.9.0", "@smithy/url-parser": "^4.2.5", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.13", "@smithy/util-defaults-mode-node": "^4.2.16", "@smithy/util-endpoints": "^3.2.5", "@smithy/util-middleware": "^4.2.5", "@smithy/util-retry": "^4.2.5", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-DjRJEYNnHUTu9kGPPQDTSXquwSEd6myKR4ssI4FaYLFhdT3ldWpj73yYt807H3tdmhS7vPmdVqchSJnjurUQAw=="], - "@aws-sdk/signature-v4-multi-region/@aws-sdk/middleware-sdk-s3/@aws-sdk/core/@aws-sdk/xml-builder/fast-xml-parser": ["fast-xml-parser@5.2.5", "", { "dependencies": { "strnum": "^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, 
"sha512-pfX9uG9Ki0yekDHx2SiuRIyFdyAr1kMIMitPvb0YBo8SUfKvia7w7FIyd/l6av85pFYRhZscS75MwMnbvY+hcQ=="], - "@browserbasehq/stagehand/@anthropic-ai/sdk/node-fetch/whatwg-url/tr46": ["tr46@0.0.3", "", {}, "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="], "@browserbasehq/stagehand/@anthropic-ai/sdk/node-fetch/whatwg-url/webidl-conversions": ["webidl-conversions@3.0.1", "", {}, "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="], @@ -4895,10 +4877,34 @@ "oauth2-mock-server/express/type-is/mime-types/mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="], + "rimraf/glob/jackspeak/@isaacs/cliui/string-width": ["string-width@5.1.2", "", { "dependencies": { "eastasianwidth": "^0.2.0", "emoji-regex": "^9.2.2", "strip-ansi": "^7.0.1" } }, "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA=="], + + "rimraf/glob/jackspeak/@isaacs/cliui/strip-ansi": ["strip-ansi@7.1.2", "", { "dependencies": { "ansi-regex": "^6.0.1" } }, "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA=="], + + "rimraf/glob/jackspeak/@isaacs/cliui/wrap-ansi": ["wrap-ansi@8.1.0", "", { "dependencies": { "ansi-styles": "^6.1.0", "string-width": "^5.0.1", "strip-ansi": "^7.0.1" } }, "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ=="], + "sim/tailwindcss/chokidar/readdirp/picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="], + "test-exclude/glob/jackspeak/@isaacs/cliui/string-width": ["string-width@5.1.2", "", { "dependencies": { "eastasianwidth": "^0.2.0", "emoji-regex": "^9.2.2", "strip-ansi": "^7.0.1" } }, "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA=="], + + 
"test-exclude/glob/jackspeak/@isaacs/cliui/strip-ansi": ["strip-ansi@7.1.2", "", { "dependencies": { "ansi-regex": "^6.0.1" } }, "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA=="], + + "test-exclude/glob/jackspeak/@isaacs/cliui/wrap-ansi": ["wrap-ansi@8.1.0", "", { "dependencies": { "ansi-styles": "^6.1.0", "string-width": "^5.0.1", "strip-ansi": "^7.0.1" } }, "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ=="], + "lint-staged/listr2/cli-truncate/string-width/strip-ansi/ansi-regex": ["ansi-regex@6.2.2", "", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], "lint-staged/listr2/log-update/cli-cursor/restore-cursor/onetime": ["onetime@7.0.0", "", { "dependencies": { "mimic-function": "^5.0.0" } }, "sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ=="], + + "rimraf/glob/jackspeak/@isaacs/cliui/string-width/emoji-regex": ["emoji-regex@9.2.2", "", {}, "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="], + + "rimraf/glob/jackspeak/@isaacs/cliui/strip-ansi/ansi-regex": ["ansi-regex@6.2.2", "", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], + + "rimraf/glob/jackspeak/@isaacs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@6.2.3", "", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="], + + "test-exclude/glob/jackspeak/@isaacs/cliui/string-width/emoji-regex": ["emoji-regex@9.2.2", "", {}, "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="], + + "test-exclude/glob/jackspeak/@isaacs/cliui/strip-ansi/ansi-regex": ["ansi-regex@6.2.2", "", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], + + 
"test-exclude/glob/jackspeak/@isaacs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@6.2.3", "", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="], } } From 7200421c7621038329228f48a96ddf807ae12ee4 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 13:08:12 -0800 Subject: [PATCH 56/72] Remove plan docs --- docs/COPILOT_SERVER_REFACTOR.md | 936 -------------------------------- 1 file changed, 936 deletions(-) delete mode 100644 docs/COPILOT_SERVER_REFACTOR.md diff --git a/docs/COPILOT_SERVER_REFACTOR.md b/docs/COPILOT_SERVER_REFACTOR.md deleted file mode 100644 index 3184fa3a15..0000000000 --- a/docs/COPILOT_SERVER_REFACTOR.md +++ /dev/null @@ -1,936 +0,0 @@ -# Copilot Server-Side Refactor Plan - -> **Goal**: Move copilot orchestration logic from the browser (React/Zustand) to the Next.js server, enabling both headless API access and a simplified interactive client. - -## Table of Contents - -1. [Executive Summary](#executive-summary) -2. [Current Architecture](#current-architecture) -3. [Target Architecture](#target-architecture) -4. [Scope & Boundaries](#scope--boundaries) -5. [Module Design](#module-design) -6. [Implementation Plan](#implementation-plan) -7. [API Contracts](#api-contracts) -8. [Migration Strategy](#migration-strategy) -9. [Testing Strategy](#testing-strategy) -10. [Risks & Mitigations](#risks--mitigations) -11. 
[File Inventory](#file-inventory) - ---- - -## Executive Summary - -### Problem - -The current copilot implementation in Sim has all orchestration logic in the browser: -- SSE stream parsing happens in the React client -- Tool execution is triggered from the browser -- OAuth tokens are sent to the client -- No headless/API access is possible -- The Zustand store is ~4,200 lines of complex async logic - -### Solution - -Move orchestration to the Next.js server: -- Server parses SSE from copilot backend -- Server executes tools directly (no HTTP round-trips) -- Server forwards events to client (if attached) -- Headless API returns JSON response -- Client store becomes a thin UI layer (~600 lines) - -### Benefits - -| Aspect | Before | After | -|--------|--------|-------| -| Security | OAuth tokens in browser | Tokens stay server-side | -| Headless access | Not possible | Full API support | -| Store complexity | ~4,200 lines | ~600 lines | -| Tool execution | Browser-initiated | Server-side | -| Testing | Complex async | Simple state | -| Bundle size | Large (tool classes) | Minimal | - ---- - -## Current Architecture - -``` -┌─────────────────────────────────────────────────────────────────────────────┐ -│ BROWSER (React) │ -├─────────────────────────────────────────────────────────────────────────────┤ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────┐│ -│ │ Copilot Store (4,200 lines) ││ -│ │ ││ -│ │ • SSE stream parsing (parseSSEStream) ││ -│ │ • Event handlers (sseHandlers, subAgentSSEHandlers) ││ -│ │ • Tool execution logic ││ -│ │ • Client tool instantiation ││ -│ │ • Content block processing ││ -│ │ • State management ││ -│ │ • UI state ││ -│ └─────────────────────────────────────────────────────────────────────────┘│ -│ │ │ -│ │ HTTP calls for tool execution │ -│ ▼ │ -└─────────────────────────────────────────────────────────────────────────────┘ - │ - ▼ 
-┌─────────────────────────────────────────────────────────────────────────────┐ -│ NEXT.JS SERVER │ -├─────────────────────────────────────────────────────────────────────────────┤ -│ │ -│ /api/copilot/chat - Proxy to copilot backend (pass-through) │ -│ /api/copilot/execute-tool - Execute integration tools │ -│ /api/copilot/confirm - Update Redis with tool status │ -│ /api/copilot/tools/mark-complete - Notify copilot backend │ -│ /api/copilot/execute-copilot-server-tool - Execute server tools │ -│ │ -└─────────────────────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────────────────────┐ -│ COPILOT BACKEND (Go) │ -│ copilot.sim.ai │ -├─────────────────────────────────────────────────────────────────────────────┤ -│ │ -│ • LLM orchestration │ -│ • Subagent system (plan, edit, debug, etc.) │ -│ • Tool definitions │ -│ • Conversation management │ -│ • SSE streaming │ -│ │ -└─────────────────────────────────────────────────────────────────────────────┘ -``` - -### Current Flow (Interactive) - -1. User sends message in UI -2. Store calls `/api/copilot/chat` -3. Chat route proxies to copilot backend, streams SSE back -4. **Store parses SSE in browser** -5. On `tool_call` event: - - Store decides if tool needs confirmation - - Store calls `/api/copilot/execute-tool` or `/api/copilot/execute-copilot-server-tool` - - Store calls `/api/copilot/tools/mark-complete` -6. Store updates UI state - -### Problems with Current Flow - -1. **No headless access**: Must have browser client -2. **Security**: OAuth tokens sent to browser for tool execution -3. **Complexity**: All orchestration logic in Zustand store -4. **Performance**: Multiple HTTP round-trips from browser -5. **Reliability**: Browser can disconnect mid-operation -6. 
**Testing**: Hard to test async browser logic - ---- - -## Target Architecture - -``` -┌─────────────────────────────────────────────────────────────────────────────┐ -│ BROWSER (React) │ -├─────────────────────────────────────────────────────────────────────────────┤ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────┐│ -│ │ Copilot Store (~600 lines) ││ -│ │ ││ -│ │ • UI state (messages, toolCalls display) ││ -│ │ • Event listener (receive server events) ││ -│ │ • User actions (send message, confirm/reject) ││ -│ │ • Simple API calls ││ -│ └─────────────────────────────────────────────────────────────────────────┘│ -│ │ │ -│ │ SSE events from server │ -│ │ │ -└─────────────────────────────────────────────────────────────────────────────┘ - ▲ - │ (Optional - headless mode has no client) - │ -┌─────────────────────────────────────────────────────────────────────────────┐ -│ NEXT.JS SERVER │ -├─────────────────────────────────────────────────────────────────────────────┤ -│ │ -│ ┌─────────────────────────────────────────────────────────────────────────┐│ -│ │ Orchestrator Module (NEW) ││ -│ │ lib/copilot/orchestrator/ ││ -│ │ ││ -│ │ • SSE stream parsing ││ -│ │ • Event handlers ││ -│ │ • Tool execution (direct function calls) ││ -│ │ • Response building ││ -│ │ • Event forwarding (to client if attached) ││ -│ └─────────────────────────────────────────────────────────────────────────┘│ -│ │ │ -│ ┌──────┴──────┐ │ -│ │ │ │ -│ ▼ ▼ │ -│ /api/copilot/chat /api/v1/copilot/chat │ -│ (Interactive) (Headless) │ -│ - Session auth - API key auth │ -│ - SSE to client - JSON response │ -│ │ -└─────────────────────────────────────────────────────────────────────────────┘ - │ - │ (Single external HTTP call) - ▼ -┌─────────────────────────────────────────────────────────────────────────────┐ -│ COPILOT BACKEND (Go) │ -│ (UNCHANGED - no modifications) │ -└─────────────────────────────────────────────────────────────────────────────┘ -``` - -### Target 
Flow (Headless) - -1. External client calls `POST /api/v1/copilot/chat` with API key -2. Orchestrator calls copilot backend -3. **Server parses SSE stream** -4. **Server executes tools directly** (no HTTP) -5. Server notifies copilot backend (mark-complete) -6. Server returns JSON response - -### Target Flow (Interactive) - -1. User sends message in UI -2. Store calls `/api/copilot/chat` -3. **Server orchestrates everything** -4. Server forwards events to client via SSE -5. Client just updates UI from events -6. Server returns when complete - ---- - -## Scope & Boundaries - -### In Scope - -| Item | Description | -|------|-------------| -| Orchestrator module | New module in `lib/copilot/orchestrator/` | -| Headless API route | New route `POST /api/v1/copilot/chat` | -| SSE parsing | Move from store to server | -| Tool execution | Direct function calls on server | -| Event forwarding | SSE to client (interactive mode) | -| Store simplification | Reduce to UI-only logic | - -### Out of Scope - -| Item | Reason | -|------|--------| -| Copilot backend (Go) | Separate repo, working correctly | -| Tool definitions | Already work, just called differently | -| LLM providers | Handled by copilot backend | -| Subagent system | Handled by copilot backend | - -### Boundaries - -``` - ┌─────────────────────────────────────┐ - │ MODIFICATION ZONE │ - │ │ - ┌────────────────┼─────────────────────────────────────┼────────────────┐ - │ │ │ │ - │ UNCHANGED │ apps/sim/ │ UNCHANGED │ - │ │ ├── lib/copilot/orchestrator/ │ │ - │ copilot/ │ │ └── (NEW) │ apps/sim/ │ - │ (Go backend) │ ├── app/api/v1/copilot/ │ tools/ │ - │ │ │ └── (NEW) │ (definitions)│ - │ │ ├── app/api/copilot/chat/ │ │ - │ │ │ └── (MODIFIED) │ │ - │ │ └── stores/panel/copilot/ │ │ - │ │ └── (SIMPLIFIED) │ │ - │ │ │ │ - └────────────────┼─────────────────────────────────────┼────────────────┘ - │ │ - └─────────────────────────────────────┘ -``` - ---- - -## Module Design - -### Directory Structure - -``` 
-apps/sim/lib/copilot/orchestrator/ -├── index.ts # Main orchestrator function -├── types.ts # Type definitions -├── sse-parser.ts # Parse SSE stream from copilot backend -├── sse-handlers.ts # Handle each SSE event type -├── tool-executor.ts # Execute tools directly (no HTTP) -├── persistence.ts # Database and Redis operations -└── response-builder.ts # Build final response -``` - -### Module Responsibilities - -#### `types.ts` - -Defines all types used by the orchestrator: - -```typescript -// SSE Events -interface SSEEvent { type, data, subagent?, toolCallId?, toolName? } -type SSEEventType = 'content' | 'tool_call' | 'tool_result' | 'done' | ... - -// Tool State -interface ToolCallState { id, name, status, params?, result?, error? } -type ToolCallStatus = 'pending' | 'executing' | 'success' | 'error' | 'skipped' - -// Streaming Context (internal state during orchestration) -interface StreamingContext { - chatId?, conversationId?, messageId - accumulatedContent, contentBlocks - toolCalls: Map - streamComplete, errors[] -} - -// Orchestrator API -interface OrchestratorRequest { message, workflowId, userId, chatId?, mode?, ... } -interface OrchestratorOptions { autoExecuteTools?, onEvent?, timeout?, ... } -interface OrchestratorResult { success, content, toolCalls[], chatId?, error? } - -// Execution Context (passed to tool executors) -interface ExecutionContext { userId, workflowId, workspaceId?, decryptedEnvVars? 
} -``` - -#### `sse-parser.ts` - -Parses SSE stream into typed events: - -```typescript -async function* parseSSEStream( - reader: ReadableStreamDefaultReader, - decoder: TextDecoder, - abortSignal?: AbortSignal -): AsyncGenerator -``` - -- Handles buffering for partial lines -- Parses JSON from `data:` lines -- Yields typed `SSEEvent` objects -- Supports abort signal - -#### `sse-handlers.ts` - -Handles each SSE event type: - -```typescript -const sseHandlers: Record = { - content: (event, context) => { /* append to accumulated content */ }, - tool_call: async (event, context, execContext, options) => { - /* track tool, execute if autoExecuteTools */ - }, - tool_result: (event, context) => { /* update tool status */ }, - tool_generating: (event, context) => { /* create pending tool */ }, - reasoning: (event, context) => { /* handle thinking blocks */ }, - done: (event, context) => { /* mark stream complete */ }, - error: (event, context) => { /* record error */ }, - // ... etc -} - -const subAgentHandlers: Record = { - // Handlers for events within subagent context -} -``` - -#### `tool-executor.ts` - -Executes tools directly without HTTP: - -```typescript -// Main entry point -async function executeToolServerSide( - toolCall: ToolCallState, - context: ExecutionContext -): Promise - -// Server tools (edit_workflow, search_documentation, etc.) -async function executeServerToolDirect( - toolName: string, - params: Record, - context: ExecutionContext -): Promise - -// Integration tools (slack_send, gmail_read, etc.) 
-async function executeIntegrationToolDirect( - toolCallId: string, - toolName: string, - toolConfig: ToolConfig, - params: Record, - context: ExecutionContext -): Promise - -// Notify copilot backend (external HTTP - required) -async function markToolComplete( - toolCallId: string, - toolName: string, - status: number, - message?: any, - data?: any -): Promise - -// Prepare cached context for tool execution -async function prepareExecutionContext( - userId: string, - workflowId: string -): Promise -``` - -**Key principle**: Internal tool execution uses direct function calls. Only `markToolComplete` makes HTTP call (to copilot backend - external). - -#### `persistence.ts` - -Database and Redis operations: - -```typescript -// Chat persistence -async function createChat(params): Promise<{ id: string }> -async function loadChat(chatId, userId): Promise -async function saveMessages(chatId, messages, options?): Promise -async function updateChatConversationId(chatId, conversationId): Promise - -// Tool confirmation (Redis) -async function setToolConfirmation(toolCallId, status, message?): Promise -async function getToolConfirmation(toolCallId): Promise -``` - -#### `index.ts` - -Main orchestrator function: - -```typescript -async function orchestrateCopilotRequest( - request: OrchestratorRequest, - options: OrchestratorOptions = {} -): Promise { - - // 1. Prepare execution context (cache env vars, etc.) - const execContext = await prepareExecutionContext(userId, workflowId) - - // 2. Handle chat creation/loading - let chatId = await resolveChat(request) - - // 3. Build request payload for copilot backend - const payload = buildCopilotPayload(request) - - // 4. Call copilot backend - const response = await fetch(COPILOT_URL, { body: JSON.stringify(payload) }) - - // 5. Create streaming context - const context = createStreamingContext(chatId) - - // 6. 
Parse and handle SSE stream - for await (const event of parseSSEStream(response.body)) { - // Forward to client if attached - options.onEvent?.(event) - - // Handle event - const handler = getHandler(event) - await handler(event, context, execContext, options) - - if (context.streamComplete) break - } - - // 7. Persist to database - await persistChat(chatId, context) - - // 8. Build and return result - return buildResult(context) -} -``` - ---- - -## Implementation Plan - -### Phase 1: Create Orchestrator Module (3-4 days) - -**Goal**: Build the orchestrator module that can run independently. - -#### Tasks - -1. **Create `types.ts`** (~200 lines) - - [ ] Define SSE event types - - [ ] Define tool call state types - - [ ] Define streaming context type - - [ ] Define orchestrator request/response types - - [ ] Define execution context type - -2. **Create `sse-parser.ts`** (~80 lines) - - [ ] Extract parsing logic from store.ts - - [ ] Add abort signal support - - [ ] Add error handling - -3. **Create `persistence.ts`** (~120 lines) - - [ ] Extract DB operations from chat route - - [ ] Extract Redis operations from confirm route - - [ ] Add chat creation/loading - - [ ] Add message saving - -4. **Create `tool-executor.ts`** (~300 lines) - - [ ] Create `executeToolServerSide()` main entry - - [ ] Create `executeServerToolDirect()` for server tools - - [ ] Create `executeIntegrationToolDirect()` for integration tools - - [ ] Create `markToolComplete()` for copilot backend notification - - [ ] Create `prepareExecutionContext()` for caching - - [ ] Handle OAuth token resolution - - [ ] Handle env var resolution - -5. **Create `sse-handlers.ts`** (~350 lines) - - [ ] Extract handlers from store.ts - - [ ] Adapt for server-side context - - [ ] Add tool execution integration - - [ ] Add subagent handlers - -6. 
**Create `index.ts`** (~250 lines) - - [ ] Create `orchestrateCopilotRequest()` main function - - [ ] Wire together all modules - - [ ] Add timeout handling - - [ ] Add abort signal support - - [ ] Add event forwarding - -#### Deliverables - -- Complete `lib/copilot/orchestrator/` module -- Unit tests for each component -- Integration test for full orchestration - -### Phase 2: Create Headless API Route (1 day) - -**Goal**: Create API endpoint for headless copilot access. - -#### Tasks - -1. **Create route** `app/api/v1/copilot/chat/route.ts` (~100 lines) - - [ ] Add API key authentication - - [ ] Parse and validate request - - [ ] Call orchestrator - - [ ] Return JSON response - -2. **Add to API documentation** - - [ ] Document request format - - [ ] Document response format - - [ ] Document error codes - -#### Deliverables - -- Working `POST /api/v1/copilot/chat` endpoint -- API documentation -- E2E test - -### Phase 3: Wire Interactive Route (2 days) - -**Goal**: Use orchestrator for existing interactive flow. - -#### Tasks - -1. **Modify `/api/copilot/chat/route.ts`** - - [ ] Add feature flag for new vs old flow - - [ ] Call orchestrator with `onEvent` callback - - [ ] Forward events to client via SSE - - [ ] Maintain backward compatibility - -2. **Test both flows** - - [ ] Verify interactive works with new orchestrator - - [ ] Verify old flow still works (feature flag off) - -#### Deliverables - -- Interactive route using orchestrator -- Feature flag for gradual rollout -- No breaking changes - -### Phase 4: Simplify Client Store (2-3 days) - -**Goal**: Remove orchestration logic from client, keep UI-only. - -#### Tasks - -1. **Create simplified store** (new file or gradual refactor) - - [ ] Keep: UI state, messages, tool display - - [ ] Keep: Simple API calls - - [ ] Keep: Event listener - - [ ] Remove: SSE parsing - - [ ] Remove: Tool execution logic - - [ ] Remove: Client tool instantiators - -2. 
**Update components** - - [ ] Update components to use simplified store - - [ ] Remove tool execution from UI components - - [ ] Simplify tool display components - -3. **Remove dead code** - - [ ] Remove unused imports - - [ ] Remove unused helper functions - - [ ] Remove client tool classes (if no longer needed) - -#### Deliverables - -- Simplified store (~600 lines) -- Updated components -- Reduced bundle size - -### Phase 5: Testing & Polish (2-3 days) - -#### Tasks - -1. **E2E testing** - - [ ] Test headless API with various prompts - - [ ] Test interactive with various prompts - - [ ] Test tool execution scenarios - - [ ] Test error handling - - [ ] Test abort/timeout scenarios - -2. **Performance testing** - - [ ] Compare latency (old vs new) - - [ ] Check memory usage - - [ ] Check for connection issues - -3. **Documentation** - - [ ] Update developer docs - - [ ] Add architecture diagram - - [ ] Document new API - -#### Deliverables - -- Comprehensive test suite -- Performance benchmarks -- Complete documentation - ---- - -## API Contracts - -### Headless API - -#### Request - -```http -POST /api/v1/copilot/chat -Content-Type: application/json -X-API-Key: sim_xxx - -{ - "message": "Create a Slack notification workflow", - "workflowId": "wf_abc123", - "chatId": "chat_xyz", // Optional: continue existing chat - "mode": "agent", // Optional: "agent" | "ask" | "plan" - "model": "claude-4-sonnet", // Optional - "autoExecuteTools": true, // Optional: default true - "timeout": 300000 // Optional: default 5 minutes -} -``` - -#### Response (Success) - -```json -{ - "success": true, - "content": "I've created a Slack notification workflow that...", - "toolCalls": [ - { - "id": "tc_001", - "name": "search_patterns", - "status": "success", - "params": { "query": "slack notification" }, - "result": { "patterns": [...] }, - "durationMs": 234 - }, - { - "id": "tc_002", - "name": "edit_workflow", - "status": "success", - "params": { "operations": [...] 
}, - "result": { "blocksAdded": 3 }, - "durationMs": 1523 - } - ], - "chatId": "chat_xyz", - "conversationId": "conv_123" -} -``` - -#### Response (Error) - -```json -{ - "success": false, - "error": "Workflow not found", - "content": "", - "toolCalls": [] -} -``` - -#### Error Codes - -| Status | Error | Description | -|--------|-------|-------------| -| 400 | Invalid request | Missing required fields | -| 401 | Unauthorized | Invalid or missing API key | -| 404 | Workflow not found | Workflow ID doesn't exist | -| 500 | Internal error | Server-side failure | -| 504 | Timeout | Request exceeded timeout | - -### Interactive API (Existing - Modified) - -The existing `/api/copilot/chat` endpoint continues to work but now uses the orchestrator internally. SSE events forwarded to client remain the same format. - ---- - -## Migration Strategy - -### Rollout Plan - -``` -Week 1: Phase 1 (Orchestrator) -├── Day 1-2: Types + SSE Parser -├── Day 3: Tool Executor -└── Day 4-5: Handlers + Main Orchestrator - -Week 2: Phase 2-3 (Routes) -├── Day 1: Headless API route -├── Day 2-3: Wire interactive route -└── Day 4-5: Testing both modes - -Week 3: Phase 4-5 (Cleanup) -├── Day 1-3: Simplify store -├── Day 4: Testing -└── Day 5: Documentation -``` - -### Feature Flags - -```typescript -// lib/copilot/config.ts - -export const COPILOT_FLAGS = { - // Use new orchestrator for interactive mode - USE_SERVER_ORCHESTRATOR: process.env.COPILOT_USE_SERVER_ORCHESTRATOR === 'true', - - // Enable headless API - ENABLE_HEADLESS_API: process.env.COPILOT_ENABLE_HEADLESS_API === 'true', -} -``` - -### Rollback Plan - -If issues arise: -1. Set `COPILOT_USE_SERVER_ORCHESTRATOR=false` -2. Interactive mode falls back to old client-side flow -3. 
Headless API returns 503 Service Unavailable - ---- - -## Testing Strategy - -### Unit Tests - -``` -lib/copilot/orchestrator/ -├── __tests__/ -│ ├── sse-parser.test.ts -│ ├── sse-handlers.test.ts -│ ├── tool-executor.test.ts -│ ├── persistence.test.ts -│ └── index.test.ts -``` - -#### SSE Parser Tests - -```typescript -describe('parseSSEStream', () => { - it('parses content events') - it('parses tool_call events') - it('handles partial lines') - it('handles malformed JSON') - it('respects abort signal') -}) -``` - -#### Tool Executor Tests - -```typescript -describe('executeToolServerSide', () => { - it('executes server tools directly') - it('executes integration tools with OAuth') - it('resolves env var references') - it('handles tool not found') - it('handles execution errors') -}) -``` - -### Integration Tests - -```typescript -describe('orchestrateCopilotRequest', () => { - it('handles simple message without tools') - it('handles message with single tool call') - it('handles message with multiple tool calls') - it('handles subagent tool calls') - it('handles stream errors') - it('respects timeout') - it('forwards events to callback') -}) -``` - -### E2E Tests - -```typescript -describe('POST /api/v1/copilot/chat', () => { - it('returns 401 without API key') - it('returns 400 with invalid request') - it('executes simple ask query') - it('executes workflow modification') - it('handles tool execution') -}) -``` - ---- - -## Risks & Mitigations - -### Risk 1: Breaking Interactive Mode - -**Risk**: Refactoring could break existing interactive copilot. - -**Mitigation**: -- Feature flag for gradual rollout -- Keep old code path available -- Extensive E2E testing -- Staged deployment (internal → beta → production) - -### Risk 2: Tool Execution Differences - -**Risk**: Tool behavior differs between client and server execution. 
- -**Mitigation**: -- Reuse existing tool execution logic (same functions) -- Compare outputs in parallel testing -- Log discrepancies for investigation - -### Risk 3: Performance Regression - -**Risk**: Server-side orchestration could be slower. - -**Mitigation**: -- Actually should be faster (no browser round-trips) -- Benchmark before/after -- Profile critical paths - -### Risk 4: Memory Usage - -**Risk**: Server accumulates state during long-running requests. - -**Mitigation**: -- Set reasonable timeouts -- Clean up context after request -- Monitor memory in production - -### Risk 5: Connection Issues - -**Risk**: Long-running SSE connections could drop. - -**Mitigation**: -- Implement reconnection logic -- Save checkpoints to resume -- Handle partial completions gracefully - -### Risk 6: Process-Local Dedupe - -**Risk**: Tool call/result dedupe caches are in-memory and scoped to a single process, so duplicate events can still appear across ECS tasks. - -**Mitigation**: -- Treat dedupe as best-effort, not global -- Prefer idempotent state updates on the client -- Use Redis-backed stream replay for authoritative ordering - ---- - -## File Inventory - -### New Files - -| File | Lines | Description | -|------|-------|-------------| -| `lib/copilot/orchestrator/types.ts` | ~200 | Type definitions | -| `lib/copilot/orchestrator/sse-parser.ts` | ~80 | SSE stream parsing | -| `lib/copilot/orchestrator/sse-handlers.ts` | ~350 | Event handlers | -| `lib/copilot/orchestrator/tool-executor.ts` | ~300 | Tool execution | -| `lib/copilot/orchestrator/persistence.ts` | ~120 | DB/Redis operations | -| `lib/copilot/orchestrator/index.ts` | ~250 | Main orchestrator | -| `app/api/v1/copilot/chat/route.ts` | ~100 | Headless API | -| **Total New** | **~1,400** | | - -### Modified Files - -| File | Change | -|------|--------| -| `app/api/copilot/chat/route.ts` | Use orchestrator (optional) | -| `stores/panel/copilot/store.ts` | Simplify to ~600 lines | - -### Deleted Code (from 
store.ts) - -| Section | Lines Removed | -|---------|---------------| -| SSE parsing logic | ~150 | -| `sseHandlers` object | ~750 | -| `subAgentSSEHandlers` | ~280 | -| Tool execution logic | ~400 | -| Client tool instantiators | ~120 | -| Content block helpers | ~200 | -| Streaming context | ~100 | -| **Total Removed** | **~2,000** | - -### Net Change - -``` -New code: +1,400 lines (orchestrator module) -Removed code: -2,000 lines (from store) -Modified code: ~200 lines (route changes) -─────────────────────────────────────── -Net change: -400 lines (cleaner, more maintainable) -``` - ---- - -## Appendix: Code Extraction Map - -### From `stores/panel/copilot/store.ts` - -| Source Lines | Destination | Notes | -|--------------|-------------|-------| -| 900-1050 (parseSSEStream) | `sse-parser.ts` | Adapt for server | -| 1120-1867 (sseHandlers) | `sse-handlers.ts` | Remove Zustand deps | -| 1940-2217 (subAgentSSEHandlers) | `sse-handlers.ts` | Merge with above | -| 1365-1583 (tool execution) | `tool-executor.ts` | Direct calls | -| 330-380 (StreamingContext) | `types.ts` | Clean up | -| 3328-3648 (handleStreamingResponse) | `index.ts` | Main loop | - -### From `app/api/copilot/execute-tool/route.ts` - -| Source Lines | Destination | Notes | -|--------------|-------------|-------| -| 30-247 (POST handler) | `tool-executor.ts` | Extract core logic | - -### From `app/api/copilot/confirm/route.ts` - -| Source Lines | Destination | Notes | -|--------------|-------------|-------| -| 28-89 (updateToolCallStatus) | `persistence.ts` | Redis operations | - ---- - -## Approval & Sign-off - -- [ ] Technical review complete -- [ ] Security review complete -- [ ] Performance impact assessed -- [ ] Rollback plan approved -- [ ] Testing plan approved - ---- - -*Document created: January 2026* -*Last updated: January 2026* - From ab39a4f44fb8089d36a40a6b2c711c6f00ac9b26 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 13:12:01 -0800 Subject: [PATCH 57/72] Fix 
hardcoded ff --- apps/sim/lib/core/config/feature-flags.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/apps/sim/lib/core/config/feature-flags.ts b/apps/sim/lib/core/config/feature-flags.ts index 81bc4398f9..50491e4200 100644 --- a/apps/sim/lib/core/config/feature-flags.ts +++ b/apps/sim/lib/core/config/feature-flags.ts @@ -1,7 +1,7 @@ /** * Environment utility functions for consistent environment detection across the application */ -import { env, isFalsy, isTruthy } from './env' +import { env, getEnv, isFalsy, isTruthy } from './env' /** * Is the application running in production mode @@ -21,9 +21,9 @@ export const isTest = env.NODE_ENV === 'test' /** * Is this the hosted version of the application */ -export const isHosted = true -// getEnv('NEXT_PUBLIC_APP_URL') === 'https://www.sim.ai' || -// getEnv('NEXT_PUBLIC_APP_URL') === 'https://www.staging.sim.ai' +export const isHosted = +getEnv('NEXT_PUBLIC_APP_URL') === 'https://www.sim.ai' || +getEnv('NEXT_PUBLIC_APP_URL') === 'https://www.staging.sim.ai' /** * Is billing enforcement enabled From dba4e61a35e0e2b96ba9db00a79af0293023e6bf Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 13:16:14 -0800 Subject: [PATCH 58/72] Fix dropped provider --- apps/sim/lib/copilot/chat-payload.ts | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/apps/sim/lib/copilot/chat-payload.ts b/apps/sim/lib/copilot/chat-payload.ts index 9f12f3730f..951e5134c6 100644 --- a/apps/sim/lib/copilot/chat-payload.ts +++ b/apps/sim/lib/copilot/chat-payload.ts @@ -70,6 +70,16 @@ function buildProviderConfig(selectedModel: string): CopilotProviderConfig | und } } + if (providerEnv === 'azure-anthropic') { + return { + provider: 'azure-anthropic', + model: envModel, + apiKey: env.AZURE_ANTHROPIC_API_KEY, + apiVersion: env.AZURE_ANTHROPIC_API_VERSION, + endpoint: env.AZURE_ANTHROPIC_ENDPOINT, + } + } + if (providerEnv === 'vertex') { return { provider: 'vertex', From 
b14e8448681375716cd689fc550d61af9471a35a Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 13:16:26 -0800 Subject: [PATCH 59/72] Fix lint --- apps/sim/lib/core/config/feature-flags.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/sim/lib/core/config/feature-flags.ts b/apps/sim/lib/core/config/feature-flags.ts index 50491e4200..9f746c5b12 100644 --- a/apps/sim/lib/core/config/feature-flags.ts +++ b/apps/sim/lib/core/config/feature-flags.ts @@ -22,8 +22,8 @@ export const isTest = env.NODE_ENV === 'test' * Is this the hosted version of the application */ export const isHosted = -getEnv('NEXT_PUBLIC_APP_URL') === 'https://www.sim.ai' || -getEnv('NEXT_PUBLIC_APP_URL') === 'https://www.staging.sim.ai' + getEnv('NEXT_PUBLIC_APP_URL') === 'https://www.sim.ai' || + getEnv('NEXT_PUBLIC_APP_URL') === 'https://www.staging.sim.ai' /** * Is billing enforcement enabled From 395f8901fe5536beb38fc198142b1fb32507dafd Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 13:24:49 -0800 Subject: [PATCH 60/72] Fix tests --- apps/sim/app/api/copilot/api-keys/route.test.ts | 1 + apps/sim/app/api/copilot/confirm/route.test.ts | 7 +++---- apps/sim/app/api/copilot/stats/route.test.ts | 1 + .../sim/lib/copilot/orchestrator/sse-handlers.test.ts | 11 +++++++++-- 4 files changed, 14 insertions(+), 6 deletions(-) diff --git a/apps/sim/app/api/copilot/api-keys/route.test.ts b/apps/sim/app/api/copilot/api-keys/route.test.ts index 8b8f630a09..7ec617abf1 100644 --- a/apps/sim/app/api/copilot/api-keys/route.test.ts +++ b/apps/sim/app/api/copilot/api-keys/route.test.ts @@ -19,6 +19,7 @@ describe('Copilot API Keys API Route', () => { vi.doMock('@/lib/copilot/constants', () => ({ SIM_AGENT_API_URL_DEFAULT: 'https://agent.sim.example.com', + SIM_AGENT_API_URL: 'https://agent.sim.example.com', })) vi.doMock('@/lib/core/config/env', async () => { diff --git a/apps/sim/app/api/copilot/confirm/route.test.ts 
b/apps/sim/app/api/copilot/confirm/route.test.ts index 5bb9efd684..78c46982ed 100644 --- a/apps/sim/app/api/copilot/confirm/route.test.ts +++ b/apps/sim/app/api/copilot/confirm/route.test.ts @@ -139,7 +139,6 @@ describe('Copilot Confirm API Route', () => { status: 'success', }) - expect(mockRedisExists).toHaveBeenCalled() expect(mockRedisSet).toHaveBeenCalled() }) @@ -256,11 +255,11 @@ describe('Copilot Confirm API Route', () => { expect(responseData.error).toBe('Failed to update tool call status or tool call not found') }) - it('should return 400 when tool call is not found in Redis', async () => { + it('should return 400 when Redis set fails', async () => { const authMocks = mockAuth() authMocks.setAuthenticated() - mockRedisExists.mockResolvedValue(0) + mockRedisSet.mockRejectedValueOnce(new Error('Redis set failed')) const req = createMockRequest('POST', { toolCallId: 'non-existent-tool', @@ -279,7 +278,7 @@ describe('Copilot Confirm API Route', () => { const authMocks = mockAuth() authMocks.setAuthenticated() - mockRedisExists.mockRejectedValue(new Error('Redis connection failed')) + mockRedisSet.mockRejectedValueOnce(new Error('Redis connection failed')) const req = createMockRequest('POST', { toolCallId: 'tool-call-123', diff --git a/apps/sim/app/api/copilot/stats/route.test.ts b/apps/sim/app/api/copilot/stats/route.test.ts index 35a0ad1dfc..1732a686fe 100644 --- a/apps/sim/app/api/copilot/stats/route.test.ts +++ b/apps/sim/app/api/copilot/stats/route.test.ts @@ -40,6 +40,7 @@ describe('Copilot Stats API Route', () => { vi.doMock('@/lib/copilot/constants', () => ({ SIM_AGENT_API_URL_DEFAULT: 'https://agent.sim.example.com', + SIM_AGENT_API_URL: 'https://agent.sim.example.com', })) vi.doMock('@/lib/core/config/env', async () => { diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers.test.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers.test.ts index cc2586b2cc..fcc5abf433 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers.test.ts +++ 
b/apps/sim/lib/copilot/orchestrator/sse-handlers.test.ts @@ -7,12 +7,19 @@ import { beforeEach, describe, expect, it, vi } from 'vitest' vi.mock('@sim/logger', () => loggerMock) -const executeToolServerSide = vi.fn() -const markToolComplete = vi.fn() +const { executeToolServerSide, markToolComplete, isIntegrationTool, isToolAvailableOnSimSide } = + vi.hoisted(() => ({ + executeToolServerSide: vi.fn(), + markToolComplete: vi.fn(), + isIntegrationTool: vi.fn().mockReturnValue(false), + isToolAvailableOnSimSide: vi.fn().mockReturnValue(true), + })) vi.mock('@/lib/copilot/orchestrator/tool-executor', () => ({ executeToolServerSide, markToolComplete, + isIntegrationTool, + isToolAvailableOnSimSide, })) import { sseHandlers } from '@/lib/copilot/orchestrator/sse-handlers' From 7458bbdc208e2c80fff5573a0d7414632fff8e4c Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 13:59:28 -0800 Subject: [PATCH 61/72] Fix dead messages array --- apps/sim/lib/copilot/chat-payload.ts | 45 ---------------------------- 1 file changed, 45 deletions(-) diff --git a/apps/sim/lib/copilot/chat-payload.ts b/apps/sim/lib/copilot/chat-payload.ts index 951e5134c6..54763ee028 100644 --- a/apps/sim/lib/copilot/chat-payload.ts +++ b/apps/sim/lib/copilot/chat-payload.ts @@ -46,13 +46,6 @@ interface CredentialsPayload { } } -type MessageContent = string | Array<{ type: string; text?: string; [key: string]: unknown }> - -interface ConversationMessage { - role: string - content: MessageContent -} - function buildProviderConfig(selectedModel: string): CopilotProviderConfig | undefined { const defaults = getCopilotModel('chat') const envModel = env.COPILOT_MODEL || defaults.model @@ -113,12 +106,10 @@ export async function buildCopilotRequestPayload( userId, userMessageId, mode, - conversationHistory = [], contexts, fileAttachments, commands, chatId, - implicitFeedback, } = params const selectedModel = options.selectedModel @@ -129,42 +120,6 @@ export async function 
buildCopilotRequestPayload( const processedFileContents = await processFileAttachments(fileAttachments ?? [], userId) - const messages: ConversationMessage[] = [] - for (const msg of conversationHistory as Array>) { - const msgAttachments = msg.fileAttachments as Array> | undefined - if (Array.isArray(msgAttachments) && msgAttachments.length > 0) { - const content: Array<{ type: string; text?: string; [key: string]: unknown }> = [ - { type: 'text', text: msg.content as string }, - ] - const processedHistoricalAttachments = await processFileAttachments( - (msgAttachments as BuildPayloadParams['fileAttachments']) ?? [], - userId - ) - for (const fileContent of processedHistoricalAttachments) { - content.push(fileContent) - } - messages.push({ role: msg.role as string, content }) - } else { - messages.push({ role: msg.role as string, content: msg.content as string }) - } - } - - if (implicitFeedback) { - messages.push({ role: 'system', content: implicitFeedback }) - } - - if (processedFileContents.length > 0) { - const content: Array<{ type: string; text?: string; [key: string]: unknown }> = [ - { type: 'text', text: message }, - ] - for (const fileContent of processedFileContents) { - content.push(fileContent) - } - messages.push({ role: 'user', content }) - } else { - messages.push({ role: 'user', content: message }) - } - const integrationTools: ToolSchema[] = [] let credentials: CredentialsPayload | null = null From 48c9a3a33eff8fedde29c718b78865069d58a138 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 15:18:35 -0800 Subject: [PATCH 62/72] Fix discovery --- .../tools/client/tool-display-registry.ts | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/apps/sim/lib/copilot/tools/client/tool-display-registry.ts b/apps/sim/lib/copilot/tools/client/tool-display-registry.ts index b106ea59f1..539414e963 100644 --- a/apps/sim/lib/copilot/tools/client/tool-display-registry.ts +++ 
b/apps/sim/lib/copilot/tools/client/tool-display-registry.ts @@ -306,6 +306,26 @@ const META_custom_tool: ToolMetadata = { }, } +const META_build: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Building', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Building', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Building', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Built', icon: Wrench }, + [ClientToolCallState.error]: { text: 'Failed to build', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped build', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted build', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Building', + completedLabel: 'Built', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} + const META_debug: ToolMetadata = { displayNames: { [ClientToolCallState.generating]: { text: 'Debugging', icon: Loader2 }, @@ -326,6 +346,26 @@ const META_debug: ToolMetadata = { }, } +const META_discovery: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Discovering', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Discovering', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Discovering', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Discovered', icon: Search }, + [ClientToolCallState.error]: { text: 'Failed to discover', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped discovery', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted discovery', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Discovering', + completedLabel: 'Discovered', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} + const META_deploy: ToolMetadata = { displayNames: { [ClientToolCallState.generating]: { text: 'Deploying', icon: Loader2 }, @@ -2252,9 +2292,11 @@ const TOOL_METADATA_BY_ID: Record = { checkoff_todo: META_checkoff_todo, 
crawl_website: META_crawl_website, create_workspace_mcp_server: META_create_workspace_mcp_server, + build: META_build, custom_tool: META_custom_tool, debug: META_debug, deploy: META_deploy, + discovery: META_discovery, deploy_api: META_deploy_api, deploy_chat: META_deploy_chat, deploy_mcp: META_deploy_mcp, From 7153141ee4c6ccaf23fdc04bb4c47e637662b0ec Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 16:29:00 -0800 Subject: [PATCH 63/72] Fix run workflow --- .../app/api/workflows/[id]/execute/route.ts | 54 +++-- .../components/tool-call/tool-call.tsx | 12 + .../utils/workflow-execution-utils.ts | 15 ++ apps/sim/lib/copilot/client-sse/handlers.ts | 48 ++++ .../copilot/client-sse/run-tool-execution.ts | 220 +++++++++++++++++ .../copilot/client-sse/subagent-handlers.ts | 11 + .../orchestrator/sse-handlers/handlers.ts | 72 +++++- .../sse-handlers/tool-execution.ts | 31 +++ .../tools/client/tool-display-registry.ts | 223 ++++++++++++++++++ 9 files changed, 670 insertions(+), 16 deletions(-) create mode 100644 apps/sim/lib/copilot/client-sse/run-tool-execution.ts diff --git a/apps/sim/app/api/workflows/[id]/execute/route.ts b/apps/sim/app/api/workflows/[id]/execute/route.ts index 06984a3e22..4564ff8be4 100644 --- a/apps/sim/app/api/workflows/[id]/execute/route.ts +++ b/apps/sim/app/api/workflows/[id]/execute/route.ts @@ -62,20 +62,23 @@ const ExecuteWorkflowSchema = z.object({ runFromBlock: z .object({ startBlockId: z.string().min(1, 'Start block ID is required'), - sourceSnapshot: z.object({ - blockStates: z.record(z.any()), - executedBlocks: z.array(z.string()), - blockLogs: z.array(z.any()), - decisions: z.object({ - router: z.record(z.string()), - condition: z.record(z.string()), - }), - completedLoops: z.array(z.string()), - loopExecutions: z.record(z.any()).optional(), - parallelExecutions: z.record(z.any()).optional(), - parallelBlockMapping: z.record(z.any()).optional(), - activeExecutionPath: z.array(z.string()), - }), + sourceSnapshot: z + 
.object({ + blockStates: z.record(z.any()), + executedBlocks: z.array(z.string()), + blockLogs: z.array(z.any()), + decisions: z.object({ + router: z.record(z.string()), + condition: z.record(z.string()), + }), + completedLoops: z.array(z.string()), + loopExecutions: z.record(z.any()).optional(), + parallelExecutions: z.record(z.any()).optional(), + parallelBlockMapping: z.record(z.any()).optional(), + activeExecutionPath: z.array(z.string()), + }) + .optional(), + executionId: z.string().optional(), }) .optional(), }) @@ -269,9 +272,30 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id: base64MaxBytes, workflowStateOverride, stopAfterBlockId, - runFromBlock, + runFromBlock: rawRunFromBlock, } = validation.data + // Resolve runFromBlock snapshot from executionId if needed + let runFromBlock = rawRunFromBlock + if (runFromBlock && !runFromBlock.sourceSnapshot && runFromBlock.executionId) { + const { + getExecutionState, + getLatestExecutionState, + } = await import('@/lib/workflows/executor/execution-state') + const snapshot = runFromBlock.executionId === 'latest' + ? await getLatestExecutionState(id) + : await getExecutionState(runFromBlock.executionId) + if (!snapshot) { + return NextResponse.json( + { + error: `No execution state found for ${runFromBlock.executionId === 'latest' ? 'workflow' : `execution ${runFromBlock.executionId}`}. 
Run the full workflow first.`, + }, + { status: 400 } + ) + } + runFromBlock = { startBlockId: runFromBlock.startBlockId, sourceSnapshot: snapshot } + } + // For API key and internal JWT auth, the entire body is the input (except for our control fields) // For session auth, the input is explicitly provided in the input field const input = diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx index 98fd19a7e6..54fe62fc4f 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx @@ -18,6 +18,10 @@ import { LoopTool } from '@/app/workspace/[workspaceId]/w/[workflowId]/component import { ParallelTool } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/subflows/parallel/parallel-config' import { getDisplayValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/workflow-block' import { getBlock } from '@/blocks/registry' +import { + CLIENT_EXECUTABLE_RUN_TOOLS, + executeRunToolOnClient, +} from '@/lib/copilot/client-sse/run-tool-execution' import type { CopilotToolCall } from '@/stores/panel' import { useCopilotStore } from '@/stores/panel' import type { SubAgentContentBlock } from '@/stores/panel/copilot/types' @@ -1277,6 +1281,14 @@ async function handleRun( setToolCallState(toolCall, 'executing', editedParams ? { params: editedParams } : undefined) onStateChange?.('executing') await sendToolDecision(toolCall.id, 'accepted') + + // Client-executable run tools: execute on the client for real-time feedback + // (block pulsing, console logs, stop button). 
The server defers execution + // for these tools; the client reports back via mark-complete. + if (CLIENT_EXECUTABLE_RUN_TOOLS.has(toolCall.name)) { + const params = editedParams || toolCall.params || {} + executeRunToolOnClient(toolCall.id, toolCall.name, params) + } } async function handleSkip(toolCall: CopilotToolCall, setToolCallState: any, onStateChange?: any) { diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.ts index 0d0597f9ac..03eb068b2b 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.ts +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.ts @@ -11,6 +11,12 @@ export interface WorkflowExecutionOptions { executionId?: string onBlockComplete?: (blockId: string, output: any) => Promise overrideTriggerType?: 'chat' | 'manual' | 'api' + stopAfterBlockId?: string + /** For run_from_block / run_block: start from a specific block using cached state */ + runFromBlock?: { + startBlockId: string + executionId?: string + } } /** @@ -39,6 +45,15 @@ export async function executeWorkflowWithFullLogging( triggerType: options.overrideTriggerType || 'manual', useDraftState: true, isClientSession: true, + ...(options.stopAfterBlockId ? { stopAfterBlockId: options.stopAfterBlockId } : {}), + ...(options.runFromBlock + ? 
{ + runFromBlock: { + startBlockId: options.runFromBlock.startBlockId, + executionId: options.runFromBlock.executionId || 'latest', + }, + } + : {}), } const response = await fetch(`/api/workflows/${activeWorkflowId}/execute`, { diff --git a/apps/sim/lib/copilot/client-sse/handlers.ts b/apps/sim/lib/copilot/client-sse/handlers.ts index cc294fb602..adbde8f6e9 100644 --- a/apps/sim/lib/copilot/client-sse/handlers.ts +++ b/apps/sim/lib/copilot/client-sse/handlers.ts @@ -16,10 +16,15 @@ import { useWorkflowDiffStore } from '@/stores/workflow-diff/store' import { useWorkflowRegistry } from '@/stores/workflows/registry/store' import type { WorkflowState } from '@/stores/workflows/workflow/types' import { appendTextBlock, beginThinkingBlock, finalizeThinkingBlock } from './content-blocks' +import { + CLIENT_EXECUTABLE_RUN_TOOLS, + executeRunToolOnClient, +} from './run-tool-execution' import type { ClientContentBlock, ClientStreamingContext } from './types' const logger = createLogger('CopilotClientSseHandlers') const TEXT_BLOCK_TYPE = 'text' + const MAX_BATCH_INTERVAL = 50 const MIN_BATCH_INTERVAL = 16 const MAX_QUEUE_SIZE = 5 @@ -408,6 +413,39 @@ export const sseHandlers: Record = { }) } } + + // Generate API key: update deployment status with the new key + if (targetState === ClientToolCallState.success && current.name === 'generate_api_key') { + try { + const resultPayload = asRecord( + data?.result || eventData.result || eventData.data || data?.data + ) + const input = asRecord(current.params) + const workflowId = + (input?.workflowId as string) || useWorkflowRegistry.getState().activeWorkflowId + const apiKey = (resultPayload?.apiKey || resultPayload?.key) as string | undefined + if (workflowId) { + const existingStatus = + useWorkflowRegistry.getState().getWorkflowDeploymentStatus(workflowId) + useWorkflowRegistry + .getState() + .setDeploymentStatus( + workflowId, + existingStatus?.isDeployed ?? 
false, + existingStatus?.deployedAt, + apiKey + ) + logger.info('[SSE] Updated deployment status with API key', { + workflowId, + hasKey: !!apiKey, + }) + } + } catch (err) { + logger.warn('[SSE] Failed to hydrate API key status', { + error: err instanceof Error ? err.message : String(err), + }) + } + } } for (let i = 0; i < context.contentBlocks.length; i++) { @@ -588,6 +626,16 @@ export const sseHandlers: Record = { sendAutoAcceptConfirmation(id) } + // Client-executable run tools: execute on the client for real-time feedback + // (block pulsing, console logs, stop button). The server defers execution + // for these tools in interactive mode; the client reports back via mark-complete. + if ( + CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName) && + initialState === ClientToolCallState.executing + ) { + executeRunToolOnClient(id, toolName, args || existing?.params || {}) + } + // OAuth: dispatch event to open the OAuth connect modal if (toolName === 'oauth_request_access' && args && typeof window !== 'undefined') { try { diff --git a/apps/sim/lib/copilot/client-sse/run-tool-execution.ts b/apps/sim/lib/copilot/client-sse/run-tool-execution.ts new file mode 100644 index 0000000000..3836eecacc --- /dev/null +++ b/apps/sim/lib/copilot/client-sse/run-tool-execution.ts @@ -0,0 +1,220 @@ +import { createLogger } from '@sim/logger' +import { v4 as uuidv4 } from 'uuid' +import { executeWorkflowWithFullLogging } from '@/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils' +import { COPILOT_CONFIRM_API_PATH } from '@/lib/copilot/constants' +import { resolveToolDisplay } from '@/lib/copilot/store-utils' +import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' +import { useExecutionStore } from '@/stores/execution/store' +import { useCopilotStore } from '@/stores/panel/copilot/store' +import { useWorkflowRegistry } from '@/stores/workflows/registry/store' + +const logger = createLogger('CopilotRunToolExecution') + +/** + * Run tools 
that execute client-side for real-time feedback + * (block pulsing, logs, stop button, etc.). + */ +export const CLIENT_EXECUTABLE_RUN_TOOLS = new Set([ + 'run_workflow', + 'run_workflow_until_block', + 'run_from_block', + 'run_block', +]) + +/** + * Execute a run tool on the client side using the streaming execute endpoint. + * This gives full interactive feedback: block pulsing, console logs, stop button. + * + * Mirrors staging's RunWorkflowClientTool.handleAccept(): + * 1. Execute via executeWorkflowWithFullLogging + * 2. Update client tool state directly (success/error) + * 3. Report completion to server via /api/copilot/confirm (Redis), + * where the server-side handler picks it up and tells Go + */ +export function executeRunToolOnClient( + toolCallId: string, + toolName: string, + params: Record +): void { + doExecuteRunTool(toolCallId, toolName, params).catch((err) => { + logger.error('[RunTool] Unhandled error in client-side run tool execution', { + toolCallId, + toolName, + error: err instanceof Error ? err.message : String(err), + }) + }) +} + +async function doExecuteRunTool( + toolCallId: string, + toolName: string, + params: Record +): Promise { + const { isExecuting, setIsExecuting } = useExecutionStore.getState() + + if (isExecuting) { + logger.warn('[RunTool] Execution prevented: already executing', { toolCallId, toolName }) + setToolState(toolCallId, ClientToolCallState.error) + await reportCompletion(toolCallId, false, 'Workflow is already executing. 
Try again later') + return + } + + const { activeWorkflowId } = useWorkflowRegistry.getState() + if (!activeWorkflowId) { + logger.warn('[RunTool] Execution prevented: no active workflow', { toolCallId, toolName }) + setToolState(toolCallId, ClientToolCallState.error) + await reportCompletion(toolCallId, false, 'No active workflow found') + return + } + + // Extract params for all tool types + const workflowInput = (params.workflow_input || params.input || undefined) as + | Record + | undefined + + const stopAfterBlockId = (() => { + if (toolName === 'run_workflow_until_block') return params.stopAfterBlockId as string | undefined + if (toolName === 'run_block') return params.blockId as string | undefined + return undefined + })() + + const runFromBlock = (() => { + if (toolName === 'run_from_block' && params.startBlockId) { + return { + startBlockId: params.startBlockId as string, + executionId: (params.executionId as string | undefined) || 'latest', + } + } + if (toolName === 'run_block' && params.blockId) { + return { + startBlockId: params.blockId as string, + executionId: (params.executionId as string | undefined) || 'latest', + } + } + return undefined + })() + + setIsExecuting(true) + const executionId = uuidv4() + const executionStartTime = new Date().toISOString() + + logger.info('[RunTool] Starting client-side workflow execution', { + toolCallId, + toolName, + executionId, + activeWorkflowId, + hasInput: !!workflowInput, + stopAfterBlockId, + runFromBlock: runFromBlock ? 
{ startBlockId: runFromBlock.startBlockId } : undefined, + }) + + try { + const result = await executeWorkflowWithFullLogging({ + workflowInput, + executionId, + stopAfterBlockId, + runFromBlock, + }) + + // Determine success (same logic as staging's RunWorkflowClientTool) + let succeeded = true + let errorMessage: string | undefined + try { + if (result && typeof result === 'object' && 'success' in (result as any)) { + succeeded = Boolean((result as any).success) + if (!succeeded) { + errorMessage = (result as any)?.error || (result as any)?.output?.error + } + } else if ( + result && + typeof result === 'object' && + 'execution' in (result as any) && + (result as any).execution + ) { + succeeded = Boolean((result as any).execution.success) + if (!succeeded) { + errorMessage = + (result as any).execution?.error || (result as any).execution?.output?.error + } + } + } catch {} + + if (succeeded) { + logger.info('[RunTool] Workflow execution succeeded', { toolCallId, toolName }) + setToolState(toolCallId, ClientToolCallState.success) + await reportCompletion( + toolCallId, + true, + `Workflow execution completed. Started at: ${executionStartTime}` + ) + } else { + const msg = errorMessage || 'Workflow execution failed' + logger.error('[RunTool] Workflow execution failed', { toolCallId, toolName, error: msg }) + setToolState(toolCallId, ClientToolCallState.error) + await reportCompletion(toolCallId, false, msg) + } + } catch (err) { + const msg = err instanceof Error ? err.message : String(err) + logger.error('[RunTool] Workflow execution threw', { toolCallId, toolName, error: msg }) + setToolState(toolCallId, ClientToolCallState.error) + await reportCompletion(toolCallId, false, msg) + } finally { + setIsExecuting(false) + } +} + +/** Update the tool call state directly in the copilot store (like staging's setState). 
*/ +function setToolState(toolCallId: string, state: ClientToolCallState): void { + try { + const store = useCopilotStore.getState() + const current = store.toolCallsById[toolCallId] + if (!current) return + const updated = { + ...store.toolCallsById, + [toolCallId]: { + ...current, + state, + display: resolveToolDisplay(current.name, state, toolCallId, current.params), + }, + } + useCopilotStore.setState({ toolCallsById: updated }) + } catch (err) { + logger.warn('[RunTool] Failed to update tool state', { + toolCallId, + state, + error: err instanceof Error ? err.message : String(err), + }) + } +} + +/** + * Report tool completion to the server via the existing /api/copilot/confirm endpoint. + * This writes {status: 'success'|'error', message} to Redis. The server-side handler + * is polling Redis via waitForToolCompletion() and will pick this up, then fire-and-forget + * markToolComplete to the Go backend. + */ +async function reportCompletion( + toolCallId: string, + success: boolean, + message?: string +): Promise { + try { + const res = await fetch(COPILOT_CONFIRM_API_PATH, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + toolCallId, + status: success ? 'success' : 'error', + message: message || (success ? 'Tool completed' : 'Tool failed'), + }), + }) + if (!res.ok) { + logger.warn('[RunTool] reportCompletion failed', { toolCallId, status: res.status }) + } + } catch (err) { + logger.error('[RunTool] reportCompletion error', { + toolCallId, + error: err instanceof Error ? 
err.message : String(err), + }) + } +} diff --git a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts index aa07b21d35..394c11f6d4 100644 --- a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts +++ b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts @@ -15,6 +15,10 @@ import { sseHandlers, updateStreamingMessage, } from './handlers' +import { + CLIENT_EXECUTABLE_RUN_TOOLS, + executeRunToolOnClient, +} from './run-tool-execution' import type { ClientStreamingContext } from './types' const logger = createLogger('CopilotClientSubagentHandlers') @@ -245,6 +249,13 @@ export const subAgentSSEHandlers: Record = { if (isAutoAllowed) { sendAutoAcceptConfirmation(id) } + + // Client-executable run tools: execute on the client for real-time feedback. + // The server defers execution in interactive mode; we execute here and + // report back via mark-complete. + if (CLIENT_EXECUTABLE_RUN_TOOLS.has(name)) { + executeRunToolOnClient(id, name, args || {}) + } }, tool_result: (data, context, get, set) => { diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts index 111fe20477..6e0b28cfcc 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts @@ -20,10 +20,27 @@ import type { StreamingContext, ToolCallState, } from '@/lib/copilot/orchestrator/types' -import { executeToolAndReport, isInterruptToolName, waitForToolDecision } from './tool-execution' +import { + executeToolAndReport, + isInterruptToolName, + waitForToolCompletion, + waitForToolDecision, +} from './tool-execution' const logger = createLogger('CopilotSseHandlers') +/** + * Run tools that can be executed client-side for real-time feedback + * (block pulsing, logs, stop button). When interactive, the server defers + * execution to the browser client instead of running executeWorkflow directly. 
+ */ +const CLIENT_EXECUTABLE_RUN_TOOLS = new Set([ + 'run_workflow', + 'run_workflow_until_block', + 'run_from_block', + 'run_block', +]) + // Normalization + dedupe helpers live in sse-utils to keep server/client in sync. function inferToolSuccess(data: Record | undefined): { @@ -182,6 +199,35 @@ export const sseHandlers: Record = { options.abortSignal ) if (decision?.status === 'accepted' || decision?.status === 'success') { + // Client-executable run tools: defer execution to the browser client. + // The client calls executeWorkflowWithFullLogging for real-time feedback + // (block pulsing, logs, stop button) and reports completion via + // /api/copilot/confirm with status success/error. We poll Redis for + // that completion signal, then fire-and-forget markToolComplete to Go. + if (CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName)) { + toolCall.status = 'executing' + const completion = await waitForToolCompletion( + toolCallId, + options.timeout || STREAM_TIMEOUT_MS, + options.abortSignal + ) + const success = completion?.status === 'success' + toolCall.status = success ? 'success' : 'error' + toolCall.endTime = Date.now() + const msg = + completion?.message || (success ? 'Tool completed' : 'Tool failed or timed out') + // Fire-and-forget: tell Go backend the tool is done + // (must NOT await — see deadlock note in executeToolAndReport) + markToolComplete(toolCall.id, toolCall.name, success ? 200 : 500, msg).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (run tool)', { + toolCallId: toolCall.id, + toolName: toolCall.name, + error: err instanceof Error ? err.message : String(err), + }) + }) + markToolResultSeen(toolCallId) + return + } await executeToolAndReport(toolCallId, context, execContext, options) return } @@ -435,6 +481,30 @@ export const subAgentHandlers: Record = { } } + // Client-executable run tools in interactive mode: defer to client. + // Same pattern as main handler: wait for client completion, then tell Go. 
+ if (options.interactive === true && CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName)) { + toolCall.status = 'executing' + const completion = await waitForToolCompletion( + toolCallId, + options.timeout || STREAM_TIMEOUT_MS, + options.abortSignal + ) + const success = completion?.status === 'success' + toolCall.status = success ? 'success' : 'error' + toolCall.endTime = Date.now() + const msg = completion?.message || (success ? 'Tool completed' : 'Tool failed or timed out') + markToolComplete(toolCall.id, toolCall.name, success ? 200 : 500, msg).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (subagent run tool)', { + toolCallId: toolCall.id, + toolName: toolCall.name, + error: err instanceof Error ? err.message : String(err), + }) + }) + markToolResultSeen(toolCallId) + return + } + if (options.autoExecuteTools !== false) { await executeToolAndReport(toolCallId, context, execContext, options) } diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts index 80c4c60361..8c48405ade 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts @@ -144,3 +144,34 @@ export async function waitForToolDecision( } return null } + +/** + * Wait for a tool completion signal (success/error) from the client. + * Unlike waitForToolDecision which returns on any status, this ignores + * intermediate statuses like 'accepted'/'rejected'/'background' and only + * returns when the client reports final completion via success/error. + * + * Used for client-executable run tools: the client executes the workflow + * and posts success/error to /api/copilot/confirm when done. The server + * polls here until that completion signal arrives. 
+ */ +export async function waitForToolCompletion( + toolCallId: string, + timeoutMs: number, + abortSignal?: AbortSignal +): Promise<{ status: string; message?: string } | null> { + const start = Date.now() + let interval = TOOL_DECISION_INITIAL_POLL_MS + const maxInterval = TOOL_DECISION_MAX_POLL_MS + while (Date.now() - start < timeoutMs) { + if (abortSignal?.aborted) return null + const decision = await getToolConfirmation(toolCallId) + // Only return on completion statuses, not accept/reject decisions + if (decision?.status === 'success' || decision?.status === 'error') { + return decision + } + await new Promise((resolve) => setTimeout(resolve, interval)) + interval = Math.min(interval * TOOL_DECISION_POLL_BACKOFF, maxInterval) + } + return null +} diff --git a/apps/sim/lib/copilot/tools/client/tool-display-registry.ts b/apps/sim/lib/copilot/tools/client/tool-display-registry.ts index 539414e963..9cfa68075c 100644 --- a/apps/sim/lib/copilot/tools/client/tool-display-registry.ts +++ b/apps/sim/lib/copilot/tools/client/tool-display-registry.ts @@ -1697,6 +1697,225 @@ const META_research: ToolMetadata = { }, } +const META_generate_api_key: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Preparing to generate API key', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Generate API key?', icon: KeyRound }, + [ClientToolCallState.executing]: { text: 'Generating API key', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Generated API key', icon: KeyRound }, + [ClientToolCallState.error]: { text: 'Failed to generate API key', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped generating API key', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted generating API key', icon: XCircle }, + }, + interrupt: { + accept: { text: 'Generate', icon: KeyRound }, + reject: { text: 'Skip', icon: MinusCircle }, + }, + uiConfig: { + interrupt: { + accept: { text: 'Generate', icon: KeyRound }, + 
reject: { text: 'Skip', icon: MinusCircle }, + showAllowOnce: true, + showAllowAlways: true, + }, + }, + getDynamicText: (params, state) => { + const name = params?.name + if (name && typeof name === 'string') { + switch (state) { + case ClientToolCallState.success: + return `Generated API key "${name}"` + case ClientToolCallState.executing: + return `Generating API key "${name}"` + case ClientToolCallState.generating: + return `Preparing to generate "${name}"` + case ClientToolCallState.pending: + return `Generate API key "${name}"?` + case ClientToolCallState.error: + return `Failed to generate "${name}"` + } + } + return undefined + }, +} + +const META_run_block: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Preparing to run block', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Run this block?', icon: Play }, + [ClientToolCallState.executing]: { text: 'Running block', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Executed block', icon: Play }, + [ClientToolCallState.error]: { text: 'Failed to run block', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped block execution', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted block execution', icon: MinusCircle }, + [ClientToolCallState.background]: { text: 'Running block in background', icon: Play }, + }, + interrupt: { + accept: { text: 'Run', icon: Play }, + reject: { text: 'Skip', icon: MinusCircle }, + }, + uiConfig: { + isSpecial: true, + interrupt: { + accept: { text: 'Run', icon: Play }, + reject: { text: 'Skip', icon: MinusCircle }, + showAllowOnce: true, + showAllowAlways: true, + }, + secondaryAction: { + text: 'Move to Background', + title: 'Move to Background', + variant: 'tertiary', + showInStates: [ClientToolCallState.executing], + completionMessage: + 'The user has chosen to move the block execution to the background. 
Check back with them later to know when the block execution is complete', + targetState: ClientToolCallState.background, + }, + }, + getDynamicText: (params, state) => { + const blockId = params?.blockId || params?.block_id + if (blockId && typeof blockId === 'string') { + switch (state) { + case ClientToolCallState.success: + return `Executed block ${blockId}` + case ClientToolCallState.executing: + return `Running block ${blockId}` + case ClientToolCallState.generating: + return `Preparing to run block ${blockId}` + case ClientToolCallState.pending: + return `Run block ${blockId}?` + case ClientToolCallState.error: + return `Failed to run block ${blockId}` + case ClientToolCallState.rejected: + return `Skipped running block ${blockId}` + case ClientToolCallState.aborted: + return `Aborted running block ${blockId}` + case ClientToolCallState.background: + return `Running block ${blockId} in background` + } + } + return undefined + }, +} + +const META_run_from_block: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Preparing to run from block', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Run from this block?', icon: Play }, + [ClientToolCallState.executing]: { text: 'Running from block', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Executed from block', icon: Play }, + [ClientToolCallState.error]: { text: 'Failed to run from block', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped run from block', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted run from block', icon: MinusCircle }, + [ClientToolCallState.background]: { text: 'Running from block in background', icon: Play }, + }, + interrupt: { + accept: { text: 'Run', icon: Play }, + reject: { text: 'Skip', icon: MinusCircle }, + }, + uiConfig: { + isSpecial: true, + interrupt: { + accept: { text: 'Run', icon: Play }, + reject: { text: 'Skip', icon: MinusCircle }, + showAllowOnce: true, + showAllowAlways: true, + 
}, + secondaryAction: { + text: 'Move to Background', + title: 'Move to Background', + variant: 'tertiary', + showInStates: [ClientToolCallState.executing], + completionMessage: + 'The user has chosen to move the workflow execution to the background. Check back with them later to know when the workflow execution is complete', + targetState: ClientToolCallState.background, + }, + }, + getDynamicText: (params, state) => { + const blockId = params?.startBlockId || params?.start_block_id + if (blockId && typeof blockId === 'string') { + switch (state) { + case ClientToolCallState.success: + return `Executed from block ${blockId}` + case ClientToolCallState.executing: + return `Running from block ${blockId}` + case ClientToolCallState.generating: + return `Preparing to run from block ${blockId}` + case ClientToolCallState.pending: + return `Run from block ${blockId}?` + case ClientToolCallState.error: + return `Failed to run from block ${blockId}` + case ClientToolCallState.rejected: + return `Skipped running from block ${blockId}` + case ClientToolCallState.aborted: + return `Aborted running from block ${blockId}` + case ClientToolCallState.background: + return `Running from block ${blockId} in background` + } + } + return undefined + }, +} + +const META_run_workflow_until_block: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Preparing to run until block', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Run until this block?', icon: Play }, + [ClientToolCallState.executing]: { text: 'Running until block', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Executed until block', icon: Play }, + [ClientToolCallState.error]: { text: 'Failed to run until block', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped run until block', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted run until block', icon: MinusCircle }, + [ClientToolCallState.background]: { text: 'Running until block in 
background', icon: Play }, + }, + interrupt: { + accept: { text: 'Run', icon: Play }, + reject: { text: 'Skip', icon: MinusCircle }, + }, + uiConfig: { + isSpecial: true, + interrupt: { + accept: { text: 'Run', icon: Play }, + reject: { text: 'Skip', icon: MinusCircle }, + showAllowOnce: true, + showAllowAlways: true, + }, + secondaryAction: { + text: 'Move to Background', + title: 'Move to Background', + variant: 'tertiary', + showInStates: [ClientToolCallState.executing], + completionMessage: + 'The user has chosen to move the workflow execution to the background. Check back with them later to know when the workflow execution is complete', + targetState: ClientToolCallState.background, + }, + }, + getDynamicText: (params, state) => { + const blockId = params?.stopAfterBlockId || params?.stop_after_block_id + if (blockId && typeof blockId === 'string') { + switch (state) { + case ClientToolCallState.success: + return `Executed until block ${blockId}` + case ClientToolCallState.executing: + return `Running until block ${blockId}` + case ClientToolCallState.generating: + return `Preparing to run until block ${blockId}` + case ClientToolCallState.pending: + return `Run until block ${blockId}?` + case ClientToolCallState.error: + return `Failed to run until block ${blockId}` + case ClientToolCallState.rejected: + return `Skipped running until block ${blockId}` + case ClientToolCallState.aborted: + return `Aborted running until block ${blockId}` + case ClientToolCallState.background: + return `Running until block ${blockId} in background` + } + } + return undefined + }, +} + const META_run_workflow: ToolMetadata = { displayNames: { [ClientToolCallState.generating]: { text: 'Preparing to run your workflow', icon: Loader2 }, @@ -2310,6 +2529,7 @@ const TOOL_METADATA_BY_ID: Record = { get_blocks_and_tools: META_get_blocks_and_tools, get_blocks_metadata: META_get_blocks_metadata, get_credentials: META_get_credentials, + generate_api_key: META_generate_api_key, 
get_examples_rag: META_get_examples_rag, get_operations_examples: META_get_operations_examples, get_page_contents: META_get_page_contents, @@ -2335,7 +2555,10 @@ const TOOL_METADATA_BY_ID: Record = { redeploy: META_redeploy, remember_debug: META_remember_debug, research: META_research, + run_block: META_run_block, + run_from_block: META_run_from_block, run_workflow: META_run_workflow, + run_workflow_until_block: META_run_workflow_until_block, scrape_page: META_scrape_page, search_documentation: META_search_documentation, search_errors: META_search_errors, From 7670cdfadf7e3a2a278be490c4c2998fd4f986f6 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 16:31:07 -0800 Subject: [PATCH 64/72] Fix run block --- apps/sim/app/api/workflows/[id]/execute/route.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/sim/app/api/workflows/[id]/execute/route.ts b/apps/sim/app/api/workflows/[id]/execute/route.ts index 4564ff8be4..fafebc73de 100644 --- a/apps/sim/app/api/workflows/[id]/execute/route.ts +++ b/apps/sim/app/api/workflows/[id]/execute/route.ts @@ -283,7 +283,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id: getLatestExecutionState, } = await import('@/lib/workflows/executor/execution-state') const snapshot = runFromBlock.executionId === 'latest' - ? await getLatestExecutionState(id) + ? 
await getLatestExecutionState(workflowId) : await getExecutionState(runFromBlock.executionId) if (!snapshot) { return NextResponse.json( From 1beb35c225d95ef299c26c458087ee0f660f1d4e Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 16:52:40 -0800 Subject: [PATCH 65/72] Fix run from block in copilot --- .../copilot/client-sse/subagent-handlers.ts | 8 +- .../orchestrator/sse-handlers/handlers.ts | 17 ++++ .../sse-handlers/tool-execution.ts | 18 ++-- .../tools/client/tool-display-registry.ts | 87 +++++++++++-------- 4 files changed, 84 insertions(+), 46 deletions(-) diff --git a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts index 394c11f6d4..d40513da7a 100644 --- a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts +++ b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts @@ -250,10 +250,10 @@ export const subAgentSSEHandlers: Record = { sendAutoAcceptConfirmation(id) } - // Client-executable run tools: execute on the client for real-time feedback. - // The server defers execution in interactive mode; we execute here and - // report back via mark-complete. - if (CLIENT_EXECUTABLE_RUN_TOOLS.has(name)) { + // Client-executable run tools: if auto-allowed, execute immediately for + // real-time feedback. For non-auto-allowed, the user must click "Allow" + // first — handleRun in tool-call.tsx triggers executeRunToolOnClient. 
+ if (CLIENT_EXECUTABLE_RUN_TOOLS.has(name) && isAutoAllowed) { executeRunToolOnClient(id, name, args || {}) } }, diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts index 6e0b28cfcc..809eb65959 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts @@ -490,6 +490,23 @@ export const subAgentHandlers: Record = { options.timeout || STREAM_TIMEOUT_MS, options.abortSignal ) + if (completion?.status === 'rejected') { + toolCall.status = 'rejected' + toolCall.endTime = Date.now() + markToolComplete( + toolCall.id, + toolCall.name, + 400, + completion.message || 'Tool execution rejected' + ).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (subagent run tool rejected)', { + toolCallId: toolCall.id, + error: err instanceof Error ? err.message : String(err), + }) + }) + markToolResultSeen(toolCallId) + return + } const success = completion?.status === 'success' toolCall.status = success ? 'success' : 'error' toolCall.endTime = Date.now() diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts index 8c48405ade..739b11c46a 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts @@ -146,10 +146,12 @@ export async function waitForToolDecision( } /** - * Wait for a tool completion signal (success/error) from the client. - * Unlike waitForToolDecision which returns on any status, this ignores - * intermediate statuses like 'accepted'/'rejected'/'background' and only - * returns when the client reports final completion via success/error. + * Wait for a tool completion signal (success/error/rejected) from the client. 
+ * Unlike waitForToolDecision which returns on any status, this ignores the + * initial 'accepted' status and only returns on terminal statuses: + * - success: client finished executing successfully + * - error: client execution failed + * - rejected: user clicked Skip (subagent run tools where user hasn't auto-allowed) * * Used for client-executable run tools: the client executes the workflow * and posts success/error to /api/copilot/confirm when done. The server @@ -166,8 +168,12 @@ export async function waitForToolCompletion( while (Date.now() - start < timeoutMs) { if (abortSignal?.aborted) return null const decision = await getToolConfirmation(toolCallId) - // Only return on completion statuses, not accept/reject decisions - if (decision?.status === 'success' || decision?.status === 'error') { + // Return on completion/terminal statuses, not intermediate 'accepted' + if ( + decision?.status === 'success' || + decision?.status === 'error' || + decision?.status === 'rejected' + ) { return decision } await new Promise((resolve) => setTimeout(resolve, interval)) diff --git a/apps/sim/lib/copilot/tools/client/tool-display-registry.ts b/apps/sim/lib/copilot/tools/client/tool-display-registry.ts index 9cfa68075c..f3d9089547 100644 --- a/apps/sim/lib/copilot/tools/client/tool-display-registry.ts +++ b/apps/sim/lib/copilot/tools/client/tool-display-registry.ts @@ -50,6 +50,18 @@ import { import { getLatestBlock } from '@/blocks/registry' import { getCustomTool } from '@/hooks/queries/custom-tools' import { useWorkflowRegistry } from '@/stores/workflows/registry/store' +import { useWorkflowStore } from '@/stores/workflows/workflow/store' + +/** Resolve a block ID to its human-readable name from the workflow store. 
*/ +function resolveBlockName(blockId: string | undefined): string | undefined { + if (!blockId) return undefined + try { + const blocks = useWorkflowStore.getState().blocks + return blocks[blockId]?.name || undefined + } catch { + return undefined + } +} export enum ClientToolCallState { generating = 'generating', @@ -1742,12 +1754,12 @@ const META_generate_api_key: ToolMetadata = { const META_run_block: ToolMetadata = { displayNames: { [ClientToolCallState.generating]: { text: 'Preparing to run block', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Run this block?', icon: Play }, + [ClientToolCallState.pending]: { text: 'Run block?', icon: Play }, [ClientToolCallState.executing]: { text: 'Running block', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Executed block', icon: Play }, + [ClientToolCallState.success]: { text: 'Ran block', icon: Play }, [ClientToolCallState.error]: { text: 'Failed to run block', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped block execution', icon: MinusCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted block execution', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped running block', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted running block', icon: MinusCircle }, [ClientToolCallState.background]: { text: 'Running block in background', icon: Play }, }, interrupt: { @@ -1775,23 +1787,24 @@ const META_run_block: ToolMetadata = { getDynamicText: (params, state) => { const blockId = params?.blockId || params?.block_id if (blockId && typeof blockId === 'string') { + const name = resolveBlockName(blockId) || blockId switch (state) { case ClientToolCallState.success: - return `Executed block ${blockId}` + return `Ran ${name}` case ClientToolCallState.executing: - return `Running block ${blockId}` + return `Running ${name}` case ClientToolCallState.generating: - return `Preparing to run block ${blockId}` + return `Preparing to run ${name}` 
case ClientToolCallState.pending: - return `Run block ${blockId}?` + return `Run ${name}?` case ClientToolCallState.error: - return `Failed to run block ${blockId}` + return `Failed to run ${name}` case ClientToolCallState.rejected: - return `Skipped running block ${blockId}` + return `Skipped running ${name}` case ClientToolCallState.aborted: - return `Aborted running block ${blockId}` + return `Aborted running ${name}` case ClientToolCallState.background: - return `Running block ${blockId} in background` + return `Running ${name} in background` } } return undefined @@ -1801,12 +1814,12 @@ const META_run_block: ToolMetadata = { const META_run_from_block: ToolMetadata = { displayNames: { [ClientToolCallState.generating]: { text: 'Preparing to run from block', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Run from this block?', icon: Play }, + [ClientToolCallState.pending]: { text: 'Run from block?', icon: Play }, [ClientToolCallState.executing]: { text: 'Running from block', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Executed from block', icon: Play }, + [ClientToolCallState.success]: { text: 'Ran from block', icon: Play }, [ClientToolCallState.error]: { text: 'Failed to run from block', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped run from block', icon: MinusCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted run from block', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped running from block', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted running from block', icon: MinusCircle }, [ClientToolCallState.background]: { text: 'Running from block in background', icon: Play }, }, interrupt: { @@ -1834,23 +1847,24 @@ const META_run_from_block: ToolMetadata = { getDynamicText: (params, state) => { const blockId = params?.startBlockId || params?.start_block_id if (blockId && typeof blockId === 'string') { + const name = resolveBlockName(blockId) || blockId switch 
(state) { case ClientToolCallState.success: - return `Executed from block ${blockId}` + return `Ran from ${name}` case ClientToolCallState.executing: - return `Running from block ${blockId}` + return `Running from ${name}` case ClientToolCallState.generating: - return `Preparing to run from block ${blockId}` + return `Preparing to run from ${name}` case ClientToolCallState.pending: - return `Run from block ${blockId}?` + return `Run from ${name}?` case ClientToolCallState.error: - return `Failed to run from block ${blockId}` + return `Failed to run from ${name}` case ClientToolCallState.rejected: - return `Skipped running from block ${blockId}` + return `Skipped running from ${name}` case ClientToolCallState.aborted: - return `Aborted running from block ${blockId}` + return `Aborted running from ${name}` case ClientToolCallState.background: - return `Running from block ${blockId} in background` + return `Running from ${name} in background` } } return undefined @@ -1860,12 +1874,12 @@ const META_run_from_block: ToolMetadata = { const META_run_workflow_until_block: ToolMetadata = { displayNames: { [ClientToolCallState.generating]: { text: 'Preparing to run until block', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Run until this block?', icon: Play }, + [ClientToolCallState.pending]: { text: 'Run until block?', icon: Play }, [ClientToolCallState.executing]: { text: 'Running until block', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Executed until block', icon: Play }, + [ClientToolCallState.success]: { text: 'Ran until block', icon: Play }, [ClientToolCallState.error]: { text: 'Failed to run until block', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped run until block', icon: MinusCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted run until block', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped running until block', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted 
running until block', icon: MinusCircle }, [ClientToolCallState.background]: { text: 'Running until block in background', icon: Play }, }, interrupt: { @@ -1893,23 +1907,24 @@ const META_run_workflow_until_block: ToolMetadata = { getDynamicText: (params, state) => { const blockId = params?.stopAfterBlockId || params?.stop_after_block_id if (blockId && typeof blockId === 'string') { + const name = resolveBlockName(blockId) || blockId switch (state) { case ClientToolCallState.success: - return `Executed until block ${blockId}` + return `Ran until ${name}` case ClientToolCallState.executing: - return `Running until block ${blockId}` + return `Running until ${name}` case ClientToolCallState.generating: - return `Preparing to run until block ${blockId}` + return `Preparing to run until ${name}` case ClientToolCallState.pending: - return `Run until block ${blockId}?` + return `Run until ${name}?` case ClientToolCallState.error: - return `Failed to run until block ${blockId}` + return `Failed to run until ${name}` case ClientToolCallState.rejected: - return `Skipped running until block ${blockId}` + return `Skipped running until ${name}` case ClientToolCallState.aborted: - return `Aborted running until block ${blockId}` + return `Aborted running until ${name}` case ClientToolCallState.background: - return `Running until block ${blockId} in background` + return `Running until ${name} in background` } } return undefined From 18f1d7620665c21a6ba5836d26101f34dc8e415e Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 17:04:57 -0800 Subject: [PATCH 66/72] Fix lint --- apps/sim/app/api/workflows/[id]/execute/route.ts | 14 +++++++------- .../copilot/components/tool-call/tool-call.tsx | 10 +++++----- apps/sim/lib/copilot/client-sse/handlers.ts | 10 ++++------ .../lib/copilot/client-sse/run-tool-execution.ts | 5 +++-- .../lib/copilot/client-sse/subagent-handlers.ts | 5 +---- .../copilot/tools/client/tool-display-registry.ts | 12 +++++++++--- 6 files changed, 29 
insertions(+), 27 deletions(-) diff --git a/apps/sim/app/api/workflows/[id]/execute/route.ts b/apps/sim/app/api/workflows/[id]/execute/route.ts index fafebc73de..021cdbdd1f 100644 --- a/apps/sim/app/api/workflows/[id]/execute/route.ts +++ b/apps/sim/app/api/workflows/[id]/execute/route.ts @@ -278,13 +278,13 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id: // Resolve runFromBlock snapshot from executionId if needed let runFromBlock = rawRunFromBlock if (runFromBlock && !runFromBlock.sourceSnapshot && runFromBlock.executionId) { - const { - getExecutionState, - getLatestExecutionState, - } = await import('@/lib/workflows/executor/execution-state') - const snapshot = runFromBlock.executionId === 'latest' - ? await getLatestExecutionState(workflowId) - : await getExecutionState(runFromBlock.executionId) + const { getExecutionState, getLatestExecutionState } = await import( + '@/lib/workflows/executor/execution-state' + ) + const snapshot = + runFromBlock.executionId === 'latest' + ? 
await getLatestExecutionState(workflowId) + : await getExecutionState(runFromBlock.executionId) if (!snapshot) { return NextResponse.json( { diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx index 54fe62fc4f..c7f1032094 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx @@ -6,6 +6,10 @@ import clsx from 'clsx' import { ChevronUp, LayoutList } from 'lucide-react' import Editor from 'react-simple-code-editor' import { Button, Code, getCodeEditorProps, highlight, languages } from '@/components/emcn' +import { + CLIENT_EXECUTABLE_RUN_TOOLS, + executeRunToolOnClient, +} from '@/lib/copilot/client-sse/run-tool-execution' import { ClientToolCallState, TOOL_DISPLAY_REGISTRY, @@ -18,10 +22,6 @@ import { LoopTool } from '@/app/workspace/[workspaceId]/w/[workflowId]/component import { ParallelTool } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/subflows/parallel/parallel-config' import { getDisplayValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/workflow-block' import { getBlock } from '@/blocks/registry' -import { - CLIENT_EXECUTABLE_RUN_TOOLS, - executeRunToolOnClient, -} from '@/lib/copilot/client-sse/run-tool-execution' import type { CopilotToolCall } from '@/stores/panel' import { useCopilotStore } from '@/stores/panel' import type { SubAgentContentBlock } from '@/stores/panel/copilot/types' @@ -1238,7 +1238,7 @@ function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean { return false } - const hasInterrupt = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.interrupt === true + const 
hasInterrupt = !!TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.interrupt if (hasInterrupt) { return true } diff --git a/apps/sim/lib/copilot/client-sse/handlers.ts b/apps/sim/lib/copilot/client-sse/handlers.ts index adbde8f6e9..82f887a48e 100644 --- a/apps/sim/lib/copilot/client-sse/handlers.ts +++ b/apps/sim/lib/copilot/client-sse/handlers.ts @@ -16,10 +16,7 @@ import { useWorkflowDiffStore } from '@/stores/workflow-diff/store' import { useWorkflowRegistry } from '@/stores/workflows/registry/store' import type { WorkflowState } from '@/stores/workflows/workflow/types' import { appendTextBlock, beginThinkingBlock, finalizeThinkingBlock } from './content-blocks' -import { - CLIENT_EXECUTABLE_RUN_TOOLS, - executeRunToolOnClient, -} from './run-tool-execution' +import { CLIENT_EXECUTABLE_RUN_TOOLS, executeRunToolOnClient } from './run-tool-execution' import type { ClientContentBlock, ClientStreamingContext } from './types' const logger = createLogger('CopilotClientSseHandlers') @@ -425,8 +422,9 @@ export const sseHandlers: Record = { (input?.workflowId as string) || useWorkflowRegistry.getState().activeWorkflowId const apiKey = (resultPayload?.apiKey || resultPayload?.key) as string | undefined if (workflowId) { - const existingStatus = - useWorkflowRegistry.getState().getWorkflowDeploymentStatus(workflowId) + const existingStatus = useWorkflowRegistry + .getState() + .getWorkflowDeploymentStatus(workflowId) useWorkflowRegistry .getState() .setDeploymentStatus( diff --git a/apps/sim/lib/copilot/client-sse/run-tool-execution.ts b/apps/sim/lib/copilot/client-sse/run-tool-execution.ts index 3836eecacc..1835967aa1 100644 --- a/apps/sim/lib/copilot/client-sse/run-tool-execution.ts +++ b/apps/sim/lib/copilot/client-sse/run-tool-execution.ts @@ -1,9 +1,9 @@ import { createLogger } from '@sim/logger' import { v4 as uuidv4 } from 'uuid' -import { executeWorkflowWithFullLogging } from '@/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils' import { 
COPILOT_CONFIRM_API_PATH } from '@/lib/copilot/constants' import { resolveToolDisplay } from '@/lib/copilot/store-utils' import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' +import { executeWorkflowWithFullLogging } from '@/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils' import { useExecutionStore } from '@/stores/execution/store' import { useCopilotStore } from '@/stores/panel/copilot/store' import { useWorkflowRegistry } from '@/stores/workflows/registry/store' @@ -73,7 +73,8 @@ async function doExecuteRunTool( | undefined const stopAfterBlockId = (() => { - if (toolName === 'run_workflow_until_block') return params.stopAfterBlockId as string | undefined + if (toolName === 'run_workflow_until_block') + return params.stopAfterBlockId as string | undefined if (toolName === 'run_block') return params.blockId as string | undefined return undefined })() diff --git a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts index d40513da7a..314a40573e 100644 --- a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts +++ b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts @@ -15,10 +15,7 @@ import { sseHandlers, updateStreamingMessage, } from './handlers' -import { - CLIENT_EXECUTABLE_RUN_TOOLS, - executeRunToolOnClient, -} from './run-tool-execution' +import { CLIENT_EXECUTABLE_RUN_TOOLS, executeRunToolOnClient } from './run-tool-execution' import type { ClientStreamingContext } from './types' const logger = createLogger('CopilotClientSubagentHandlers') diff --git a/apps/sim/lib/copilot/tools/client/tool-display-registry.ts b/apps/sim/lib/copilot/tools/client/tool-display-registry.ts index f3d9089547..137ab08216 100644 --- a/apps/sim/lib/copilot/tools/client/tool-display-registry.ts +++ b/apps/sim/lib/copilot/tools/client/tool-display-registry.ts @@ -148,12 +148,14 @@ function toUiConfig(metadata?: ToolMetadata): ToolUIConfig | undefined { const legacy = 
metadata?.uiConfig const subagent = legacy?.subagent const dynamicText = metadata?.getDynamicText - if (!legacy && !dynamicText) return undefined + // Check both nested uiConfig.interrupt AND top-level interrupt + const hasInterrupt = !!legacy?.interrupt || !!metadata?.interrupt + if (!legacy && !dynamicText && !hasInterrupt) return undefined const config: ToolUIConfig = { isSpecial: legacy?.isSpecial === true, subagent: !!legacy?.subagent, - interrupt: !!legacy?.interrupt, + interrupt: hasInterrupt, customRenderer: legacy?.customRenderer, paramsTable: legacy?.paramsTable, dynamicText, @@ -1180,13 +1182,17 @@ const META_knowledge: ToolMetadata = { const META_knowledge_base: ToolMetadata = { displayNames: { [ClientToolCallState.generating]: { text: 'Accessing knowledge base', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Accessing knowledge base', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Access knowledge base?', icon: Database }, [ClientToolCallState.executing]: { text: 'Accessing knowledge base', icon: Loader2 }, [ClientToolCallState.success]: { text: 'Accessed knowledge base', icon: Database }, [ClientToolCallState.error]: { text: 'Failed to access knowledge base', icon: XCircle }, [ClientToolCallState.aborted]: { text: 'Aborted knowledge base access', icon: MinusCircle }, [ClientToolCallState.rejected]: { text: 'Skipped knowledge base access', icon: MinusCircle }, }, + interrupt: { + accept: { text: 'Allow', icon: Database }, + reject: { text: 'Skip', icon: MinusCircle }, + }, getDynamicText: (params: Record, state: ClientToolCallState) => { const operation = params?.operation as string | undefined const name = params?.args?.name as string | undefined From 8a2eacf1795751f27cff7ba2cc43b026f8fe1fa2 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 17:42:53 -0800 Subject: [PATCH 67/72] Fix skip and mtb --- .../orchestrator/sse-handlers/handlers.ts | 116 ++++++++++-------- .../sse-handlers/tool-execution.ts | 3 +- 
apps/sim/stores/panel/copilot/store.ts | 1 + 3 files changed, 71 insertions(+), 49 deletions(-) diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts index 809eb65959..85151381af 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts @@ -211,6 +211,24 @@ export const sseHandlers: Record = { options.timeout || STREAM_TIMEOUT_MS, options.abortSignal ) + if (completion?.status === 'background') { + toolCall.status = 'skipped' + toolCall.endTime = Date.now() + markToolComplete( + toolCall.id, + toolCall.name, + 202, + completion.message || 'Tool execution moved to background', + { background: true } + ).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (run tool background)', { + toolCallId: toolCall.id, + error: err instanceof Error ? err.message : String(err), + }) + }) + markToolResultSeen(toolCallId) + return + } const success = completion?.status === 'success' toolCall.status = success ? 'success' : 'error' toolCall.endTime = Date.now() @@ -235,48 +253,40 @@ export const sseHandlers: Record = { if (decision?.status === 'rejected' || decision?.status === 'error') { toolCall.status = 'rejected' toolCall.endTime = Date.now() - await markToolComplete( + // Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport + markToolComplete( toolCall.id, toolCall.name, 400, decision.message || 'Tool execution rejected', { skipped: true, reason: 'user_rejected' } - ) - markToolResultSeen(toolCall.id) - await options.onEvent?.({ - type: 'tool_result', - toolCallId: toolCall.id, - data: { - id: toolCall.id, - name: toolCall.name, - success: false, - result: { skipped: true, reason: 'user_rejected' }, - }, + ).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (rejected)', { + toolCallId: toolCall.id, + error: err instanceof Error ? 
err.message : String(err), + }) }) + markToolResultSeen(toolCall.id) return } if (decision?.status === 'background') { toolCall.status = 'skipped' toolCall.endTime = Date.now() - await markToolComplete( + // Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport + markToolComplete( toolCall.id, toolCall.name, 202, decision.message || 'Tool execution moved to background', { background: true } - ) - markToolResultSeen(toolCall.id) - await options.onEvent?.({ - type: 'tool_result', - toolCallId: toolCall.id, - data: { - id: toolCall.id, - name: toolCall.name, - success: true, - result: { background: true }, - }, + ).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (background)', { + toolCallId: toolCall.id, + error: err instanceof Error ? err.message : String(err), + }) }) + markToolResultSeen(toolCall.id) return } } @@ -436,47 +446,39 @@ export const subAgentHandlers: Record = { if (decision?.status === 'rejected' || decision?.status === 'error') { toolCall.status = 'rejected' toolCall.endTime = Date.now() - await markToolComplete( + // Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport + markToolComplete( toolCall.id, toolCall.name, 400, decision.message || 'Tool execution rejected', { skipped: true, reason: 'user_rejected' } - ) - markToolResultSeen(toolCall.id) - await options?.onEvent?.({ - type: 'tool_result', - toolCallId: toolCall.id, - data: { - id: toolCall.id, - name: toolCall.name, - success: false, - result: { skipped: true, reason: 'user_rejected' }, - }, + ).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (subagent rejected)', { + toolCallId: toolCall.id, + error: err instanceof Error ? 
err.message : String(err), + }) }) + markToolResultSeen(toolCall.id) return } if (decision?.status === 'background') { toolCall.status = 'skipped' toolCall.endTime = Date.now() - await markToolComplete( + // Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport + markToolComplete( toolCall.id, toolCall.name, 202, decision.message || 'Tool execution moved to background', { background: true } - ) - markToolResultSeen(toolCall.id) - await options?.onEvent?.({ - type: 'tool_result', - toolCallId: toolCall.id, - data: { - id: toolCall.id, - name: toolCall.name, - success: true, - result: { background: true }, - }, + ).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (subagent background)', { + toolCallId: toolCall.id, + error: err instanceof Error ? err.message : String(err), + }) }) + markToolResultSeen(toolCall.id) return } } @@ -507,6 +509,24 @@ export const subAgentHandlers: Record = { markToolResultSeen(toolCallId) return } + if (completion?.status === 'background') { + toolCall.status = 'skipped' + toolCall.endTime = Date.now() + markToolComplete( + toolCall.id, + toolCall.name, + 202, + completion.message || 'Tool execution moved to background', + { background: true } + ).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (subagent run tool background)', { + toolCallId: toolCall.id, + error: err instanceof Error ? err.message : String(err), + }) + }) + markToolResultSeen(toolCallId) + return + } const success = completion?.status === 'success' toolCall.status = success ? 
'success' : 'error' toolCall.endTime = Date.now() diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts index 739b11c46a..26865176cf 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts @@ -172,7 +172,8 @@ export async function waitForToolCompletion( if ( decision?.status === 'success' || decision?.status === 'error' || - decision?.status === 'rejected' + decision?.status === 'rejected' || + decision?.status === 'background' ) { return decision } diff --git a/apps/sim/stores/panel/copilot/store.ts b/apps/sim/stores/panel/copilot/store.ts index e0283aa01c..1dd8540ee0 100644 --- a/apps/sim/stores/panel/copilot/store.ts +++ b/apps/sim/stores/panel/copilot/store.ts @@ -1500,6 +1500,7 @@ export const useCopilotStore = create()( else if (newState === 'success' || newState === 'accepted') norm = ClientToolCallState.success else if (newState === 'aborted') norm = ClientToolCallState.aborted + else if (newState === 'background') norm = ClientToolCallState.background else if (typeof newState === 'number') norm = newState as unknown as ClientToolCallState map[id] = { ...current, From bd6a103db159be8b693a4d14c88a5f2683affef7 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 17:50:56 -0800 Subject: [PATCH 68/72] Fix typing --- .../app/api/workflows/[id]/execute/route.ts | 55 +++++++++++++------ 1 file changed, 38 insertions(+), 17 deletions(-) diff --git a/apps/sim/app/api/workflows/[id]/execute/route.ts b/apps/sim/app/api/workflows/[id]/execute/route.ts index 021cdbdd1f..a343fb3e9a 100644 --- a/apps/sim/app/api/workflows/[id]/execute/route.ts +++ b/apps/sim/app/api/workflows/[id]/execute/route.ts @@ -33,7 +33,11 @@ import { createHttpResponseFromBlock, workflowHasResponseBlock } from '@/lib/wor import { executeWorkflowJob, type WorkflowExecutionPayload } from 
'@/background/workflow-execution' import { normalizeName } from '@/executor/constants' import { ExecutionSnapshot } from '@/executor/execution/snapshot' -import type { ExecutionMetadata, IterationContext } from '@/executor/execution/types' +import type { + ExecutionMetadata, + IterationContext, + SerializableExecutionState, +} from '@/executor/execution/types' import type { NormalizedBlockOutput, StreamingExecution } from '@/executor/types' import { hasExecutionResult } from '@/executor/utils/errors' import { Serializer } from '@/serializer' @@ -276,24 +280,41 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id: } = validation.data // Resolve runFromBlock snapshot from executionId if needed - let runFromBlock = rawRunFromBlock - if (runFromBlock && !runFromBlock.sourceSnapshot && runFromBlock.executionId) { - const { getExecutionState, getLatestExecutionState } = await import( - '@/lib/workflows/executor/execution-state' - ) - const snapshot = - runFromBlock.executionId === 'latest' - ? await getLatestExecutionState(workflowId) - : await getExecutionState(runFromBlock.executionId) - if (!snapshot) { + let resolvedRunFromBlock: + | { startBlockId: string; sourceSnapshot: SerializableExecutionState } + | undefined + if (rawRunFromBlock) { + if (rawRunFromBlock.sourceSnapshot) { + resolvedRunFromBlock = { + startBlockId: rawRunFromBlock.startBlockId, + sourceSnapshot: rawRunFromBlock.sourceSnapshot as SerializableExecutionState, + } + } else if (rawRunFromBlock.executionId) { + const { getExecutionState, getLatestExecutionState } = await import( + '@/lib/workflows/executor/execution-state' + ) + const snapshot = + rawRunFromBlock.executionId === 'latest' + ? await getLatestExecutionState(workflowId) + : await getExecutionState(rawRunFromBlock.executionId) + if (!snapshot) { + return NextResponse.json( + { + error: `No execution state found for ${rawRunFromBlock.executionId === 'latest' ? 
'workflow' : `execution ${rawRunFromBlock.executionId}`}. Run the full workflow first.`, + }, + { status: 400 } + ) + } + resolvedRunFromBlock = { + startBlockId: rawRunFromBlock.startBlockId, + sourceSnapshot: snapshot, + } + } else { return NextResponse.json( - { - error: `No execution state found for ${runFromBlock.executionId === 'latest' ? 'workflow' : `execution ${runFromBlock.executionId}`}. Run the full workflow first.`, - }, + { error: 'runFromBlock requires either sourceSnapshot or executionId' }, { status: 400 } ) } - runFromBlock = { startBlockId: runFromBlock.startBlockId, sourceSnapshot: snapshot } } // For API key and internal JWT auth, the entire body is the input (except for our control fields) @@ -520,7 +541,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id: includeFileBase64, base64MaxBytes, stopAfterBlockId, - runFromBlock, + runFromBlock: resolvedRunFromBlock, abortSignal: timeoutController.signal, }) @@ -861,7 +882,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id: includeFileBase64, base64MaxBytes, stopAfterBlockId, - runFromBlock, + runFromBlock: resolvedRunFromBlock, }) if (result.status === 'paused') { From ddc516461b808e606bf44ee391fc4186ca36648f Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 19:08:54 -0800 Subject: [PATCH 69/72] Fix tool call --- .../orchestrator/sse-handlers/handlers.ts | 40 +++++++++++++++++-- 1 file changed, 37 insertions(+), 3 deletions(-) diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts index 85151381af..0f29ef3b35 100644 --- a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts @@ -289,6 +289,23 @@ export const sseHandlers: Record = { markToolResultSeen(toolCall.id) return } + + // Decision was null — timed out or aborted. + // Do NOT fall through to auto-execute. 
Mark the tool as timed out + // and notify Go so it can unblock waitForExternalTool. + toolCall.status = 'rejected' + toolCall.endTime = Date.now() + markToolComplete(toolCall.id, toolCall.name, 408, 'Tool approval timed out', { + skipped: true, + reason: 'timeout', + }).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (timeout)', { + toolCallId: toolCall.id, + error: err instanceof Error ? err.message : String(err), + }) + }) + markToolResultSeen(toolCall.id) + return } if (options.autoExecuteTools !== false) { @@ -431,9 +448,10 @@ export const subAgentHandlers: Record = { return } - // Integration tools (user-installed) require approval in interactive mode, - // same as top-level interrupt tools. - if (options.interactive === true && isIntegrationTool(toolName)) { + // Interrupt tools and integration tools (user-installed) require approval + // in interactive mode, same as top-level handler. + const needsSubagentApproval = isInterruptToolName(toolName) || isIntegrationTool(toolName) + if (options.interactive === true && needsSubagentApproval) { const decision = await waitForToolDecision( toolCallId, options.timeout || STREAM_TIMEOUT_MS, @@ -481,6 +499,22 @@ export const subAgentHandlers: Record = { markToolResultSeen(toolCall.id) return } + + // Decision was null — timed out or aborted. + // Do NOT fall through to auto-execute. + toolCall.status = 'rejected' + toolCall.endTime = Date.now() + markToolComplete(toolCall.id, toolCall.name, 408, 'Tool approval timed out', { + skipped: true, + reason: 'timeout', + }).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (subagent timeout)', { + toolCallId: toolCall.id, + error: err instanceof Error ? err.message : String(err), + }) + }) + markToolResultSeen(toolCall.id) + return } // Client-executable run tools in interactive mode: defer to client. 
From bb4e072f8e7e270e9cf881f9aa64ea079c51799b Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 19:13:25 -0800 Subject: [PATCH 70/72] Bump api version --- apps/sim/lib/copilot/constants.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/sim/lib/copilot/constants.ts b/apps/sim/lib/copilot/constants.ts index 35c1acd2ce..f95ec48b3d 100644 --- a/apps/sim/lib/copilot/constants.ts +++ b/apps/sim/lib/copilot/constants.ts @@ -1,7 +1,7 @@ import { env } from '@/lib/core/config/env' export const SIM_AGENT_API_URL_DEFAULT = 'https://copilot.sim.ai' -export const SIM_AGENT_VERSION = '1.0.3' +export const SIM_AGENT_VERSION = '3.0.0' /** Resolved copilot backend URL — reads from env with fallback to default. */ const rawAgentUrl = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT From 98df298cd0e49274044fb0dcaea85f48192ec5e0 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 19:22:09 -0800 Subject: [PATCH 71/72] Fix bun lock --- bun.lock | 36 ++++++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 8 deletions(-) diff --git a/bun.lock b/bun.lock index 55ae41dde4..7421c54278 100644 --- a/bun.lock +++ b/bun.lock @@ -395,9 +395,9 @@ "@aws-sdk/client-rds-data": ["@aws-sdk/client-rds-data@3.940.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "3.940.0", "@aws-sdk/credential-provider-node": "3.940.0", "@aws-sdk/middleware-host-header": "3.936.0", "@aws-sdk/middleware-logger": "3.936.0", "@aws-sdk/middleware-recursion-detection": "3.936.0", "@aws-sdk/middleware-user-agent": "3.940.0", "@aws-sdk/region-config-resolver": "3.936.0", "@aws-sdk/types": "3.936.0", "@aws-sdk/util-endpoints": "3.936.0", "@aws-sdk/util-user-agent-browser": "3.936.0", "@aws-sdk/util-user-agent-node": "3.940.0", "@smithy/config-resolver": "^4.4.3", "@smithy/core": "^3.18.5", "@smithy/fetch-http-handler": "^5.3.6", "@smithy/hash-node": "^4.2.5", 
"@smithy/invalid-dependency": "^4.2.5", "@smithy/middleware-content-length": "^4.2.5", "@smithy/middleware-endpoint": "^4.3.12", "@smithy/middleware-retry": "^4.4.12", "@smithy/middleware-serde": "^4.2.6", "@smithy/middleware-stack": "^4.2.5", "@smithy/node-config-provider": "^4.3.5", "@smithy/node-http-handler": "^4.4.5", "@smithy/protocol-http": "^5.3.5", "@smithy/smithy-client": "^4.9.8", "@smithy/types": "^4.9.0", "@smithy/url-parser": "^4.2.5", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.11", "@smithy/util-defaults-mode-node": "^4.2.14", "@smithy/util-endpoints": "^3.2.5", "@smithy/util-middleware": "^4.2.5", "@smithy/util-retry": "^4.2.5", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-68NH61MvS48CVPfzBNCPdCG4KnNjM+Uj/3DSw7rT9PJvdML9ARS4M2Uqco9POPw+Aj20KBumsEUd6FMVcYBXAA=="], - "@aws-sdk/client-s3": ["@aws-sdk/client-s3@3.985.0", "", { "dependencies": { "@aws-crypto/sha1-browser": "5.2.0", "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/credential-provider-node": "^3.972.6", "@aws-sdk/middleware-bucket-endpoint": "^3.972.3", "@aws-sdk/middleware-expect-continue": "^3.972.3", "@aws-sdk/middleware-flexible-checksums": "^3.972.5", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-location-constraint": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-sdk-s3": "^3.972.7", "@aws-sdk/middleware-ssec": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/signature-v4-multi-region": "3.985.0", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": 
"^3.22.1", "@smithy/eventstream-serde-browser": "^4.2.8", "@smithy/eventstream-serde-config-resolver": "^4.3.8", "@smithy/eventstream-serde-node": "^4.2.8", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-blob-browser": "^4.2.9", "@smithy/hash-node": "^4.2.8", "@smithy/hash-stream-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/md5-js": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-stream": "^4.5.11", "@smithy/util-utf8": "^4.2.0", "@smithy/util-waiter": "^4.2.8", "tslib": "^2.6.2" } }, "sha512-S9TqjzzZEEIKBnC7yFpvqM7CG9ALpY5qhQ5BnDBJtdG20NoGpjKLGUUfD2wmZItuhbrcM4Z8c6m6Fg0XYIOVvw=="], + "@aws-sdk/client-s3": ["@aws-sdk/client-s3@3.986.0", "", { "dependencies": { "@aws-crypto/sha1-browser": "5.2.0", "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/credential-provider-node": "^3.972.6", "@aws-sdk/middleware-bucket-endpoint": "^3.972.3", "@aws-sdk/middleware-expect-continue": "^3.972.3", "@aws-sdk/middleware-flexible-checksums": "^3.972.5", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-location-constraint": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-sdk-s3": "^3.972.7", 
"@aws-sdk/middleware-ssec": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/signature-v4-multi-region": "3.986.0", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.986.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/eventstream-serde-browser": "^4.2.8", "@smithy/eventstream-serde-config-resolver": "^4.3.8", "@smithy/eventstream-serde-node": "^4.2.8", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-blob-browser": "^4.2.9", "@smithy/hash-node": "^4.2.8", "@smithy/hash-stream-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/md5-js": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-stream": "^4.5.11", "@smithy/util-utf8": "^4.2.0", "@smithy/util-waiter": "^4.2.8", "tslib": "^2.6.2" } }, "sha512-IcDJ8shVVvbxgMe8+dLWcv6uhSwmX65PHTVGX81BhWAElPnp3CL8w/5uzOPRo4n4/bqIk9eskGVEIicw2o+SrA=="], - "@aws-sdk/client-sesv2": ["@aws-sdk/client-sesv2@3.985.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/credential-provider-node": "^3.972.6", "@aws-sdk/middleware-host-header": 
"^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/signature-v4-multi-region": "3.985.0", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-RqeSpVUFeg/fI874lNNdJP5nZ+3mUY5qRDDHYiOta3+2esOC/RAG1XcfYnupFR8wDDiIYsi6gHakRUYgiIW13w=="], + "@aws-sdk/client-sesv2": ["@aws-sdk/client-sesv2@3.986.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/credential-provider-node": "^3.972.6", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/signature-v4-multi-region": 
"3.986.0", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.986.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-bMt0cloSsbDQ6S3u9YiHxwnbo7Gvzd8+e6PQQFC4wTUJRKHva4jY1EM2mq4j6iDy3MFYPk61HQlmoZ7krCdQEA=="], "@aws-sdk/client-sqs": ["@aws-sdk/client-sqs@3.947.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "3.947.0", "@aws-sdk/credential-provider-node": "3.947.0", "@aws-sdk/middleware-host-header": "3.936.0", "@aws-sdk/middleware-logger": "3.936.0", "@aws-sdk/middleware-recursion-detection": "3.936.0", "@aws-sdk/middleware-sdk-sqs": "3.946.0", "@aws-sdk/middleware-user-agent": "3.947.0", "@aws-sdk/region-config-resolver": "3.936.0", "@aws-sdk/types": "3.936.0", "@aws-sdk/util-endpoints": "3.936.0", "@aws-sdk/util-user-agent-browser": "3.936.0", "@aws-sdk/util-user-agent-node": "3.947.0", "@smithy/config-resolver": "^4.4.3", "@smithy/core": "^3.18.7", "@smithy/fetch-http-handler": 
"^5.3.6", "@smithy/hash-node": "^4.2.5", "@smithy/invalid-dependency": "^4.2.5", "@smithy/md5-js": "^4.2.5", "@smithy/middleware-content-length": "^4.2.5", "@smithy/middleware-endpoint": "^4.3.14", "@smithy/middleware-retry": "^4.4.14", "@smithy/middleware-serde": "^4.2.6", "@smithy/middleware-stack": "^4.2.5", "@smithy/node-config-provider": "^4.3.5", "@smithy/node-http-handler": "^4.4.5", "@smithy/protocol-http": "^5.3.5", "@smithy/smithy-client": "^4.9.10", "@smithy/types": "^4.9.0", "@smithy/url-parser": "^4.2.5", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.13", "@smithy/util-defaults-mode-node": "^4.2.16", "@smithy/util-endpoints": "^3.2.5", "@smithy/util-middleware": "^4.2.5", "@smithy/util-retry": "^4.2.5", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-8tzFyYGAAnQg+G9eB5zAe0oEo+MJMZ3YEk+8EL4uf2zG5wKxJvTBJZr6U9I1CEXYUde374OyLMyKng+sWyN+wg=="], @@ -461,9 +461,9 @@ "@aws-sdk/region-config-resolver": ["@aws-sdk/region-config-resolver@3.936.0", "", { "dependencies": { "@aws-sdk/types": "3.936.0", "@smithy/config-resolver": "^4.4.3", "@smithy/node-config-provider": "^4.3.5", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-wOKhzzWsshXGduxO4pqSiNyL9oUtk4BEvjWm9aaq6Hmfdoydq6v6t0rAGHWPjFwy9z2haovGRi3C8IxdMB4muw=="], - "@aws-sdk/s3-request-presigner": ["@aws-sdk/s3-request-presigner@3.985.0", "", { "dependencies": { "@aws-sdk/signature-v4-multi-region": "3.985.0", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-format-url": "^3.972.3", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-lPnf977GFM4cMLJ7X+ThktKMe/0CXIfX+wz1z+sUT7yagPL2IRyiNUPFZ0VTEGBo1gRhHEDPWy6yzk8WWRFsvg=="], + "@aws-sdk/s3-request-presigner": ["@aws-sdk/s3-request-presigner@3.986.0", "", { "dependencies": { 
"@aws-sdk/signature-v4-multi-region": "3.986.0", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-format-url": "^3.972.3", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-+yopxtoXwRXZ2Ai9H4GzkN+T2D07sGrURYcm7Eh2OQe3p+Ys/3VrR6UrzILssaJGYtR2vQqVKnGJBHVYqaM1EQ=="], - "@aws-sdk/signature-v4-multi-region": ["@aws-sdk/signature-v4-multi-region@3.985.0", "", { "dependencies": { "@aws-sdk/middleware-sdk-s3": "^3.972.7", "@aws-sdk/types": "^3.973.1", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-W6hTSOPiSbh4IdTYVxN7xHjpCh0qvfQU1GKGBzGQm0ZEIOaMmWqiDEvFfyGYKmfBvumT8vHKxQRTX0av9omtIg=="], + "@aws-sdk/signature-v4-multi-region": ["@aws-sdk/signature-v4-multi-region@3.986.0", "", { "dependencies": { "@aws-sdk/middleware-sdk-s3": "^3.972.7", "@aws-sdk/types": "^3.973.1", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-Upw+rw7wCH93E6QWxqpAqJLrUmJYVUAWrk4tCOBnkeuwzGERZvJFL5UQ6TAJFj9T18Ih+vNFaACh8J5aP4oTBw=="], "@aws-sdk/token-providers": ["@aws-sdk/token-providers@3.940.0", "", { "dependencies": { "@aws-sdk/core": "3.940.0", "@aws-sdk/nested-clients": "3.940.0", "@aws-sdk/types": "3.936.0", "@smithy/property-provider": "^4.2.5", "@smithy/shared-ini-file-loader": "^4.4.0", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-k5qbRe/ZFjW9oWEdzLIa2twRVIEx7p/9rutofyrRysrtEnYh3HAWCngAnwbgKMoiwa806UzcTRx0TjyEpnKcCg=="], @@ -3637,8 +3637,6 @@ "@aws-crypto/sha256-browser/@smithy/util-utf8": ["@smithy/util-utf8@2.3.0", "", { "dependencies": { "@smithy/util-buffer-from": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A=="], - "@aws-crypto/util/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": 
"^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], - "@aws-crypto/util/@smithy/util-utf8": ["@smithy/util-utf8@2.3.0", "", { "dependencies": { "@smithy/util-buffer-from": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A=="], "@aws-sdk/client-s3/@aws-sdk/core": ["@aws-sdk/core@3.973.7", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/xml-builder": "^3.972.4", "@smithy/core": "^3.22.1", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-wNZZQQNlJ+hzD49cKdo+PY6rsTDElO8yDImnrI69p2PLBa7QomeUKAJWYp9xnaR38nlHqWhMHZuYLCQ3oSX+xg=="], @@ -3657,7 +3655,7 @@ "@aws-sdk/client-s3/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], - "@aws-sdk/client-s3/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], + "@aws-sdk/client-s3/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.986.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-Mqi79L38qi1gCG3adlVdbNrSxvcm1IPDLiJPA3OBypY5ewxUyWbaA3DD4goG+EwET6LSFgZJcRSIh6KBNpP5pA=="], 
"@aws-sdk/client-s3/@aws-sdk/util-user-agent-browser": ["@aws-sdk/util-user-agent-browser@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "bowser": "^2.11.0", "tslib": "^2.6.2" } }, "sha512-JurOwkRUcXD/5MTDBcqdyQ9eVedtAsZgw5rBwktsPTN7QtPiS2Ld1jkJepNgYoCufz1Wcut9iup7GJDoIHp8Fw=="], @@ -3679,7 +3677,7 @@ "@aws-sdk/client-sesv2/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], - "@aws-sdk/client-sesv2/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], + "@aws-sdk/client-sesv2/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.986.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-Mqi79L38qi1gCG3adlVdbNrSxvcm1IPDLiJPA3OBypY5ewxUyWbaA3DD4goG+EwET6LSFgZJcRSIh6KBNpP5pA=="], "@aws-sdk/client-sesv2/@aws-sdk/util-user-agent-browser": ["@aws-sdk/util-user-agent-browser@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "bowser": "^2.11.0", "tslib": "^2.6.2" } }, "sha512-JurOwkRUcXD/5MTDBcqdyQ9eVedtAsZgw5rBwktsPTN7QtPiS2Ld1jkJepNgYoCufz1Wcut9iup7GJDoIHp8Fw=="], @@ -4261,6 +4259,8 @@ "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity": ["@aws-sdk/credential-provider-web-identity@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/nested-clients": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", 
"@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-OK3cULuJl6c+RcDZfPpaK5o3deTOnKZbxm7pzhFNGA3fI2hF9yDih17fGRazJzGGWaDVlR9ejZrpDef4DJCEsw=="], + "@aws-sdk/client-s3/@aws-sdk/middleware-user-agent/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], + "@aws-sdk/client-sesv2/@aws-sdk/core/@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.4", "", { "dependencies": { "@smithy/types": "^4.12.0", "fast-xml-parser": "5.3.4", "tslib": "^2.6.2" } }, "sha512-0zJ05ANfYqI6+rGqj8samZBFod0dPPousBjLEqg8WdxSgbMAkRgLyn81lP215Do0rFJ/17LIXwr7q0yK24mP6Q=="], "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-env": ["@aws-sdk/credential-provider-env@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-LxJ9PEO4gKPXzkufvIESUysykPIdrV7+Ocb9yAhbhJLE4TiAYqbCVUE+VuKP1leGR1bBfjWjYgSV5MxprlX3mQ=="], @@ -4275,6 +4275,8 @@ "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity": ["@aws-sdk/credential-provider-web-identity@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/nested-clients": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-OK3cULuJl6c+RcDZfPpaK5o3deTOnKZbxm7pzhFNGA3fI2hF9yDih17fGRazJzGGWaDVlR9ejZrpDef4DJCEsw=="], + "@aws-sdk/client-sesv2/@aws-sdk/middleware-user-agent/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", 
"@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], + "@aws-sdk/client-sqs/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-env": ["@aws-sdk/credential-provider-env@3.947.0", "", { "dependencies": { "@aws-sdk/core": "3.947.0", "@aws-sdk/types": "3.936.0", "@smithy/property-provider": "^4.2.5", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-VR2V6dRELmzwAsCpK4GqxUi6UW5WNhAXS9F9AzWi5jvijwJo3nH92YNJUP4quMpgFZxJHEWyXLWgPjh9u0zYOA=="], "@aws-sdk/client-sqs/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-http": ["@aws-sdk/credential-provider-http@3.947.0", "", { "dependencies": { "@aws-sdk/core": "3.947.0", "@aws-sdk/types": "3.936.0", "@smithy/fetch-http-handler": "^5.3.6", "@smithy/node-http-handler": "^4.4.5", "@smithy/property-provider": "^4.2.5", "@smithy/protocol-http": "^5.3.5", "@smithy/smithy-client": "^4.9.10", "@smithy/types": "^4.9.0", "@smithy/util-stream": "^4.5.6", "tslib": "^2.6.2" } }, "sha512-inF09lh9SlHj63Vmr5d+LmwPXZc2IbK8lAruhOr3KLsZAIHEgHgGPXWDC2ukTEMzg0pkexQ6FOhXXad6klK4RA=="], @@ -4841,6 +4843,8 @@ "rimraf/glob/path-scurry/lru-cache": ["lru-cache@10.4.3", "", {}, "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="], + "sim/tailwindcss/chokidar/fsevents": ["fsevents@2.3.2", "", { "os": "darwin" }, "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA=="], + "sim/tailwindcss/chokidar/glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="], "sim/tailwindcss/chokidar/readdirp": ["readdirp@3.6.0", "", { "dependencies": { "picomatch": "^2.2.1" } }, 
"sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA=="], @@ -4849,10 +4853,22 @@ "test-exclude/glob/path-scurry/lru-cache": ["lru-cache@10.4.3", "", {}, "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/nested-clients/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], + + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/client-sso/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.985.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", 
"@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-TsWwKzb/2WHafAY0CE7uXgLj0FmnkBTgfioG9HO+7z/zCPcl1+YU+i7dW4o0y+aFxFgxTMG+ExBQpqT/k2ao8g=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity/@aws-sdk/nested-clients/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], + + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/nested-clients/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], + + 
"@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/client-sso/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.985.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", 
"@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-TsWwKzb/2WHafAY0CE7uXgLj0FmnkBTgfioG9HO+7z/zCPcl1+YU+i7dW4o0y+aFxFgxTMG+ExBQpqT/k2ao8g=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity/@aws-sdk/nested-clients/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], + "@aws-sdk/client-sqs/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.947.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "3.947.0", "@aws-sdk/middleware-host-header": "3.936.0", "@aws-sdk/middleware-logger": "3.936.0", "@aws-sdk/middleware-recursion-detection": "3.936.0", "@aws-sdk/middleware-user-agent": "3.947.0", "@aws-sdk/region-config-resolver": "3.936.0", "@aws-sdk/types": "3.936.0", "@aws-sdk/util-endpoints": "3.936.0", "@aws-sdk/util-user-agent-browser": "3.936.0", "@aws-sdk/util-user-agent-node": "3.947.0", "@smithy/config-resolver": "^4.4.3", "@smithy/core": "^3.18.7", "@smithy/fetch-http-handler": "^5.3.6", "@smithy/hash-node": "^4.2.5", "@smithy/invalid-dependency": "^4.2.5", "@smithy/middleware-content-length": "^4.2.5", "@smithy/middleware-endpoint": "^4.3.14", "@smithy/middleware-retry": "^4.4.14", "@smithy/middleware-serde": "^4.2.6", "@smithy/middleware-stack": "^4.2.5", "@smithy/node-config-provider": "^4.3.5", "@smithy/node-http-handler": "^4.4.5", "@smithy/protocol-http": "^5.3.5", "@smithy/smithy-client": "^4.9.10", "@smithy/types": "^4.9.0", "@smithy/url-parser": "^4.2.5", "@smithy/util-base64": "^4.3.0", 
"@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.13", "@smithy/util-defaults-mode-node": "^4.2.16", "@smithy/util-endpoints": "^3.2.5", "@smithy/util-middleware": "^4.2.5", "@smithy/util-retry": "^4.2.5", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-DjRJEYNnHUTu9kGPPQDTSXquwSEd6myKR4ssI4FaYLFhdT3ldWpj73yYt807H3tdmhS7vPmdVqchSJnjurUQAw=="], "@browserbasehq/stagehand/@anthropic-ai/sdk/node-fetch/whatwg-url/tr46": ["tr46@0.0.3", "", {}, "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="], @@ -4891,6 +4907,10 @@ "test-exclude/glob/jackspeak/@isaacs/cliui/wrap-ansi": ["wrap-ansi@8.1.0", "", { "dependencies": { "ansi-styles": "^6.1.0", "string-width": "^5.0.1", "strip-ansi": "^7.0.1" } }, "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers/@aws-sdk/nested-clients/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], + + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers/@aws-sdk/nested-clients/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], + "lint-staged/listr2/cli-truncate/string-width/strip-ansi/ansi-regex": ["ansi-regex@6.2.2", "", {}, 
"sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], "lint-staged/listr2/log-update/cli-cursor/restore-cursor/onetime": ["onetime@7.0.0", "", { "dependencies": { "mimic-function": "^5.0.0" } }, "sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ=="], From 621dd238a8c0693efab3303fff250f607692d8c5 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan Date: Mon, 9 Feb 2026 19:27:17 -0800 Subject: [PATCH 72/72] Nuke bad files --- apps/sim/lib/workflows/blocks/index.ts | 7 - .../lib/workflows/blocks/schema-resolver.ts | 208 ------------------ apps/sim/lib/workflows/blocks/schema-types.ts | 75 ------- 3 files changed, 290 deletions(-) delete mode 100644 apps/sim/lib/workflows/blocks/index.ts delete mode 100644 apps/sim/lib/workflows/blocks/schema-resolver.ts delete mode 100644 apps/sim/lib/workflows/blocks/schema-types.ts diff --git a/apps/sim/lib/workflows/blocks/index.ts b/apps/sim/lib/workflows/blocks/index.ts deleted file mode 100644 index 7f9067cc0a..0000000000 --- a/apps/sim/lib/workflows/blocks/index.ts +++ /dev/null @@ -1,7 +0,0 @@ -export { BlockSchemaResolver, blockSchemaResolver } from './schema-resolver' -export type { - ResolvedBlock, - ResolvedOption, - ResolvedOutput, - ResolvedSubBlock, -} from './schema-types' diff --git a/apps/sim/lib/workflows/blocks/schema-resolver.ts b/apps/sim/lib/workflows/blocks/schema-resolver.ts deleted file mode 100644 index cd02992e77..0000000000 --- a/apps/sim/lib/workflows/blocks/schema-resolver.ts +++ /dev/null @@ -1,208 +0,0 @@ -import { createLogger } from '@sim/logger' -import { getAllBlocks, getBlock } from '@/blocks/registry' -import type { BlockConfig, SubBlockConfig } from '@/blocks/types' -import type { - ResolvedBlock, - ResolvedOption, - ResolvedOutput, - ResolvedSubBlock, -} from './schema-types' - -const logger = createLogger('BlockSchemaResolver') - -/** - * BlockSchemaResolver provides typed access to block configurations. 
- * - * It wraps the raw block registry and returns resolved, typed schemas - * that consumers can use without any type assertions. - */ -export class BlockSchemaResolver { - private cache = new Map() - - /** Resolve a single block by type */ - resolveBlock(type: string): ResolvedBlock | null { - const cached = this.cache.get(type) - if (cached) return cached - - const config = getBlock(type) - if (!config) return null - - const resolved = this.buildResolvedBlock(config) - this.cache.set(type, resolved) - return resolved - } - - /** Resolve all available blocks */ - resolveAllBlocks(options?: { includeHidden?: boolean }): ResolvedBlock[] { - const configs = getAllBlocks() - return configs - .filter((config) => options?.includeHidden || !config.hideFromToolbar) - .map((config) => this.resolveBlock(config.type)) - .filter((block): block is ResolvedBlock => block !== null) - } - - /** Clear the cache (call when block registry changes) */ - clearCache(): void { - this.cache.clear() - } - - private buildResolvedBlock(config: BlockConfig): ResolvedBlock { - return { - type: config.type, - name: config.name, - description: config.description, - category: config.category, - icon: config.icon as unknown as ResolvedBlock['icon'], - isTrigger: this.isTriggerBlock(config), - hideFromToolbar: config.hideFromToolbar ?? false, - subBlocks: config.subBlocks.map((subBlock) => this.resolveSubBlock(subBlock)), - outputs: this.resolveOutputs(config), - supportsTriggerMode: this.supportsTriggerMode(config), - hasAdvancedMode: config.subBlocks.some((subBlock) => subBlock.mode === 'advanced'), - raw: config, - } - } - - private resolveSubBlock(sb: SubBlockConfig): ResolvedSubBlock { - const resolved: ResolvedSubBlock = { - id: sb.id, - type: sb.type, - label: sb.title, - placeholder: sb.placeholder, - required: typeof sb.required === 'boolean' ? 
sb.required : undefined, - password: sb.password, - hasCondition: Boolean(sb.condition), - defaultValue: sb.defaultValue, - validation: { - min: sb.min, - max: sb.max, - pattern: this.resolvePattern(sb), - }, - } - - const condition = this.resolveCondition(sb) - if (condition) { - resolved.condition = condition - } - - const options = this.resolveOptions(sb) - if (options.length > 0) { - resolved.options = options - } - - if (!resolved.validation?.min && !resolved.validation?.max && !resolved.validation?.pattern) { - resolved.validation = undefined - } - - return resolved - } - - private resolveCondition(sb: SubBlockConfig): ResolvedSubBlock['condition'] | undefined { - try { - const condition = typeof sb.condition === 'function' ? sb.condition() : sb.condition - if (!condition || typeof condition !== 'object') { - return undefined - } - - return { - field: String(condition.field), - value: condition.value, - } - } catch (error) { - logger.warn('Failed to resolve sub-block condition', { - subBlockId: sb.id, - error: error instanceof Error ? error.message : String(error), - }) - return undefined - } - } - - private resolveOptions(sb: SubBlockConfig): ResolvedOption[] { - try { - if (Array.isArray(sb.options)) { - return sb.options.map((opt) => { - if (typeof opt === 'string') { - return { label: opt, value: opt } - } - - const label = String(opt.label || opt.id || '') - const value = String(opt.id || opt.label || '') - - return { - label, - value, - id: opt.id, - } - }) - } - - // For function-based or dynamic options, return empty. - // Consumers can evaluate these options if they need runtime resolution. - return [] - } catch (error) { - logger.warn('Failed to resolve sub-block options', { - subBlockId: sb.id, - error: error instanceof Error ? 
error.message : String(error), - }) - return [] - } - } - - private resolveOutputs(config: BlockConfig): ResolvedOutput[] { - try { - // eslint-disable-next-line @typescript-eslint/no-require-imports - const blockOutputs = require('@/lib/workflows/blocks/block-outputs') as { - getBlockOutputPaths: ( - blockType: string, - subBlocks?: Record, - triggerMode?: boolean - ) => string[] - } - - const paths = blockOutputs.getBlockOutputPaths(config.type, {}, false) - return paths.map((path) => ({ - name: path, - type: 'string', - })) - } catch (error) { - logger.warn('Failed to resolve block outputs, using fallback', { - blockType: config.type, - error: error instanceof Error ? error.message : String(error), - }) - return [{ name: 'result', type: 'string' }] - } - } - - private isTriggerBlock(config: BlockConfig): boolean { - try { - // eslint-disable-next-line @typescript-eslint/no-require-imports - const triggerUtils = require('@/lib/workflows/triggers/input-definition-triggers') as { - isInputDefinitionTrigger: (blockType: string) => boolean - } - return triggerUtils.isInputDefinitionTrigger(config.type) - } catch (error) { - logger.warn('Failed to detect trigger block, using fallback', { - blockType: config.type, - error: error instanceof Error ? error.message : String(error), - }) - return config.type === 'starter' - } - } - - private supportsTriggerMode(config: BlockConfig): boolean { - return Boolean( - config.triggerAllowed || - config.subBlocks.some( - (subBlock) => subBlock.id === 'triggerMode' || subBlock.mode === 'trigger' - ) - ) - } - - private resolvePattern(sb: SubBlockConfig): string | undefined { - const maybePattern = (sb as SubBlockConfig & { pattern?: string }).pattern - return typeof maybePattern === 'string' ? 
maybePattern : undefined - } -} - -/** Singleton resolver instance */ -export const blockSchemaResolver = new BlockSchemaResolver() diff --git a/apps/sim/lib/workflows/blocks/schema-types.ts b/apps/sim/lib/workflows/blocks/schema-types.ts deleted file mode 100644 index 068b3b9370..0000000000 --- a/apps/sim/lib/workflows/blocks/schema-types.ts +++ /dev/null @@ -1,75 +0,0 @@ -import type { LucideIcon } from 'lucide-react' - -/** A fully resolved block schema with all sub-blocks expanded */ -export interface ResolvedBlock { - type: string - name: string - description?: string - category: string - icon?: LucideIcon - isTrigger: boolean - hideFromToolbar: boolean - - /** Resolved sub-blocks with options, conditions, and validation info */ - subBlocks: ResolvedSubBlock[] - - /** Block-level outputs */ - outputs: ResolvedOutput[] - - /** Whether this block supports trigger mode */ - supportsTriggerMode: boolean - - /** Whether this block has advanced mode */ - hasAdvancedMode: boolean - - /** Raw config reference for consumers that need it */ - raw: unknown -} - -/** A resolved sub-block with options and metadata */ -export interface ResolvedSubBlock { - id: string - type: string - label?: string - placeholder?: string - required?: boolean - password?: boolean - - /** Resolved options (for dropdowns/selectors, etc.) 
*/ - options?: ResolvedOption[] - - /** Whether this sub-block has a condition that controls visibility */ - hasCondition: boolean - - /** Condition details if present */ - condition?: { - field: string - value: unknown - /** Whether condition is currently met (if evaluable statically) */ - met?: boolean - } - - /** Validation constraints */ - validation?: { - min?: number - max?: number - pattern?: string - } - - /** Default value */ - defaultValue?: unknown -} - -/** A resolved option for dropdowns/selectors */ -export interface ResolvedOption { - label: string - value: string - id?: string -} - -/** A resolved output definition */ -export interface ResolvedOutput { - name: string - type: string - description?: string -}