From 34ad9613dce0cb4657411b11cc5a204aece4af29 Mon Sep 17 00:00:00 2001
From: BinaryBeastMaster
Date: Mon, 18 Aug 2025 16:37:00 -0700
Subject: [PATCH] feat: add prompt preprocessor flag and condensed prompt

---
 packages/types/src/experiment.ts              |  17 +-
 src/api/index.ts                              |  19 ++-
 src/api/providers/anthropic.ts                |  42 ++---
 src/api/providers/openai-native.ts            |  23 +--
 src/api/providers/openai.ts                   |  89 +++++-----
 src/core/orchestration/prompt-preprocessor.ts |   7 +
 .../prompts/__tests__/system-prompt.spec.ts   |   9 +-
 src/core/prompts/system.ts                    | 157 +++++++++---------
 src/core/task/Task.ts                         |  68 ++++----
 src/core/webview/ClineProvider.ts             |  15 +-
 src/core/webview/generateSystemPrompt.ts      |  16 +-
 src/package.json                              |  25 +--
 src/package.nls.json                          |   5 +-
 src/shared/__tests__/experiments.spec.ts      |  45 ++---
 src/shared/experiments.ts                     |  18 +-
 .../__tests__/ExtensionStateContext.spec.tsx  |  40 ++---
 webview-ui/src/i18n/locales/en/settings.json  |  22 ++-
 17 files changed, 336 insertions(+), 281 deletions(-)
 create mode 100644 src/core/orchestration/prompt-preprocessor.ts

diff --git a/packages/types/src/experiment.ts b/packages/types/src/experiment.ts
index 6574124629..5ab1c2173a 100644
--- a/packages/types/src/experiment.ts
+++ b/packages/types/src/experiment.ts
@@ -6,7 +6,13 @@ import type { Keys, Equals, AssertEqual } from "./type-fu.js"
  * ExperimentId
  */

-export const experimentIds = ["powerSteering", "multiFileApplyDiff", "preventFocusDisruption", "assistantMessageParser"] as const
+export const experimentIds = [
+	"powerSteering",
+	"multiFileApplyDiff",
+	"preventFocusDisruption",
+	"assistantMessageParser",
+	"promptPreprocessor",
+] as const

 export const experimentIdsSchema = z.enum(experimentIds)

@@ -17,10 +23,11 @@ export type ExperimentId = z.infer<typeof experimentIdsSchema>
  */

 export const experimentsSchema = z.object({
-	powerSteering: z.boolean().optional(),
-	multiFileApplyDiff: z.boolean().optional(),
-	preventFocusDisruption: z.boolean().optional(),
-	assistantMessageParser: z.boolean().optional(),
+	powerSteering: z.boolean().optional(),
+	multiFileApplyDiff: z.boolean().optional(),
+	preventFocusDisruption: z.boolean().optional(),
+	assistantMessageParser: z.boolean().optional(),
+	promptPreprocessor: z.boolean().optional(),
 })

 export type Experiments = z.infer<typeof experimentsSchema>

diff --git a/src/api/index.ts b/src/api/index.ts
index 92a5c95770..6126562752 100644
--- a/src/api/index.ts
+++ b/src/api/index.ts
@@ -43,15 +43,16 @@ export interface SingleCompletionHandler {
 }

 export interface ApiHandlerCreateMessageMetadata {
-	mode?: string
-	taskId: string
-	previousResponseId?: string
-	/**
-	 * When true, the provider must NOT fall back to internal continuity state
-	 * (e.g., lastResponseId) if previousResponseId is absent.
-	 * Used to enforce "skip once" after a condense operation.
-	 */
-	suppressPreviousResponseId?: boolean
+	mode?: string
+	taskId: string
+	previousResponseId?: string
+	/**
+	 * When true, the provider must NOT fall back to internal continuity state
+	 * (e.g., lastResponseId) if previousResponseId is absent.
+	 * Used to enforce "skip once" after a condense operation.
+	 */
+	suppressPreviousResponseId?: boolean
+	useCondensedPrompt?: boolean
 }

 export interface ApiHandler {

diff --git a/src/api/providers/anthropic.ts b/src/api/providers/anthropic.ts
index cb48492b60..7976db8d66 100644
--- a/src/api/providers/anthropic.ts
+++ b/src/api/providers/anthropic.ts
@@ -77,18 +77,18 @@ export class AnthropicHandler extends BaseProvider implements SingleCompletionHa
 			const lastUserMsgIndex = userMsgIndices[userMsgIndices.length - 1] ?? -1
 			const secondLastMsgUserIndex = userMsgIndices[userMsgIndices.length - 2] ?? -1
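+			// `systemPromptToUse` below is assumed to be derived from `systemPrompt`
+			// earlier in createMessage (condensed upstream when the promptPreprocessor
+			// experiment is on), mirroring the other providers in this change.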
-			stream = await this.client.messages.create(
-				{
-					model: modelId,
-					max_tokens: maxTokens ?? ANTHROPIC_DEFAULT_MAX_TOKENS,
-					temperature,
-					thinking,
-					// Setting cache breakpoint for system prompt so new tasks can reuse it.
-					system: [{ text: systemPrompt, type: "text", cache_control: cacheControl }],
-					messages: messages.map((message, index) => {
-						if (index === lastUserMsgIndex || index === secondLastMsgUserIndex) {
-							return {
-								...message,
+			stream = await this.client.messages.create(
+				{
+					model: modelId,
+					max_tokens: maxTokens ?? ANTHROPIC_DEFAULT_MAX_TOKENS,
+					temperature,
+					thinking,
+					// Setting cache breakpoint for system prompt so new tasks can reuse it.
+					system: [{ text: systemPromptToUse, type: "text", cache_control: cacheControl }],
+					messages: messages.map((message, index) => {
+						if (index === lastUserMsgIndex || index === secondLastMsgUserIndex) {
+							return {
+								...message,
 								content:
 									typeof message.content === "string"
 										? [{ type: "text", text: message.content, cache_control: cacheControl }]
@@ -128,15 +128,15 @@
 				break
 			}
 			default: {
-				stream = (await this.client.messages.create({
-					model: modelId,
-					max_tokens: maxTokens ?? ANTHROPIC_DEFAULT_MAX_TOKENS,
-					temperature,
-					system: [{ text: systemPrompt, type: "text" }],
-					messages,
-					stream: true,
-				})) as any
-				break
+				stream = (await this.client.messages.create({
+					model: modelId,
+					max_tokens: maxTokens ?? ANTHROPIC_DEFAULT_MAX_TOKENS,
+					temperature,
+					system: [{ text: systemPromptToUse, type: "text" }],
+					messages,
+					stream: true,
+				})) as any
+				break
 			}
 		}

diff --git a/src/api/providers/openai-native.ts b/src/api/providers/openai-native.ts
index 2ba8566963..b108a28294 100644
--- a/src/api/providers/openai-native.ts
+++ b/src/api/providers/openai-native.ts
@@ -97,12 +97,13 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 		}
 	}

-	override async *createMessage(
-		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
-		metadata?: ApiHandlerCreateMessageMetadata,
-	): ApiStream {
-		const model = this.getModel()
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
+		const model = this.getModel()
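+		// When the promptPreprocessor experiment is enabled, Task builds the system
+		// prompt with `condensed: true` and sets `metadata.useCondensedPrompt`, so the
+		// prompt arriving here is already condensed.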
+		const systemPromptToUse = systemPrompt

 		let id: "o3-mini" | "o3" | "o4-mini" | undefined
 		if (model.id.startsWith("o3-mini")) {
@@ -114,15 +115,15 @@
 		}

 		if (id) {
-			yield* this.handleReasonerMessage(model, id, systemPrompt, messages)
+			yield* this.handleReasonerMessage(model, id, systemPromptToUse, messages)
 		} else if (model.id.startsWith("o1")) {
-			yield* this.handleO1FamilyMessage(model, systemPrompt, messages)
+			yield* this.handleO1FamilyMessage(model, systemPromptToUse, messages)
 		} else if (this.isResponsesApiModel(model.id)) {
 			// Both GPT-5 and Codex Mini use the v1/responses endpoint
-			yield* this.handleResponsesApiMessage(model, systemPrompt, messages, metadata)
+			yield* this.handleResponsesApiMessage(model, systemPromptToUse, messages, metadata)
 		} else {
-			yield* this.handleDefaultModelMessage(model, systemPrompt, messages)
-		}
+			yield* this.handleDefaultModelMessage(model, systemPromptToUse, messages)
+		}
 	}

 	private async *handleO1FamilyMessage(

diff --git a/src/api/providers/openai.ts b/src/api/providers/openai.ts
index 939816480a..d355e86cfb 100644
--- a/src/api/providers/openai.ts
+++ b/src/api/providers/openai.ts
@@ -78,13 +78,14 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 		}
 	}

-	override async *createMessage(
-		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
-		metadata?: ApiHandlerCreateMessageMetadata,
-	): ApiStream {
-		const { info: modelInfo, reasoning } = this.getModel()
-		const modelUrl = this.options.openAiBaseUrl ?? ""
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
+		// Condensing, when enabled, is applied upstream in SYSTEM_PROMPT.
+		const systemPromptToUse = systemPrompt
+		const { info: modelInfo, reasoning } = this.getModel()
+		const modelUrl = this.options.openAiBaseUrl ?? ""
 		const modelId = this.options.openAiModelId ?? ""
 		const enabledR1Format = this.options.openAiR1FormatEnabled ?? false
 		const enabledLegacyFormat = this.options.openAiLegacyFormat ?? false
@@ -93,38 +94,38 @@
 		const ark = modelUrl.includes(".volces.com")

 		if (modelId.includes("o1") || modelId.includes("o3") || modelId.includes("o4")) {
-			yield* this.handleO3FamilyMessage(modelId, systemPrompt, messages)
-			return
-		}
+			yield* this.handleO3FamilyMessage(modelId, systemPromptToUse, messages)
+			return
+		}
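+		// Both the streaming and non-streaming paths below substitute
+		// systemPromptToUse for the raw system prompt.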
 		if (this.options.openAiStreamingEnabled ?? true) {
-			let systemMessage: OpenAI.Chat.ChatCompletionSystemMessageParam = {
-				role: "system",
-				content: systemPrompt,
-			}
+			let systemMessage: OpenAI.Chat.ChatCompletionSystemMessageParam = {
+				role: "system",
+				content: systemPromptToUse,
+			}

 			let convertedMessages

-			if (deepseekReasoner) {
-				convertedMessages = convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
-			} else if (ark || enabledLegacyFormat) {
-				convertedMessages = [systemMessage, ...convertToSimpleMessages(messages)]
-			} else {
-				if (modelInfo.supportsPromptCache) {
-					systemMessage = {
-						role: "system",
-						content: [
-							{
-								type: "text",
-								text: systemPrompt,
-								// @ts-ignore-next-line
-								cache_control: { type: "ephemeral" },
-							},
-						],
-					}
-				}
-
-				convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages)]
+			if (deepseekReasoner) {
+				convertedMessages = convertToR1Format([{ role: "user", content: systemPromptToUse }, ...messages])
+			} else if (ark || enabledLegacyFormat) {
+				convertedMessages = [systemMessage, ...convertToSimpleMessages(messages)]
+			} else {
+				if (modelInfo.supportsPromptCache) {
+					systemMessage = {
+						role: "system",
+						content: [
+							{
+								type: "text",
+								text: systemPromptToUse,
+								// @ts-ignore-next-line
+								cache_control: { type: "ephemeral" },
+							},
+						],
+					}
+				}
+
+				convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages)]

 			if (modelInfo.supportsPromptCache) {
 				// Note: the following logic is copied from openrouter:
@@ -212,18 +213,18 @@
 			}
 		} else {
 			// o1 for instance doesn't support streaming, non-1 temp, or system prompt
-			const systemMessage: OpenAI.Chat.ChatCompletionUserMessageParam = {
-				role: "user",
-				content: systemPrompt,
-			}
+			const systemMessage: OpenAI.Chat.ChatCompletionUserMessageParam = {
+				role: "user",
+				content: systemPromptToUse,
+			}

 			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
-				model: modelId,
-				messages: deepseekReasoner
-					? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
-					: enabledLegacyFormat
-						? [systemMessage, ...convertToSimpleMessages(messages)]
-						: [systemMessage, ...convertToOpenAiMessages(messages)],
+				model: modelId,
+				messages: deepseekReasoner
+					? convertToR1Format([{ role: "user", content: systemPromptToUse }, ...messages])
+					: enabledLegacyFormat
+						? [systemMessage, ...convertToSimpleMessages(messages)]
+						: [systemMessage, ...convertToOpenAiMessages(messages)],
 			}

 			// Add max_tokens if needed

diff --git a/src/core/orchestration/prompt-preprocessor.ts b/src/core/orchestration/prompt-preprocessor.ts
new file mode 100644
index 0000000000..bca8640314
--- /dev/null
+++ b/src/core/orchestration/prompt-preprocessor.ts
@@ -0,0 +1,7 @@
+export type PreprocessorDecision =
+	| { decision: "tool"; tool: { name: string; params: Record<string, unknown> }; reasoning?: string }
+	| { decision: "big"; reasoning?: string }
+
+export async function decidePreprocessor(): Promise<PreprocessorDecision> {
+	return { decision: "big" }
+}
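+
+// Illustrative sketch (not part of this change): how a caller might branch on the
+// decision. `runTool` and `callBigModel` are hypothetical helpers, named here only
+// to show the intended routing:
+//
+//   const d = await decidePreprocessor()
+//   if (d.decision === "tool") {
+//     await runTool(d.tool.name, d.tool.params)
+//   } else {
+//     await callBigModel()
+//   }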
diff --git a/src/core/prompts/__tests__/system-prompt.spec.ts b/src/core/prompts/__tests__/system-prompt.spec.ts
index 4d5579408c..eaa53e937d 100644
--- a/src/core/prompts/__tests__/system-prompt.spec.ts
+++ b/src/core/prompts/__tests__/system-prompt.spec.ts
@@ -638,7 +638,7 @@ describe("SYSTEM_PROMPT", () => {
 		expect(prompt).toContain("## update_todo_list")
 	})

-	it("should include update_todo_list tool when todoListEnabled is undefined", async () => {
+	it("should include update_todo_list tool when todoListEnabled is undefined", async () => {
 		const settings = {
 			maxConcurrentFileReads: 5,
 			todoListEnabled: true,
@@ -665,9 +665,10 @@
 			settings, // settings
 		)

-		expect(prompt).toContain("update_todo_list")
-		expect(prompt).toContain("## update_todo_list")
-	})
+		expect(prompt).toContain("update_todo_list")
+		expect(prompt).toContain("## update_todo_list")
+	})
+

 	afterAll(() => {
 		vi.restoreAllMocks()

diff --git a/src/core/prompts/system.ts b/src/core/prompts/system.ts
index 4ed1185da7..496c927736 100644
--- a/src/core/prompts/system.ts
+++ b/src/core/prompts/system.ts
@@ -43,24 +43,25 @@ export function getPromptComponent(
 }

 async function generatePrompt(
-	context: vscode.ExtensionContext,
-	cwd: string,
-	supportsComputerUse: boolean,
-	mode: Mode,
-	mcpHub?: McpHub,
-	diffStrategy?: DiffStrategy,
-	browserViewportSize?: string,
-	promptComponent?: PromptComponent,
-	customModeConfigs?: ModeConfig[],
-	globalCustomInstructions?: string,
-	diffEnabled?: boolean,
-	experiments?: Record<string, boolean>,
-	enableMcpServerCreation?: boolean,
-	language?: string,
-	rooIgnoreInstructions?: string,
-	partialReadsEnabled?: boolean,
-	settings?: SystemPromptSettings,
-	todoList?: TodoItem[],
+	context: vscode.ExtensionContext,
+	cwd: string,
+	supportsComputerUse: boolean,
+	mode: Mode,
+	mcpHub?: McpHub,
+	diffStrategy?: DiffStrategy,
+	browserViewportSize?: string,
+	promptComponent?: PromptComponent,
+	customModeConfigs?: ModeConfig[],
+	globalCustomInstructions?: string,
+	diffEnabled?: boolean,
+	experiments?: Record<string, boolean>,
+	enableMcpServerCreation?: boolean,
+	language?: string,
+	rooIgnoreInstructions?: string,
+	partialReadsEnabled?: boolean,
+	settings?: SystemPromptSettings,
+	todoList?: TodoItem[],
+	condensed?: boolean,
 ): Promise<string> {
 	if (!context) {
 		throw new Error("Extension context is required for generating system prompt")
 	}
@@ -87,26 +88,28 @@
 	const codeIndexManager = CodeIndexManager.getInstance(context, cwd)

-	const basePrompt = `${roleDefinition}
+	const basePrompt = `${roleDefinition}

 ${markdownFormattingSection()}

-${getSharedToolUseSection()}
-
-${getToolDescriptionsForMode(
-	mode,
-	cwd,
-	supportsComputerUse,
-	codeIndexManager,
-	effectiveDiffStrategy,
-	browserViewportSize,
-	shouldIncludeMcp ? mcpHub : undefined,
-	customModeConfigs,
-	experiments,
-	partialReadsEnabled,
-	settings,
-	enableMcpServerCreation,
-)}
+${condensed ? "" : getSharedToolUseSection()}
+
+${condensed
+	? ""
+	: getToolDescriptionsForMode(
+			mode,
+			cwd,
+			supportsComputerUse,
+			codeIndexManager,
+			effectiveDiffStrategy,
+			browserViewportSize,
+			shouldIncludeMcp ? mcpHub : undefined,
+			customModeConfigs,
+			experiments,
+			partialReadsEnabled,
+			settings,
+			enableMcpServerCreation,
+		)}

 ${getToolUseGuidelinesSection(codeIndexManager)}

${getSystemInfoSection(cwd)}

${getObjectiveSection(codeIndexManager, experiments)}

 ${await addCustomInstructions(baseInstructions, globalCustomInstructions || "", cwd, mode, {
-	language: language ?? formatLanguage(vscode.env.language),
-	rooIgnoreInstructions,
-	settings,
+	language: language ?? formatLanguage(vscode.env.language),
+	rooIgnoreInstructions,
+	settings,
 })}`

 	return basePrompt
 }

 export const SYSTEM_PROMPT = async (
-	context: vscode.ExtensionContext,
-	cwd: string,
-	supportsComputerUse: boolean,
-	mcpHub?: McpHub,
-	diffStrategy?: DiffStrategy,
-	browserViewportSize?: string,
-	mode: Mode = defaultModeSlug,
-	customModePrompts?: CustomModePrompts,
-	customModes?: ModeConfig[],
-	globalCustomInstructions?: string,
-	diffEnabled?: boolean,
-	experiments?: Record<string, boolean>,
-	enableMcpServerCreation?: boolean,
-	language?: string,
-	rooIgnoreInstructions?: string,
-	partialReadsEnabled?: boolean,
-	settings?: SystemPromptSettings,
-	todoList?: TodoItem[],
+	context: vscode.ExtensionContext,
+	cwd: string,
+	supportsComputerUse: boolean,
+	mcpHub?: McpHub,
+	diffStrategy?: DiffStrategy,
+	browserViewportSize?: string,
+	mode: Mode = defaultModeSlug,
+	customModePrompts?: CustomModePrompts,
+	customModes?: ModeConfig[],
+	globalCustomInstructions?: string,
+	diffEnabled?: boolean,
+	experiments?: Record<string, boolean>,
+	enableMcpServerCreation?: boolean,
+	language?: string,
+	rooIgnoreInstructions?: string,
+	partialReadsEnabled?: boolean,
+	settings?: SystemPromptSettings,
+	todoList?: TodoItem[],
+	condensed?: boolean,
 ): Promise<string> => {
 	if (!context) {
 		throw new Error("Extension context is required for generating system prompt")
 	}
@@ -202,24 +206,25 @@
${customInstructions}`

 	// If diff is disabled, don't pass the diffStrategy
 	const effectiveDiffStrategy = diffEnabled ? diffStrategy : undefined

-	return generatePrompt(
-		context,
-		cwd,
-		supportsComputerUse,
-		currentMode.slug,
-		mcpHub,
-		effectiveDiffStrategy,
-		browserViewportSize,
-		promptComponent,
-		customModes,
-		globalCustomInstructions,
-		diffEnabled,
-		experiments,
-		enableMcpServerCreation,
-		language,
-		rooIgnoreInstructions,
-		partialReadsEnabled,
-		settings,
-		todoList,
-	)
+	return generatePrompt(
+		context,
+		cwd,
+		supportsComputerUse,
+		currentMode.slug,
+		mcpHub,
+		effectiveDiffStrategy,
+		browserViewportSize,
+		promptComponent,
+		customModes,
+		globalCustomInstructions,
+		diffEnabled,
+		experiments,
+		enableMcpServerCreation,
+		language,
+		rooIgnoreInstructions,
+		partialReadsEnabled,
+		settings,
+		todoList,
+		condensed,
+	)
 }

diff --git a/src/core/task/Task.ts b/src/core/task/Task.ts
index cff8d5aec3..15079bf586 100644
--- a/src/core/task/Task.ts
+++ b/src/core/task/Task.ts
@@ -2204,12 +2204,13 @@ export class Task extends EventEmitter implements TaskLike {
 				throw new Error("Provider not available")
 			}

-			return SYSTEM_PROMPT(
-				provider.context,
-				this.cwd,
-				(this.api.getModel().info.supportsComputerUse ?? false) && (browserToolEnabled ?? true),
-				mcpHub,
-				this.diffStrategy,
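+			// The condensed system prompt is gated on the promptPreprocessor experiment.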
+			const useCondensed = experiments?.[EXPERIMENT_IDS.PROMPT_PREPROCESSOR] ?? false
+			return SYSTEM_PROMPT(
+				provider.context,
+				this.cwd,
+				(this.api.getModel().info.supportsComputerUse ?? false) && (browserToolEnabled ?? true),
+				mcpHub,
+				this.diffStrategy,
 				browserViewportSize,
 				mode,
 				customModePrompts,
@@ -2221,28 +2222,31 @@
 				language,
 				rooIgnoreInstructions,
 				maxReadFileLine !== -1,
-				{
-					maxConcurrentFileReads: maxConcurrentFileReads ?? 5,
-					todoListEnabled: apiConfiguration?.todoListEnabled ?? true,
-					useAgentRules: vscode.workspace.getConfiguration("roo-cline").get<boolean>("useAgentRules") ?? true,
-				},
-			)
-		})()
-	}
+				{
+					maxConcurrentFileReads: maxConcurrentFileReads ?? 5,
+					todoListEnabled: apiConfiguration?.todoListEnabled ?? true,
+					useAgentRules: vscode.workspace.getConfiguration("roo-cline").get<boolean>("useAgentRules") ?? true,
+				},
+				undefined,
+				useCondensed,
+			)
+		})()
+	}

 	public async *attemptApiRequest(retryAttempt: number = 0): ApiStream {
 		const state = await this.providerRef.deref()?.getState()

-		const {
-			apiConfiguration,
-			autoApprovalEnabled,
-			alwaysApproveResubmit,
-			requestDelaySeconds,
-			mode,
-			autoCondenseContext = true,
-			autoCondenseContextPercent = 100,
-			profileThresholds = {},
-		} = state ?? {}
+		const {
+			apiConfiguration,
+			autoApprovalEnabled,
+			alwaysApproveResubmit,
+			requestDelaySeconds,
+			mode,
+			autoCondenseContext = true,
+			autoCondenseContextPercent = 100,
+			profileThresholds = {},
+			experiments,
+		} = state ?? {}

 		// Get condensing configuration for automatic triggers.
 		const customCondensingPrompt = state?.customCondensingPrompt
@@ -2393,13 +2397,15 @@
 			// non-fatal
 		}

-		const metadata: ApiHandlerCreateMessageMetadata = {
-			mode: mode,
-			taskId: this.taskId,
-			...(previousResponseId ? { previousResponseId } : {}),
-			// If a condense just occurred, explicitly suppress continuity fallback for the next call
-			...(this.skipPrevResponseIdOnce ? { suppressPreviousResponseId: true } : {}),
-		}
+		const useCondensed = experiments?.[EXPERIMENT_IDS.PROMPT_PREPROCESSOR] ?? false
+		const metadata: ApiHandlerCreateMessageMetadata = {
+			mode: mode,
+			taskId: this.taskId,
+			...(previousResponseId ? { previousResponseId } : {}),
+			// If a condense just occurred, explicitly suppress continuity fallback for the next call
+			...(this.skipPrevResponseIdOnce ? { suppressPreviousResponseId: true } : {}),
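+			// Tell the provider that the system prompt for this call was built condensed.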
+			...(useCondensed ? { useCondensedPrompt: true } : {}),
+		}

 		// Reset skip flag after applying (it only affects the immediate next call)
 		if (this.skipPrevResponseIdOnce) {

diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts
index 04d336d957..2215681a19 100644
--- a/src/core/webview/ClineProvider.ts
+++ b/src/core/webview/ClineProvider.ts
@@ -46,7 +46,7 @@ import { supportPrompt } from "../../shared/support-prompt"
 import { GlobalFileNames } from "../../shared/globalFileNames"
 import { ExtensionMessage, MarketplaceInstalledMetadata } from "../../shared/ExtensionMessage"
 import { Mode, defaultModeSlug, getModeBySlug } from "../../shared/modes"
-import { experimentDefault } from "../../shared/experiments"
+import { experimentDefault, EXPERIMENT_IDS } from "../../shared/experiments"
 import { formatLanguage } from "../../shared/language"
 import { WebviewMessage } from "../../shared/WebviewMessage"
 import { EMBEDDING_MODEL_PROFILES } from "../../shared/embeddingModels"
@@ -2024,9 +2024,16 @@
 			modeApiConfigs: stateValues.modeApiConfigs ?? ({} as Record<Mode, string>),
 			customModePrompts: stateValues.customModePrompts ?? {},
 			customSupportPrompts: stateValues.customSupportPrompts ?? {},
-			enhancementApiConfigId: stateValues.enhancementApiConfigId,
-			experiments: stateValues.experiments ?? experimentDefault,
-			autoApprovalEnabled: stateValues.autoApprovalEnabled ?? false,
+			enhancementApiConfigId: stateValues.enhancementApiConfigId,
+			experiments: (() => {
+				const cfg =
+					vscode.workspace.getConfiguration("roo-cline").get<boolean>("enablePromptPreprocessor") ?? false
+				const exps = { ...(stateValues.experiments ?? experimentDefault) }
+				exps[EXPERIMENT_IDS.PROMPT_PREPROCESSOR] = cfg
+				return exps
+			})(),
+			autoApprovalEnabled: stateValues.autoApprovalEnabled ?? false,
 			customModes,
 			maxOpenTabsContext: stateValues.maxOpenTabsContext ?? 20,
 			maxWorkspaceFiles: stateValues.maxWorkspaceFiles ?? 200,

diff --git a/src/core/webview/generateSystemPrompt.ts b/src/core/webview/generateSystemPrompt.ts
index 9b96dbedbb..394c72ed73 100644
--- a/src/core/webview/generateSystemPrompt.ts
+++ b/src/core/webview/generateSystemPrompt.ts
@@ -80,13 +80,15 @@ export const generateSystemPrompt = async (provider: ClineProvider, message: Web
 		enableMcpServerCreation,
 		language,
 		rooIgnoreInstructions,
-		maxReadFileLine !== -1,
-		{
-			maxConcurrentFileReads: maxConcurrentFileReads ?? 5,
-			todoListEnabled: apiConfiguration?.todoListEnabled ?? true,
-			useAgentRules: vscode.workspace.getConfiguration("roo-cline").get<boolean>("useAgentRules") ?? true,
-		},
-	)
+		maxReadFileLine !== -1,
+		{
+			maxConcurrentFileReads: maxConcurrentFileReads ?? 5,
+			todoListEnabled: apiConfiguration?.todoListEnabled ?? true,
+			useAgentRules: vscode.workspace.getConfiguration("roo-cline").get<boolean>("useAgentRules") ?? 
true, + }, + undefined, + undefined, + ) return systemPrompt } diff --git a/src/package.json b/src/package.json index 48e785d7b9..c4b33606d4 100644 --- a/src/package.json +++ b/src/package.json @@ -392,16 +392,21 @@ "default": true, "description": "%settings.useAgentRules.description%" }, - "roo-cline.apiRequestTimeout": { - "type": "number", - "default": 600, - "minimum": 0, - "maximum": 3600, - "description": "%settings.apiRequestTimeout.description%" - } - } - } - }, + "roo-cline.apiRequestTimeout": { + "type": "number", + "default": 600, + "minimum": 0, + "maximum": 3600, + "description": "%settings.apiRequestTimeout.description%" + }, + "roo-cline.enablePromptPreprocessor": { + "type": "boolean", + "default": false, + "description": "%settings.enablePromptPreprocessor.description%" + } + } + } + }, "scripts": { "lint": "eslint . --ext=ts --max-warnings=0", "check-types": "tsc --noEmit", diff --git a/src/package.nls.json b/src/package.nls.json index f1c132be65..3bbc7ccd9f 100644 --- a/src/package.nls.json +++ b/src/package.nls.json @@ -38,6 +38,7 @@ "settings.customStoragePath.description": "Custom storage path. Leave empty to use the default location. Supports absolute paths (e.g. 'D:\\RooCodeStorage')", "settings.enableCodeActions.description": "Enable Roo Code quick fixes", "settings.autoImportSettingsPath.description": "Path to a RooCode configuration file to automatically import on extension startup. Supports absolute paths and paths relative to the home directory (e.g. '~/Documents/roo-code-settings.json'). Leave empty to disable auto-import.", - "settings.useAgentRules.description": "Enable loading of AGENTS.md files for agent-specific rules (see https://agent-rules.org/)", - "settings.apiRequestTimeout.description": "Maximum time in seconds to wait for API responses (0 = no timeout, 1-3600s, default: 600s). Higher values are recommended for local providers like LM Studio and Ollama that may need more processing time." + "settings.useAgentRules.description": "Enable loading of AGENTS.md files for agent-specific rules (see https://agent-rules.org/)", + "settings.apiRequestTimeout.description": "Maximum time in seconds to wait for API responses (0 = no timeout, 1-3600s, default: 600s). Higher values are recommended for local providers like LM Studio and Ollama that may need more processing time.", + "settings.enablePromptPreprocessor.description": "Enable the prompt preprocessor to reduce token usage by condensing prompts and routing simple actions to tools before calling large models." 
}

diff --git a/src/shared/__tests__/experiments.spec.ts b/src/shared/__tests__/experiments.spec.ts
index 21401dc759..c03879bfd8 100644
--- a/src/shared/__tests__/experiments.spec.ts
+++ b/src/shared/__tests__/experiments.spec.ts
@@ -24,33 +24,36 @@ describe("experiments", () => {
 	})

 	describe("isEnabled", () => {
-		it("returns false when POWER_STEERING experiment is not enabled", () => {
-			const experiments: Record<ExperimentId, boolean> = {
-				powerSteering: false,
-				multiFileApplyDiff: false,
-				preventFocusDisruption: false,
-				assistantMessageParser: false,
-			}
+		it("returns false when POWER_STEERING experiment is not enabled", () => {
+			const experiments: Record<ExperimentId, boolean> = {
+				powerSteering: false,
+				multiFileApplyDiff: false,
+				preventFocusDisruption: false,
+				assistantMessageParser: false,
+				promptPreprocessor: false,
+			}
 			expect(Experiments.isEnabled(experiments, EXPERIMENT_IDS.POWER_STEERING)).toBe(false)
 		})

-		it("returns true when experiment POWER_STEERING is enabled", () => {
-			const experiments: Record<ExperimentId, boolean> = {
-				powerSteering: true,
-				multiFileApplyDiff: false,
-				preventFocusDisruption: false,
-				assistantMessageParser: false,
-			}
+		it("returns true when experiment POWER_STEERING is enabled", () => {
+			const experiments: Record<ExperimentId, boolean> = {
+				powerSteering: true,
+				multiFileApplyDiff: false,
+				preventFocusDisruption: false,
+				assistantMessageParser: false,
+				promptPreprocessor: false,
+			}
 			expect(Experiments.isEnabled(experiments, EXPERIMENT_IDS.POWER_STEERING)).toBe(true)
 		})

-		it("returns false when experiment is not present", () => {
-			const experiments: Record<ExperimentId, boolean> = {
-				powerSteering: false,
-				multiFileApplyDiff: false,
-				preventFocusDisruption: false,
-				assistantMessageParser: false,
-			}
+		it("returns false when experiment is not present", () => {
+			const experiments: Record<ExperimentId, boolean> = {
+				powerSteering: false,
+				multiFileApplyDiff: false,
+				preventFocusDisruption: false,
+				assistantMessageParser: false,
+				promptPreprocessor: false,
+			}
 			expect(Experiments.isEnabled(experiments, EXPERIMENT_IDS.POWER_STEERING)).toBe(false)
 		})
 	})

diff --git a/src/shared/experiments.ts b/src/shared/experiments.ts
index 4be89afa1a..b971efec62 100644
--- a/src/shared/experiments.ts
+++ b/src/shared/experiments.ts
@@ -1,10 +1,11 @@
 import type { AssertEqual, Equals, Keys, Values, ExperimentId, Experiments } from "@roo-code/types"

 export const EXPERIMENT_IDS = {
-	MULTI_FILE_APPLY_DIFF: "multiFileApplyDiff",
-	POWER_STEERING: "powerSteering",
-	PREVENT_FOCUS_DISRUPTION: "preventFocusDisruption",
-	ASSISTANT_MESSAGE_PARSER: "assistantMessageParser",
+	MULTI_FILE_APPLY_DIFF: "multiFileApplyDiff",
+	POWER_STEERING: "powerSteering",
+	PREVENT_FOCUS_DISRUPTION: "preventFocusDisruption",
+	ASSISTANT_MESSAGE_PARSER: "assistantMessageParser",
+	PROMPT_PREPROCESSOR: "promptPreprocessor",
 } as const satisfies Record<string, ExperimentId>

 type _AssertExperimentIds = AssertEqual<Equals<ExperimentId, Values<typeof EXPERIMENT_IDS>>>

@@ -16,10 +17,11 @@ interface ExperimentConfig {
 }

 export const experimentConfigsMap: Record<keyof typeof EXPERIMENT_IDS, ExperimentConfig> = {
-	MULTI_FILE_APPLY_DIFF: { enabled: false },
-	POWER_STEERING: { enabled: false },
-	PREVENT_FOCUS_DISRUPTION: { enabled: false },
-	ASSISTANT_MESSAGE_PARSER: { enabled: false },
+	MULTI_FILE_APPLY_DIFF: { enabled: false },
+	POWER_STEERING: { enabled: false },
+	PREVENT_FOCUS_DISRUPTION: { enabled: false },
+	ASSISTANT_MESSAGE_PARSER: { enabled: false },
+	PROMPT_PREPROCESSOR: { enabled: false },
 }

 export const experimentDefault = Object.fromEntries(

diff --git a/webview-ui/src/context/__tests__/ExtensionStateContext.spec.tsx b/webview-ui/src/context/__tests__/ExtensionStateContext.spec.tsx
index a688cac885..6776200c26 100644
--- a/webview-ui/src/context/__tests__/ExtensionStateContext.spec.tsx
+++ b/webview-ui/src/context/__tests__/ExtensionStateContext.spec.tsx
@@ -222,16 +222,17 @@ describe("mergeExtensionState", () => {
 		const newState: ExtensionState = {
 			...baseState,
 			apiConfiguration: { modelMaxThinkingTokens: 456, modelTemperature: 0.3 },
-			experiments: {
-				powerSteering: true,
-				marketplace: false,
-				disableCompletionCommand: false,
-				concurrentFileReads: true,
-				multiFileApplyDiff: true,
-				preventFocusDisruption: false,
-				assistantMessageParser: false,
-			} as Record<ExperimentId, boolean>,
-		}
+			experiments: {
+				powerSteering: true,
+				marketplace: false,
+				disableCompletionCommand: false,
+				concurrentFileReads: true,
+				multiFileApplyDiff: true,
+				preventFocusDisruption: false,
+				assistantMessageParser: false,
+				promptPreprocessor: false,
+			} as Record<ExperimentId, boolean>,
+		}

 		const result = mergeExtensionState(prevState, newState)

@@ -240,14 +241,15 @@
 		expect(result.apiConfiguration).toEqual({
 			modelMaxThinkingTokens: 456,
 			modelTemperature: 0.3,
 		})

-		expect(result.experiments).toEqual({
-			powerSteering: true,
-			marketplace: false,
-			disableCompletionCommand: false,
-			concurrentFileReads: true,
-			multiFileApplyDiff: true,
-			preventFocusDisruption: false,
-			assistantMessageParser: false,
-		})
+		expect(result.experiments).toEqual({
+			powerSteering: true,
+			marketplace: false,
+			disableCompletionCommand: false,
+			concurrentFileReads: true,
+			multiFileApplyDiff: true,
+			preventFocusDisruption: false,
+			assistantMessageParser: false,
+			promptPreprocessor: false,
+		})
 	})
})

diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json
index fca3d1ade9..ab3a2a7a58 100644
--- a/webview-ui/src/i18n/locales/en/settings.json
+++ b/webview-ui/src/i18n/locales/en/settings.json
@@ -709,15 +709,19 @@
 			"name": "Background editing",
 			"description": "Prevent editor focus disruption when enabled. File edits happen in the background without opening diff views or stealing focus. You can continue working uninterrupted while Roo makes changes. Files can be opened without focus to capture diagnostics or kept closed entirely."
 		},
-		"ASSISTANT_MESSAGE_PARSER": {
-			"name": "Use new message parser",
-			"description": "Enable the experimental streaming message parser that provides significant performance improvements for long assistant responses by processing messages more efficiently."
-		}
-	},
-	"promptCaching": {
-		"label": "Disable prompt caching",
-		"description": "When checked, Roo will not use prompt caching for this model."
-	},
+		"ASSISTANT_MESSAGE_PARSER": {
+			"name": "Use new message parser",
+			"description": "Enable the experimental streaming message parser that provides significant performance improvements for long assistant responses by processing messages more efficiently."
+		},
+		"PROMPT_PREPROCESSOR": {
+			"name": "Enable prompt preprocessor",
+			"description": "When enabled, Roo will condense prompts and route simple actions to tools before calling large models to reduce token usage."
+		}
+	},
+	"promptCaching": {
+		"label": "Disable prompt caching",
+		"description": "When checked, Roo will not use prompt caching for this model."
+	},
 	"temperature": {
 		"useCustom": "Use custom temperature",
 		"description": "Controls randomness in the model's responses.",