diff --git a/core/llm/openaiTypeConverters.test.ts b/core/llm/openaiTypeConverters.test.ts
index b0c63862f4d..4568e8a9f2c 100644
--- a/core/llm/openaiTypeConverters.test.ts
+++ b/core/llm/openaiTypeConverters.test.ts
@@ -22,6 +22,11 @@ type MessageItem = {
   content: unknown;
 };
 
+type ReasoningItem = {
+  type: "reasoning";
+  id: string;
+};
+
 // Helper functions for filtering results
 function getFunctionCalls(items: ResponseInputItem[]): FunctionCallItem[] {
   return items.filter(
@@ -47,6 +52,12 @@ function getMessagesByRole(
   }) as unknown as MessageItem[];
 }
 
+function getReasoningItems(items: ResponseInputItem[]): ReasoningItem[] {
+  return items.filter(
+    (item) => (item as { type?: string }).type === "reasoning",
+  ) as unknown as ReasoningItem[];
+}
+
 describe("openaiTypeConverters", () => {
   describe("toResponsesInput", () => {
     describe("tool calls handling - OpenAI Responses API", () => {
@@ -496,6 +507,44 @@ describe("openaiTypeConverters", () => {
       expect(userMessages.length).toBe(2);
     });
 
+    it("should skip trailing thinking messages without a following assistant item", () => {
+      const messages: ChatMessage[] = [
+        {
+          role: "thinking",
+          content: "",
+          reasoning_details: [{ type: "reasoning_id", id: "rs_trailing" }],
+        } as ChatMessage,
+      ];
+
+      const result = toResponsesInput(messages);
+
+      const reasoningItems = getReasoningItems(result);
+      expect(reasoningItems.length).toBe(0);
+    });
+
+    it("should preserve thinking messages when followed by assistant output", () => {
+      const messages: ChatMessage[] = [
+        {
+          role: "thinking",
+          content: "",
+          reasoning_details: [{ type: "reasoning_id", id: "rs_kept" }],
+        } as ChatMessage,
+        {
+          role: "assistant",
+          content: "Done.",
+        } as ChatMessage,
+      ];
+
+      const result = toResponsesInput(messages);
+
+      const reasoningItems = getReasoningItems(result);
+      const assistantMessages = getMessagesByRole(result, "assistant");
+
+      expect(reasoningItems.length).toBe(1);
+      expect(reasoningItems[0].id).toBe("rs_kept");
+      expect(assistantMessages.length).toBe(1);
+    });
+
     it("should handle system message (converted to developer role)", () => {
       const messages: ChatMessage[] = [
         { role: "system", content: "You are a helpful assistant" },
diff --git a/core/llm/openaiTypeConverters.ts b/core/llm/openaiTypeConverters.ts
index 9280a51c706..86f8e8fbc44 100644
--- a/core/llm/openaiTypeConverters.ts
+++ b/core/llm/openaiTypeConverters.ts
@@ -868,6 +868,18 @@ function convertThinkingMessageToReasoningItem(
   return reasoningItem;
 }
 
+function hasFollowingAssistantMessage(
+  messages: ChatMessage[],
+  startIndex: number,
+): boolean {
+  for (let i = startIndex + 1; i < messages.length; i++) {
+    const nextRole = messages[i]?.role;
+    if (nextRole === "thinking") continue;
+    return nextRole === "assistant";
+  }
+  return false;
+}
+
 export function toResponsesInput(messages: ChatMessage[]): ResponseInput {
   const input: ResponseInput = [];
 
@@ -967,6 +979,10 @@ export function toResponsesInput(messages: ChatMessage[]): ResponseInput {
         break;
       }
       case "thinking": {
+        if (!hasFollowingAssistantMessage(messages, i)) {
+          break;
+        }
+
         const reasoningItem = convertThinkingMessageToReasoningItem(
           msg as ThinkingChatMessage,
         );