diff --git a/integrations/langgraph/typescript/src/message-conversion.test.ts b/integrations/langgraph/typescript/src/message-conversion.test.ts index ecbefbb286..ee5958d684 100644 --- a/integrations/langgraph/typescript/src/message-conversion.test.ts +++ b/integrations/langgraph/typescript/src/message-conversion.test.ts @@ -65,6 +65,104 @@ describe("Message Conversion - All Types", () => { expect(() => aguiMessagesToLangChain([msg])).toThrow("not supported"); }); + // Regression test: the AG-UI message history accumulated by the frontend + // includes a `role: "reasoning"` message whenever the agent emits + // REASONING_MESSAGE_* events. On the next turn, the frontend sends the + // full history back; the converter previously threw on the unknown role + // and the runtime surfaced it as a `RUN_ERROR` toast + // ("message role is not supported." / code INCOMPLETE_STREAM). + // + // Reasoning carries provider-specific encrypted state in + // `encryptedValue` (OpenAI Responses API `encrypted_content`, Anthropic + // `signature`) that providers use to maintain reasoning continuity + // across turns. We forward reasoning as an AIMessage with a + // `type: "reasoning"` content block so langchain-openai's Responses-API + // path threads it back as a reasoning input item. + it("should forward reasoning messages as AI messages with reasoning content blocks", () => { + const msgs: Message[] = [ + { id: "u1", role: "user", content: "Tokyo weather?" }, + { + id: "r1", + role: "reasoning", + content: "I should call get_weather.", + encryptedValue: "rs_encrypted_signature_abc", + } as any, + { id: "a1", role: "assistant", content: "Looking it up." 
}, + ]; + const result = aguiMessagesToLangChain(msgs); + expect(result).toHaveLength(3); + expect(result[0].type).toBe("human"); + expect(result[1].type).toBe("ai"); + const reasoningMsg = result[1] as any; + expect(Array.isArray(reasoningMsg.content)).toBe(true); + expect(reasoningMsg.content).toHaveLength(1); + expect(reasoningMsg.content[0].type).toBe("reasoning"); + expect(reasoningMsg.content[0].id).toBe("r1"); + expect(reasoningMsg.content[0].summary).toEqual([ + { type: "summary_text", text: "I should call get_weather." }, + ]); + // Encrypted state surfaces under both encrypted_content (OpenAI) and + // signature (Anthropic) so whichever provider serializes the message + // can pick up the reasoning state. + expect(reasoningMsg.content[0].encrypted_content).toBe("rs_encrypted_signature_abc"); + expect(reasoningMsg.content[0].signature).toBe("rs_encrypted_signature_abc"); + expect(result[2].type).toBe("ai"); + expect(result[2].content).toBe("Looking it up."); + }); + + it("should forward reasoning without encryptedValue (no signature key)", () => { + const msgs: Message[] = [ + { + id: "r1", + role: "reasoning", + content: "Plain rendered summary.", + } as any, + ]; + const result = aguiMessagesToLangChain(msgs); + expect(result).toHaveLength(1); + const block = (result[0] as any).content[0]; + expect(block.type).toBe("reasoning"); + expect(block.summary).toEqual([ + { type: "summary_text", text: "Plain rendered summary." }, + ]); + expect(block.encrypted_content).toBeUndefined(); + expect(block.signature).toBeUndefined(); + }); + + // Activity messages are display-only progress events (status pills, + // streaming progress bars, etc.) emitted via AG-UI events. They have + // no LLM-relevant content and no analogue in LangGraph's message + // types; skip rather than throw so multi-turn flows with activity + // history don't break. 
+ it("should skip activity messages instead of throwing", () => { + const msgs: Message[] = [ + { id: "u1", role: "user", content: "Run the search." }, + { + id: "act1", + role: "activity", + activityType: "search-progress", + content: { phase: "running" }, + } as any, + { id: "a1", role: "assistant", content: "Done." }, + ]; + const result = aguiMessagesToLangChain(msgs); + expect(result).toHaveLength(2); + expect(result[0].type).toBe("human"); + expect(result[1].type).toBe("ai"); + }); + + // OpenAI's "developer" role supersedes "system" on newer models; in + // LangChain it still maps to a SystemMessage. Map rather than throw so + // demo agents that set `role: "developer"` system prompts still work. + it("should convert developer message to system", () => { + const msg: Message = { id: "d1", role: "developer", content: "Be concise." } as any; + const result = aguiMessagesToLangChain([msg]); + expect(result).toHaveLength(1); + expect(result[0].type).toBe("system"); + expect((result[0] as any).role).toBe("system"); + expect(result[0].content).toBe("Be concise."); + }); + it("should preserve message ordering", () => { const msgs: Message[] = [ { id: "1", role: "user", content: "Q" }, diff --git a/integrations/langgraph/typescript/src/utils.ts b/integrations/langgraph/typescript/src/utils.ts index d939af04ce..41b5179995 100644 --- a/integrations/langgraph/typescript/src/utils.ts +++ b/integrations/langgraph/typescript/src/utils.ts @@ -219,7 +219,8 @@ export function langchainMessagesToAgui(messages: LangGraphMessage[]): Message[] } export function aguiMessagesToLangChain(messages: Message[]): LangGraphMessage[] { - return messages.map((message, index) => { + const result: LangGraphMessage[] = []; + for (const message of messages) { switch (message.role) { case "user": // Handle multimodal content @@ -232,14 +233,15 @@ export function aguiMessagesToLangChain(messages: Message[]): LangGraphMessage[] content = String(message.content); } - return { + result.push({ id: 
message.id, role: message.role, content, type: "human", - } as LangGraphMessage; + } as LangGraphMessage); + break; case "assistant": - return { + result.push({ id: message.id, type: "ai", role: message.role, @@ -250,27 +252,96 @@ export function aguiMessagesToLangChain(messages: Message[]): LangGraphMessage[] args: JSON.parse(tc.function.arguments), type: "tool_call", })), - }; + } as LangGraphMessage); + break; case "system": - return { + result.push({ id: message.id, role: message.role, content: message.content, type: "system", - }; + } as LangGraphMessage); + break; + // OpenAI introduced "developer" as a role that supersedes "system" for + // newer models; in LangChain it still maps to a SystemMessage. Treating + // it as a system message preserves the prompt instead of throwing. + case "developer": + result.push({ + id: message.id, + role: "system", + content: message.content, + type: "system", + } as LangGraphMessage); + break; case "tool": - return { + result.push({ content: message.content, role: message.role, type: message.role, tool_call_id: message.toolCallId, id: message.id, + } as LangGraphMessage); + break; + // Reasoning messages preserve the agent's prior chain-of-thought across + // turns — the visible summary text plus an opaque `encryptedValue` + // (provider-specific encrypted reasoning state, e.g. OpenAI Responses + // API `encrypted_content` for caching, Anthropic extended-thinking + // `signature`). Dropping these would make the model "forget" what it + // was reasoning about on the previous turn. + // + // Forward them to LangGraph as a standalone AIMessage whose content + // carries an OpenAI Responses-API-shaped reasoning block. + // langchain-openai's `_construct_responses_api_input` recognizes + // `type: "reasoning"` content blocks and threads them through to the + // Responses API as reasoning input items, so the model sees its own + // prior reasoning state. 
+ // + // Note: the original provider shape (Anthropic `thinking`, Bedrock + // `reasoning_content`, OpenAI `summary`) isn't preserved by AG-UI's + // event stream — only rendered text and `encryptedValue` survive. + // We emit the OpenAI summary shape because it's what + // langchain-openai's Responses-API path consumes; for Anthropic / + // Bedrock multi-turn reasoning continuity, additional plumbing + // would be needed (e.g. an AG-UI extension preserving the original + // block type). + case "reasoning": { + const reasoningBlock: Record<string, unknown> = { + type: "reasoning", + id: message.id, + summary: message.content + ? [{ type: "summary_text", text: message.content }] + : [], }; + const encrypted = (message as { encryptedValue?: string }).encryptedValue; + if (encrypted) { + // OpenAI Responses API ships encrypted reasoning state under + // `encrypted_content`; langchain-openai forwards it verbatim. + // Anthropic uses `signature` on the thinking block. Set both so + // whichever path the provider takes, the state round-trips. + reasoningBlock.encrypted_content = encrypted; + reasoningBlock.signature = encrypted; + } + result.push({ + id: message.id, + type: "ai", + role: "assistant", + content: [reasoningBlock], + tool_calls: [], + } as LangGraphMessage); + break; + } + // Activity messages are display-only progress events (status pills, + // streaming progress bars, etc.). They have no LLM-relevant content + // and no analogue in LangGraph's message types; skip rather than + // throw so multi-turn flows with activity history don't break. + case "activity": + break; default: - console.error(`Message role ${message.role} is not implemented`); + console.error(`Message role ${(message as { role: string }).role} is not implemented`); throw new Error("message role is not supported."); - }); + } + return result; } function stringifyIfNeeded(item: any) {