Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .changeset/fix-reasoning-approval-continuation.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
"@cloudflare/ai-chat": patch
---

Fixed approval auto-continuation streams so reasoning chunks keep a valid `reasoning-start` before `reasoning-delta` sequence when continuing from an assistant message that already has reasoning.
21 changes: 16 additions & 5 deletions packages/ai-chat/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3704,9 +3704,12 @@ export class AIChatAgent<
// state "streaming" (interrupted mid-generation). Parts with
// state "done" or no state create new blocks as usual (e.g.
// tool auto-continuation).
// - reasoning-start: always suppressed when an existing
// reasoning part exists — re-reasoning during continuation
// appends to the same block rather than creating a new one.
// - reasoning-start: server-side message building skips appending
// a new reasoning part when one already exists, so re-reasoning
// during continuation merges into the same persisted block. The
// chunk is still forwarded to clients to keep the UI stream
// protocol valid.
let skipServerApply = false;
if (continuation) {
if (!continuationTextResumed && data.type === "text-start") {
for (let k = message.parts.length - 1; k >= 0; k--) {
Expand All @@ -3733,7 +3736,13 @@ export class AIChatAgent<
break;
}
}
if (continuationReasoningResumed) continue;
// Keep the persisted continuation merged into the cloned
// reasoning part, but still forward reasoning-start to the
// client. AI SDK v6 requires reasoning-start before any
// reasoning-delta in the stream processor's active-part
// registry, even when the message already contains a
// completed reasoning part from earlier in the turn.
skipServerApply = continuationReasoningResumed;
}
}

Expand Down Expand Up @@ -3766,7 +3775,9 @@ export class AIChatAgent<
// Delegate message building to the shared parser.
// It handles: text, reasoning, file, source, tool lifecycle,
// step boundaries — all the part types needed for UIMessage.
const handled = applyChunkToParts(message.parts, data);
const handled = skipServerApply
? true
: applyChunkToParts(message.parts, data);

// When a tool enters approval-requested state, the stream is
// paused waiting for user approval. Persist the streaming message
Expand Down
141 changes: 141 additions & 0 deletions packages/ai-chat/src/tests/client-tools-continuation.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -509,6 +509,147 @@ describe("Client tools continuation", () => {
}
});

// Regression test for issue #1480: when an approval auto-continuation
// resumes an assistant message that already contains a completed
// reasoning part, the server must still forward `reasoning-start` to
// clients (the AI SDK stream processor requires start-before-delta),
// while merging the continued reasoning into the existing persisted part.
it("preserves reasoning-start before reasoning-delta during approval continuation (#1480)", async () => {
const room = crypto.randomUUID();
const { ws } = await connectChatWS(`/agents/test-chat-agent/${room}`);

try {
const userMessage: ChatMessage = {
id: "msg-issue-1480",
role: "user",
parts: [{ type: "text", text: "Hello" }]
};

// Phase 1: run an initial chat request to completion so the agent has
// an active conversation. Resolve false on timeout rather than hanging
// the test — the expect() below then fails with a clear boolean diff.
let resolveInitialDone: (value: boolean) => void;
const initialDonePromise = new Promise<boolean>((res) => {
resolveInitialDone = res;
});
const initialTimeout = setTimeout(() => resolveInitialDone(false), 3000);

// Named handler so it can remove itself once the terminal `done`
// response arrives; later phases attach their own collectors.
ws.addEventListener("message", function initialHandler(e: MessageEvent) {
const data = JSON.parse(e.data as string);
if (data.type === MessageType.CF_AGENT_USE_CHAT_RESPONSE && data.done) {
clearTimeout(initialTimeout);
resolveInitialDone(true);
ws.removeEventListener("message", initialHandler);
}
});

// The body flags (`reasoningContinuation`, `delayContinuationChunks`)
// are test-worker switches that make the mock model emit a delayed
// reasoning continuation stream on the follow-up request.
ws.send(
JSON.stringify({
type: MessageType.CF_AGENT_USE_CHAT_REQUEST,
id: "req-issue-1480-initial",
init: {
method: "POST",
body: JSON.stringify({
messages: [userMessage],
reasoningContinuation: true,
delayContinuationChunks: true
})
}
})
);
expect(await initialDonePromise).toBe(true);

// Phase 2: overwrite the persisted history directly on the DO stub to
// simulate the exact pre-continuation state from the issue report: an
// assistant message with a *completed* ("done") reasoning part followed
// by a tool call paused in "approval-requested".
const agentStub = await getAgentByName(env.TestChatAgent, room);
await agentStub.persistMessages([
userMessage,
{
id: "assistant-issue-1480",
role: "assistant",
parts: [
{
type: "reasoning",
text: "initial reasoning",
state: "done"
},
{
type: "tool-changeBackgroundColor",
toolCallId: "call_issue_1480",
state: "approval-requested",
input: { color: "blue" },
approval: { id: "approval_issue_1480" }
}
] as ChatMessage["parts"]
}
]);

// Capture every subsequent frame so chunk ordering can be asserted.
const receivedMessages = collectMessages(ws);

// Phase 3: approve the pending tool call with autoContinue, which
// triggers the continuation stream under test.
ws.send(
JSON.stringify({
type: MessageType.CF_AGENT_TOOL_APPROVAL,
toolCallId: "call_issue_1480",
approved: true,
autoContinue: true
})
);

// Explicitly request stream resume so the (delayed) continuation
// chunks are replayed over this socket.
ws.send(
JSON.stringify({
type: MessageType.CF_AGENT_STREAM_RESUME_REQUEST
})
);

// Complete the resume handshake: wait for RESUMING, then ACK its id.
const resuming = (await waitForMessage(
receivedMessages,
(message) => message.type === MessageType.CF_AGENT_STREAM_RESUMING
)) as { id: string } | undefined;
expect(resuming).toBeDefined();

ws.send(
JSON.stringify({
type: MessageType.CF_AGENT_STREAM_RESUME_ACK,
id: resuming!.id
})
);

// Wait for the continuation stream to finish before inspecting chunks.
const done = await waitForMessage(
receivedMessages,
(message) =>
message.type === MessageType.CF_AGENT_USE_CHAT_RESPONSE &&
message.done === true
);
expect(done).toBeDefined();

// Extract the ordered stream-chunk types from non-empty response
// bodies (empty bodies are control frames like the final `done`).
const chunkTypes = receivedMessages
.filter(
(message) =>
message.type === MessageType.CF_AGENT_USE_CHAT_RESPONSE &&
typeof message.body === "string" &&
message.body.length > 0
)
.map((message) => JSON.parse(message.body as string).type as string);

const reasoningStartIndex = chunkTypes.indexOf("reasoning-start");
const reasoningDeltaIndex = chunkTypes.indexOf("reasoning-delta");

// Core assertion for #1480: reasoning-start must be forwarded to the
// client and must precede the first reasoning-delta.
expect(reasoningStartIndex).toBeGreaterThanOrEqual(0);
expect(reasoningDeltaIndex).toBeGreaterThan(reasoningStartIndex);

// Phase 4: verify server-side persistence — the continuation must have
// merged into the pre-existing reasoning part, not appended a new one.
const persistedMessagesBroadcast = (await waitForMessage(
receivedMessages,
(message) => message.type === MessageType.CF_AGENT_CHAT_MESSAGES
)) as { messages: ChatMessage[] } | undefined;
expect(persistedMessagesBroadcast).toBeDefined();

const persistedAssistant = persistedMessagesBroadcast!.messages.find(
(message) => message.id === "assistant-issue-1480"
);
expect(persistedAssistant).toBeDefined();
const reasoningParts = persistedAssistant!.parts.filter(
(part) => part.type === "reasoning"
);
// Exactly one reasoning part, with the continuation text ("continuation
// reasoning", emitted by the test worker) concatenated onto the
// original "initial reasoning" text.
expect(reasoningParts).toHaveLength(1);
expect(reasoningParts[0]).toMatchObject({
text: "initial reasoningcontinuation reasoning",
state: "done"
});
} finally {
// Normal closure so the Durable Object connection tears down cleanly
// even when an assertion above throws.
ws.close(1000);
}
});

it("should send resume-none when an auto-continuation returns no body", async () => {
const room = crypto.randomUUID();
const { ws } = await connectChatWS(`/agents/test-chat-agent/${room}`);
Expand Down
30 changes: 30 additions & 0 deletions packages/ai-chat/src/tests/worker.ts
Original file line number Diff line number Diff line change
Expand Up @@ -158,6 +158,36 @@ export class TestChatAgent extends AIChatAgent<Env> {
]);
}

if (
options?.continuation === true &&
options.body?.reasoningContinuation === true
) {
const chunks = [
{ type: "start" },
{ type: "reasoning-start", id: "reasoning_issue_1480" },
{
type: "reasoning-delta",
id: "reasoning_issue_1480",
delta: "continuation reasoning"
},
{ type: "reasoning-end", id: "reasoning_issue_1480" },
{ type: "text-start", id: "text_issue_1480" },
{
type: "text-delta",
id: "text_issue_1480",
delta: "continuation answer"
},
{ type: "text-end", id: "text_issue_1480" },
{ type: "finish" }
];

if (options.body.delayContinuationChunks === true) {
return makeDelayedSSEChunkResponse(chunks, 100);
}

return makeSSEChunkResponse(chunks);
}

// Issue #1404: simulate the OpenAI Responses API "provider replay"
// pattern. When asked to continue after a tool result, some providers
// re-emit the prior tool call (start + delta + available) plus the
Expand Down
Loading