diff --git a/apps/dojo/scripts/run-dojo-everything.js b/apps/dojo/scripts/run-dojo-everything.js
index d92989a3d6..37344ecf13 100755
--- a/apps/dojo/scripts/run-dojo-everything.js
+++ b/apps/dojo/scripts/run-dojo-everything.js
@@ -4,9 +4,11 @@
 const { execSync } = require("child_process");
 const path = require("path");
 const concurrently = require("concurrently");
-// Pinned: @langchain/langgraph-api@1.1.14 regressed schema extraction, causing
-// worker timeouts on CI runners. Re-evaluate when a newer version fixes the issue.
-const LANGGRAPH_CLI_VERSION = "1.1.13";
+// 1.2.1 ships the v3 thread-stream protocol (POST /threads/:tid/commands,
+// /stream/events, etc.) that the AG-UI transformer path depends on.
+// 1.1.13 returned 404 on those routes. Re-evaluate if the schema-extraction
+// regressions that prompted the original 1.1.13 pin resurface.
+const LANGGRAPH_CLI_VERSION = "1.2.1";
 
 // Parse command line arguments
 const args = process.argv.slice(2);
diff --git a/apps/dojo/src/agents.ts b/apps/dojo/src/agents.ts
index 6385ec1b85..aff030690c 100644
--- a/apps/dojo/src/agents.ts
+++ b/apps/dojo/src/agents.ts
@@ -198,9 +198,14 @@ export const agentsIntegrations = {
   "langgraph-typescript": async () =>
     mapAgents(
       (graphId) => {
+        // All TS demos register the AG-UI streamTransformer at compile time;
+        // route every one through the transformer path. The legacy
+        // translation in agent.ts is preserved for non-langgraph
+        // deployments via `useTransformer: false`.
         return new LangGraphAgent({
           deploymentUrl: envVars.langgraphTypescriptUrl,
           graphId,
+          useTransformer: true,
         });
       },
       {
diff --git a/apps/dojo/src/app/[integrationId]/feature/(v2)/tool_based_generative_ui/page.tsx b/apps/dojo/src/app/[integrationId]/feature/(v2)/tool_based_generative_ui/page.tsx
index 2f301576c4..8ac8248de7 100644
--- a/apps/dojo/src/app/[integrationId]/feature/(v2)/tool_based_generative_ui/page.tsx
+++ b/apps/dojo/src/app/[integrationId]/feature/(v2)/tool_based_generative_ui/page.tsx
@@ -94,13 +94,14 @@ function HaikuDisplay() {
   useFrontendTool(
     {
       agentId: "tool_based_generative_ui",
+      description: "Generates a haiku with Japanese and English translations, an image name, and a CSS gradient for the background.",
       name: "generate_haiku",
       parameters: z.object({
         japanese: z.array(z.string()).describe("3 lines of haiku in Japanese"),
         english: z.array(z.string()).describe("3 lines of haiku translated to English"),
         image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(", ")}`),
         gradient: z.string().describe("CSS Gradient color for the background"),
-      }) ,
+      }),
       followUp: false,
       handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {
         const newHaiku: Haiku = {
diff --git a/apps/dojo/src/files.json b/apps/dojo/src/files.json
index d71723cabb..39a1f60c16 100644
--- a/apps/dojo/src/files.json
+++ b/apps/dojo/src/files.json
@@ -82,7 +82,7 @@
   "agent-spec-langgraph::tool_based_generative_ui": [
     {
       "name": "page.tsx",
-      "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport {\n  useFrontendTool,\n  useConfigureSuggestions,\n  CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n  Carousel,\n  CarouselContent,\n  CarouselItem,\n  CarouselNext,\n  CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n  params: Promise<{\n    integrationId: string;\n  }>;\n}\n\ninterface Haiku {\n  japanese: string[];\n  english: string[];\n  image_name: string | null;\n  gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n  const { integrationId } = React.use(params);\n  const { chatDefaultOpen } = useURLParams();\n\n  return (\n    <CopilotKit runtimeUrl={`/api/copilotkit/${integrationId}`} agent=\"tool_based_generative_ui\">\n      <HaikuDisplay />\n      <SidebarWithSuggestions defaultOpen={chatDefaultOpen} />\n    </CopilotKit>\n  );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n  useConfigureSuggestions({\n    suggestions: [\n      { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n      { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n      { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n    ],\n    available: \"always\",\n  });\n\n  return (\n    <CopilotSidebar defaultOpen={defaultOpen} />\n  );\n}\n\nconst VALID_IMAGE_NAMES = [\n  \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n  \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n  \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n  \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n  \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n  \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n  \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n  \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n  \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n  \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n  const [activeIndex, setActiveIndex] = useState(0);\n  const [haikus, setHaikus] = useState<Haiku[]>([\n    {\n      japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n      english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n      image_name: null,\n      gradient: \"\",\n    },\n  ]);\n\n  useFrontendTool(\n    {\n      agentId: \"tool_based_generative_ui\",\n      name: \"generate_haiku\",\n      parameters: z.object({\n        japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n        english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n        image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n        gradient: z.string().describe(\"CSS Gradient color for the background\"),\n      }) ,\n      followUp: false,\n      handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n        const newHaiku: Haiku = {\n          japanese: japanese || [],\n          english: english || [],\n          image_name: image_name || null,\n          gradient: gradient || \"\",\n        };\n        setHaikus((prev) => [\n          newHaiku,\n          ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n        ]);\n        setActiveIndex(0);\n        return \"Haiku generated!\";\n      },\n      render: ({ args }: { args: Partial<Haiku> }) => {\n        if (!args.japanese) return <></>;\n        return <HaikuCard haiku={args} />;\n      },\n    },\n    [haikus],\n  );\n\n  const currentHaiku = haikus[activeIndex];\n\n  return (\n    <div style={{ background: currentHaiku.gradient || undefined }}>\n      <Carousel>\n        <CarouselContent>\n          {haikus.map((haiku, index) => (\n            <CarouselItem key={index}>\n              <HaikuCard haiku={haiku} />\n            </CarouselItem>\n          ))}\n        </CarouselContent>\n        {haikus.length > 1 && (\n          <>\n            <CarouselPrevious />\n            <CarouselNext />\n          </>\n        )}\n      </Carousel>\n    </div>\n  );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n  return (\n    <div style={{ background: haiku.gradient || undefined }}>\n      {/* Decorative background elements */}\n      <div />\n      <div />\n\n      {/* Haiku Text */}\n      <div>\n        {haiku.japanese?.map((line, index) => (\n          <div key={index}>\n            <p>\n              {line}\n            </p>\n            <p>\n              {haiku.english?.[index]}\n            </p>\n          </div>\n        ))}\n      </div>\n\n      {/* Image */}\n      {haiku.image_name && (\n        <div>\n          <img\n            src={`/images/${haiku.image_name}`}\n            alt={haiku.image_name}\n          />\n        </div>\n      )}\n    </div>\n  );\n}\n",
+      "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport {\n  useFrontendTool,\n  useConfigureSuggestions,\n  CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n  Carousel,\n  CarouselContent,\n  CarouselItem,\n  CarouselNext,\n  CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n  params: Promise<{\n    integrationId: string;\n  }>;\n}\n\ninterface Haiku {\n  japanese: string[];\n  english: string[];\n  image_name: string | null;\n  gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n  const { integrationId } = React.use(params);\n  const { chatDefaultOpen } = useURLParams();\n\n  return (\n    <CopilotKit runtimeUrl={`/api/copilotkit/${integrationId}`} agent=\"tool_based_generative_ui\">\n      <HaikuDisplay />\n      <SidebarWithSuggestions defaultOpen={chatDefaultOpen} />\n    </CopilotKit>\n  );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n  useConfigureSuggestions({\n    suggestions: [\n      { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n      { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n      { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n    ],\n    available: \"always\",\n  });\n\n  return (\n    <CopilotSidebar defaultOpen={defaultOpen} />\n  );\n}\n\nconst VALID_IMAGE_NAMES = [\n  \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n  \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n  \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n  \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n  \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n  \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n  \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n  \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n  \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n  \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n  const [activeIndex, setActiveIndex] = useState(0);\n  const [haikus, setHaikus] = useState<Haiku[]>([\n    {\n      japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n      english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n      image_name: null,\n      gradient: \"\",\n    },\n  ]);\n\n  useFrontendTool(\n    {\n      agentId: \"tool_based_generative_ui\",\n      description: \"Generates a haiku with Japanese and English translations, an image name, and a CSS gradient for the background.\",\n      name: \"generate_haiku\",\n      parameters: z.object({\n        japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n        english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n        image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n        gradient: z.string().describe(\"CSS Gradient color for the background\"),\n      }),\n      followUp: false,\n      handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n        const newHaiku: Haiku = {\n          japanese: japanese || [],\n          english: english || [],\n          image_name: image_name || null,\n          gradient: gradient || \"\",\n        };\n        setHaikus((prev) => [\n          newHaiku,\n          ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n        ]);\n        setActiveIndex(0);\n        return \"Haiku generated!\";\n      },\n      render: ({ args }: { args: Partial<Haiku> }) => {\n        if (!args.japanese) return <></>;\n        return <HaikuCard haiku={args} />;\n      },\n    },\n    [haikus],\n  );\n\n  const currentHaiku = haikus[activeIndex];\n\n  return (\n    <div style={{ background: currentHaiku.gradient || undefined }}>\n      <Carousel>\n        <CarouselContent>\n          {haikus.map((haiku, index) => (\n            <CarouselItem key={index}>\n              <HaikuCard haiku={haiku} />\n            </CarouselItem>\n          ))}\n        </CarouselContent>\n        {haikus.length > 1 && (\n          <>\n            <CarouselPrevious />\n            <CarouselNext />\n          </>\n        )}\n      </Carousel>\n    </div>\n  );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n  return (\n    <div style={{ background: haiku.gradient || undefined }}>\n      {/* Decorative background elements */}\n      <div />\n      <div />\n\n      {/* Haiku Text */}\n      <div>\n        {haiku.japanese?.map((line, index) => (\n          <div key={index}>\n            <p>\n              {line}\n            </p>\n            <p>\n              {haiku.english?.[index]}\n            </p>\n          </div>\n        ))}\n      </div>\n\n      {/* Image */}\n      {haiku.image_name && (\n        <div>\n          <img\n            src={`/images/${haiku.image_name}`}\n            alt={haiku.image_name}\n          />\n        </div>\n      )}\n    </div>\n  );\n}\n",
       "language": "typescript",
       "type": "file"
     },
@@ -188,7 +188,7 @@
   "agent-spec-wayflow::tool_based_generative_ui": [
     {
       "name": "page.tsx",
-      "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport {\n  useFrontendTool,\n  useConfigureSuggestions,\n  CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n  Carousel,\n  CarouselContent,\n  CarouselItem,\n  CarouselNext,\n  CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n  params: Promise<{\n    integrationId: string;\n  }>;\n}\n\ninterface Haiku {\n  japanese: string[];\n  english: string[];\n  image_name: string | null;\n  gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n  const { integrationId } = React.use(params);\n  const { chatDefaultOpen } = useURLParams();\n\n  return (\n    <CopilotKit runtimeUrl={`/api/copilotkit/${integrationId}`} agent=\"tool_based_generative_ui\">\n      <HaikuDisplay />\n      <SidebarWithSuggestions defaultOpen={chatDefaultOpen} />\n    </CopilotKit>\n  );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n  useConfigureSuggestions({\n    suggestions: [\n      { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n      { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n      { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n    ],\n    available: \"always\",\n  });\n\n  return (\n    <CopilotSidebar defaultOpen={defaultOpen} />\n  );\n}\n\nconst VALID_IMAGE_NAMES = [\n  \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n  \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n  \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n  \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n  \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n  \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n  \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n  \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n  \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n  \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n  const [activeIndex, setActiveIndex] = useState(0);\n  const [haikus, setHaikus] = useState<Haiku[]>([\n    {\n      japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n      english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n      image_name: null,\n      gradient: \"\",\n    },\n  ]);\n\n  useFrontendTool(\n    {\n      agentId: \"tool_based_generative_ui\",\n      name: \"generate_haiku\",\n      parameters: z.object({\n        japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n        english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n        image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n        gradient: z.string().describe(\"CSS Gradient color for the background\"),\n      }) ,\n      followUp: false,\n      handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n        const newHaiku: Haiku = {\n          japanese: japanese || [],\n          english: english || [],\n          image_name: image_name || null,\n          gradient: gradient || \"\",\n        };\n        setHaikus((prev) => [\n          newHaiku,\n          ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n        ]);\n        setActiveIndex(0);\n        return \"Haiku generated!\";\n      },\n      render: ({ args }: { args: Partial<Haiku> }) => {\n        if (!args.japanese) return <></>;\n        return <HaikuCard haiku={args} />;\n      },\n    },\n    [haikus],\n  );\n\n  const currentHaiku = haikus[activeIndex];\n\n  return (\n    <div style={{ background: currentHaiku.gradient || undefined }}>\n      <Carousel>\n        <CarouselContent>\n          {haikus.map((haiku, index) => (\n            <CarouselItem key={index}>\n              <HaikuCard haiku={haiku} />\n            </CarouselItem>\n          ))}\n        </CarouselContent>\n        {haikus.length > 1 && (\n          <>\n            <CarouselPrevious />\n            <CarouselNext />\n          </>\n        )}\n      </Carousel>\n    </div>\n  );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n  return (\n    <div style={{ background: haiku.gradient || undefined }}>\n      {/* Decorative background elements */}\n      <div />\n      <div />\n\n      {/* Haiku Text */}\n      <div>\n        {haiku.japanese?.map((line, index) => (\n          <div key={index}>\n            <p>\n              {line}\n            </p>\n            <p>\n              {haiku.english?.[index]}\n            </p>\n          </div>\n        ))}\n      </div>\n\n      {/* Image */}\n      {haiku.image_name && (\n        <div>\n          <img\n            src={`/images/${haiku.image_name}`}\n            alt={haiku.image_name}\n          />\n        </div>\n      )}\n    </div>\n  );\n}\n",
+      "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport {\n  useFrontendTool,\n  useConfigureSuggestions,\n  CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n  Carousel,\n  CarouselContent,\n  CarouselItem,\n  CarouselNext,\n  CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n  params: Promise<{\n    integrationId: string;\n  }>;\n}\n\ninterface Haiku {\n  japanese: string[];\n  english: string[];\n  image_name: string | null;\n  gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n  const { integrationId } = React.use(params);\n  const { chatDefaultOpen } = useURLParams();\n\n  return (\n    <CopilotKit runtimeUrl={`/api/copilotkit/${integrationId}`} agent=\"tool_based_generative_ui\">\n      <HaikuDisplay />\n      <SidebarWithSuggestions defaultOpen={chatDefaultOpen} />\n    </CopilotKit>\n  );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n  useConfigureSuggestions({\n    suggestions: [\n      { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n      { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n      { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n    ],\n    available: \"always\",\n  });\n\n  return (\n    <CopilotSidebar defaultOpen={defaultOpen} />\n  );\n}\n\nconst VALID_IMAGE_NAMES = [\n  \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n  \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n  \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n  \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n  \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n  \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n  \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n  \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n  \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n  \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n  const [activeIndex, setActiveIndex] = useState(0);\n  const [haikus, setHaikus] = useState<Haiku[]>([\n    {\n      japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n      english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n      image_name: null,\n      gradient: \"\",\n    },\n  ]);\n\n  useFrontendTool(\n    {\n      agentId: \"tool_based_generative_ui\",\n      description: \"Generates a haiku with Japanese and English translations, an image name, and a CSS gradient for the background.\",\n      name: \"generate_haiku\",\n      parameters: z.object({\n        japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n        english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n        image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n        gradient: z.string().describe(\"CSS Gradient color for the background\"),\n      }),\n      followUp: false,\n      handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n        const newHaiku: Haiku = {\n          japanese: japanese || [],\n          english: english || [],\n          image_name: image_name || null,\n          gradient: gradient || \"\",\n        };\n        setHaikus((prev) => [\n          newHaiku,\n          ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n        ]);\n        setActiveIndex(0);\n        return \"Haiku generated!\";\n      },\n      render: ({ args }: { args: Partial<Haiku> }) => {\n        if (!args.japanese) return <></>;\n        return <HaikuCard haiku={args} />;\n      },\n    },\n    [haikus],\n  );\n\n  const currentHaiku = haikus[activeIndex];\n\n  return (\n    <div style={{ background: currentHaiku.gradient || undefined }}>\n      <Carousel>\n        <CarouselContent>\n          {haikus.map((haiku, index) => (\n            <CarouselItem key={index}>\n              <HaikuCard haiku={haiku} />\n            </CarouselItem>\n          ))}\n        </CarouselContent>\n        {haikus.length > 1 && (\n          <>\n            <CarouselPrevious />\n            <CarouselNext />\n          </>\n        )}\n      </Carousel>\n    </div>\n  );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n  return (\n    <div style={{ background: haiku.gradient || undefined }}>\n      {/* Decorative background elements */}\n      <div />\n      <div />\n\n      {/* Haiku Text */}\n      <div>\n        {haiku.japanese?.map((line, index) => (\n          <div key={index}>\n            <p>\n              {line}\n            </p>\n            <p>\n              {haiku.english?.[index]}\n            </p>\n          </div>\n        ))}\n      </div>\n\n      {/* Image */}\n      {haiku.image_name && (\n        <div>\n          <img\n            src={`/images/${haiku.image_name}`}\n            alt={haiku.image_name}\n          />\n        </div>\n      )}\n    </div>\n  );\n}\n",
       "language": "typescript",
       "type": "file"
     },
@@ -232,7 +232,7 @@
     },
     {
       "name": "agent.ts",
-      "content": "/**\n * A simple agentic chat flow using LangGraph with AG-UI middleware.\n *\n * The AG-UI middleware handles:\n * - Injecting frontend tools from state.tools into the model\n * - Routing frontend tool calls (emit events, skip backend execution)\n */\n\nimport { createAgent } from \"langchain\";\nimport { MemorySaver } from \"@langchain/langgraph\";\nimport { copilotkitMiddleware } from \"@copilotkit/sdk-js/langgraph\";\n\nconst checkpointer = new MemorySaver();\n\nexport const agenticChatGraph = createAgent({\n  model: \"openai:gpt-4o\",\n  tools: [], // Backend tools go here\n  middleware: [copilotkitMiddleware],\n  systemPrompt: \"You are a helpful assistant.\",\n  checkpointer\n});\n",
+      "content": "/**\n * A simple agentic chat flow using LangGraph with AG-UI middleware.\n *\n * The AG-UI middleware handles:\n * - Injecting frontend tools from state.tools into the model\n * - Routing frontend tool calls (emit events, skip backend execution)\n */\n\nimport { createAgent } from \"langchain\";\nimport { MemorySaver } from \"@langchain/langgraph\";\nimport { copilotkitMiddleware } from \"@copilotkit/sdk-js/langgraph\";\nimport { aguiTransformer } from \"@ag-ui/langgraph/transformer\";\n\nconst checkpointer = new MemorySaver();\n\nexport const agenticChatGraph = createAgent({\n  model: \"openai:gpt-4o\",\n  tools: [], // Backend tools go here\n  middleware: [copilotkitMiddleware],\n  systemPrompt: \"You are a helpful assistant.\",\n  checkpointer,\n  streamTransformers: [aguiTransformer],\n});\n",
       "language": "ts",
       "type": "file"
     }
@@ -264,7 +264,7 @@
     },
     {
       "name": "agent.ts",
-      "content": "/**\n * A simple agentic chat flow using LangGraph with reasoning model support.\n *\n * This agent supports multiple model providers with reasoning/thinking capabilities:\n * - OpenAI (default): Uses o3 model\n * - Anthropic: Uses claude-sonnet-4-20250514 with thinking enabled\n * - Gemini: Uses gemini-2.5-pro with thinking budget\n *\n * The model is selected based on the `model` field in the agent state.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { ChatAnthropic } from \"@langchain/anthropic\";\nimport { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Annotation, MessagesAnnotation, StateGraph, Command, START, END } from \"@langchain/langgraph\";\n\nconst AgentStateAnnotation = Annotation.Root({\n  tools: Annotation<any[]>({\n    reducer: (x, y) => y ?? x,\n    default: () => []\n  }),\n  model: Annotation<string>({\n    reducer: (x, y) => y ?? x,\n    default: () => \"\"\n  }),\n  ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n  /**\n   * Standard chat node based on the ReAct design pattern. It handles:\n   * - The model to use (and binds in CopilotKit actions and the tools defined above)\n   * - The system prompt\n   * - Getting a response from the model\n   * - Handling tool calls\n   */\n\n  // 1. Define the model based on state\n  let model;\n  if (state.model === \"Anthropic\") {\n    model = new ChatAnthropic({\n      model: \"claude-sonnet-4-20250514\",\n      thinking: { type: \"enabled\", budget_tokens: 2000 },\n    });\n  } else if (state.model === \"Gemini\") {\n    model = new ChatGoogleGenerativeAI({\n      model: \"gemini-2.5-pro\",\n      thinkingBudget: 1024,\n    });\n  } else {\n    // Default: OpenAI\n    model = new ChatOpenAI({\n      model: \"o4-mini\",\n      useResponsesApi: true,\n      reasoning: { effort: \"high\", summary: \"auto\" },\n    });\n  }\n\n  // Define config for the model\n  if (!config) {\n    config = { recursionLimit: 25 };\n  }\n\n  // 2. Bind the tools to the model\n  const modelWithTools = model.bindTools(\n    [\n      ...(state.tools ?? []),\n    ],\n  );\n\n  // 3. Define the system message\n  const systemMessage = new SystemMessage({\n    content: \"You are a helpful assistant.\"\n  });\n\n  // 4. Run the model to generate a response\n  const response = await modelWithTools.invoke([\n    systemMessage,\n    ...state.messages,\n  ], config);\n\n  // 5. Return the response\n  return new Command({\n    goto: END,\n    update: {\n      messages: [response]\n    }\n  });\n}\n\n// Define a new graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n  .addNode(\"chatNode\", chatNode)\n  .addEdge(START, \"chatNode\")\n  .addEdge(\"chatNode\", END);\n\n// Compile the graph\nexport const agenticChatReasoningGraph = workflow.compile();\n",
+      "content": "/**\n * A simple agentic chat flow using LangGraph with reasoning model support.\n *\n * This agent supports multiple model providers with reasoning/thinking capabilities:\n * - OpenAI (default): Uses o3 model\n * - Anthropic: Uses claude-sonnet-4-20250514 with thinking enabled\n * - Gemini: Uses gemini-2.5-pro with thinking budget\n *\n * The model is selected based on the `model` field in the agent state.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { ChatAnthropic } from \"@langchain/anthropic\";\nimport { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Annotation, MessagesAnnotation, StateGraph, Command, START, END } from \"@langchain/langgraph\";\nimport { aguiTransformer } from \"@ag-ui/langgraph/transformer\";\n\nconst AgentStateAnnotation = Annotation.Root({\n  tools: Annotation<any[]>({\n    reducer: (x, y) => y ?? x,\n    default: () => []\n  }),\n  model: Annotation<string>({\n    reducer: (x, y) => y ?? x,\n    default: () => \"\"\n  }),\n  ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n  /**\n   * Standard chat node based on the ReAct design pattern. It handles:\n   * - The model to use (and binds in CopilotKit actions and the tools defined above)\n   * - The system prompt\n   * - Getting a response from the model\n   * - Handling tool calls\n   */\n\n  // 1. Define the model based on state\n  let model;\n  if (state.model === \"Anthropic\") {\n    model = new ChatAnthropic({\n      model: \"claude-sonnet-4-20250514\",\n      thinking: { type: \"enabled\", budget_tokens: 2000 },\n    });\n  } else if (state.model === \"Gemini\") {\n    model = new ChatGoogleGenerativeAI({\n      model: \"gemini-2.5-pro\",\n      thinkingBudget: 1024,\n    });\n  } else {\n    // Default: OpenAI\n    model = new ChatOpenAI({\n      model: \"o4-mini\",\n      useResponsesApi: true,\n      reasoning: { effort: \"high\", summary: \"auto\" },\n    });\n  }\n\n  // Define config for the model\n  if (!config) {\n    config = { recursionLimit: 25 };\n  }\n\n  // 2. Bind the tools to the model\n  const modelWithTools = model.bindTools(\n    [\n      ...(state.tools ?? []),\n    ],\n  );\n\n  // 3. Define the system message\n  const systemMessage = new SystemMessage({\n    content: \"You are a helpful assistant.\"\n  });\n\n  // 4. Run the model to generate a response\n  const response = await modelWithTools.invoke([\n    systemMessage,\n    ...state.messages,\n  ], config);\n\n  // 5. Return the response\n  return new Command({\n    goto: END,\n    update: {\n      messages: [response]\n    }\n  });\n}\n\n// Define a new graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n  .addNode(\"chatNode\", chatNode)\n  .addEdge(START, \"chatNode\")\n  .addEdge(\"chatNode\", END);\n\n// Compile the graph\nexport const agenticChatReasoningGraph = workflow.compile({\n  transformers: [aguiTransformer],\n});\n",
       "language": "ts",
       "type": "file"
     }
@@ -290,7 +290,7 @@
     },
     {
       "name": "agent.ts",
-      "content": "/**\n * A multimodal agentic chat that can analyze images and other media.\n *\n * This agent demonstrates how to:\n * 1. Receive user messages with images\n * 2. Process multimodal content (text + images)\n * 3. Use vision models to analyze images\n *\n * Example usage:\n *\n * ```typescript\n * import { UserMessage, TextInputContent, ImageInputContent } from \"@ag-ui/core\";\n *\n * // Create a multimodal user message\n * const message: UserMessage = {\n *   id: \"user-123\",\n *   role: \"user\",\n *   content: [\n *     { type: \"text\", text: \"What's in this image?\" },\n *     {\n *       type: \"image\",\n *       mimeType: \"image/jpeg\",\n *       url: \"https://example.com/photo.jpg\"\n *     },\n *   ],\n * };\n *\n * // Or with base64 encoded data\n * const messageWithData: UserMessage = {\n *   id: \"user-124\",\n *   role: \"user\",\n *   content: [\n *     { type: \"text\", text: \"Describe this picture\" },\n *     {\n *       type: \"image\",\n *       mimeType: \"image/png\",\n *       data: \"iVBORw0KGgoAAAANSUhEUgAAAAUA...\", // base64 encoded\n *       filename: \"screenshot.png\"\n *     },\n *   ],\n * };\n * ```\n *\n * The LangGraph integration automatically handles:\n * 1. Converting AG-UI multimodal format to LangChain's format\n * 2. Passing multimodal messages to vision models\n * 3. Converting responses back to AG-UI format\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Annotation, MessagesAnnotation, StateGraph, Command, START, END } from \"@langchain/langgraph\";\n\nconst AgentStateAnnotation = Annotation.Root({\n  tools: Annotation<any[]>({\n    reducer: (x, y) => y ?? x,\n    default: () => []\n  }),\n  ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function visionChatNode(state: AgentState, config?: RunnableConfig) {\n  /**\n   * Chat node that uses a vision-capable model to handle multimodal input.\n   *\n   * Images and other media sent by the user are automatically converted\n   * to LangChain's multimodal format by the AG-UI integration layer.\n   */\n\n  // Use a vision-capable model\n  const model = new ChatOpenAI({ model: \"gpt-5.4\" });\n\n  // Define config for the model\n  if (!config) {\n    config = { recursionLimit: 25 };\n  }\n\n  // Bind tools if needed\n  const modelWithTools = model.bindTools(\n    state.tools ?? [],\n    {\n      parallel_tool_calls: false,\n    }\n  );\n\n  // Define the system message\n  const systemMessage = new SystemMessage({\n    content: \"You are a helpful assistant that can analyze images, documents, and other media. \" +\n      \"When a user shares an image, describe what you see in detail. \" +\n      \"When a user shares a document, summarize its contents.\"\n  });\n\n  // Run the model with multimodal messages\n  const response = await modelWithTools.invoke([\n    systemMessage,\n    ...state.messages,\n  ], config);\n\n  // Return the response\n  return new Command({\n    goto: END,\n    update: {\n      messages: [response]\n    }\n  });\n}\n\n// Define a new graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n  .addNode(\"visionChatNode\", visionChatNode)\n  .addEdge(START, \"visionChatNode\")\n  .addEdge(\"visionChatNode\", END);\n\n// Compile the graph\nexport const agenticChatMultimodalGraph = workflow.compile();\n",
+      "content": "/**\n * A multimodal agentic chat that can analyze images and other media.\n *\n * This agent demonstrates how to:\n * 1. Receive user messages with images\n * 2. Process multimodal content (text + images)\n * 3. Use vision models to analyze images\n *\n * Example usage:\n *\n * ```typescript\n * import { UserMessage, TextInputContent, ImageInputContent } from \"@ag-ui/core\";\n *\n * // Create a multimodal user message\n * const message: UserMessage = {\n *   id: \"user-123\",\n *   role: \"user\",\n *   content: [\n *     { type: \"text\", text: \"What's in this image?\" },\n *     {\n *       type: \"image\",\n *       mimeType: \"image/jpeg\",\n *       url: \"https://example.com/photo.jpg\"\n *     },\n *   ],\n * };\n *\n * // Or with base64 encoded data\n * const messageWithData: UserMessage = {\n *   id: \"user-124\",\n *   role: \"user\",\n *   content: [\n *     { type: \"text\", text: \"Describe this picture\" },\n *     {\n *       type: \"image\",\n *       mimeType: \"image/png\",\n *       data: \"iVBORw0KGgoAAAANSUhEUgAAAAUA...\", // base64 encoded\n *       filename: \"screenshot.png\"\n *     },\n *   ],\n * };\n * ```\n *\n * The LangGraph integration automatically handles:\n * 1. Converting AG-UI multimodal format to LangChain's format\n * 2. Passing multimodal messages to vision models\n * 3. Converting responses back to AG-UI format\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Annotation, MessagesAnnotation, StateGraph, Command, START, END } from \"@langchain/langgraph\";\nimport { aguiTransformer } from \"@ag-ui/langgraph/transformer\";\n\nconst AgentStateAnnotation = Annotation.Root({\n  tools: Annotation<any[]>({\n    reducer: (x, y) => y ?? x,\n    default: () => []\n  }),\n  ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function visionChatNode(state: AgentState, config?: RunnableConfig) {\n  /**\n   * Chat node that uses a vision-capable model to handle multimodal input.\n   *\n   * Images and other media sent by the user are automatically converted\n   * to LangChain's multimodal format by the AG-UI integration layer.\n   */\n\n  // Use a vision-capable model\n  const model = new ChatOpenAI({ model: \"gpt-5.4\" });\n\n  // Define config for the model\n  if (!config) {\n    config = { recursionLimit: 25 };\n  }\n\n  // Bind tools if needed\n  const modelWithTools = model.bindTools(\n    state.tools ?? [],\n    {\n      parallel_tool_calls: false,\n    }\n  );\n\n  // Define the system message\n  const systemMessage = new SystemMessage({\n    content: \"You are a helpful assistant that can analyze images, documents, and other media. \" +\n      \"When a user shares an image, describe what you see in detail. \" +\n      \"When a user shares a document, summarize its contents.\"\n  });\n\n  // Run the model with multimodal messages\n  const response = await modelWithTools.invoke([\n    systemMessage,\n    ...state.messages,\n  ], config);\n\n  // Return the response\n  return new Command({\n    goto: END,\n    update: {\n      messages: [response]\n    }\n  });\n}\n\n// Define a new graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n  .addNode(\"visionChatNode\", visionChatNode)\n  .addEdge(START, \"visionChatNode\")\n  .addEdge(\"visionChatNode\", END);\n\n// Compile the graph\nexport const agenticChatMultimodalGraph = workflow.compile({\n  transformers: [aguiTransformer],\n});\n",
       "language": "ts",
       "type": "file"
     }
@@ -336,7 +336,7 @@
     },
     {
       "name": "agent.ts",
-      "content": "/**\n * A simple agentic chat flow using LangGraph instead of CrewAI.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Annotation, MessagesAnnotation, StateGraph, Command, START, END } from \"@langchain/langgraph\";\n\nconst AgentStateAnnotation = Annotation.Root({\n  tools: Annotation<any[]>({\n    reducer: (x, y) => y ?? x,\n    default: () => []\n  }),\n  ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n  /**\n   * Standard chat node based on the ReAct design pattern. It handles:\n   * - The model to use (and binds in CopilotKit actions and the tools defined above)\n   * - The system prompt\n   * - Getting a response from the model\n   * - Handling tool calls\n   *\n   * For more about the ReAct design pattern, see:\n   * https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n   */\n\n  // 1. Define the model\n  const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n  // Define config for the model\n  if (!config) {\n    config = { recursionLimit: 25 };\n  }\n\n  // 2. Bind the tools to the model\n  const modelWithTools = model.bindTools(\n    [\n      ...state.tools,\n      // your_tool_here\n    ],\n    {\n      // 2.1 Disable parallel tool calls to avoid race conditions,\n      // enable this for faster performance if you want to manage\n      // the complexity of running tool calls in parallel.\n      parallel_tool_calls: false,\n    }\n  );\n\n  // 3. Define the system message by which the chat model will be run\n  const systemMessage = new SystemMessage({\n    content: \"You are a helpful assistant.\"\n  });\n\n  // 4. Run the model to generate a response\n  const response = await modelWithTools.invoke([\n    systemMessage,\n    ...state.messages,\n  ], config);\n\n  // 6. We've handled all tool calls, so we can end the graph.\n  return new Command({\n    goto: END,\n    update: {\n      messages: [response]\n    }\n  })\n}\n\n// Define a new graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n  .addNode(\"chat_node\", chatNode)\n  .addEdge(START, \"chat_node\");\n\n// Compile the graph\nexport const agenticChatGraph = workflow.compile();",
+      "content": "/**\n * A simple agentic chat flow using LangGraph instead of CrewAI.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Annotation, MessagesAnnotation, StateGraph, Command, START, END } from \"@langchain/langgraph\";\nimport { aguiTransformer } from \"@ag-ui/langgraph/transformer\";\n\nconst AgentStateAnnotation = Annotation.Root({\n  tools: Annotation<any[]>({\n    reducer: (x, y) => y ?? x,\n    default: () => []\n  }),\n  ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n  /**\n   * Standard chat node based on the ReAct design pattern. It handles:\n   * - The model to use (and binds in CopilotKit actions and the tools defined above)\n   * - The system prompt\n   * - Getting a response from the model\n   * - Handling tool calls\n   *\n   * For more about the ReAct design pattern, see:\n   * https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n   */\n\n  // 1. Define the model\n  const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n  // Define config for the model\n  if (!config) {\n    config = { recursionLimit: 25 };\n  }\n\n  // 2. Bind the tools to the model\n  const modelWithTools = model.bindTools(\n    [\n      ...state.tools,\n      // your_tool_here\n    ],\n    {\n      // 2.1 Disable parallel tool calls to avoid race conditions,\n      // enable this for faster performance if you want to manage\n      // the complexity of running tool calls in parallel.\n      parallel_tool_calls: false,\n    }\n  );\n\n  // 3. Define the system message by which the chat model will be run\n  const systemMessage = new SystemMessage({\n    content: \"You are a helpful assistant.\"\n  });\n\n  // 4. Run the model to generate a response\n  const response = await modelWithTools.invoke([\n    systemMessage,\n    ...state.messages,\n  ], config);\n\n  // 6. We've handled all tool calls, so we can end the graph.\n  return new Command({\n    goto: END,\n    update: {\n      messages: [response]\n    }\n  })\n}\n\n// Define a new graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n  .addNode(\"chat_node\", chatNode)\n  .addEdge(START, \"chat_node\");\n\n// Compile the graph\nexport const agenticChatGraph = workflow.compile({\n  transformers: [aguiTransformer],\n});",
       "language": "ts",
       "type": "file"
     }
@@ -362,7 +362,7 @@
     },
     {
       "name": "agent.ts",
-      "content": "/**\n * A LangGraph implementation of the human-in-the-loop agent.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, interrupt, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\n\nconst DEFINE_TASK_TOOL = {\n  type: \"function\",\n  function: {\n    name: \"plan_execution_steps\",\n    description: \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in imperative form (i.e. Dig hole, Open door, ...)\",\n    parameters: {\n      type: \"object\",\n      properties: {\n        steps: {\n          type: \"array\",\n          items: {\n            type: \"object\",\n            properties: {\n              description: {\n                type: \"string\",\n                description: \"The text of the step in imperative form\"\n              },\n              status: {\n                type: \"string\",\n                enum: [\"enabled\"],\n                description: \"The status of the step, always 'enabled'\"\n              }\n            },\n            required: [\"description\", \"status\"]\n          },\n          description: \"An array of 10 step objects, each containing text and status\"\n        }\n      },\n      required: [\"steps\"]\n    }\n  }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n  steps: Annotation<Array<{ description: string; status: string }>>({\n    reducer: (x, y) => y ?? x,\n    default: () => []\n  }),\n  tools: Annotation<any[]>(),\n  user_response: Annotation<string | undefined>({\n    reducer: (x, y) => y ?? x,\n    default: () => undefined\n  }),\n  ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig): Promise<Command> {\n  /**\n   * This is the entry point for the flow.\n   */\n\n  // Initialize steps list if not exists\n  if (!state.steps) {\n    state.steps = [];\n  }\n\n  return new Command({\n    goto: \"chat_node\",\n    update: {\n      messages: state.messages,\n      steps: state.steps,\n    }\n  });\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise<Command> {\n  /**\n   * Standard chat node where the agent processes messages and generates responses.\n   * If task steps are defined, the user can enable/disable them using interrupts.\n   */\n  const systemPrompt = `\n    You are a helpful assistant that can perform any task.\n    You MUST call the \\`plan_execution_steps\\` function when the user asks you to perform a task.\n    Always make sure you will provide tasks based on the user query\n  `;\n\n  // Define the model\n  const model = new ChatOpenAI({ model: \"gpt-4o-mini\" });\n\n  // Define config for the model\n  if (!config) {\n    config = { recursionLimit: 25 };\n  }\n\n  // Use \"predict_state\" metadata to set up streaming for the write_document tool\n  if (!config.metadata) config.metadata = {};\n  config.metadata.predict_state = [{\n    state_key: \"steps\",\n    tool: \"plan_execution_steps\",\n    tool_argument: \"steps\"\n  }];\n\n  // Bind the tools to the model\n  const modelWithTools = model.bindTools(\n    [\n      ...state.tools,\n      DEFINE_TASK_TOOL\n    ],\n    {\n      // Disable parallel tool calls to avoid race conditions\n      parallel_tool_calls: false,\n    }\n  );\n\n  // Run the model and generate a response\n  const response = await modelWithTools.invoke([\n    new SystemMessage({ content: systemPrompt }),\n    ...state.messages,\n  ], config);\n\n  // Update messages with the response\n  const messages = [...state.messages, response];\n\n  // Handle tool calls\n  if (response.tool_calls && response.tool_calls.length > 0) {\n    const toolCall = response.tool_calls[0];\n\n    if (toolCall.name === \"plan_execution_steps\") {\n      // Get the steps from the tool call\n      const stepsRaw = toolCall.args.steps || [];\n\n      // Set initial status to \"enabled\" for all steps\n      const stepsData: Array<{ description: string; status: string }> = [];\n\n      // Handle different potential formats of steps data\n      if (Array.isArray(stepsRaw)) {\n        for (const step of stepsRaw) {\n          if (typeof step === 'object' && step.description) {\n            stepsData.push({\n              description: step.description,\n              status: \"enabled\"\n            });\n          } else if (typeof step === 'string') {\n            stepsData.push({\n              description: step,\n              status: \"enabled\"\n            });\n          }\n        }\n      }\n\n      // If no steps were processed correctly, return to END with the updated messages\n      if (stepsData.length === 0) {\n        return new Command({\n          goto: END,\n          update: {\n            messages: messages,\n            steps: state.steps,\n          }\n        });\n      }\n\n      // Update steps in state and emit to frontend\n      state.steps = stepsData;\n\n      // Add a tool response to satisfy OpenAI's requirements\n      const toolResponse = {\n        role: \"tool\" as const,\n        content: \"Task steps generated.\",\n        tool_call_id: toolCall.id\n      };\n\n      const updatedMessages = [...messages, toolResponse];\n\n      // Move to the process_steps_node which will handle the interrupt and final response\n      return new Command({\n        goto: \"process_steps_node\",\n        update: {\n          messages: updatedMessages,\n          steps: state.steps,\n        }\n      });\n    }\n  }\n\n  // If no tool calls or not plan_execution_steps, return to END with the updated messages\n  return new Command({\n    goto: END,\n    update: {\n      messages: messages,\n      steps: state.steps,\n    }\n  });\n}\n\nasync function processStepsNode(state: AgentState, config?: RunnableConfig): Promise<Command> {\n  /**\n   * This node handles the user interrupt for step customization and generates the final response.\n   */\n\n  let userResponse: string;\n\n  // Check if we already have a user_response in the state\n  // This happens when the node restarts after an interrupt\n  if (state.user_response) {\n    userResponse = state.user_response;\n  } else {\n    // Use LangGraph interrupt to get user input on steps\n    // This will pause execution and wait for user input in the frontend\n    userResponse = interrupt({ steps: state.steps });\n    // Store the user response in state for when the node restarts\n    state.user_response = userResponse;\n  }\n\n  // Generate the creative completion response\n  const finalPrompt = `\n    Provide a textual description of how you are performing the task.\n    If the user has disabled a step, you are not allowed to perform that step.\n    However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n    some humor in the description of how you are performing the task.\n    Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n  `;\n\n  const finalResponse = await new ChatOpenAI({ model: \"gpt-4o\" }).invoke([\n    new SystemMessage({ content: finalPrompt }),\n    { role: \"user\", content: userResponse }\n  ], config);\n\n  // Add the final response to messages\n  const messages = [...state.messages, finalResponse];\n\n  // Clear the user_response from state to prepare for future interactions\n  const newState = { ...state };\n  delete newState.user_response;\n\n  // Return to END with the updated messages\n  return new Command({\n    goto: END,\n    update: {\n      messages: messages,\n      steps: state.steps,\n    }\n  });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"start_flow\", startFlow);\nworkflow.addNode(\"chat_node\", chatNode);\nworkflow.addNode(\"process_steps_node\", processStepsNode);\n\n// Add edges\nworkflow.setEntryPoint(\"start_flow\");\nworkflow.addEdge(START, \"start_flow\");\nworkflow.addEdge(\"start_flow\", \"chat_node\");\nworkflow.addEdge(\"process_steps_node\", END);\n\n// Add conditional edges from chat_node\nworkflow.addConditionalEdges(\n  \"chat_node\",\n  (state: AgentState) => {\n    // This would be determined by the Command returned from chat_node\n    // For now, we'll assume the logic is handled in the Command's goto property\n    return \"continue\";\n  },\n  {\n    \"process_steps_node\": \"process_steps_node\",\n    \"continue\": END,\n  }\n);\n\n// Compile the graph\nexport const humanInTheLoopGraph = workflow.compile();",
+      "content": "/**\n * A LangGraph implementation of the human-in-the-loop agent.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, interrupt, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\nimport { aguiTransformer } from \"@ag-ui/langgraph/transformer\";\n\nconst DEFINE_TASK_TOOL = {\n  type: \"function\",\n  function: {\n    name: \"plan_execution_steps\",\n    description: \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in imperative form (i.e. Dig hole, Open door, ...)\",\n    parameters: {\n      type: \"object\",\n      properties: {\n        steps: {\n          type: \"array\",\n          items: {\n            type: \"object\",\n            properties: {\n              description: {\n                type: \"string\",\n                description: \"The text of the step in imperative form\"\n              },\n              status: {\n                type: \"string\",\n                enum: [\"enabled\"],\n                description: \"The status of the step, always 'enabled'\"\n              }\n            },\n            required: [\"description\", \"status\"]\n          },\n          description: \"An array of 10 step objects, each containing text and status\"\n        }\n      },\n      required: [\"steps\"]\n    }\n  }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n  steps: Annotation<Array<{ description: string; status: string }>>({\n    reducer: (x, y) => y ?? x,\n    default: () => []\n  }),\n  tools: Annotation<any[]>(),\n  user_response: Annotation<string | undefined>({\n    reducer: (x, y) => y ?? x,\n    default: () => undefined\n  }),\n  ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig): Promise<Command> {\n  /**\n   * This is the entry point for the flow.\n   */\n\n  // Initialize steps list if not exists\n  if (!state.steps) {\n    state.steps = [];\n  }\n\n  return new Command({\n    goto: \"chat_node\",\n    update: {\n      messages: state.messages,\n      steps: state.steps,\n    }\n  });\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise<Command> {\n  /**\n   * Standard chat node where the agent processes messages and generates responses.\n   * If task steps are defined, the user can enable/disable them using interrupts.\n   */\n  const systemPrompt = `\n    You are a helpful assistant that can perform any task.\n    You MUST call the \\`plan_execution_steps\\` function when the user asks you to perform a task.\n    Always make sure you will provide tasks based on the user query\n  `;\n\n  // Define the model\n  const model = new ChatOpenAI({ model: \"gpt-4o-mini\" });\n\n  // Define config for the model\n  if (!config) {\n    config = { recursionLimit: 25 };\n  }\n\n  // Use \"predict_state\" metadata to set up streaming for the write_document tool\n  if (!config.metadata) config.metadata = {};\n  config.metadata.predict_state = [{\n    state_key: \"steps\",\n    tool: \"plan_execution_steps\",\n    tool_argument: \"steps\"\n  }];\n\n  // Bind the tools to the model\n  const modelWithTools = model.bindTools(\n    [\n      ...state.tools,\n      DEFINE_TASK_TOOL\n    ],\n    {\n      // Disable parallel tool calls to avoid race conditions\n      parallel_tool_calls: false,\n    }\n  );\n\n  // Run the model and generate a response\n  const response = await modelWithTools.invoke([\n    new SystemMessage({ content: systemPrompt }),\n    ...state.messages,\n  ], config);\n\n  // Update messages with the response\n  const messages = [...state.messages, response];\n\n  // Handle tool calls\n  if (response.tool_calls && response.tool_calls.length > 0) {\n    const toolCall = response.tool_calls[0];\n\n    if (toolCall.name === \"plan_execution_steps\") {\n      // Get the steps from the tool call\n      const stepsRaw = toolCall.args.steps || [];\n\n      // Set initial status to \"enabled\" for all steps\n      const stepsData: Array<{ description: string; status: string }> = [];\n\n      // Handle different potential formats of steps data\n      if (Array.isArray(stepsRaw)) {\n        for (const step of stepsRaw) {\n          if (typeof step === 'object' && step.description) {\n            stepsData.push({\n              description: step.description,\n              status: \"enabled\"\n            });\n          } else if (typeof step === 'string') {\n            stepsData.push({\n              description: step,\n              status: \"enabled\"\n            });\n          }\n        }\n      }\n\n      // If no steps were processed correctly, return to END with the updated messages\n      if (stepsData.length === 0) {\n        return new Command({\n          goto: END,\n          update: {\n            messages: messages,\n            steps: state.steps,\n          }\n        });\n      }\n\n      // Update steps in state and emit to frontend\n      state.steps = stepsData;\n\n      // Add a tool response to satisfy OpenAI's requirements\n      const toolResponse = {\n        role: \"tool\" as const,\n        content: \"Task steps generated.\",\n        tool_call_id: toolCall.id\n      };\n\n      const updatedMessages = [...messages, toolResponse];\n\n      // Move to the process_steps_node which will handle the interrupt and final response\n      return new Command({\n        goto: \"process_steps_node\",\n        update: {\n          messages: updatedMessages,\n          steps: state.steps,\n        }\n      });\n    }\n  }\n\n  // If no tool calls or not plan_execution_steps, return to END with the updated messages\n  return new Command({\n    goto: END,\n    update: {\n      messages: messages,\n      steps: state.steps,\n    }\n  });\n}\n\nasync function processStepsNode(state: AgentState, config?: RunnableConfig): Promise<Command> {\n  /**\n   * This node handles the user interrupt for step customization and generates the final response.\n   */\n\n  let userResponse: string;\n\n  // Check if we already have a user_response in the state\n  // This happens when the node restarts after an interrupt\n  if (state.user_response) {\n    userResponse = state.user_response;\n  } else {\n    // Use LangGraph interrupt to get user input on steps\n    // This will pause execution and wait for user input in the frontend\n    userResponse = interrupt({ steps: state.steps });\n    // Store the user response in state for when the node restarts\n    state.user_response = userResponse;\n  }\n\n  // Generate the creative completion response\n  const finalPrompt = `\n    Provide a textual description of how you are performing the task.\n    If the user has disabled a step, you are not allowed to perform that step.\n    However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n    some humor in the description of how you are performing the task.\n    Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n  `;\n\n  const finalResponse = await new ChatOpenAI({ model: \"gpt-4o\" }).invoke([\n    new SystemMessage({ content: finalPrompt }),\n    { role: \"user\", content: userResponse }\n  ], config);\n\n  // Add the final response to messages\n  const messages = [...state.messages, finalResponse];\n\n  // Clear the user_response from state to prepare for future interactions\n  const newState = { ...state };\n  delete newState.user_response;\n\n  // Return to END with the updated messages\n  return new Command({\n    goto: END,\n    update: {\n      messages: messages,\n      steps: state.steps,\n    }\n  });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"start_flow\", startFlow);\nworkflow.addNode(\"chat_node\", chatNode);\nworkflow.addNode(\"process_steps_node\", processStepsNode);\n\n// Add edges\nworkflow.setEntryPoint(\"start_flow\");\nworkflow.addEdge(START, \"start_flow\");\nworkflow.addEdge(\"start_flow\", \"chat_node\");\nworkflow.addEdge(\"process_steps_node\", END);\n\n// Add conditional edges from chat_node\nworkflow.addConditionalEdges(\n  \"chat_node\",\n  (state: AgentState) => {\n    // This would be determined by the Command returned from chat_node\n    // For now, we'll assume the logic is handled in the Command's goto property\n    return \"continue\";\n  },\n  {\n    \"process_steps_node\": \"process_steps_node\",\n    \"continue\": END,\n  }\n);\n\n// Compile the graph\nexport const humanInTheLoopGraph = workflow.compile({\n  transformers: [aguiTransformer],\n});",
       "language": "ts",
       "type": "file"
     }
@@ -394,7 +394,7 @@
     },
     {
       "name": "agent.ts",
-      "content": "/**\n * An example demonstrating agentic generative UI using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\nimport { Annotation, Command, MessagesAnnotation, StateGraph, END } from \"@langchain/langgraph\";\n\n// This tool simulates performing a task on the server.\n// The tool call will be streamed to the frontend as it is being generated.\nconst PERFORM_TASK_TOOL = {\n  type: \"function\",\n  function: {\n    name: \"generate_task_steps_generative_ui\",\n    description: \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in gerund form (i.e. Digging hole, opening door, ...)\",\n    parameters: {\n      type: \"object\",\n      properties: {\n        steps: {\n          type: \"array\",\n          items: {\n            type: \"object\",\n            properties: {\n              description: {\n                type: \"string\",\n                description: \"The text of the step in gerund form\"\n              },\n              status: {\n                type: \"string\",\n                enum: [\"pending\"],\n                description: \"The status of the step, always 'pending'\"\n              }\n            },\n            required: [\"description\", \"status\"]\n          },\n          description: \"An array of 10 step objects, each containing text and status\"\n        }\n      },\n      required: [\"steps\"]\n    }\n  }\n};\n\nconst AgentStateAnnotation = Annotation.Root({\n  steps: Annotation<Array<{ description: string; status: string }>>({\n    reducer: (x, y) => y ?? x,\n    default: () => []\n  }),\n  tools: Annotation<any[]>({\n    reducer: (x, y) => y ?? x,\n    default: () => []\n  }),\n  ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig) {\n  /**\n   * This is the entry point for the flow.\n   * Always clear steps so old steps from previous runs don't persist.\n   */\n  return {\n    steps: []\n  };\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n  /**\n   * Standard chat node.\n   */\n  const systemPrompt = `\n    You are a helpful assistant assisting with any task. \n    When asked to do something, you MUST call the function \\`generate_task_steps_generative_ui\\`\n    that was provided to you.\n    If you called the function, you MUST NOT repeat the steps in your next response to the user.\n    Just give a very brief summary (one sentence) of what you did with some emojis. \n    Always say you actually did the steps, not merely generated them.\n  `;\n\n  // Define the model\n  const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n  // Define config for the model with emit_intermediate_state to stream tool calls to frontend\n  if (!config) {\n    config = { recursionLimit: 25 };\n  }\n\n  // Use \"predict_state\" metadata to set up streaming for the write_document tool\n  if (!config.metadata) config.metadata = {};\n  config.metadata.predict_state = [{\n    state_key: \"steps\",\n    tool: \"generate_task_steps_generative_ui\",\n    tool_argument: \"steps\",\n  }];\n\n  // Bind the tools to the model\n  const modelWithTools = model.bindTools(\n    [\n      ...state.tools,\n      PERFORM_TASK_TOOL\n    ],\n    {\n      // Disable parallel tool calls to avoid race conditions\n      parallel_tool_calls: false,\n    }\n  );\n\n  // Run the model to generate a response\n  const response = await modelWithTools.invoke([\n    new SystemMessage({ content: systemPrompt }),\n    ...state.messages,\n  ], config);\n\n  const messages = [...state.messages, response];\n\n  // Extract any tool calls from the response\n  if (response.tool_calls && response.tool_calls.length > 0) {\n    const toolCall = response.tool_calls[0];\n\n    if (toolCall.name === \"generate_task_steps_generative_ui\") {\n      const steps = toolCall.args.steps.map((step: any) => ({\n        description: step.description,\n        status: step.status\n      }));\n\n      // Add the tool response to messages\n      const toolResponse = {\n        role: \"tool\" as const,\n        content: \"Steps executed.\",\n        tool_call_id: toolCall.id\n      };\n\n      const updatedMessages = [...messages, toolResponse];\n\n      // Simulate executing the steps\n      for (let i = 0; i < steps.length; i++) {\n        // simulate executing the step\n        await new Promise(resolve => setTimeout(resolve, 1000));\n        steps[i].status = \"completed\";\n        // Update the state with the completed step\n        state.steps = steps;\n        // Emit custom events to update the frontend\n        await dispatchCustomEvent(\"manually_emit_state\", state, config);\n      }\n\n      return new Command({\n        goto: \"chat_node\",\n        update: {\n          messages: updatedMessages,\n          steps: state.steps\n        }\n      });\n    }\n  }\n\n  return new Command({\n    goto: END,\n    update: {\n      messages: messages,\n      steps: state.steps\n    }\n  });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n  .addNode(\"start_flow\", startFlow)\n  .addNode(\"chat_node\", chatNode)\n  .addEdge(\"__start__\", \"start_flow\")\n  .addEdge(\"start_flow\", \"chat_node\")\n  .addEdge(\"chat_node\", \"__end__\");\n\n// Compile the graph\nexport const agenticGenerativeUiGraph = workflow.compile();",
Digging hole, opening door, ...)\",\n parameters: {\n type: \"object\",\n properties: {\n steps: {\n type: \"array\",\n items: {\n type: \"object\",\n properties: {\n description: {\n type: \"string\",\n description: \"The text of the step in gerund form\"\n },\n status: {\n type: \"string\",\n enum: [\"pending\"],\n description: \"The status of the step, always 'pending'\"\n }\n },\n required: [\"description\", \"status\"]\n },\n description: \"An array of 10 step objects, each containing text and status\"\n }\n },\n required: [\"steps\"]\n }\n }\n};\n\nconst AgentStateAnnotation = Annotation.Root({\n steps: Annotation>({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n tools: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig) {\n /**\n * This is the entry point for the flow.\n * Always clear steps so old steps from previous runs don't persist.\n */\n return {\n steps: []\n };\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n /**\n * Standard chat node.\n */\n const systemPrompt = `\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function \\`generate_task_steps_generative_ui\\`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. \n Always say you actually did the steps, not merely generated them.\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n \n // Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the write_document tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"steps\",\n tool: \"generate_task_steps_generative_ui\",\n tool_argument: \"steps\",\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n PERFORM_TASK_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model to generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n const messages = [...state.messages, response];\n\n // Extract any tool calls from the response\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n \n if (toolCall.name === \"generate_task_steps_generative_ui\") {\n const steps = toolCall.args.steps.map((step: any) => ({\n description: step.description,\n status: step.status\n }));\n \n // Add the tool response to messages\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Steps executed.\",\n tool_call_id: toolCall.id\n };\n\n const updatedMessages = [...messages, toolResponse];\n\n // Simulate executing the steps\n for (let i = 0; i < steps.length; i++) {\n // simulate executing the step\n await new Promise(resolve => setTimeout(resolve, 1000));\n steps[i].status = \"completed\";\n // Update the state with the completed step\n state.steps = steps;\n // Emit custom events to update the frontend\n await dispatchCustomEvent(\"manually_emit_state\", state, 
config);\n }\n \n return new Command({\n goto: \"chat_node\",\n update: {\n messages: updatedMessages,\n steps: state.steps\n }\n });\n }\n }\n\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n .addNode(\"start_flow\", startFlow)\n .addNode(\"chat_node\", chatNode)\n .addEdge(\"__start__\", \"start_flow\")\n .addEdge(\"start_flow\", \"chat_node\")\n .addEdge(\"chat_node\", \"__end__\");\n\n// Compile the graph\nexport const agenticGenerativeUiGraph = workflow.compile({\n transformers: [aguiTransformer],\n});", "language": "ts", "type": "file" } @@ -426,7 +426,7 @@ }, { "name": "agent.ts", - "content": "/**\n * A demo of predictive state updates using LangGraph.\n */\n\nimport { v4 as uuidv4 } from \"uuid\";\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\n\nconst WRITE_DOCUMENT_TOOL = {\n type: \"function\",\n function: {\n name: \"write_document_local\",\n description: [\n \"Write a document. Use markdown formatting to format the document.\",\n \"It's good to format the document extensively so it's easy to read.\",\n \"You can use all kinds of markdown.\",\n \"However, do not use italic or strike-through formatting, it's reserved for another purpose.\",\n \"You MUST write the full document, even when changing only a few words.\",\n \"When making edits to the document, try to make them minimal - do not change every word.\",\n \"Keep stories SHORT!\"\n ].join(\" \"),\n parameters: {\n type: \"object\",\n properties: {\n document: {\n type: \"string\",\n description: \"The document to write\"\n },\n },\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n document: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => undefined\n }),\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * Standard chat node.\n */\n\n const systemPrompt = `\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 
2 sentences max.\n This is the current state of the document: ----\\n ${state.document || ''}\\n-----\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n // Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"document\",\n tool: \"write_document_local\",\n tool_argument: \"document\"\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n WRITE_DOCUMENT_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model to generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n // Update messages with the response\n const messages = [...state.messages, response];\n\n // Extract any tool calls from the response\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n\n if (toolCall.name === \"write_document_local\") {\n // Add the tool response to messages\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Document written.\",\n tool_call_id: toolCall.id\n };\n\n // Add confirmation tool call\n const confirmToolCall = {\n role: \"assistant\" as const,\n content: \"\",\n tool_calls: [{\n id: uuidv4(),\n type: \"function\" as const,\n function: {\n name: \"confirm_changes\",\n arguments: \"{}\"\n }\n }]\n };\n\n const updatedMessages = [...messages, toolResponse, confirmToolCall];\n\n // Return Command to route to end\n return new Command({\n goto: END,\n update: {\n messages: updatedMessages,\n document: toolCall.args.document\n }\n });\n }\n }\n\n // If no tool was called, go to end\n return new Command({\n goto: END,\n update: {\n messages: messages\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"chat_node\", chatNode);\n\n// Add edges\nworkflow.addEdge(START, \"chat_node\");\nworkflow.addEdge(\"chat_node\", END);\n\n// Compile the graph\nexport const predictiveStateUpdatesGraph = workflow.compile();", + "content": "/**\n * A demo of predictive state updates using LangGraph.\n */\n\nimport { v4 as uuidv4 } from \"uuid\";\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\nimport { aguiTransformer } from \"@ag-ui/langgraph/transformer\";\n\nconst WRITE_DOCUMENT_TOOL = {\n type: \"function\",\n function: {\n name: \"write_document_local\",\n description: [\n \"Write a document. 
Use markdown formatting to format the document.\",\n \"It's good to format the document extensively so it's easy to read.\",\n \"You can use all kinds of markdown.\",\n \"However, do not use italic or strike-through formatting, it's reserved for another purpose.\",\n \"You MUST write the full document, even when changing only a few words.\",\n \"When making edits to the document, try to make them minimal - do not change every word.\",\n \"Keep stories SHORT!\"\n ].join(\" \"),\n parameters: {\n type: \"object\",\n properties: {\n document: {\n type: \"string\",\n description: \"The document to write\"\n },\n },\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n document: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => undefined\n }),\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * Standard chat node.\n */\n\n const systemPrompt = `\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 2 sentences max.\n This is the current state of the document: ----\\n ${state.document || ''}\\n-----\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n // Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"document\",\n tool: \"write_document_local\",\n tool_argument: \"document\"\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n WRITE_DOCUMENT_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model to generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n // Update messages with the response\n const messages = [...state.messages, response];\n\n // Extract any tool calls from the response\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n\n if (toolCall.name === \"write_document_local\") {\n // Add the tool response to messages\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Document written.\",\n tool_call_id: toolCall.id\n };\n\n // Add confirmation tool call\n const confirmToolCall = {\n role: \"assistant\" as const,\n content: \"\",\n tool_calls: [{\n id: uuidv4(),\n type: \"function\" as const,\n function: {\n name: \"confirm_changes\",\n arguments: \"{}\"\n }\n }]\n };\n\n const updatedMessages = [...messages, toolResponse, confirmToolCall];\n\n // Return Command to route to end\n return new Command({\n goto: END,\n update: {\n messages: updatedMessages,\n document: toolCall.args.document\n }\n });\n }\n }\n\n // If no tool was called, go to end\n return new Command({\n goto: END,\n update: {\n messages: messages\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add 
nodes\nworkflow.addNode(\"chat_node\", chatNode);\n\n// Add edges\nworkflow.addEdge(START, \"chat_node\");\nworkflow.addEdge(\"chat_node\", END);\n\n// Compile the graph\nexport const predictiveStateUpdatesGraph = workflow.compile({\n transformers: [aguiTransformer],\n});", "language": "ts", "type": "file" } @@ -458,7 +458,7 @@ }, { "name": "agent.ts", - "content": "/**\n * A demo of shared state between the agent and CopilotKit using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\"\n}\n\nenum SpecialPreferences {\n HIGH_PROTEIN = \"High Protein\",\n LOW_CARB = \"Low Carb\",\n SPICY = \"Spicy\",\n BUDGET_FRIENDLY = \"Budget-Friendly\",\n ONE_POT_MEAL = \"One-Pot Meal\",\n VEGETARIAN = \"Vegetarian\",\n VEGAN = \"Vegan\"\n}\n\nenum CookingTime {\n FIVE_MIN = \"5 min\",\n FIFTEEN_MIN = \"15 min\",\n THIRTY_MIN = \"30 min\",\n FORTY_FIVE_MIN = \"45 min\",\n SIXTY_PLUS_MIN = \"60+ min\"\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n skill_level: SkillLevel;\n special_preferences: SpecialPreferences[];\n cooking_time: CookingTime;\n ingredients: Ingredient[];\n instructions: string[];\n changes?: string;\n}\n\nconst GENERATE_RECIPE_TOOL = {\n type: \"function\",\n function: {\n name: \"generate_recipe\",\n description: \"Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it. Make sure the recipe is complete. 
ALWAYS provide the entire recipe, not just the changes.\",\n parameters: {\n type: \"object\",\n properties: {\n recipe: {\n type: \"object\",\n properties: {\n skill_level: {\n type: \"string\",\n enum: Object.values(SkillLevel),\n description: \"The skill level required for the recipe\"\n },\n special_preferences: {\n type: \"array\",\n items: {\n type: \"string\",\n enum: Object.values(SpecialPreferences)\n },\n description: \"A list of special preferences for the recipe\"\n },\n cooking_time: {\n type: \"string\",\n enum: Object.values(CookingTime),\n description: \"The cooking time of the recipe\"\n },\n ingredients: {\n type: \"array\",\n items: {\n type: \"object\",\n properties: {\n icon: { type: \"string\", description: \"The icon emoji (not emoji code like '\\\\u1f35e', but the actual emoji like 🥕) of the ingredient\" },\n name: { type: \"string\" },\n amount: { type: \"string\" }\n }\n },\n description: \"Entire list of ingredients for the recipe, including the new ingredients and the ones that are already in the recipe\"\n },\n instructions: {\n type: \"array\",\n items: { type: \"string\" },\n description: \"Entire list of instructions for the recipe, including the new instructions and the ones that are already there\"\n },\n changes: {\n type: \"string\",\n description: \"A description of the changes made to the recipe\"\n }\n },\n }\n },\n required: [\"recipe\"]\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n recipe: Annotation(),\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * This is the entry point for the flow.\n */\n\n // Initialize recipe if not exists\n if (!state.recipe) {\n state.recipe = {\n skill_level: SkillLevel.BEGINNER,\n special_preferences: [],\n cooking_time: CookingTime.FIFTEEN_MIN,\n ingredients: [{ icon: \"🍴\", name: \"Sample Ingredient\", amount: \"1 unit\" }],\n instructions: [\"First step instruction\"]\n };\n // Emit the initial state to ensure it's properly shared with the frontend\n await dispatchCustomEvent(\"manually_emit_intermediate_state\", state, config);\n }\n \n return new Command({\n goto: \"chat_node\",\n update: {\n messages: state.messages,\n recipe: state.recipe\n }\n });\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * Standard chat node.\n */\n // Create a safer serialization of the recipe\n let recipeJson = \"No recipe yet\";\n if (state.recipe) {\n try {\n recipeJson = JSON.stringify(state.recipe, null, 2);\n } catch (e) {\n recipeJson = `Error serializing recipe: ${e}`;\n }\n }\n\n const systemPrompt = `You are a helpful assistant for creating recipes. \n This is the current state of the recipe: ${recipeJson}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. 
dont describe the recipe, just say what you did.\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o-mini\" });\n \n // Define config for the model\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the write_document tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"recipe\",\n tool: \"generate_recipe\",\n tool_argument: \"recipe\"\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n GENERATE_RECIPE_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model and generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n // Update messages with the response\n const messages = [...state.messages, response];\n \n // Handle tool calls\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n \n if (toolCall.name === \"generate_recipe\") {\n // Update recipe state with tool_call_args\n const recipeData = toolCall.args.recipe;\n let recipe: Recipe;\n // If we have an existing recipe, update it\n if (state.recipe) {\n recipe = { ...state.recipe };\n for (const [key, value] of Object.entries(recipeData)) {\n if (value !== null && value !== undefined) { // Only update fields that were provided\n (recipe as any)[key] = value;\n }\n }\n } else {\n // Create a new recipe\n recipe = {\n skill_level: recipeData.skill_level || SkillLevel.BEGINNER,\n special_preferences: recipeData.special_preferences || [],\n cooking_time: recipeData.cooking_time || CookingTime.FIFTEEN_MIN,\n ingredients: recipeData.ingredients || [],\n instructions: recipeData.instructions || []\n };\n }\n \n // Add tool response to messages\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Recipe generated.\",\n tool_call_id: toolCall.id\n };\n \n const updatedMessages = [...messages, toolResponse];\n \n // Explicitly emit the updated state to ensure it's shared with frontend\n state.recipe = recipe;\n await dispatchCustomEvent(\"manually_emit_intermediate_state\", state, config);\n \n // Return command with updated recipe\n return new Command({\n goto: \"start_flow\",\n update: {\n messages: updatedMessages,\n recipe: recipe\n }\n });\n }\n }\n\n return new Command({\n goto: END,\n update: {\n messages: messages,\n recipe: state.recipe\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"start_flow\", startFlow);\nworkflow.addNode(\"chat_node\", chatNode);\n\n// Add edges\nworkflow.setEntryPoint(\"start_flow\");\nworkflow.addEdge(START, \"start_flow\");\nworkflow.addEdge(\"start_flow\", \"chat_node\");\nworkflow.addEdge(\"chat_node\", END);\n\n// Compile the graph\nexport const sharedStateGraph = workflow.compile();", + "content": "/**\n * A demo of shared state between the agent and CopilotKit using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\nimport { aguiTransformer } from 
\"@ag-ui/langgraph/transformer\";\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\"\n}\n\nenum SpecialPreferences {\n HIGH_PROTEIN = \"High Protein\",\n LOW_CARB = \"Low Carb\",\n SPICY = \"Spicy\",\n BUDGET_FRIENDLY = \"Budget-Friendly\",\n ONE_POT_MEAL = \"One-Pot Meal\",\n VEGETARIAN = \"Vegetarian\",\n VEGAN = \"Vegan\"\n}\n\nenum CookingTime {\n FIVE_MIN = \"5 min\",\n FIFTEEN_MIN = \"15 min\",\n THIRTY_MIN = \"30 min\",\n FORTY_FIVE_MIN = \"45 min\",\n SIXTY_PLUS_MIN = \"60+ min\"\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n skill_level: SkillLevel;\n special_preferences: SpecialPreferences[];\n cooking_time: CookingTime;\n ingredients: Ingredient[];\n instructions: string[];\n changes?: string;\n}\n\nconst GENERATE_RECIPE_TOOL = {\n type: \"function\",\n function: {\n name: \"generate_recipe\",\n description: \"Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it. Make sure the recipe is complete. ALWAYS provide the entire recipe, not just the changes.\",\n parameters: {\n type: \"object\",\n properties: {\n recipe: {\n type: \"object\",\n properties: {\n skill_level: {\n type: \"string\",\n enum: Object.values(SkillLevel),\n description: \"The skill level required for the recipe\"\n },\n special_preferences: {\n type: \"array\",\n items: {\n type: \"string\",\n enum: Object.values(SpecialPreferences)\n },\n description: \"A list of special preferences for the recipe\"\n },\n cooking_time: {\n type: \"string\",\n enum: Object.values(CookingTime),\n description: \"The cooking time of the recipe\"\n },\n ingredients: {\n type: \"array\",\n items: {\n type: \"object\",\n properties: {\n icon: { type: \"string\", description: \"The icon emoji (not emoji code like '\\\\u1f35e', but the actual emoji like 🥕) of the ingredient\" },\n name: { type: \"string\" },\n amount: { type: \"string\" }\n }\n },\n description: \"Entire list of ingredients for the recipe, including the new ingredients and the ones that are already in the recipe\"\n },\n instructions: {\n type: \"array\",\n items: { type: \"string\" },\n description: \"Entire list of instructions for the recipe, including the new instructions and the ones that are already there\"\n },\n changes: {\n type: \"string\",\n description: \"A description of the changes made to the recipe\"\n }\n },\n }\n },\n required: [\"recipe\"]\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n recipe: Annotation(),\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * This is the entry point for the flow.\n */\n\n // Initialize recipe if not exists\n if (!state.recipe) {\n state.recipe = {\n skill_level: SkillLevel.BEGINNER,\n special_preferences: [],\n cooking_time: CookingTime.FIFTEEN_MIN,\n ingredients: [{ icon: \"🍴\", name: \"Sample Ingredient\", amount: \"1 unit\" }],\n instructions: [\"First step instruction\"]\n };\n // Emit the initial state to ensure it's properly shared with the frontend\n await dispatchCustomEvent(\"manually_emit_intermediate_state\", state, config);\n }\n \n return new Command({\n goto: \"chat_node\",\n update: {\n messages: state.messages,\n recipe: state.recipe\n }\n });\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * Standard chat node.\n */\n 
// Create a safer serialization of the recipe\n let recipeJson = \"No recipe yet\";\n if (state.recipe) {\n try {\n recipeJson = JSON.stringify(state.recipe, null, 2);\n } catch (e) {\n recipeJson = `Error serializing recipe: ${e}`;\n }\n }\n\n const systemPrompt = `You are a helpful assistant for creating recipes. \n This is the current state of the recipe: ${recipeJson}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. dont describe the recipe, just say what you did.\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o-mini\" });\n \n // Define config for the model\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the write_document tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"recipe\",\n tool: \"generate_recipe\",\n tool_argument: \"recipe\"\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n GENERATE_RECIPE_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model and generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n // Update messages with the response\n const messages = [...state.messages, response];\n \n // Handle tool calls\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n \n if (toolCall.name === \"generate_recipe\") {\n // Update recipe state with tool_call_args\n const recipeData = toolCall.args.recipe;\n let recipe: Recipe;\n // If we have an existing recipe, update it\n if (state.recipe) {\n recipe = { ...state.recipe };\n for (const [key, value] of Object.entries(recipeData)) {\n if (value !== null && value !== undefined) { // Only update fields that were provided\n (recipe as any)[key] = value;\n }\n }\n } else {\n // Create a new recipe\n recipe = {\n skill_level: recipeData.skill_level || SkillLevel.BEGINNER,\n special_preferences: recipeData.special_preferences || [],\n cooking_time: recipeData.cooking_time || CookingTime.FIFTEEN_MIN,\n ingredients: recipeData.ingredients || [],\n instructions: recipeData.instructions || []\n };\n }\n \n // Add tool response to messages\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Recipe generated.\",\n tool_call_id: toolCall.id\n };\n \n const updatedMessages = [...messages, toolResponse];\n \n // Explicitly emit the updated state to ensure it's shared with frontend\n state.recipe = recipe;\n await dispatchCustomEvent(\"manually_emit_intermediate_state\", state, config);\n \n // Return command with updated recipe\n return new Command({\n goto: \"start_flow\",\n update: {\n messages: updatedMessages,\n recipe: recipe\n }\n });\n }\n }\n\n return new Command({\n goto: END,\n update: {\n messages: messages,\n recipe: state.recipe\n }\n });\n}\n\n// Define the graph\nconst workflow = new 
StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"start_flow\", startFlow);\nworkflow.addNode(\"chat_node\", chatNode);\n\n// Add edges\nworkflow.setEntryPoint(\"start_flow\");\nworkflow.addEdge(START, \"start_flow\");\nworkflow.addEdge(\"start_flow\", \"chat_node\");\nworkflow.addEdge(\"chat_node\", END);\n\n// Compile the graph\nexport const sharedStateGraph = workflow.compile({\n transformers: [aguiTransformer],\n});", "language": "ts", "type": "file" } @@ -466,7 +466,7 @@ "langgraph::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }) ,\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; 
gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial<Haiku> }) => {\n if (!args.japanese) return <></>;\n return <HaikuCard haiku={args} />;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n
\n
\n \n \n {haikus.map((haiku, index) => (\n \n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n
\n
\n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n return (\n \n {/* Decorative background elements */}\n
\n
\n\n {/* Haiku Text */}\n
\n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n

\n \n {haiku.english?.[index]}\n

\n
\n ))}\n
\n\n {/* Image */}\n {haiku.image_name && (\n
\n
\n \n
\n
\n
\n )}\n
\n );\n}\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n description: \"Generates a haiku with Japanese and English translations, an image name, and a CSS gradient for the background.\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }),\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n 
return <HaikuCard haiku={args} />;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n
\n
\n \n \n {haikus.map((haiku, index) => (\n \n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n
\n
\n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n return (\n \n {/* Decorative background elements */}\n
\n
\n\n {/* Haiku Text */}\n
\n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n

\n \n {haiku.english?.[index]}\n

\n
\n ))}\n
\n\n {/* Image */}\n {haiku.image_name && (\n
\n
\n \n
\n
\n
\n )}\n
\n );\n}\n", "language": "typescript", "type": "file" }, @@ -490,7 +490,7 @@ }, { "name": "agent.ts", - "content": "/**\n * An example demonstrating tool-based generative UI using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\n\n\nexport const AgentStateAnnotation = Annotation.Root({\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n const modelWithTools = model.bindTools(\n [\n ...state.tools || []\n ],\n { parallel_tool_calls: false }\n );\n\n const systemMessage = new SystemMessage({\n content: 'Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.'\n });\n\n const response = await modelWithTools.invoke([\n systemMessage,\n ...state.messages,\n ], config);\n\n return new Command({\n goto: END,\n update: {\n messages: [response]\n }\n });\n}\n\nconst workflow = new StateGraph(AgentStateAnnotation);\nworkflow.addNode(\"chat_node\", chatNode);\n\nworkflow.addEdge(START, \"chat_node\");\n\nexport const toolBasedGenerativeUiGraph = workflow.compile();", + "content": "/**\n * An example demonstrating tool-based generative UI using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\nimport { aguiTransformer } from \"@ag-ui/langgraph/transformer\";\n\n\nexport const AgentStateAnnotation = Annotation.Root({\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n const modelWithTools = model.bindTools(\n [\n ...state.tools || []\n ],\n { parallel_tool_calls: false }\n );\n\n const systemMessage = new SystemMessage({\n content: 'Help the user with writing Haikus. 
If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.'\n });\n\n const response = await modelWithTools.invoke([\n systemMessage,\n ...state.messages,\n ], config);\n\n return new Command({\n goto: END,\n update: {\n messages: [response]\n }\n });\n}\n\nconst workflow = new StateGraph(AgentStateAnnotation);\nworkflow.addNode(\"chat_node\", chatNode);\n\nworkflow.addEdge(START, \"chat_node\");\n\nexport const toolBasedGenerativeUiGraph = workflow.compile({\n transformers: [aguiTransformer],\n});", "language": "ts", "type": "file" } @@ -522,7 +522,7 @@ }, { "name": "agent.ts", - "content": "/**\n * A travel agent supervisor demo showcasing multi-agent architecture with subgraphs.\n * The supervisor coordinates specialized agents: flights finder, hotels finder, and experiences finder.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage, AIMessage, ToolMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { \n Annotation, \n MessagesAnnotation, \n StateGraph, \n Command, \n START, \n END, \n interrupt \n} from \"@langchain/langgraph\";\n\n// Travel data interfaces\ninterface Flight {\n airline: string;\n departure: string;\n arrival: string;\n price: string;\n duration: string;\n}\n\ninterface Hotel {\n name: string;\n location: string;\n price_per_night: string;\n rating: string;\n}\n\ninterface Experience {\n name: string;\n type: \"restaurant\" | \"activity\";\n description: string;\n location: string;\n}\n\ninterface Itinerary {\n flight?: Flight;\n hotel?: Hotel;\n}\n\n// Custom reducer to merge itinerary updates\nfunction mergeItinerary(left: Itinerary | null, right?: Itinerary | null): Itinerary {\n if (!left) left = {};\n if (!right) right = {};\n return { ...left, ...right };\n}\n\n// State annotation for travel agent system\nexport const TravelAgentStateAnnotation = Annotation.Root({\n origin: Annotation(),\n destination: Annotation(),\n flights: Annotation(),\n hotels: Annotation(),\n experiences: Annotation(),\n\n // Itinerary with custom merger\n itinerary: Annotation({\n reducer: mergeItinerary,\n default: () => null\n }),\n\n // Tools available to all agents\n tools: Annotation({\n reducer: (x, y) => y ?? 
x,\n default: () => []\n }),\n\n // Supervisor routing\n next_agent: Annotation(),\n ...MessagesAnnotation.spec,\n});\n\nexport type TravelAgentState = typeof TravelAgentStateAnnotation.State;\n\n// Static data for demonstration\nconst STATIC_FLIGHTS: Flight[] = [\n { airline: \"KLM\", departure: \"Amsterdam (AMS)\", arrival: \"San Francisco (SFO)\", price: \"$650\", duration: \"11h 30m\" },\n { airline: \"United\", departure: \"Amsterdam (AMS)\", arrival: \"San Francisco (SFO)\", price: \"$720\", duration: \"12h 15m\" }\n];\n\nconst STATIC_HOTELS: Hotel[] = [\n { name: \"Hotel Zephyr\", location: \"Fisherman's Wharf\", price_per_night: \"$280/night\", rating: \"4.2 stars\" },\n { name: \"The Ritz-Carlton\", location: \"Nob Hill\", price_per_night: \"$550/night\", rating: \"4.8 stars\" },\n { name: \"Hotel Zoe\", location: \"Union Square\", price_per_night: \"$320/night\", rating: \"4.4 stars\" }\n];\n\nconst STATIC_EXPERIENCES: Experience[] = [\n { name: \"Pier 39\", type: \"activity\", description: \"Iconic waterfront destination with shops and sea lions\", location: \"Fisherman's Wharf\" },\n { name: \"Golden Gate Bridge\", type: \"activity\", description: \"World-famous suspension bridge with stunning views\", location: \"Golden Gate\" },\n { name: \"Swan Oyster Depot\", type: \"restaurant\", description: \"Historic seafood counter serving fresh oysters\", location: \"Polk Street\" },\n { name: \"Tartine Bakery\", type: \"restaurant\", description: \"Artisanal bakery famous for bread and pastries\", location: \"Mission District\" }\n];\n\nfunction createInterrupt(message: string, options: any[], recommendation: any, agent: string) {\n return interrupt({\n message,\n options,\n recommendation,\n agent,\n });\n}\n\n// Flights finder subgraph\nasync function flightsFinder(state: TravelAgentState, config?: RunnableConfig): Promise {\n // Simulate flight search with static data\n const flights = STATIC_FLIGHTS;\n\n const selectedFlight = state.itinerary?.flight;\n \n let flightChoice: Flight;\n const message = `Found ${flights.length} flight options from ${state.origin || 'Amsterdam'} to ${state.destination || 'San Francisco'}.\\n` +\n `I recommend choosing the flight by ${flights[0].airline} since it's known to be on time and cheaper.`\n if (!selectedFlight) {\n const interruptResult = createInterrupt(\n message,\n flights,\n flights[0],\n \"flights\"\n );\n \n // Parse the interrupt result if it's a string\n flightChoice = typeof interruptResult === 'string' ? JSON.parse(interruptResult) : interruptResult;\n } else {\n flightChoice = selectedFlight;\n }\n\n return new Command({\n goto: END,\n update: {\n flights: flights,\n itinerary: {\n flight: flightChoice\n },\n // Return all \"messages\" that the agent was sending\n messages: [\n ...state.messages,\n new AIMessage({\n content: message,\n }),\n new AIMessage({\n content: `Flights Agent: Great. 
I'll book you the ${flightChoice.airline} flight from ${flightChoice.departure} to ${flightChoice.arrival}.`,\n }),\n ]\n }\n });\n}\n\n// Hotels finder subgraph\nasync function hotelsFinder(state: TravelAgentState, config?: RunnableConfig): Promise {\n // Simulate hotel search with static data\n const hotels = STATIC_HOTELS;\n const selectedHotel = state.itinerary?.hotel;\n \n let hotelChoice: Hotel;\n const message = `Found ${hotels.length} accommodation options in ${state.destination || 'San Francisco'}.\\n\n I recommend choosing the ${hotels[2].name} since it strikes the balance between rating, price, and location.`\n if (!selectedHotel) {\n const interruptResult = createInterrupt(\n message,\n hotels,\n hotels[2],\n \"hotels\"\n );\n \n // Parse the interrupt result if it's a string\n hotelChoice = typeof interruptResult === 'string' ? JSON.parse(interruptResult) : interruptResult;\n } else {\n hotelChoice = selectedHotel;\n }\n\n return new Command({\n goto: END,\n update: {\n hotels: hotels,\n itinerary: {\n hotel: hotelChoice\n },\n // Return all \"messages\" that the agent was sending\n messages: [\n ...state.messages,\n new AIMessage({\n content: message,\n }),\n new AIMessage({\n content: `Hotels Agent: Excellent choice! You'll like ${hotelChoice.name}.`\n }),\n ]\n }\n });\n}\n\n// Experiences finder subgraph\nasync function experiencesFinder(state: TravelAgentState, config?: RunnableConfig): Promise {\n // Filter experiences (2 restaurants, 2 activities)\n const restaurants = STATIC_EXPERIENCES.filter(exp => exp.type === \"restaurant\").slice(0, 2);\n const activities = STATIC_EXPERIENCES.filter(exp => exp.type === \"activity\").slice(0, 2);\n const experiences = [...restaurants, ...activities];\n\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n const itinerary = state.itinerary || {};\n\n const systemPrompt = `\n You are the experiences agent. Your job is to find restaurants and activities for the user.\n You already went ahead and found a bunch of experiences. All you have to do now, is to let the user know of your findings.\n \n Current status:\n - Origin: ${state.origin || 'Amsterdam'}\n - Destination: ${state.destination || 'San Francisco'}\n - Flight chosen: ${JSON.stringify(itinerary.flight) || 'None'}\n - Hotel chosen: ${JSON.stringify(itinerary.hotel) || 'None'}\n - Activities found: ${JSON.stringify(activities)}\n - Restaurants found: ${JSON.stringify(restaurants)}\n `;\n\n // Get experiences response\n const response = await model.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n return new Command({\n goto: END,\n update: {\n experiences: experiences,\n messages: [...state.messages, response]\n }\n });\n}\n\n// Supervisor response tool\nconst SUPERVISOR_RESPONSE_TOOL = {\n type: \"function\" as const,\n function: {\n name: \"supervisor_response\",\n description: \"Always use this tool to structure your response to the user.\",\n parameters: {\n type: \"object\",\n properties: {\n answer: {\n type: \"string\",\n description: \"The answer to the user\"\n },\n next_agent: {\n type: \"string\",\n enum: [\"flights_agent\", \"hotels_agent\", \"experiences_agent\", \"complete\"],\n description: \"The agent to go to. 
Not required if you do not want to route to another agent.\"\n }\n },\n required: [\"answer\"]\n }\n }\n};\n\n// Supervisor agent\nasync function supervisorAgent(state: TravelAgentState, config?: RunnableConfig): Promise {\n const itinerary = state.itinerary || {};\n\n // Check what's already completed\n const hasFlights = itinerary.flight !== undefined;\n const hasHotels = itinerary.hotel !== undefined;\n const hasExperiences = state.experiences !== null;\n\n const systemPrompt = `\n You are a travel planning supervisor. Your job is to coordinate specialized agents to help plan a trip.\n \n Current status:\n - Origin: ${state.origin || 'Amsterdam'}\n - Destination: ${state.destination || 'San Francisco'}\n - Flights found: ${hasFlights}\n - Hotels found: ${hasHotels}\n - Experiences found: ${hasExperiences}\n - Itinerary (Things that the user has already confirmed selection on): ${JSON.stringify(itinerary, null, 2)}\n \n Available agents:\n - flights_agent: Finds flight options\n - hotels_agent: Finds hotel options \n - experiences_agent: Finds restaurant and activity recommendations\n - complete: Mark task as complete when all information is gathered\n \n You must route to the appropriate agent based on what's missing. Once all agents have completed their tasks, route to 'complete'.\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Bind the routing tool\n const modelWithTools = model.bindTools(\n [SUPERVISOR_RESPONSE_TOOL],\n {\n parallel_tool_calls: false,\n }\n );\n\n // Get supervisor decision\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n let messages = [...state.messages, response];\n\n // Handle tool calls for routing\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n const toolCallArgs = toolCall.args;\n const nextAgent = toolCallArgs.next_agent;\n\n const toolResponse = new ToolMessage({\n tool_call_id: toolCall.id!,\n content: `Routing to ${nextAgent} and providing the answer`,\n });\n\n messages = [\n ...messages, \n toolResponse, \n new AIMessage({ content: toolCallArgs.answer })\n ];\n\n if (nextAgent && nextAgent !== \"complete\") {\n return new Command({ goto: nextAgent });\n }\n }\n\n // Fallback if no tool call or complete\n return new Command({\n goto: END,\n update: { messages }\n });\n}\n\n// Create subgraphs\nconst flightsGraph = new StateGraph(TravelAgentStateAnnotation);\nflightsGraph.addNode(\"flights_agent_chat_node\", flightsFinder);\nflightsGraph.setEntryPoint(\"flights_agent_chat_node\");\nflightsGraph.addEdge(START, \"flights_agent_chat_node\");\nflightsGraph.addEdge(\"flights_agent_chat_node\", END);\nconst flightsSubgraph = flightsGraph.compile();\n\nconst hotelsGraph = new StateGraph(TravelAgentStateAnnotation);\nhotelsGraph.addNode(\"hotels_agent_chat_node\", hotelsFinder);\nhotelsGraph.setEntryPoint(\"hotels_agent_chat_node\");\nhotelsGraph.addEdge(START, \"hotels_agent_chat_node\");\nhotelsGraph.addEdge(\"hotels_agent_chat_node\", END);\nconst hotelsSubgraph = hotelsGraph.compile();\n\nconst experiencesGraph = new StateGraph(TravelAgentStateAnnotation);\nexperiencesGraph.addNode(\"experiences_agent_chat_node\", experiencesFinder);\nexperiencesGraph.setEntryPoint(\"experiences_agent_chat_node\");\nexperiencesGraph.addEdge(START, 
\"experiences_agent_chat_node\");\nexperiencesGraph.addEdge(\"experiences_agent_chat_node\", END);\nconst experiencesSubgraph = experiencesGraph.compile();\n\n// Main supervisor workflow\nconst workflow = new StateGraph(TravelAgentStateAnnotation);\n\n// Add supervisor and subgraphs as nodes\nworkflow.addNode(\"supervisor\", supervisorAgent, { ends: ['flights_agent', 'hotels_agent', 'experiences_agent', END] });\nworkflow.addNode(\"flights_agent\", flightsSubgraph);\nworkflow.addNode(\"hotels_agent\", hotelsSubgraph);\nworkflow.addNode(\"experiences_agent\", experiencesSubgraph);\n\n// Set entry point\nworkflow.setEntryPoint(\"supervisor\");\nworkflow.addEdge(START, \"supervisor\");\n\n// Add edges back to supervisor after each subgraph\nworkflow.addEdge(\"flights_agent\", \"supervisor\");\nworkflow.addEdge(\"hotels_agent\", \"supervisor\");\nworkflow.addEdge(\"experiences_agent\", \"supervisor\");\n\n// Compile the graph\nexport const subGraphsAgentGraph = workflow.compile();\n", + "content": "/**\n * A travel agent supervisor demo showcasing multi-agent architecture with subgraphs.\n * The supervisor coordinates specialized agents: flights finder, hotels finder, and experiences finder.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage, AIMessage, ToolMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport {\n Annotation,\n MessagesAnnotation,\n StateGraph,\n Command,\n START,\n END,\n interrupt\n} from \"@langchain/langgraph\";\nimport { aguiTransformer } from \"@ag-ui/langgraph/transformer\";\n\n// Travel data interfaces\ninterface Flight {\n airline: string;\n departure: string;\n arrival: string;\n price: string;\n duration: string;\n}\n\ninterface Hotel {\n name: string;\n location: string;\n price_per_night: string;\n rating: string;\n}\n\ninterface Experience {\n name: string;\n type: \"restaurant\" | \"activity\";\n description: string;\n location: string;\n}\n\ninterface Itinerary {\n flight?: Flight;\n hotel?: Hotel;\n}\n\n// Custom reducer to merge itinerary updates\nfunction mergeItinerary(left: Itinerary | null, right?: Itinerary | null): Itinerary {\n if (!left) left = {};\n if (!right) right = {};\n return { ...left, ...right };\n}\n\n// State annotation for travel agent system\nexport const TravelAgentStateAnnotation = Annotation.Root({\n origin: Annotation(),\n destination: Annotation(),\n flights: Annotation(),\n hotels: Annotation(),\n experiences: Annotation(),\n\n // Itinerary with custom merger\n itinerary: Annotation({\n reducer: mergeItinerary,\n default: () => null\n }),\n\n // Tools available to all agents\n tools: Annotation({\n reducer: (x, y) => y ?? 
x,\n default: () => []\n }),\n\n // Supervisor routing\n next_agent: Annotation(),\n ...MessagesAnnotation.spec,\n});\n\nexport type TravelAgentState = typeof TravelAgentStateAnnotation.State;\n\n// Static data for demonstration\nconst STATIC_FLIGHTS: Flight[] = [\n { airline: \"KLM\", departure: \"Amsterdam (AMS)\", arrival: \"San Francisco (SFO)\", price: \"$650\", duration: \"11h 30m\" },\n { airline: \"United\", departure: \"Amsterdam (AMS)\", arrival: \"San Francisco (SFO)\", price: \"$720\", duration: \"12h 15m\" }\n];\n\nconst STATIC_HOTELS: Hotel[] = [\n { name: \"Hotel Zephyr\", location: \"Fisherman's Wharf\", price_per_night: \"$280/night\", rating: \"4.2 stars\" },\n { name: \"The Ritz-Carlton\", location: \"Nob Hill\", price_per_night: \"$550/night\", rating: \"4.8 stars\" },\n { name: \"Hotel Zoe\", location: \"Union Square\", price_per_night: \"$320/night\", rating: \"4.4 stars\" }\n];\n\nconst STATIC_EXPERIENCES: Experience[] = [\n { name: \"Pier 39\", type: \"activity\", description: \"Iconic waterfront destination with shops and sea lions\", location: \"Fisherman's Wharf\" },\n { name: \"Golden Gate Bridge\", type: \"activity\", description: \"World-famous suspension bridge with stunning views\", location: \"Golden Gate\" },\n { name: \"Swan Oyster Depot\", type: \"restaurant\", description: \"Historic seafood counter serving fresh oysters\", location: \"Polk Street\" },\n { name: \"Tartine Bakery\", type: \"restaurant\", description: \"Artisanal bakery famous for bread and pastries\", location: \"Mission District\" }\n];\n\nfunction createInterrupt(message: string, options: any[], recommendation: any, agent: string) {\n return interrupt({\n message,\n options,\n recommendation,\n agent,\n });\n}\n\n// Flights finder subgraph\nasync function flightsFinder(state: TravelAgentState, config?: RunnableConfig): Promise {\n // Simulate flight search with static data\n const flights = STATIC_FLIGHTS;\n\n const selectedFlight = state.itinerary?.flight;\n \n let flightChoice: Flight;\n const message = `Found ${flights.length} flight options from ${state.origin || 'Amsterdam'} to ${state.destination || 'San Francisco'}.\\n` +\n `I recommend choosing the flight by ${flights[0].airline} since it's known to be on time and cheaper.`\n if (!selectedFlight) {\n const interruptResult = createInterrupt(\n message,\n flights,\n flights[0],\n \"flights\"\n );\n \n // Parse the interrupt result if it's a string\n flightChoice = typeof interruptResult === 'string' ? JSON.parse(interruptResult) : interruptResult;\n } else {\n flightChoice = selectedFlight;\n }\n\n return new Command({\n goto: END,\n update: {\n flights: flights,\n itinerary: {\n flight: flightChoice\n },\n // Return all \"messages\" that the agent was sending\n messages: [\n ...state.messages,\n new AIMessage({\n content: message,\n }),\n new AIMessage({\n content: `Flights Agent: Great. 
I'll book you the ${flightChoice.airline} flight from ${flightChoice.departure} to ${flightChoice.arrival}.`,\n }),\n ]\n }\n });\n}\n\n// Hotels finder subgraph\nasync function hotelsFinder(state: TravelAgentState, config?: RunnableConfig): Promise<Command> {\n // Simulate hotel search with static data\n const hotels = STATIC_HOTELS;\n const selectedHotel = state.itinerary?.hotel;\n\n let hotelChoice: Hotel;\n const message = `Found ${hotels.length} accommodation options in ${state.destination || 'San Francisco'}.\\n\n I recommend choosing the ${hotels[2].name} since it strikes the balance between rating, price, and location.`;\n if (!selectedHotel) {\n const interruptResult = createInterrupt(\n message,\n hotels,\n hotels[2],\n \"hotels\"\n );\n\n // Parse the interrupt result if it's a string\n hotelChoice = typeof interruptResult === 'string' ? JSON.parse(interruptResult) : interruptResult;\n } else {\n hotelChoice = selectedHotel;\n }\n\n return new Command({\n goto: END,\n update: {\n hotels: hotels,\n itinerary: {\n hotel: hotelChoice\n },\n // Return all \"messages\" that the agent was sending\n messages: [\n ...state.messages,\n new AIMessage({\n content: message,\n }),\n new AIMessage({\n content: `Hotels Agent: Excellent choice! You'll like ${hotelChoice.name}.`\n }),\n ]\n }\n });\n}\n\n// Experiences finder subgraph\nasync function experiencesFinder(state: TravelAgentState, config?: RunnableConfig): Promise<Command> {\n // Filter experiences (2 restaurants, 2 activities)\n const restaurants = STATIC_EXPERIENCES.filter(exp => exp.type === \"restaurant\").slice(0, 2);\n const activities = STATIC_EXPERIENCES.filter(exp => exp.type === \"activity\").slice(0, 2);\n const experiences = [...restaurants, ...activities];\n\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n const itinerary = state.itinerary || {};\n\n const systemPrompt = `\n You are the experiences agent. Your job is to find restaurants and activities for the user.\n You already went ahead and found a bunch of experiences. All you have to do now is to let the user know of your findings.\n\n Current status:\n - Origin: ${state.origin || 'Amsterdam'}\n - Destination: ${state.destination || 'San Francisco'}\n - Flight chosen: ${JSON.stringify(itinerary.flight) || 'None'}\n - Hotel chosen: ${JSON.stringify(itinerary.hotel) || 'None'}\n - Activities found: ${JSON.stringify(activities)}\n - Restaurants found: ${JSON.stringify(restaurants)}\n `;\n\n // Get experiences response\n const response = await model.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n return new Command({\n goto: END,\n update: {\n experiences: experiences,\n messages: [...state.messages, response]\n }\n });\n}\n\n// Supervisor response tool\nconst SUPERVISOR_RESPONSE_TOOL = {\n type: \"function\" as const,\n function: {\n name: \"supervisor_response\",\n description: \"Always use this tool to structure your response to the user.\",\n parameters: {\n type: \"object\",\n properties: {\n answer: {\n type: \"string\",\n description: \"The answer to the user\"\n },\n next_agent: {\n type: \"string\",\n enum: [\"flights_agent\", \"hotels_agent\", \"experiences_agent\", \"complete\"],\n description: \"The agent to go to. Not required if you do not want to route to another agent.\"\n }\n },\n required: [\"answer\"]\n }\n }\n};\n\n// Supervisor agent\nasync function supervisorAgent(state: TravelAgentState, config?: RunnableConfig): Promise<Command> {\n const itinerary = state.itinerary || {};\n\n // Check what's already completed\n const hasFlights = itinerary.flight !== undefined;\n const hasHotels = itinerary.hotel !== undefined;\n const hasExperiences = state.experiences !== null;\n\n const systemPrompt = `\n You are a travel planning supervisor. Your job is to coordinate specialized agents to help plan a trip.\n\n Current status:\n - Origin: ${state.origin || 'Amsterdam'}\n - Destination: ${state.destination || 'San Francisco'}\n - Flights found: ${hasFlights}\n - Hotels found: ${hasHotels}\n - Experiences found: ${hasExperiences}\n - Itinerary (Things that the user has already confirmed selection on): ${JSON.stringify(itinerary, null, 2)}\n\n Available agents:\n - flights_agent: Finds flight options\n - hotels_agent: Finds hotel options\n - experiences_agent: Finds restaurant and activity recommendations\n - complete: Mark task as complete when all information is gathered\n\n You must route to the appropriate agent based on what's missing. Once all agents have completed their tasks, route to 'complete'.\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Bind the routing tool\n const modelWithTools = model.bindTools(\n [SUPERVISOR_RESPONSE_TOOL],\n {\n parallel_tool_calls: false,\n }\n );\n\n // Get supervisor decision\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n let messages = [...state.messages, response];\n\n // Handle tool calls for routing\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n const toolCallArgs = toolCall.args;\n const nextAgent = toolCallArgs.next_agent;\n\n const toolResponse = new ToolMessage({\n tool_call_id: toolCall.id!,\n content: `Routing to ${nextAgent} and providing the answer`,\n });\n\n messages = [\n ...messages,\n toolResponse,\n new AIMessage({ content: toolCallArgs.answer })\n ];\n\n if (nextAgent && nextAgent !== \"complete\") {\n return new Command({ goto: nextAgent });\n }\n }\n\n // Fallback if no tool call or complete\n return new Command({\n goto: END,\n update: { messages }\n });\n}\n\n// Create subgraphs\nconst flightsGraph = new StateGraph(TravelAgentStateAnnotation);\nflightsGraph.addNode(\"flights_agent_chat_node\", flightsFinder);\nflightsGraph.setEntryPoint(\"flights_agent_chat_node\");\nflightsGraph.addEdge(START, \"flights_agent_chat_node\");\nflightsGraph.addEdge(\"flights_agent_chat_node\", END);\nconst flightsSubgraph = flightsGraph.compile();\n\nconst hotelsGraph = new StateGraph(TravelAgentStateAnnotation);\nhotelsGraph.addNode(\"hotels_agent_chat_node\", hotelsFinder);\nhotelsGraph.setEntryPoint(\"hotels_agent_chat_node\");\nhotelsGraph.addEdge(START, \"hotels_agent_chat_node\");\nhotelsGraph.addEdge(\"hotels_agent_chat_node\", END);\nconst hotelsSubgraph = hotelsGraph.compile();\n\nconst experiencesGraph = new StateGraph(TravelAgentStateAnnotation);\nexperiencesGraph.addNode(\"experiences_agent_chat_node\", experiencesFinder);\nexperiencesGraph.setEntryPoint(\"experiences_agent_chat_node\");\nexperiencesGraph.addEdge(START, 
\"experiences_agent_chat_node\");\nexperiencesGraph.addEdge(\"experiences_agent_chat_node\", END);\nconst experiencesSubgraph = experiencesGraph.compile();\n\n// Main supervisor workflow\nconst workflow = new StateGraph(TravelAgentStateAnnotation);\n\n// Add supervisor and subgraphs as nodes\nworkflow.addNode(\"supervisor\", supervisorAgent, { ends: ['flights_agent', 'hotels_agent', 'experiences_agent', END] });\nworkflow.addNode(\"flights_agent\", flightsSubgraph);\nworkflow.addNode(\"hotels_agent\", hotelsSubgraph);\nworkflow.addNode(\"experiences_agent\", experiencesSubgraph);\n\n// Set entry point\nworkflow.setEntryPoint(\"supervisor\");\nworkflow.addEdge(START, \"supervisor\");\n\n// Add edges back to supervisor after each subgraph\nworkflow.addEdge(\"flights_agent\", \"supervisor\");\nworkflow.addEdge(\"hotels_agent\", \"supervisor\");\nworkflow.addEdge(\"experiences_agent\", \"supervisor\");\n\n// Compile the graph\nexport const subGraphsAgentGraph = workflow.compile({\n transformers: [aguiTransformer],\n});\n", "language": "ts", "type": "file" } @@ -734,7 +734,7 @@ "langgraph-fastapi::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n 
useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }) ,\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n
\n
\n \n \n {haikus.map((haiku, index) => (\n \n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n
\n
\n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial }) {\n return (\n \n {/* Decorative background elements */}\n
\n
\n\n {/* Haiku Text */}\n
\n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n

\n \n {haiku.english?.[index]}\n

\n
\n ))}\n
\n\n {/* Image */}\n {haiku.image_name && (\n
\n
\n \n
\n
\n
\n )}\n
\n );\n}\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n description: \"Generates a haiku with Japanese and English translations, an image name, and a CSS gradient for the background.\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }),\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n 
return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n
\n
\n \n \n {haikus.map((haiku, index) => (\n \n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n
\n
\n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n return (\n \n {/* Decorative background elements */}\n
\n
\n\n {/* Haiku Text */}\n
\n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n

\n \n {haiku.english?.[index]}\n

\n
\n ))}\n
\n\n {/* Image */}\n {haiku.image_name && (\n
\n
\n \n
\n
\n
\n )}\n
\n );\n}\n", "language": "typescript", "type": "file" }, @@ -876,7 +876,7 @@ }, { "name": "agent.ts", - "content": "/**\n * A simple agentic chat flow using LangGraph with AG-UI middleware.\n *\n * The AG-UI middleware handles:\n * - Injecting frontend tools from state.tools into the model\n * - Routing frontend tool calls (emit events, skip backend execution)\n */\n\nimport { createAgent } from \"langchain\";\nimport { MemorySaver } from \"@langchain/langgraph\";\nimport { copilotkitMiddleware } from \"@copilotkit/sdk-js/langgraph\";\n\nconst checkpointer = new MemorySaver();\n\nexport const agenticChatGraph = createAgent({\n model: \"openai:gpt-4o\",\n tools: [], // Backend tools go here\n middleware: [copilotkitMiddleware],\n systemPrompt: \"You are a helpful assistant.\",\n checkpointer\n});\n", + "content": "/**\n * A simple agentic chat flow using LangGraph with AG-UI middleware.\n *\n * The AG-UI middleware handles:\n * - Injecting frontend tools from state.tools into the model\n * - Routing frontend tool calls (emit events, skip backend execution)\n */\n\nimport { createAgent } from \"langchain\";\nimport { MemorySaver } from \"@langchain/langgraph\";\nimport { copilotkitMiddleware } from \"@copilotkit/sdk-js/langgraph\";\nimport { aguiTransformer } from \"@ag-ui/langgraph/transformer\";\n\nconst checkpointer = new MemorySaver();\n\nexport const agenticChatGraph = createAgent({\n model: \"openai:gpt-4o\",\n tools: [], // Backend tools go here\n middleware: [copilotkitMiddleware],\n systemPrompt: \"You are a helpful assistant.\",\n checkpointer,\n streamTransformers: [aguiTransformer],\n});\n", "language": "ts", "type": "file" } @@ -908,7 +908,7 @@ }, { "name": "agent.ts", - "content": "/**\n * A simple agentic chat flow using LangGraph with reasoning model support.\n *\n * This agent supports multiple model providers with reasoning/thinking capabilities:\n * - OpenAI (default): Uses o3 model\n * - Anthropic: Uses claude-sonnet-4-20250514 with thinking enabled\n * - Gemini: Uses gemini-2.5-pro with thinking budget\n *\n * The model is selected based on the `model` field in the agent state.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { ChatAnthropic } from \"@langchain/anthropic\";\nimport { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Annotation, MessagesAnnotation, StateGraph, Command, START, END } from \"@langchain/langgraph\";\n\nconst AgentStateAnnotation = Annotation.Root({\n tools: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n model: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => \"\"\n }),\n ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n /**\n * Standard chat node based on the ReAct design pattern. It handles:\n * - The model to use (and binds in CopilotKit actions and the tools defined above)\n * - The system prompt\n * - Getting a response from the model\n * - Handling tool calls\n */\n\n // 1. 
Define the model based on state\n let model;\n if (state.model === \"Anthropic\") {\n model = new ChatAnthropic({\n model: \"claude-sonnet-4-20250514\",\n thinking: { type: \"enabled\", budget_tokens: 2000 },\n });\n } else if (state.model === \"Gemini\") {\n model = new ChatGoogleGenerativeAI({\n model: \"gemini-2.5-pro\",\n thinkingBudget: 1024,\n });\n } else {\n // Default: OpenAI\n model = new ChatOpenAI({\n model: \"o4-mini\",\n useResponsesApi: true,\n reasoning: { effort: \"high\", summary: \"auto\" },\n });\n }\n\n // Define config for the model\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // 2. Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...(state.tools ?? []),\n ],\n );\n\n // 3. Define the system message\n const systemMessage = new SystemMessage({\n content: \"You are a helpful assistant.\"\n });\n\n // 4. Run the model to generate a response\n const response = await modelWithTools.invoke([\n systemMessage,\n ...state.messages,\n ], config);\n\n // 5. Return the response\n return new Command({\n goto: END,\n update: {\n messages: [response]\n }\n });\n}\n\n// Define a new graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n .addNode(\"chatNode\", chatNode)\n .addEdge(START, \"chatNode\")\n .addEdge(\"chatNode\", END);\n\n// Compile the graph\nexport const agenticChatReasoningGraph = workflow.compile();\n", + "content": "/**\n * A simple agentic chat flow using LangGraph with reasoning model support.\n *\n * This agent supports multiple model providers with reasoning/thinking capabilities:\n * - OpenAI (default): Uses o3 model\n * - Anthropic: Uses claude-sonnet-4-20250514 with thinking enabled\n * - Gemini: Uses gemini-2.5-pro with thinking budget\n *\n * The model is selected based on the `model` field in the agent state.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { ChatAnthropic } from \"@langchain/anthropic\";\nimport { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Annotation, MessagesAnnotation, StateGraph, Command, START, END } from \"@langchain/langgraph\";\nimport { aguiTransformer } from \"@ag-ui/langgraph/transformer\";\n\nconst AgentStateAnnotation = Annotation.Root({\n tools: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n model: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => \"\"\n }),\n ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n /**\n * Standard chat node based on the ReAct design pattern. It handles:\n * - The model to use (and binds in CopilotKit actions and the tools defined above)\n * - The system prompt\n * - Getting a response from the model\n * - Handling tool calls\n */\n\n // 1. 
Define the model based on state\n let model;\n if (state.model === \"Anthropic\") {\n model = new ChatAnthropic({\n model: \"claude-sonnet-4-20250514\",\n thinking: { type: \"enabled\", budget_tokens: 2000 },\n });\n } else if (state.model === \"Gemini\") {\n model = new ChatGoogleGenerativeAI({\n model: \"gemini-2.5-pro\",\n thinkingBudget: 1024,\n });\n } else {\n // Default: OpenAI\n model = new ChatOpenAI({\n model: \"o4-mini\",\n useResponsesApi: true,\n reasoning: { effort: \"high\", summary: \"auto\" },\n });\n }\n\n // Define config for the model\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // 2. Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...(state.tools ?? []),\n ],\n );\n\n // 3. Define the system message\n const systemMessage = new SystemMessage({\n content: \"You are a helpful assistant.\"\n });\n\n // 4. Run the model to generate a response\n const response = await modelWithTools.invoke([\n systemMessage,\n ...state.messages,\n ], config);\n\n // 5. Return the response\n return new Command({\n goto: END,\n update: {\n messages: [response]\n }\n });\n}\n\n// Define a new graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n .addNode(\"chatNode\", chatNode)\n .addEdge(START, \"chatNode\")\n .addEdge(\"chatNode\", END);\n\n// Compile the graph\nexport const agenticChatReasoningGraph = workflow.compile({\n transformers: [aguiTransformer],\n});\n", "language": "ts", "type": "file" } @@ -934,7 +934,7 @@ }, { "name": "agent.ts", - "content": "/**\n * A multimodal agentic chat that can analyze images and other media.\n *\n * This agent demonstrates how to:\n * 1. Receive user messages with images\n * 2. Process multimodal content (text + images)\n * 3. Use vision models to analyze images\n *\n * Example usage:\n *\n * ```typescript\n * import { UserMessage, TextInputContent, ImageInputContent } from \"@ag-ui/core\";\n *\n * // Create a multimodal user message\n * const message: UserMessage = {\n * id: \"user-123\",\n * role: \"user\",\n * content: [\n * { type: \"text\", text: \"What's in this image?\" },\n * {\n * type: \"image\",\n * mimeType: \"image/jpeg\",\n * url: \"https://example.com/photo.jpg\"\n * },\n * ],\n * };\n *\n * // Or with base64 encoded data\n * const messageWithData: UserMessage = {\n * id: \"user-124\",\n * role: \"user\",\n * content: [\n * { type: \"text\", text: \"Describe this picture\" },\n * {\n * type: \"image\",\n * mimeType: \"image/png\",\n * data: \"iVBORw0KGgoAAAANSUhEUgAAAAUA...\", // base64 encoded\n * filename: \"screenshot.png\"\n * },\n * ],\n * };\n * ```\n *\n * The LangGraph integration automatically handles:\n * 1. Converting AG-UI multimodal format to LangChain's format\n * 2. Passing multimodal messages to vision models\n * 3. Converting responses back to AG-UI format\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Annotation, MessagesAnnotation, StateGraph, Command, START, END } from \"@langchain/langgraph\";\n\nconst AgentStateAnnotation = Annotation.Root({\n tools: Annotation({\n reducer: (x, y) => y ?? 
x,\n default: () => []\n }),\n ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function visionChatNode(state: AgentState, config?: RunnableConfig) {\n /**\n * Chat node that uses a vision-capable model to handle multimodal input.\n *\n * Images and other media sent by the user are automatically converted\n * to LangChain's multimodal format by the AG-UI integration layer.\n */\n\n // Use a vision-capable model\n const model = new ChatOpenAI({ model: \"gpt-5.4\" });\n\n // Define config for the model\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Bind tools if needed\n const modelWithTools = model.bindTools(\n state.tools ?? [],\n {\n parallel_tool_calls: false,\n }\n );\n\n // Define the system message\n const systemMessage = new SystemMessage({\n content: \"You are a helpful assistant that can analyze images, documents, and other media. \" +\n \"When a user shares an image, describe what you see in detail. \" +\n \"When a user shares a document, summarize its contents.\"\n });\n\n // Run the model with multimodal messages\n const response = await modelWithTools.invoke([\n systemMessage,\n ...state.messages,\n ], config);\n\n // Return the response\n return new Command({\n goto: END,\n update: {\n messages: [response]\n }\n });\n}\n\n// Define a new graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n .addNode(\"visionChatNode\", visionChatNode)\n .addEdge(START, \"visionChatNode\")\n .addEdge(\"visionChatNode\", END);\n\n// Compile the graph\nexport const agenticChatMultimodalGraph = workflow.compile();\n", + "content": "/**\n * A multimodal agentic chat that can analyze images and other media.\n *\n * This agent demonstrates how to:\n * 1. Receive user messages with images\n * 2. Process multimodal content (text + images)\n * 3. Use vision models to analyze images\n *\n * Example usage:\n *\n * ```typescript\n * import { UserMessage, TextInputContent, ImageInputContent } from \"@ag-ui/core\";\n *\n * // Create a multimodal user message\n * const message: UserMessage = {\n * id: \"user-123\",\n * role: \"user\",\n * content: [\n * { type: \"text\", text: \"What's in this image?\" },\n * {\n * type: \"image\",\n * mimeType: \"image/jpeg\",\n * url: \"https://example.com/photo.jpg\"\n * },\n * ],\n * };\n *\n * // Or with base64 encoded data\n * const messageWithData: UserMessage = {\n * id: \"user-124\",\n * role: \"user\",\n * content: [\n * { type: \"text\", text: \"Describe this picture\" },\n * {\n * type: \"image\",\n * mimeType: \"image/png\",\n * data: \"iVBORw0KGgoAAAANSUhEUgAAAAUA...\", // base64 encoded\n * filename: \"screenshot.png\"\n * },\n * ],\n * };\n * ```\n *\n * The LangGraph integration automatically handles:\n * 1. Converting AG-UI multimodal format to LangChain's format\n * 2. Passing multimodal messages to vision models\n * 3. Converting responses back to AG-UI format\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Annotation, MessagesAnnotation, StateGraph, Command, START, END } from \"@langchain/langgraph\";\nimport { aguiTransformer } from \"@ag-ui/langgraph/transformer\";\n\nconst AgentStateAnnotation = Annotation.Root({\n tools: Annotation({\n reducer: (x, y) => y ?? 
x,\n default: () => []\n }),\n ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function visionChatNode(state: AgentState, config?: RunnableConfig) {\n /**\n * Chat node that uses a vision-capable model to handle multimodal input.\n *\n * Images and other media sent by the user are automatically converted\n * to LangChain's multimodal format by the AG-UI integration layer.\n */\n\n // Use a vision-capable model\n const model = new ChatOpenAI({ model: \"gpt-5.4\" });\n\n // Define config for the model\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Bind tools if needed\n const modelWithTools = model.bindTools(\n state.tools ?? [],\n {\n parallel_tool_calls: false,\n }\n );\n\n // Define the system message\n const systemMessage = new SystemMessage({\n content: \"You are a helpful assistant that can analyze images, documents, and other media. \" +\n \"When a user shares an image, describe what you see in detail. \" +\n \"When a user shares a document, summarize its contents.\"\n });\n\n // Run the model with multimodal messages\n const response = await modelWithTools.invoke([\n systemMessage,\n ...state.messages,\n ], config);\n\n // Return the response\n return new Command({\n goto: END,\n update: {\n messages: [response]\n }\n });\n}\n\n// Define a new graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n .addNode(\"visionChatNode\", visionChatNode)\n .addEdge(START, \"visionChatNode\")\n .addEdge(\"visionChatNode\", END);\n\n// Compile the graph\nexport const agenticChatMultimodalGraph = workflow.compile({\n transformers: [aguiTransformer],\n});\n", "language": "ts", "type": "file" } @@ -974,7 +974,7 @@ }, { "name": "agent.ts", - "content": "/**\n * A LangGraph implementation of the human-in-the-loop agent.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, interrupt, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\n\nconst DEFINE_TASK_TOOL = {\n type: \"function\",\n function: {\n name: \"plan_execution_steps\",\n description: \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in imperative form (i.e. Dig hole, Open door, ...)\",\n parameters: {\n type: \"object\",\n properties: {\n steps: {\n type: \"array\",\n items: {\n type: \"object\",\n properties: {\n description: {\n type: \"string\",\n description: \"The text of the step in imperative form\"\n },\n status: {\n type: \"string\",\n enum: [\"enabled\"],\n description: \"The status of the step, always 'enabled'\"\n }\n },\n required: [\"description\", \"status\"]\n },\n description: \"An array of 10 step objects, each containing text and status\"\n }\n },\n required: [\"steps\"]\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n steps: Annotation>({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n tools: Annotation(),\n user_response: Annotation({\n reducer: (x, y) => y ?? 
x,\n default: () => undefined\n }),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * This is the entry point for the flow.\n */\n\n // Initialize steps list if not exists\n if (!state.steps) {\n state.steps = [];\n }\n\n return new Command({\n goto: \"chat_node\",\n update: {\n messages: state.messages,\n steps: state.steps,\n }\n });\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * Standard chat node where the agent processes messages and generates responses.\n * If task steps are defined, the user can enable/disable them using interrupts.\n */\n const systemPrompt = `\n You are a helpful assistant that can perform any task.\n You MUST call the \\`plan_execution_steps\\` function when the user asks you to perform a task.\n Always make sure you will provide tasks based on the user query\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o-mini\" });\n \n // Define config for the model\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the write_document tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"steps\",\n tool: \"plan_execution_steps\",\n tool_argument: \"steps\"\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n DEFINE_TASK_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model and generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n // Update messages with the response\n const messages = [...state.messages, response];\n \n // Handle tool calls\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n\n if (toolCall.name === \"plan_execution_steps\") {\n // Get the steps from the tool call\n const stepsRaw = toolCall.args.steps || [];\n \n // Set initial status to \"enabled\" for all steps\n const stepsData: Array<{ description: string; status: string }> = [];\n \n // Handle different potential formats of steps data\n if (Array.isArray(stepsRaw)) {\n for (const step of stepsRaw) {\n if (typeof step === 'object' && step.description) {\n stepsData.push({\n description: step.description,\n status: \"enabled\"\n });\n } else if (typeof step === 'string') {\n stepsData.push({\n description: step,\n status: \"enabled\"\n });\n }\n }\n }\n \n // If no steps were processed correctly, return to END with the updated messages\n if (stepsData.length === 0) {\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps,\n }\n });\n }\n\n // Update steps in state and emit to frontend\n state.steps = stepsData;\n \n // Add a tool response to satisfy OpenAI's requirements\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Task steps generated.\",\n tool_call_id: toolCall.id\n };\n \n const updatedMessages = [...messages, toolResponse];\n\n // Move to the process_steps_node which will handle the interrupt and final response\n return new Command({\n goto: \"process_steps_node\",\n update: {\n messages: updatedMessages,\n steps: state.steps,\n }\n });\n }\n }\n \n // If no tool calls or not plan_execution_steps, return to END with the updated 
messages\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps,\n }\n });\n}\n\nasync function processStepsNode(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * This node handles the user interrupt for step customization and generates the final response.\n */\n\n let userResponse: string;\n\n // Check if we already have a user_response in the state\n // This happens when the node restarts after an interrupt\n if (state.user_response) {\n userResponse = state.user_response;\n } else {\n // Use LangGraph interrupt to get user input on steps\n // This will pause execution and wait for user input in the frontend\n userResponse = interrupt({ steps: state.steps });\n // Store the user response in state for when the node restarts\n state.user_response = userResponse;\n }\n \n // Generate the creative completion response\n const finalPrompt = `\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n `;\n \n const finalResponse = await new ChatOpenAI({ model: \"gpt-4o\" }).invoke([\n new SystemMessage({ content: finalPrompt }),\n { role: \"user\", content: userResponse }\n ], config);\n\n // Add the final response to messages\n const messages = [...state.messages, finalResponse];\n \n // Clear the user_response from state to prepare for future interactions\n const newState = { ...state };\n delete newState.user_response;\n \n // Return to END with the updated messages\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps,\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"start_flow\", startFlow);\nworkflow.addNode(\"chat_node\", chatNode);\nworkflow.addNode(\"process_steps_node\", processStepsNode);\n\n// Add edges\nworkflow.setEntryPoint(\"start_flow\");\nworkflow.addEdge(START, \"start_flow\");\nworkflow.addEdge(\"start_flow\", \"chat_node\");\nworkflow.addEdge(\"process_steps_node\", END);\n\n// Add conditional edges from chat_node\nworkflow.addConditionalEdges(\n \"chat_node\",\n (state: AgentState) => {\n // This would be determined by the Command returned from chat_node\n // For now, we'll assume the logic is handled in the Command's goto property\n return \"continue\";\n },\n {\n \"process_steps_node\": \"process_steps_node\",\n \"continue\": END,\n }\n);\n\n// Compile the graph\nexport const humanInTheLoopGraph = workflow.compile();", + "content": "/**\n * A LangGraph implementation of the human-in-the-loop agent.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, interrupt, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\nimport { aguiTransformer } from \"@ag-ui/langgraph/transformer\";\n\nconst DEFINE_TASK_TOOL = {\n type: \"function\",\n function: {\n name: \"plan_execution_steps\",\n description: \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in imperative form (i.e. 
Dig hole, Open door, ...)\",\n parameters: {\n type: \"object\",\n properties: {\n steps: {\n type: \"array\",\n items: {\n type: \"object\",\n properties: {\n description: {\n type: \"string\",\n description: \"The text of the step in imperative form\"\n },\n status: {\n type: \"string\",\n enum: [\"enabled\"],\n description: \"The status of the step, always 'enabled'\"\n }\n },\n required: [\"description\", \"status\"]\n },\n description: \"An array of 10 step objects, each containing text and status\"\n }\n },\n required: [\"steps\"]\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n steps: Annotation<Array<{ description: string; status: string }>>({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n tools: Annotation<any[]>(),\n user_response: Annotation<string | undefined>({\n reducer: (x, y) => y ?? x,\n default: () => undefined\n }),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig): Promise<Command> {\n /**\n * This is the entry point for the flow.\n */\n\n // Initialize steps list if not exists\n if (!state.steps) {\n state.steps = [];\n }\n\n return new Command({\n goto: \"chat_node\",\n update: {\n messages: state.messages,\n steps: state.steps,\n }\n });\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise<Command> {\n /**\n * Standard chat node where the agent processes messages and generates responses.\n * If task steps are defined, the user can enable/disable them using interrupts.\n */\n const systemPrompt = `\n You are a helpful assistant that can perform any task.\n You MUST call the \\`plan_execution_steps\\` function when the user asks you to perform a task.\n Always make sure you will provide tasks based on the user query\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o-mini\" });\n\n // Define config for the model\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the plan_execution_steps tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"steps\",\n tool: \"plan_execution_steps\",\n tool_argument: \"steps\"\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n DEFINE_TASK_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model and generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n // Update messages with the response\n const messages = [...state.messages, response];\n\n // Handle tool calls\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n\n if (toolCall.name === \"plan_execution_steps\") {\n // Get the steps from the tool call\n const stepsRaw = toolCall.args.steps || [];\n\n // Set initial status to \"enabled\" for all steps\n const stepsData: Array<{ description: string; status: string }> = [];\n\n // Handle different potential formats of steps data\n if (Array.isArray(stepsRaw)) {\n for (const step of stepsRaw) {\n if (typeof step === 'object' && step.description) {\n stepsData.push({\n description: step.description,\n status: \"enabled\"\n });\n } else if (typeof step === 'string') {\n stepsData.push({\n description: step,\n status: \"enabled\"\n });\n }\n }\n }\n\n // If no steps were processed correctly, return to END with the updated messages\n if (stepsData.length === 0) {\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps,\n }\n });\n }\n\n // Update steps in state and emit to frontend\n state.steps = stepsData;\n\n // Add a tool response to satisfy OpenAI's requirements\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Task steps generated.\",\n tool_call_id: toolCall.id\n };\n\n const updatedMessages = [...messages, toolResponse];\n\n // Move to the process_steps_node which will handle the interrupt and final response\n return new Command({\n goto: \"process_steps_node\",\n update: {\n messages: updatedMessages,\n steps: state.steps,\n }\n });\n }\n }\n\n // If no tool calls or not plan_execution_steps, return to END with the updated messages\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps,\n }\n });\n}\n\nasync function processStepsNode(state: AgentState, config?: RunnableConfig): Promise<Command> {\n /**\n * This node handles the user interrupt for step customization and generates the final response.\n */\n\n let userResponse: string;\n\n // Check if we already have a user_response in the state\n // This happens when the node restarts after an interrupt\n if (state.user_response) {\n userResponse = state.user_response;\n } else {\n // Use LangGraph interrupt to get user input on steps\n // This will pause execution and wait for user input in the frontend\n userResponse = interrupt({ steps: state.steps });\n // Store the user response in state for when the node restarts\n state.user_response = userResponse;\n }\n\n // Generate the creative completion response\n const finalPrompt = `\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n `;\n\n const finalResponse = await new ChatOpenAI({ model: \"gpt-4o\" }).invoke([\n new SystemMessage({ content: finalPrompt }),\n { role: \"user\", content: userResponse }\n ], config);\n\n // Add the final response to messages\n const messages = [...state.messages, finalResponse];\n\n // Clear the user_response from state to prepare for future interactions\n const newState = { ...state };\n delete newState.user_response;\n\n // Return to END with the updated messages\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps,\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"start_flow\", startFlow);\nworkflow.addNode(\"chat_node\", chatNode);\nworkflow.addNode(\"process_steps_node\", processStepsNode);\n\n// Add edges\nworkflow.setEntryPoint(\"start_flow\");\nworkflow.addEdge(START, \"start_flow\");\nworkflow.addEdge(\"start_flow\", \"chat_node\");\nworkflow.addEdge(\"process_steps_node\", END);\n\n// Add conditional edges from chat_node\nworkflow.addConditionalEdges(\n \"chat_node\",\n (state: AgentState) => {\n // This would be determined by the Command returned from chat_node\n // For now, we'll assume the logic is handled in the Command's goto property\n return \"continue\";\n },\n {\n \"process_steps_node\": \"process_steps_node\",\n \"continue\": END,\n }\n);\n\n// 
Compile the graph\nexport const humanInTheLoopGraph = workflow.compile({\n transformers: [aguiTransformer],\n});", "language": "ts", "type": "file" } @@ -1006,7 +1006,7 @@ }, { "name": "agent.ts", - "content": "/**\n * An example demonstrating agentic generative UI using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\nimport { Annotation, Command, MessagesAnnotation, StateGraph, END } from \"@langchain/langgraph\";\n\n// This tool simulates performing a task on the server.\n// The tool call will be streamed to the frontend as it is being generated.\nconst PERFORM_TASK_TOOL = {\n type: \"function\",\n function: {\n name: \"generate_task_steps_generative_ui\",\n description: \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in gerund form (i.e. Digging hole, opening door, ...)\",\n parameters: {\n type: \"object\",\n properties: {\n steps: {\n type: \"array\",\n items: {\n type: \"object\",\n properties: {\n description: {\n type: \"string\",\n description: \"The text of the step in gerund form\"\n },\n status: {\n type: \"string\",\n enum: [\"pending\"],\n description: \"The status of the step, always 'pending'\"\n }\n },\n required: [\"description\", \"status\"]\n },\n description: \"An array of 10 step objects, each containing text and status\"\n }\n },\n required: [\"steps\"]\n }\n }\n};\n\nconst AgentStateAnnotation = Annotation.Root({\n steps: Annotation>({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n tools: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig) {\n /**\n * This is the entry point for the flow.\n * Always clear steps so old steps from previous runs don't persist.\n */\n return {\n steps: []\n };\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n /**\n * Standard chat node.\n */\n const systemPrompt = `\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function \\`generate_task_steps_generative_ui\\`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. 
\n Always say you actually did the steps, not merely generated them.\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n \n // Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the write_document tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"steps\",\n tool: \"generate_task_steps_generative_ui\",\n tool_argument: \"steps\",\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n PERFORM_TASK_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model to generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n const messages = [...state.messages, response];\n\n // Extract any tool calls from the response\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n \n if (toolCall.name === \"generate_task_steps_generative_ui\") {\n const steps = toolCall.args.steps.map((step: any) => ({\n description: step.description,\n status: step.status\n }));\n \n // Add the tool response to messages\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Steps executed.\",\n tool_call_id: toolCall.id\n };\n\n const updatedMessages = [...messages, toolResponse];\n\n // Simulate executing the steps\n for (let i = 0; i < steps.length; i++) {\n // simulate executing the step\n await new Promise(resolve => setTimeout(resolve, 1000));\n steps[i].status = \"completed\";\n // Update the state with the completed step\n state.steps = steps;\n // Emit custom events to update the frontend\n await dispatchCustomEvent(\"manually_emit_state\", state, config);\n }\n \n return new Command({\n goto: \"chat_node\",\n update: {\n messages: updatedMessages,\n steps: state.steps\n }\n });\n }\n }\n\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n .addNode(\"start_flow\", startFlow)\n .addNode(\"chat_node\", chatNode)\n .addEdge(\"__start__\", \"start_flow\")\n .addEdge(\"start_flow\", \"chat_node\")\n .addEdge(\"chat_node\", \"__end__\");\n\n// Compile the graph\nexport const agenticGenerativeUiGraph = workflow.compile();", + "content": "/**\n * An example demonstrating agentic generative UI using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\nimport { Annotation, Command, MessagesAnnotation, StateGraph, END } from \"@langchain/langgraph\";\nimport { aguiTransformer } from \"@ag-ui/langgraph/transformer\";\n\n// This tool simulates performing a task on the server.\n// The tool call will be streamed to the frontend as it is being generated.\nconst PERFORM_TASK_TOOL = {\n type: \"function\",\n function: {\n name: \"generate_task_steps_generative_ui\",\n description: \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in gerund form (i.e. 
Digging hole, opening door, ...)\",\n parameters: {\n type: \"object\",\n properties: {\n steps: {\n type: \"array\",\n items: {\n type: \"object\",\n properties: {\n description: {\n type: \"string\",\n description: \"The text of the step in gerund form\"\n },\n status: {\n type: \"string\",\n enum: [\"pending\"],\n description: \"The status of the step, always 'pending'\"\n }\n },\n required: [\"description\", \"status\"]\n },\n description: \"An array of 10 step objects, each containing text and status\"\n }\n },\n required: [\"steps\"]\n }\n }\n};\n\nconst AgentStateAnnotation = Annotation.Root({\n steps: Annotation<Array<{ description: string; status: string }>>({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n tools: Annotation<any[]>({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig) {\n /**\n * This is the entry point for the flow.\n * Always clear steps so old steps from previous runs don't persist.\n */\n return {\n steps: []\n };\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n /**\n * Standard chat node.\n */\n const systemPrompt = `\n You are a helpful assistant assisting with any task.\n When asked to do something, you MUST call the function \\`generate_task_steps_generative_ui\\`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis.\n Always say you actually did the steps, not merely generated them.\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n // Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the generate_task_steps_generative_ui tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"steps\",\n tool: \"generate_task_steps_generative_ui\",\n tool_argument: \"steps\",\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n PERFORM_TASK_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model to generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n const messages = [...state.messages, response];\n\n // Extract any tool calls from the response\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n\n if (toolCall.name === \"generate_task_steps_generative_ui\") {\n const steps = toolCall.args.steps.map((step: any) => ({\n description: step.description,\n status: step.status\n }));\n\n // Add the tool response to messages\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Steps executed.\",\n tool_call_id: toolCall.id\n };\n\n const updatedMessages = [...messages, toolResponse];\n\n // Simulate executing the steps\n for (let i = 0; i < steps.length; i++) {\n // simulate executing the step\n await new Promise(resolve => setTimeout(resolve, 1000));\n steps[i].status = \"completed\";\n // Update the state with the completed step\n state.steps = steps;\n // Emit custom events to update the frontend\n await dispatchCustomEvent(\"manually_emit_state\", state, 
config);\n }\n \n return new Command({\n goto: \"chat_node\",\n update: {\n messages: updatedMessages,\n steps: state.steps\n }\n });\n }\n }\n\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n .addNode(\"start_flow\", startFlow)\n .addNode(\"chat_node\", chatNode)\n .addEdge(\"__start__\", \"start_flow\")\n .addEdge(\"start_flow\", \"chat_node\")\n .addEdge(\"chat_node\", \"__end__\");\n\n// Compile the graph\nexport const agenticGenerativeUiGraph = workflow.compile({\n transformers: [aguiTransformer],\n});", "language": "ts", "type": "file" } @@ -1038,7 +1038,7 @@ }, { "name": "agent.ts", - "content": "/**\n * A demo of predictive state updates using LangGraph.\n */\n\nimport { v4 as uuidv4 } from \"uuid\";\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\n\nconst WRITE_DOCUMENT_TOOL = {\n type: \"function\",\n function: {\n name: \"write_document_local\",\n description: [\n \"Write a document. Use markdown formatting to format the document.\",\n \"It's good to format the document extensively so it's easy to read.\",\n \"You can use all kinds of markdown.\",\n \"However, do not use italic or strike-through formatting, it's reserved for another purpose.\",\n \"You MUST write the full document, even when changing only a few words.\",\n \"When making edits to the document, try to make them minimal - do not change every word.\",\n \"Keep stories SHORT!\"\n ].join(\" \"),\n parameters: {\n type: \"object\",\n properties: {\n document: {\n type: \"string\",\n description: \"The document to write\"\n },\n },\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n document: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => undefined\n }),\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * Standard chat node.\n */\n\n const systemPrompt = `\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 
2 sentences max.\n This is the current state of the document: ----\\n ${state.document || ''}\\n-----\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n // Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"document\",\n tool: \"write_document_local\",\n tool_argument: \"document\"\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n WRITE_DOCUMENT_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model to generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n // Update messages with the response\n const messages = [...state.messages, response];\n\n // Extract any tool calls from the response\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n\n if (toolCall.name === \"write_document_local\") {\n // Add the tool response to messages\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Document written.\",\n tool_call_id: toolCall.id\n };\n\n // Add confirmation tool call\n const confirmToolCall = {\n role: \"assistant\" as const,\n content: \"\",\n tool_calls: [{\n id: uuidv4(),\n type: \"function\" as const,\n function: {\n name: \"confirm_changes\",\n arguments: \"{}\"\n }\n }]\n };\n\n const updatedMessages = [...messages, toolResponse, confirmToolCall];\n\n // Return Command to route to end\n return new Command({\n goto: END,\n update: {\n messages: updatedMessages,\n document: toolCall.args.document\n }\n });\n }\n }\n\n // If no tool was called, go to end\n return new Command({\n goto: END,\n update: {\n messages: messages\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"chat_node\", chatNode);\n\n// Add edges\nworkflow.addEdge(START, \"chat_node\");\nworkflow.addEdge(\"chat_node\", END);\n\n// Compile the graph\nexport const predictiveStateUpdatesGraph = workflow.compile();", + "content": "/**\n * A demo of predictive state updates using LangGraph.\n */\n\nimport { v4 as uuidv4 } from \"uuid\";\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\nimport { aguiTransformer } from \"@ag-ui/langgraph/transformer\";\n\nconst WRITE_DOCUMENT_TOOL = {\n type: \"function\",\n function: {\n name: \"write_document_local\",\n description: [\n \"Write a document. 
Use markdown formatting to format the document.\",\n \"It's good to format the document extensively so it's easy to read.\",\n \"You can use all kinds of markdown.\",\n \"However, do not use italic or strike-through formatting, it's reserved for another purpose.\",\n \"You MUST write the full document, even when changing only a few words.\",\n \"When making edits to the document, try to make them minimal - do not change every word.\",\n \"Keep stories SHORT!\"\n ].join(\" \"),\n parameters: {\n type: \"object\",\n properties: {\n document: {\n type: \"string\",\n description: \"The document to write\"\n },\n },\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n document: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => undefined\n }),\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * Standard chat node.\n */\n\n const systemPrompt = `\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 2 sentences max.\n This is the current state of the document: ----\\n ${state.document || ''}\\n-----\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n // Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"document\",\n tool: \"write_document_local\",\n tool_argument: \"document\"\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n WRITE_DOCUMENT_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model to generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n // Update messages with the response\n const messages = [...state.messages, response];\n\n // Extract any tool calls from the response\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n\n if (toolCall.name === \"write_document_local\") {\n // Add the tool response to messages\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Document written.\",\n tool_call_id: toolCall.id\n };\n\n // Add confirmation tool call\n const confirmToolCall = {\n role: \"assistant\" as const,\n content: \"\",\n tool_calls: [{\n id: uuidv4(),\n type: \"function\" as const,\n function: {\n name: \"confirm_changes\",\n arguments: \"{}\"\n }\n }]\n };\n\n const updatedMessages = [...messages, toolResponse, confirmToolCall];\n\n // Return Command to route to end\n return new Command({\n goto: END,\n update: {\n messages: updatedMessages,\n document: toolCall.args.document\n }\n });\n }\n }\n\n // If no tool was called, go to end\n return new Command({\n goto: END,\n update: {\n messages: messages\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add 
nodes\nworkflow.addNode(\"chat_node\", chatNode);\n\n// Add edges\nworkflow.addEdge(START, \"chat_node\");\nworkflow.addEdge(\"chat_node\", END);\n\n// Compile the graph\nexport const predictiveStateUpdatesGraph = workflow.compile({\n transformers: [aguiTransformer],\n});", "language": "ts", "type": "file" } @@ -1070,7 +1070,7 @@ }, { "name": "agent.ts", - "content": "/**\n * A demo of shared state between the agent and CopilotKit using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\"\n}\n\nenum SpecialPreferences {\n HIGH_PROTEIN = \"High Protein\",\n LOW_CARB = \"Low Carb\",\n SPICY = \"Spicy\",\n BUDGET_FRIENDLY = \"Budget-Friendly\",\n ONE_POT_MEAL = \"One-Pot Meal\",\n VEGETARIAN = \"Vegetarian\",\n VEGAN = \"Vegan\"\n}\n\nenum CookingTime {\n FIVE_MIN = \"5 min\",\n FIFTEEN_MIN = \"15 min\",\n THIRTY_MIN = \"30 min\",\n FORTY_FIVE_MIN = \"45 min\",\n SIXTY_PLUS_MIN = \"60+ min\"\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n skill_level: SkillLevel;\n special_preferences: SpecialPreferences[];\n cooking_time: CookingTime;\n ingredients: Ingredient[];\n instructions: string[];\n changes?: string;\n}\n\nconst GENERATE_RECIPE_TOOL = {\n type: \"function\",\n function: {\n name: \"generate_recipe\",\n description: \"Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it. Make sure the recipe is complete. 
ALWAYS provide the entire recipe, not just the changes.\",\n parameters: {\n type: \"object\",\n properties: {\n recipe: {\n type: \"object\",\n properties: {\n skill_level: {\n type: \"string\",\n enum: Object.values(SkillLevel),\n description: \"The skill level required for the recipe\"\n },\n special_preferences: {\n type: \"array\",\n items: {\n type: \"string\",\n enum: Object.values(SpecialPreferences)\n },\n description: \"A list of special preferences for the recipe\"\n },\n cooking_time: {\n type: \"string\",\n enum: Object.values(CookingTime),\n description: \"The cooking time of the recipe\"\n },\n ingredients: {\n type: \"array\",\n items: {\n type: \"object\",\n properties: {\n icon: { type: \"string\", description: \"The icon emoji (not emoji code like '\\\\u1f35e', but the actual emoji like 🥕) of the ingredient\" },\n name: { type: \"string\" },\n amount: { type: \"string\" }\n }\n },\n description: \"Entire list of ingredients for the recipe, including the new ingredients and the ones that are already in the recipe\"\n },\n instructions: {\n type: \"array\",\n items: { type: \"string\" },\n description: \"Entire list of instructions for the recipe, including the new instructions and the ones that are already there\"\n },\n changes: {\n type: \"string\",\n description: \"A description of the changes made to the recipe\"\n }\n },\n }\n },\n required: [\"recipe\"]\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n recipe: Annotation(),\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * This is the entry point for the flow.\n */\n\n // Initialize recipe if not exists\n if (!state.recipe) {\n state.recipe = {\n skill_level: SkillLevel.BEGINNER,\n special_preferences: [],\n cooking_time: CookingTime.FIFTEEN_MIN,\n ingredients: [{ icon: \"🍴\", name: \"Sample Ingredient\", amount: \"1 unit\" }],\n instructions: [\"First step instruction\"]\n };\n // Emit the initial state to ensure it's properly shared with the frontend\n await dispatchCustomEvent(\"manually_emit_intermediate_state\", state, config);\n }\n \n return new Command({\n goto: \"chat_node\",\n update: {\n messages: state.messages,\n recipe: state.recipe\n }\n });\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * Standard chat node.\n */\n // Create a safer serialization of the recipe\n let recipeJson = \"No recipe yet\";\n if (state.recipe) {\n try {\n recipeJson = JSON.stringify(state.recipe, null, 2);\n } catch (e) {\n recipeJson = `Error serializing recipe: ${e}`;\n }\n }\n\n const systemPrompt = `You are a helpful assistant for creating recipes. \n This is the current state of the recipe: ${recipeJson}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. 
dont describe the recipe, just say what you did.\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o-mini\" });\n \n // Define config for the model\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the write_document tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"recipe\",\n tool: \"generate_recipe\",\n tool_argument: \"recipe\"\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n GENERATE_RECIPE_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model and generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n // Update messages with the response\n const messages = [...state.messages, response];\n \n // Handle tool calls\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n \n if (toolCall.name === \"generate_recipe\") {\n // Update recipe state with tool_call_args\n const recipeData = toolCall.args.recipe;\n let recipe: Recipe;\n // If we have an existing recipe, update it\n if (state.recipe) {\n recipe = { ...state.recipe };\n for (const [key, value] of Object.entries(recipeData)) {\n if (value !== null && value !== undefined) { // Only update fields that were provided\n (recipe as any)[key] = value;\n }\n }\n } else {\n // Create a new recipe\n recipe = {\n skill_level: recipeData.skill_level || SkillLevel.BEGINNER,\n special_preferences: recipeData.special_preferences || [],\n cooking_time: recipeData.cooking_time || CookingTime.FIFTEEN_MIN,\n ingredients: recipeData.ingredients || [],\n instructions: recipeData.instructions || []\n };\n }\n \n // Add tool response to messages\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Recipe generated.\",\n tool_call_id: toolCall.id\n };\n \n const updatedMessages = [...messages, toolResponse];\n \n // Explicitly emit the updated state to ensure it's shared with frontend\n state.recipe = recipe;\n await dispatchCustomEvent(\"manually_emit_intermediate_state\", state, config);\n \n // Return command with updated recipe\n return new Command({\n goto: \"start_flow\",\n update: {\n messages: updatedMessages,\n recipe: recipe\n }\n });\n }\n }\n\n return new Command({\n goto: END,\n update: {\n messages: messages,\n recipe: state.recipe\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"start_flow\", startFlow);\nworkflow.addNode(\"chat_node\", chatNode);\n\n// Add edges\nworkflow.setEntryPoint(\"start_flow\");\nworkflow.addEdge(START, \"start_flow\");\nworkflow.addEdge(\"start_flow\", \"chat_node\");\nworkflow.addEdge(\"chat_node\", END);\n\n// Compile the graph\nexport const sharedStateGraph = workflow.compile();", + "content": "/**\n * A demo of shared state between the agent and CopilotKit using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\nimport { aguiTransformer } from 
\"@ag-ui/langgraph/transformer\";\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\"\n}\n\nenum SpecialPreferences {\n HIGH_PROTEIN = \"High Protein\",\n LOW_CARB = \"Low Carb\",\n SPICY = \"Spicy\",\n BUDGET_FRIENDLY = \"Budget-Friendly\",\n ONE_POT_MEAL = \"One-Pot Meal\",\n VEGETARIAN = \"Vegetarian\",\n VEGAN = \"Vegan\"\n}\n\nenum CookingTime {\n FIVE_MIN = \"5 min\",\n FIFTEEN_MIN = \"15 min\",\n THIRTY_MIN = \"30 min\",\n FORTY_FIVE_MIN = \"45 min\",\n SIXTY_PLUS_MIN = \"60+ min\"\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n skill_level: SkillLevel;\n special_preferences: SpecialPreferences[];\n cooking_time: CookingTime;\n ingredients: Ingredient[];\n instructions: string[];\n changes?: string;\n}\n\nconst GENERATE_RECIPE_TOOL = {\n type: \"function\",\n function: {\n name: \"generate_recipe\",\n description: \"Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it. Make sure the recipe is complete. ALWAYS provide the entire recipe, not just the changes.\",\n parameters: {\n type: \"object\",\n properties: {\n recipe: {\n type: \"object\",\n properties: {\n skill_level: {\n type: \"string\",\n enum: Object.values(SkillLevel),\n description: \"The skill level required for the recipe\"\n },\n special_preferences: {\n type: \"array\",\n items: {\n type: \"string\",\n enum: Object.values(SpecialPreferences)\n },\n description: \"A list of special preferences for the recipe\"\n },\n cooking_time: {\n type: \"string\",\n enum: Object.values(CookingTime),\n description: \"The cooking time of the recipe\"\n },\n ingredients: {\n type: \"array\",\n items: {\n type: \"object\",\n properties: {\n icon: { type: \"string\", description: \"The icon emoji (not emoji code like '\\\\u1f35e', but the actual emoji like 🥕) of the ingredient\" },\n name: { type: \"string\" },\n amount: { type: \"string\" }\n }\n },\n description: \"Entire list of ingredients for the recipe, including the new ingredients and the ones that are already in the recipe\"\n },\n instructions: {\n type: \"array\",\n items: { type: \"string\" },\n description: \"Entire list of instructions for the recipe, including the new instructions and the ones that are already there\"\n },\n changes: {\n type: \"string\",\n description: \"A description of the changes made to the recipe\"\n }\n },\n }\n },\n required: [\"recipe\"]\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n recipe: Annotation(),\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * This is the entry point for the flow.\n */\n\n // Initialize recipe if not exists\n if (!state.recipe) {\n state.recipe = {\n skill_level: SkillLevel.BEGINNER,\n special_preferences: [],\n cooking_time: CookingTime.FIFTEEN_MIN,\n ingredients: [{ icon: \"🍴\", name: \"Sample Ingredient\", amount: \"1 unit\" }],\n instructions: [\"First step instruction\"]\n };\n // Emit the initial state to ensure it's properly shared with the frontend\n await dispatchCustomEvent(\"manually_emit_intermediate_state\", state, config);\n }\n \n return new Command({\n goto: \"chat_node\",\n update: {\n messages: state.messages,\n recipe: state.recipe\n }\n });\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * Standard chat node.\n */\n 
// Create a safer serialization of the recipe\n let recipeJson = \"No recipe yet\";\n if (state.recipe) {\n try {\n recipeJson = JSON.stringify(state.recipe, null, 2);\n } catch (e) {\n recipeJson = `Error serializing recipe: ${e}`;\n }\n }\n\n const systemPrompt = `You are a helpful assistant for creating recipes. \n This is the current state of the recipe: ${recipeJson}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. dont describe the recipe, just say what you did.\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o-mini\" });\n \n // Define config for the model\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the write_document tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"recipe\",\n tool: \"generate_recipe\",\n tool_argument: \"recipe\"\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n GENERATE_RECIPE_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model and generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n // Update messages with the response\n const messages = [...state.messages, response];\n \n // Handle tool calls\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n \n if (toolCall.name === \"generate_recipe\") {\n // Update recipe state with tool_call_args\n const recipeData = toolCall.args.recipe;\n let recipe: Recipe;\n // If we have an existing recipe, update it\n if (state.recipe) {\n recipe = { ...state.recipe };\n for (const [key, value] of Object.entries(recipeData)) {\n if (value !== null && value !== undefined) { // Only update fields that were provided\n (recipe as any)[key] = value;\n }\n }\n } else {\n // Create a new recipe\n recipe = {\n skill_level: recipeData.skill_level || SkillLevel.BEGINNER,\n special_preferences: recipeData.special_preferences || [],\n cooking_time: recipeData.cooking_time || CookingTime.FIFTEEN_MIN,\n ingredients: recipeData.ingredients || [],\n instructions: recipeData.instructions || []\n };\n }\n \n // Add tool response to messages\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Recipe generated.\",\n tool_call_id: toolCall.id\n };\n \n const updatedMessages = [...messages, toolResponse];\n \n // Explicitly emit the updated state to ensure it's shared with frontend\n state.recipe = recipe;\n await dispatchCustomEvent(\"manually_emit_intermediate_state\", state, config);\n \n // Return command with updated recipe\n return new Command({\n goto: \"start_flow\",\n update: {\n messages: updatedMessages,\n recipe: recipe\n }\n });\n }\n }\n\n return new Command({\n goto: END,\n update: {\n messages: messages,\n recipe: state.recipe\n }\n });\n}\n\n// Define the graph\nconst workflow = new 
StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"start_flow\", startFlow);\nworkflow.addNode(\"chat_node\", chatNode);\n\n// Add edges\nworkflow.setEntryPoint(\"start_flow\");\nworkflow.addEdge(START, \"start_flow\");\nworkflow.addEdge(\"start_flow\", \"chat_node\");\nworkflow.addEdge(\"chat_node\", END);\n\n// Compile the graph\nexport const sharedStateGraph = workflow.compile({\n transformers: [aguiTransformer],\n});", "language": "ts", "type": "file" } @@ -1078,7 +1078,7 @@ "langgraph-typescript::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }) ,\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: 
string; gradient: string }) => {\n      const newHaiku: Haiku = {\n        japanese: japanese || [],\n        english: english || [],\n        image_name: image_name || null,\n        gradient: gradient || \"\",\n      };\n      setHaikus((prev) => [\n        newHaiku,\n        ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n      ]);\n      setActiveIndex(0);\n      return \"Haiku generated!\";\n    },\n    render: ({ args }: { args: Partial<Haiku> }) => {\n      if (!args.japanese) return <></>;\n      return <HaikuCard haiku={args} />;\n    },\n  },\n  [haikus],\n  );\n\n  const currentHaiku = haikus[activeIndex];\n\n  return (\n
\n
\n \n \n {haikus.map((haiku, index) => (\n \n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n
\n
\n  );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n  return (\n    \n      {/* Decorative background elements */}\n
\n
\n\n {/* Haiku Text */}\n
\n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n

\n \n {haiku.english?.[index]}\n

\n
\n ))}\n
\n\n {/* Image */}\n {haiku.image_name && (\n
\n
\n \n
\n
\n
\n )}\n
\n );\n}\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n description: \"Generates a haiku with Japanese and English translations, an image name, and a CSS gradient for the background.\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }),\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n 
return <HaikuCard haiku={args} />;\n    },\n  },\n  [haikus],\n  );\n\n  const currentHaiku = haikus[activeIndex];\n\n  return (\n
\n
\n \n \n {haikus.map((haiku, index) => (\n \n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n
\n
\n  );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n  return (\n    \n      {/* Decorative background elements */}\n
\n
\n\n {/* Haiku Text */}\n
\n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n

\n \n {haiku.english?.[index]}\n

\n
\n ))}\n
\n\n {/* Image */}\n {haiku.image_name && (\n
\n
\n \n
\n
\n
\n )}\n
\n );\n}\n", "language": "typescript", "type": "file" }, @@ -1102,7 +1102,7 @@ }, { "name": "agent.ts", - "content": "/**\n * An example demonstrating tool-based generative UI using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\n\n\nexport const AgentStateAnnotation = Annotation.Root({\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n const modelWithTools = model.bindTools(\n [\n ...state.tools || []\n ],\n { parallel_tool_calls: false }\n );\n\n const systemMessage = new SystemMessage({\n content: 'Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.'\n });\n\n const response = await modelWithTools.invoke([\n systemMessage,\n ...state.messages,\n ], config);\n\n return new Command({\n goto: END,\n update: {\n messages: [response]\n }\n });\n}\n\nconst workflow = new StateGraph(AgentStateAnnotation);\nworkflow.addNode(\"chat_node\", chatNode);\n\nworkflow.addEdge(START, \"chat_node\");\n\nexport const toolBasedGenerativeUiGraph = workflow.compile();", + "content": "/**\n * An example demonstrating tool-based generative UI using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\nimport { aguiTransformer } from \"@ag-ui/langgraph/transformer\";\n\n\nexport const AgentStateAnnotation = Annotation.Root({\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n const modelWithTools = model.bindTools(\n [\n ...state.tools || []\n ],\n { parallel_tool_calls: false }\n );\n\n const systemMessage = new SystemMessage({\n content: 'Help the user with writing Haikus. 
If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.'\n });\n\n const response = await modelWithTools.invoke([\n systemMessage,\n ...state.messages,\n ], config);\n\n return new Command({\n goto: END,\n update: {\n messages: [response]\n }\n });\n}\n\nconst workflow = new StateGraph(AgentStateAnnotation);\nworkflow.addNode(\"chat_node\", chatNode);\n\nworkflow.addEdge(START, \"chat_node\");\n\nexport const toolBasedGenerativeUiGraph = workflow.compile({\n transformers: [aguiTransformer],\n});", "language": "ts", "type": "file" } @@ -1134,7 +1134,7 @@ }, { "name": "agent.ts", - "content": "/**\n * A travel agent supervisor demo showcasing multi-agent architecture with subgraphs.\n * The supervisor coordinates specialized agents: flights finder, hotels finder, and experiences finder.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage, AIMessage, ToolMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { \n Annotation, \n MessagesAnnotation, \n StateGraph, \n Command, \n START, \n END, \n interrupt \n} from \"@langchain/langgraph\";\n\n// Travel data interfaces\ninterface Flight {\n airline: string;\n departure: string;\n arrival: string;\n price: string;\n duration: string;\n}\n\ninterface Hotel {\n name: string;\n location: string;\n price_per_night: string;\n rating: string;\n}\n\ninterface Experience {\n name: string;\n type: \"restaurant\" | \"activity\";\n description: string;\n location: string;\n}\n\ninterface Itinerary {\n flight?: Flight;\n hotel?: Hotel;\n}\n\n// Custom reducer to merge itinerary updates\nfunction mergeItinerary(left: Itinerary | null, right?: Itinerary | null): Itinerary {\n if (!left) left = {};\n if (!right) right = {};\n return { ...left, ...right };\n}\n\n// State annotation for travel agent system\nexport const TravelAgentStateAnnotation = Annotation.Root({\n origin: Annotation(),\n destination: Annotation(),\n flights: Annotation(),\n hotels: Annotation(),\n experiences: Annotation(),\n\n // Itinerary with custom merger\n itinerary: Annotation({\n reducer: mergeItinerary,\n default: () => null\n }),\n\n // Tools available to all agents\n tools: Annotation({\n reducer: (x, y) => y ?? 
x,\n default: () => []\n }),\n\n // Supervisor routing\n next_agent: Annotation(),\n ...MessagesAnnotation.spec,\n});\n\nexport type TravelAgentState = typeof TravelAgentStateAnnotation.State;\n\n// Static data for demonstration\nconst STATIC_FLIGHTS: Flight[] = [\n { airline: \"KLM\", departure: \"Amsterdam (AMS)\", arrival: \"San Francisco (SFO)\", price: \"$650\", duration: \"11h 30m\" },\n { airline: \"United\", departure: \"Amsterdam (AMS)\", arrival: \"San Francisco (SFO)\", price: \"$720\", duration: \"12h 15m\" }\n];\n\nconst STATIC_HOTELS: Hotel[] = [\n { name: \"Hotel Zephyr\", location: \"Fisherman's Wharf\", price_per_night: \"$280/night\", rating: \"4.2 stars\" },\n { name: \"The Ritz-Carlton\", location: \"Nob Hill\", price_per_night: \"$550/night\", rating: \"4.8 stars\" },\n { name: \"Hotel Zoe\", location: \"Union Square\", price_per_night: \"$320/night\", rating: \"4.4 stars\" }\n];\n\nconst STATIC_EXPERIENCES: Experience[] = [\n { name: \"Pier 39\", type: \"activity\", description: \"Iconic waterfront destination with shops and sea lions\", location: \"Fisherman's Wharf\" },\n { name: \"Golden Gate Bridge\", type: \"activity\", description: \"World-famous suspension bridge with stunning views\", location: \"Golden Gate\" },\n { name: \"Swan Oyster Depot\", type: \"restaurant\", description: \"Historic seafood counter serving fresh oysters\", location: \"Polk Street\" },\n { name: \"Tartine Bakery\", type: \"restaurant\", description: \"Artisanal bakery famous for bread and pastries\", location: \"Mission District\" }\n];\n\nfunction createInterrupt(message: string, options: any[], recommendation: any, agent: string) {\n return interrupt({\n message,\n options,\n recommendation,\n agent,\n });\n}\n\n// Flights finder subgraph\nasync function flightsFinder(state: TravelAgentState, config?: RunnableConfig): Promise {\n // Simulate flight search with static data\n const flights = STATIC_FLIGHTS;\n\n const selectedFlight = state.itinerary?.flight;\n \n let flightChoice: Flight;\n const message = `Found ${flights.length} flight options from ${state.origin || 'Amsterdam'} to ${state.destination || 'San Francisco'}.\\n` +\n `I recommend choosing the flight by ${flights[0].airline} since it's known to be on time and cheaper.`\n if (!selectedFlight) {\n const interruptResult = createInterrupt(\n message,\n flights,\n flights[0],\n \"flights\"\n );\n \n // Parse the interrupt result if it's a string\n flightChoice = typeof interruptResult === 'string' ? JSON.parse(interruptResult) : interruptResult;\n } else {\n flightChoice = selectedFlight;\n }\n\n return new Command({\n goto: END,\n update: {\n flights: flights,\n itinerary: {\n flight: flightChoice\n },\n // Return all \"messages\" that the agent was sending\n messages: [\n ...state.messages,\n new AIMessage({\n content: message,\n }),\n new AIMessage({\n content: `Flights Agent: Great. 
I'll book you the ${flightChoice.airline} flight from ${flightChoice.departure} to ${flightChoice.arrival}.`,\n }),\n ]\n }\n });\n}\n\n// Hotels finder subgraph\nasync function hotelsFinder(state: TravelAgentState, config?: RunnableConfig): Promise {\n // Simulate hotel search with static data\n const hotels = STATIC_HOTELS;\n const selectedHotel = state.itinerary?.hotel;\n \n let hotelChoice: Hotel;\n const message = `Found ${hotels.length} accommodation options in ${state.destination || 'San Francisco'}.\\n\n I recommend choosing the ${hotels[2].name} since it strikes the balance between rating, price, and location.`\n if (!selectedHotel) {\n const interruptResult = createInterrupt(\n message,\n hotels,\n hotels[2],\n \"hotels\"\n );\n \n // Parse the interrupt result if it's a string\n hotelChoice = typeof interruptResult === 'string' ? JSON.parse(interruptResult) : interruptResult;\n } else {\n hotelChoice = selectedHotel;\n }\n\n return new Command({\n goto: END,\n update: {\n hotels: hotels,\n itinerary: {\n hotel: hotelChoice\n },\n // Return all \"messages\" that the agent was sending\n messages: [\n ...state.messages,\n new AIMessage({\n content: message,\n }),\n new AIMessage({\n content: `Hotels Agent: Excellent choice! You'll like ${hotelChoice.name}.`\n }),\n ]\n }\n });\n}\n\n// Experiences finder subgraph\nasync function experiencesFinder(state: TravelAgentState, config?: RunnableConfig): Promise {\n // Filter experiences (2 restaurants, 2 activities)\n const restaurants = STATIC_EXPERIENCES.filter(exp => exp.type === \"restaurant\").slice(0, 2);\n const activities = STATIC_EXPERIENCES.filter(exp => exp.type === \"activity\").slice(0, 2);\n const experiences = [...restaurants, ...activities];\n\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n const itinerary = state.itinerary || {};\n\n const systemPrompt = `\n You are the experiences agent. Your job is to find restaurants and activities for the user.\n You already went ahead and found a bunch of experiences. All you have to do now, is to let the user know of your findings.\n \n Current status:\n - Origin: ${state.origin || 'Amsterdam'}\n - Destination: ${state.destination || 'San Francisco'}\n - Flight chosen: ${JSON.stringify(itinerary.flight) || 'None'}\n - Hotel chosen: ${JSON.stringify(itinerary.hotel) || 'None'}\n - Activities found: ${JSON.stringify(activities)}\n - Restaurants found: ${JSON.stringify(restaurants)}\n `;\n\n // Get experiences response\n const response = await model.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n return new Command({\n goto: END,\n update: {\n experiences: experiences,\n messages: [...state.messages, response]\n }\n });\n}\n\n// Supervisor response tool\nconst SUPERVISOR_RESPONSE_TOOL = {\n type: \"function\" as const,\n function: {\n name: \"supervisor_response\",\n description: \"Always use this tool to structure your response to the user.\",\n parameters: {\n type: \"object\",\n properties: {\n answer: {\n type: \"string\",\n description: \"The answer to the user\"\n },\n next_agent: {\n type: \"string\",\n enum: [\"flights_agent\", \"hotels_agent\", \"experiences_agent\", \"complete\"],\n description: \"The agent to go to. 
Not required if you do not want to route to another agent.\"\n }\n },\n required: [\"answer\"]\n }\n }\n};\n\n// Supervisor agent\nasync function supervisorAgent(state: TravelAgentState, config?: RunnableConfig): Promise {\n const itinerary = state.itinerary || {};\n\n // Check what's already completed\n const hasFlights = itinerary.flight !== undefined;\n const hasHotels = itinerary.hotel !== undefined;\n const hasExperiences = state.experiences !== null;\n\n const systemPrompt = `\n You are a travel planning supervisor. Your job is to coordinate specialized agents to help plan a trip.\n \n Current status:\n - Origin: ${state.origin || 'Amsterdam'}\n - Destination: ${state.destination || 'San Francisco'}\n - Flights found: ${hasFlights}\n - Hotels found: ${hasHotels}\n - Experiences found: ${hasExperiences}\n - Itinerary (Things that the user has already confirmed selection on): ${JSON.stringify(itinerary, null, 2)}\n \n Available agents:\n - flights_agent: Finds flight options\n - hotels_agent: Finds hotel options \n - experiences_agent: Finds restaurant and activity recommendations\n - complete: Mark task as complete when all information is gathered\n \n You must route to the appropriate agent based on what's missing. Once all agents have completed their tasks, route to 'complete'.\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Bind the routing tool\n const modelWithTools = model.bindTools(\n [SUPERVISOR_RESPONSE_TOOL],\n {\n parallel_tool_calls: false,\n }\n );\n\n // Get supervisor decision\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n let messages = [...state.messages, response];\n\n // Handle tool calls for routing\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n const toolCallArgs = toolCall.args;\n const nextAgent = toolCallArgs.next_agent;\n\n const toolResponse = new ToolMessage({\n tool_call_id: toolCall.id!,\n content: `Routing to ${nextAgent} and providing the answer`,\n });\n\n messages = [\n ...messages, \n toolResponse, \n new AIMessage({ content: toolCallArgs.answer })\n ];\n\n if (nextAgent && nextAgent !== \"complete\") {\n return new Command({ goto: nextAgent });\n }\n }\n\n // Fallback if no tool call or complete\n return new Command({\n goto: END,\n update: { messages }\n });\n}\n\n// Create subgraphs\nconst flightsGraph = new StateGraph(TravelAgentStateAnnotation);\nflightsGraph.addNode(\"flights_agent_chat_node\", flightsFinder);\nflightsGraph.setEntryPoint(\"flights_agent_chat_node\");\nflightsGraph.addEdge(START, \"flights_agent_chat_node\");\nflightsGraph.addEdge(\"flights_agent_chat_node\", END);\nconst flightsSubgraph = flightsGraph.compile();\n\nconst hotelsGraph = new StateGraph(TravelAgentStateAnnotation);\nhotelsGraph.addNode(\"hotels_agent_chat_node\", hotelsFinder);\nhotelsGraph.setEntryPoint(\"hotels_agent_chat_node\");\nhotelsGraph.addEdge(START, \"hotels_agent_chat_node\");\nhotelsGraph.addEdge(\"hotels_agent_chat_node\", END);\nconst hotelsSubgraph = hotelsGraph.compile();\n\nconst experiencesGraph = new StateGraph(TravelAgentStateAnnotation);\nexperiencesGraph.addNode(\"experiences_agent_chat_node\", experiencesFinder);\nexperiencesGraph.setEntryPoint(\"experiences_agent_chat_node\");\nexperiencesGraph.addEdge(START, 
\"experiences_agent_chat_node\");\nexperiencesGraph.addEdge(\"experiences_agent_chat_node\", END);\nconst experiencesSubgraph = experiencesGraph.compile();\n\n// Main supervisor workflow\nconst workflow = new StateGraph(TravelAgentStateAnnotation);\n\n// Add supervisor and subgraphs as nodes\nworkflow.addNode(\"supervisor\", supervisorAgent, { ends: ['flights_agent', 'hotels_agent', 'experiences_agent', END] });\nworkflow.addNode(\"flights_agent\", flightsSubgraph);\nworkflow.addNode(\"hotels_agent\", hotelsSubgraph);\nworkflow.addNode(\"experiences_agent\", experiencesSubgraph);\n\n// Set entry point\nworkflow.setEntryPoint(\"supervisor\");\nworkflow.addEdge(START, \"supervisor\");\n\n// Add edges back to supervisor after each subgraph\nworkflow.addEdge(\"flights_agent\", \"supervisor\");\nworkflow.addEdge(\"hotels_agent\", \"supervisor\");\nworkflow.addEdge(\"experiences_agent\", \"supervisor\");\n\n// Compile the graph\nexport const subGraphsAgentGraph = workflow.compile();\n", + "content": "/**\n * A travel agent supervisor demo showcasing multi-agent architecture with subgraphs.\n * The supervisor coordinates specialized agents: flights finder, hotels finder, and experiences finder.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage, AIMessage, ToolMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport {\n Annotation,\n MessagesAnnotation,\n StateGraph,\n Command,\n START,\n END,\n interrupt\n} from \"@langchain/langgraph\";\nimport { aguiTransformer } from \"@ag-ui/langgraph/transformer\";\n\n// Travel data interfaces\ninterface Flight {\n airline: string;\n departure: string;\n arrival: string;\n price: string;\n duration: string;\n}\n\ninterface Hotel {\n name: string;\n location: string;\n price_per_night: string;\n rating: string;\n}\n\ninterface Experience {\n name: string;\n type: \"restaurant\" | \"activity\";\n description: string;\n location: string;\n}\n\ninterface Itinerary {\n flight?: Flight;\n hotel?: Hotel;\n}\n\n// Custom reducer to merge itinerary updates\nfunction mergeItinerary(left: Itinerary | null, right?: Itinerary | null): Itinerary {\n if (!left) left = {};\n if (!right) right = {};\n return { ...left, ...right };\n}\n\n// State annotation for travel agent system\nexport const TravelAgentStateAnnotation = Annotation.Root({\n origin: Annotation(),\n destination: Annotation(),\n flights: Annotation(),\n hotels: Annotation(),\n experiences: Annotation(),\n\n // Itinerary with custom merger\n itinerary: Annotation({\n reducer: mergeItinerary,\n default: () => null\n }),\n\n // Tools available to all agents\n tools: Annotation({\n reducer: (x, y) => y ?? 
x,\n default: () => []\n }),\n\n // Supervisor routing\n next_agent: Annotation(),\n ...MessagesAnnotation.spec,\n});\n\nexport type TravelAgentState = typeof TravelAgentStateAnnotation.State;\n\n// Static data for demonstration\nconst STATIC_FLIGHTS: Flight[] = [\n { airline: \"KLM\", departure: \"Amsterdam (AMS)\", arrival: \"San Francisco (SFO)\", price: \"$650\", duration: \"11h 30m\" },\n { airline: \"United\", departure: \"Amsterdam (AMS)\", arrival: \"San Francisco (SFO)\", price: \"$720\", duration: \"12h 15m\" }\n];\n\nconst STATIC_HOTELS: Hotel[] = [\n { name: \"Hotel Zephyr\", location: \"Fisherman's Wharf\", price_per_night: \"$280/night\", rating: \"4.2 stars\" },\n { name: \"The Ritz-Carlton\", location: \"Nob Hill\", price_per_night: \"$550/night\", rating: \"4.8 stars\" },\n { name: \"Hotel Zoe\", location: \"Union Square\", price_per_night: \"$320/night\", rating: \"4.4 stars\" }\n];\n\nconst STATIC_EXPERIENCES: Experience[] = [\n { name: \"Pier 39\", type: \"activity\", description: \"Iconic waterfront destination with shops and sea lions\", location: \"Fisherman's Wharf\" },\n { name: \"Golden Gate Bridge\", type: \"activity\", description: \"World-famous suspension bridge with stunning views\", location: \"Golden Gate\" },\n { name: \"Swan Oyster Depot\", type: \"restaurant\", description: \"Historic seafood counter serving fresh oysters\", location: \"Polk Street\" },\n { name: \"Tartine Bakery\", type: \"restaurant\", description: \"Artisanal bakery famous for bread and pastries\", location: \"Mission District\" }\n];\n\nfunction createInterrupt(message: string, options: any[], recommendation: any, agent: string) {\n return interrupt({\n message,\n options,\n recommendation,\n agent,\n });\n}\n\n// Flights finder subgraph\nasync function flightsFinder(state: TravelAgentState, config?: RunnableConfig): Promise {\n // Simulate flight search with static data\n const flights = STATIC_FLIGHTS;\n\n const selectedFlight = state.itinerary?.flight;\n \n let flightChoice: Flight;\n const message = `Found ${flights.length} flight options from ${state.origin || 'Amsterdam'} to ${state.destination || 'San Francisco'}.\\n` +\n `I recommend choosing the flight by ${flights[0].airline} since it's known to be on time and cheaper.`\n if (!selectedFlight) {\n const interruptResult = createInterrupt(\n message,\n flights,\n flights[0],\n \"flights\"\n );\n \n // Parse the interrupt result if it's a string\n flightChoice = typeof interruptResult === 'string' ? JSON.parse(interruptResult) : interruptResult;\n } else {\n flightChoice = selectedFlight;\n }\n\n return new Command({\n goto: END,\n update: {\n flights: flights,\n itinerary: {\n flight: flightChoice\n },\n // Return all \"messages\" that the agent was sending\n messages: [\n ...state.messages,\n new AIMessage({\n content: message,\n }),\n new AIMessage({\n content: `Flights Agent: Great. 
I'll book you the ${flightChoice.airline} flight from ${flightChoice.departure} to ${flightChoice.arrival}.`,\n }),\n ]\n }\n });\n}\n\n// Hotels finder subgraph\nasync function hotelsFinder(state: TravelAgentState, config?: RunnableConfig): Promise {\n // Simulate hotel search with static data\n const hotels = STATIC_HOTELS;\n const selectedHotel = state.itinerary?.hotel;\n \n let hotelChoice: Hotel;\n const message = `Found ${hotels.length} accommodation options in ${state.destination || 'San Francisco'}.\\n\n I recommend choosing the ${hotels[2].name} since it strikes the balance between rating, price, and location.`\n if (!selectedHotel) {\n const interruptResult = createInterrupt(\n message,\n hotels,\n hotels[2],\n \"hotels\"\n );\n \n // Parse the interrupt result if it's a string\n hotelChoice = typeof interruptResult === 'string' ? JSON.parse(interruptResult) : interruptResult;\n } else {\n hotelChoice = selectedHotel;\n }\n\n return new Command({\n goto: END,\n update: {\n hotels: hotels,\n itinerary: {\n hotel: hotelChoice\n },\n // Return all \"messages\" that the agent was sending\n messages: [\n ...state.messages,\n new AIMessage({\n content: message,\n }),\n new AIMessage({\n content: `Hotels Agent: Excellent choice! You'll like ${hotelChoice.name}.`\n }),\n ]\n }\n });\n}\n\n// Experiences finder subgraph\nasync function experiencesFinder(state: TravelAgentState, config?: RunnableConfig): Promise {\n // Filter experiences (2 restaurants, 2 activities)\n const restaurants = STATIC_EXPERIENCES.filter(exp => exp.type === \"restaurant\").slice(0, 2);\n const activities = STATIC_EXPERIENCES.filter(exp => exp.type === \"activity\").slice(0, 2);\n const experiences = [...restaurants, ...activities];\n\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n const itinerary = state.itinerary || {};\n\n const systemPrompt = `\n You are the experiences agent. Your job is to find restaurants and activities for the user.\n You already went ahead and found a bunch of experiences. All you have to do now, is to let the user know of your findings.\n \n Current status:\n - Origin: ${state.origin || 'Amsterdam'}\n - Destination: ${state.destination || 'San Francisco'}\n - Flight chosen: ${JSON.stringify(itinerary.flight) || 'None'}\n - Hotel chosen: ${JSON.stringify(itinerary.hotel) || 'None'}\n - Activities found: ${JSON.stringify(activities)}\n - Restaurants found: ${JSON.stringify(restaurants)}\n `;\n\n // Get experiences response\n const response = await model.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n return new Command({\n goto: END,\n update: {\n experiences: experiences,\n messages: [...state.messages, response]\n }\n });\n}\n\n// Supervisor response tool\nconst SUPERVISOR_RESPONSE_TOOL = {\n type: \"function\" as const,\n function: {\n name: \"supervisor_response\",\n description: \"Always use this tool to structure your response to the user.\",\n parameters: {\n type: \"object\",\n properties: {\n answer: {\n type: \"string\",\n description: \"The answer to the user\"\n },\n next_agent: {\n type: \"string\",\n enum: [\"flights_agent\", \"hotels_agent\", \"experiences_agent\", \"complete\"],\n description: \"The agent to go to. 
Not required if you do not want to route to another agent.\"\n }\n },\n required: [\"answer\"]\n }\n }\n};\n\n// Supervisor agent\nasync function supervisorAgent(state: TravelAgentState, config?: RunnableConfig): Promise {\n const itinerary = state.itinerary || {};\n\n // Check what's already completed\n const hasFlights = itinerary.flight !== undefined;\n const hasHotels = itinerary.hotel !== undefined;\n const hasExperiences = state.experiences !== null;\n\n const systemPrompt = `\n You are a travel planning supervisor. Your job is to coordinate specialized agents to help plan a trip.\n \n Current status:\n - Origin: ${state.origin || 'Amsterdam'}\n - Destination: ${state.destination || 'San Francisco'}\n - Flights found: ${hasFlights}\n - Hotels found: ${hasHotels}\n - Experiences found: ${hasExperiences}\n - Itinerary (Things that the user has already confirmed selection on): ${JSON.stringify(itinerary, null, 2)}\n \n Available agents:\n - flights_agent: Finds flight options\n - hotels_agent: Finds hotel options \n - experiences_agent: Finds restaurant and activity recommendations\n - complete: Mark task as complete when all information is gathered\n \n You must route to the appropriate agent based on what's missing. Once all agents have completed their tasks, route to 'complete'.\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Bind the routing tool\n const modelWithTools = model.bindTools(\n [SUPERVISOR_RESPONSE_TOOL],\n {\n parallel_tool_calls: false,\n }\n );\n\n // Get supervisor decision\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n let messages = [...state.messages, response];\n\n // Handle tool calls for routing\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n const toolCallArgs = toolCall.args;\n const nextAgent = toolCallArgs.next_agent;\n\n const toolResponse = new ToolMessage({\n tool_call_id: toolCall.id!,\n content: `Routing to ${nextAgent} and providing the answer`,\n });\n\n messages = [\n ...messages, \n toolResponse, \n new AIMessage({ content: toolCallArgs.answer })\n ];\n\n if (nextAgent && nextAgent !== \"complete\") {\n return new Command({ goto: nextAgent });\n }\n }\n\n // Fallback if no tool call or complete\n return new Command({\n goto: END,\n update: { messages }\n });\n}\n\n// Create subgraphs\nconst flightsGraph = new StateGraph(TravelAgentStateAnnotation);\nflightsGraph.addNode(\"flights_agent_chat_node\", flightsFinder);\nflightsGraph.setEntryPoint(\"flights_agent_chat_node\");\nflightsGraph.addEdge(START, \"flights_agent_chat_node\");\nflightsGraph.addEdge(\"flights_agent_chat_node\", END);\nconst flightsSubgraph = flightsGraph.compile();\n\nconst hotelsGraph = new StateGraph(TravelAgentStateAnnotation);\nhotelsGraph.addNode(\"hotels_agent_chat_node\", hotelsFinder);\nhotelsGraph.setEntryPoint(\"hotels_agent_chat_node\");\nhotelsGraph.addEdge(START, \"hotels_agent_chat_node\");\nhotelsGraph.addEdge(\"hotels_agent_chat_node\", END);\nconst hotelsSubgraph = hotelsGraph.compile();\n\nconst experiencesGraph = new StateGraph(TravelAgentStateAnnotation);\nexperiencesGraph.addNode(\"experiences_agent_chat_node\", experiencesFinder);\nexperiencesGraph.setEntryPoint(\"experiences_agent_chat_node\");\nexperiencesGraph.addEdge(START, 
\"experiences_agent_chat_node\");\nexperiencesGraph.addEdge(\"experiences_agent_chat_node\", END);\nconst experiencesSubgraph = experiencesGraph.compile();\n\n// Main supervisor workflow\nconst workflow = new StateGraph(TravelAgentStateAnnotation);\n\n// Add supervisor and subgraphs as nodes\nworkflow.addNode(\"supervisor\", supervisorAgent, { ends: ['flights_agent', 'hotels_agent', 'experiences_agent', END] });\nworkflow.addNode(\"flights_agent\", flightsSubgraph);\nworkflow.addNode(\"hotels_agent\", hotelsSubgraph);\nworkflow.addNode(\"experiences_agent\", experiencesSubgraph);\n\n// Set entry point\nworkflow.setEntryPoint(\"supervisor\");\nworkflow.addEdge(START, \"supervisor\");\n\n// Add edges back to supervisor after each subgraph\nworkflow.addEdge(\"flights_agent\", \"supervisor\");\nworkflow.addEdge(\"hotels_agent\", \"supervisor\");\nworkflow.addEdge(\"experiences_agent\", \"supervisor\");\n\n// Compile the graph\nexport const subGraphsAgentGraph = workflow.compile({\n transformers: [aguiTransformer],\n});\n", "language": "ts", "type": "file" } @@ -1256,7 +1256,7 @@ "mastra::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n 
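Note on the supervisor wiring above: each subgraph registers its entry twice, since `setEntryPoint(x)` is shorthand for `addEdge(START, x)` in LangGraph JS, so one of the two calls can be dropped. The routing itself never uses conditional edges: the supervisor forces a single `supervisor_response` tool call and maps its `next_agent` argument onto a `Command`. A minimal sketch of that step, assuming the same `Command`/`END` exports from `@langchain/langgraph` used in the code above (the function name is illustrative):

    import { Command, END } from "@langchain/langgraph";

    // Convert the supervisor_response tool-call arguments into a graph
    // transition: hand off to the chosen subgraph, or end the run.
    function routeSupervisor(nextAgent: string | undefined, messages: unknown[]): Command {
      if (nextAgent && nextAgent !== "complete") {
        // Subgraphs loop back to the supervisor via the edges declared above.
        return new Command({ goto: nextAgent });
      }
      // No route requested (or "complete"): end the run and persist messages.
      return new Command({ goto: END, update: { messages } });
    }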
{\n agentId: \"tool_based_generative_ui\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }) ,\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n
\n
\n \n \n {haikus.map((haiku, index) => (\n \n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n
\n
\n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial }) {\n return (\n \n {/* Decorative background elements */}\n
\n
\n\n {/* Haiku Text */}\n
\n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n
\n \n {haiku.english?.[index]}\n
\n
\n ))}\n
\n\n {/* Image */}\n {haiku.image_name && (\n
\n
\n \n
\n
\n
\n )}\n
\n );\n}\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n description: \"Generates a haiku with Japanese and English translations, an image name, and a CSS gradient for the background.\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }),\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n 
return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n
\n
\n \n \n {haikus.map((haiku, index) => (\n \n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n
\n
\n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial }) {\n return (\n \n {/* Decorative background elements */}\n
\n
\n\n {/* Haiku Text */}\n
\n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n
\n \n {haiku.english?.[index]}\n
\n
\n ))}\n
\n\n {/* Image */}\n {haiku.image_name && (\n
\n
\n \n
\n
\n
\n )}\n
\n );\n}\n", "language": "typescript", "type": "file" }, @@ -1388,7 +1388,7 @@ "mastra-agent-local::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }) ,\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return 
<>;\n return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n
\n
\n \n \n {haikus.map((haiku, index) => (\n \n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n
\n
\n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial }) {\n return (\n \n {/* Decorative background elements */}\n
\n
\n\n {/* Haiku Text */}\n
\n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n
\n \n {haiku.english?.[index]}\n
\n
\n ))}\n
\n\n {/* Image */}\n {haiku.image_name && (\n
\n
\n \n
\n
\n
\n )}\n
\n );\n}\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n description: \"Generates a haiku with Japanese and English translations, an image name, and a CSS gradient for the background.\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }),\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n 
return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n
\n
\n \n \n {haikus.map((haiku, index) => (\n \n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n
\n
\n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial }) {\n return (\n \n {/* Decorative background elements */}\n
\n
\n\n {/* Haiku Text */}\n
\n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n
\n \n {haiku.english?.[index]}\n
\n
\n ))}\n
\n\n {/* Image */}\n {haiku.image_name && (\n
\n
\n \n
\n
\n
\n )}\n
\n );\n}\n", "language": "typescript", "type": "file" }, @@ -1462,7 +1462,7 @@ "spring-ai::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }) ,\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n 
return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n
\n
\n \n \n {haikus.map((haiku, index) => (\n \n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n
\n
\n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial }) {\n return (\n \n {/* Decorative background elements */}\n
\n
\n\n {/* Haiku Text */}\n
\n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n
\n \n {haiku.english?.[index]}\n
\n
\n ))}\n
\n\n {/* Image */}\n {haiku.image_name && (\n
\n
\n \n
\n
\n
\n )}\n
\n );\n}\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n description: \"Generates a haiku with Japanese and English translations, an image name, and a CSS gradient for the background.\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }),\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n 
return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n
\n
\n \n \n {haikus.map((haiku, index) => (\n \n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n
\n
\n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial }) {\n return (\n \n {/* Decorative background elements */}\n
\n
\n\n {/* Haiku Text */}\n
\n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n
\n \n {haiku.english?.[index]}\n
\n
\n ))}\n
\n\n {/* Image */}\n {haiku.image_name && (\n
\n
\n \n
\n
\n
\n )}\n
\n );\n}\n", "language": "typescript", "type": "file" }, @@ -1648,7 +1648,7 @@ "pydantic-ai::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }) ,\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n 
return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n
\n
\n \n \n {haikus.map((haiku, index) => (\n \n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n
\n
\n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial }) {\n return (\n \n {/* Decorative background elements */}\n
\n
\n\n {/* Haiku Text */}\n
\n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n
\n \n {haiku.english?.[index]}\n
\n
\n ))}\n
\n\n {/* Image */}\n {haiku.image_name && (\n
\n
\n \n
\n
\n
\n )}\n
\n );\n}\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n description: \"Generates a haiku with Japanese and English translations, an image name, and a CSS gradient for the background.\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }),\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n 
return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n
\n
\n \n \n {haikus.map((haiku, index) => (\n \n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n
\n
\n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial }) {\n return (\n \n {/* Decorative background elements */}\n
\n
\n\n {/* Haiku Text */}\n
\n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n
\n \n {haiku.english?.[index]}\n
\n
\n ))}\n
\n\n {/* Image */}\n {haiku.image_name && (\n
\n
\n \n
\n
\n
\n )}\n
\n );\n}\n", "language": "typescript", "type": "file" }, @@ -1806,7 +1806,7 @@ "adk-middleware::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }) ,\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return 
<>;\n return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n
\n
\n \n \n {haikus.map((haiku, index) => (\n \n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n
\n
\n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial }) {\n return (\n \n {/* Decorative background elements */}\n
\n
\n\n {/* Haiku Text */}\n
\n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n
\n \n {haiku.english?.[index]}\n
\n
\n ))}\n
\n\n {/* Image */}\n {haiku.image_name && (\n
\n
\n \n
\n
\n
\n )}\n
\n );\n}\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n description: \"Generates a haiku with Japanese and English translations, an image name, and a CSS gradient for the background.\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }),\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n 
return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n
\n
\n \n \n {haikus.map((haiku, index) => (\n \n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n
\n
\n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial }) {\n return (\n \n {/* Decorative background elements */}\n
\n
\n\n {/* Haiku Text */}\n
\n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n
\n \n {haiku.english?.[index]}\n
\n
\n ))}\n
\n\n {/* Image */}\n {haiku.image_name && (\n
\n
\n \n
\n
\n
\n )}\n
\n );\n}\n", "language": "typescript", "type": "file" }, @@ -2038,7 +2038,7 @@ "microsoft-agent-framework-dotnet::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }) ,\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if 
(!args.japanese) return <>;\n return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n
\n
\n \n \n {haikus.map((haiku, index) => (\n \n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n
\n
\n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial }) {\n return (\n \n {/* Decorative background elements */}\n
\n
\n\n {/* Haiku Text */}\n
\n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n
\n \n {haiku.english?.[index]}\n
\n
\n ))}\n
\n\n {/* Image */}\n {haiku.image_name && (\n
\n
\n \n
\n
\n
\n )}\n
\n );\n}\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n description: \"Generates a haiku with Japanese and English translations, an image name, and a CSS gradient for the background.\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }),\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n 
return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n
\n
\n \n \n {haikus.map((haiku, index) => (\n \n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n
\n
\n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial }) {\n return (\n \n {/* Decorative background elements */}\n
\n
\n\n {/* Haiku Text */}\n
\n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n
\n \n {haiku.english?.[index]}\n
\n
\n ))}\n
\n\n {/* Image */}\n {haiku.image_name && (\n
\n
\n \n
\n
\n
\n )}\n
\n );\n}\n", "language": "typescript", "type": "file" }, @@ -2234,7 +2234,7 @@ "microsoft-agent-framework-python::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }) ,\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if 
(!args.japanese) return <>;\n return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n
\n
\n \n \n {haikus.map((haiku, index) => (\n \n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n
\n
\n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial }) {\n return (\n \n {/* Decorative background elements */}\n
\n
\n\n {/* Haiku Text */}\n
\n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n
\n \n {haiku.english?.[index]}\n
\n
\n ))}\n
\n\n {/* Image */}\n {haiku.image_name && (\n
\n
\n \n
\n
\n
\n )}\n
\n );\n}\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n description: \"Generates a haiku with Japanese and English translations, an image name, and a CSS gradient for the background.\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }),\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n 
return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n \n \n \n \n {haikus.map((haiku, index) => (\n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n \n \n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n return (\n \n {/* Decorative background elements */}\n \n \n\n {/* Haiku Text */}\n \n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n \n \n {haiku.english?.[index]}\n \n \n ))}\n \n\n {/* Image */}\n {haiku.image_name && (\n \n \n \n \n \n \n )}\n
\n );\n}\n", "language": "typescript", "type": "file" }, @@ -2392,7 +2392,7 @@ "ag2::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }) ,\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n return 
;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n \n \n \n \n {haikus.map((haiku, index) => (\n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n \n \n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n return (\n \n {/* Decorative background elements */}\n \n \n\n {/* Haiku Text */}\n \n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n \n \n {haiku.english?.[index]}\n \n \n ))}\n \n\n {/* Image */}\n {haiku.image_name && (\n \n \n \n \n \n \n )}\n
\n );\n}\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n description: \"Generates a haiku with Japanese and English translations, an image name, and a CSS gradient for the background.\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }),\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n 
return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n \n \n \n \n {haikus.map((haiku, index) => (\n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n \n \n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n return (\n \n {/* Decorative background elements */}\n \n \n\n {/* Haiku Text */}\n \n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n \n \n {haiku.english?.[index]}\n \n \n ))}\n \n\n {/* Image */}\n {haiku.image_name && (\n \n \n \n \n \n \n )}\n
\n );\n}\n", "language": "typescript", "type": "file" }, @@ -2498,7 +2498,7 @@ "agno::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }) ,\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n return 
;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n \n \n \n \n {haikus.map((haiku, index) => (\n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n \n \n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n return (\n \n {/* Decorative background elements */}\n \n \n\n {/* Haiku Text */}\n \n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n \n \n {haiku.english?.[index]}\n \n \n ))}\n \n\n {/* Image */}\n {haiku.image_name && (\n \n \n \n \n \n \n )}\n
\n );\n}\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n description: \"Generates a haiku with Japanese and English translations, an image name, and a CSS gradient for the background.\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }),\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n 
return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n \n \n \n \n {haikus.map((haiku, index) => (\n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n \n \n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n return (\n \n {/* Decorative background elements */}\n \n \n\n {/* Haiku Text */}\n \n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n \n \n {haiku.english?.[index]}\n \n \n ))}\n \n\n {/* Image */}\n {haiku.image_name && (\n \n \n \n \n \n \n )}\n
\n );\n}\n", "language": "typescript", "type": "file" }, @@ -2788,7 +2788,7 @@ "crewai::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }) ,\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n return 
;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n \n \n \n \n {haikus.map((haiku, index) => (\n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n \n \n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n return (\n \n {/* Decorative background elements */}\n \n \n\n {/* Haiku Text */}\n \n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n \n \n {haiku.english?.[index]}\n \n \n ))}\n \n\n {/* Image */}\n {haiku.image_name && (\n \n \n \n \n \n \n )}\n
\n );\n}\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n description: \"Generates a haiku with Japanese and English translations, an image name, and a CSS gradient for the background.\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }),\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n 
return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n \n \n \n \n {haikus.map((haiku, index) => (\n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n \n \n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n return (\n \n {/* Decorative background elements */}\n \n \n\n {/* Haiku Text */}\n \n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n \n \n {haiku.english?.[index]}\n \n \n ))}\n \n\n {/* Image */}\n {haiku.image_name && (\n \n \n \n \n \n \n )}\n
\n );\n}\n", "language": "typescript", "type": "file" }, @@ -3080,7 +3080,7 @@ "server-starter-all-features::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }) ,\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if 
(!args.japanese) return <></>;\n return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n \n \n \n \n {haikus.map((haiku, index) => (\n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n \n \n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n return (\n \n {/* Decorative background elements */}\n \n \n\n {/* Haiku Text */}\n \n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n \n \n {haiku.english?.[index]}\n \n \n ))}\n \n\n {/* Image */}\n {haiku.image_name && (\n \n \n \n \n \n \n )}\n
\n );\n}\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n description: \"Generates a haiku with Japanese and English translations, an image name, and a CSS gradient for the background.\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }),\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n 
return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n \n \n \n \n {haikus.map((haiku, index) => (\n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n \n \n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n return (\n \n {/* Decorative background elements */}\n \n \n\n {/* Haiku Text */}\n \n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n \n \n {haiku.english?.[index]}\n \n \n ))}\n \n\n {/* Image */}\n {haiku.image_name && (\n \n \n \n \n \n \n )}\n
\n );\n}\n", "language": "typescript", "type": "file" }, @@ -3376,7 +3376,7 @@ "claude-agent-sdk-python::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }) ,\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) 
return <></>;\n return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n \n \n \n \n {haikus.map((haiku, index) => (\n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n \n \n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n return (\n \n {/* Decorative background elements */}\n \n \n\n {/* Haiku Text */}\n \n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n \n \n {haiku.english?.[index]}\n \n \n ))}\n \n\n {/* Image */}\n {haiku.image_name && (\n \n \n \n \n \n \n )}\n
\n );\n}\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n description: \"Generates a haiku with Japanese and English translations, an image name, and a CSS gradient for the background.\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }),\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n 
return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n \n \n \n \n {haikus.map((haiku, index) => (\n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n \n \n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n return (\n \n {/* Decorative background elements */}\n \n \n\n {/* Haiku Text */}\n \n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n \n \n {haiku.english?.[index]}\n \n \n ))}\n \n\n {/* Image */}\n {haiku.image_name && (\n \n \n \n \n \n \n )}\n
\n );\n}\n", "language": "typescript", "type": "file" }, @@ -3494,7 +3494,7 @@ "claude-agent-sdk-typescript::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }) ,\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if 
(!args.japanese) return <></>;\n return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n \n \n \n \n {haikus.map((haiku, index) => (\n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n \n \n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n return (\n \n {/* Decorative background elements */}\n \n \n\n {/* Haiku Text */}\n \n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n \n \n {haiku.english?.[index]}\n \n \n ))}\n \n\n {/* Image */}\n {haiku.image_name && (\n \n \n \n \n \n \n )}\n
\n );\n}\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-core/v2/styles.css\";\nimport { \n useFrontendTool,\n useConfigureSuggestions,\n CopilotSidebar,\n} from \"@copilotkit/react-core/v2\";\nimport { z } from \"zod\";\nimport {\n Carousel,\n CarouselContent,\n CarouselItem,\n CarouselNext,\n CarouselPrevious,\n} from \"@/components/ui/carousel\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\nimport { CopilotKit } from \"@copilotkit/react-core\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_name: string | null;\n gradient: string;\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { chatDefaultOpen } = useURLParams();\n\n return (\n \n \n \n \n );\n}\n\nfunction SidebarWithSuggestions({ defaultOpen }: { defaultOpen: boolean }) {\n useConfigureSuggestions({\n suggestions: [\n { title: \"Nature Haiku\", message: \"Write me a haiku about nature.\" },\n { title: \"Ocean Haiku\", message: \"Create a haiku about the ocean.\" },\n { title: \"Spring Haiku\", message: \"Generate a haiku about spring.\" },\n ],\n available: \"always\",\n });\n\n return (\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction HaikuDisplay() {\n const [activeIndex, setActiveIndex] = useState(0);\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_name: null,\n gradient: \"\",\n },\n ]);\n\n useFrontendTool(\n {\n agentId: \"tool_based_generative_ui\",\n description: \"Generates a haiku with Japanese and English translations, an image name, and a CSS gradient for the background.\",\n name: \"generate_haiku\",\n parameters: z.object({\n japanese: z.array(z.string()).describe(\"3 lines of haiku in Japanese\"),\n english: z.array(z.string()).describe(\"3 lines of haiku translated to English\"),\n image_name: z.string().describe(`One relevant image name from: ${VALID_IMAGE_NAMES.join(\", \")}`),\n gradient: z.string().describe(\"CSS Gradient color for the background\"),\n }),\n followUp: false,\n handler: async ({ japanese, english, image_name, gradient }: { japanese: string[]; english: string[]; image_name: string; gradient: string }) => {\n const newHaiku: Haiku = {\n japanese: japanese || [],\n english: english || [],\n image_name: image_name || null,\n gradient: gradient || \"\",\n };\n setHaikus((prev) => [\n newHaiku,\n ...prev.filter((h) => h.english[0] !== \"A placeholder verse—\"),\n ]);\n setActiveIndex(0);\n return \"Haiku generated!\";\n },\n render: ({ args }: { args: Partial }) => {\n if (!args.japanese) return <>;\n 
return ;\n },\n },\n [haikus],\n );\n\n const currentHaiku = haikus[activeIndex];\n\n return (\n \n \n \n \n {haikus.map((haiku, index) => (\n \n \n ))}\n \n {haikus.length > 1 && (\n <>\n \n \n \n )}\n \n \n \n );\n}\n\nfunction HaikuCard({ haiku }: { haiku: Partial<Haiku> }) {\n return (\n \n {/* Decorative background elements */}\n \n \n\n {/* Haiku Text */}\n \n {haiku.japanese?.map((line, index) => (\n \n \n {line}\n \n \n {haiku.english?.[index]}\n \n \n ))}\n \n\n {/* Image */}\n {haiku.image_name && (\n \n \n \n \n \n \n )}\n
\n );\n}\n", "language": "typescript", "type": "file" }, diff --git a/integrations/langgraph/typescript/examples/package.json b/integrations/langgraph/typescript/examples/package.json index 9f159c5c76..5ab2500636 100644 --- a/integrations/langgraph/typescript/examples/package.json +++ b/integrations/langgraph/typescript/examples/package.json @@ -5,18 +5,19 @@ "type": "module", "scripts": { "build": "tsc", - "dev": "pnpx @langchain/langgraph-cli@1.1.13 dev", + "dev": "pnpx @langchain/langgraph-cli@1.2.1 dev", "start": "node dist/index.js" }, "dependencies": { - "@copilotkit/sdk-js": "0.0.0-mme-ag-ui-0-0-46-20260227141603", - "@langchain/core": "^1.1.7", - "@langchain/anthropic": "^0.3.0", - "@langchain/google-genai": "^0.2.0", - "@langchain/openai": "^1.2.0", - "@langchain/langgraph": "^1.0.7", + "@ag-ui/langgraph": "link:../", + "@copilotkit/sdk-js": "1.57.0", + "@langchain/core": "^1.1.45", + "@langchain/anthropic": "^1.3.28", + "@langchain/google-genai": "^1.0.0", + "@langchain/openai": "^1.4.5", + "@langchain/langgraph": "^1.3.0", "dotenv": "^16.4.5", - "langchain": "^1.2.3", + "langchain": "^1.4.0", "uuid": "^10.0.0" }, "devDependencies": { diff --git a/integrations/langgraph/typescript/examples/pnpm-lock.yaml b/integrations/langgraph/typescript/examples/pnpm-lock.yaml index f953b1a51b..9bba99574d 100644 --- a/integrations/langgraph/typescript/examples/pnpm-lock.yaml +++ b/integrations/langgraph/typescript/examples/pnpm-lock.yaml @@ -8,30 +8,33 @@ importers: .: dependencies: + '@ag-ui/langgraph': + specifier: link:../ + version: link:.. '@copilotkit/sdk-js': - specifier: 0.0.0-mme-ag-ui-0-0-46-20260227141603 - version: 0.0.0-mme-ag-ui-0-0-46-20260227141603(@ag-ui/core@0.0.42)(@langchain/community@0.0.53(openai@6.15.0(zod@3.25.76)))(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76)))(@langchain/langgraph@1.0.7(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76)))(zod-to-json-schema@3.24.6(zod@3.25.76))(zod@3.25.76))(langchain@1.2.8(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76)))(openai@6.15.0(zod@3.25.76))(zod-to-json-schema@3.24.6(zod@3.25.76)))(typescript@5.8.3)(zod@3.25.76) + specifier: 1.57.0 + version: 1.57.0(@ag-ui/client@0.0.53)(@ag-ui/core@0.0.53)(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76)))(@langchain/langgraph@1.3.0(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76)))(openai@6.37.0(zod@3.25.76))(zod-to-json-schema@3.24.6(zod@3.25.76))(zod@3.25.76))(langchain@1.4.0(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76)))(openai@6.37.0(zod@3.25.76))(zod-to-json-schema@3.24.6(zod@3.25.76)))(openai@6.37.0(zod@3.25.76))(typescript@5.8.3)(zod-to-json-schema@3.24.6(zod@3.25.76))(zod@3.25.76) '@langchain/anthropic': - specifier: ^0.3.0 - version: 0.3.34(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76)))(zod@3.25.76) + specifier: ^1.3.28 + version: 1.3.29(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76))) '@langchain/core': - specifier: ^1.1.7 - version: 1.1.7(openai@6.15.0(zod@3.25.76)) + specifier: ^1.1.45 + version: 1.1.45(openai@6.37.0(zod@3.25.76)) '@langchain/google-genai': - specifier: ^0.2.0 - version: 0.2.18(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76))) + specifier: ^1.0.0 + version: 1.0.3(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76))) '@langchain/langgraph': - specifier: ^1.0.7 - version: 1.0.7(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76)))(zod-to-json-schema@3.24.6(zod@3.25.76))(zod@3.25.76) + specifier: ^1.3.0 + version: 1.3.0(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76)))(openai@6.37.0(zod@3.25.76))(zod-to-json-schema@3.24.6(zod@3.25.76))(zod@3.25.76) 
'@langchain/openai': - specifier: ^1.2.0 - version: 1.2.0(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76))) + specifier: ^1.4.5 + version: 1.4.5(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76))) dotenv: specifier: ^16.4.5 version: 16.6.1 langchain: - specifier: ^1.2.3 - version: 1.2.8(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76)))(openai@6.15.0(zod@3.25.76))(zod-to-json-schema@3.24.6(zod@3.25.76)) + specifier: ^1.4.0 + version: 1.4.0(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76)))(openai@6.37.0(zod@3.25.76))(zod-to-json-schema@3.24.6(zod@3.25.76)) uuid: specifier: ^10.0.0 version: 10.0.0 @@ -48,11 +51,26 @@ importers: packages: - '@ag-ui/core@0.0.42': - resolution: {integrity: sha512-C2hMg4Gs5oiUDgK9cA2RsTwSSmFZdIsqPklDrFw/Ue+quH6EU3vKp5YoOq7nuaQYO4pO8Em+Z+l5/M5PpcvP1g==} + '@ag-ui/client@0.0.53': + resolution: {integrity: sha512-Mkup36KUp0KXy9v89QtAOWDUoh8H1s1Vgl4zvQv9HqXuAK1TkbtpXJHpbgZJXIxTqd54KT6yCurmC2UkOP7FDQ==} - '@anthropic-ai/sdk@0.65.0': - resolution: {integrity: sha512-zIdPOcrCVEI8t3Di40nH4z9EoeyGZfXbYSvWdDLsB/KkaSYMnEgC7gmcgWu83g2NTn1ZTpbMvpdttWDGGIk6zw==} + '@ag-ui/core@0.0.53': + resolution: {integrity: sha512-11UocR7fFdMWw503bWCX2IOK15vbWfxT11Mn9xOiPBVO/UVcn57ywGrlLL4UaBlPgmUTvuzr2yYR2ElSqiN2wQ==} + + '@ag-ui/encoder@0.0.53': + resolution: {integrity: sha512-bAOcfVdm6U4H6G6tW+DZfwPEQm1w/snVBTwaFn9nJcEMW69M7/HZuwvEc/7Zo0rK1jRL32N/j60PwTAeky19fw==} + + '@ag-ui/langgraph@0.0.31': + resolution: {integrity: sha512-mK24pfQZiV5SlnDLhTka+873gw7QQOAWXqqDSnwkuyoQQQFX7KC8xZR+4Da2dWqyVhbhNPx+amE16X7twS1wcg==} + peerDependencies: + '@ag-ui/client': '>=0.0.42' + '@ag-ui/core': '>=0.0.42' + + '@ag-ui/proto@0.0.53': + resolution: {integrity: sha512-swjz22xWT8YUZt5OhmUwkARDQdwt8XM1hmGZbQrhRnNPXKwrKJX9ELlbnQ4iFUQIKkMWpphzE3vA3yNKs2bbKw==} + + '@anthropic-ai/sdk@0.91.1': + resolution: {integrity: sha512-LAmu761tSN9r66ixvmciswUj/ZC+1Q4iAfpedTfSVLeswRwnY3n2Nb6Tsk+cLPP28aLOPWeMgIuTuCcMC6W/iw==} hasBin: true peerDependencies: zod: ^3.25.0 || ^4.0.0 @@ -64,370 +82,91 @@ packages: resolution: {integrity: sha512-JiDShH45zKHWyGe4ZNVRrCjBz8Nh9TMmZG1kh4QTK8hCBTWBi8Da+i7s1fJw7/lYpM4ccepSNfqzZ/QvABBi5g==} engines: {node: '>=6.9.0'} + '@bufbuild/protobuf@2.12.0': + resolution: {integrity: sha512-B/XlCaFIP8LOwzo+bz5uFzATYokcwCKQcghqnlfwSmM5eX/qTkvDBnDPs+gXtX/RyjxJ4DRikECcPJbyALA8FA==} + '@cfworker/json-schema@4.1.1': resolution: {integrity: sha512-gAmrUZSGtKc3AiBL71iNWxDsyUC5uMaKKGdvzYsBoTW/xi42JQHl7eKV2OYzCUqvc+D2RCcf7EXY2iCyFIk6og==} - '@copilotkit/sdk-js@0.0.0-mme-ag-ui-0-0-46-20260227141603': - resolution: {integrity: sha512-qwPTcJiGixz5v3u1zWWp3onvvrS5LIjgKPf6XC58/+DJopPY4n5U0DRukVOUbIj+nPNrbfsitkB2IKFMUo3TyA==} + '@copilotkit/license-verifier@0.2.0': + resolution: {integrity: sha512-hliCifqy5a65YTozgRckuQmvBEQlt4L2PhbpSDY6fb/TKqPHyNDJgItRSnOpxOVDqvEfHEbUUqw3NaD88ZtdJA==} + + '@copilotkit/sdk-js@1.57.0': + resolution: {integrity: sha512-DsUtTRsh9dcrBmY4nU8D8di4YZXxXbrCy8tw+1gD0IFpfV0stOMTipK4BQnWA4s0lbj1+9lgIF1H3jrgR5m0eA==} peerDependencies: - '@langchain/community': ^0.3.58 '@langchain/core': '>=0.4.0 <2.0.0' '@langchain/langgraph': '>=0.4.0 <2.0.0' langchain: '>=1.0.0' typescript: ^5.2.3 zod: ^3.23.3 || ^3.24.0 || ^3.25.0 - '@copilotkit/shared@0.0.0-mme-ag-ui-0-0-46-20260227141603': - resolution: {integrity: sha512-b29dZR67mDq85v9h4ritwJ3dUVek8UpR4MZ0SHuFgZF7BYzMOGoGleh96H/8Mj1s6hTiQ781NVAPEJ6OiY4FDA==} + '@copilotkit/shared@1.57.0': + resolution: {integrity: sha512-X6uqeAWLDh08LUj4a0gpC9mrlmlyaviuybsnGJQPzAJonYIehYH63bEksj6xFgIA1FfkmEzK2XTxUnbrtpMd3g==} peerDependencies: - '@ag-ui/core': 
^0.0.46 + '@ag-ui/core': '>=0.0.48' '@google/generative-ai@0.24.1': resolution: {integrity: sha512-MqO+MLfM6kjxcKoy0p1wRzG3b4ZZXtPI+z2IE26UogS2Cm/XHO+7gGRBh6gcJsOiIVoH93UwKvW4HdgiOZCy9Q==} engines: {node: '>=18.0.0'} - '@langchain/anthropic@0.3.34': - resolution: {integrity: sha512-8bOW1A2VHRCjbzdYElrjxutKNs9NSIxYRGtR+OJWVzluMqoKKh2NmmFrpPizEyqCUEG2tTq5xt6XA1lwfqMJRA==} - engines: {node: '>=18'} + '@langchain/anthropic@1.3.29': + resolution: {integrity: sha512-ep1qBIcV07bajsg3fDqMd39rYwoRLOEK/6lk+MCxlm1YB5SRoKKJAZANrblQ/4RYhZJnxf95c6BSQu8VoNbVAQ==} + engines: {node: '>=20'} peerDependencies: - '@langchain/core': '>=0.3.58 <0.4.0' + '@langchain/core': ^1.1.45 - '@langchain/community@0.0.53': - resolution: {integrity: sha512-iFqZPt4MRssGYsQoKSXWJQaYTZCC7WNuilp2JCCs3wKmJK3l6mR0eV+PDrnT+TaDHUVxt/b0rwgM0sOiy0j2jA==} - engines: {node: '>=18'} - peerDependencies: - '@aws-crypto/sha256-js': ^5.0.0 - '@aws-sdk/client-bedrock-agent-runtime': ^3.485.0 - '@aws-sdk/client-bedrock-runtime': ^3.422.0 - '@aws-sdk/client-dynamodb': ^3.310.0 - '@aws-sdk/client-kendra': ^3.352.0 - '@aws-sdk/client-lambda': ^3.310.0 - '@aws-sdk/client-sagemaker-runtime': ^3.310.0 - '@aws-sdk/client-sfn': ^3.310.0 - '@aws-sdk/credential-provider-node': ^3.388.0 - '@azure/search-documents': ^12.0.0 - '@clickhouse/client': ^0.2.5 - '@cloudflare/ai': '*' - '@datastax/astra-db-ts': ^1.0.0 - '@elastic/elasticsearch': ^8.4.0 - '@getmetal/metal-sdk': '*' - '@getzep/zep-js': ^0.9.0 - '@gomomento/sdk': ^1.51.1 - '@gomomento/sdk-core': ^1.51.1 - '@google-ai/generativelanguage': ^0.2.1 - '@gradientai/nodejs-sdk': ^1.2.0 - '@huggingface/inference': ^2.6.4 - '@mozilla/readability': '*' - '@neondatabase/serverless': '*' - '@opensearch-project/opensearch': '*' - '@pinecone-database/pinecone': '*' - '@planetscale/database': ^1.8.0 - '@premai/prem-sdk': ^0.3.25 - '@qdrant/js-client-rest': ^1.8.2 - '@raycast/api': ^1.55.2 - '@rockset/client': ^0.9.1 - '@smithy/eventstream-codec': ^2.0.5 - '@smithy/protocol-http': ^3.0.6 - '@smithy/signature-v4': ^2.0.10 - '@smithy/util-utf8': ^2.0.0 - '@supabase/postgrest-js': ^1.1.1 - '@supabase/supabase-js': ^2.10.0 - '@tensorflow-models/universal-sentence-encoder': '*' - '@tensorflow/tfjs-converter': '*' - '@tensorflow/tfjs-core': '*' - '@upstash/redis': ^1.20.6 - '@upstash/vector': ^1.0.7 - '@vercel/kv': ^0.2.3 - '@vercel/postgres': ^0.5.0 - '@writerai/writer-sdk': ^0.40.2 - '@xata.io/client': ^0.28.0 - '@xenova/transformers': ^2.5.4 - '@zilliz/milvus2-sdk-node': '>=2.2.7' - better-sqlite3: ^9.4.0 - cassandra-driver: ^4.7.2 - cborg: ^4.1.1 - chromadb: '*' - closevector-common: 0.1.3 - closevector-node: 0.1.6 - closevector-web: 0.1.6 - cohere-ai: '*' - convex: ^1.3.1 - couchbase: ^4.3.0 - discord.js: ^14.14.1 - dria: ^0.0.3 - duck-duck-scrape: ^2.2.5 - faiss-node: ^0.5.1 - firebase-admin: ^11.9.0 || ^12.0.0 - google-auth-library: ^8.9.0 - googleapis: ^126.0.1 - hnswlib-node: ^3.0.0 - html-to-text: ^9.0.5 - interface-datastore: ^8.2.11 - ioredis: ^5.3.2 - it-all: ^3.0.4 - jsdom: '*' - jsonwebtoken: ^9.0.2 - llmonitor: ^0.5.9 - lodash: ^4.17.21 - lunary: ^0.6.11 - mongodb: '>=5.2.0' - mysql2: ^3.3.3 - neo4j-driver: '*' - node-llama-cpp: '*' - pg: ^8.11.0 - pg-copy-streams: ^6.0.5 - pickleparser: ^0.2.1 - portkey-ai: ^0.1.11 - redis: '*' - replicate: ^0.18.0 - typeorm: ^0.3.12 - typesense: ^1.5.3 - usearch: ^1.1.1 - vectordb: ^0.1.4 - voy-search: 0.6.2 - weaviate-ts-client: '*' - web-auth-library: ^1.0.3 - ws: ^8.14.2 - peerDependenciesMeta: - '@aws-crypto/sha256-js': - optional: true - 
'@aws-sdk/client-bedrock-agent-runtime': - optional: true - '@aws-sdk/client-bedrock-runtime': - optional: true - '@aws-sdk/client-dynamodb': - optional: true - '@aws-sdk/client-kendra': - optional: true - '@aws-sdk/client-lambda': - optional: true - '@aws-sdk/client-sagemaker-runtime': - optional: true - '@aws-sdk/client-sfn': - optional: true - '@aws-sdk/credential-provider-node': - optional: true - '@azure/search-documents': - optional: true - '@clickhouse/client': - optional: true - '@cloudflare/ai': - optional: true - '@datastax/astra-db-ts': - optional: true - '@elastic/elasticsearch': - optional: true - '@getmetal/metal-sdk': - optional: true - '@getzep/zep-js': - optional: true - '@gomomento/sdk': - optional: true - '@gomomento/sdk-core': - optional: true - '@google-ai/generativelanguage': - optional: true - '@gradientai/nodejs-sdk': - optional: true - '@huggingface/inference': - optional: true - '@mozilla/readability': - optional: true - '@neondatabase/serverless': - optional: true - '@opensearch-project/opensearch': - optional: true - '@pinecone-database/pinecone': - optional: true - '@planetscale/database': - optional: true - '@premai/prem-sdk': - optional: true - '@qdrant/js-client-rest': - optional: true - '@raycast/api': - optional: true - '@rockset/client': - optional: true - '@smithy/eventstream-codec': - optional: true - '@smithy/protocol-http': - optional: true - '@smithy/signature-v4': - optional: true - '@smithy/util-utf8': - optional: true - '@supabase/postgrest-js': - optional: true - '@supabase/supabase-js': - optional: true - '@tensorflow-models/universal-sentence-encoder': - optional: true - '@tensorflow/tfjs-converter': - optional: true - '@tensorflow/tfjs-core': - optional: true - '@upstash/redis': - optional: true - '@upstash/vector': - optional: true - '@vercel/kv': - optional: true - '@vercel/postgres': - optional: true - '@writerai/writer-sdk': - optional: true - '@xata.io/client': - optional: true - '@xenova/transformers': - optional: true - '@zilliz/milvus2-sdk-node': - optional: true - better-sqlite3: - optional: true - cassandra-driver: - optional: true - cborg: - optional: true - chromadb: - optional: true - closevector-common: - optional: true - closevector-node: - optional: true - closevector-web: - optional: true - cohere-ai: - optional: true - convex: - optional: true - couchbase: - optional: true - discord.js: - optional: true - dria: - optional: true - duck-duck-scrape: - optional: true - faiss-node: - optional: true - firebase-admin: - optional: true - google-auth-library: - optional: true - googleapis: - optional: true - hnswlib-node: - optional: true - html-to-text: - optional: true - interface-datastore: - optional: true - ioredis: - optional: true - it-all: - optional: true - jsdom: - optional: true - jsonwebtoken: - optional: true - llmonitor: - optional: true - lodash: - optional: true - lunary: - optional: true - mongodb: - optional: true - mysql2: - optional: true - neo4j-driver: - optional: true - node-llama-cpp: - optional: true - pg: - optional: true - pg-copy-streams: - optional: true - pickleparser: - optional: true - portkey-ai: - optional: true - redis: - optional: true - replicate: - optional: true - typeorm: - optional: true - typesense: - optional: true - usearch: - optional: true - vectordb: - optional: true - voy-search: - optional: true - weaviate-ts-client: - optional: true - web-auth-library: - optional: true - ws: - optional: true - - '@langchain/core@0.1.63': - resolution: {integrity: 
sha512-+fjyYi8wy6x1P+Ee1RWfIIEyxd9Ee9jksEwvrggPwwI/p45kIDTdYTblXsM13y4mNWTiACyLSdbwnPaxxdoz+w==} - engines: {node: '>=18'} - - '@langchain/core@1.1.7': - resolution: {integrity: sha512-NSZSi33+V/8RVv1szsUiX7u+jXVCDImr2VO74SiKgJrhyxXKdJcxa3HMPKwdU+tkgQ6T+R7wxVYQ1Cnd4Z48tA==} + '@langchain/core@1.1.45': + resolution: {integrity: sha512-Y/wvuglLTMKJahkl4QD9dBIdF/z/CxZJWdTfHJF/q2jtlJtoFf6Mb5JpGxZfsi3mBY6NSG941FSLTcqhCKrhBA==} engines: {node: '>=20'} - '@langchain/google-genai@0.2.18': - resolution: {integrity: sha512-m9EiN3VKC01A7/625YQ6Q1Lqq8zueewADX4W5Tcme4RImN75zkg2Z7FYbD1Fo6Zwolc4wBNO6LUtbg3no4rv1Q==} - engines: {node: '>=18'} + '@langchain/google-genai@1.0.3': + resolution: {integrity: sha512-ZN3f6SPFZI3FMjJ1C0y5A/lWlZ/x+A9RoIKg1PNYdX6bEu7/BR7oz0dYYI2+YGl3TRp1u75e3SzzL0MxmfWfDA==} + engines: {node: '>=20'} peerDependencies: - '@langchain/core': '>=0.3.58 <0.4.0' + '@langchain/core': 1.0.6 - '@langchain/langgraph-checkpoint@1.0.0': - resolution: {integrity: sha512-xrclBGvNCXDmi0Nz28t3vjpxSH6UYx6w5XAXSiiB1WEdc2xD2iY/a913I3x3a31XpInUW/GGfXXfePfaghV54A==} + '@langchain/langgraph-checkpoint@1.0.2': + resolution: {integrity: sha512-F4E5Tr0nt8FGghgdscJtHw+ABzChOHeI80R7Y1pjIHdiJom6c2ieo76vL+FWiny80JmoGqhrVAEIWrw0cXKPxg==} engines: {node: '>=18'} peerDependencies: - '@langchain/core': ^1.0.1 + '@langchain/core': ^1.1.44 - '@langchain/langgraph-sdk@1.3.1': - resolution: {integrity: sha512-zTi7DZHwqtMEzapvm3I1FL4Q7OZsxtq9tTXy6s2gcCxyIU3sphqRboqytqVN7dNHLdTCLb8nXy49QKurs2MIBg==} + '@langchain/langgraph-sdk@1.9.1': + resolution: {integrity: sha512-pHojybde9HoMz7ZDtyW3pgDdomAN4C0pbdgXASjccFS+S2Cqx75iZBtBUUg1A/CSer5xh9GakdIqyihxZOf7VA==} peerDependencies: - '@langchain/core': ^1.0.1 react: ^18 || ^19 react-dom: ^18 || ^19 + svelte: ^4.0.0 || ^5.0.0 + vue: ^3.0.0 peerDependenciesMeta: - '@langchain/core': - optional: true react: optional: true react-dom: optional: true + svelte: + optional: true + vue: + optional: true - '@langchain/langgraph@1.0.7': - resolution: {integrity: sha512-EBGqNOWoRiEoLUaeuiXRpUM8/DE6QcwiirNyd97XhezStebBoTTilWH8CUt6S94JRGl5zwfBBRHfzotDnZS/eA==} + '@langchain/langgraph@1.3.0': + resolution: {integrity: sha512-QvhTjiyqFPz81A+y6LHs223w6DTjv5+882DT4mup72bd72rRhNjTYo5fhes5um0swnKArvY/arc7KeFInfHHWw==} engines: {node: '>=18'} peerDependencies: - '@langchain/core': ^1.0.1 - zod: ^3.25.32 || ^4.1.0 + '@langchain/core': ^1.1.44 + zod: ^3.25.32 || ^4.2.0 zod-to-json-schema: ^3.x peerDependenciesMeta: zod-to-json-schema: optional: true - '@langchain/openai@0.0.34': - resolution: {integrity: sha512-M+CW4oXle5fdoz2T2SwdOef8pl3/1XmUx1vjn2mXUVM/128aO0l23FMF0SNBsAbRV6P+p/TuzjodchJbi0Ht/A==} - engines: {node: '>=18'} - - '@langchain/openai@1.2.0': - resolution: {integrity: sha512-r2g5Be3Sygw7VTJ89WVM/M94RzYToNTwXf8me1v+kgKxzdHbd/8XPYDFxpXEp3REyPgUrtJs+Oplba9pkTH5ug==} + '@langchain/openai@1.4.5': + resolution: {integrity: sha512-bQ2WMIZfSh02trJLYSAtiIcD3j6EBCiAm9nw0dZWQsVaUxmWc3JJqs8uUte6AkMazmLHzcUIw+14UkXO5fRJvQ==} engines: {node: '>=20'} peerDependencies: - '@langchain/core': ^1.0.0 + '@langchain/core': ^1.1.42 + + '@langchain/protocol@0.0.15': + resolution: {integrity: sha512-MllvbpMjqHevUm+v94M422mH7XKN+wGCvJRBVROTWBotEDOATYB4Ktk2UheYP859y9o2LlhtPek5t1T9eyfAbQ==} '@lukeed/csprng@1.1.0': resolution: {integrity: sha512-Z7C/xXCiGWsg0KuKsHTKJxbWhpI3Vs5GwLfOean7MGyVFGqdRgBbAjOCh6u4bbjPc/8MJ2pZmK/0DLdCbivLDA==} @@ -437,6 +176,10 @@ packages: resolution: {integrity: sha512-qC72D4+CDdjGqJvkFMMEAtancHUQ7/d/tAiHf64z8MopFDmcrtbcJuerDtFceuAfQJ2pDSfCKCtbqoGBNnwg0w==} engines: {node: '>=8'} + 
'@protobuf-ts/protoc@2.11.1': + resolution: {integrity: sha512-mUZJaV0daGO6HUX90o/atzQ6A7bbN2RSuHtdwo8SSF2Qoe3zHwa4IHyCN1evftTeHfLmdz+45qo47sL+5P8nyg==} + hasBin: true + '@segment/analytics-core@1.8.2': resolution: {integrity: sha512-5FDy6l8chpzUfJcNlIcyqYQq4+JTUynlVoCeCUuVz+l+6W0PXg+ljKp34R4yLVCcY5VVZohuW+HH0VLWdwYVAg==} @@ -447,29 +190,18 @@ packages: resolution: {integrity: sha512-fOXLL8uY0uAWw/sTLmezze80hj8YGgXXlAfvSS6TUmivk4D/SP0C0sxnbpFdkUzWg2zT64qWIZj26afEtSnxUA==} engines: {node: '>=20'} - '@types/node-fetch@2.6.13': - resolution: {integrity: sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==} + '@standard-schema/spec@1.1.0': + resolution: {integrity: sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==} - '@types/node@18.19.130': - resolution: {integrity: sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==} + '@types/json-schema@7.0.15': + resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==} '@types/node@20.19.9': resolution: {integrity: sha512-cuVNgarYWZqxRJDQHEB58GEONhOK79QVR/qYx4S7kcUObQvUwvFnYxJuuHUKm2aieN9X3yZB4LZsuYNU1Qphsw==} - '@types/retry@0.12.0': - resolution: {integrity: sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==} - '@types/uuid@10.0.0': resolution: {integrity: sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==} - abort-controller@3.0.0: - resolution: {integrity: sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==} - engines: {node: '>=6.5'} - - agentkeepalive@4.6.0: - resolution: {integrity: sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==} - engines: {node: '>= 8.0.0'} - ansi-styles@4.3.0: resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} engines: {node: '>=8'} @@ -478,22 +210,12 @@ packages: resolution: {integrity: sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==} engines: {node: '>=10'} - asynckit@0.4.0: - resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} - base64-js@1.5.1: resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} - binary-search@1.3.6: - resolution: {integrity: sha512-nbE1WxOTTrUWIfsfZ4aHGYu5DOuNkbxGokjV6Z2kxfJK3uaAb8zNK1muzOeipoLHZjInT4Br88BHpzevc681xA==} - buffer@6.0.3: resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==} - call-bind-apply-helpers@1.0.2: - resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==} - engines: {node: '>= 0.4'} - camelcase@6.3.0: resolution: {integrity: sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==} engines: {node: '>=10'} @@ -509,25 +231,13 @@ packages: color-name@1.1.4: resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} - combined-stream@1.0.8: - resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} - engines: {node: '>= 0.8'} - - commander@10.0.1: - resolution: {integrity: 
sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==} - engines: {node: '>=14'} - - console-table-printer@2.14.6: - resolution: {integrity: sha512-MCBl5HNVaFuuHW6FGbL/4fB7N/ormCy+tQ+sxTrF6QtSbSNETvPuOVbkJBhzDgYhvjWGrTma4eYJa37ZuoQsPw==} + compare-versions@6.1.1: + resolution: {integrity: sha512-4hm4VPpIecmlg59CHXnRDnqGplJFrbLG4aFEl5vl6cK1u76ws3LLvX7ikFnTDl5vo39sjWD6AaDPYodJp/NNHg==} decamelize@1.2.0: resolution: {integrity: sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==} engines: {node: '>=0.10.0'} - delayed-stream@1.0.0: - resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} - engines: {node: '>=0.4.0'} - dotenv@16.6.1: resolution: {integrity: sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==} engines: {node: '>=12'} @@ -536,69 +246,14 @@ packages: resolution: {integrity: sha512-2QF/g9/zTaPDc3BjNcVTGoBbXBgYfMTTceLaYcFJ/W9kggFUkhxD/hMEeuLKbugyef9SqAx8cpgwlIP/jinUTA==} engines: {node: '>=4'} - dunder-proto@1.0.1: - resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} - engines: {node: '>= 0.4'} - - es-define-property@1.0.1: - resolution: {integrity: sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==} - engines: {node: '>= 0.4'} - - es-errors@1.3.0: - resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} - engines: {node: '>= 0.4'} - - es-object-atoms@1.1.1: - resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==} - engines: {node: '>= 0.4'} - - es-set-tostringtag@2.1.0: - resolution: {integrity: sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==} - engines: {node: '>= 0.4'} - - event-target-shim@5.0.1: - resolution: {integrity: sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==} - engines: {node: '>=6'} - eventemitter3@4.0.7: resolution: {integrity: sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==} - expr-eval@2.0.2: - resolution: {integrity: sha512-4EMSHGOPSwAfBiibw3ndnP0AvjDWLsMvGOvWEZ2F96IGk0bIVdjQisOHxReSkE13mHcfbuCiXw+G4y0zv6N8Eg==} - - fast-xml-parser@4.5.6: - resolution: {integrity: sha512-Yd4vkROfJf8AuJrDIVMVmYfULKmIJszVsMv7Vo71aocsKgFxpdlpSHXSaInvyYfgw2PRuObQSW2GFpVMUjxu9A==} - hasBin: true - - flat@5.0.2: - resolution: {integrity: sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==} - hasBin: true - - form-data-encoder@1.7.2: - resolution: {integrity: sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==} - - form-data@4.0.5: - resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==} - engines: {node: '>= 6'} - - formdata-node@4.4.1: - resolution: {integrity: sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==} - engines: {node: '>= 12.20'} + eventemitter3@5.0.4: + resolution: {integrity: sha512-mlsTRyGaPBjPedk6Bvw+aqbsXDtoAyAzm5MO7JgU+yVRyMQ5O8bD4Kcci7BS85f93veegeCPkL8R4GLClnjLFw==} - function-bind@1.1.2: - resolution: {integrity: 
sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} - - get-intrinsic@1.3.0: - resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} - engines: {node: '>= 0.4'} - - get-proto@1.0.1: - resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==} - engines: {node: '>= 0.4'} - - gopd@1.2.0: - resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==} - engines: {node: '>= 0.4'} + fast-json-patch@3.1.1: + resolution: {integrity: sha512-vf6IHUX2SBcA+5/+4883dsIjpBTqmfBjmYiWK1savxQmFk4JfBMLa7ynTYOs1Rolp/T1betJxHiGD3g1Mn8lUQ==} graphql@16.12.0: resolution: {integrity: sha512-DKKrynuQRne0PNpEbzuEdHlYOMksHSUI8Zc9Unei5gTsMNA2/vMpoMz/yKba50pejK56qj98qM0SjYxAKi13gQ==} @@ -608,26 +263,12 @@ packages: resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} engines: {node: '>=8'} - has-symbols@1.1.0: - resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==} - engines: {node: '>= 0.4'} - - has-tostringtag@1.0.2: - resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==} - engines: {node: '>= 0.4'} - - hasown@2.0.2: - resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} - engines: {node: '>= 0.4'} - - humanize-ms@1.2.1: - resolution: {integrity: sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==} - ieee754@1.2.1: resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==} - is-any-array@2.0.1: - resolution: {integrity: sha512-UtilS7hLRu++wb/WBAw9bNuP1Eg04Ivn1vERJck8zJthEvXCBEBpGR/33u/xLKWEQf95803oalHrVDptcAvFdQ==} + is-network-error@1.3.1: + resolution: {integrity: sha512-6QCxa49rQbmUWLfk0nuGqzql9U8uaV2H6279bRErPBHe/109hCzsLUBUHfbEtvLIHBd6hyXbgedBSHevm43Edw==} + engines: {node: '>=16'} jose@5.10.0: resolution: {integrity: sha512-s+3Al/p9g32Iq+oqXxkW//7jk2Vig6FF1CFqzVXoTUXt2qz89YWbL+OwS17NFYEvxC35n0FKeGO2LGYSxeM2Gg==} @@ -639,27 +280,20 @@ packages: resolution: {integrity: sha512-+DWg8jCJG2TEnpy7kOm/7/AxaYoaRbjVB4LFZLySZlWn8exGs3A4OLJR966cVvU26N7X9TWxl+Jsw7dzAqKT6g==} engines: {node: '>=16'} - langchain@1.2.8: - resolution: {integrity: sha512-d2WYSVBUK7d3rX1pfYoDBLANWpqgKlTPbc1zjQmJj1zsLPV5rUlF0jZXdy+fdxiqtG7vSaVuaX8H3cp26pWR5w==} + langchain@1.4.0: + resolution: {integrity: sha512-p3H5U1vfO0T4ri/xxqI6jccRP3LYmOW6KGxaJcwI5mIGVR/G6eNhZyjSZB9d7me6J7an4Pc6zA8tH8Qa6/7xwA==} engines: {node: '>=20'} peerDependencies: - '@langchain/core': 1.1.13 + '@langchain/core': ^1.1.44 - langsmith@0.1.68: - resolution: {integrity: sha512-otmiysWtVAqzMx3CJ4PrtUBhWRG5Co8Z4o7hSZENPjlit9/j3/vm3TSvbaxpDYakZxtMjhkcJTqrdYFipISEiQ==} - peerDependencies: - openai: '*' - peerDependenciesMeta: - openai: - optional: true - - langsmith@0.4.0: - resolution: {integrity: sha512-/X99fHBuBFFup778dNmgAVJMdFULz0S8yZUT1cD1RRSviMjxq1GZo8PulRR1ALDxpgYsJs8ueF9godUzF13LSw==} + langsmith@0.6.2: + resolution: {integrity: sha512-OrFt+a2P4UMaa2cSpp3fjYTJ+TWQFjnoz5j4njiZYWMpAJezTrMRN1mrNVzq/FACprgPwAMjq5YkZNRYJKorwg==} peerDependencies: '@opentelemetry/api': '*' '@opentelemetry/exporter-trace-otlp-proto': '*' '@opentelemetry/sdk-trace-base': '*' openai: '*' + ws: 
'>=7' peerDependenciesMeta: '@opentelemetry/api': optional: true @@ -669,46 +303,13 @@ packages: optional: true openai: optional: true - - math-intrinsics@1.1.0: - resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} - engines: {node: '>= 0.4'} - - mime-db@1.52.0: - resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} - engines: {node: '>= 0.6'} - - mime-types@2.1.35: - resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} - engines: {node: '>= 0.6'} - - ml-array-mean@1.1.6: - resolution: {integrity: sha512-MIdf7Zc8HznwIisyiJGRH9tRigg3Yf4FldW8DxKxpCCv/g5CafTw0RRu51nojVEOXuCQC7DRVVu5c7XXO/5joQ==} - - ml-array-sum@1.1.6: - resolution: {integrity: sha512-29mAh2GwH7ZmiRnup4UyibQZB9+ZLyMShvt4cH4eTK+cL2oEMIZFnSyB3SS8MlsTh6q/w/yh48KmqLxmovN4Dw==} - - ml-distance-euclidean@2.0.0: - resolution: {integrity: sha512-yC9/2o8QF0A3m/0IXqCTXCzz2pNEzvmcE/9HFKOZGnTjatvBbsn4lWYJkxENkA4Ug2fnYl7PXQxnPi21sgMy/Q==} - - ml-distance@4.0.1: - resolution: {integrity: sha512-feZ5ziXs01zhyFUUUeZV5hwc0f5JW0Sh0ckU1koZe/wdVkJdGxcP06KNQuF0WBTj8FttQUzcvQcpcrOp/XrlEw==} - - ml-tree-similarity@1.0.0: - resolution: {integrity: sha512-XJUyYqjSuUQkNQHMscr6tcjldsOoAekxADTplt40QKfwW6nd++1wHWV9AArl0Zvw/TIHgNaZZNvr8QGvE8wLRg==} - - ms@2.1.3: - resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + ws: + optional: true mustache@4.2.0: resolution: {integrity: sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==} hasBin: true - node-domexception@1.0.0: - resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==} - engines: {node: '>=10.5.0'} - deprecated: Use your platform's native DOMException instead - node-fetch@2.7.0: resolution: {integrity: sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==} engines: {node: 4.x || >=6.0.0} @@ -718,24 +319,8 @@ packages: encoding: optional: true - num-sort@2.1.0: - resolution: {integrity: sha512-1MQz1Ed8z2yckoBeSfkQHHO9K1yDRxxtotKSJ9yvcTUUxSvfvzEq5GwBrjjHEpMlq/k5gvXdmJ1SbYxWtpNoVg==} - engines: {node: '>=8'} - - openai@4.104.0: - resolution: {integrity: sha512-p99EFNsA/yX6UhVO93f5kJsDRLAg+CTA2RBqdHK4RtK8u5IJw32Hyb2dTGKbnnFmnuoBv5r7Z2CURI9sGZpSuA==} - hasBin: true - peerDependencies: - ws: ^8.18.0 - zod: ^3.23.8 - peerDependenciesMeta: - ws: - optional: true - zod: - optional: true - - openai@6.15.0: - resolution: {integrity: sha512-F1Lvs5BoVvmZtzkUEVyh8mDQPPFolq4F+xdsx/DO8Hee8YF3IGAlZqUIsF+DVGhqf4aU0a3bTghsxB6OIsRy1g==} + openai@6.37.0: + resolution: {integrity: sha512-0H5dEGFmmLv6KSd0W1w2nyL8WsLkX6yoLeQpU+dZAOuGcany5qkYQMmj35ZrKgb6yiyYqpUzFOpR8mZQkgqeEQ==} hasBin: true peerDependencies: ws: ^8.18.0 @@ -754,32 +339,28 @@ packages: resolution: {integrity: sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==} engines: {node: '>=8'} - p-retry@4.6.2: - resolution: {integrity: sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==} - engines: {node: '>=8'} + p-queue@9.2.0: + resolution: {integrity: sha512-dWgLE8AH0HjQ9fe74pUkKkvzzYT18Inp4zra3lKHnnwqGvcfcUBrvF2EAVX+envufDNBOzpPq/IBUONDbI7+3g==} + engines: {node: '>=20'} + + p-retry@7.1.1: + resolution: {integrity: 
sha512-J5ApzjyRkkf601HpEeykoiCvzHQjWxPAHhyjFcEUP2SWq0+35NKh8TLhpLw+Dkq5TZBFvUM6UigdE9hIVYTl5w==} + engines: {node: '>=20'} p-timeout@3.2.0: resolution: {integrity: sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==} engines: {node: '>=8'} - retry@0.13.1: - resolution: {integrity: sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==} - engines: {node: '>= 4'} + p-timeout@7.0.1: + resolution: {integrity: sha512-AxTM2wDGORHGEkPCt8yqxOTMgpfbEHqF51f/5fJCmwFC3C/zNcGT63SymH2ttOAaiIws2zVg4+izQCjrakcwHg==} + engines: {node: '>=20'} + + partial-json@0.1.7: + resolution: {integrity: sha512-Njv/59hHaokb/hRUjce3Hdv12wd60MtM9Z5Olmn+nehe0QDAsRtRbJPvJ0Z91TusF0SuZRIvnM+S4l6EIP8leA==} rxjs@7.8.1: resolution: {integrity: sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==} - semver@7.7.2: - resolution: {integrity: sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==} - engines: {node: '>=10'} - hasBin: true - - simple-wcswidth@1.1.2: - resolution: {integrity: sha512-j7piyCjAeTDSjzTSQ7DokZtMNwNlEAyxqSZeCS+CXH7fJ4jx3FuJ/mTW3mE+6JLs4VJBbcll0Kjn+KXI5t21Iw==} - - strnum@1.1.2: - resolution: {integrity: sha512-vrN+B7DBIoTTZjnPNewwhx6cBA/H+IS7rfW68n7XxC1y7uoiGQBxaKzqucGUgavX15dJgiGztLJ8vxuEzwqBdA==} - supports-color@7.2.0: resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} engines: {node: '>=8'} @@ -798,12 +379,12 @@ packages: engines: {node: '>=14.17'} hasBin: true - undici-types@5.26.5: - resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} - undici-types@6.21.0: resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==} + untruncate-json@0.0.1: + resolution: {integrity: sha512-4W9enDK4X1y1s2S/Rz7ysw6kDuMS3VmRjMFg7GZrNO+98OSe+x5Lh7PKYoVjy3lW/1wmhs6HW0lusnQRHgMarA==} + uuid@10.0.0: resolution: {integrity: sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==} hasBin: true @@ -812,14 +393,10 @@ packages: resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} hasBin: true - uuid@9.0.1: - resolution: {integrity: sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==} + uuid@13.0.2: + resolution: {integrity: sha512-vzi9uRZ926x4XV73S/4qQaTwPXM2JBj6/6lI/byHH1jOpCzb0zDbfytgA9LcN/hzb2l7WQSQnxITOVx5un/wGw==} hasBin: true - web-streams-polyfill@4.0.0-beta.3: - resolution: {integrity: sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==} - engines: {node: '>= 14'} - webidl-conversions@3.0.1: resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==} @@ -836,12 +413,56 @@ packages: snapshots: - '@ag-ui/core@0.0.42': + '@ag-ui/client@0.0.53': dependencies: + '@ag-ui/core': 0.0.53 + '@ag-ui/encoder': 0.0.53 + '@ag-ui/proto': 0.0.53 + '@types/uuid': 10.0.0 + compare-versions: 6.1.1 + fast-json-patch: 3.1.1 rxjs: 7.8.1 + untruncate-json: 0.0.1 + uuid: 11.1.0 + zod: 3.25.76 + + '@ag-ui/core@0.0.53': + dependencies: zod: 3.25.76 - '@anthropic-ai/sdk@0.65.0(zod@3.25.76)': + '@ag-ui/encoder@0.0.53': + dependencies: + '@ag-ui/core': 0.0.53 + '@ag-ui/proto': 0.0.53 + + 
'@ag-ui/langgraph@0.0.31(@ag-ui/client@0.0.53)(@ag-ui/core@0.0.53)(openai@6.37.0(zod@3.25.76))(zod-to-json-schema@3.24.6(zod@3.25.76))': + dependencies: + '@ag-ui/client': 0.0.53 + '@ag-ui/core': 0.0.53 + '@langchain/core': 1.1.45(openai@6.37.0(zod@3.25.76)) + '@langchain/langgraph-sdk': 1.9.1(openai@6.37.0(zod@3.25.76)) + langchain: 1.4.0(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76)))(openai@6.37.0(zod@3.25.76))(zod-to-json-schema@3.24.6(zod@3.25.76)) + partial-json: 0.1.7 + rxjs: 7.8.1 + transitivePeerDependencies: + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' + - openai + - react + - react-dom + - svelte + - vue + - ws + - zod-to-json-schema + + '@ag-ui/proto@0.0.53': + dependencies: + '@ag-ui/core': 0.0.53 + '@bufbuild/protobuf': 2.12.0 + '@protobuf-ts/protoc': 2.11.1 + + '@anthropic-ai/sdk@0.91.1(zod@3.25.76)': dependencies: json-schema-to-ts: 3.1.1 optionalDependencies: @@ -849,166 +470,146 @@ snapshots: '@babel/runtime@7.29.2': {} + '@bufbuild/protobuf@2.12.0': {} + '@cfworker/json-schema@4.1.1': {} - '@copilotkit/sdk-js@0.0.0-mme-ag-ui-0-0-46-20260227141603(@ag-ui/core@0.0.42)(@langchain/community@0.0.53(openai@6.15.0(zod@3.25.76)))(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76)))(@langchain/langgraph@1.0.7(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76)))(zod-to-json-schema@3.24.6(zod@3.25.76))(zod@3.25.76))(langchain@1.2.8(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76)))(openai@6.15.0(zod@3.25.76))(zod-to-json-schema@3.24.6(zod@3.25.76)))(typescript@5.8.3)(zod@3.25.76)': + '@copilotkit/license-verifier@0.2.0': {} + + '@copilotkit/sdk-js@1.57.0(@ag-ui/client@0.0.53)(@ag-ui/core@0.0.53)(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76)))(@langchain/langgraph@1.3.0(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76)))(openai@6.37.0(zod@3.25.76))(zod-to-json-schema@3.24.6(zod@3.25.76))(zod@3.25.76))(langchain@1.4.0(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76)))(openai@6.37.0(zod@3.25.76))(zod-to-json-schema@3.24.6(zod@3.25.76)))(openai@6.37.0(zod@3.25.76))(typescript@5.8.3)(zod-to-json-schema@3.24.6(zod@3.25.76))(zod@3.25.76)': dependencies: - '@copilotkit/shared': 0.0.0-mme-ag-ui-0-0-46-20260227141603(@ag-ui/core@0.0.42) - '@langchain/community': 0.0.53(openai@6.15.0(zod@3.25.76)) - '@langchain/core': 1.1.7(openai@6.15.0(zod@3.25.76)) - '@langchain/langgraph': 1.0.7(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76)))(zod-to-json-schema@3.24.6(zod@3.25.76))(zod@3.25.76) - langchain: 1.2.8(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76)))(openai@6.15.0(zod@3.25.76))(zod-to-json-schema@3.24.6(zod@3.25.76)) + '@ag-ui/langgraph': 0.0.31(@ag-ui/client@0.0.53)(@ag-ui/core@0.0.53)(openai@6.37.0(zod@3.25.76))(zod-to-json-schema@3.24.6(zod@3.25.76)) + '@copilotkit/shared': 1.57.0(@ag-ui/core@0.0.53) + '@langchain/core': 1.1.45(openai@6.37.0(zod@3.25.76)) + '@langchain/langgraph': 1.3.0(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76)))(openai@6.37.0(zod@3.25.76))(zod-to-json-schema@3.24.6(zod@3.25.76))(zod@3.25.76) + langchain: 1.4.0(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76)))(openai@6.37.0(zod@3.25.76))(zod-to-json-schema@3.24.6(zod@3.25.76)) typescript: 5.8.3 zod: 3.25.76 transitivePeerDependencies: + - '@ag-ui/client' - '@ag-ui/core' + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' - encoding + - openai + - react + - react-dom + - svelte + - vue + - ws + - zod-to-json-schema - 
'@copilotkit/shared@0.0.0-mme-ag-ui-0-0-46-20260227141603(@ag-ui/core@0.0.42)': + '@copilotkit/shared@1.57.0(@ag-ui/core@0.0.53)': dependencies: - '@ag-ui/core': 0.0.42 + '@ag-ui/client': 0.0.53 + '@ag-ui/core': 0.0.53 + '@copilotkit/license-verifier': 0.2.0 '@segment/analytics-node': 2.3.0 + '@standard-schema/spec': 1.1.0 chalk: 4.1.2 graphql: 16.12.0 - uuid: 10.0.0 - zod: 3.25.76 - transitivePeerDependencies: - - encoding - - '@google/generative-ai@0.24.1': {} - - '@langchain/anthropic@0.3.34(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76)))(zod@3.25.76)': - dependencies: - '@anthropic-ai/sdk': 0.65.0(zod@3.25.76) - '@langchain/core': 1.1.7(openai@6.15.0(zod@3.25.76)) - fast-xml-parser: 4.5.6 - transitivePeerDependencies: - - zod - - '@langchain/community@0.0.53(openai@6.15.0(zod@3.25.76))': - dependencies: - '@langchain/core': 0.1.63(openai@6.15.0(zod@3.25.76)) - '@langchain/openai': 0.0.34 - expr-eval: 2.0.2 - flat: 5.0.2 - langsmith: 0.1.68(openai@6.15.0(zod@3.25.76)) - uuid: 9.0.1 + partial-json: 0.1.7 + uuid: 11.1.0 zod: 3.25.76 zod-to-json-schema: 3.24.6(zod@3.25.76) transitivePeerDependencies: - encoding - - openai - '@langchain/core@0.1.63(openai@4.104.0(zod@3.25.76))': - dependencies: - ansi-styles: 5.2.0 - camelcase: 6.3.0 - decamelize: 1.2.0 - js-tiktoken: 1.0.20 - langsmith: 0.1.68(openai@4.104.0(zod@3.25.76)) - ml-distance: 4.0.1 - mustache: 4.2.0 - p-queue: 6.6.2 - p-retry: 4.6.2 - uuid: 9.0.1 - zod: 3.25.76 - zod-to-json-schema: 3.24.6(zod@3.25.76) - transitivePeerDependencies: - - openai + '@google/generative-ai@0.24.1': {} - '@langchain/core@0.1.63(openai@6.15.0(zod@3.25.76))': + '@langchain/anthropic@1.3.29(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76)))': dependencies: - ansi-styles: 5.2.0 - camelcase: 6.3.0 - decamelize: 1.2.0 - js-tiktoken: 1.0.20 - langsmith: 0.1.68(openai@6.15.0(zod@3.25.76)) - ml-distance: 4.0.1 - mustache: 4.2.0 - p-queue: 6.6.2 - p-retry: 4.6.2 - uuid: 9.0.1 + '@anthropic-ai/sdk': 0.91.1(zod@3.25.76) + '@langchain/core': 1.1.45(openai@6.37.0(zod@3.25.76)) zod: 3.25.76 - zod-to-json-schema: 3.24.6(zod@3.25.76) - transitivePeerDependencies: - - openai - '@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76))': + '@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76))': dependencies: '@cfworker/json-schema': 4.1.1 + '@standard-schema/spec': 1.1.0 ansi-styles: 5.2.0 camelcase: 6.3.0 decamelize: 1.2.0 js-tiktoken: 1.0.20 - langsmith: 0.4.0(openai@6.15.0(zod@3.25.76)) + langsmith: 0.6.2(openai@6.37.0(zod@3.25.76)) mustache: 4.2.0 p-queue: 6.6.2 - uuid: 10.0.0 zod: 3.25.76 transitivePeerDependencies: - '@opentelemetry/api' - '@opentelemetry/exporter-trace-otlp-proto' - '@opentelemetry/sdk-trace-base' - openai + - ws - '@langchain/google-genai@0.2.18(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76)))': + '@langchain/google-genai@1.0.3(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76)))': dependencies: '@google/generative-ai': 0.24.1 - '@langchain/core': 1.1.7(openai@6.15.0(zod@3.25.76)) + '@langchain/core': 1.1.45(openai@6.37.0(zod@3.25.76)) uuid: 11.1.0 - '@langchain/langgraph-checkpoint@1.0.0(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76)))': + '@langchain/langgraph-checkpoint@1.0.2(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76)))': dependencies: - '@langchain/core': 1.1.7(openai@6.15.0(zod@3.25.76)) + '@langchain/core': 1.1.45(openai@6.37.0(zod@3.25.76)) uuid: 10.0.0 - '@langchain/langgraph-sdk@1.3.1(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76)))': + '@langchain/langgraph-sdk@1.9.1(openai@6.37.0(zod@3.25.76))': dependencies: - p-queue: 6.6.2 - 
p-retry: 4.6.2 - uuid: 9.0.1 - optionalDependencies: - '@langchain/core': 1.1.7(openai@6.15.0(zod@3.25.76)) + '@langchain/core': 1.1.45(openai@6.37.0(zod@3.25.76)) + '@langchain/protocol': 0.0.15 + '@types/json-schema': 7.0.15 + p-queue: 9.2.0 + p-retry: 7.1.1 + uuid: 13.0.2 + transitivePeerDependencies: + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' + - openai + - ws - '@langchain/langgraph@1.0.7(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76)))(zod-to-json-schema@3.24.6(zod@3.25.76))(zod@3.25.76)': + '@langchain/langgraph@1.3.0(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76)))(openai@6.37.0(zod@3.25.76))(zod-to-json-schema@3.24.6(zod@3.25.76))(zod@3.25.76)': dependencies: - '@langchain/core': 1.1.7(openai@6.15.0(zod@3.25.76)) - '@langchain/langgraph-checkpoint': 1.0.0(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76))) - '@langchain/langgraph-sdk': 1.3.1(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76))) + '@langchain/core': 1.1.45(openai@6.37.0(zod@3.25.76)) + '@langchain/langgraph-checkpoint': 1.0.2(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76))) + '@langchain/langgraph-sdk': 1.9.1(openai@6.37.0(zod@3.25.76)) + '@langchain/protocol': 0.0.15 + '@standard-schema/spec': 1.1.0 uuid: 10.0.0 zod: 3.25.76 optionalDependencies: zod-to-json-schema: 3.24.6(zod@3.25.76) transitivePeerDependencies: + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' + - openai - react - react-dom - - '@langchain/openai@0.0.34': - dependencies: - '@langchain/core': 0.1.63(openai@4.104.0(zod@3.25.76)) - js-tiktoken: 1.0.20 - openai: 4.104.0(zod@3.25.76) - zod: 3.25.76 - zod-to-json-schema: 3.24.6(zod@3.25.76) - transitivePeerDependencies: - - encoding + - svelte + - vue - ws - '@langchain/openai@1.2.0(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76)))': + '@langchain/openai@1.4.5(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76)))': dependencies: - '@langchain/core': 1.1.7(openai@6.15.0(zod@3.25.76)) + '@langchain/core': 1.1.45(openai@6.37.0(zod@3.25.76)) js-tiktoken: 1.0.20 - openai: 6.15.0(zod@3.25.76) + openai: 6.37.0(zod@3.25.76) zod: 3.25.76 transitivePeerDependencies: - ws + '@langchain/protocol@0.0.15': {} + '@lukeed/csprng@1.1.0': {} '@lukeed/uuid@2.0.1': dependencies: '@lukeed/csprng': 1.1.0 + '@protobuf-ts/protoc@2.11.1': {} + '@segment/analytics-core@1.8.2': dependencies: '@lukeed/uuid': 2.0.1 @@ -1032,53 +633,29 @@ snapshots: transitivePeerDependencies: - encoding - '@types/node-fetch@2.6.13': - dependencies: - '@types/node': 20.19.9 - form-data: 4.0.5 + '@standard-schema/spec@1.1.0': {} - '@types/node@18.19.130': - dependencies: - undici-types: 5.26.5 + '@types/json-schema@7.0.15': {} '@types/node@20.19.9': dependencies: undici-types: 6.21.0 - '@types/retry@0.12.0': {} - '@types/uuid@10.0.0': {} - abort-controller@3.0.0: - dependencies: - event-target-shim: 5.0.1 - - agentkeepalive@4.6.0: - dependencies: - humanize-ms: 1.2.1 - ansi-styles@4.3.0: dependencies: color-convert: 2.0.1 ansi-styles@5.2.0: {} - asynckit@0.4.0: {} - base64-js@1.5.1: {} - binary-search@1.3.6: {} - buffer@6.0.3: dependencies: base64-js: 1.5.1 ieee754: 1.2.1 - call-bind-apply-helpers@1.0.2: - dependencies: - es-errors: 1.3.0 - function-bind: 1.1.2 - camelcase@6.3.0: {} chalk@4.1.2: @@ -1092,115 +669,27 @@ snapshots: color-name@1.1.4: {} - combined-stream@1.0.8: - dependencies: - delayed-stream: 1.0.0 - - commander@10.0.1: {} - - console-table-printer@2.14.6: - dependencies: - simple-wcswidth: 1.1.2 + 
compare-versions@6.1.1: {} decamelize@1.2.0: {} - delayed-stream@1.0.0: {} - dotenv@16.6.1: {} dset@3.1.4: {} - dunder-proto@1.0.1: - dependencies: - call-bind-apply-helpers: 1.0.2 - es-errors: 1.3.0 - gopd: 1.2.0 - - es-define-property@1.0.1: {} - - es-errors@1.3.0: {} - - es-object-atoms@1.1.1: - dependencies: - es-errors: 1.3.0 - - es-set-tostringtag@2.1.0: - dependencies: - es-errors: 1.3.0 - get-intrinsic: 1.3.0 - has-tostringtag: 1.0.2 - hasown: 2.0.2 - - event-target-shim@5.0.1: {} - eventemitter3@4.0.7: {} - expr-eval@2.0.2: {} - - fast-xml-parser@4.5.6: - dependencies: - strnum: 1.1.2 - - flat@5.0.2: {} - - form-data-encoder@1.7.2: {} - - form-data@4.0.5: - dependencies: - asynckit: 0.4.0 - combined-stream: 1.0.8 - es-set-tostringtag: 2.1.0 - hasown: 2.0.2 - mime-types: 2.1.35 - - formdata-node@4.4.1: - dependencies: - node-domexception: 1.0.0 - web-streams-polyfill: 4.0.0-beta.3 + eventemitter3@5.0.4: {} - function-bind@1.1.2: {} - - get-intrinsic@1.3.0: - dependencies: - call-bind-apply-helpers: 1.0.2 - es-define-property: 1.0.1 - es-errors: 1.3.0 - es-object-atoms: 1.1.1 - function-bind: 1.1.2 - get-proto: 1.0.1 - gopd: 1.2.0 - has-symbols: 1.1.0 - hasown: 2.0.2 - math-intrinsics: 1.1.0 - - get-proto@1.0.1: - dependencies: - dunder-proto: 1.0.1 - es-object-atoms: 1.1.1 - - gopd@1.2.0: {} + fast-json-patch@3.1.1: {} graphql@16.12.0: {} has-flag@4.0.0: {} - has-symbols@1.1.0: {} - - has-tostringtag@1.0.2: - dependencies: - has-symbols: 1.1.0 - - hasown@2.0.2: - dependencies: - function-bind: 1.1.2 - - humanize-ms@1.2.1: - dependencies: - ms: 2.1.3 - ieee754@1.2.1: {} - is-any-array@2.0.1: {} + is-network-error@1.3.1: {} jose@5.10.0: {} @@ -1213,13 +702,12 @@ snapshots: '@babel/runtime': 7.29.2 ts-algebra: 2.0.0 - langchain@1.2.8(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76)))(openai@6.15.0(zod@3.25.76))(zod-to-json-schema@3.24.6(zod@3.25.76)): + langchain@1.4.0(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76)))(openai@6.37.0(zod@3.25.76))(zod-to-json-schema@3.24.6(zod@3.25.76)): dependencies: - '@langchain/core': 1.1.7(openai@6.15.0(zod@3.25.76)) - '@langchain/langgraph': 1.0.7(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76)))(zod-to-json-schema@3.24.6(zod@3.25.76))(zod@3.25.76) - '@langchain/langgraph-checkpoint': 1.0.0(@langchain/core@1.1.7(openai@6.15.0(zod@3.25.76))) - langsmith: 0.4.0(openai@6.15.0(zod@3.25.76)) - uuid: 10.0.0 + '@langchain/core': 1.1.45(openai@6.37.0(zod@3.25.76)) + '@langchain/langgraph': 1.3.0(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76)))(openai@6.37.0(zod@3.25.76))(zod-to-json-schema@3.24.6(zod@3.25.76))(zod@3.25.76) + '@langchain/langgraph-checkpoint': 1.0.2(@langchain/core@1.1.45(openai@6.37.0(zod@3.25.76))) + langsmith: 0.6.2(openai@6.37.0(zod@3.25.76)) zod: 3.25.76 transitivePeerDependencies: - '@opentelemetry/api' @@ -1228,97 +716,24 @@ snapshots: - openai - react - react-dom + - svelte + - vue + - ws - zod-to-json-schema - langsmith@0.1.68(openai@4.104.0(zod@3.25.76)): + langsmith@0.6.2(openai@6.37.0(zod@3.25.76)): dependencies: - '@types/uuid': 10.0.0 - commander: 10.0.1 p-queue: 6.6.2 - p-retry: 4.6.2 - semver: 7.7.2 - uuid: 10.0.0 - optionalDependencies: - openai: 4.104.0(zod@3.25.76) - - langsmith@0.1.68(openai@6.15.0(zod@3.25.76)): - dependencies: - '@types/uuid': 10.0.0 - commander: 10.0.1 - p-queue: 6.6.2 - p-retry: 4.6.2 - semver: 7.7.2 - uuid: 10.0.0 optionalDependencies: - openai: 6.15.0(zod@3.25.76) - - langsmith@0.4.0(openai@6.15.0(zod@3.25.76)): - dependencies: - '@types/uuid': 10.0.0 - chalk: 4.1.2 - console-table-printer: 
2.14.6 - p-queue: 6.6.2 - semver: 7.7.2 - uuid: 10.0.0 - optionalDependencies: - openai: 6.15.0(zod@3.25.76) - - math-intrinsics@1.1.0: {} - - mime-db@1.52.0: {} - - mime-types@2.1.35: - dependencies: - mime-db: 1.52.0 - - ml-array-mean@1.1.6: - dependencies: - ml-array-sum: 1.1.6 - - ml-array-sum@1.1.6: - dependencies: - is-any-array: 2.0.1 - - ml-distance-euclidean@2.0.0: {} - - ml-distance@4.0.1: - dependencies: - ml-array-mean: 1.1.6 - ml-distance-euclidean: 2.0.0 - ml-tree-similarity: 1.0.0 - - ml-tree-similarity@1.0.0: - dependencies: - binary-search: 1.3.6 - num-sort: 2.1.0 - - ms@2.1.3: {} + openai: 6.37.0(zod@3.25.76) mustache@4.2.0: {} - node-domexception@1.0.0: {} - node-fetch@2.7.0: dependencies: whatwg-url: 5.0.0 - num-sort@2.1.0: {} - - openai@4.104.0(zod@3.25.76): - dependencies: - '@types/node': 18.19.130 - '@types/node-fetch': 2.6.13 - abort-controller: 3.0.0 - agentkeepalive: 4.6.0 - form-data-encoder: 1.7.2 - formdata-node: 4.4.1 - node-fetch: 2.7.0 - optionalDependencies: - zod: 3.25.76 - transitivePeerDependencies: - - encoding - - openai@6.15.0(zod@3.25.76): + openai@6.37.0(zod@3.25.76): optionalDependencies: zod: 3.25.76 @@ -1329,27 +744,27 @@ snapshots: eventemitter3: 4.0.7 p-timeout: 3.2.0 - p-retry@4.6.2: + p-queue@9.2.0: + dependencies: + eventemitter3: 5.0.4 + p-timeout: 7.0.1 + + p-retry@7.1.1: dependencies: - '@types/retry': 0.12.0 - retry: 0.13.1 + is-network-error: 1.3.1 p-timeout@3.2.0: dependencies: p-finally: 1.0.0 - retry@0.13.1: {} + p-timeout@7.0.1: {} + + partial-json@0.1.7: {} rxjs@7.8.1: dependencies: tslib: 2.8.1 - semver@7.7.2: {} - - simple-wcswidth@1.1.2: {} - - strnum@1.1.2: {} - supports-color@7.2.0: dependencies: has-flag: 4.0.0 @@ -1362,17 +777,15 @@ snapshots: typescript@5.8.3: {} - undici-types@5.26.5: {} - undici-types@6.21.0: {} + untruncate-json@0.0.1: {} + uuid@10.0.0: {} uuid@11.1.0: {} - uuid@9.0.1: {} - - web-streams-polyfill@4.0.0-beta.3: {} + uuid@13.0.2: {} webidl-conversions@3.0.1: {} diff --git a/integrations/langgraph/typescript/examples/src/agents/agentic_chat/agent.ts b/integrations/langgraph/typescript/examples/src/agents/agentic_chat/agent.ts index 9cea9af813..c166da6f01 100644 --- a/integrations/langgraph/typescript/examples/src/agents/agentic_chat/agent.ts +++ b/integrations/langgraph/typescript/examples/src/agents/agentic_chat/agent.ts @@ -9,6 +9,7 @@ import { createAgent } from "langchain"; import { MemorySaver } from "@langchain/langgraph"; import { copilotkitMiddleware } from "@copilotkit/sdk-js/langgraph"; +import { aguiTransformer } from "@ag-ui/langgraph/transformer"; const checkpointer = new MemorySaver(); @@ -17,5 +18,6 @@ export const agenticChatGraph = createAgent({ tools: [], // Backend tools go here middleware: [copilotkitMiddleware], systemPrompt: "You are a helpful assistant.", - checkpointer + checkpointer, + streamTransformers: [aguiTransformer], }); diff --git a/integrations/langgraph/typescript/examples/src/agents/agentic_chat_multimodal/agent.ts b/integrations/langgraph/typescript/examples/src/agents/agentic_chat_multimodal/agent.ts index e0e9c2e89a..1e6873d613 100644 --- a/integrations/langgraph/typescript/examples/src/agents/agentic_chat_multimodal/agent.ts +++ b/integrations/langgraph/typescript/examples/src/agents/agentic_chat_multimodal/agent.ts @@ -51,6 +51,7 @@ import { ChatOpenAI } from "@langchain/openai"; import { SystemMessage } from "@langchain/core/messages"; import { RunnableConfig } from "@langchain/core/runnables"; import { Annotation, MessagesAnnotation, StateGraph, Command, START, 
END } from "@langchain/langgraph"; +import { aguiTransformer } from "@ag-ui/langgraph/transformer"; const AgentStateAnnotation = Annotation.Root({ tools: Annotation({ @@ -115,4 +116,6 @@ const workflow = new StateGraph(AgentStateAnnotation) .addEdge("visionChatNode", END); // Compile the graph -export const agenticChatMultimodalGraph = workflow.compile(); +export const agenticChatMultimodalGraph = workflow.compile({ + transformers: [aguiTransformer], +}); diff --git a/integrations/langgraph/typescript/examples/src/agents/agentic_chat_reasoning/agent.ts b/integrations/langgraph/typescript/examples/src/agents/agentic_chat_reasoning/agent.ts index 01dcdccde6..e3010310f8 100644 --- a/integrations/langgraph/typescript/examples/src/agents/agentic_chat_reasoning/agent.ts +++ b/integrations/langgraph/typescript/examples/src/agents/agentic_chat_reasoning/agent.ts @@ -15,6 +15,7 @@ import { ChatGoogleGenerativeAI } from "@langchain/google-genai"; import { SystemMessage } from "@langchain/core/messages"; import { RunnableConfig } from "@langchain/core/runnables"; import { Annotation, MessagesAnnotation, StateGraph, Command, START, END } from "@langchain/langgraph"; +import { aguiTransformer } from "@ag-ui/langgraph/transformer"; const AgentStateAnnotation = Annotation.Root({ tools: Annotation({ @@ -99,4 +100,6 @@ const workflow = new StateGraph(AgentStateAnnotation) .addEdge("chatNode", END); // Compile the graph -export const agenticChatReasoningGraph = workflow.compile(); +export const agenticChatReasoningGraph = workflow.compile({ + transformers: [aguiTransformer], +}); diff --git a/integrations/langgraph/typescript/examples/src/agents/agentic_generative_ui/agent.ts b/integrations/langgraph/typescript/examples/src/agents/agentic_generative_ui/agent.ts index 72da723859..a37940f956 100644 --- a/integrations/langgraph/typescript/examples/src/agents/agentic_generative_ui/agent.ts +++ b/integrations/langgraph/typescript/examples/src/agents/agentic_generative_ui/agent.ts @@ -7,6 +7,7 @@ import { SystemMessage } from "@langchain/core/messages"; import { RunnableConfig } from "@langchain/core/runnables"; import { dispatchCustomEvent } from "@langchain/core/callbacks/dispatch"; import { Annotation, Command, MessagesAnnotation, StateGraph, END } from "@langchain/langgraph"; +import { aguiTransformer } from "@ag-ui/langgraph/transformer"; // This tool simulates performing a task on the server. // The tool call will be streamed to the frontend as it is being generated. 
@@ -174,4 +175,6 @@ const workflow = new StateGraph(AgentStateAnnotation) .addEdge("chat_node", "__end__"); // Compile the graph -export const agenticGenerativeUiGraph = workflow.compile(); \ No newline at end of file +export const agenticGenerativeUiGraph = workflow.compile({ + transformers: [aguiTransformer], +}); \ No newline at end of file diff --git a/integrations/langgraph/typescript/examples/src/agents/backend_tool_rendering/agent.ts b/integrations/langgraph/typescript/examples/src/agents/backend_tool_rendering/agent.ts index cdb972499b..af73b68060 100644 --- a/integrations/langgraph/typescript/examples/src/agents/backend_tool_rendering/agent.ts +++ b/integrations/langgraph/typescript/examples/src/agents/backend_tool_rendering/agent.ts @@ -6,6 +6,7 @@ import { ChatOpenAI } from "@langchain/openai"; import { SystemMessage } from "@langchain/core/messages"; import { RunnableConfig } from "@langchain/core/runnables"; import { Annotation, MessagesAnnotation, StateGraph, Command, START, END } from "@langchain/langgraph"; +import { aguiTransformer } from "@ag-ui/langgraph/transformer"; const AgentStateAnnotation = Annotation.Root({ tools: Annotation({ @@ -77,4 +78,6 @@ const workflow = new StateGraph(AgentStateAnnotation) .addEdge(START, "chat_node"); // Compile the graph -export const agenticChatGraph = workflow.compile(); \ No newline at end of file +export const agenticChatGraph = workflow.compile({ + transformers: [aguiTransformer], +}); \ No newline at end of file diff --git a/integrations/langgraph/typescript/examples/src/agents/human_in_the_loop/agent.ts b/integrations/langgraph/typescript/examples/src/agents/human_in_the_loop/agent.ts index faafa00704..70c7c52ffc 100644 --- a/integrations/langgraph/typescript/examples/src/agents/human_in_the_loop/agent.ts +++ b/integrations/langgraph/typescript/examples/src/agents/human_in_the_loop/agent.ts @@ -6,6 +6,7 @@ import { ChatOpenAI } from "@langchain/openai"; import { SystemMessage } from "@langchain/core/messages"; import { RunnableConfig } from "@langchain/core/runnables"; import { Command, interrupt, Annotation, MessagesAnnotation, StateGraph, END, START } from "@langchain/langgraph"; +import { aguiTransformer } from "@ag-ui/langgraph/transformer"; const DEFINE_TASK_TOOL = { type: "function", @@ -272,4 +273,6 @@ workflow.addConditionalEdges( ); // Compile the graph -export const humanInTheLoopGraph = workflow.compile(); \ No newline at end of file +export const humanInTheLoopGraph = workflow.compile({ + transformers: [aguiTransformer], +}); \ No newline at end of file diff --git a/integrations/langgraph/typescript/examples/src/agents/multimodal_messages/agent.ts b/integrations/langgraph/typescript/examples/src/agents/multimodal_messages/agent.ts index f6c0ac7dad..ee5d78d6a4 100644 --- a/integrations/langgraph/typescript/examples/src/agents/multimodal_messages/agent.ts +++ b/integrations/langgraph/typescript/examples/src/agents/multimodal_messages/agent.ts @@ -51,6 +51,7 @@ import { ChatOpenAI } from "@langchain/openai"; import { SystemMessage } from "@langchain/core/messages"; import { RunnableConfig } from "@langchain/core/runnables"; import { Annotation, MessagesAnnotation, StateGraph, Command, START, END } from "@langchain/langgraph"; +import { aguiTransformer } from "@ag-ui/langgraph/transformer"; const AgentStateAnnotation = Annotation.Root({ tools: Annotation({ @@ -117,4 +118,6 @@ const workflow = new StateGraph(AgentStateAnnotation) .addEdge("visionChatNode", END); // Compile the graph -export const graph = workflow.compile(); 
+export const graph = workflow.compile({ + transformers: [aguiTransformer], +}); diff --git a/integrations/langgraph/typescript/examples/src/agents/predictive_state_updates/agent.ts b/integrations/langgraph/typescript/examples/src/agents/predictive_state_updates/agent.ts index 5ffece9636..6c3ec388f4 100644 --- a/integrations/langgraph/typescript/examples/src/agents/predictive_state_updates/agent.ts +++ b/integrations/langgraph/typescript/examples/src/agents/predictive_state_updates/agent.ts @@ -7,6 +7,7 @@ import { ChatOpenAI } from "@langchain/openai"; import { SystemMessage } from "@langchain/core/messages"; import { RunnableConfig } from "@langchain/core/runnables"; import { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from "@langchain/langgraph"; +import { aguiTransformer } from "@ag-ui/langgraph/transformer"; const WRITE_DOCUMENT_TOOL = { type: "function", @@ -153,4 +154,6 @@ workflow.addEdge(START, "chat_node"); workflow.addEdge("chat_node", END); // Compile the graph -export const predictiveStateUpdatesGraph = workflow.compile(); \ No newline at end of file +export const predictiveStateUpdatesGraph = workflow.compile({ + transformers: [aguiTransformer], +}); \ No newline at end of file diff --git a/integrations/langgraph/typescript/examples/src/agents/shared_state/agent.ts b/integrations/langgraph/typescript/examples/src/agents/shared_state/agent.ts index 8626a907bb..b65d320d50 100644 --- a/integrations/langgraph/typescript/examples/src/agents/shared_state/agent.ts +++ b/integrations/langgraph/typescript/examples/src/agents/shared_state/agent.ts @@ -7,6 +7,7 @@ import { SystemMessage } from "@langchain/core/messages"; import { RunnableConfig } from "@langchain/core/runnables"; import { dispatchCustomEvent } from "@langchain/core/callbacks/dispatch"; import { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from "@langchain/langgraph"; +import { aguiTransformer } from "@ag-ui/langgraph/transformer"; enum SkillLevel { BEGINNER = "Beginner", @@ -278,4 +279,6 @@ workflow.addEdge("start_flow", "chat_node"); workflow.addEdge("chat_node", END); // Compile the graph -export const sharedStateGraph = workflow.compile(); \ No newline at end of file +export const sharedStateGraph = workflow.compile({ + transformers: [aguiTransformer], +}); \ No newline at end of file diff --git a/integrations/langgraph/typescript/examples/src/agents/subgraphs/agent.ts b/integrations/langgraph/typescript/examples/src/agents/subgraphs/agent.ts index ddbc1a3974..cbc133b7bb 100644 --- a/integrations/langgraph/typescript/examples/src/agents/subgraphs/agent.ts +++ b/integrations/langgraph/typescript/examples/src/agents/subgraphs/agent.ts @@ -6,15 +6,16 @@ import { ChatOpenAI } from "@langchain/openai"; import { SystemMessage, AIMessage, ToolMessage } from "@langchain/core/messages"; import { RunnableConfig } from "@langchain/core/runnables"; -import { - Annotation, - MessagesAnnotation, - StateGraph, - Command, - START, - END, - interrupt +import { + Annotation, + MessagesAnnotation, + StateGraph, + Command, + START, + END, + interrupt } from "@langchain/langgraph"; +import { aguiTransformer } from "@ag-ui/langgraph/transformer"; // Travel data interfaces interface Flight { @@ -384,4 +385,6 @@ workflow.addEdge("hotels_agent", "supervisor"); workflow.addEdge("experiences_agent", "supervisor"); // Compile the graph -export const subGraphsAgentGraph = workflow.compile(); +export const subGraphsAgentGraph = workflow.compile({ + transformers: [aguiTransformer], +}); diff --git 
a/integrations/langgraph/typescript/examples/src/agents/tool_based_generative_ui/agent.ts b/integrations/langgraph/typescript/examples/src/agents/tool_based_generative_ui/agent.ts index cfe8a66d4b..57700825c7 100644 --- a/integrations/langgraph/typescript/examples/src/agents/tool_based_generative_ui/agent.ts +++ b/integrations/langgraph/typescript/examples/src/agents/tool_based_generative_ui/agent.ts @@ -6,6 +6,7 @@ import { ChatOpenAI } from "@langchain/openai"; import { SystemMessage } from "@langchain/core/messages"; import { RunnableConfig } from "@langchain/core/runnables"; import { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from "@langchain/langgraph"; +import { aguiTransformer } from "@ag-ui/langgraph/transformer"; export const AgentStateAnnotation = Annotation.Root({ @@ -46,4 +47,6 @@ workflow.addNode("chat_node", chatNode); workflow.addEdge(START, "chat_node"); -export const toolBasedGenerativeUiGraph = workflow.compile(); \ No newline at end of file +export const toolBasedGenerativeUiGraph = workflow.compile({ + transformers: [aguiTransformer], +}); \ No newline at end of file diff --git a/integrations/langgraph/typescript/package.json b/integrations/langgraph/typescript/package.json index f5fc0007ad..f222b1cfa7 100644 --- a/integrations/langgraph/typescript/package.json +++ b/integrations/langgraph/typescript/package.json @@ -26,15 +26,16 @@ "unlink:global": "pnpm unlink --global" }, "dependencies": { - "@langchain/core": "^1.1.40", - "@langchain/langgraph-sdk": "^1.8.8", + "@langchain/core": "^1.1.45", + "@langchain/langgraph-sdk": "^1.9.2", "langchain": ">=1.2.0", "partial-json": "^0.1.7", "rxjs": "7.8.1" }, "peerDependencies": { "@ag-ui/core": ">=0.0.42", - "@ag-ui/client": ">=0.0.42" + "@ag-ui/client": ">=0.0.42", + "@langchain/langgraph": "^1.3.0" }, "devDependencies": { "@ag-ui/core": "workspace:*", @@ -49,12 +50,16 @@ }, "exports": { ".": { - "import": "./dist/index.mjs", - "require": "./dist/index.js" + "require": "./dist/index.js", + "import": "./dist/index.mjs" }, "./middlewares": { - "import": "./dist/middlewares/index.mjs", - "require": "./dist/middlewares/index.js" + "require": "./dist/middlewares/index.js", + "import": "./dist/middlewares/index.mjs" + }, + "./transformer": { + "require": "./dist/transformer/index.js", + "import": "./dist/transformer/index.mjs" }, "./package.json": "./package.json" } diff --git a/integrations/langgraph/typescript/src/agent.ts b/integrations/langgraph/typescript/src/agent.ts index 568334538c..618d216d12 100644 --- a/integrations/langgraph/typescript/src/agent.ts +++ b/integrations/langgraph/typescript/src/agent.ts @@ -10,6 +10,8 @@ import { Config, Interrupt, Thread, + ThreadStream, + SubscriptionHandle, } from "@langchain/langgraph-sdk"; import { randomUUID } from "@ag-ui/client"; import { @@ -25,36 +27,14 @@ import { PredictStateTool, LangGraphReasoning, StateEnrichment, - LangGraphToolWithName, + LangGraphToolWithName, ProcessedEvents, } from "./types"; import { AbstractAgent, AgentConfig, - CustomEvent, EventType, - MessagesSnapshotEvent, - RawEvent, RunAgentInput, RunErrorEvent, - RunFinishedEvent, - RunStartedEvent, - StateDeltaEvent, - StateSnapshotEvent, - StepFinishedEvent, - StepStartedEvent, - TextMessageContentEvent, - TextMessageEndEvent, - TextMessageStartEvent, - ToolCallArgsEvent, - ToolCallEndEvent, - ToolCallStartEvent, - ToolCallResultEvent, - ReasoningStartEvent, - ReasoningMessageStartEvent, - ReasoningMessageContentEvent, - ReasoningMessageEndEvent, - ReasoningEndEvent, - 
ReasoningEncryptedValueEvent, } from "@ag-ui/client"; import { RunsStreamPayload } from "@langchain/langgraph-sdk/dist/types"; import { @@ -67,33 +47,9 @@ import { resolveReasoningContent, resolveEncryptedReasoningContent, } from "@/utils"; -import { ToolMessage } from "@langchain/core/messages"; -import { ToolMessageFieldsWithToolCallId } from "@langchain/core/dist/messages/tool"; - -export type ProcessedEvents = - | TextMessageStartEvent - | TextMessageContentEvent - | TextMessageEndEvent - | ReasoningStartEvent - | ReasoningMessageStartEvent - | ReasoningMessageContentEvent - | ReasoningMessageEndEvent - | ReasoningEndEvent - | ReasoningEncryptedValueEvent - | ToolCallStartEvent - | ToolCallArgsEvent - | ToolCallEndEvent - | ToolCallResultEvent - | StateSnapshotEvent - | StateDeltaEvent - | MessagesSnapshotEvent - | RawEvent - | CustomEvent - | RunStartedEvent - | RunFinishedEvent - | RunErrorEvent - | StepStartedEvent - | StepFinishedEvent; +// `ToolMessageFields` already carries `tool_call_id` — the older +// `…WithToolCallId` alias was removed from `@langchain/core`. +import type { ToolMessageFields } from "@langchain/core/dist/messages/tool"; type RunAgentExtendedInput< TStreamMode extends StreamMode | StreamMode[] = StreamMode, @@ -109,6 +65,81 @@ interface RegenerateInput extends RunAgentExtendedInput { messageCheckpoint: LangGraphMessage; } +/** + * Cached per-thread (ThreadStream, custom:agui subscription) pair. + * + * The agui subscription is opened once and reused across every run on a + * given thread, so server-side `record.queuedEvents` replay never lands + * on a fresh sink. `submitRun`'s `#prepareForNextRun` auto-resumes + * the sub between runs. + */ +export interface TransformerThreadEntry { + thread: ThreadStream; + aguiSub: SubscriptionHandle; +} + +/** + * AI message shape we care about in the sanitizer (the actual SDK type + * is a discriminated union over message roles). + */ +type AssistantContentBlock = { type?: string; [key: string]: unknown }; +type AssistantResponseMetadata = { output_version?: string; [key: string]: unknown }; +type AssistantMessageLike = LangGraphMessage & { + content?: string | AssistantContentBlock[]; + response_metadata?: AssistantResponseMetadata; +}; + +/** + * Defensive cleanup for assistant messages being re-sent to the model. + * + * Two upstream issues we sidestep: + * + * 1. CopilotKit reconstructs assistant messages from TOOL_CALL_* events + * and stuffs `tool_call` blocks back into the message `content` + * array. langchain 1.4 + OpenAI reject that shape on the wire; the + * same data already lives on `tool_calls`, so the blocks are pure + * noise. Strip them. If only tool_call blocks remain, collapse + * `content` to "" since an empty content array trips other validators. + * + * 2. langchain-core's AIMessage v1 path (`response_metadata.output_version + * === "v1"`) routes `content` through a `contentBlocks` array that + * langchain-openai's Responses serializer mistypes — prior assistant + * text blocks come back as `input_text` and OpenAI returns a 400. + * Drop the v1 flag from re-sent messages so the legacy content-array + * path is used, which the Responses API accepts. + * + * Pure function — no side effects, no I/O. 
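+ *
+ * @example
+ * // A hedged sketch, not a recorded payload; the field values below are
+ * // hypothetical. A round-tripped assistant message arriving as
+ * //   { type: "ai",
+ * //     content: [{ type: "tool_call", id: "call_1" }],
+ * //     tool_calls: [{ id: "call_1", name: "search", args: {} }],
+ * //     response_metadata: { output_version: "v1" } }
+ * // sanitizes to content: "" and response_metadata: {}, with
+ * // tool_calls passing through untouched.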
+ */
+export function sanitizeAssistantMessages(
+  payloadInput: { messages?: LangGraphMessage[]; [key: string]: unknown } | null | undefined,
+): { messages?: LangGraphMessage[]; [key: string]: unknown } | null | undefined {
+  if (!payloadInput || !payloadInput.messages) return payloadInput;
+  return {
+    ...payloadInput,
+    messages: payloadInput.messages.map((raw) => {
+      if (raw?.type !== "ai") return raw;
+      let next = raw as AssistantMessageLike;
+      if (Array.isArray(next.content)) {
+        const remaining = next.content.filter(
+          (block) => block?.type !== "tool_call",
+        );
+        if (remaining.length !== next.content.length) {
+          next = {
+            ...next,
+            content: remaining.length === 0 ? "" : remaining,
+          };
+        }
+      }
+      const rm = next.response_metadata;
+      if (rm && typeof rm === "object" && "output_version" in rm) {
+        const { output_version: _ov, ...rest } = rm;
+        next = { ...next, response_metadata: rest };
+      }
+      return next as LangGraphMessage;
+    }),
+  };
+}
+
 export interface LangGraphAgentConfig extends AgentConfig {
   client?: LangGraphClient;
   deploymentUrl: string;
@@ -117,6 +148,14 @@ export interface LangGraphAgentConfig extends AgentConfig {
   assistantConfig?: LangGraphConfig;
   agentName?: string;
   graphId: string;
+  /**
+   * Opt into the v3-protocol transformer path. When true, the agent calls
+   * `client.threads.stream(...).run.start(...)` and forwards events from
+   * `thread.extensions.agui` instead of running the legacy translation.
+   * The graph must register the matching `aguiTransformer` at compile-time.
+   * Defaults to true; pass `useTransformer: false` for the legacy translation.
+   */
+  useTransformer?: boolean;
 }

 const ROOT_SUBGRAPH_NAME = "root";
@@ -147,6 +186,19 @@ export class LangGraphAgent extends AbstractAgent {
   subscriber: Subscriber<ProcessedEvents>;
   constantSchemaKeys: string[] = DEFAULT_SCHEMA_KEYS;
   config: LangGraphAgentConfig;
+  useTransformer: boolean;
+  // Per-thread cache of (ThreadStream + custom:agui SubscriptionHandle).
+  // Shared across `clone()`s so each request reuses the same connection
+  // and the same persistent subscription. Reusing matters because:
+  // - Server replays its per-thread `record.queuedEvents` to ANY new
+  //   SSE sink, with no `since` exposed by the SDK. A fresh sub on a
+  //   new ThreadStream therefore receives prior runs' events as replays.
+  // - The persistent sub here was attached BEFORE the first run, so it
+  //   has nothing to replay; subsequent runs reuse it without
+  //   triggering a stream rotation, so no replay occurs.
+  // Pause/resume bracket each run: SDK's `submitRun` calls
+  // `#prepareForNextRun` which auto-resumes the sub.
+  transformerThreads: Map<string, TransformerThreadEntry> = new Map();

   constructor(config: LangGraphAgentConfig) {
     super(config);
@@ -156,6 +208,7 @@
     this.graphId = config.graphId;
     this.assistantConfig = config.assistantConfig;
     this.reasoningProcess = null;
+    this.useTransformer = config.useTransformer ?? true;
     this.client =
       config?.client ??
       new LangGraphClient({
@@ -184,6 +237,9 @@
       cancelSent: this.cancelSent,
       subgraphs: this.subgraphs ? new Set(this.subgraphs) : new Set(),
       currentSubgraph: ROOT_SUBGRAPH_NAME,
+      useTransformer: this.useTransformer,
+      // Share by reference — the cache lives across clones.
+      transformerThreads: this.transformerThreads,
     });
   }

@@ -204,6 +260,99 @@
     });
   }

+  /**
+   * Get-or-create the cached `(ThreadStream, custom:agui subscription)`
+   * pair for a thread. 
Shared across `clone()` instances by reference
+   * (see `clone()`), so every request to a given threadId reuses the
+   * same SSE wire and never receives a server-side replay of prior
+   * runs' events.
+   */
+  protected async acquireTransformerThread(
+    threadId: string,
+  ): Promise<TransformerThreadEntry> {
+    const cached = this.transformerThreads.get(threadId);
+    if (cached) return cached;
+    if (!this.assistant) {
+      this.assistant = await this.getAssistant();
+    }
+    const thread = this.client.threads.stream(threadId, {
+      assistantId: this.assistant.assistant_id,
+    });
+    // Array form so the SDK applies its `unwrapNamedCustom` transform —
+    // for-await yields raw payloads, matching extensions.agui's shape.
+    const aguiSub = (await thread.subscribe([
+      "custom:agui",
+    ])) as SubscriptionHandle;
+    const entry: TransformerThreadEntry = { thread, aguiSub };
+    this.transformerThreads.set(threadId, entry);
+    return entry;
+  }
+
+  /**
+   * Resolve the interrupt to resume against, given a possibly-empty
+   * `streamingThread.interrupts` (populated live by the SDK's
+   * lifecycle watcher) and the server-side `agentState.tasks` array
+   * (a cold-start fallback in case our ThreadStream cache was rebuilt
+   * but the server still has the interrupt parked).
+   *
+   * Returns `undefined` when there's no resume to perform.
+   */
+  protected findPendingInterrupt(
+    streamingThread: ThreadStream,
+    agentState: ThreadState,
+    resumeRequested: boolean,
+  ): { interruptId: string; namespace: readonly string[] } | undefined {
+    if (!resumeRequested) return undefined;
+    const live = (streamingThread as { interrupts?: Array<{ interruptId: string; namespace: readonly string[] }> })
+      .interrupts;
+    const last = live?.[live.length - 1];
+    if (last?.interruptId) {
+      return { interruptId: last.interruptId, namespace: last.namespace };
+    }
+    const fallback = (agentState.tasks ?? [])
+      .flatMap((t: any) =>
+        (t.interrupts ?? []).map((i: any) => ({ task: t, interrupt: i })),
+      )
+      .pop();
+    if (fallback?.interrupt?.id) {
+      return {
+        interruptId: fallback.interrupt.id,
+        namespace: fallback.task?.checkpoint?.checkpoint_ns?.split("|") ?? [],
+      };
+    }
+    return undefined;
+  }
+
+  /**
+   * Register a one-shot listener that pauses the agui subscription when
+   * the run's root lifecycle terminates. `submitRun`'s
+   * `#prepareForNextRun` auto-resumes between runs, so pause is
+   * cheaper than close — the persistent sub stays alive across the
+   * lifetime of the cached ThreadStream.
+   */
+  protected watchForRootTerminal(
+    streamingThread: ThreadStream,
+    aguiSub: SubscriptionHandle,
+  ): () => void {
+    const TERMINAL = new Set(["completed", "failed", "interrupted"]);
+    const unsubscribe = streamingThread.onEvent((event) => {
+      const ev = event as {
+        method?: string;
+        params?: { namespace?: unknown; data?: { event?: string } };
+      };
+      if (
+        ev.method === "lifecycle" &&
+        Array.isArray(ev.params?.namespace) &&
+        ev.params!.namespace.length === 0 &&
+        TERMINAL.has(ev.params!.data?.event ?? "")
+      ) {
+        aguiSub.pause();
+        unsubscribe();
+      }
+    });
+    return unsubscribe;
+  }
+
   async runAgentStream(input: RunAgentExtendedInput, subscriber: Subscriber<ProcessedEvents>) {
     this.activeRun = {
       id: input.runId,
@@ -228,7 +377,11 @@
       return subscriber.error("No stream to regenerate");
     }

-    await this.handleStreamEvents(preparedStream, threadId, subscriber, input, Array.isArray(streamMode) ? 
streamMode : [streamMode]);
+    if (this.useTransformer) {
+      await this.handleTransformerStreamEvents(preparedStream, threadId, subscriber);
+    } else {
+      await this.handleStreamEvents(preparedStream, threadId, subscriber, input, Array.isArray(streamMode) ? streamMode : [streamMode]);
+    }
   }

   async prepareRegenerateStream(input: RegenerateInput, streamMode: StreamMode | StreamMode[]) {
@@ -264,18 +417,50 @@
       });
     }

+    const forkedCheckpointId = (fork as { checkpoint: { checkpoint_id: string } })
+      .checkpoint.checkpoint_id;
+    const regenInput = this.langGraphDefaultMergeState(
+      timeTravelCheckpoint.values,
+      [messageCheckpoint],
+      input,
+    );
     const payload = {
       ...(input.forwardedProps ?? {}),
-      input: this.langGraphDefaultMergeState(
-        timeTravelCheckpoint.values,
-        [messageCheckpoint],
-        input,
-      ),
-      // @ts-ignore
-      checkpointId: fork.checkpoint.checkpoint_id!,
+      input: regenInput,
+      checkpointId: forkedCheckpointId,
       streamMode,
       config: payloadConfig,
     };
+
+    if (this.useTransformer) {
+      // Transformer-path regen: cached ThreadStream + persistent
+      // custom:agui sub, with the fork expressed via v3 `forkFrom` so
+      // the dev server roots the new run at the chosen checkpoint.
+      // Resume semantics don't apply on regen.
+      const sanitizedInput = sanitizeAssistantMessages(regenInput as Record<string, unknown>);
+      const { thread: streamingThread, aguiSub } =
+        await this.acquireTransformerThread(threadId);
+      const unsubscribeOnEvent = this.watchForRootTerminal(streamingThread, aguiSub);
+
+      const submitted = await streamingThread.submitRun({
+        ...(input.forwardedProps ?? {}),
+        input: sanitizedInput,
+        config: payloadConfig,
+        metadata: (input.forwardedProps as { metadata?: Record<string, unknown> })?.metadata,
+        forkFrom: { checkpointId: forkedCheckpointId },
+      });
+      this.activeRun!.id = submitted?.run_id ?? this.activeRun!.id;
+
+      return {
+        streamResponse: aguiSub,
+        state: timeTravelCheckpoint as ThreadState,
+        streamMode,
+        close: () => {
+          unsubscribeOnEvent();
+        },
+      };
+    }
+
     return {
       streamResponse: this.client.runs.stream(threadId, this.assistant.assistant_id, payload),
       state: timeTravelCheckpoint as ThreadState,
@@ -297,6 +482,7 @@
     this.activeRun!.manuallyEmittedState = null;

     const nodeNameInput = forwardedProps?.nodeName;
+
     const threadId = inputThreadId ?? randomUUID();

     if (!this.assistant) {
@@ -334,7 +520,11 @@
     const stateNonSystemCount = agentStateMessages.filter((m: LangGraphPlatformMessage) => m.type !== "system").length;
     const inputNonSystemCount = messages.filter((m) => m.role !== "system").length;

-    if (stateNonSystemCount > inputNonSystemCount) {
+    // HITL resume: server state holds the interrupted-run messages but
+    // the frontend's `messages` array may not yet contain them. Skip
+    // the regenerate branch in this case — the user is responding to
+    // an interrupt, not asking us to fork from an earlier checkpoint. 
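+    // (Illustrative: a resume arrives as `forwardedProps.command =
+    // { resume: <app-defined value> }`, e.g. `{ resume: { approved: true } }`
+    // in an approval flow — the routing tests below exercise this shape.)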
+    if (stateNonSystemCount > inputNonSystemCount && !forwardedProps?.command?.resume) {
       let lastUserMessage: LangGraphMessage | null = null;
       // Find the first user message by working backwards from the last message
       for (let i = messages.length - 1; i >= 0; i--) {
@@ -441,6 +631,54 @@
       return this.subscriber.complete();
     }

+    if (this.useTransformer) {
+      const sanitizedInput = sanitizeAssistantMessages(payloadInput);
+      const { thread: streamingThread, aguiSub } =
+        await this.acquireTransformerThread(threadId);
+      const unsubscribeOnEvent = this.watchForRootTerminal(streamingThread, aguiSub);
+
+      const resumeRequested =
+        forwardedProps?.command?.resume !== undefined &&
+        forwardedProps?.command?.resume !== null;
+      const pendingInterrupt = this.findPendingInterrupt(
+        streamingThread,
+        agentState,
+        resumeRequested,
+      );
+
+      let runId: string | undefined;
+      if (resumeRequested && pendingInterrupt) {
+        // Resume routes through input.respond on the cached
+        // ThreadStream. The server assigns a fresh run_id we don't
+        // see in the response; activeRun.id stays stale until an
+        // event with the new id flows through.
+        await streamingThread.respondInput({
+          namespace: pendingInterrupt.namespace,
+          interrupt_id: pendingInterrupt.interruptId,
+          response: forwardedProps!.command!.resume,
+        });
+      } else {
+        const submitted = await streamingThread.submitRun({
+          ...payload,
+          input: sanitizedInput,
+          config: payload.config,
+          metadata: payload.metadata as Record<string, unknown>,
+        });
+        runId = submitted?.run_id;
+      }
+      this.activeRun!.id = runId ?? this.activeRun!.id;
+
+      return {
+        streamResponse: aguiSub,
+        state: threadState as ThreadState,
+        // Per-run cleanup only — the cached thread + sub live on for
+        // the next request on this threadId.
+        close: () => {
+          unsubscribeOnEvent();
+        },
+      };
+    }
+
     return {
       // @ts-ignore
       streamResponse: this.client.runs.stream(threadId, this.assistant.assistant_id, payload),
@@ -448,6 +686,55 @@
     };
   }

+  async handleTransformerStreamEvents(
+    stream: Awaited<
+      ReturnType<LangGraphAgent["prepareStream"]> | ReturnType<LangGraphAgent["prepareRegenerateStream"]>
+    >,
+    threadId: string,
+    subscriber: Subscriber<ProcessedEvents>,
+  ) {
+    let runErrorEmitted = false;
+    try {
+      this.dispatchEvent({
+        type: EventType.RUN_STARTED,
+        threadId,
+        runId: this.activeRun!.id,
+      });
+      for await (const rawEvent of stream!.streamResponse as AsyncIterable<ProcessedEvents>) {
+        if (rawEvent?.type === EventType.RUN_ERROR) runErrorEmitted = true;
+        this.dispatchEvent(rawEvent);
+      }
+
+      if (!runErrorEmitted) {
+        this.dispatchEvent({
+          type: EventType.RUN_FINISHED,
+          threadId,
+          runId: this.activeRun!.id,
+        });
+      }
+      return subscriber.complete();
+    } catch (err) {
+      if (!runErrorEmitted) {
+        this.dispatchEvent({
+          type: EventType.RUN_ERROR,
+          message: err instanceof Error ? err.message : String(err ?? "Unknown error"),
+        } as RunErrorEvent);
+      }
+      return subscriber.complete();
+    } finally {
+      // Per-run cleanup hook lives on the transformer-path preparedStream
+      // (the legacy runs.stream return doesn't expose one). 
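+      // (Illustrative: the transformer path's prepared stream returns
+      // `{ streamResponse: aguiSub, close: () => unsubscribeOnEvent() }`,
+      // so `closer` here detaches that run's root-terminal watcher.)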
+      const closer = (stream as { close?: () => void | Promise<void> } | undefined)?.close;
+      if (typeof closer === "function") {
+        try {
+          await closer();
+        } catch (_) {
+          // swallow — close is best-effort cleanup
+        }
+      }
+    }
+  }
+
   async handleStreamEvents(
     stream: Awaited<
       ReturnType<LangGraphAgent["prepareStream"]> | ReturnType<LangGraphAgent["prepareRegenerateStream"]>
     >,
@@ -1017,7 +1304,7 @@
     }

     if (toolCallOutput && toolCallOutput.update?.messages?.length) {
-      type MessageFields = ToolMessageFieldsWithToolCallId & { type: string }
+      type MessageFields = ToolMessageFields & { type: string }
       toolCallOutput.update?.messages.filter((message: MessageFields) => message.type === 'tool').forEach((message: MessageFields) => {
         if (!this.activeRun!.hasFunctionStreaming) {
           this.dispatchEvent({
diff --git a/integrations/langgraph/typescript/src/index.ts b/integrations/langgraph/typescript/src/index.ts
index 5bea62b807..b926e3b0c8 100644
--- a/integrations/langgraph/typescript/src/index.ts
+++ b/integrations/langgraph/typescript/src/index.ts
@@ -1,4 +1,8 @@
 import { HttpAgent } from "@ag-ui/client";

 export * from './agent'
+// Transformer is intentionally NOT re-exported from the main entry. It
+// imports `@langchain/langgraph` (server-only) and would force every
+// consumer (e.g. dojo's Next.js bundle) to resolve that dep. Demo agents
+// import it from `@ag-ui/langgraph/transformer` instead.

 export class LangGraphHttpAgent extends HttpAgent {}
\ No newline at end of file
diff --git a/integrations/langgraph/typescript/src/messages-tuple.test.ts b/integrations/langgraph/typescript/src/messages-tuple.test.ts
index 2bbbdb2dda..30a969b3f5 100644
--- a/integrations/langgraph/typescript/src/messages-tuple.test.ts
+++ b/integrations/langgraph/typescript/src/messages-tuple.test.ts
@@ -15,7 +15,10 @@ function createAgent() {
   const agent = new LangGraphAgent({
     graphId: "test-graph",
     url: "http://localhost:8000",
-  });
+    // Legacy `handleSingleEvent` routing test — explicitly opt out
+    // of the transformer path.
+    useTransformer: false,
+  } as any);

   // Wire up a mock subscriber and activeRun so dispatchEvent works
   const events: any[] = [];
diff --git a/integrations/langgraph/typescript/src/predict-state-e2e.test.ts b/integrations/langgraph/typescript/src/predict-state-e2e.test.ts
index 4263117a3e..67705dab23 100644
--- a/integrations/langgraph/typescript/src/predict-state-e2e.test.ts
+++ b/integrations/langgraph/typescript/src/predict-state-e2e.test.ts
@@ -17,6 +17,9 @@ import type { LangGraphAgentConfig } from "./agent";

 function makeConfig(): LangGraphAgentConfig {
   return {
+    // Legacy `handleStreamEvents` path — see subgraph-streaming.test.ts
+    // for the same opt-out rationale.
+    useTransformer: false,
     deploymentUrl: "http://localhost:2024",
     graphId: "test-graph",
     client: {
diff --git a/integrations/langgraph/typescript/src/prepare-regenerate-stream.test.ts b/integrations/langgraph/typescript/src/prepare-regenerate-stream.test.ts
new file mode 100644
index 0000000000..f5ab597174
--- /dev/null
+++ b/integrations/langgraph/typescript/src/prepare-regenerate-stream.test.ts
@@ -0,0 +1,229 @@
+/**
+ * Failing tests describing the target shape of `prepareRegenerateStream`
+ * after the upcoming refactor.
+ *
+ * Today, `prepareRegenerateStream` unconditionally goes through
+ * `this.client.runs.stream(...)`, even when `useTransformer: true`. 
That
+ * means a regenerate doesn't get the AG-UI transformer's `custom:agui`
+ * channel and skips the cached per-thread ThreadStream entirely — so
+ * regen events round-trip through the legacy translator instead of the
+ * transformer.
+ *
+ * After the refactor:
+ *
+ * - When `useTransformer: true` AND a ThreadStream can be acquired
+ *   (existing cache entry or a fresh one) for the threadId, regen calls
+ *   `streamingThread.submitRun({ ..., forkFrom: { checkpointId } })`
+ *   against the cached `custom:agui` subscription. `forkFrom.checkpointId`
+ *   points at the forked checkpoint produced by `threads.updateState`.
+ *
+ * - When `useTransformer: false`, behavior is unchanged: regen uses
+ *   `this.client.runs.stream(threadId, assistantId, payload)`.
+ */
+
+import { describe, it, expect, vi } from "vitest";
+import type { Message as LangGraphMessage } from "@langchain/langgraph-sdk";
+import { LangGraphAgent } from "./agent";
+import type { LangGraphAgentConfig } from "./agent";
+
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------
+
+function makeThreadStream() {
+  const aguiSub = {
+    pause: vi.fn(),
+    resume: vi.fn(),
+    [Symbol.asyncIterator]: async function* () {},
+  };
+  const thread: any = {
+    interrupts: [],
+    subscribe: vi.fn().mockResolvedValue(aguiSub),
+    onEvent: vi.fn().mockReturnValue(() => {}),
+    submitRun: vi.fn().mockResolvedValue({ run_id: "regen-run" }),
+    respondInput: vi.fn().mockResolvedValue(undefined),
+  };
+  return { thread, aguiSub };
+}
+
+function makeConfig(opts: { useTransformer: boolean }) {
+  const threadStreams = new Map<string, ReturnType<typeof makeThreadStream>>();
+  // LangGraph's `threads.getHistory` returns checkpoints newest-first.
+  // `getCheckpointByMessage` reverses to walk oldest→newest and finds
+  // the FIRST checkpoint containing the target message — that's the
+  // one we regenerate from. Order this fixture the same way.
+  const history = [
+    {
+      // Newer checkpoint with a follow-up assistant message.
+      values: {
+        messages: [
+          { id: "u1", type: "human", content: "first" } as LangGraphMessage,
+          { id: "a1", type: "ai", content: "answer" } as LangGraphMessage,
+        ],
+      },
+      checkpoint: { checkpoint_id: "ck-new" },
+      parent_checkpoint: { checkpoint_id: "ck-old", checkpoint_ns: "" },
+      next: [],
+      tasks: [],
+      metadata: {},
+    },
+    {
+      // Older checkpoint — contains only the message we'll regenerate
+      // from; no `messagesAfter`, so the search terminates here.
+      values: {
+        messages: [
+          { id: "u1", type: "human", content: "first" } as LangGraphMessage,
+        ],
+      },
+      checkpoint: { checkpoint_id: "ck-old" },
+      parent_checkpoint: null,
+      next: ["model"],
+      tasks: [],
+      metadata: {},
+    },
+  ];
+
+  const client: any = {
+    threads: {
+      get: vi.fn().mockResolvedValue({ thread_id: "thread-1" }),
+      create: vi.fn().mockResolvedValue({ thread_id: "thread-1" }),
+      // The state-after-fork response — its checkpoint_id is what should
+      // be passed to submitRun's `forkFrom`. 
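+        // (Illustrative, per the assertions below: the refactored regen
+        // call is expected to look like
+        // `submitRun({ input, config, forkFrom: { checkpointId: "ck-fork" } })`.)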
+ updateState: vi.fn().mockResolvedValue({ + checkpoint: { checkpoint_id: "ck-fork" }, + }), + getHistory: vi.fn().mockResolvedValue(history), + getState: vi.fn().mockResolvedValue({ + values: { messages: [] }, + tasks: [], + next: [], + metadata: {}, + }), + stream: vi.fn((threadId: string) => { + let entry = threadStreams.get(threadId); + if (!entry) { + entry = makeThreadStream(); + threadStreams.set(threadId, entry); + } + return entry.thread; + }), + }, + runs: { + cancel: vi.fn(), + // Legacy regen path target. + stream: vi.fn().mockReturnValue({ + [Symbol.asyncIterator]: async function* () {}, + }), + }, + assistants: { + search: vi.fn().mockResolvedValue([ + { assistant_id: "asst-1", graph_id: "test-graph", config: {}, metadata: {} }, + ]), + getGraph: vi.fn().mockResolvedValue({ nodes: [], edges: [] }), + getSchemas: vi.fn().mockResolvedValue({ + input_schema: { properties: { messages: {} } }, + output_schema: { properties: { messages: {} } }, + config_schema: { properties: {} }, + context_schema: { properties: {} }, + }), + }, + }; + + const config: LangGraphAgentConfig = { + useTransformer: opts.useTransformer, + deploymentUrl: "http://localhost:2024", + graphId: "test-graph", + client, + }; + return { config, client, threadStreams }; +} + +function makeAgent(config: LangGraphAgentConfig) { + const agent = new LangGraphAgent(config); + agent.dispatchEvent = (e: any) => e as any; + // prepareRegenerateStream needs `activeRun` to be set (it reads + // `activeRun!.schemaKeys`). Stub minimal shape — runAgentStream would + // normally populate this. + (agent as any).activeRun = { + id: "run-regen", + threadId: "thread-1", + hasFunctionStreaming: false, + modelMadeToolCall: false, + }; + return agent; +} + +const regenInput = { + threadId: "thread-1", + runId: "run-regen", + messages: [], + tools: [], + context: [], + state: {}, + forwardedProps: {}, + messageCheckpoint: { + id: "u1", + type: "human", + content: "first", + } as LangGraphMessage, +}; + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +describe("prepareRegenerateStream — transformer parity", () => { + it("useTransformer: true → routes regen through cached ThreadStream's submitRun with forkFrom.checkpointId", async () => { + const { config, client, threadStreams } = makeConfig({ useTransformer: true }); + const agent = makeAgent(config); + + await agent.prepareRegenerateStream(regenInput as any, ["events", "values"]); + + // The legacy path MUST NOT be used when the transformer is on. + expect(client.runs.stream).not.toHaveBeenCalled(); + + const entry = threadStreams.get("thread-1"); + expect(entry).toBeDefined(); + expect(entry!.thread.submitRun).toHaveBeenCalledTimes(1); + + const payload = entry!.thread.submitRun.mock.calls[0][0]; + expect(payload).toEqual( + expect.objectContaining({ + forkFrom: expect.objectContaining({ checkpointId: "ck-fork" }), + }), + ); + }); + + it("useTransformer: true → opens / reuses the same custom:agui subscription as prepareStream", async () => { + const { config, threadStreams } = makeConfig({ useTransformer: true }); + const agent = makeAgent(config); + + await agent.prepareRegenerateStream(regenInput as any, ["events", "values"]); + const entry = threadStreams.get("thread-1"); + expect(entry).toBeDefined(); + // Exactly one subscription was opened — the SAME shared-cache rule + // prepareStream uses. 
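+    // i.e. `acquireTransformerThread` hit the cache — `subscribe(["custom:agui"])`
+    // ran once on first acquisition and never again for this thread.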
+ expect(entry!.thread.subscribe).toHaveBeenCalledTimes(1); + expect(entry!.thread.subscribe).toHaveBeenCalledWith(["custom:agui"]); + }); + + it("useTransformer: false → falls back to client.runs.stream (legacy behavior preserved)", async () => { + const { config, client, threadStreams } = makeConfig({ useTransformer: false }); + const agent = makeAgent(config); + + await agent.prepareRegenerateStream(regenInput as any, ["events", "values"]); + + expect(client.runs.stream).toHaveBeenCalledTimes(1); + const [threadIdArg, assistantIdArg, payload] = client.runs.stream.mock.calls[0]; + expect(threadIdArg).toBe("thread-1"); + expect(assistantIdArg).toBe("asst-1"); + expect(payload).toEqual( + expect.objectContaining({ + checkpointId: "ck-fork", + }), + ); + + // No ThreadStream / submitRun involvement when useTransformer is off. + expect(threadStreams.size).toBe(0); + }); +}); diff --git a/integrations/langgraph/typescript/src/prepare-stream.test.ts b/integrations/langgraph/typescript/src/prepare-stream.test.ts new file mode 100644 index 0000000000..64433fdf17 --- /dev/null +++ b/integrations/langgraph/typescript/src/prepare-stream.test.ts @@ -0,0 +1,516 @@ +/** + * Failing tests describing the target shape of an upcoming refactor of + * `LangGraphAgent.prepareStream`. None of these helpers/exports exist yet; + * the impl follows in a separate pass. These tests must currently fail + * with messages naming the missing exports / behaviors. + * + * Refactor targets covered here: + * + * 1. `sanitizeAssistantMessages` — pure named helper extracted from the + * transformer branch's inline sanitizer. Strips `tool_call` content + * blocks from AI message `content` arrays and drops + * `response_metadata.output_version === "v1"`. + * + * 2. `transformerThreads` cache — shared across `clone()`s. Second + * request on the same threadId reuses the cached ThreadStream and the + * persistent `custom:agui` subscription instead of opening a new one. + * + * 3. Resume vs submitRun routing — when `forwardedProps.command.resume` + * is set AND a pending interrupt is reachable (either on the cached + * `streamingThread.interrupts` or on `agentState.tasks[].interrupts[]`), + * the transformer branch must call `streamingThread.respondInput(...)` + * instead of `streamingThread.submitRun(...)`. When no resume, the + * normal `submitRun(payload)` path runs. + */ + +import { describe, it, expect, vi, beforeEach } from "vitest"; +import type { Message as LangGraphMessage } from "@langchain/langgraph-sdk"; +import { LangGraphAgent } from "./agent"; +import type { LangGraphAgentConfig } from "./agent"; + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +/** + * Build a mock ThreadStream returned from `client.threads.stream(threadId, …)`. + * The transformer branch: + * - calls `thread.subscribe(["custom:agui"])` once per fresh entry, + * - registers a lifecycle watcher via `thread.onEvent(...)`, + * - calls either `thread.submitRun(...)` (normal) or + * `thread.respondInput(...)` (resume + pending interrupt). + * `interrupts` on the thread surfaces live pending interrupts populated by + * the SDK's lifecycle watcher. 
+ */
+function makeThreadStream(opts?: {
+  interrupts?: Array<{ interruptId: string; namespace: readonly string[] }>;
+}) {
+  const aguiSub = {
+    pause: vi.fn(),
+    resume: vi.fn(),
+    [Symbol.asyncIterator]: async function* () {
+      // Empty stream — these tests only care about the call-path before
+      // events would be consumed.
+    },
+  };
+  const thread: any = {
+    interrupts: opts?.interrupts ?? [],
+    subscribe: vi.fn().mockResolvedValue(aguiSub),
+    onEvent: vi.fn().mockReturnValue(() => {}),
+    submitRun: vi.fn().mockResolvedValue({ run_id: "run-from-submit" }),
+    respondInput: vi.fn().mockResolvedValue(undefined),
+  };
+  return { thread, aguiSub };
+}
+
+/**
+ * Make a config + a per-test cache of ThreadStreams keyed by threadId so
+ * we can assert call counts across multiple `clone()`s or instances that
+ * share the same underlying client.
+ */
+function makeConfig(opts?: {
+  agentState?: any;
+  threadStreams?: Map<string, ReturnType<typeof makeThreadStream>>;
+}): {
+  config: LangGraphAgentConfig;
+  threadStreams: Map<string, ReturnType<typeof makeThreadStream>>;
+  client: any;
+} {
+  const threadStreams = opts?.threadStreams ?? new Map();
+  const agentState = opts?.agentState ?? {
+    values: { messages: [] },
+    tasks: [],
+    next: [],
+    metadata: { writes: {} },
+  };
+
+  const client: any = {
+    threads: {
+      get: vi.fn().mockResolvedValue({ thread_id: "thread-1" }),
+      create: vi.fn().mockResolvedValue({ thread_id: "thread-1" }),
+      getState: vi.fn().mockResolvedValue(agentState),
+      updateState: vi.fn().mockResolvedValue({
+        checkpoint: { checkpoint_id: "ck-1" },
+      }),
+      // The hook under test.
+      stream: vi.fn((threadId: string, _assistantId: string) => {
+        let entry = threadStreams.get(threadId);
+        if (!entry) {
+          entry = makeThreadStream();
+          threadStreams.set(threadId, entry);
+        }
+        return entry.thread;
+      }),
+    },
+    runs: {
+      cancel: vi.fn(),
+      stream: vi.fn(),
+    },
+    assistants: {
+      search: vi
+        .fn()
+        .mockResolvedValue([
+          { assistant_id: "asst-1", graph_id: "test-graph", config: {}, metadata: {} },
+        ]),
+      getGraph: vi.fn().mockResolvedValue({ nodes: [], edges: [] }),
+      getSchemas: vi.fn().mockResolvedValue({
+        input_schema: { properties: { messages: {} } },
+        output_schema: { properties: { messages: {} } },
+        config_schema: { properties: {} },
+        context_schema: { properties: {} },
+      }),
+    },
+  };
+
+  const config: LangGraphAgentConfig = {
+    useTransformer: true,
+    deploymentUrl: "http://localhost:2024",
+    graphId: "test-graph",
+    client,
+  };
+
+  return { config, threadStreams, client };
+}
+
+function makeAgent(config: LangGraphAgentConfig) {
+  const agent = new LangGraphAgent(config);
+  const dispatched: any[] = [];
+  agent.dispatchEvent = (event: any) => {
+    dispatched.push(event);
+    return event as any;
+  };
+  // prepareStream reads `this.activeRun!` immediately. In production this
+  // is populated by `runAgentStream` before `prepareStream` runs; in
+  // isolation we stub it.
+  (agent as any).activeRun = {
+    id: "run-1",
+    threadId: "thread-1",
+    hasFunctionStreaming: false,
+    modelMadeToolCall: false,
+  };
+  // A subscriber is read by `prepareStream` when it errors out. We stub
+  // a no-op subscriber so the happy paths don't touch a real Observable.
+  (agent as any).subscriber = {
+    next: vi.fn(),
+    error: vi.fn(),
+    complete: vi.fn(),
+    closed: false,
+  };
+  return { agent, dispatched };
+}
+
+function aiMsg(id: string, fields: Partial<LangGraphMessage> = {}): LangGraphMessage {
+  return { id, type: "ai", content: "", ...fields } as LangGraphMessage;
+}
+
+// ---------------------------------------------------------------------------
+// 1. 
sanitizeAssistantMessages — pure helper +// --------------------------------------------------------------------------- + +describe("sanitizeAssistantMessages (named export)", () => { + /** + * Dynamic import inside each test so module-load failure (named export + * doesn't exist yet) surfaces as a clear failing assertion rather than + * a suite-load error. + */ + async function loadHelper() { + const mod: any = await import("./agent"); + return mod.sanitizeAssistantMessages as ( + payloadInput: any, + ) => any; + } + + it("is exported from ./agent", async () => { + const helper = await loadHelper(); + expect(typeof helper).toBe("function"); + }); + + it("returns the input untouched when payloadInput has no messages", async () => { + const helper = await loadHelper(); + const input = { foo: "bar" }; + expect(helper(input)).toEqual(input); + }); + + it("does not throw on null / undefined payloadInput", async () => { + const helper = await loadHelper(); + expect(() => helper(undefined)).not.toThrow(); + expect(() => helper(null)).not.toThrow(); + }); + + it("keeps text-only AI messages untouched", async () => { + const helper = await loadHelper(); + const m = aiMsg("a1", { content: "hello world" }); + const result = helper({ messages: [m] }); + expect(result.messages[0]).toEqual(m); + }); + + it("leaves non-AI messages alone", async () => { + const helper = await loadHelper(); + const human = { id: "h1", type: "human", content: "hi" }; + const tool = { + id: "t1", + type: "tool", + tool_call_id: "tc-1", + content: [{ type: "tool_call", id: "tc-1" }], + }; + const result = helper({ messages: [human as any, tool as any] }); + expect(result.messages[0]).toEqual(human); + expect(result.messages[1]).toEqual(tool); + }); + + it("flattens content to empty string when ONLY tool_call blocks remain", async () => { + const helper = await loadHelper(); + const m = aiMsg("a1", { + content: [ + { type: "tool_call", id: "tc-1", name: "search", args: {} }, + { type: "tool_call", id: "tc-2", name: "lookup", args: {} }, + ], + }); + const result = helper({ messages: [m] }); + expect(result.messages[0].content).toBe(""); + }); + + it("keeps remaining non-tool_call blocks when some are stripped", async () => { + const helper = await loadHelper(); + const m = aiMsg("a1", { + content: [ + { type: "text", text: "Here you go:" }, + { type: "tool_call", id: "tc-1", name: "search", args: {} }, + ], + }); + const result = helper({ messages: [m] }); + expect(Array.isArray(result.messages[0].content)).toBe(true); + expect(result.messages[0].content).toHaveLength(1); + expect(result.messages[0].content[0]).toEqual({ + type: "text", + text: "Here you go:", + }); + }); + + it("removes response_metadata.output_version v1 while preserving siblings", async () => { + const helper = await loadHelper(); + const m = aiMsg("a1", { + content: "ok", + response_metadata: { + output_version: "v1", + model_name: "gpt-4o", + finish_reason: "stop", + }, + }); + const result = helper({ messages: [m] }); + const rm = (result.messages[0] as any).response_metadata; + expect(rm).toBeDefined(); + expect(rm.output_version).toBeUndefined(); + expect(rm.model_name).toBe("gpt-4o"); + expect(rm.finish_reason).toBe("stop"); + }); + + it("missing response_metadata does not throw", async () => { + const helper = await loadHelper(); + const m = aiMsg("a1", { content: "ok" }); + expect(() => helper({ messages: [m] })).not.toThrow(); + }); + + it("preserves the rest of the payloadInput shape (non-messages keys)", async () => { + const helper = await 
loadHelper(); + const m = aiMsg("a1", { content: "ok" }); + const result = helper({ messages: [m], tools: ["t"], extra: { x: 1 } }); + expect(result.tools).toEqual(["t"]); + expect(result.extra).toEqual({ x: 1 }); + }); +}); + +// --------------------------------------------------------------------------- +// 2. transformerThreads cache reuse +// --------------------------------------------------------------------------- + +describe("transformerThreads cache (acquireThreadStream)", () => { + it("subscribes to custom:agui exactly once across two runs on the same threadId", async () => { + const { config, threadStreams, client } = makeConfig(); + const { agent } = makeAgent(config); + + const input: any = { + threadId: "thread-1", + runId: "run-1", + messages: [{ id: "u1", role: "user", content: "hi" }], + tools: [], + context: [], + state: {}, + forwardedProps: {}, + }; + + await agent.prepareStream(input, ["events", "values"]); + await agent.prepareStream({ ...input, runId: "run-2" }, ["events", "values"]); + + // Only one ThreadStream should ever be opened for thread-1. + const streamCalls = client.threads.stream.mock.calls.filter( + (c: any[]) => c[0] === "thread-1", + ); + expect(streamCalls).toHaveLength(1); + + // And only one subscribe(["custom:agui"]) on that stream. + const entry = threadStreams.get("thread-1")!; + expect(entry.thread.subscribe).toHaveBeenCalledTimes(1); + expect(entry.thread.subscribe).toHaveBeenCalledWith(["custom:agui"]); + }); + + it("clone() shares the cache with its parent — second run via clone reuses subscription", async () => { + const { config, threadStreams, client } = makeConfig(); + const { agent: parent } = makeAgent(config); + + const baseInput: any = { + threadId: "thread-1", + runId: "run-1", + messages: [{ id: "u1", role: "user", content: "hi" }], + tools: [], + context: [], + state: {}, + forwardedProps: {}, + }; + + await parent.prepareStream(baseInput, ["events", "values"]); + + const child = parent.clone() as LangGraphAgent; + child.dispatchEvent = (e: any) => e as any; + // clone() should share `transformerThreads` by reference. + expect((child as any).transformerThreads).toBe((parent as any).transformerThreads); + + await child.prepareStream({ ...baseInput, runId: "run-2" }, ["events", "values"]); + + const entry = threadStreams.get("thread-1")!; + expect(entry.thread.subscribe).toHaveBeenCalledTimes(1); + expect( + client.threads.stream.mock.calls.filter((c: any[]) => c[0] === "thread-1"), + ).toHaveLength(1); + }); + + it("different threadIds get separate cache entries (subscribe called per thread)", async () => { + const { config, threadStreams, client } = makeConfig(); + const { agent } = makeAgent(config); + + const baseInput: any = { + runId: "run-1", + messages: [{ id: "u1", role: "user", content: "hi" }], + tools: [], + context: [], + state: {}, + forwardedProps: {}, + }; + + // Adjust the get/create to echo whatever threadId we ask for. + (config.client as any).threads.get = vi.fn(async (id: string) => ({ + thread_id: id, + })); + (config.client as any).threads.create = vi.fn(async (payload: any) => ({ + thread_id: payload?.threadId ?? 
"thread-x", + })); + + await agent.prepareStream({ ...baseInput, threadId: "thread-a" }, [ + "events", + "values", + ]); + await agent.prepareStream({ ...baseInput, threadId: "thread-b" }, [ + "events", + "values", + ]); + + expect(threadStreams.has("thread-a")).toBe(true); + expect(threadStreams.has("thread-b")).toBe(true); + expect(threadStreams.get("thread-a")!.thread.subscribe).toHaveBeenCalledTimes(1); + expect(threadStreams.get("thread-b")!.thread.subscribe).toHaveBeenCalledTimes(1); + }); +}); + +// --------------------------------------------------------------------------- +// 3. Resume vs submitRun routing +// --------------------------------------------------------------------------- + +describe("resume vs submitRun routing (transformer branch)", () => { + it("no resume → submitRun is called with the prepared payload, respondInput is not", async () => { + const { config, threadStreams } = makeConfig(); + const { agent } = makeAgent(config); + + await agent.prepareStream( + { + threadId: "thread-1", + runId: "run-1", + messages: [{ id: "u1", role: "user", content: "hi" }], + tools: [], + context: [], + state: {}, + forwardedProps: {}, + } as any, + ["events", "values"], + ); + + const entry = threadStreams.get("thread-1")!; + expect(entry.thread.submitRun).toHaveBeenCalledTimes(1); + expect(entry.thread.respondInput).not.toHaveBeenCalled(); + }); + + it("resume + interrupt on streamingThread.interrupts → respondInput with that namespace/id", async () => { + // Seed the ThreadStream with a live pending interrupt BEFORE the agent + // runs, simulating that the lifecycle watcher already saw it during a + // prior run (cached ThreadStream survived). + const seededInterrupt = { + interruptId: "intr-A", + namespace: ["task-1"] as readonly string[], + }; + const seeded = makeThreadStream({ interrupts: [seededInterrupt] }); + const threadStreams = new Map>(); + threadStreams.set("thread-1", seeded); + + const { config } = makeConfig({ threadStreams }); + const { agent } = makeAgent(config); + // Pre-populate the cache so the agent reuses our seeded ThreadStream. 
+ (agent as any).transformerThreads.set("thread-1", { + thread: seeded.thread, + aguiSub: seeded.aguiSub, + }); + + await agent.prepareStream( + { + threadId: "thread-1", + runId: "run-2", + messages: [{ id: "u1", role: "user", content: "approve" }], + tools: [], + context: [], + state: {}, + forwardedProps: { command: { resume: { ok: true } } }, + } as any, + ["events", "values"], + ); + + expect(seeded.thread.respondInput).toHaveBeenCalledTimes(1); + expect(seeded.thread.respondInput).toHaveBeenCalledWith( + expect.objectContaining({ + namespace: seededInterrupt.namespace, + interrupt_id: seededInterrupt.interruptId, + response: { ok: true }, + }), + ); + expect(seeded.thread.submitRun).not.toHaveBeenCalled(); + }); + + it("resume + interrupt only on agentState.tasks fallback → respondInput uses task interrupt", async () => { + const agentState = { + values: { messages: [] }, + tasks: [ + { + checkpoint: { checkpoint_ns: "ns-a|ns-b" }, + interrupts: [{ id: "intr-fallback", value: "needs approval" }], + }, + ], + next: [], + metadata: { writes: {} }, + }; + const { config, threadStreams } = makeConfig({ agentState }); + const { agent } = makeAgent(config); + + await agent.prepareStream( + { + threadId: "thread-1", + runId: "run-2", + messages: [{ id: "u1", role: "user", content: "approve" }], + tools: [], + context: [], + state: {}, + forwardedProps: { command: { resume: "yes" } }, + } as any, + ["events", "values"], + ); + + const entry = threadStreams.get("thread-1")!; + expect(entry.thread.respondInput).toHaveBeenCalledTimes(1); + const call = entry.thread.respondInput.mock.calls[0][0]; + expect(call.interrupt_id).toBe("intr-fallback"); + expect(call.namespace).toEqual(["ns-a", "ns-b"]); + expect(entry.thread.submitRun).not.toHaveBeenCalled(); + }); + + it("resume but NO pending interrupt anywhere → falls back to submitRun", async () => { + // Edge case: the user's command.resume is set but neither the cached + // ThreadStream nor the agentState has a pending interrupt. We can't + // call respondInput without an interrupt_id, so the refactor must + // route to submitRun (current inline code does this — keep parity). + const { config, threadStreams } = makeConfig(); + const { agent } = makeAgent(config); + + await agent.prepareStream( + { + threadId: "thread-1", + runId: "run-2", + messages: [{ id: "u1", role: "user", content: "approve" }], + tools: [], + context: [], + state: {}, + forwardedProps: { command: { resume: { ok: true } } }, + } as any, + ["events", "values"], + ); + + const entry = threadStreams.get("thread-1")!; + expect(entry.thread.submitRun).toHaveBeenCalledTimes(1); + expect(entry.thread.respondInput).not.toHaveBeenCalled(); + }); +}); diff --git a/integrations/langgraph/typescript/src/subgraph-streaming.test.ts b/integrations/langgraph/typescript/src/subgraph-streaming.test.ts index 7a682d6b82..fa6b34f058 100644 --- a/integrations/langgraph/typescript/src/subgraph-streaming.test.ts +++ b/integrations/langgraph/typescript/src/subgraph-streaming.test.ts @@ -30,6 +30,11 @@ function nsRoot(ns: string): string { function makeConfig(): LangGraphAgentConfig { return { + // These tests exercise the legacy `handleStreamEvents` translator + // — they synthesise events-mode chunks. The transformer path + // consumes a different stream shape (`thread.extensions.agui`), + // so opt out explicitly. 
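+    // (For contrast, a legacy events-mode chunk looks roughly like
+    // `{ event: "events", data: { event: "on_chat_model_stream", ... } }`,
+    // while the transformer path yields ready-made AG-UI events.)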
+    useTransformer: false,
     deploymentUrl: "http://localhost:2024",
     graphId: "test-graph",
     client: {
diff --git a/integrations/langgraph/typescript/src/transformer/agui-transformer.ts b/integrations/langgraph/typescript/src/transformer/agui-transformer.ts
new file mode 100644
index 0000000000..a78f4e3513
--- /dev/null
+++ b/integrations/langgraph/typescript/src/transformer/agui-transformer.ts
@@ -0,0 +1,641 @@
+/**
+ * AG-UI StreamTransformer.
+ *
+ * Wired into a graph at compile time via `transformers: [aguiTransformer]`.
+ * Exposes a named `agui` channel reached by SDK clients via
+ * `thread.extensions.agui`. Translates langgraph `ProtocolEvent`s into AG-UI
+ * events across every family: lifecycle (RUN_*, STEP_*), messages (TEXT_*,
+ * TOOL_CALL_*, REASONING_*), state (STATE_SNAPSHOT / MESSAGES_SNAPSHOT),
+ * tasks (CUSTOM `OnInterrupt`), and custom (ManuallyEmit* + generic
+ * passthrough). RUN_STARTED / RUN_FINISHED are owned by `agent.ts`; this
+ * factory pushes everything in between.
+ */
+
+import {
+  StreamChannel,
+  type ProtocolEvent,
+  type StreamTransformer,
+} from "@langchain/langgraph";
+import { langchainMessagesToAgui } from "../utils";
+import { CustomEventNames, LangGraphEventTypes, ProcessedEvents, State } from "../types";
+import { EventType } from "@ag-ui/core";
+
+/**
+ * Factory returning the transformer instance. Each run gets a fresh remote
+ * `agui` channel; SDK clients consume via `thread.extensions.agui`.
+ */
+export const aguiTransformer = (): StreamTransformer<{
+  agui: StreamChannel<ProcessedEvents>;
+}> => {
+  const aguiChannel = StreamChannel.remote<ProcessedEvents>("agui");
+  let initialized = false;
+
+  // Per-message tracking for text streaming. Keyed by content-block index so
+  // multi-block messages (e.g. text + tool call in one assistant turn) don't
+  // collide on a single shared id.
+  const textBlockMessageIds = new Map<number, string>();
+  // Per-tool-call tracking. Keyed by content-block index. Each tool call
+  // occupies one block; the chunk's id may be null until later updates so we
+  // also remember the assigned toolCallId here. `argsSoFar` carries the
+  // cumulative `args` string the engine has reported — block-delta carries
+  // the FULL accumulated value each time, not an incremental piece, so we
+  // diff against this to derive the delta AG-UI expects.
+  const toolBlocks = new Map<
+    number,
+    { toolCallId: string; toolCallName: string; argsSoFar: string }
+  >();
+  let activeMessageId: string | undefined;
+
+  // Per-reasoning-block tracking. Keyed by content-block index. The
+  // standardized v3 format (per langgraph streaming-cookbook) emits
+  // reasoning content blocks with `type: "reasoning"` and an optional
+  // initial `reasoning` string on start; deltas use
+  // `type: "reasoning-delta"` with a `reasoning` field. Encrypted
+  // material from Anthropic surfaces as `redacted_thinking` blocks
+  // (with `data`) or as a `signature` field on a reasoning block.
+  const reasoningBlocks = new Map<
+    number,
+    { messageId: string; messageStarted: boolean }
+  >();
+
+  // Per-run set of interrupt ids already converted into CUSTOM
+  // `OnInterrupt` pushes. Each `tasks` event carrying interrupts can
+  // fire multiple times during the run (input + result frames); dedup
+  // by interrupt id so the client only renders one prompt.
+  const emittedInterruptIds = new Set<string>();
+
+  // Active graph-node steps keyed by full namespace path → stepName.
+  // The companion `activeStepNames` enforces AG-UI's name-uniqueness
+  // contract: at most one STEP_STARTED per stepName at a time. 
Inner
+  // subgraph nodes whose stripped head name collides with an already
+  // active step (e.g. an outer `experiences_agent` plus its inner
+  // graph also rooted under `experiences_agent`) are ignored so the
+  // outer STEP_FINISHED stays balanced.
+  const activeSteps = new Map<string, string>();
+  const activeStepNames = new Set<string>();
+
+  const push = (ev: ProcessedEvents) => {
+    aguiChannel.push(ev);
+  };
+
+  const isRootNamespace = (ns: readonly string[]) => ns.length === 0;
+
+  // Defer snapshot emission until the run reaches a stable point. Each
+  // Pregel step (including transient sub-steps inside copilotkitMiddleware
+  // that intercept then restore tool calls) emits its own `values` event;
+  // pushing a MESSAGES_SNAPSHOT for each one ships the in-between dip where
+  // the assistant message has lost its tool calls. Mirrors the legacy
+  // agent's behaviour, which only reads the canonical persisted state at
+  // run end.
+  let latestState: State | null = null;
+  let lastMessagesSnapshotHash = "";
+  let lastStateSnapshotHash = "";
+
+  const cacheState = (state: State) => {
+    if (!state || typeof state !== "object") return;
+    // Shallow-merge instead of replace. Subsequent root `values` events
+    // may carry only the keys that just changed (e.g. an interrupt
+    // update without `messages` rebroadcast); replacing wholesale would
+    // drop the unchanged keys and ship an empty MESSAGES_SNAPSHOT,
+    // which CopilotKit treats as "no messages" and resets the UI.
+    latestState = { ...(latestState ?? {}), ...state };
+  };
+
+  const flushSnapshots = () => {
+    if (!latestState) return;
+    const state = latestState;
+
+    const { messages: _m, ...stateOnly } = state;
+    const stateHash = JSON.stringify(stateOnly);
+    if (stateHash !== lastStateSnapshotHash) {
+      lastStateSnapshotHash = stateHash;
+      push({ type: EventType.STATE_SNAPSHOT, snapshot: stateOnly });
+    }
+
+    const lcMessages = state.messages ?? [];
+    const aguiMessages = langchainMessagesToAgui(lcMessages);
+    const msgHash = JSON.stringify(aguiMessages);
+    if (msgHash !== lastMessagesSnapshotHash) {
+      lastMessagesSnapshotHash = msgHash;
+      push({ type: EventType.MESSAGES_SNAPSHOT, messages: aguiMessages });
+    }
+  };
+
+  return {
+    init() {
+      initialized = true;
+      return { agui: aguiChannel };
+    },
+
+    finalize() {
+      // Lifecycle (RUN_*) is owned by agent.ts. Here we only close any
+      // text/tool/reasoning blocks that didn't receive their
+      // `content-block-finish` before the run ended, so AG-UI verify
+      // doesn't reject the terminal event downstream.
+      for (const [index, messageId] of textBlockMessageIds) {
+        push({ type: EventType.TEXT_MESSAGE_END, messageId });
+        textBlockMessageIds.delete(index);
+      }
+      for (const [index, tool] of toolBlocks) {
+        push({ type: EventType.TOOL_CALL_END, toolCallId: tool.toolCallId });
+        toolBlocks.delete(index);
+      }
+      for (const [index, r] of reasoningBlocks) {
+        if (r.messageStarted) push({ type: EventType.REASONING_MESSAGE_END, messageId: r.messageId });
+        push({ type: EventType.REASONING_END, messageId: r.messageId });
+        reasoningBlocks.delete(index);
+      }
+      for (const [nsKey, stepName] of activeSteps) {
+        push({ type: EventType.STEP_FINISHED, stepName });
+        activeSteps.delete(nsKey);
+        activeStepNames.delete(stepName);
+      }
+    },
+
+    process(event: ProtocolEvent): boolean {
+      // Mux wires the channel only after init() returns. Pushes before then
+      // are dropped on the wire. Skip until init has completed. 
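+      // (Assumption about the v3 contract as used here: the boolean
+      // return signals "consumed, keep streaming"; this transformer
+      // returns `true` even while skipping so the mux keeps routing.)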
+ if (!initialized) return true; + + switch (event.method) { + case "lifecycle": { + const status = (event.params.data as { event?: string } | undefined)?.event; + + // Non-root lifecycle events bracket individual graph nodes. + // Translate them to AG-UI STEP_STARTED / STEP_FINISHED so + // consumers can show progress on multi-node graphs. The + // namespace head is `nodeName:uuid` — strip the uuid for a + // readable step name. AG-UI verify enforces a single active + // step per stepName, so we dedup by stepName and remember + // which namespace opened it; only that namespace's close + // emits STEP_FINISHED. Nested subgraph lifecycles whose + // stripped head name matches an already-active step (e.g. + // a subgraph node and an inner node both rooted under + // `experiences_agent`) are ignored — both ends would + // otherwise unbalance the pair. + if (!isRootNamespace(event.params.namespace)) { + const head = event.params.namespace[0]; + const nsKey = event.params.namespace.join("|"); + const stepName = typeof head === "string" ? head.split(":")[0] : ""; + if (!stepName) break; + if (status === "started") { + if (!activeStepNames.has(stepName)) { + activeStepNames.add(stepName); + activeSteps.set(nsKey, stepName); + push({ type: EventType.STEP_STARTED, stepName }); + } + } else if (status === "completed" || status === "failed" || status === "interrupted") { + const tracked = activeSteps.get(nsKey); + if (tracked) { + activeSteps.delete(nsKey); + activeStepNames.delete(tracked); + push({ type: EventType.STEP_FINISHED, stepName: tracked }); + } + // Lock in state at every node/subgraph boundary. A + // subgraph (or any node) can mutate state across many + // intermediate `values` events; flushing here ships a + // single coherent STATE_SNAPSHOT + MESSAGES_SNAPSHOT at + // the point its contribution is committed to the parent + // checkpoint. Snapshot push is hash-deduped, so flushing + // at every boundary is cheap when nothing changed. + if (status === "completed") flushSnapshots(); + } + break; + } + + // Lifecycle bracketing (RUN_STARTED / RUN_FINISHED) is owned by + // agent.ts. Here we only forward fatal failures so the client can + // surface the underlying message instead of a generic + // INCOMPLETE_STREAM error. + if (status === "completed" || status === "interrupted") { + // Stable point: the run is paused (interrupted) or done + // (completed). The state we cached from the last `values` + // event reflects the canonical shape at this boundary. + // Flush snapshots so consumers see updated state at both + // run end and interrupt — HITL graphs land here on every + // interrupt() call. + flushSnapshots(); + } else if (status === "failed") { + const message = (event.params.data as { error?: string } | undefined)?.error; + push({ type: EventType.RUN_ERROR, message: message ?? "Unknown error" }); + } + break; + } + + case "input.requested": { + // The graph hit an interrupt(...) call. Forward as AG-UI + // CUSTOM `OnInterrupt`, matching the legacy contract so dojo + // (and other clients) can render the same prompt UI they + // already drive off the legacy translation. + const data = event.params?.data as + | { interrupt_id?: string; payload?: unknown } + | undefined; + if (!data) break; + const value = + typeof data.payload === "string" + ? 
data.payload + : JSON.stringify(data.payload); + push({ + type: EventType.CUSTOM, + name: LangGraphEventTypes.OnInterrupt, + value, + } as ProcessedEvents); + break; + } + + case "messages": { + const data = event.params.data as + | { + event: string; + role?: string; + id?: string; + index?: number; + content?: { type?: string }; + delta?: { type?: string; text?: string }; + } + | undefined; + if (!data) break; + + switch (data.event) { + case "message-start": { + // The protocol declares `role` on MessageStartData but the + // langgraph dev server omits it in practice. Use any + // message-start as the signal to bind activeMessageId; the + // downstream content-block-start filter (type === "text") + // ensures we only emit text events for AI text blocks. + if (!data.id) break; + activeMessageId = data.id; + break; + } + + case "content-block-start": { + if (data.index == null) break; + const blockType = data.content?.type; + if (blockType === "text") { + if (!activeMessageId) break; + textBlockMessageIds.set(data.index, activeMessageId); + push({ + type: EventType.TEXT_MESSAGE_START, + messageId: activeMessageId, + role: "assistant", + }); + } else if (blockType === "reasoning" || blockType === "thinking") { + // Standardized v3 format ("reasoning") plus the older + // langchain-anthropic alias ("thinking"). Treat the + // content block as a single reasoning entity scoped to + // the current message + this content-block index. + if (!activeMessageId) break; + const reasoningId = `${activeMessageId}:r:${data.index}`; + reasoningBlocks.set(data.index, { + messageId: reasoningId, + messageStarted: false, + }); + push({ type: EventType.REASONING_START, messageId: reasoningId }); + const block = data.content as + | { reasoning?: string; thinking?: string; signature?: string } + | undefined; + const initial = block?.reasoning ?? block?.thinking ?? ""; + if (initial.length > 0) { + push({ + type: EventType.REASONING_MESSAGE_START, + messageId: reasoningId, + role: "reasoning", + }); + reasoningBlocks.get(data.index)!.messageStarted = true; + push({ + type: EventType.REASONING_MESSAGE_CONTENT, + messageId: reasoningId, + delta: initial, + }); + } + if (block?.signature) { + push({ + type: EventType.REASONING_ENCRYPTED_VALUE, + subtype: "message", + entityId: reasoningId, + encryptedValue: block.signature, + } as ProcessedEvents); + } + } else if (blockType === "redacted_thinking") { + // Anthropic redacted_thinking carries opaque encrypted + // chain-of-thought. Surface as a standalone + // REASONING_ENCRYPTED_VALUE without opening a + // visible reasoning message. + const block = data.content as { data?: string } | undefined; + if (activeMessageId && block?.data) { + push({ + type: EventType.REASONING_ENCRYPTED_VALUE, + subtype: "message", + entityId: activeMessageId, + encryptedValue: block.data, + } as ProcessedEvents); + } + } else if (blockType === "tool_call_chunk" || blockType === "tool_call") { + const block = data.content as + | { id?: string | null; name?: string | null; args?: string | null } + | undefined; + const toolCallId = block?.id ?? `tc-${data.index}`; + const toolCallName = block?.name ?? ""; + const initialArgs = typeof block?.args === "string" ? 
block.args : ""; + toolBlocks.set(data.index, { + toolCallId, + toolCallName, + argsSoFar: initialArgs, + }); + push({ + type: EventType.TOOL_CALL_START, + toolCallId, + toolCallName, + parentMessageId: activeMessageId, + }); + if (initialArgs.length > 0) { + push({ + type: EventType.TOOL_CALL_ARGS, + toolCallId, + delta: initialArgs, + }); + } + } + break; + } + + case "content-block-delta": { + if (data.index == null) break; + const deltaType = data.delta?.type; + if (deltaType === "text-delta") { + // Server may emit text deltas at a content-block index + // already occupied by another type (e.g. reasoning at + // idx=0, then text deltas at idx=0 with no preceding + // text content-block-start). Treat that as an implicit + // open: mint a TEXT_MESSAGE_START on first delta. End + // is taken care of on message-finish (or finalize). + let messageId = textBlockMessageIds.get(data.index); + if (!messageId && activeMessageId) { + messageId = activeMessageId; + textBlockMessageIds.set(data.index, messageId); + push({ + type: EventType.TEXT_MESSAGE_START, + messageId, + role: "assistant", + }); + } + if (!messageId) break; + push({ + type: EventType.TEXT_MESSAGE_CONTENT, + messageId, + delta: data.delta?.text ?? "", + }); + } else if (deltaType === "reasoning-delta" || deltaType === "thinking-delta") { + // Standardized v3 reasoning delta + older Anthropic + // thinking-delta alias. + const r = reasoningBlocks.get(data.index); + if (!r) break; + const delta = (data.delta as { reasoning?: string; thinking?: string } | undefined); + const text = delta?.reasoning ?? delta?.thinking ?? ""; + if (text.length === 0) break; + if (!r.messageStarted) { + push({ + type: EventType.REASONING_MESSAGE_START, + messageId: r.messageId, + role: "reasoning", + }); + r.messageStarted = true; + } + push({ + type: EventType.REASONING_MESSAGE_CONTENT, + messageId: r.messageId, + delta: text, + }); + } else if (deltaType === "block-delta") { + // BlockDelta carries shallow-merge fields. For tool calls + // `args` is the FULL cumulative JSON string, not an + // incremental piece. AG-UI's TOOL_CALL_ARGS expects a delta, + // so compute it by stripping the prefix we have already sent. + const tool = toolBlocks.get(data.index); + if (!tool) break; + const fields = (data.delta as { fields?: { args?: string; name?: string } } | undefined)?.fields; + if (fields?.name && !tool.toolCallName) { + tool.toolCallName = fields.name; + } + if (typeof fields?.args === "string") { + const cumulative = fields.args; + if (cumulative.startsWith(tool.argsSoFar)) { + const delta = cumulative.slice(tool.argsSoFar.length); + tool.argsSoFar = cumulative; + if (delta.length > 0) { + push({ + type: EventType.TOOL_CALL_ARGS, + toolCallId: tool.toolCallId, + delta, + }); + } + } else { + // Engine replaced the buffer (e.g. arg correction). Send + // the full new string as a single delta and reset the + // cumulative tracker. + tool.argsSoFar = cumulative; + push({ + type: EventType.TOOL_CALL_ARGS, + toolCallId: tool.toolCallId, + delta: cumulative, + }); + } + } + } + break; + } + + case "content-block-finish": { + if (data.index == null) break; + // Dispatch by the FINISHING block's type rather than by + // tracker-presence. Text and reasoning can share an + // index (server emits text deltas at the same idx as a + // reasoning block), so we can't infer the finish target + // from "first map that has this index". 
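+            // (Worked example: a reasoning block opens at idx 0, then
+            // implicit text deltas also land at idx 0; a finish carrying
+            // `content.type === "text"` must close only the text tracker
+            // and leave the reasoning tracker for its own finish frame.)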
+ const finishType = (data as any)?.content?.type; + if (finishType === "text") { + const messageId = textBlockMessageIds.get(data.index); + if (messageId) { + push({ type: EventType.TEXT_MESSAGE_END, messageId }); + textBlockMessageIds.delete(data.index); + } + } else if ( + finishType === "reasoning" || + finishType === "thinking" + ) { + const r = reasoningBlocks.get(data.index); + if (r) { + if (r.messageStarted) { + push({ type: EventType.REASONING_MESSAGE_END, messageId: r.messageId }); + } + push({ type: EventType.REASONING_END, messageId: r.messageId }); + reasoningBlocks.delete(data.index); + } + } else if ( + finishType === "tool_call_chunk" || + finishType === "tool_call" + ) { + const tool = toolBlocks.get(data.index); + if (tool) { + push({ type: EventType.TOOL_CALL_END, toolCallId: tool.toolCallId }); + toolBlocks.delete(data.index); + } + } + break; + } + + case "message-finish": { + // Close any text blocks still open on this message. The + // server omits `content-block-finish` for implicitly-opened + // text blocks (text deltas reusing a reasoning block's + // index), so we need to flush them here. + for (const [index, messageId] of textBlockMessageIds) { + push({ type: EventType.TEXT_MESSAGE_END, messageId }); + textBlockMessageIds.delete(index); + } + activeMessageId = undefined; + break; + } + + case "message-error": { + activeMessageId = undefined; + textBlockMessageIds.clear(); + break; + } + } + break; + } + + case "values": { + // Cache only — actual snapshot emission happens at root + // lifecycle.completed. This skips the transient + // copilotkitMiddleware "intercept then restore" dip where the + // assistant message briefly loses its tool calls. + if (!isRootNamespace(event.params.namespace)) break; + cacheState(event.params.data); + break; + } + + case "tasks": { + // v3 protocol surfaces interrupt() calls as `tasks` events + // with an `interrupts: [...]` field on the task result — + // NOT as `input.requested` lifecycle events. The root + // lifecycle still terminates with `completed`. Scan tasks + // for interrupt entries and emit AG-UI CUSTOM `OnInterrupt`. + const data = event.params?.data as + | { + id?: string; + name?: string; + interrupts?: Array<{ id?: string; value?: unknown }>; + } + | undefined; + if (!data?.interrupts?.length) break; + for (const it of data.interrupts) { + if (!it?.id) continue; + if (emittedInterruptIds.has(it.id)) continue; + emittedInterruptIds.add(it.id); + const value = + typeof it.value === "string" + ? it.value + : JSON.stringify(it.value); + push({ + type: EventType.CUSTOM, + name: LangGraphEventTypes.OnInterrupt, + value, + } as ProcessedEvents); + } + break; + } + + case "custom": { + // Graph nodes can dispatch custom events to drive UI side + // channels (CopilotKit ManuallyEmit* helpers, app-specific + // notifications, etc.). v3 routes them through the generic + // `custom` channel with `data: { name, payload }`. The legacy + // translator expanded the three well-known ManuallyEmit* + // names into their concrete AG-UI events and passed through + // everything else as `CUSTOM`. Mirror that contract here. 
+ const data = event.params?.data as + | { name?: string; payload?: any } + | undefined; + if (!data?.name) break; + const name = data.name; + const payload = data.payload; + + if (name === CustomEventNames.ManuallyEmitMessage) { + const messageId = payload?.message_id; + const message = payload?.message; + if (messageId && typeof message === "string") { + push({ + type: EventType.TEXT_MESSAGE_START, + messageId, + role: "assistant", + }); + push({ + type: EventType.TEXT_MESSAGE_CONTENT, + messageId, + delta: message, + }); + push({ type: EventType.TEXT_MESSAGE_END, messageId }); + } + break; + } + + if (name === CustomEventNames.ManuallyEmitToolCall) { + const toolCallId = payload?.id; + const toolCallName = payload?.name; + const args = payload?.args; + if (toolCallId && toolCallName) { + push({ + type: EventType.TOOL_CALL_START, + toolCallId, + toolCallName, + parentMessageId: toolCallId, + }); + if (typeof args === "string" && args.length > 0) { + push({ + type: EventType.TOOL_CALL_ARGS, + toolCallId, + delta: args, + }); + } + push({ type: EventType.TOOL_CALL_END, toolCallId }); + } + break; + } + + if (name === CustomEventNames.ManuallyEmitState) { + // Manually-emitted state is the source of truth for the + // following snapshot. Merge into our cache so the next + // root-terminal flush carries the updated values, AND + // ship an immediate STATE_SNAPSHOT so consumers can react + // before the run ends (matches legacy behaviour). + if (payload && typeof payload === "object") { + cacheState(payload as State); + const { messages: _m, ...stateOnly } = payload as any; + push({ type: EventType.STATE_SNAPSHOT, snapshot: stateOnly }); + } + // Falls through to the generic CUSTOM passthrough below + // so application listeners that key off the event name + // still get it. + } + + // Generic passthrough: forward the event verbatim as CUSTOM. + push({ + type: EventType.CUSTOM, + name, + value: payload, + } as ProcessedEvents); + break; + } + + // checkpoints, updates, input — handled in subsequent phases. + // Drop through. 
+ default: + break; + } + + return true; + }, + }; +}; diff --git a/integrations/langgraph/typescript/src/transformer/index.ts b/integrations/langgraph/typescript/src/transformer/index.ts new file mode 100644 index 0000000000..e72b548b0f --- /dev/null +++ b/integrations/langgraph/typescript/src/transformer/index.ts @@ -0,0 +1 @@ +export * from './agui-transformer' \ No newline at end of file diff --git a/integrations/langgraph/typescript/src/types.ts b/integrations/langgraph/typescript/src/types.ts index d3945a9b52..e795597415 100644 --- a/integrations/langgraph/typescript/src/types.ts +++ b/integrations/langgraph/typescript/src/types.ts @@ -1,6 +1,31 @@ import { AssistantGraph, Message as LangGraphMessage } from "@langchain/langgraph-sdk"; import { MessageType } from "@langchain/core/messages"; -import { RunAgentInput } from "@ag-ui/core"; +import { + CustomEvent, + MessagesSnapshotEvent, + RawEvent, + RunAgentInput, + RunErrorEvent, + RunFinishedEvent, + RunStartedEvent, + StateDeltaEvent, + StateSnapshotEvent, + StepFinishedEvent, + StepStartedEvent, + TextMessageContentEvent, + TextMessageEndEvent, + TextMessageStartEvent, + ToolCallArgsEvent, + ToolCallEndEvent, + ToolCallStartEvent, + ToolCallResultEvent, + ReasoningStartEvent, + ReasoningMessageStartEvent, + ReasoningMessageContentEvent, + ReasoningMessageEndEvent, + ReasoningEndEvent, + ReasoningEncryptedValueEvent, +} from "@ag-ui/client"; export enum LangGraphEventTypes { OnChainStart = "on_chain_start", @@ -135,3 +160,28 @@ export interface LangGraphReasoning { index: number; signature?: string; } + +export type ProcessedEvents = + | TextMessageStartEvent + | TextMessageContentEvent + | TextMessageEndEvent + | ReasoningStartEvent + | ReasoningMessageStartEvent + | ReasoningMessageContentEvent + | ReasoningMessageEndEvent + | ReasoningEndEvent + | ReasoningEncryptedValueEvent + | ToolCallStartEvent + | ToolCallArgsEvent + | ToolCallEndEvent + | ToolCallResultEvent + | StateSnapshotEvent + | StateDeltaEvent + | MessagesSnapshotEvent + | RawEvent + | CustomEvent + | RunStartedEvent + | RunFinishedEvent + | RunErrorEvent + | StepStartedEvent + | StepFinishedEvent; \ No newline at end of file diff --git a/integrations/langgraph/typescript/tsdown.config.ts b/integrations/langgraph/typescript/tsdown.config.ts index fde1484e48..dae4cda3ad 100644 --- a/integrations/langgraph/typescript/tsdown.config.ts +++ b/integrations/langgraph/typescript/tsdown.config.ts @@ -1,7 +1,11 @@ import { defineConfig } from "tsdown"; export default defineConfig({ - entry: ["src/index.ts", "src/middlewares/index.ts"], + entry: [ + "src/index.ts", + "src/middlewares/index.ts", + "src/transformer/index.ts", + ], format: ["cjs", "esm"], dts: true, exports: true, diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index c24bbe7979..bb50e35b89 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -172,7 +172,7 @@ importers: version: 1.55.1(@ag-ui/core@sdks+typescript+packages+core)(@types/mdast@4.0.4)(@types/react-dom@19.2.2(@types/react@19.2.2))(@types/react@19.2.2)(graphql@16.11.0)(micromark-util-types@2.0.2)(micromark@4.0.2)(react-dom@19.2.1(react@19.2.1))(react@19.2.1)(zod@3.25.76) '@copilotkit/runtime': specifier: 1.55.1 - version: 1.55.1(3dd72e1b331c7057f722d889cc32f1ce) + version: 1.55.1(a11b0067a6baa046a88cc5e752bf6496) '@copilotkit/runtime-client-gql': specifier: 1.55.1 version: 1.55.1(@ag-ui/core@sdks+typescript+packages+core)(graphql@16.11.0)(react@19.2.1) @@ -685,14 +685,17 @@ importers: integrations/langgraph/typescript: dependencies: '@langchain/core': - 
specifier: ^1.1.40 - version: 1.1.40(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) + specifier: ^1.1.45 + version: 1.1.45(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) + '@langchain/langgraph': + specifier: ^1.3.0 + version: 1.3.0(@langchain/core@1.1.45(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3))(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(ws@8.18.3)(zod-to-json-schema@3.25.2(zod@3.25.76))(zod@3.25.76) '@langchain/langgraph-sdk': - specifier: ^1.8.8 - version: 1.8.8(@langchain/core@1.1.40(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3))(react-dom@19.2.1(react@19.2.3))(react@19.2.3) + specifier: ^1.9.2 + version: 1.9.2(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(ws@8.18.3) langchain: specifier: '>=1.2.0' - version: 1.2.32(@langchain/core@1.1.40(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3))(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(ws@8.18.3)(zod-to-json-schema@3.25.2(zod@3.25.76)) + version: 1.2.32(@langchain/core@1.1.45(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3))(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(ws@8.18.3)(zod-to-json-schema@3.25.2(zod@3.25.76)) partial-json: specifier: ^0.1.7 version: 0.1.7 @@ -813,7 +816,7 @@ importers: version: 0.17.4 '@copilotkit/runtime': specifier: 0.0.0-mme-ag-ui-0-0-46-20260227141603 - version: 0.0.0-mme-ag-ui-0-0-46-20260227141603(d21f54b13b2c2aa4c046f6b9f7abd449) + version: 0.0.0-mme-ag-ui-0-0-46-20260227141603(978280ed8d59075a7db0b91c6dc87403) '@copilotkit/shared': specifier: 0.0.0-mme-ag-ui-0-0-46-20260227141603 version: 0.0.0-mme-ag-ui-0-0-46-20260227141603(@ag-ui/core@sdks+typescript+packages+core) @@ -1389,12 +1392,18 @@ packages: '@ag-ui/core@0.0.52': resolution: {integrity: 
sha512-Xo0bUaNV56EqylzcrAuhUkQX7et7+SZIrqZZtEByGwEq/I1EHny6ZMkWHLkKR7UNi0FJZwJyhKYmKJS3B2SEgA==} + '@ag-ui/core@0.0.53': + resolution: {integrity: sha512-11UocR7fFdMWw503bWCX2IOK15vbWfxT11Mn9xOiPBVO/UVcn57ywGrlLL4UaBlPgmUTvuzr2yYR2ElSqiN2wQ==} + '@ag-ui/encoder@0.0.46': resolution: {integrity: sha512-XU6dTgUOFZsXeO+CxCMNl5R8NCbdUyifWP7sRNIi61Et3F/0d0JotLo1y1/9GMGfsJNnP7bjb4YYsx21R7YMlw==} '@ag-ui/encoder@0.0.52': resolution: {integrity: sha512-6GVDTb1dv2rjap7VVnmXYypDutZi6nrsTcdfxoP6ryDG5ynlXtmmS+FSDAt62JbIMD5CtEE963xNCb6d1iXw9g==} + '@ag-ui/encoder@0.0.53': + resolution: {integrity: sha512-bAOcfVdm6U4H6G6tW+DZfwPEQm1w/snVBTwaFn9nJcEMW69M7/HZuwvEc/7Zo0rK1jRL32N/j60PwTAeky19fw==} + '@ag-ui/langgraph@0.0.24': resolution: {integrity: sha512-ebTYpUw28fvbmhqbpAbmfsDTfEqm1gSeZaBcnxMGHFivJLCzsJ/C9hYw6aV8yRKV3lMFBwh/QFxn1eRcr7yRkQ==} peerDependencies: @@ -1418,6 +1427,9 @@ packages: '@ag-ui/proto@0.0.52': resolution: {integrity: sha512-+iCGzNUNL50YIoThVmsolWPjG4MJidl+R9k8QAGVwErEfHRtQ64KFyrdpeOXNVuWtM3SViJqPSgFyv7eGVS63A==} + '@ag-ui/proto@0.0.53': + resolution: {integrity: sha512-swjz22xWT8YUZt5OhmUwkARDQdwt8XM1hmGZbQrhRnNPXKwrKJX9ELlbnQ4iFUQIKkMWpphzE3vA3yNKs2bbKw==} + '@ai-sdk/anthropic@2.0.23': resolution: {integrity: sha512-ZEBiiv1UhjGjBwUU63pFhLK5LCSlNDb1idY9K1oZHm5/Fda1cuTojf32tOp0opH0RPbPAN/F8fyyNjbU33n9Kw==} engines: {node: '>=18'} @@ -3687,8 +3699,8 @@ packages: resolution: {integrity: sha512-vcJDV2vk1AlCwSh3aBm/urQ1ZrlXFFBocv11bz/NBUfLWD5/UDNMzwPdaAd2dKvNmTWa9FM2lirLU3+JCf4cRA==} engines: {node: '>=18'} - '@langchain/core@1.1.40': - resolution: {integrity: sha512-RJ41GQEMxr9ZEZNoIiPgW0+v9nAY6FEZGlk+MjBghr2GR8He50abLam0XCe1aqUJjuKbqt2lUD6M+6SZ+2NIJg==} + '@langchain/core@1.1.45': + resolution: {integrity: sha512-Y/wvuglLTMKJahkl4QD9dBIdF/z/CxZJWdTfHJF/q2jtlJtoFf6Mb5JpGxZfsi3mBY6NSG941FSLTcqhCKrhBA==} engines: {node: '>=20'} '@langchain/google-common@0.1.8': @@ -3709,6 +3721,12 @@ packages: peerDependencies: '@langchain/core': ^1.0.1 + '@langchain/langgraph-checkpoint@1.0.2': + resolution: {integrity: sha512-F4E5Tr0nt8FGghgdscJtHw+ABzChOHeI80R7Y1pjIHdiJom6c2ieo76vL+FWiny80JmoGqhrVAEIWrw0cXKPxg==} + engines: {node: '>=18'} + peerDependencies: + '@langchain/core': ^1.1.44 + '@langchain/langgraph-sdk@0.1.10': resolution: {integrity: sha512-9srSCb2bSvcvehMgjA2sMMwX0o1VUgPN6ghwm5Fwc9JGAKsQa6n1S4eCwy1h4abuYxwajH5n3spBw+4I2WYbgw==} peerDependencies: @@ -3723,20 +3741,14 @@ packages: react-dom: optional: true - '@langchain/langgraph-sdk@1.7.2': - resolution: {integrity: sha512-8ad5OTwqc15J/DxLNJYLn3IC2mpfow09nxJdszxhwm3KgsolGZIUV6g7m67C2p4j3cbQZD5USHt3hKEL0ahqoA==} + '@langchain/langgraph-sdk@1.9.2': + resolution: {integrity: sha512-1kDPjR0VH/39q2h8k0Sxi35KxOvEQPModVCepxGLlRkbZmuWUH+zfICuJd3rmD1ByeOKQBZEaB7Y+VCYmSMt1w==} peerDependencies: - '@angular/core': ^18.0.0 || ^19.0.0 || ^20.0.0 - '@langchain/core': ^1.1.16 react: ^18 || ^19 react-dom: ^18 || ^19 svelte: ^4.0.0 || ^5.0.0 vue: ^3.0.0 peerDependenciesMeta: - '@angular/core': - optional: true - '@langchain/core': - optional: true react: optional: true react-dom: @@ -3746,31 +3758,11 @@ packages: vue: optional: true - '@langchain/langgraph-sdk@1.8.8': - resolution: {integrity: sha512-4OoqFAvPloOTZ6oPxXbJngz4FLJO8QSXb+BQV3qvNTvmfu1LQA7cCEqSNLYX9MoC340PbnDkHNgUtjajwkDHRg==} - peerDependencies: - '@langchain/core': ^1.1.16 - react: ^18 || ^19 - react-dom: ^18 || ^19 - svelte: ^4.0.0 || ^5.0.0 - vue: ^3.0.0 - peerDependenciesMeta: - '@langchain/core': - optional: true - react: - optional: true - react-dom: - optional: true - svelte: - optional: 
true - vue: - optional: true - - '@langchain/langgraph@1.2.2': - resolution: {integrity: sha512-1F94azb3b3TpHi5Jxa7gFvAYXuSIsEobEXk3/PD7+gkOobIC5Jty3+/ATSkH7joUo0bXCF/rgMOjRGusi6YvSQ==} + '@langchain/langgraph@1.3.0': + resolution: {integrity: sha512-QvhTjiyqFPz81A+y6LHs223w6DTjv5+882DT4mup72bd72rRhNjTYo5fhes5um0swnKArvY/arc7KeFInfHHWw==} engines: {node: '>=18'} peerDependencies: - '@langchain/core': ^1.1.16 + '@langchain/core': ^1.1.44 zod: ^3.25.32 || ^4.2.0 zod-to-json-schema: ^3.x peerDependenciesMeta: @@ -3783,6 +3775,9 @@ packages: peerDependencies: '@langchain/core': ^1.0.0 + '@langchain/protocol@0.0.15': + resolution: {integrity: sha512-MllvbpMjqHevUm+v94M422mH7XKN+wGCvJRBVROTWBotEDOATYB4Ktk2UheYP859y9o2LlhtPek5t1T9eyfAbQ==} + '@libsql/client@0.15.15': resolution: {integrity: sha512-twC0hQxPNHPKfeOv3sNT6u2pturQjLcI+CnpTM0SjRpocEGgfiZ7DWKXLNnsothjyJmDqEsBQJ5ztq9Wlu470w==} @@ -11421,6 +11416,7 @@ packages: uuid@9.0.1: resolution: {integrity: sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==} + deprecated: uuid@10 and below is no longer supported. For ESM codebases, update to uuid@latest. For CommonJS codebases, use uuid@11 (but be aware this version will likely be deprecated in 2028). hasBin: true uvu@0.5.6: @@ -11886,6 +11882,10 @@ snapshots: dependencies: zod: 3.25.76 + '@ag-ui/core@0.0.53': + dependencies: + zod: 3.25.76 + '@ag-ui/encoder@0.0.46': dependencies: '@ag-ui/core': 0.0.46 @@ -11896,6 +11896,11 @@ snapshots: '@ag-ui/core': 0.0.52 '@ag-ui/proto': 0.0.52 + '@ag-ui/encoder@0.0.53': + dependencies: + '@ag-ui/core': 0.0.53 + '@ag-ui/proto': 0.0.53 + '@ag-ui/langgraph@0.0.24(@ag-ui/client@0.0.46)(@ag-ui/core@0.0.46)(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)': dependencies: '@ag-ui/client': 0.0.46 @@ -11922,7 +11927,6 @@ snapshots: partial-json: 0.1.7 rxjs: 7.8.1 transitivePeerDependencies: - - '@angular/core' - '@opentelemetry/api' - '@opentelemetry/exporter-trace-otlp-proto' - '@opentelemetry/sdk-trace-base' @@ -11954,6 +11958,12 @@ snapshots: '@bufbuild/protobuf': 2.9.0 '@protobuf-ts/protoc': 2.11.1 + '@ag-ui/proto@0.0.53': + dependencies: + '@ag-ui/core': 0.0.53 + '@bufbuild/protobuf': 2.9.0 + '@protobuf-ts/protoc': 2.11.1 + '@ai-sdk/anthropic@2.0.23(zod@3.25.76)': dependencies: '@ai-sdk/provider': 2.0.0 @@ -13875,7 +13885,7 @@ snapshots: - encoding - graphql - '@copilotkit/runtime@0.0.0-mme-ag-ui-0-0-46-20260227141603(d21f54b13b2c2aa4c046f6b9f7abd449)': + '@copilotkit/runtime@0.0.0-mme-ag-ui-0-0-46-20260227141603(978280ed8d59075a7db0b91c6dc87403)': dependencies: '@ag-ui/client': 0.0.46 '@ag-ui/core': 0.0.46 @@ -13884,7 +13894,7 @@ snapshots: '@ai-sdk/openai': 2.0.52(zod@3.25.76) '@copilotkit/shared': 0.0.0-mme-ag-ui-0-0-46-20260227141603(@ag-ui/core@0.0.46) '@copilotkitnext/agent': 0.0.0-mme-ag-ui-0-0-46-20260227141603 - '@copilotkitnext/runtime': 0.0.0-mme-ag-ui-0-0-46-20260227141603(@ag-ui/client@0.0.46)(@ag-ui/core@0.0.46)(@ag-ui/encoder@0.0.52)(@copilotkitnext/shared@0.0.0-mme-ag-ui-0-0-46-20260227141603) + '@copilotkitnext/runtime': 0.0.0-mme-ag-ui-0-0-46-20260227141603(@ag-ui/client@0.0.46)(@ag-ui/core@0.0.46)(@ag-ui/encoder@0.0.53)(@copilotkitnext/shared@0.0.0-mme-ag-ui-0-0-46-20260227141603) '@graphql-yoga/plugin-defer-stream': 3.16.0(graphql-yoga@5.16.0(graphql@16.11.0))(graphql@16.11.0) '@hono/node-server': 
1.19.7(hono@4.11.5) '@langchain/core': 0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)) @@ -13907,7 +13917,7 @@ snapshots: '@anthropic-ai/sdk': 0.57.0 '@langchain/aws': 0.1.15(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))) '@langchain/google-gauth': 0.1.8(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(zod@3.25.76) - '@langchain/langgraph-sdk': 1.8.8(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(react-dom@19.2.1(react@19.2.3))(react@19.2.3) + '@langchain/langgraph-sdk': 1.9.2(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(ws@8.18.3) '@langchain/openai': 1.0.0(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(ws@8.18.3) groq-sdk: 0.5.0 langchain: 1.2.32(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(ws@8.18.3)(zod-to-json-schema@3.25.2(zod@3.25.76)) @@ -13923,7 +13933,7 @@ snapshots: - react-dom - supports-color - '@copilotkit/runtime@1.55.1(3dd72e1b331c7057f722d889cc32f1ce)': + '@copilotkit/runtime@1.55.1(a11b0067a6baa046a88cc5e752bf6496)': dependencies: '@ag-ui/a2ui-middleware': 0.0.3(@ag-ui/client@0.0.52)(rxjs@7.8.1) '@ag-ui/client': 0.0.52 @@ -13970,12 +13980,11 @@ snapshots: '@anthropic-ai/sdk': 0.57.0 '@langchain/aws': 0.1.15(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))) '@langchain/google-gauth': 0.1.8(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(zod@3.25.76) - '@langchain/langgraph-sdk': 1.8.8(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(react-dom@19.2.1(react@19.2.1))(react@19.2.1) + '@langchain/langgraph-sdk': 
1.9.2(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.1))(react@19.2.1)(ws@8.18.3) '@langchain/openai': 1.0.0(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(ws@8.18.3) groq-sdk: 0.5.0 langchain: 1.2.32(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.1))(react@19.2.1)(ws@8.18.3)(zod-to-json-schema@3.25.2(zod@3.25.76)) transitivePeerDependencies: - - '@angular/core' - '@opentelemetry/api' - '@opentelemetry/exporter-trace-otlp-proto' - '@opentelemetry/sdk-trace-base' @@ -14069,11 +14078,11 @@ snapshots: transitivePeerDependencies: - supports-color - '@copilotkitnext/runtime@0.0.0-mme-ag-ui-0-0-46-20260227141603(@ag-ui/client@0.0.46)(@ag-ui/core@0.0.46)(@ag-ui/encoder@0.0.52)(@copilotkitnext/shared@0.0.0-mme-ag-ui-0-0-46-20260227141603)': + '@copilotkitnext/runtime@0.0.0-mme-ag-ui-0-0-46-20260227141603(@ag-ui/client@0.0.46)(@ag-ui/core@0.0.46)(@ag-ui/encoder@0.0.53)(@copilotkitnext/shared@0.0.0-mme-ag-ui-0-0-46-20260227141603)': dependencies: '@ag-ui/client': 0.0.46 '@ag-ui/core': 0.0.46 - '@ag-ui/encoder': 0.0.52 + '@ag-ui/encoder': 0.0.53 '@copilotkitnext/shared': 0.0.0-mme-ag-ui-0-0-46-20260227141603 cors: 2.8.5 express: 4.21.2 @@ -15015,7 +15024,26 @@ snapshots: - '@opentelemetry/sdk-trace-base' - openai - '@langchain/core@1.1.40(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3)': + '@langchain/core@1.1.45(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3)': + dependencies: + '@cfworker/json-schema': 4.1.1 + '@standard-schema/spec': 1.1.0 + ansi-styles: 5.2.0 + camelcase: 6.3.0 + decamelize: 1.2.0 + js-tiktoken: 1.0.21 + langsmith: 0.5.10(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) + mustache: 4.2.0 + p-queue: 6.6.2 + zod: 3.25.76 + transitivePeerDependencies: + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' + - openai + - ws + + '@langchain/core@1.1.45(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3)': dependencies: '@cfworker/json-schema': 4.1.1 '@standard-schema/spec': 1.1.0 @@ -15026,7 +15054,6 @@ snapshots: langsmith: 
0.5.10(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) mustache: 4.2.0 p-queue: 6.6.2 - uuid: 11.1.0 zod: 3.25.76 transitivePeerDependencies: - '@opentelemetry/api' @@ -15060,9 +15087,19 @@ snapshots: '@langchain/core': 0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)) uuid: 10.0.0 - '@langchain/langgraph-checkpoint@1.0.0(@langchain/core@1.1.40(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3))': + '@langchain/langgraph-checkpoint@1.0.0(@langchain/core@1.1.45(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3))': + dependencies: + '@langchain/core': 1.1.45(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) + uuid: 10.0.0 + + '@langchain/langgraph-checkpoint@1.0.2(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))': + dependencies: + '@langchain/core': 0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)) + uuid: 10.0.0 + + '@langchain/langgraph-checkpoint@1.0.2(@langchain/core@1.1.45(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3))': dependencies: - '@langchain/core': 1.1.40(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) + '@langchain/core': 1.1.45(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) uuid: 10.0.0 '@langchain/langgraph-sdk@0.1.10(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(react-dom@19.2.1(react@19.2.1))(react@19.2.1)': @@ -15087,126 +15124,127 @@ snapshots: react: 19.2.3 react-dom: 19.2.1(react@19.2.3) - '@langchain/langgraph-sdk@1.7.2(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(react-dom@19.2.1(react@19.2.1))(react@19.2.1)': - dependencies: - '@types/json-schema': 7.0.15 - p-queue: 9.1.0 
- p-retry: 7.1.1 - uuid: 13.0.0 - optionalDependencies: - '@langchain/core': 0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)) - react: 19.2.1 - react-dom: 19.2.1(react@19.2.1) - - '@langchain/langgraph-sdk@1.7.2(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)': - dependencies: - '@types/json-schema': 7.0.15 - p-queue: 9.1.0 - p-retry: 7.1.1 - uuid: 13.0.0 - optionalDependencies: - '@langchain/core': 0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)) - react: 19.2.3 - react-dom: 19.2.1(react@19.2.3) - optional: true - - '@langchain/langgraph-sdk@1.7.2(@langchain/core@1.1.40(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)': - dependencies: - '@types/json-schema': 7.0.15 - p-queue: 9.1.0 - p-retry: 7.1.1 - uuid: 13.0.0 - optionalDependencies: - '@langchain/core': 1.1.40(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) - react: 19.2.3 - react-dom: 19.2.1(react@19.2.3) - - '@langchain/langgraph-sdk@1.8.8(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(react-dom@19.2.1(react@19.2.1))(react@19.2.1)': + '@langchain/langgraph-sdk@1.9.2(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.1))(react@19.2.1)(ws@8.18.3)': dependencies: + '@langchain/core': 1.1.45(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) + '@langchain/protocol': 0.0.15 '@types/json-schema': 7.0.15 p-queue: 9.1.0 p-retry: 7.1.1 uuid: 13.0.0 optionalDependencies: - '@langchain/core': 0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)) react: 19.2.1 react-dom: 19.2.1(react@19.2.1) - optional: true + transitivePeerDependencies: + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' + - openai + - ws - 
'@langchain/langgraph-sdk@1.8.8(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)': + '@langchain/langgraph-sdk@1.9.2(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(ws@8.18.3)': dependencies: + '@langchain/core': 1.1.45(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) + '@langchain/protocol': 0.0.15 '@types/json-schema': 7.0.15 p-queue: 9.1.0 p-retry: 7.1.1 uuid: 13.0.0 optionalDependencies: - '@langchain/core': 0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)) react: 19.2.3 react-dom: 19.2.1(react@19.2.3) + transitivePeerDependencies: + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' + - openai + - ws optional: true - '@langchain/langgraph-sdk@1.8.8(@langchain/core@1.1.40(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)': + '@langchain/langgraph-sdk@1.9.2(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(ws@8.18.3)': dependencies: + '@langchain/core': 1.1.45(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) + '@langchain/protocol': 0.0.15 '@types/json-schema': 7.0.15 p-queue: 9.1.0 p-retry: 7.1.1 uuid: 13.0.0 optionalDependencies: - '@langchain/core': 1.1.40(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) react: 19.2.3 react-dom: 19.2.1(react@19.2.3) + transitivePeerDependencies: + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' + - openai + - ws - '@langchain/langgraph@1.2.2(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(react-dom@19.2.1(react@19.2.1))(react@19.2.1)(zod-to-json-schema@3.25.2(zod@3.25.76))(zod@3.25.76)': + 
'@langchain/langgraph@1.3.0(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.1))(react@19.2.1)(ws@8.18.3)(zod-to-json-schema@3.25.2(zod@3.25.76))(zod@3.25.76)': dependencies: '@langchain/core': 0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)) - '@langchain/langgraph-checkpoint': 1.0.0(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))) - '@langchain/langgraph-sdk': 1.7.2(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(react-dom@19.2.1(react@19.2.1))(react@19.2.1) + '@langchain/langgraph-checkpoint': 1.0.2(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))) + '@langchain/langgraph-sdk': 1.9.2(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.1))(react@19.2.1)(ws@8.18.3) + '@langchain/protocol': 0.0.15 '@standard-schema/spec': 1.1.0 uuid: 10.0.0 zod: 3.25.76 optionalDependencies: zod-to-json-schema: 3.25.2(zod@3.25.76) transitivePeerDependencies: - - '@angular/core' + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' + - openai - react - react-dom - svelte - vue + - ws - '@langchain/langgraph@1.2.2(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(zod-to-json-schema@3.25.2(zod@3.25.76))(zod@3.25.76)': + '@langchain/langgraph@1.3.0(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(ws@8.18.3)(zod-to-json-schema@3.25.2(zod@3.25.76))(zod@3.25.76)': dependencies: '@langchain/core': 0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)) - '@langchain/langgraph-checkpoint': 
1.0.0(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))) - '@langchain/langgraph-sdk': 1.7.2(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(react-dom@19.2.1(react@19.2.3))(react@19.2.3) + '@langchain/langgraph-checkpoint': 1.0.2(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))) + '@langchain/langgraph-sdk': 1.9.2(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(ws@8.18.3) + '@langchain/protocol': 0.0.15 '@standard-schema/spec': 1.1.0 uuid: 10.0.0 zod: 3.25.76 optionalDependencies: zod-to-json-schema: 3.25.2(zod@3.25.76) transitivePeerDependencies: - - '@angular/core' + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' + - openai - react - react-dom - svelte - vue + - ws optional: true - '@langchain/langgraph@1.2.2(@langchain/core@1.1.40(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(zod-to-json-schema@3.25.2(zod@3.25.76))(zod@3.25.76)': + '@langchain/langgraph@1.3.0(@langchain/core@1.1.45(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3))(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(ws@8.18.3)(zod-to-json-schema@3.25.2(zod@3.25.76))(zod@3.25.76)': dependencies: - '@langchain/core': 1.1.40(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) - '@langchain/langgraph-checkpoint': 1.0.0(@langchain/core@1.1.40(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3)) - '@langchain/langgraph-sdk': 1.7.2(@langchain/core@1.1.40(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3))(react-dom@19.2.1(react@19.2.3))(react@19.2.3) + '@langchain/core': 1.1.45(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) + 
'@langchain/langgraph-checkpoint': 1.0.2(@langchain/core@1.1.45(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3)) + '@langchain/langgraph-sdk': 1.9.2(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(ws@8.18.3) + '@langchain/protocol': 0.0.15 '@standard-schema/spec': 1.1.0 uuid: 10.0.0 zod: 3.25.76 optionalDependencies: zod-to-json-schema: 3.25.2(zod@3.25.76) transitivePeerDependencies: - - '@angular/core' + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' + - openai - react - react-dom - svelte - vue + - ws '@langchain/openai@1.0.0(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(ws@8.18.3)': dependencies: @@ -15217,6 +15255,8 @@ snapshots: transitivePeerDependencies: - ws + '@langchain/protocol@0.0.15': {} + '@libsql/client@0.15.15': dependencies: '@libsql/core': 0.15.15 @@ -19415,7 +19455,7 @@ snapshots: '@next/eslint-plugin-next': 16.0.7 eslint: 9.37.0(jiti@2.6.1) eslint-import-resolver-node: 0.3.9 - eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.48.1(eslint@9.37.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.37.0(jiti@2.6.1)))(eslint@9.37.0(jiti@2.6.1)) + eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@9.37.0(jiti@2.6.1)) eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.48.1(eslint@9.37.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-typescript@3.10.1)(eslint@9.37.0(jiti@2.6.1)) eslint-plugin-jsx-a11y: 6.10.2(eslint@9.37.0(jiti@2.6.1)) eslint-plugin-react: 7.37.5(eslint@9.37.0(jiti@2.6.1)) @@ -19438,7 +19478,7 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.48.1(eslint@9.37.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.37.0(jiti@2.6.1)))(eslint@9.37.0(jiti@2.6.1)): + eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0)(eslint@9.37.0(jiti@2.6.1)): dependencies: '@nolyfill/is-core-module': 1.0.39 debug: 4.4.3 @@ -19453,14 +19493,14 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-module-utils@2.12.1(@typescript-eslint/parser@8.48.1(eslint@9.37.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.48.1(eslint@9.37.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.37.0(jiti@2.6.1)))(eslint@9.37.0(jiti@2.6.1)))(eslint@9.37.0(jiti@2.6.1)): + eslint-module-utils@2.12.1(@typescript-eslint/parser@8.48.1(eslint@9.37.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@9.37.0(jiti@2.6.1)): dependencies: debug: 3.2.7 optionalDependencies: '@typescript-eslint/parser': 8.48.1(eslint@9.37.0(jiti@2.6.1))(typescript@5.9.3) eslint: 9.37.0(jiti@2.6.1) eslint-import-resolver-node: 0.3.9 - eslint-import-resolver-typescript: 
3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.48.1(eslint@9.37.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.37.0(jiti@2.6.1)))(eslint@9.37.0(jiti@2.6.1)) + eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@9.37.0(jiti@2.6.1)) transitivePeerDependencies: - supports-color @@ -19475,7 +19515,7 @@ snapshots: doctrine: 2.1.0 eslint: 9.37.0(jiti@2.6.1) eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.48.1(eslint@9.37.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.48.1(eslint@9.37.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.37.0(jiti@2.6.1)))(eslint@9.37.0(jiti@2.6.1)))(eslint@9.37.0(jiti@2.6.1)) + eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.48.1(eslint@9.37.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@9.37.0(jiti@2.6.1)) hasown: 2.0.2 is-core-module: 2.16.1 is-glob: 4.0.3 @@ -21214,13 +21254,12 @@ snapshots: langchain@1.2.32(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.1))(react@19.2.1)(ws@8.18.3)(zod-to-json-schema@3.25.2(zod@3.25.76)): dependencies: '@langchain/core': 0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)) - '@langchain/langgraph': 1.2.2(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(react-dom@19.2.1(react@19.2.1))(react@19.2.1)(zod-to-json-schema@3.25.2(zod@3.25.76))(zod@3.25.76) + '@langchain/langgraph': 1.3.0(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.1))(react@19.2.1)(ws@8.18.3)(zod-to-json-schema@3.25.2(zod@3.25.76))(zod@3.25.76) '@langchain/langgraph-checkpoint': 1.0.0(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))) langsmith: 0.5.10(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) uuid: 11.1.0 zod: 3.25.76 transitivePeerDependencies: - - '@angular/core' - '@opentelemetry/api' - '@opentelemetry/exporter-trace-otlp-proto' - '@opentelemetry/sdk-trace-base' @@ -21235,13 +21274,12 @@ snapshots: 
langchain@1.2.32(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(ws@8.18.3)(zod-to-json-schema@3.25.2(zod@3.25.76)): dependencies: '@langchain/core': 0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)) - '@langchain/langgraph': 1.2.2(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(zod-to-json-schema@3.25.2(zod@3.25.76))(zod@3.25.76) + '@langchain/langgraph': 1.3.0(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(ws@8.18.3)(zod-to-json-schema@3.25.2(zod@3.25.76))(zod@3.25.76) '@langchain/langgraph-checkpoint': 1.0.0(@langchain/core@0.3.80(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))) langsmith: 0.5.10(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) uuid: 11.1.0 zod: 3.25.76 transitivePeerDependencies: - - '@angular/core' - '@opentelemetry/api' - '@opentelemetry/exporter-trace-otlp-proto' - '@opentelemetry/sdk-trace-base' @@ -21254,16 +21292,15 @@ snapshots: - zod-to-json-schema optional: true - langchain@1.2.32(@langchain/core@1.1.40(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3))(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(ws@8.18.3)(zod-to-json-schema@3.25.2(zod@3.25.76)): + 
langchain@1.2.32(@langchain/core@1.1.45(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3))(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(ws@8.18.3)(zod-to-json-schema@3.25.2(zod@3.25.76)): dependencies: - '@langchain/core': 1.1.40(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) - '@langchain/langgraph': 1.2.2(@langchain/core@1.1.40(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(zod-to-json-schema@3.25.2(zod@3.25.76))(zod@3.25.76) - '@langchain/langgraph-checkpoint': 1.0.0(@langchain/core@1.1.40(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3)) + '@langchain/core': 1.1.45(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) + '@langchain/langgraph': 1.3.0(@langchain/core@1.1.45(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3))(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(react-dom@19.2.1(react@19.2.3))(react@19.2.3)(ws@8.18.3)(zod-to-json-schema@3.25.2(zod@3.25.76))(zod@3.25.76) + '@langchain/langgraph-checkpoint': 1.0.0(@langchain/core@1.1.45(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3)) langsmith: 0.5.10(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.2.0(@opentelemetry/api@1.9.0))(openai@6.10.0(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) uuid: 11.1.0 zod: 3.25.76 transitivePeerDependencies: - - '@angular/core' - '@opentelemetry/api' - '@opentelemetry/exporter-trace-otlp-proto' - '@opentelemetry/sdk-trace-base'