docs/src/content/en/reference/ai-sdk/handle-chat-stream.mdx
import PropertiesTable from "@site/src/components/PropertiesTable";
Framework-agnostic handler for streaming agent chat in AI SDK-compatible format. Use this function directly when you need to handle chat streaming outside of a Hono server or Mastra's own apiRoutes feature.
handleChatStream() returns a ReadableStream that you can wrap with createUIMessageStreamResponse().
handleChatStream() emits the AI SDK v5 stream contract by default. If your app is typed against AI SDK v6, pass version: 'v6'.
Use chatRoute() if you want to create a chat route inside a Mastra server.
Next.js App Router example:
import { handleChatStream } from '@mastra/ai-sdk'
import { createUIMessageStreamResponse } from 'ai'
import { mastra } from '@/src/mastra'
export async function POST(req: Request) {
const params = await req.json()
const stream = await handleChatStream({
mastra,
agentId: 'weatherAgent',
params,
messageMetadata: ({ part }) => ({ createdAt: new Date().toISOString() }),
})
return createUIMessageStreamResponse({ stream })
}
<PropertiesTable
content={[
{
name: 'version',
type: "'v5' | 'v6'",
description:
"Selects the AI SDK stream contract to emit. Omit it or pass 'v5' for the existing default behavior. Pass 'v6' when your app is typed against AI SDK v6 response helpers.",
isOptional: true,
defaultValue: "'v5'",
},
{
name: 'mastra',
type: 'Mastra',
description: 'The Mastra instance containing registered agents.',
isOptional: false,
},
{
name: 'agentId',
type: 'string',
description: 'The ID of the agent to use for chat.',
isOptional: false,
},
{
name: 'params',
type: 'ChatStreamHandlerParams',
description: 'Parameters for the chat stream, including messages and optional resume data.',
isOptional: false,
},
{
name: 'params.messages',
type: 'UIMessage[]',
description: 'Array of messages in the conversation.',
isOptional: false,
},
{
name: 'params.resumeData',
type: 'Record<string, any>',
description: 'Data for resuming a suspended agent execution. Requires runId to be set.',
isOptional: true,
},
{
name: 'params.runId',
type: 'string',
description: 'The run ID. Required when resumeData is provided.',
isOptional: true,
},
{
name: 'params.providerOptions',
type: 'Record<string, Record<string, unknown>>',
description:
'Provider-specific options passed to the language model (e.g. { openai: { reasoningEffort: "high" } }). Merged with defaultOptions.providerOptions, with params taking precedence.',
isOptional: true,
},
{
name: 'params.requestContext',
type: 'RequestContext',
description: 'Request context to pass to the agent execution.',
isOptional: true,
},
{
name: 'defaultOptions',
type: 'AgentExecutionOptions',
description:
'Default options passed to agent execution. These are merged with params, with params taking precedence.',
isOptional: true,
},
{
name: 'sendStart',
type: 'boolean',
description: 'Whether to send start events in the stream.',
isOptional: true,
defaultValue: 'true',
},
{
name: 'sendFinish',
type: 'boolean',
description: 'Whether to send finish events in the stream.',
isOptional: true,
defaultValue: 'true',
},
{
name: 'sendReasoning',
type: 'boolean',
description: 'Whether to include reasoning steps in the stream.',
isOptional: true,
defaultValue: 'false',
},
{
name: 'sendSources',
type: 'boolean',
description: 'Whether to include source citations in the stream.',
isOptional: true,
defaultValue: 'false',
},
{
name: 'messageMetadata',
type: '(options: { part: UIMessageStreamPart }) => Record<string, unknown> | undefined',
description:
'A function that receives the current stream part and returns metadata to attach to start and finish chunks. See the AI SDK message metadata docs for details.',
isOptional: true,
},
]}
/>