content/cookbook/01-next/21-stream-text-with-chat-prompt.mdx
Chat completion can sometimes take a long time to finish, especially when the response is big. In such cases, it is useful to stream the chat completion to the client in real-time. This allows the client to display the new message as it is being generated by the model, rather than having users wait for it to finish.
<Browser>
  <ChatGeneration
    stream
    history={[
      { role: 'User', content: 'How is it going?' },
      { role: 'Assistant', content: 'All good, how may I help you?' },
    ]}
    inputMessage={{ role: 'User', content: 'Why is the sky blue?' }}
    outputMessage={{
      role: 'Assistant',
      content: 'The sky is blue because of rayleigh scattering.',
    }}
  />
</Browser>

Let's create a React component that imports the `useChat` hook from the `@ai-sdk/react` module. The `useChat` hook will call the `/api/chat` endpoint when the user sends a message. The endpoint will generate the assistant's response based on the conversation history and stream it to the client.
'use client';

import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import { useState } from 'react';

/**
 * Chat page: renders an input box and the streamed conversation.
 *
 * `useChat` manages the message list and streams the assistant's reply
 * from the `/api/chat` endpoint as it is generated.
 */
export default function Page() {
  // Controlled input state for the message being composed.
  const [input, setInput] = useState('');
  const { messages, sendMessage } = useChat({
    transport: new DefaultChatTransport({
      api: '/api/chat',
    }),
  });

  return (
    <div>
      <input
        value={input}
        onChange={event => {
          setInput(event.target.value);
        }}
        onKeyDown={event => {
          // Ignore blank submissions; send on Enter and clear the field
          // so the input is ready for the next message.
          if (event.key === 'Enter' && input.trim() !== '') {
            sendMessage({
              parts: [{ type: 'text', text: input }],
            });
            setInput('');
          }
        }}
      />
      {messages.map(message => (
        // Use the stable message id as the key instead of the array index.
        <div key={message.id}>
          {message.parts.map((part, partIndex) => {
            if (part.type === 'text') {
              // Include the part index so multiple text parts in one
              // message do not produce duplicate keys.
              return (
                <div key={`${message.id}-text-${partIndex}`}>{part.text}</div>
              );
            }
          })}
        </div>
      ))}
    </div>
  );
}
Next, let's create the /api/chat endpoint that generates the assistant's response based on the conversation history.
import { convertToModelMessages, streamText, type UIMessage } from 'ai';

/**
 * POST /api/chat
 *
 * Receives the full UI message history from the client, converts it to
 * model messages, and streams the assistant's response back as a UI
 * message stream that `useChat` can consume.
 *
 * @param req - Request whose JSON body is `{ messages: UIMessage[] }`.
 * @returns A streaming Response produced by `toUIMessageStreamResponse`.
 */
export async function POST(req: Request) {
  const { messages }: { messages: UIMessage[] } = await req.json();

  const result = streamText({
    model: 'openai/gpt-4o',
    system: 'You are a helpful assistant.',
    // convertToModelMessages is synchronous in AI SDK v5 — no await needed.
    messages: convertToModelMessages(messages),
  });

  return result.toUIMessageStreamResponse();
}