content/cookbook/01-next/80-send-custom-body-from-use-chat.mdx
By default, `useChat` sends all messages, along with information from the request, to the server.
However, it is often desirable to control the entire body content that is sent to the server, e.g. to send only the last message or to include additional data.
The `prepareSendMessagesRequest` option allows you to customize the entire body content that is sent to the server.
The function receives the message list, the request data, and the request body from the `sendMessage` call.
It should return the body content that will be sent to the server.
This example shows how to only send the text of the last message to the server. This can be useful if you want to reduce the amount of data sent to the server.
'use client';
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import { useState } from 'react';
/**
 * Chat page that sends only the chat id and the last message to the server
 * (instead of the full message history) via a custom transport body.
 */
export default function Chat() {
  const [input, setInput] = useState('');
  const { messages, sendMessage } = useChat({
    transport: new DefaultChatTransport({
      // Reduce the request payload: send only the chat id and the most
      // recent message; the server reloads the rest of the history.
      prepareSendMessagesRequest: ({ id, messages }) => {
        return {
          body: {
            id,
            message: messages[messages.length - 1],
          },
        };
      },
    }),
  });
  return (
    <div>
      {/* Key messages by their stable id rather than the array index. */}
      {messages.map((message) => (
        <div key={message.id}>
          {message.role === 'user' ? 'User: ' : 'AI: '}
          {message.parts.map((part, partIndex) => {
            switch (part.type) {
              case 'text':
                // Include the part index so multiple text parts within one
                // message get unique React keys.
                return (
                  <div key={`${message.id}-text-${partIndex}`}>{part.text}</div>
                );
            }
          })}
        </div>
      ))}
      <form onSubmit={(e) => {
        e.preventDefault();
        sendMessage({text: input});
        setInput('');
      }}>
        <input value={input} onChange={(e) => setInput(e.currentTarget.value)} />
      </form>
    </div>
  );
}
We need to adjust the server to receive the custom request format with the chat ID and last message. The rest of the message history can be loaded from storage.
import { convertToModelMessages, streamText } from 'ai';
// Allow streaming responses up to 30 seconds
export const maxDuration = 30;
export async function POST(req: Request) {
const { id, message } = await req.json();
// Load existing messages and add the new one
const messages = await loadMessages(id);
messages.push(message);
// Call the language model
const result = streamText({
model: 'openai/gpt-4.1',
messages: await convertToModelMessages(messages),
});
// Respond with the stream
return result.toUIMessageStreamResponse({
originalMessages: messages,
onFinish: ({ messages: newMessages }) => {
saveMessages(id, newMessages);
},
});
}