content/docs/05-ai-sdk-rsc/06-loading-state.mdx
Given that responses from language models can often take a while to complete, it's crucial to be able to show loading state to users. This provides visual feedback that the system is working on their request and helps maintain a positive user experience.
There are three approaches you can take to handle loading state with the AI SDK RSC, each covered below: managing a loading flag on the client yourself, streaming a dedicated loading-state value from the server, and streaming a loading component directly with `streamUI`.
Let's create a simple Next.js page that will call the `generateResponse` function when the form is submitted. The function will take in the user's prompt (`input`) and then generate a response (`response`). To handle the loading state, use a `loading` state variable. When the form is submitted, set `loading` to `true`, and when the response is received, set it back to `false`. While the response is being streamed, the input field will be disabled.
'use client';

import { useState } from 'react';
import { generateResponse } from './actions';
import { readStreamableValue } from '@ai-sdk/rsc';

// Force the page to be dynamic and allow streaming responses up to 30 seconds
export const maxDuration = 30;

export default function Home() {
  const [input, setInput] = useState<string>('');
  const [generation, setGeneration] = useState<string>('');
  const [loading, setLoading] = useState<boolean>(false);

  return (
    <div>
      <div>{generation}</div>
      <form
        onSubmit={async e => {
          e.preventDefault();
          setLoading(true);
          // try/finally guarantees the input is re-enabled even if the
          // server action or the stream throws mid-generation; without it,
          // `loading` would stay true and the form would be stuck disabled.
          try {
            const response = await generateResponse(input);

            let textContent = '';
            for await (const delta of readStreamableValue(response)) {
              // Accumulate the streamed deltas and re-render on each one.
              textContent = `${textContent}${delta}`;
              setGeneration(textContent);
            }
            setInput('');
          } finally {
            setLoading(false);
          }
        }}
      >
        <input
          type="text"
          value={input}
          disabled={loading}
          className="disabled:opacity-50"
          onChange={event => {
            setInput(event.target.value);
          }}
        />
        <button>Send Message</button>
      </form>
    </div>
  );
}
Now let's implement the generateResponse function. Use the streamText function to generate a response to the input.
'use server';

import { streamText } from 'ai';
__PROVIDER_IMPORT__;
import { createStreamableValue } from '@ai-sdk/rsc';

/**
 * Streams a model-generated response for the given prompt.
 *
 * Returns a streamable value immediately; the client reads it
 * incrementally with `readStreamableValue` while generation continues
 * in the background.
 */
export async function generateResponse(prompt: string) {
  const stream = createStreamableValue();

  // Fire-and-forget: this IIFE keeps streaming after the function has
  // already returned the streamable value to the client.
  (async () => {
    try {
      const { textStream } = streamText({
        model: __MODEL__,
        prompt,
      });

      for await (const text of textStream) {
        stream.update(text);
      }

      stream.done();
    } catch (error) {
      // Propagate the failure to the client; otherwise the rejection is
      // unhandled and the stream never terminates on the reader's side.
      stream.error(error);
    }
  })();

  return stream.value;
}
If you are looking to track loading state on a more granular level, you can create a second streamable value that carries a custom loading flag and read it on the frontend alongside the response. Let's update the example to create a new streamable value for tracking loading state:
'use server';

import { streamText } from 'ai';
__PROVIDER_IMPORT__;
import { createStreamableValue } from '@ai-sdk/rsc';

/**
 * Streams a model-generated response for the given prompt, alongside a
 * second streamable value that reports loading state to the client.
 *
 * Returns `{ response, loadingState }`, both readable on the client
 * with `readStreamableValue`.
 */
export async function generateResponse(prompt: string) {
  const stream = createStreamableValue();
  // Starts as { loading: true } and flips to false once the stream ends.
  const loadingState = createStreamableValue({ loading: true });

  // Fire-and-forget: generation continues after this function returns.
  (async () => {
    try {
      const { textStream } = streamText({
        model: __MODEL__,
        prompt,
      });

      for await (const text of textStream) {
        stream.update(text);
      }

      stream.done();
      loadingState.done({ loading: false });
    } catch (error) {
      // Surface the error to the reader and make sure the loading flag
      // is cleared so the client UI is not stuck in a loading state.
      stream.error(error);
      loadingState.done({ loading: false });
    }
  })();

  return { response: stream.value, loadingState: loadingState.value };
}
'use client';

import { useState } from 'react';
import { generateResponse } from './actions';
import { readStreamableValue } from '@ai-sdk/rsc';

// Force the page to be dynamic and allow streaming responses up to 30 seconds
export const maxDuration = 30;

export default function Home() {
  const [input, setInput] = useState<string>('');
  const [generation, setGeneration] = useState<string>('');
  const [loading, setLoading] = useState<boolean>(false);

  return (
    <div>
      <div>{generation}</div>
      <form
        onSubmit={async e => {
          e.preventDefault();
          setLoading(true);
          try {
            const { response, loadingState } = await generateResponse(input);

            // Read both streams concurrently. If they were consumed one
            // after the other, the loading-state updates would only be
            // seen after the entire response had finished streaming —
            // defeating the point of the granular loading signal.
            await Promise.all([
              (async () => {
                let textContent = '';
                for await (const responseDelta of readStreamableValue(
                  response,
                )) {
                  textContent = `${textContent}${responseDelta}`;
                  setGeneration(textContent);
                }
              })(),
              (async () => {
                for await (const loadingDelta of readStreamableValue(
                  loadingState,
                )) {
                  if (loadingDelta) {
                    setLoading(loadingDelta.loading);
                  }
                }
              })(),
            ]);

            setInput('');
          } finally {
            // Always clear the flag, even if the action or a stream threw.
            setLoading(false);
          }
        }}
      >
        <input
          type="text"
          value={input}
          disabled={loading}
          className="disabled:opacity-50"
          onChange={event => {
            setInput(event.target.value);
          }}
        />
        <button>Send Message</button>
      </form>
    </div>
  );
}
This allows you to provide more detailed feedback about the generation process to your users.
## `streamUI`

If you are using the `streamUI` function, you can stream the loading state to the client in the form of a React component. `streamUI` supports the usage of JavaScript generator functions, which allow you to yield some value (in this case a React component) while some other blocking work completes.
'use server';

import { openai } from '@ai-sdk/openai';
import { streamUI } from '@ai-sdk/rsc';

/**
 * Streams a React component for the given prompt: a placeholder is
 * shown immediately, then replaced with the generated text once the
 * model finishes.
 */
export async function generateResponse(prompt: string) {
  const { value } = await streamUI({
    model: openai('gpt-4o'),
    prompt,
    // Generator callback: `yield` ships an interim loading component to
    // the client; `return` supplies the component rendered with the
    // generated content.
    text: async function* ({ content }) {
      yield <div>loading...</div>;
      return <div>{content}</div>;
    },
  });

  return value;
}
'use client';

import { useState } from 'react';
import type { ReactNode } from 'react';
import { generateResponse } from './actions';

// Force the page to be dynamic and allow streaming responses up to 30 seconds
export const maxDuration = 30;

export default function Home() {
  const [input, setInput] = useState<string>('');
  // `streamUI` returns a React node (not a string), so the generation
  // state holds renderable UI. `ReactNode` is imported explicitly —
  // the `React` namespace is not in scope with the modern JSX transform.
  const [generation, setGeneration] = useState<ReactNode>();

  return (
    <div>
      <div>{generation}</div>
      <form
        onSubmit={async e => {
          e.preventDefault();
          // The streamed component updates itself as it arrives; no
          // client-side stream reading is needed here.
          const result = await generateResponse(input);
          setGeneration(result);
          setInput('');
        }}
      >
        <input
          type="text"
          value={input}
          onChange={event => {
            setInput(event.target.value);
          }}
        />
        <button>Send Message</button>
      </form>
    </div>
  );
}