docs/src/content/en/models/providers/requesty.mdx
Access 38 Requesty models through Mastra's model router. Authentication is handled automatically using the `REQUESTY_API_KEY` environment variable.
Learn more in the Requesty documentation.
REQUESTY_API_KEY=your-api-key
import { Agent } from "@mastra/core/agent";
// The "requesty/<provider>/<model>" string routes the request through
// Requesty's model router; REQUESTY_API_KEY is read from the environment
// automatically, so no explicit credentials are needed here.
const agent = new Agent({
id: "my-agent",
name: "My Agent",
instructions: "You are a helpful assistant",
model: "requesty/anthropic/claude-3-7-sonnet"
});
// Generate a complete response in one call
const response = await agent.generate("Hello!");
// Stream a response incrementally as chunks arrive
const stream = await agent.stream("Tell me a story");
for await (const chunk of stream) {
console.log(chunk);
}
:::info
Mastra uses the OpenAI-compatible `/chat/completions` endpoint. Some provider-specific features may not be available. Check the Requesty documentation for details.
:::
<ProviderModelsTable models={[ { "model": "requesty/anthropic/claude-3-7-sonnet", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 200000, "maxOutput": 64000, "inputCost": 3, "outputCost": 15 }, { "model": "requesty/anthropic/claude-haiku-4-5", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 200000, "maxOutput": 62000, "inputCost": 1, "outputCost": 5 }, { "model": "requesty/anthropic/claude-opus-4", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 200000, "maxOutput": 32000, "inputCost": 15, "outputCost": 75 }, { "model": "requesty/anthropic/claude-opus-4-1", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 200000, "maxOutput": 32000, "inputCost": 15, "outputCost": 75 }, { "model": "requesty/anthropic/claude-opus-4-5", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 200000, "maxOutput": 64000, "inputCost": 5, "outputCost": 25 }, { "model": "requesty/anthropic/claude-opus-4-6", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 1000000, "maxOutput": 128000, "inputCost": 5, "outputCost": 25 }, { "model": "requesty/anthropic/claude-sonnet-4", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 200000, "maxOutput": 64000, "inputCost": 3, "outputCost": 15 }, { "model": "requesty/anthropic/claude-sonnet-4-5", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 1000000, "maxOutput": 64000, "inputCost": 3, "outputCost": 15 }, { "model": "requesty/anthropic/claude-sonnet-4-6", "imageInput": true, "audioInput": false, "videoInput": false, 
"toolUsage": true, "reasoning": true, "contextWindow": 1000000, "maxOutput": 128000, "inputCost": 3, "outputCost": 15 }, { "model": "requesty/google/gemini-2.5-flash", "imageInput": true, "audioInput": true, "videoInput": true, "toolUsage": true, "reasoning": true, "contextWindow": 1048576, "maxOutput": 65536, "inputCost": 0.3, "outputCost": 2.5 }, { "model": "requesty/google/gemini-2.5-pro", "imageInput": true, "audioInput": true, "videoInput": true, "toolUsage": true, "reasoning": true, "contextWindow": 1048576, "maxOutput": 65536, "inputCost": 1.25, "outputCost": 10 }, { "model": "requesty/google/gemini-3-flash-preview", "imageInput": true, "audioInput": true, "videoInput": true, "toolUsage": true, "reasoning": true, "contextWindow": 1048576, "maxOutput": 65536, "inputCost": 0.5, "outputCost": 3 }, { "model": "requesty/google/gemini-3-pro-preview", "imageInput": true, "audioInput": true, "videoInput": true, "toolUsage": true, "reasoning": true, "contextWindow": 1048576, "maxOutput": 65536, "inputCost": 2, "outputCost": 12 }, { "model": "requesty/openai/gpt-4.1", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 1047576, "maxOutput": 32768, "inputCost": 2, "outputCost": 8 }, { "model": "requesty/openai/gpt-4.1-mini", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 1047576, "maxOutput": 32768, "inputCost": 0.4, "outputCost": 1.6 }, { "model": "requesty/openai/gpt-4o-mini", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 16384, "inputCost": 0.15, "outputCost": 0.6 }, { "model": "requesty/openai/gpt-5", "imageInput": true, "audioInput": true, "videoInput": true, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 1.25, "outputCost": 10 }, { "model": "requesty/openai/gpt-5-chat", "imageInput": 
true, "audioInput": false, "videoInput": false, "toolUsage": false, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 1.25, "outputCost": 10 }, { "model": "requesty/openai/gpt-5-codex", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 1.25, "outputCost": 10 }, { "model": "requesty/openai/gpt-5-image", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 5, "outputCost": 10 }, { "model": "requesty/openai/gpt-5-mini", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 32000, "inputCost": 0.25, "outputCost": 2 }, { "model": "requesty/openai/gpt-5-nano", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 16000, "maxOutput": 4000, "inputCost": 0.05, "outputCost": 0.4 }, { "model": "requesty/openai/gpt-5-pro", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 272000, "inputCost": 15, "outputCost": 120 }, { "model": "requesty/openai/gpt-5.1", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 1.25, "outputCost": 10 }, { "model": "requesty/openai/gpt-5.1-chat", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 16384, "inputCost": 1.25, "outputCost": 10 }, { "model": "requesty/openai/gpt-5.1-codex", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 1.25, "outputCost": 10 }, { "model": 
"requesty/openai/gpt-5.1-codex-max", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 1.1, "outputCost": 9 }, { "model": "requesty/openai/gpt-5.1-codex-mini", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 100000, "inputCost": 0.25, "outputCost": 2 }, { "model": "requesty/openai/gpt-5.2", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 1.75, "outputCost": 14 }, { "model": "requesty/openai/gpt-5.2-chat", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 16384, "inputCost": 1.75, "outputCost": 14 }, { "model": "requesty/openai/gpt-5.2-codex", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 1.75, "outputCost": 14 }, { "model": "requesty/openai/gpt-5.2-pro", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 21, "outputCost": 168 }, { "model": "requesty/openai/gpt-5.3-codex", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 1.75, "outputCost": 14 }, { "model": "requesty/openai/gpt-5.4", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 1050000, "maxOutput": 128000, "inputCost": 2.5, "outputCost": 15 }, { "model": "requesty/openai/gpt-5.4-pro", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 1050000, "maxOutput": 128000, 
"inputCost": 30, "outputCost": 180 }, { "model": "requesty/openai/o4-mini", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 200000, "maxOutput": 100000, "inputCost": 1.1, "outputCost": 4.4 }, { "model": "requesty/xai/grok-4", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 256000, "maxOutput": 64000, "inputCost": 3, "outputCost": 15 }, { "model": "requesty/xai/grok-4-fast", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 2000000, "maxOutput": 64000, "inputCost": 0.2, "outputCost": 0.5 } ]} />
// Custom provider configuration: instead of the shorthand model string,
// pass an object to point at Requesty's OpenAI-compatible endpoint
// explicitly, supply the API key yourself, and attach extra headers.
const agent = new Agent({
id: "custom-agent",
name: "custom-agent",
model: {
// Requesty's OpenAI-compatible base URL
url: "https://router.requesty.ai/v1",
id: "requesty/anthropic/claude-3-7-sonnet",
apiKey: process.env.REQUESTY_API_KEY,
headers: {
"X-Custom-Header": "value"
}
}
});
// Dynamic model selection: pass a function as `model` to choose a model
// per request based on the request context.
const agent = new Agent({
id: "dynamic-agent",
name: "Dynamic Agent",
model: ({ requestContext }) => {
// Route complex tasks to a more capable model; everything else uses
// the default. (grok-4 is the full model — grok-4-fast is the cheaper,
// lighter variant, which would be backwards for "advanced" tasks.)
const useAdvanced = requestContext.task === "complex";
return useAdvanced
? "requesty/xai/grok-4"
: "requesty/anthropic/claude-3-7-sonnet";
}
});