docs/src/content/en/models/providers/openai.mdx
Access 46 OpenAI models through Mastra's model router. Authentication is handled automatically using the OPENAI_API_KEY environment variable.
Learn more in the OpenAI documentation.
OPENAI_API_KEY=your-api-key
// Minimal Mastra agent example: the model is selected with a plain
// "provider/model" router string; the OpenAI API key is read from the
// OPENAI_API_KEY environment variable automatically (see above).
import { Agent } from "@mastra/core/agent";
const agent = new Agent({
id: "my-agent",
name: "My Agent",
instructions: "You are a helpful assistant",
model: "openai/codex-mini-latest"
});
// Generate a response
const response = await agent.generate("Hello!");
// Stream a response
// NOTE(review): chunks are assumed to be incremental output events from
// the model stream — confirm the chunk shape against the Mastra docs.
const stream = await agent.stream("Tell me a story");
for await (const chunk of stream) {
console.log(chunk);
}
<ProviderModelsTable models={[ { "model": "openai/codex-mini-latest", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 200000, "maxOutput": 100000, "inputCost": 1.5, "outputCost": 6 }, { "model": "openai/gpt-3.5-turbo", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": false, "reasoning": false, "contextWindow": 16385, "maxOutput": 4096, "inputCost": 0.5, "outputCost": 1.5 }, { "model": "openai/gpt-4", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 8192, "maxOutput": 8192, "inputCost": 30, "outputCost": 60 }, { "model": "openai/gpt-4-turbo", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 4096, "inputCost": 10, "outputCost": 30 }, { "model": "openai/gpt-4.1", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 1047576, "maxOutput": 32768, "inputCost": 2, "outputCost": 8 }, { "model": "openai/gpt-4.1-mini", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 1047576, "maxOutput": 32768, "inputCost": 0.4, "outputCost": 1.6 }, { "model": "openai/gpt-4.1-nano", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 1047576, "maxOutput": 32768, "inputCost": 0.1, "outputCost": 0.4 }, { "model": "openai/gpt-4o", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 16384, "inputCost": 2.5, "outputCost": 10 }, { "model": "openai/gpt-4o-2024-05-13", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 4096, "inputCost": 5, "outputCost": 15 }, { "model": 
"openai/gpt-4o-2024-08-06", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 16384, "inputCost": 2.5, "outputCost": 10 }, { "model": "openai/gpt-4o-2024-11-20", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 16384, "inputCost": 2.5, "outputCost": 10 }, { "model": "openai/gpt-4o-mini", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 16384, "inputCost": 0.15, "outputCost": 0.6 }, { "model": "openai/gpt-5", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 1.25, "outputCost": 10 }, { "model": "openai/gpt-5-chat-latest", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": false, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 1.25, "outputCost": 10 }, { "model": "openai/gpt-5-codex", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 1.25, "outputCost": 10 }, { "model": "openai/gpt-5-mini", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 0.25, "outputCost": 2 }, { "model": "openai/gpt-5-nano", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 0.05, "outputCost": 0.4 }, { "model": "openai/gpt-5-pro", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 272000, "inputCost": 15, "outputCost": 120 }, { "model": "openai/gpt-5.1", 
"imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 1.25, "outputCost": 10 }, { "model": "openai/gpt-5.1-chat-latest", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 16384, "inputCost": 1.25, "outputCost": 10 }, { "model": "openai/gpt-5.1-codex", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 1.25, "outputCost": 10 }, { "model": "openai/gpt-5.1-codex-max", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 1.25, "outputCost": 10 }, { "model": "openai/gpt-5.1-codex-mini", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 0.25, "outputCost": 2 }, { "model": "openai/gpt-5.2", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 1.75, "outputCost": 14 }, { "model": "openai/gpt-5.2-chat-latest", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 16384, "inputCost": 1.75, "outputCost": 14 }, { "model": "openai/gpt-5.2-codex", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 1.75, "outputCost": 14 }, { "model": "openai/gpt-5.2-pro", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 21, "outputCost": 168 }, { "model": "openai/gpt-5.3-codex", 
"imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 1.75, "outputCost": 14 }, { "model": "openai/gpt-5.3-codex-spark", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 32000, "inputCost": 1.75, "outputCost": 14 }, { "model": "openai/gpt-5.4", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 1050000, "maxOutput": 128000, "inputCost": 2.5, "outputCost": 15 }, { "model": "openai/gpt-5.4-mini", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 0.75, "outputCost": 4.5 }, { "model": "openai/gpt-5.4-nano", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 400000, "maxOutput": 128000, "inputCost": 0.2, "outputCost": 1.25 }, { "model": "openai/gpt-5.4-pro", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 1050000, "maxOutput": 128000, "inputCost": 30, "outputCost": 180 }, { "model": "openai/o1", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 200000, "maxOutput": 100000, "inputCost": 15, "outputCost": 60 }, { "model": "openai/o1-mini", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": false, "reasoning": true, "contextWindow": 128000, "maxOutput": 65536, "inputCost": 1.1, "outputCost": 4.4 }, { "model": "openai/o1-preview", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": false, "reasoning": true, "contextWindow": 128000, "maxOutput": 32768, "inputCost": 15, "outputCost": 60 }, { "model": "openai/o1-pro", "imageInput": true, "audioInput": false, 
"videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 200000, "maxOutput": 100000, "inputCost": 150, "outputCost": 600 }, { "model": "openai/o3", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 200000, "maxOutput": 100000, "inputCost": 2, "outputCost": 8 }, { "model": "openai/o3-deep-research", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 200000, "maxOutput": 100000, "inputCost": 10, "outputCost": 40 }, { "model": "openai/o3-mini", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 200000, "maxOutput": 100000, "inputCost": 1.1, "outputCost": 4.4 }, { "model": "openai/o3-pro", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 200000, "maxOutput": 100000, "inputCost": 20, "outputCost": 80 }, { "model": "openai/o4-mini", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 200000, "maxOutput": 100000, "inputCost": 1.1, "outputCost": 4.4 }, { "model": "openai/o4-mini-deep-research", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 200000, "maxOutput": 100000, "inputCost": 2, "outputCost": 8 }, { "model": "openai/text-embedding-3-large", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": false, "reasoning": false, "contextWindow": 8191, "maxOutput": 3072, "inputCost": 0.13, "outputCost": null }, { "model": "openai/text-embedding-3-small", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": false, "reasoning": false, "contextWindow": 8191, "maxOutput": 1536, "inputCost": 0.02, "outputCost": null }, { "model": "openai/text-embedding-ada-002", "imageInput": false, "audioInput": false, "videoInput": 
false, "toolUsage": false, "reasoning": false, "contextWindow": 8192, "maxOutput": 1536, "inputCost": 0.1, "outputCost": null } ]} />
// Configure the model with an object instead of a router string when you
// need per-agent overrides. This example sets an explicit API key and a
// custom HTTP header sent with every model request.
// NOTE(review): the accepted keys of this model config object (id, apiKey,
// headers) are taken from this example only — confirm the full option set
// against the Mastra model router documentation.
const agent = new Agent({
id: "custom-agent",
name: "custom-agent",
model: {
id: "openai/codex-mini-latest",
apiKey: process.env.OPENAI_API_KEY,
headers: {
"X-Custom-Header": "value"
}
}
});
// Select the model dynamically per request: route "complex" tasks to a
// stronger reasoning model and everything else to a lightweight default.
const agent = new Agent({
id: "dynamic-agent",
name: "Dynamic Agent",
model: ({ requestContext }) => {
const useAdvanced = requestContext.task === "complex";
// Fixed: this branch previously returned "openai/text-embedding-ada-002",
// which is an embeddings-only model (no chat output, no tool use — see the
// models table above) and cannot back an agent's generate/stream calls.
// Use a chat-capable reasoning model from the same provider instead.
return useAdvanced
? "openai/gpt-5"
: "openai/codex-mini-latest";
}
});
OpenAI supports the following provider-specific options via the providerOptions parameter:
// Pass OpenAI-specific request options under providerOptions.openai;
// the supported keys are listed in the table below.
const response = await agent.generate("Hello!", {
providerOptions: {
openai: {
// See available options in the table below
}
}
});
<PropertiesTable content={[ { "name": "conversation", "type": "string | null | undefined", "description": "", "isOptional": true }, { "name": "include", "type": "(\"file_search_call.results\" | \"message.output_text.logprobs\" | \"reasoning.encrypted_content\")[] | null | undefined", "description": "", "isOptional": true }, { "name": "instructions", "type": "string | null | undefined", "description": "", "isOptional": true }, { "name": "logprobs", "type": "number | boolean | undefined", "description": "", "isOptional": true }, { "name": "maxToolCalls", "type": "number | null | undefined", "description": "", "isOptional": true }, { "name": "metadata", "type": "any", "description": "", "isOptional": true }, { "name": "parallelToolCalls", "type": "boolean | null | undefined", "description": "", "isOptional": true }, { "name": "previousResponseId", "type": "string | null | undefined", "description": "", "isOptional": true }, { "name": "promptCacheKey", "type": "string | null | undefined", "description": "", "isOptional": true }, { "name": "promptCacheRetention", "type": "\"in_memory\" | \"24h\" | null | undefined", "description": "", "isOptional": true }, { "name": "reasoningEffort", "type": "string | null | undefined", "description": "", "isOptional": true }, { "name": "reasoningSummary", "type": "string | null | undefined", "description": "", "isOptional": true }, { "name": "safetyIdentifier", "type": "string | null | undefined", "description": "", "isOptional": true }, { "name": "serviceTier", "type": "\"default\" | \"auto\" | \"flex\" | \"priority\" | null | undefined", "description": "", "isOptional": true }, { "name": "store", "type": "boolean | null | undefined", "description": "Controls whether OpenAI stores your API requests for model training. Must be \"false\" if your organization has zero data retention enabled. See: https://platform.openai.com/docs/guides/your-data#zero-data-retention", "isOptional": true }, { "name": "strictJsonSchema", "type": "boolean | null | undefined", "description": "", "isOptional": true }, { "name": "textVerbosity", "type": "\"low\" | \"medium\" | \"high\" | null | undefined", "description": "", "isOptional": true }, { "name": "truncation", "type": "\"auto\" | \"disabled\" | null | undefined", "description": "", "isOptional": true }, { "name": "user", "type": "string | null | undefined", "description": "", "isOptional": true } ]} />
This provider can also be installed directly as a standalone package, which can be used instead of the Mastra model router string. View the package documentation for more details.
npm install @ai-sdk/openai
For detailed provider-specific documentation, see the AI SDK OpenAI provider docs.