docs/src/content/en/models/providers/stackit.mdx
Access 8 STACKIT models through Mastra's model router. Authentication is handled automatically using the STACKIT_API_KEY environment variable.
Learn more in the STACKIT documentation.
STACKIT_API_KEY=your-api-key
// Basic usage: create an agent backed by a STACKIT-hosted model.
// The model router resolves the "stackit/" prefix and reads
// STACKIT_API_KEY from the environment automatically.
import { Agent } from "@mastra/core/agent";

const agent = new Agent({
  id: "my-agent",
  name: "My Agent",
  instructions: "You are a helpful assistant",
  model: "stackit/Qwen/Qwen3-VL-235B-A22B-Instruct-FP8"
});

// One-shot generation: resolves once the full reply is ready.
const response = await agent.generate("Hello!");

// Streaming: consume the reply incrementally as chunks arrive.
const stream = await agent.stream("Tell me a story");
for await (const chunk of stream) {
  console.log(chunk);
}
:::info
Mastra uses the OpenAI-compatible /chat/completions endpoint. Some provider-specific features may not be available. Check the STACKIT documentation for details.
:::
<ProviderModelsTable models={[ { "model": "stackit/cortecs/Llama-3.3-70B-Instruct-FP8-Dynamic", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 8192, "inputCost": 0.49, "outputCost": 0.71 }, { "model": "stackit/google/gemma-3-27b-it", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": false, "reasoning": false, "contextWindow": 37000, "maxOutput": 8192, "inputCost": 0.49, "outputCost": 0.71 }, { "model": "stackit/intfloat/e5-mistral-7b-instruct", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": false, "reasoning": false, "contextWindow": 4096, "maxOutput": 4096, "inputCost": 0.02, "outputCost": 0.02 }, { "model": "stackit/neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 8192, "inputCost": 0.16, "outputCost": 0.27 }, { "model": "stackit/neuralmagic/Mistral-Nemo-Instruct-2407-FP8", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 8192, "inputCost": 0.49, "outputCost": 0.71 }, { "model": "stackit/openai/gpt-oss-120b", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 131000, "maxOutput": 8192, "inputCost": 0.49, "outputCost": 0.71 }, { "model": "stackit/Qwen/Qwen3-VL-235B-A22B-Instruct-FP8", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 218000, "maxOutput": 8192, "inputCost": 1.64, "outputCost": 1.91 }, { "model": "stackit/Qwen/Qwen3-VL-Embedding-8B", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": false, "reasoning": false, "contextWindow": 32000, "maxOutput": 4096, "inputCost": 0.09, "outputCost": 0.09 } ]} />
// Advanced configuration: target STACKIT's OpenAI-compatible endpoint
// explicitly instead of relying on the router's default URL, supply the
// API key directly, and attach a custom header to every request.
const agent = new Agent({
  id: "custom-agent",
  name: "custom-agent",
  model: {
    // Base URL of the OpenAI-compatible /v1 API.
    url: "https://api.openai-compat.model-serving.eu01.onstackit.cloud/v1",
    id: "stackit/Qwen/Qwen3-VL-235B-A22B-Instruct-FP8",
    apiKey: process.env.STACKIT_API_KEY,
    headers: {
      "X-Custom-Header": "value"
    }
  }
});
// Dynamic model selection: choose a model per request based on the
// request context — "complex" tasks go to the larger reasoning model,
// everything else to the vision-language default.
const agent = new Agent({
  id: "dynamic-agent",
  name: "Dynamic Agent",
  model: ({ requestContext }) => {
    if (requestContext.task === "complex") {
      return "stackit/openai/gpt-oss-120b";
    }
    return "stackit/Qwen/Qwen3-VL-235B-A22B-Instruct-FP8";
  }
});