Back to Mastra

IO.NET | Models

docs/src/content/en/models/providers/io-net.mdx

2025-12-18 · 6.7 KB
Original Source

IO.NET

Access 17 IO.NET models through Mastra's model router. Authentication is handled automatically using the IOINTELLIGENCE_API_KEY environment variable.

Learn more in the IO.NET documentation.

bash
IOINTELLIGENCE_API_KEY=your-api-key
typescript
import { Agent } from "@mastra/core/agent";

const agent = new Agent({
  id: "my-agent",
  name: "My Agent",
  instructions: "You are a helpful assistant",
  model: "io-net/Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar"
});

// Generate a response
const response = await agent.generate("Hello!");

// Stream a response
const stream = await agent.stream("Tell me a story");
for await (const chunk of stream) {
  console.log(chunk);
}

:::info

Mastra uses the OpenAI-compatible /chat/completions endpoint. Some provider-specific features may not be available. Check the IO.NET documentation for details.

:::

Models

<ProviderModelsTable models={[ { "model": "io-net/deepseek-ai/DeepSeek-R1-0528", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 4096, "inputCost": 2, "outputCost": 8.75 }, { "model": "io-net/Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 106000, "maxOutput": 4096, "inputCost": 0.22, "outputCost": 0.95 }, { "model": "io-net/meta-llama/Llama-3.2-90B-Vision-Instruct", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 16000, "maxOutput": 4096, "inputCost": 0.35, "outputCost": 0.4 }, { "model": "io-net/meta-llama/Llama-3.3-70B-Instruct", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 4096, "inputCost": 0.13, "outputCost": 0.38 }, { "model": "io-net/meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 430000, "maxOutput": 4096, "inputCost": 0.15, "outputCost": 0.6 }, { "model": "io-net/mistralai/Devstral-Small-2505", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 4096, "inputCost": 0.05, "outputCost": 0.22 }, { "model": "io-net/mistralai/Magistral-Small-2506", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 4096, "inputCost": 0.5, "outputCost": 1.5 }, { "model": "io-net/mistralai/Mistral-Large-Instruct-2411", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 4096, "inputCost": 2, "outputCost": 6 }, { "model": 
"io-net/mistralai/Mistral-Nemo-Instruct-2407", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 4096, "inputCost": 0.02, "outputCost": 0.04 }, { "model": "io-net/moonshotai/Kimi-K2-Instruct-0905", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 32768, "maxOutput": 4096, "inputCost": 0.39, "outputCost": 1.9 }, { "model": "io-net/moonshotai/Kimi-K2-Thinking", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 32768, "maxOutput": 4096, "inputCost": 0.55, "outputCost": 2.25 }, { "model": "io-net/openai/gpt-oss-120b", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 131072, "maxOutput": 4096, "inputCost": 0.04, "outputCost": 0.4 }, { "model": "io-net/openai/gpt-oss-20b", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 64000, "maxOutput": 4096, "inputCost": 0.03, "outputCost": 0.14 }, { "model": "io-net/Qwen/Qwen2.5-VL-32B-Instruct", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 32000, "maxOutput": 4096, "inputCost": 0.05, "outputCost": 0.22 }, { "model": "io-net/Qwen/Qwen3-235B-A22B-Thinking-2507", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 262144, "maxOutput": 4096, "inputCost": 0.11, "outputCost": 0.6 }, { "model": "io-net/Qwen/Qwen3-Next-80B-A3B-Instruct", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 262144, "maxOutput": 4096, "inputCost": 0.1, "outputCost": 0.8 }, { "model": "io-net/zai-org/GLM-4.6", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, 
"reasoning": false, "contextWindow": 200000, "maxOutput": 4096, "inputCost": 0.4, "outputCost": 1.75 } ]} />

Advanced configuration

Custom headers

typescript
const agent = new Agent({
  id: "custom-agent",
  name: "custom-agent",
  model: {
    url: "https://api.intelligence.io.solutions/api/v1",
    id: "io-net/Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar",
    apiKey: process.env.IOINTELLIGENCE_API_KEY,
    headers: {
      "X-Custom-Header": "value"
    }
  }
});

Dynamic model selection

typescript
const agent = new Agent({
  id: "dynamic-agent",
  name: "Dynamic Agent",
  model: ({ requestContext }) => {
    const useAdvanced = requestContext.task === "complex";
    return useAdvanced
      ? "io-net/zai-org/GLM-4.6"
      : "io-net/Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar";
  }
});