Back to Mastra

GitHub Models | Models

docs/src/content/en/models/providers/github-models.mdx

2025-12-18 · 17.3 KB
Original Source

GitHub Models

Access 55 models from GitHub Models through Mastra's model router. Authentication is handled automatically using the GITHUB_TOKEN environment variable.

Learn more in the GitHub Models documentation.

bash
GITHUB_TOKEN=your-api-key
typescript
import { Agent } from "@mastra/core/agent";

const agent = new Agent({
  id: "my-agent",
  name: "My Agent",
  instructions: "You are a helpful assistant",
  model: "github-models/ai21-labs/ai21-jamba-1.5-large"
});

// Generate a response
const response = await agent.generate("Hello!");

// Stream a response
const stream = await agent.stream("Tell me a story");
for await (const chunk of stream) {
  console.log(chunk);
}

:::info

Mastra uses the OpenAI-compatible /chat/completions endpoint. Some provider-specific features may not be available. Check the GitHub Models documentation for details.

:::

Models

<ProviderModelsTable models={[ { "model": "github-models/ai21-labs/ai21-jamba-1.5-large", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 256000, "maxOutput": 4096, "inputCost": null, "outputCost": null }, { "model": "github-models/ai21-labs/ai21-jamba-1.5-mini", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 256000, "maxOutput": 4096, "inputCost": null, "outputCost": null }, { "model": "github-models/cohere/cohere-command-a", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 4096, "inputCost": null, "outputCost": null }, { "model": "github-models/cohere/cohere-command-r", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 4096, "inputCost": null, "outputCost": null }, { "model": "github-models/cohere/cohere-command-r-08-2024", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 4096, "inputCost": null, "outputCost": null }, { "model": "github-models/cohere/cohere-command-r-plus", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 4096, "inputCost": null, "outputCost": null }, { "model": "github-models/cohere/cohere-command-r-plus-08-2024", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 4096, "inputCost": null, "outputCost": null }, { "model": "github-models/core42/jais-30b-chat", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 8192, "maxOutput": 2048, "inputCost": null, "outputCost": null }, { "model": 
"github-models/deepseek/deepseek-r1", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 65536, "maxOutput": 8192, "inputCost": null, "outputCost": null }, { "model": "github-models/deepseek/deepseek-r1-0528", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 65536, "maxOutput": 8192, "inputCost": null, "outputCost": null }, { "model": "github-models/deepseek/deepseek-v3-0324", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 8192, "inputCost": null, "outputCost": null }, { "model": "github-models/meta/llama-3.2-11b-vision-instruct", "imageInput": true, "audioInput": true, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 8192, "inputCost": null, "outputCost": null }, { "model": "github-models/meta/llama-3.2-90b-vision-instruct", "imageInput": true, "audioInput": true, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 8192, "inputCost": null, "outputCost": null }, { "model": "github-models/meta/llama-3.3-70b-instruct", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 32768, "inputCost": null, "outputCost": null }, { "model": "github-models/meta/llama-4-maverick-17b-128e-instruct-fp8", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 8192, "inputCost": null, "outputCost": null }, { "model": "github-models/meta/llama-4-scout-17b-16e-instruct", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 8192, "inputCost": null, "outputCost": null }, { "model": 
"github-models/meta/meta-llama-3-70b-instruct", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 8192, "maxOutput": 2048, "inputCost": null, "outputCost": null }, { "model": "github-models/meta/meta-llama-3-8b-instruct", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 8192, "maxOutput": 2048, "inputCost": null, "outputCost": null }, { "model": "github-models/meta/meta-llama-3.1-405b-instruct", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 32768, "inputCost": null, "outputCost": null }, { "model": "github-models/meta/meta-llama-3.1-70b-instruct", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 32768, "inputCost": null, "outputCost": null }, { "model": "github-models/meta/meta-llama-3.1-8b-instruct", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 32768, "inputCost": null, "outputCost": null }, { "model": "github-models/microsoft/mai-ds-r1", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 65536, "maxOutput": 8192, "inputCost": null, "outputCost": null }, { "model": "github-models/microsoft/phi-3-medium-128k-instruct", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 4096, "inputCost": null, "outputCost": null }, { "model": "github-models/microsoft/phi-3-medium-4k-instruct", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 4096, "maxOutput": 1024, "inputCost": null, "outputCost": null }, { "model": 
"github-models/microsoft/phi-3-mini-128k-instruct", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 4096, "inputCost": null, "outputCost": null }, { "model": "github-models/microsoft/phi-3-mini-4k-instruct", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 4096, "maxOutput": 1024, "inputCost": null, "outputCost": null }, { "model": "github-models/microsoft/phi-3-small-128k-instruct", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 4096, "inputCost": null, "outputCost": null }, { "model": "github-models/microsoft/phi-3-small-8k-instruct", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 8192, "maxOutput": 2048, "inputCost": null, "outputCost": null }, { "model": "github-models/microsoft/phi-3.5-mini-instruct", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 4096, "inputCost": null, "outputCost": null }, { "model": "github-models/microsoft/phi-3.5-moe-instruct", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 4096, "inputCost": null, "outputCost": null }, { "model": "github-models/microsoft/phi-3.5-vision-instruct", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 4096, "inputCost": null, "outputCost": null }, { "model": "github-models/microsoft/phi-4", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 16000, "maxOutput": 4096, "inputCost": null, "outputCost": null }, { "model": 
"github-models/microsoft/phi-4-mini-instruct", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 4096, "inputCost": null, "outputCost": null }, { "model": "github-models/microsoft/phi-4-mini-reasoning", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 4096, "inputCost": null, "outputCost": null }, { "model": "github-models/microsoft/phi-4-multimodal-instruct", "imageInput": true, "audioInput": true, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 4096, "inputCost": null, "outputCost": null }, { "model": "github-models/microsoft/phi-4-reasoning", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 4096, "inputCost": null, "outputCost": null }, { "model": "github-models/mistral-ai/codestral-2501", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 32000, "maxOutput": 8192, "inputCost": null, "outputCost": null }, { "model": "github-models/mistral-ai/ministral-3b", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 8192, "inputCost": null, "outputCost": null }, { "model": "github-models/mistral-ai/mistral-large-2411", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 32768, "inputCost": null, "outputCost": null }, { "model": "github-models/mistral-ai/mistral-medium-2505", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 32768, "inputCost": null, "outputCost": null }, { "model": "github-models/mistral-ai/mistral-nemo", "imageInput": 
false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 8192, "inputCost": null, "outputCost": null }, { "model": "github-models/mistral-ai/mistral-small-2503", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 32768, "inputCost": null, "outputCost": null }, { "model": "github-models/openai/gpt-4.1", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 16384, "inputCost": null, "outputCost": null }, { "model": "github-models/openai/gpt-4.1-mini", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 16384, "inputCost": null, "outputCost": null }, { "model": "github-models/openai/gpt-4.1-nano", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 16384, "inputCost": null, "outputCost": null }, { "model": "github-models/openai/gpt-4o", "imageInput": true, "audioInput": true, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 16384, "inputCost": null, "outputCost": null }, { "model": "github-models/openai/gpt-4o-mini", "imageInput": true, "audioInput": true, "videoInput": false, "toolUsage": true, "reasoning": false, "contextWindow": 128000, "maxOutput": 16384, "inputCost": null, "outputCost": null }, { "model": "github-models/openai/o1", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": false, "reasoning": true, "contextWindow": 200000, "maxOutput": 100000, "inputCost": null, "outputCost": null }, { "model": "github-models/openai/o1-mini", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": false, "reasoning": true, "contextWindow": 128000, "maxOutput": 65536, 
"inputCost": null, "outputCost": null }, { "model": "github-models/openai/o1-preview", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": false, "reasoning": true, "contextWindow": 128000, "maxOutput": 32768, "inputCost": null, "outputCost": null }, { "model": "github-models/openai/o3", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": false, "reasoning": true, "contextWindow": 200000, "maxOutput": 100000, "inputCost": null, "outputCost": null }, { "model": "github-models/openai/o3-mini", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": false, "reasoning": true, "contextWindow": 200000, "maxOutput": 100000, "inputCost": null, "outputCost": null }, { "model": "github-models/openai/o4-mini", "imageInput": true, "audioInput": false, "videoInput": false, "toolUsage": false, "reasoning": true, "contextWindow": 200000, "maxOutput": 100000, "inputCost": null, "outputCost": null }, { "model": "github-models/xai/grok-3", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 8192, "inputCost": null, "outputCost": null }, { "model": "github-models/xai/grok-3-mini", "imageInput": false, "audioInput": false, "videoInput": false, "toolUsage": true, "reasoning": true, "contextWindow": 128000, "maxOutput": 8192, "inputCost": null, "outputCost": null } ]} />

Advanced configuration

Custom headers

typescript
const agent = new Agent({
  id: "custom-agent",
  name: "custom-agent",
  model: {
    url: "https://models.github.ai/inference",
    id: "github-models/ai21-labs/ai21-jamba-1.5-large",
    apiKey: process.env.GITHUB_TOKEN,
    headers: {
      "X-Custom-Header": "value"
    }
  }
});

Dynamic model selection

typescript
const agent = new Agent({
  id: "dynamic-agent",
  name: "Dynamic Agent",
  model: ({ requestContext }) => {
    const useAdvanced = requestContext.task === "complex";
    return useAdvanced
      ? "github-models/xai/grok-3-mini"
      : "github-models/ai21-labs/ai21-jamba-1.5-large";
  }
});