
# Ollama Proxy LLM Configuration

import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";

<ConfigDetail config={{
  "name": "OllamaDeployModelParameters",
  "description": "Ollama proxy LLM configuration.",
  "documentationUrl": "https://ollama.com/library",
  "parameters": [
    {
      "name": "name",
      "type": "string",
      "required": true,
      "description": "The name of the model."
    },
    {
      "name": "backend",
      "type": "string",
      "required": false,
      "description": "The real model name to pass to the provider. Defaults to None; if None, `name` is used as the real model name."
    },
    {
      "name": "provider",
      "type": "string",
      "required": false,
      "description": "The provider of the model. If the model is deployed locally, this is the inference type; if it is deployed on a third-party service, this is the platform name ('proxy/<platform>').",
      "defaultValue": "proxy/ollama"
    },
    {
      "name": "verbose",
      "type": "boolean",
      "required": false,
      "description": "Show verbose output.",
      "defaultValue": "False"
    },
    {
      "name": "concurrency",
      "type": "integer",
      "required": false,
      "description": "The model concurrency limit.",
      "defaultValue": "5"
    },
    {
      "name": "prompt_template",
      "type": "string",
      "required": false,
      "description": "The prompt template. If None, it is automatically determined from the model. Only used for local deployment."
    },
    {
      "name": "context_length",
      "type": "integer",
      "required": false,
      "description": "The context length of the model. If None, it is automatically determined from the model."
    },
    {
      "name": "reasoning_model",
      "type": "boolean",
      "required": false,
      "description": "Whether the model is a reasoning model. If None, it is automatically determined from the model."
    },
    {
      "name": "api_base",
      "type": "string",
      "required": false,
      "description": "The base URL of the Ollama API.",
      "defaultValue": "${env:OLLAMA_API_BASE:-http://localhost:11434}"
    }
  ]
}} />
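
In practice, these parameters appear in the model section of a DB-GPT TOML config file. Below is a minimal sketch, assuming an Ollama server running on the default port and a locally pulled model; the model name `deepseek-r1:8b` and the `concurrency` override are illustrative, not requirements:

```toml
# Minimal sketch of an Ollama proxy model entry for DB-GPT
# (layout modeled on the project's proxy example configs; adapt as needed).
[models]
[[models.llms]]
# Required: the model name as known to Ollama (illustrative value).
name = "deepseek-r1:8b"
# Route requests through the Ollama proxy provider.
provider = "proxy/ollama"
# Optional: the Ollama endpoint; if omitted, OLLAMA_API_BASE
# (or http://localhost:11434) is used.
api_base = "http://localhost:11434"
# Optional: raise the per-model concurrency limit from its default of 5.
concurrency = 10
```

Because `api_base` defaults to `${env:OLLAMA_API_BASE:-http://localhost:11434}`, you can also omit it from the file and point DB-GPT at a remote Ollama instance by exporting `OLLAMA_API_BASE` in the environment.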