docs/integrate-langchain.md
Quickly connect LangChain to Nexa Serve (OpenAI-compatible endpoint).
1. Pull the model: `nexa pull Qwen/Qwen3-7B-Instruct-GGUF`
2. Start the server: `nexa serve` (serves an OpenAI-compatible API at http://127.0.0.1:18181/v1 by default)
3. Install the Python dependencies: `pip install langchain langchain-openai openai`
from langchain_openai import ChatOpenAI

# Point LangChain's OpenAI-compatible client at the local Nexa Serve endpoint.
# Nexa Serve does not validate credentials, so any placeholder api_key works.
chat = ChatOpenAI(
    model="Qwen/Qwen3-7B-Instruct-GGUF",
    base_url="http://127.0.0.1:18181/v1",
    api_key="not-needed",
)

# Send a single prompt and print the model's text reply.
response = chat.invoke("Hello from Nexa!")
print(response.content)
from langchain_openai import ChatOpenAI
from langchain_core.messages import SystemMessage, HumanMessage

# Local Nexa Serve endpoint; the api_key is a placeholder (not checked by the server).
chat = ChatOpenAI(
    base_url="http://127.0.0.1:18181/v1",
    api_key="not-needed",
    model="Qwen/Qwen3-7B-Instruct-GGUF",
)

# Build a structured conversation: a system role-setting message
# followed by the user's question.
conversation = [
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="Explain decorators in Python"),
]

reply = chat.invoke(conversation)
print(reply.content)