packages/graphrag-llm/notebooks/09_message_builder_and_history.ipynb
The completion API adheres to the litellm completion API and thus the OpenAI SDK API. The `messages` parameter can be one of the following:

- `str`: Raw string for the prompt.
- `list[dict[str, Any]]`: A list of dicts in the form `{"role": "user|system|...", "content": "..."}`.
- `list[ChatCompletionMessageParam]`: A list of OpenAI `ChatCompletionMessageParam` objects.

`graphrag_llm.utils` provides a `CompletionMessagesBuilder` to help construct these objects. Below are examples of using the builder.
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
import os
from dotenv import load_dotenv
from graphrag_llm.completion import LLMCompletion, create_completion
from graphrag_llm.config import AuthMethod, ModelConfig
from graphrag_llm.types import LLMCompletionResponse
from graphrag_llm.utils import (
CompletionMessagesBuilder,
)
load_dotenv()
api_key = os.getenv("GRAPHRAG_API_KEY")
model_config = ModelConfig(
model_provider="azure",
model=os.getenv("GRAPHRAG_MODEL", "gpt-4o"),
azure_deployment_name=os.getenv("GRAPHRAG_MODEL", "gpt-4o"),
api_base=os.getenv("GRAPHRAG_API_BASE"),
api_version=os.getenv("GRAPHRAG_API_VERSION", "2025-04-01-preview"),
api_key=api_key,
auth_method=AuthMethod.AzureManagedIdentity if not api_key else AuthMethod.ApiKey,
)
llm_completion: LLMCompletion = create_completion(model_config)
messages = (
CompletionMessagesBuilder()
.add_system_message(
"You are a helpful assistant that likes to talk like a pirate. Respond as if you are a pirate using pirate speak."
)
.add_user_message("Is pluto a planet? Respond with a yes or no.")
.add_assistant_message("Aye, matey! Pluto be a planet in me book.")
.add_user_message("Are you sure? I want the truth. Can you elaborate?")
.build()
)
response: LLMCompletionResponse = llm_completion.completion(messages=messages) # type: ignore
print(response.content)
You can use the `CompletionMessagesBuilder` along with the `CompletionContentPartBuilder` to build more complicated messages, such as those using images.
from graphrag_llm.utils import CompletionContentPartBuilder
messages = (
CompletionMessagesBuilder()
.add_user_message(
# Instead of providing a string we are providing content parts
# By using the CompletionContentPartBuilder
CompletionContentPartBuilder()
.add_text_part("Describe this image")
.add_image_part(
# Can also be a base64 encoded image string
url="https://th.bing.com/th/id/OUG.0A10DBFCEB3A9A7C6707FCF6F0D96BFD?cb=ucfimg2&ucfimg=1&rs=1&pid=ImgDetMain&o=7&rm=3",
detail="high",
)
.build()
)
.build()
)
response: LLMCompletionResponse = llm_completion.completion(messages=messages) # type: ignore
print(response.content)
The first example alluded to how the `CompletionMessagesBuilder` can be used to track conversation history.
user_messages = ["Is Pluto a planet? Answer with a yes or no.", "Can you elaborate?"]
messages_builder = CompletionMessagesBuilder()
for msg in user_messages:
print(f"User: {msg}")
messages_builder.add_user_message(msg)
response: LLMCompletionResponse = llm_completion.completion(
messages=messages_builder.build()
) # type: ignore
print(f"Assistant: {response.content}")
messages_builder.add_assistant_message(response.content)