# packages/graphrag-llm/notebooks/12_mocking.ipynb
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
import os
from graphrag_llm.completion import LLMCompletion, create_completion
from graphrag_llm.config import LLMProviderType, ModelConfig
from graphrag_llm.types import LLMCompletionResponse
from pydantic import BaseModel, Field
class LocalWeather(BaseModel):
    """Weather snapshot for a single city."""

    # Human-readable city name, e.g. "New York".
    city: str = Field(description="The name of the city")
    # Temperature expressed in degrees Celsius.
    temperature: float = Field(description="The temperature in Celsius")
    # Free-text description of the conditions, e.g. "Sunny".
    condition: str = Field(description="The weather condition description")
class WeatherReports(BaseModel):
    """Container bundling weather reports for one or more cities."""

    # One LocalWeather entry per city covered by this report set.
    reports: list[LocalWeather] = Field(
        description="The weather reports for multiple cities"
    )
# Canned structured payload (a single city) used below as a mock LLM response.
weather_reports = WeatherReports(
    reports=[LocalWeather(city="New York", temperature=22.5, condition="Sunny")]
)
# NOTE(review): api_key is read here but never passed to the mock config —
# presumably kept for parity with the real-provider notebooks; confirm before removing.
api_key = os.getenv("GRAPHRAG_API_KEY")

# Mock provider configuration: replies are served from mock_responses rather
# than from an actual model call.
model_config = ModelConfig(
    type=LLMProviderType.MockLLM,
    model_provider="openai",
    model="gpt-4o",
    mock_responses=[
        "Who cares?",
        "You tell me!",
        weather_reports.model_dump_json(),
    ],
)
llm_completion: LLMCompletion = create_completion(model_config)

# Successive calls consume mock_responses in order.
response: LLMCompletionResponse = llm_completion.completion(messages="What is the capital of France?")  # type: ignore
print(response.content)

response: LLMCompletionResponse = llm_completion.completion(messages="Should be second response")  # type: ignore
print(response.content)

# The third mock entry is JSON, so response_format can parse it into WeatherReports.
response_formatted: LLMCompletionResponse[WeatherReports] = llm_completion.completion(messages="Structured response.", response_format=WeatherReports)  # type: ignore
print(response_formatted.formatted_response.model_dump_json())  # type: ignore

# Once the list is exhausted, the mock wraps around to the first entry.
response: LLMCompletionResponse = llm_completion.completion(messages="Should cycle back to first response")  # type: ignore
print(response.content)
from graphrag_llm.embedding import LLMEmbedding, create_embedding
# Mock embedding configuration; canned vector values come from mock_responses.
embedding_config = ModelConfig(
    type=LLMProviderType.MockLLM,
    model_provider="openai",
    model="text-embedding-3-small",
    mock_responses=[1.0, 2.0, 3.0],
)
llm_embedding: LLMEmbedding = create_embedding(embedding_config)

# Embed two inputs and show the leading values of each returned vector.
embeddings_response = llm_embedding.embedding(input=["Hello world", "How are you?"])
for vector in embeddings_response.embeddings:
    print(vector[:3])