packages/graphrag-llm/example_notebooks/basic_embedding_example.ipynb
# Copyright (c) 2026 Microsoft Corporation.
# Licensed under the MIT License.
This example demonstrates how to generate text embeddings using the GraphRAG LLM library with Azure OpenAI's embedding service. It loads API credentials from environment variables, creates a ModelConfig for the Azure embedding model, and configures authentication to use either an API key or Azure Managed Identity. The script then creates an embedding client and processes a batch of two text strings ("Hello world" and "How are you?") to generate their vector embeddings.
import os
from graphrag_llm.config.model_config import ModelConfig
from graphrag_llm.config.types import AuthMethod
from graphrag_llm.embedding import LLMEmbedding, create_embedding
from graphrag_llm.types import LLMEmbeddingResponse
# Read Azure OpenAI credentials from the environment. If no API key is set,
# the configuration below falls back to Azure Managed Identity.
api_key = os.getenv("GRAPHRAG_API_KEY")
api_base = os.getenv("GRAPHRAG_API_BASE")

# Model configuration for the Azure embedding deployment.
# NOTE(review): the model name and the deployment name are read from two
# differently-named env vars ("GRAPHRAG_EMBEDDING_MODEL" vs
# "GRAPHRAG_LLM_EMBEDDING_MODEL") — confirm this asymmetry is intentional.
embedding_config = ModelConfig(
    model_provider="azure",
    model=os.getenv("GRAPHRAG_EMBEDDING_MODEL", "text-embedding-3-small"),
    azure_deployment_name=os.getenv(
        "GRAPHRAG_LLM_EMBEDDING_MODEL", "text-embedding-3-small"
    ),
    api_base=api_base,
    api_version=os.getenv("GRAPHRAG_API_VERSION", "2025-04-01-preview"),
    api_key=api_key,
    # Prefer API-key auth when a key is present; otherwise use Managed Identity.
    auth_method=AuthMethod.ApiKey if api_key else AuthMethod.AzureManagedIdentity,
)
# Build the embedding client from the configuration above.
llm_embedding: LLMEmbedding = create_embedding(embedding_config)

# Embed a small batch of strings in a single request.
embeddings_batch: LLMEmbeddingResponse = llm_embedding.embedding(
    input=["Hello world", "How are you?"]
)

# Print only the first three components of each vector to keep output short.
for item in embeddings_batch.data:
    print(item.embedding[:3])