docs/examples/vector_stores/azurepostgresql.ipynb
In this notebook we show how to use Azure Database for PostgreSQL and the pg_diskann extension to perform vector search in LlamaIndex. Note that this notebook closely follows the PostgreSQL integration notebook to make the transition easier.
!pip install llama-index
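If the Azure Postgres integration is published as a separate package, it is installed alongside the core library. LlamaIndex integrations normally map the import path llama_index.vector_stores.azure_postgres to a package of the same name, so the package name below is assumed from that convention.
!pip install llama-index-vector-stores-azure-postgres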
%load_ext sql
import subprocess
import os
from urllib.parse import quote_plus
cmd = [
"az",
"account",
"get-access-token",
"--resource",
"https://ossrdbms-aad.database.windows.net",
"--query",
"accessToken",
"--output",
"tsv",
]
try:
token = subprocess.check_output(cmd, text=True).strip()
except subprocess.CalledProcessError as exc:
raise RuntimeError(f"Failed to run command: {exc}") from exc
os.environ["PGPASSWORD"] = token
%sql postgresql://
%%sql
drop table if exists llamaindex_vectors;
import logging
import sys
import os
# Uncomment to see debug logs
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
)
from llama_index.core.settings import Settings
from llama_index.llms.azure_openai import AzureOpenAI
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
import textwrap
# Azure Database for PostgreSQL vector store integration
from llama_index.vector_stores.azure_postgres import AzurePGVectorStore
from llama_index.vector_stores.azure_postgres.common import (
AzurePGConnectionPool,
DiskANN,
VectorOpClass,
)
The first step is to configure the Azure OpenAI credentials. They will be used to create embeddings for the documents loaded into the index.
import os
# Read Azure OpenAI settings from the environment, with placeholder fallbacks
aoai_api_key = os.environ.get("AOAI_API_KEY", "key")
aoai_endpoint = os.environ.get("AOAI_ENDPOINT", "endpoint")
aoai_api_version = os.environ.get("AOAI_API_VERSION", "2024-12-01-preview")
llm = AzureOpenAI(
model="o4-mini",
deployment_name="o4-mini",
api_key=aoai_api_key,
azure_endpoint=aoai_endpoint,
api_version=aoai_api_version,
)
# You need to deploy your own embedding model as well as your own chat completion model
embed_model = AzureOpenAIEmbedding(
model="text-embedding-3-small",
deployment_name="text-embedding-3-small",
api_key=aoai_api_key,
azure_endpoint=aoai_endpoint,
api_version=aoai_api_version,
)
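As a quick sanity check, you can embed a short string and confirm the vector dimension matches the embed_dim we will pass to the vector store later (1536 for text-embedding-3-small):
# Should print 1536 for text-embedding-3-small.
print(len(embed_model.get_text_embedding("hello world")))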
Download Data
!mkdir -p 'data/paul_graham/'
!wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'
Load the documents stored in data/paul_graham/ using SimpleDirectoryReader.
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
print("Document ID:", documents[0].doc_id)
Using an existing PostgreSQL instance running on Azure, we connect to the database with Microsoft Entra authentication. Make sure you are logged in to your Azure account (for example via az login).
host = os.environ.get("PGHOST", "<your_host>")
port = int(os.environ.get("PGPORT", 5432))
database = os.environ.get("PGDATABASE", "postgres")
from psycopg import Connection
from psycopg.rows import dict_row
from llama_index.vector_stores.azure_postgres.common import (
ConnectionInfo,
create_extensions,
Extension,
)
def configure_connection(conn: Connection) -> None:
conn.autocommit = True
create_extensions(conn, [Extension(ext_name="vector")])
create_extensions(conn, [Extension(ext_name="pg_diskann")])
conn.row_factory = dict_row
azure_conn_info: ConnectionInfo = ConnectionInfo(
host=host, port=port, dbname=database, configure=configure_connection
)
conn = AzurePGConnectionPool(
azure_conn_info=azure_conn_info,
)
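Optionally, you can verify the pool and the extensions before going further. This is only a sketch: it assumes AzurePGConnectionPool behaves like a standard psycopg_pool.ConnectionPool and exposes a connection() context manager.
# Sanity check (assumption: the pool hands out psycopg connections via connection()).
with conn.connection() as c:
    print(
        c.execute(
            "select extname, extversion from pg_extension"
            " where extname in ('vector', 'pg_diskann')"
        ).fetchall()
    )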
Here we create an index backed by Postgres using the documents loaded previously. AzurePGVectorStore takes a few arguments. The example below constructs an AzurePGVectorStore without a DiskANN index; we will add one later.
vector_store = AzurePGVectorStore.from_params(
connection_pool=conn,
table_name="llamaindex_vectors",
embed_dim=1536, # openai embedding dimension
)
Settings.llm = llm
Settings.embed_model = embed_model
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context, show_progress=True
)
query_engine = index.as_query_engine()
We can now ask questions.
response = query_engine.query("What did the author do?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("What happened in the mid 1980s?")
print(textwrap.fill(str(response), 100))
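To see which chunks grounded the answer, you can inspect the source nodes on the response (the standard LlamaIndex Response API):
# Each source node carries the retrieved chunk plus its similarity score.
for source in response.source_nodes:
    print(source.node.node_id, source.score)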
Now we create a pg_diskann index on our embeddings using the vector_cosine_ops operator class, with max_neighbors = 32, l_value_ib = 100 (the search list size used while building the index), and l_value_is = 100 (the search list size used at query time), and use it with a new vector store.
%%sql
create index on llamaindex_vectors
using diskann (embedding vector_cosine_ops)
with (
max_neighbors = 32,
l_value_ib = 100
);
set diskann.l_value_is to 100;
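To confirm the DiskANN index was created, you can inspect the standard pg_indexes catalog view:
%%sql
select indexname, indexdef from pg_indexes where tablename = 'llamaindex_vectors';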
diskann = DiskANN(
op_class=VectorOpClass.vector_cosine_ops,
max_neighbors=32,
l_value_ib=100,
l_value_is=100,
)
vector_store = AzurePGVectorStore.from_params(
connection_pool=conn,
schema_name="public",
table_name="llamaindex_vectors",
embed_dim=1536, # openai embedding dimension
embedding_index=diskann,
)
index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do?")
print(textwrap.fill(str(response), 100))
Read a specific node by its id.
nodes = vector_store.get_nodes()
print(len(nodes))
node_id = nodes[0].node_id
print(node_id)
nodes = vector_store.get_nodes([node_id])
print(nodes[0])
Delete a single node, then clear all nodes from the table.
vector_store.delete_nodes(node_ids=[node_id])
nodes = vector_store.get_nodes()
print(len(nodes))
vector_store.clear() # delete all
nodes = vector_store.get_nodes()
print(len(nodes))
AzurePGVectorStore supports storing metadata in nodes, and filtering based on that metadata during the retrieval step.
!mkdir -p 'data/csv/'
!wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/csv/commit_history_2.csv' -O 'data/csv/commit_history_2.csv'
import csv
with open("data/csv/commit_history_2.csv", "r") as f:
commits = list(csv.DictReader(f))
print(commits[0])
print(len(commits))
# Create TextNode for each of the first 100 commits
from llama_index.core.schema import TextNode
from datetime import datetime
import re
nodes = []
dates = set()
authors = set()
for commit in commits[:100]:
author_email = commit["author"].split("<")[1][:-1]
commit_date = datetime.strptime(
commit["date"], "%a %b %d %H:%M:%S %Y %z"
).strftime("%Y-%m-%d")
commit_text = commit["change summary"]
if commit["change details"]:
commit_text += "\n\n" + commit["change details"]
fixes = re.findall(r"#(\d+)", commit_text, re.IGNORECASE)
nodes.append(
TextNode(
text=commit_text,
metadata={
"commit_date": commit_date,
"author": author_email,
"fixes": fixes,
},
)
)
dates.add(commit_date)
authors.add(author_email)
print(nodes[0])
print(min(dates), "to", max(dates))
print(authors)
vector_store = AzurePGVectorStore.from_params(
connection_pool=conn,
schema_name="public",
table_name="metadata_filter_demo3",
embed_dim=1536, # openai embedding dimension
)
index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
index.insert_nodes(nodes)
print(index.as_query_engine().query("How did Leonhardt allow modal?"))
Now we can filter by commit author or by date when retrieving nodes.
from llama_index.core.vector_stores.types import (
MetadataFilter,
MetadataFilters,
)
filters = MetadataFilters(
filters=[
MetadataFilter(key="author", value="[email protected]"),
MetadataFilter(key="author", value="[email protected]"),
],
condition="or",
)
retriever = index.as_retriever(
similarity_top_k=10,
filters=filters,
)
retrieved_nodes = retriever.retrieve("What is this software project about?")
for node in retrieved_nodes:
print(node.node.metadata)
filters = MetadataFilters(
filters=[
MetadataFilter(key="commit_date", value="2025-08-20", operator=">="),
MetadataFilter(key="commit_date", value="2025-08-25", operator="<="),
],
condition="and",
)
retriever = index.as_retriever(
similarity_top_k=10,
filters=filters,
)
retrieved_nodes = retriever.retrieve("What is this software project about?")
for node in retrieved_nodes:
print(node.node.metadata)
In the above examples, we combined multiple filters using AND or OR. We can also combine multiple sets of filters.
e.g. in SQL:
WHERE (commit_date >= '2025-08-20' AND commit_date <= '2025-08-25') AND (author = '[email protected]' OR author = '[email protected]')
filters = MetadataFilters(
filters=[
MetadataFilters(
filters=[
MetadataFilter(
key="commit_date", value="2025-08-20", operator=">="
),
MetadataFilter(
key="commit_date", value="2025-08-25", operator="<="
),
],
condition="and",
),
MetadataFilters(
filters=[
MetadataFilter(key="author", value="[email protected]"),
MetadataFilter(
key="author", value="[email protected]"
),
],
condition="or",
),
],
condition="and",
)
retriever = index.as_retriever(
similarity_top_k=10,
filters=filters,
)
retrieved_nodes = retriever.retrieve("What is this software project about?")
for node in retrieved_nodes:
print(node.node.metadata)
The above can be simplified by using the IN operator. AzurePGVectorStore supports in, nin, and contains for comparing an element with a list.
filters = MetadataFilters(
filters=[
MetadataFilter(key="commit_date", value="2025-08-15", operator=">="),
MetadataFilter(key="commit_date", value="2025-08-20", operator="<="),
MetadataFilter(
key="author",
value=["[email protected]", "[email protected]"],
operator="in",
),
],
condition="and",
)
retriever = index.as_retriever(
similarity_top_k=10,
filters=filters,
)
retrieved_nodes = retriever.retrieve("What is this software project about?")
for node in retrieved_nodes:
print(node.node.metadata)
# Same thing, with NOT IN
filters = MetadataFilters(
filters=[
MetadataFilter(key="commit_date", value="2025-08-15", operator=">="),
MetadataFilter(key="commit_date", value="2025-08-20", operator="<="),
MetadataFilter(
key="author",
value=["[email protected]", "[email protected]"],
operator="nin",
),
],
condition="and",
)
retriever = index.as_retriever(
similarity_top_k=10,
filters=filters,
)
retrieved_nodes = retriever.retrieve("What is this software project about?")
for node in retrieved_nodes:
print(node.node.metadata)
# CONTAINS
filters = MetadataFilters(
filters=[
MetadataFilter(key="fixes", value="5680", operator="contains"),
]
)
retriever = index.as_retriever(
similarity_top_k=10,
filters=filters,
)
retrieved_nodes = retriever.retrieve("How did these commits fix the issue?")
for node in retrieved_nodes:
print(node.node.metadata)
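Finally, you can drop the demo tables to clean up. This assumes the tables are created under the exact names passed to from_params, as the earlier drop table and create index statements suggest.
%%sql
drop table if exists llamaindex_vectors;
drop table if exists metadata_filter_demo3;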