Back to Llama Index

Define a query engine over Paul Graham's essay

llama-index-integrations/tools/llama-index-tools-text-to-image/examples/text_to_image-pg.ipynb

0.14.211.7 KB
Original Source
python
import os

# Set the OpenAI API key for the session; replace the placeholder with a
# real key before running (needed by both the LLM and the image tool).
os.environ["OPENAI_API_KEY"] = "sk-..."
python
from llama_index.core.agent.workflow import FunctionAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.workflow import Context

# QueryEngineTool/ToolMetadata live under `llama_index.core.tools` in the
# modular (post-0.10) package layout used by the other imports above; the
# bare `llama_index.tools` path is from the legacy monolithic package and
# raises ImportError on current installs.
from llama_index.core.tools import QueryEngineTool, ToolMetadata
python
# Define a query engine over Paul Graham's essay.
# `llama_index.core` matches the modular package layout used by the rest of
# this notebook; the bare `llama_index` import path is the legacy layout.
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
import requests

# Download Paul Graham's essay; fail loudly on a bad HTTP response rather
# than silently indexing an error page.
response = requests.get(
    "https://www.dropbox.com/s/f6bmb19xdg0xedm/paul_graham_essay.txt?dl=1"
)
response.raise_for_status()
essay_txt = response.text
# Write with an explicit encoding so the round-trip is platform-independent.
with open("pg_essay.txt", "w", encoding="utf-8") as fp:
    fp.write(essay_txt)

# Load the essay back as Document objects.
documents = SimpleDirectoryReader(input_files=["pg_essay.txt"]).load_data()

# Build a vector index over the documents (embeds via the OpenAI key above).
index = VectorStoreIndex.from_documents(documents)

# Query engine used by the agent tool below.
query_engine = index.as_query_engine()
python
# Wrap the query engine as an agent-callable tool; the metadata tells the
# agent when this tool is relevant.
pg_tool_metadata = ToolMetadata(
    name="paul_graham",
    description=(
        "Provides a biography of Paul Graham, from childhood to college to adult"
        " life"
    ),
)
query_engine_tool = QueryEngineTool(
    query_engine=query_engine,
    metadata=pg_tool_metadata,
)
python
# Import and initialize our tool spec
from llama_index.tools.text_to_image.base import TextToImageToolSpec

# NOTE: `OpenAI` is already imported above via `llama_index.llms.openai`;
# the legacy `from llama_index.llms import OpenAI` re-import used here
# previously does not exist in the modular package layout and would raise
# ImportError, so it has been removed.
llm = OpenAI(model="gpt-4")

text_to_image_spec = TextToImageToolSpec()
tools = text_to_image_spec.to_tool_list()

# Create the agent with both the image-generation tools and the essay
# query tool.
agent = FunctionAgent(tools=tools + [query_engine_tool], llm=llm)

# Context carries conversation/workflow state across agent.run() calls.
ctx = Context(agent)
python
print(
    await agent.run(
        "generate an image of the car that Paul Graham bought after Yahoo bought his"
        " company",
        ctx=ctx
    )
)