Back to Llama Index

Setup OpenAI Agent

llama-index-integrations/tools/llama-index-tools-openapi/examples/openapi_and_requests.ipynb

0.14.2 · 11.6 KB
Original Source
python
# Setup OpenAI Agent
import os

# The OpenAI LLM reads this environment variable for authentication.
# NOTE(review): replace the placeholder with a real key; setting it before
# constructing the LLM below is what matters — do not reorder past that point.
os.environ["OPENAI_API_KEY"] = "sk-your-api-key"

from llama_index.core.agent.workflow import FunctionAgent
from llama_index.llms.openai import OpenAI
python
# Load the OpenAPI spec for OpenAI
import requests
import yaml

# Fetch the raw spec. A timeout prevents the cell from hanging indefinitely on
# a slow/unreachable host, and raise_for_status() surfaces HTTP errors instead
# of silently feeding an error page to the YAML parser.
response = requests.get(
    "https://raw.githubusercontent.com/APIs-guru/openapi-directory/main/APIs/openai.com/1.2.0/openapi.yaml",
    timeout=30,
)
response.raise_for_status()
# safe_load is used deliberately: never yaml.load() on remote content.
open_api_spec = yaml.safe_load(response.text)
python
from llama_index.tools.openapi.base import OpenAPIToolSpec
from llama_index.tools.requests.base import RequestsToolSpec
from llama_index.tools.tool_spec.load_and_search.base import LoadAndSearchToolSpec

# Two equivalent ways to construct the OpenAPI tool spec: from an already
# parsed spec dict, or directly from a URL. The second assignment overwrites
# the first — only one of these lines is needed in practice.
open_spec = OpenAPIToolSpec(open_api_spec)
# OR
open_spec = OpenAPIToolSpec(
    url="https://raw.githubusercontent.com/APIs-guru/openapi-directory/main/APIs/openai.com/1.2.0/openapi.yaml"
)

# Per-domain headers the requests tool will attach when calling api.openai.com.
# NOTE(review): placeholder bearer token — replace with a real key.
requests_spec = RequestsToolSpec(
    {
        "api.openai.com": {
            "Authorization": "Bearer sk-your-key",
            "Content-Type": "application/json",
        }
    }
)

# OpenAPI spec is too large for content, wrap the tool to separate loading and searching
wrapped_tools = LoadAndSearchToolSpec.from_defaults(
    open_spec.to_tool_list()[0],
).to_tool_list()
python
# Combine the load/search-wrapped OpenAPI tools with the raw HTTP request
# tools so the agent can both inspect the spec and call the API.
agent = FunctionAgent(
    tools=[*wrapped_tools, *requests_spec.to_tool_list()], 
    llm=OpenAI(model="gpt-4.1"),
)
python
# Ask the agent to look up the server's base URL from the loaded spec.
# (Top-level await works in a notebook cell.)
print(
    await agent.run("what is the base url for the server")
)
python
# Ask the agent to describe the completions endpoint from the spec.
print(
    await agent.run("what is the completions api")
)
python
# Have the agent actually call the completions API (uses the requests tool
# with the Authorization header configured above).
print(
    await agent.run("ask the completions api for a joke")
)