Back to Llama Index

Azure Vision

llama-index-integrations/tools/llama-index-tools-azure-cv/examples/azure_vision.ipynb

0.14.2 · 1630 B
Original Source
python
import os
# Placeholder credential for this example only — substitute your real key, or
# (better) export OPENAI_API_KEY in your shell so it never appears in code.
# NOTE(review): never commit a real API key to source control.
os.environ["OPENAI_API_KEY"] = "sk-proj-1234567890"
python
from llama_index.core.agent.workflow import FunctionAgent
from llama_index.llms.openai import OpenAI
python
from llama_index.tools.azure_cv.base import AzureCVToolSpec

# Build the Azure Computer Vision tool spec. "your-key" and "your-resource"
# are placeholders for your Azure CV API key and resource name.
cv_tool = AzureCVToolSpec(api_key="your-key", resource="your-resource")

# Expose the Azure CV operations as callable tools to a function-calling
# agent backed by an OpenAI chat model.
agent = FunctionAgent(
    tools=cv_tool.to_tool_list(),
    llm=OpenAI(model="gpt-4.1")
)

# Top-level `await` works here because notebook cells run inside an event
# loop; in a plain .py script wrap this call in `asyncio.run(...)` instead.
print(
    await agent.run(
        "caption this image and tell me what tags are in it"
        " https://portal.vision.cognitive.azure.com/dist/assets/ImageCaptioningSample1-bbe41ac5.png"
    )
)