
Anthropic Haiku Cookbook

Anthropic has released Claude 3 Haiku. This notebook gives you a quick start with the Haiku model and helps you explore its capabilities on text and vision tasks.

Installation

python
!pip install llama-index
!pip install llama-index-llms-anthropic
!pip install llama-index-multi-modal-llms-anthropic
python
from llama_index.llms.anthropic import Anthropic
from llama_index.multi_modal_llms.anthropic import AnthropicMultiModal

Set the API key

python
import os

os.environ["ANTHROPIC_API_KEY"] = "YOUR ANTHROPIC API KEY"

Using the Model for Chat/Completion

python
llm = Anthropic(model="claude-3-haiku-20240307")
python
response = llm.complete("LlamaIndex is ")
print(response)
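
The same LLM can also be used through the chat interface with llama_index's ChatMessage; the message contents below are just an illustration:

python
from llama_index.core.llms import ChatMessage

messages = [
    ChatMessage(role="system", content="You are a helpful assistant."),
    ChatMessage(role="user", content="Tell me about LlamaIndex in one sentence."),
]

chat_response = llm.chat(messages)
print(chat_response)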

Using the Model for Multi-Modal Tasks

Download image
python
!wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/images/prometheus_paper_card.png' -O 'prometheus_paper_card.png'
python
from PIL import Image
import matplotlib.pyplot as plt

# Open and display the downloaded image
img = Image.open("prometheus_paper_card.png")
plt.imshow(img)

Load the image

python
from llama_index.core import SimpleDirectoryReader

# Put the path to your local image file here
image_documents = SimpleDirectoryReader(
    input_files=["prometheus_paper_card.png"]
).load_data()

# Initialize the AnthropicMultiModal class
anthropic_mm_llm = AnthropicMultiModal(
    model="claude-3-haiku-20240307", max_tokens=300
)

Test query on image

python
response = anthropic_mm_llm.complete(
    prompt="Describe the images as an alternative text",
    image_documents=image_documents,
)

print(response)
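
You can also ask targeted questions about the same image by changing the prompt; the question below is just an illustration:

python
# Ask a specific question about the same image
response = anthropic_mm_llm.complete(
    prompt="What is the title of the paper shown in the image?",
    image_documents=image_documents,
)

print(response)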

Let's compare the response speed of different models

We will randomly generate 10 prompts and check the average response time.

Generate 10 random prompts
python
import random

# Lists of potential subjects and actions
subjects = ["a cat", "an astronaut", "a teacher", "a robot", "a pirate"]
actions = [
    "is exploring a mysterious cave",
    "finds a hidden treasure",
    "solves a complex puzzle",
    "invents a new gadget",
    "discovers a new planet",
]

prompts = []
# Generating 10 random prompts
for _ in range(10):
    subject = random.choice(subjects)
    action = random.choice(actions)
    prompt = f"{subject} {action}"
    prompts.append(prompt)
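Print the generated prompts to verify them (your output will vary, since the prompts are sampled at random):

python
# Inspect the randomly generated prompts
for i, prompt in enumerate(prompts, start=1):
    print(f"{i}. {prompt}")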
python
import time


# Computes average response time for model and prompts
def average_response_time(model, prompts):
    total_time_taken = 0
    llm = Anthropic(model=model, max_tokens=300)
    for prompt in prompts:
        start_time = time.perf_counter()  # high-resolution timer, better suited for benchmarking
        _ = llm.complete(prompt)
        end_time = time.perf_counter()
        total_time_taken += end_time - start_time

    return total_time_taken / len(prompts)
python
haiku_avg_response_time = average_response_time(
    "claude-3-haiku-20240307", prompts
)
python
opus_avg_response_time = average_response_time(
    "claude-3-opus-20240229", prompts
)
python
sonnet_avg_response_time = average_response_time(
    "claude-3-sonnet-20240229", prompts
)
python
print(f"Avg. time taken by Haiku model: {haiku_avg_response_time} seconds")
print(f"Avg. time taken by Opus model: {opus_avg_response_time} seconds")
print(f"Avg. time taken by Sonnet model: {sonnet_avg_response_time} seconds")