cookbook/LiteLLM_Azure_and_OpenAI_example.ipynb
This notebook covers the following for Azure OpenAI + OpenAI:
- Completion calls
- Streaming completion calls
- Completion calls in separate threads
- Completion calls across 10 parallel threads
- OpenAI and Azure completions in a single thread
!pip install litellm
import os
from litellm import completion
# openai configs
os.environ["OPENAI_API_KEY"] = ""
# azure openai configs
os.environ["AZURE_API_KEY"] = ""
os.environ["AZURE_API_BASE"] = "https://openai-gpt-4-test-v-1.openai.azure.com/"
os.environ["AZURE_API_VERSION"] = "2023-05-15"
# openai call
response = completion(
    model="gpt-3.5-turbo",
    messages=[{"content": "Hello, how are you?", "role": "user"}]
)
print("OpenAI Response\n")
print(response)
# azure call
response = completion(
    model="azure/your-azure-deployment",
    messages=[{"content": "Hello, how are you?", "role": "user"}]
)
print("Azure Response\n")
print(response)
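LiteLLM normalizes provider responses to the OpenAI schema, so the same accessors work for both calls above. A minimal sketch of pulling out just the assistant text and token usage (assuming the standard choices[0].message.content shape):
# Extract only the assistant's reply text from the response object.
reply = response.choices[0].message.content
print(reply)
# Token usage is normalized too, which is useful for cost tracking.
print(response.usage)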
import os
from litellm import completion
# openai configs
os.environ["OPENAI_API_KEY"] = ""
# azure openai configs
os.environ["AZURE_API_KEY"] = ""
os.environ["AZURE_API_BASE"] = "https://openai-gpt-4-test-v-1.openai.azure.com/"
os.environ["AZURE_API_VERSION"] = "2023-05-15"
# openai call
response = completion(
    model="gpt-3.5-turbo",
    messages=[{"content": "Hello, how are you?", "role": "user"}],
    stream=True
)
print("OpenAI Streaming response")
for chunk in response:
    print(chunk)
# azure call
response = completion(
    model="azure/your-azure-deployment",
    messages=[{"content": "Hello, how are you?", "role": "user"}],
    stream=True
)
print("Azure Streaming response")
for chunk in response:
    print(chunk)
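Each streamed chunk is an OpenAI-format delta rather than a full message. A minimal sketch of stitching the text deltas back into one reply (assuming the standard choices[0].delta.content field, which is None on some chunks):
# Re-run a streaming call and assemble the full reply from the deltas.
response = completion(
    model="gpt-3.5-turbo",
    messages=[{"content": "Hello, how are you?", "role": "user"}],
    stream=True
)
full_reply = ""
for chunk in response:
    delta = chunk.choices[0].delta.content
    if delta is not None:  # some chunks (e.g., the final one) carry no text
        full_reply += delta
print(full_reply)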
import os
import threading
from litellm import completion
# Function to make a completion call
def make_completion(model, messages):
    response = completion(
        model=model,
        messages=messages
    )
    print(f"Response for {model}: {response}")
# openai configs
os.environ["OPENAI_API_KEY"] = ""
# azure openai configs
os.environ["AZURE_API_KEY"] = ""
os.environ["AZURE_API_BASE"] = "https://openai-gpt-4-test-v-1.openai.azure.com/"
os.environ["AZURE_API_VERSION"] = "2023-05-15"
# Define the messages for the completions
messages = [{"content": "Hello, how are you?", "role": "user"}]
# Create threads for making the completions
thread1 = threading.Thread(target=make_completion, args=("gpt-3.5-turbo", messages))
thread2 = threading.Thread(target=make_completion, args=("azure/your-azure-deployment", messages))
# Start both threads
thread1.start()
thread2.start()
# Wait for both threads to finish
thread1.join()
thread2.join()
print("Both completions are done.")
import os
import threading
from litellm import completion
# Function to make a completion call
def make_completion(model, messages):
    response = completion(
        model=model,
        messages=messages
    )
    print(f"Response for {model}: {response}")
# Set your API keys
os.environ["OPENAI_API_KEY"] = ""
os.environ["AZURE_API_KEY"] = ""
os.environ["AZURE_API_BASE"] = "https://openai-gpt-4-test-v-1.openai.azure.com/"
os.environ["AZURE_API_VERSION"] = "2023-05-15"
# Define the messages for the completions
messages = [{"content": "Hello, how are you?", "role": "user"}]
# Create and start 10 threads for making completions
threads = []
for i in range(10):
    # Alternate between the OpenAI model and the Azure deployment.
    model = "gpt-3.5-turbo" if i % 2 == 0 else "azure/your-azure-deployment"
    thread = threading.Thread(target=make_completion, args=(model, messages))
    threads.append(thread)
    thread.start()
# Wait for all threads to finish
for thread in threads:
    thread.join()
print("All completions are done.")
import os
from litellm import completion
# Function to make both OpenAI and Azure completions
def make_completions():
    # Set your OpenAI API key
    os.environ["OPENAI_API_KEY"] = ""
    # OpenAI completion
    openai_response = completion(
        model="gpt-3.5-turbo",
        messages=[{"content": "Hello, how are you?", "role": "user"}]
    )
    print("OpenAI Response:", openai_response)
    # Set your Azure OpenAI API key and configuration
    os.environ["AZURE_API_KEY"] = ""
    os.environ["AZURE_API_BASE"] = "https://openai-gpt-4-test-v-1.openai.azure.com/"
    os.environ["AZURE_API_VERSION"] = "2023-05-15"
    # Azure OpenAI completion
    azure_response = completion(
        model="azure/your-azure-deployment",
        messages=[{"content": "Hello, how are you?", "role": "user"}]
    )
    print("Azure OpenAI Response:", azure_response)
# Call the function to make both completions in one thread
make_completions()
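The sequential version above waits for the OpenAI call to finish before starting the Azure call. LiteLLM also exposes an async entry point, acompletion; a minimal sketch issuing both requests concurrently on a single thread with asyncio (assumes the API keys set above and a Jupyter kernel, whose running event loop allows top-level await):
import asyncio
from litellm import acompletion

async def both_completions():
    # Fire both requests concurrently on one thread via the event loop.
    openai_task = acompletion(
        model="gpt-3.5-turbo",
        messages=[{"content": "Hello, how are you?", "role": "user"}]
    )
    azure_task = acompletion(
        model="azure/your-azure-deployment",
        messages=[{"content": "Hello, how are you?", "role": "user"}]
    )
    return await asyncio.gather(openai_task, azure_task)

# In a notebook, top-level await works against the running event loop:
openai_response, azure_response = await both_completions()
print("OpenAI Response:", openai_response)
print("Azure OpenAI Response:", azure_response)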