Back to LiteLLM

LiteLLM CometAPI Cookbook

cookbook/LiteLLM_CometAPI.ipynb

1.84.0-dev.23.2 KB
Original Source

LiteLLM CometAPI Cookbook

python
!pip install litellm

Completion

python
import os

# Store the CometAPI key in the environment, then read it back into a
# variable that the later embedding / image-generation cells reuse.
os.environ["COMETAPI_KEY"] = "Your_CometAPI_Key_Here"
api_key = os.environ.get("COMETAPI_KEY")
python
from litellm import completion

# Synchronous chat completion: Claude Sonnet routed through the CometAPI gateway.
messages = [{"role": "user", "content": "write code for saying hi"}]
response = completion(model="cometapi/claude-sonnet-4-5-20250929", messages=messages)
response
python
# Same prompt, this time routed to OpenAI's GPT-5 chat model via CometAPI.
prompt = {"role": "user", "content": "write code for saying hi"}
response = completion(model="cometapi/gpt-5-chat-latest", messages=[prompt])
response
python
# DeepSeek V3.2 (experimental) through the same unified completion interface.
response = completion(
    model="cometapi/deepseek-v3.2-exp",
    messages=[{"role": "user", "content": "write code for saying hi"}],
)
response

Streaming

Streaming Responses

python
messages = [{"role": "user", "content": "Hey, how's it going?"}]
# stream=True yields incremental delta chunks instead of one final message.
response = completion(model="cometapi/gpt-5-mini", messages=messages, stream=True)
for chunk in response:
    print(chunk.choices[0].delta.content or "")

Async Completion

python
from litellm import acompletion
import asyncio

async def test_get_response():
    user_message = "Hello, how are you?"
    messages = [{"content": user_message, "role": "user"}]
    response = await acompletion(model="cometapi/gpt-5-mini", messages=messages)
    return response

response = await test_get_response()
print(response)

Async Streaming

python
from litellm import acompletion
import asyncio, os, traceback

async def completion_call():
    try:
        print("test acompletion + streaming")
        response = await acompletion(
            model="cometapi/gpt-5-mini", 
            messages=[{"content": "Hello, how are you?", "role": "user"}], 
            stream=True
        )
        print(f"response: {response}")
        async for chunk in response:
            print(chunk)
    except:
        print(f"error occurred: {traceback.format_exc()}")
        pass

await completion_call()

Embedding

python
import litellm


# Async embedding request via CometAPI's OpenAI-compatible endpoint.
async def main():
    response = await litellm.aembedding(
        model="cometapi/text-embedding-3-small", # The model name must include prefix "cometapi/" + the model name from CometAPI
        api_key=api_key,  # your CometAPI api-key
        api_base="https://api.cometapi.com/v1", # CometAPI's OpenAI-compatible base URL
        input="Your text string",
    )
    print(response)

await main()
python
import litellm


async def main():
    response = await litellm.aembedding(
        model="cometapi/text-embedding-3-small", # The model name must include prefix "cometapi/" + the model name from CometAPI
        api_key=api_key,  # your CometAPI api-key
        api_base="https://api.cometapi.com/v1",
        input="Your text string",
    )
    print(response)


await main()

Async Image Generation

python
import asyncio

import litellm


async def main():
    response = await litellm.aimage_generation(
        model="cometapi/dall-e-3",  # The model name must include prefix "cometapi/" + the model name from CometAPI
        api_key=api_key,  # your cometapi api-key
        api_base="https://api.cometapi.com/v1",
        prompt="A cute baby sea otter",
    )
    print(response)


await main()