python/semantic_kernel/connectors/ai/nvidia/README.md
This connector enables integration with the NVIDIA NIM API for text embeddings and chat completion, allowing you to use NVIDIA's models within the Semantic Kernel framework.
import semantic_kernel as sk
kernel = sk.Kernel()
You can provide your API key directly or through the `NVIDIA_API_KEY` environment variable.
from semantic_kernel.connectors.ai.nvidia import NvidiaTextEmbedding
embedding_service = NvidiaTextEmbedding(
ai_model_id="nvidia/nv-embedqa-e5-v5", # Default model if not specified
api_key="your-nvidia-api-key", # Can also use NVIDIA_API_KEY env variable
service_id="nvidia-embeddings" # Optional service identifier
)
kernel.add_service(embedding_service)
texts = ["Hello, world!", "Semantic Kernel is awesome"]
embeddings = await kernel.get_service("nvidia-embeddings").generate_embeddings(texts)
from semantic_kernel.connectors.ai.nvidia import NvidiaChatCompletion
chat_service = NvidiaChatCompletion(
ai_model_id="meta/llama-3.1-8b-instruct", # Default model if not specified
api_key="your-nvidia-api-key", # Can also use NVIDIA_API_KEY env variable
service_id="nvidia-chat" # Optional service identifier
)
kernel.add_service(chat_service)
response = await kernel.invoke_prompt("Hello, how are you?")
from semantic_kernel.agents import ChatCompletionAgent
from semantic_kernel.connectors.ai.nvidia import NvidiaChatCompletion
agent = ChatCompletionAgent(
service=NvidiaChatCompletion(),
name="SK-Assistant",
instructions="You are a helpful assistant.",
)
response = await agent.get_response(messages="Write a haiku about Semantic Kernel.")
print(response.content)