bindings/python/notebook/windows(x64).ipynb
This notebook demonstrates how to use the NexaAI SDK for various AI inference tasks on Windows (x64), including LLM chat, vision-language (VLM) inference, text embeddings, and document reranking.
NexaAI requires Python 3.10 on Windows (x64).
Verify the installation:
python -c "import sys, platform; print(f'Python version: {sys.version}')"
Your output should look like:
Python version: 3.10.11 (tags/v3.10.11:7d4cc5a, Apr 5 2023, 00:38:17) [MSC v.1929 64 bit (AMD64)]
The expected output must contain version `3.10.x`.
Here are suggested ways to install Python 3.10:
Using Anaconda
conda create -n nexaai python=3.10
conda activate nexaai
After installation, you may need to invoke Python 3.10 as `python3.10`:
python3.10 --version
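On Windows, the `py` launcher (installed by the python.org installer, if you selected it during setup) can also pick a specific interpreter version:
py -3.10 --version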
Change into the project root directory: `cd path/to/nexa-sdk`.
python -m venv nexaai-env
nexaai-env\Scripts\activate
pip install nexaai
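As a quick sanity check, confirm the SDK imports from the activated environment (this assumes only the top-level `nexaai` module that the examples below import):
python -c "import nexaai; print('NexaAI SDK imported successfully')"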
Select the `nexaai-env` kernel, or the custom virtual environment you have created. The kernel should reload automatically in most IDEs. Run the following code to ensure you have the right kernel running.
import sys
import platform

current_ver = sys.version_info
arch = platform.machine()

if current_ver.major != 3 or current_ver.minor != 10:
    print(f"❌ Error: Python {current_ver.major}.{current_ver.minor} detected")
    print("Required: Python 3.10")
    print("Please install Python 3.10 and restart the kernel.")
    sys.exit(1)
else:
    print(f"✅ Python 3.10 ({arch}) ready to proceed!")
Using CPU/GPU-accelerated large language models for text generation and multi-turn conversation. This example streams responses from Qwen3-1.7B (Q8_0 GGUF) through the `cpu_gpu` plugin.
import io

from nexaai import LLM, GenerationConfig, ModelConfig, LlmChatMessage

def llm_example():
    """LLM inference example."""
    print("=== LLM Inference Example ===")

    # Model configuration
    model_name = "Qwen/Qwen3-1.7B-GGUF/Qwen3-1.7B-Q8_0.gguf"
    plugin_id = "cpu_gpu"
    max_tokens = 100
    system_message = "You are a helpful assistant."

    print(f"Loading model: {model_name}")

    # Create model instance
    config = ModelConfig()
    llm = LLM.from_(model=model_name, plugin_id=plugin_id, config=config)

    # Create conversation history
    conversation = [LlmChatMessage(role="system", content=system_message)]

    # Example conversations
    test_prompts = [
        "What is artificial intelligence?",
        "Explain the benefits of on-device AI processing.",
        "How does NPU acceleration work?"
    ]

    for i, prompt in enumerate(test_prompts, 1):
        print(f"\n--- Conversation {i} ---")
        print(f"User: {prompt}")

        # Add user message
        conversation.append(LlmChatMessage(role="user", content=prompt))

        # Apply chat template
        formatted_prompt = llm.apply_chat_template(conversation)

        # Generate response
        print("Assistant: ", end="", flush=True)
        response_buffer = io.StringIO()
        gen = llm.generate_stream(formatted_prompt, GenerationConfig(max_tokens=max_tokens))

        result = None
        try:
            while True:
                token = next(gen)
                print(token, end="", flush=True)
                response_buffer.write(token)
        except StopIteration as e:
            # The generator's return value travels on StopIteration
            result = e.value

        # Get profiling data
        if result and hasattr(result, "profile_data") and result.profile_data:
            print(f"\n{result.profile_data}")

        # Add assistant response to conversation history
        conversation.append(LlmChatMessage(role="assistant", content=response_buffer.getvalue()))
        print("\n" + "=" * 50)

llm_example()
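Note that `conversation` grows by two messages per turn, so the formatted prompt (and prompt-processing time) grows with it. Below is a minimal trimming sketch in plain Python; it assumes `LlmChatMessage` exposes the `role` passed to its constructor as an attribute, which is not verified against the SDK:

def trim_conversation(conversation, max_messages=6):
    """Keep system messages plus only the most recent user/assistant messages."""
    system_msgs = [m for m in conversation if m.role == "system"]
    other_msgs = [m for m in conversation if m.role != "system"]
    return system_msgs + other_msgs[-max_messages:]

# Hypothetical usage inside the loop above, just before apply_chat_template:
# conversation = trim_conversation(conversation)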
Using vision-language models for multimodal understanding. This example runs gemma-3-4b-it (Q4_K_M GGUF) with its multimodal projector (mmproj) to process images and text jointly.
import io
import os

from nexaai import (
    GenerationConfig,
    ModelConfig,
    VlmChatMessage,
    VlmContent,
)
from nexaai.vlm import VLM

def vlm_example():
    """VLM inference example."""
    print("=== VLM Inference Example ===")

    # Model configuration
    model_name = "ggml-org/gemma-3-4b-it-GGUF/gemma-3-4b-it-Q4_K_M.gguf"
    mmproj_path = "ggml-org/gemma-3-4b-it-GGUF/mmproj-model-f16.gguf"
    plugin_id = "cpu_gpu"
    max_tokens = 100
    system_message = "You are a helpful assistant that can understand images and text."
    image_path = '/your/image/path'  # Replace with an actual image path if available

    print(f"Loading model: {model_name}")
    print(f"Using plugin: {plugin_id}")

    # Check for image existence
    if not (image_path and os.path.exists(image_path)):
        print(f"\033[93mWARNING: The specified image_path ('{image_path}') does not exist "
              f"or was not provided. Multimodal prompts will not include image input.\033[0m")

    # Create model instance
    config = ModelConfig()
    vlm = VLM.from_(model=model_name, mmproj_path=mmproj_path, config=config, plugin_id=plugin_id)

    # Create conversation history
    conversation = [
        VlmChatMessage(
            role="system",
            contents=[VlmContent(type="text", text=system_message)]
        )
    ]

    # Example multimodal conversations
    test_cases = [
        {
            "text": "What do you see in this image?",
            "image_path": image_path
        }
    ]

    for i, case in enumerate(test_cases, 1):
        print(f"\n--- Multimodal Conversation {i} ---")
        print(f"User: {case['text']}")

        # Build message content
        contents = []
        if case['text']:
            contents.append(VlmContent(type="text", text=case['text']))

        # Add image content if available
        if case['image_path'] and os.path.exists(case['image_path']):
            contents.append(VlmContent(type="image", text=case['image_path']))
            print(f"Including image: {case['image_path']}")

        # Add user message
        conversation.append(VlmChatMessage(role="user", contents=contents))

        # Apply chat template
        formatted_prompt = vlm.apply_chat_template(conversation)

        # Generate response
        print("Assistant: ", end="", flush=True)
        response_buffer = io.StringIO()

        # Prepare image and audio paths
        image_paths = [case['image_path']] if case['image_path'] and os.path.exists(case['image_path']) else None
        audio_paths = None

        gen = vlm.generate_stream(
            formatted_prompt,
            config=GenerationConfig(
                max_tokens=max_tokens,
                image_paths=image_paths,
                audio_paths=audio_paths
            )
        )

        result = None
        try:
            while True:
                token = next(gen)
                print(token, end="", flush=True)
                response_buffer.write(token)
        except StopIteration as e:
            # The generator's return value travels on StopIteration
            result = e.value

        # Get profiling data
        if result and hasattr(result, "profile_data") and result.profile_data:
            print(f"\n{result.profile_data}")

        # Add assistant response to conversation history
        conversation.append(
            VlmChatMessage(
                role="assistant",
                contents=[
                    VlmContent(type="text", text=response_buffer.getvalue())
                ]
            )
        )
        print("\n" + "=" * 50)

vlm_example()
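The message-building pattern above extends naturally to more than one image per turn. Here is a sketch under the assumption that the chosen model and the `cpu_gpu` plugin accept multiple entries in `image_paths` (not verified here; the file names are placeholders):

import os
from nexaai import VlmChatMessage, VlmContent

# Hypothetical image files; replace with real paths.
image_files = [p for p in ["photo1.jpg", "photo2.jpg"] if os.path.exists(p)]

contents = [VlmContent(type="text", text="Compare these two images.")]
for path in image_files:
    contents.append(VlmContent(type="image", text=path))
message = VlmChatMessage(role="user", contents=contents)
# Append `message` to the conversation, then pass image_paths=image_files
# in GenerationConfig, exactly as in vlm_example() above.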
Using embedding models for text vectorization and similarity computation.
from nexaai.embedding import Embedder

def embedder_example():
    """Embedder inference example."""
    print("=== Embedder Inference Example ===")

    # Model configuration
    model_name = "djuna/jina-embeddings-v2-small-en-Q5_K_M-GGUF/jina-embeddings-v2-small-en-q5_k_m.gguf"
    plugin_id = "cpu_gpu"
    batch_size = 2

    print(f"Loading model: {model_name}")
    print(f"Using plugin: {plugin_id}")
    print(f"Batch size: {batch_size}")

    # Create embedder instance
    embedder = Embedder.from_(model=model_name, plugin_id=plugin_id)
    print('Embedder loaded successfully!')

    # Get embedding dimension
    dim = embedder.embedding_dim()
    print(f"Dimension: {dim}")

    # Example texts
    texts = [
        "On-device AI is a type of AI that is processed on the device itself, rather than in the cloud.",
        "Nexa AI allows you to run state-of-the-art AI models locally on CPU, GPU, or NPU — from instant use cases to production deployments.",
        "A ragdoll is a breed of cat that is known for its long, flowing hair and gentle personality.",
        "The capital of France is Paris.",
        "NPU acceleration provides significant performance improvements for AI workloads."
    ]
    query = "what is on device AI"

    print("\n=== Generating Embeddings ===")
    print(f"Processing {len(texts)} texts...")

    # Generate embeddings
    result = embedder.embed(
        texts=texts,
        batch_size=batch_size,
    )
    embeddings = result.embeddings
    print(f"Successfully generated {len(embeddings)} embeddings")

    # Display embedding information
    print("\n=== Embedding Details ===")
    for i, (text, embedding) in enumerate(zip(texts, embeddings)):
        print(f"\nText {i + 1}:")
        print(f"  Content: {text}")
        print(f"  Embedding shape: {len(embedding)} dimensions")
        print(f"  First 10 elements: {embedding[:10]}")
        print("-" * 70)

    # Query processing
    print("\n=== Query Processing ===")
    print(f"Query: '{query}'")
    query_result = embedder.embed(
        texts=[query],
        batch_size=1,
    )
    query_embedding = query_result.embeddings[0]
    print(f"Query embedding shape: {len(query_embedding)} dimensions")

    # Similarity analysis
    print("\n=== Similarity Analysis (Inner Product) ===")
    similarities = []
    for i, (text, embedding) in enumerate(zip(texts, embeddings)):
        inner_product = sum(a * b for a, b in zip(query_embedding, embedding))
        similarities.append((i, text, inner_product))
        print(f"\nText {i + 1}:")
        print(f"  Content: {text}")
        print(f"  Inner product with query: {inner_product:.6f}")
        print("-" * 70)

    # Sort and display the most similar texts
    similarities.sort(key=lambda x: x[2], reverse=True)
    print("\n=== Similarity Ranking Results ===")
    for rank, (idx, text, score) in enumerate(similarities, 1):
        print(f"Rank {rank}: [{score:.6f}] {text}")

    return embeddings, query_embedding, similarities

embeddings, query_emb, similarities = embedder_example()
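Raw inner products mix vector direction with magnitude, so they are only comparable across texts if the model returns (near) unit-length embeddings. If you are unsure, cosine similarity normalizes that out; a small numpy sketch over the `embeddings` and `query_emb` returned above:

import numpy as np

def cosine_similarity(a, b):
    """Cosine similarity: dot(a, b) / (||a|| * ||b||)."""
    a = np.asarray(a, dtype=np.float64)
    b = np.asarray(b, dtype=np.float64)
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

for i, emb in enumerate(embeddings, 1):
    print(f"Text {i}: cosine similarity to query = {cosine_similarity(query_emb, emb):.6f}")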
Using reranking models to score documents by relevance to a query and reorder them accordingly.
import os

from nexaai.rerank import Reranker

def reranker_example():
    """Reranker inference example."""
    print("=== Reranker Inference Example ===")

    # Model configuration (Hugging Face repo ID)
    model_name = "pqnet/bge-reranker-v2-m3-Q8_0-GGUF/bge-reranker-v2-m3-q8_0.gguf"
    plugin_id = "cpu_gpu"
    batch_size = 4

    print(f"Loading model: {model_name}")
    print(f"Using plugin: {plugin_id}")
    print(f"Batch size: {batch_size}")

    # Create reranker instance
    reranker = Reranker.from_(
        model=os.path.expanduser(model_name),
        plugin_id=plugin_id,
    )

    # Example queries and documents
    queries = [
        "Where is on-device AI?",
        "What is NPU acceleration?",
        "How does machine learning work?",
        "Tell me about computer vision"
    ]
    documents = [
        "On-device AI is a type of AI that is processed on the device itself, rather than in the cloud.",
        "NPU acceleration provides significant performance improvements for AI workloads on specialized hardware.",
        "Edge computing brings computation and data storage closer to the sources of data.",
        "A ragdoll is a breed of cat that is known for its long, flowing hair and gentle personality.",
        "The capital of France is Paris, a beautiful city known for its art and culture.",
        "Machine learning is a subset of artificial intelligence that enables computers to learn without being explicitly programmed.",
        "Computer vision is a field of artificial intelligence that trains computers to interpret and understand visual information.",
        "Deep learning uses neural networks with multiple layers to model and understand complex patterns in data."
    ]

    print("\n=== Document Reranking Test ===")
    print(f"Number of documents: {len(documents)}")

    # Rerank the documents for each query
    for i, query in enumerate(queries, 1):
        print(f"\n--- Query {i} ---")
        print(f"Query: '{query}'")
        print("-" * 50)

        # Perform reranking
        result = reranker.rerank(
            query=query,
            documents=documents,
            batch_size=batch_size,
        )
        scores = result.scores

        # Create (document, score) pairs and sort by score, descending
        doc_scores = list(zip(documents, scores))
        doc_scores.sort(key=lambda x: x[1], reverse=True)

        # Display ranking results
        print("Reranking results:")
        for rank, (doc, score) in enumerate(doc_scores, 1):
            print(f"  {rank:2d}. [{score:.4f}] {doc}")

        # Display the most relevant documents
        print("\nMost relevant documents (top 3):")
        for rank, (doc, score) in enumerate(doc_scores[:3], 1):
            print(f"  {rank}. {doc}")
        print("=" * 80)

    return reranker

reranker = reranker_example()
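Reranker scores are comparable within a single query, but their absolute scale varies by model, so fixed thresholds can be brittle. One common way to map a query's scores onto a 0-1 scale is a softmax; a plain-Python sketch (this normalization is an addition of this notebook, not something `rerank()` does for you):

import math

def softmax(scores):
    """Numerically stable softmax: subtract the max before exponentiating."""
    m = max(scores)
    exps = [math.exp(s - m) for s in scores]
    total = sum(exps)
    return [e / total for e in exps]

# Hypothetical usage with one rerank() result from above:
# probs = softmax(result.scores)
# doc_probs = sorted(zip(documents, probs), key=lambda x: x[1], reverse=True)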