# hermesllm
A Rust library for handling LLM (Large Language Model) API requests and responses with unified abstractions across multiple providers.
## Installation

Add this to your `Cargo.toml`:

```toml
[dependencies]
hermesllm = { path = "../hermesllm" } # or the appropriate path within your workspace
```
## Usage

### Parsing requests

```rust
use hermesllm::providers::{ProviderRequestType, ProviderRequest, ProviderId};

// Parse a request from JSON bytes
let request_bytes = r#"{"model": "gpt-4", "messages": [{"role": "user", "content": "Hello!"}]}"#;

// Parse with provider context
let request = ProviderRequestType::try_from((request_bytes.as_bytes(), &ProviderId::OpenAI))?;

// Access request properties
println!("Model: {}", request.model());
println!("User message: {:?}", request.get_recent_user_message());
println!("Is streaming: {}", request.is_streaming());
```
### Parsing responses

```rust
use hermesllm::providers::{ProviderResponseType, ProviderResponse, ProviderId};

// Parse a response from the provider
let response_bytes = /* JSON response from the LLM */;
let response = ProviderResponseType::try_from((response_bytes, ProviderId::OpenAI))?;

// Extract token usage
if let Some((prompt, completion, total)) = response.extract_usage_counts() {
    println!("Tokens used: {}/{}/{}", prompt, completion, total);
}
```
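If you are metering several calls, the per-response counts can be folded into running totals. The sketch below is a hypothetical accumulator built only on `extract_usage_counts()` as used above; the `UsageTotals` name and the `u64` field widths are assumptions, not part of the crate:

```rust
use hermesllm::providers::{ProviderResponse, ProviderResponseType};

// Hypothetical accumulator; the u64 widths are an assumption.
#[derive(Default)]
struct UsageTotals {
    prompt: u64,
    completion: u64,
    total: u64,
}

impl UsageTotals {
    fn record(&mut self, response: &ProviderResponseType) {
        // extract_usage_counts() is the same call shown in the example above
        if let Some((prompt, completion, total)) = response.extract_usage_counts() {
            self.prompt += prompt as u64;
            self.completion += completion as u64;
            self.total += total as u64;
        }
    }
}
```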
### Streaming responses

```rust
use hermesllm::providers::{ProviderStreamResponseIter, ProviderStreamResponse, ProviderId};

// Create a streaming iterator from SSE data
let sse_data = /* Server-Sent Events data */;
let stream = ProviderStreamResponseIter::try_from((sse_data, &ProviderId::OpenAI))?;

// Process streaming chunks
for chunk_result in stream {
    match chunk_result {
        Ok(chunk) => {
            if let Some(content) = chunk.content_delta() {
                print!("{}", content);
            }
            if chunk.is_final() {
                break;
            }
        }
        Err(e) => eprintln!("Stream error: {}", e),
    }
}
```
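A common variant of the loop above is to collect the deltas into the full response text rather than print them. This is a minimal, hypothetical sketch using only `content_delta()` and `is_final()` from above, with the same assumption that the chunk error converts into `Box<dyn std::error::Error>`:

```rust
use hermesllm::providers::{ProviderStreamResponse, ProviderStreamResponseIter};

// Hypothetical helper: concatenate streamed deltas into one String.
fn collect_stream(stream: ProviderStreamResponseIter) -> Result<String, Box<dyn std::error::Error>> {
    let mut text = String::new();
    for chunk_result in stream {
        let chunk = chunk_result?;
        if let Some(delta) = chunk.content_delta() {
            text.push_str(delta.as_ref()); // works whether the delta is a String or a &str
        }
        if chunk.is_final() {
            break;
        }
    }
    Ok(text)
}
```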
### Checking provider compatibility

```rust
use hermesllm::providers::{ProviderId, has_compatible_api, supported_apis};

// Check API compatibility
let provider = ProviderId::Groq;
if has_compatible_api(&provider, "/v1/chat/completions") {
    println!("Provider supports chat completions");
}

// List supported APIs
let apis = supported_apis(&provider);
println!("Supported APIs: {:?}", apis);
```
## Core Types

- `ProviderId` - Enum identifying supported providers (OpenAI, Mistral, Groq, etc.)
- `ProviderRequestType` - Enum wrapping provider-specific request types
- `ProviderResponseType` - Enum wrapping provider-specific response types
- `ProviderStreamResponseIter` - Iterator over streaming response chunks
- `ProviderRequest` - Common interface for all request types
- `ProviderResponse` - Common interface for all response types
- `ProviderStreamResponse` - Interface for streaming response chunks
- `TokenUsage` - Interface for token usage information
- `ChatCompletionsRequest` - Chat completion request structure
- `ChatCompletionsResponse` - Chat completion response structure
- `Message`, `Role`, `MessageContent` - Message building blocks

## Design

The library takes a type-safe, enum-based approach: every incoming request is parsed into the common `ProviderRequestType` enum, which implements the `ProviderRequest` trait, so request properties can be accessed uniformly regardless of the underlying provider format.
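Because every provider's payload normalizes into the same enum, downstream code can be written once against the trait. The sketch below is a hypothetical helper built only from the calls shown above (`try_from`, `model()`, `is_streaming()`, `get_recent_user_message()`); it assumes the parse error converts into `Box<dyn std::error::Error>` and that `ProviderId` derives `Debug`:

```rust
use hermesllm::providers::{ProviderId, ProviderRequest, ProviderRequestType};

// Hypothetical helper: one code path for every provider, since all
// parsed requests implement the common ProviderRequest trait.
fn log_request(body: &[u8], provider: &ProviderId) -> Result<(), Box<dyn std::error::Error>> {
    let request = ProviderRequestType::try_from((body, provider))?;
    println!(
        "[{:?}] model={} streaming={}",
        provider,
        request.model(),
        request.is_streaming()
    );
    println!("latest user message: {:?}", request.get_recent_user_message());
    Ok(())
}
```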
See the tests in `src/lib.rs` for complete working examples.
## License

This project is licensed under the MIT License.