fern/01-guide/05-baml-advanced/collector.mdx
The Collector allows you to inspect the internal state of BAML function calls, including raw HTTP requests, responses, usage metrics, and timing information, so you can always see the raw data, without any abstraction layers.
<Tabs>
<Tab title="Python" language="python">
```python
from baml_client import b
from baml_py import Collector
# Create a collector with optional name
collector = Collector(name="my-collector")
result = b.ExtractResume("...", baml_options={"collector": collector})
print(collector.last.usage) # Print usage metrics
print(collector.last.raw_llm_response) # Print final response as string
# since there may be retries, print the last http response received
print(collector.last.calls[-1].http_response)
```
</Tab>
<Tab title="TypeScript" language="typescript">
```typescript
import { b } from 'baml_client'
import { Collector } from '@boundaryml/baml'
// Create a collector with optional name
const collector = new Collector("my-collector")
// Use it with a function call
const result = await b.ExtractResume("...", { collector })
// Access logging information
console.log(collector.last?.usage) // Print usage metrics
console.log(collector.last?.rawLlmResponse) // Print final response
// since there may be retries, print the last http response received
console.log(collector.last?.calls.at(-1)?.httpResponse)
```

</Tab>
<Tab title="Go" language="go">
```go
package main
import (
"context"
"fmt"
b "example.com/myproject/baml_client"
)
func main() {
ctx := context.Background()
// Create a collector with optional name
collector, err := b.NewCollector("my-collector")
if err != nil {
panic(err)
}
// Use it with a function call
result, err := b.ExtractResume(ctx, "...", b.WithCollector(collector))
if err != nil {
panic(err)
}
// Access logging information
logs, err := collector.Logs()
if err != nil {
panic(err)
}
fmt.Printf("Number of logs: %d\n", len(logs))
// Get usage information
usage, err := collector.Usage()
if err != nil {
panic(err)
}
fmt.Printf("Usage: %+v\n", usage)
}
```
</Tab>
<Tab title="Ruby" language="ruby">
```ruby
require_relative "baml_client/client"
b = Baml.Client
# Create a collector with optional name
collector = Baml::Collector.new(name: "my-collector")
# Use it with a function call
res = b.ExtractResume(input: '...', baml_options: { collector: collector })
# Access logging information
print(collector.last.usage) # Print usage metrics
print(collector.last.calls[-1].http_response) # Print the last http response received
print(collector.last.raw_llm_response) # a string of the last response made
```

</Tab>
<Tab title="Rust" language="rust">
```rust
// Create a collector with optional name
let collector = new_collector("my-collector");
// Use it with a function call
let result = B.ExtractResume.with_collector(&collector).call("...").unwrap();
// Access logging information
let logs = collector.logs();
let usage = collector.usage();
println!("Input tokens: {}", usage.input_tokens());
println!("Output tokens: {}", usage.output_tokens());
```
</Tab>
</Tabs>
## Common Use Cases
### Basic Logging
<Tabs>
<Tab title="Python" language="python">
```python
from baml_client import b
from baml_py import Collector # Import the Collector class
def run():
# Create a collector instance with an optional name
collector = Collector(name="my-collector")
# collector will be modified by the function to include all internal state
res = b.ExtractResume("...", baml_options={"collector": collector})
# This will print the return type of the function
print(res)
# This is guaranteed to be set by the function
assert collector.last is not None
# This will print the id of the last request
print(collector.last.id)
# This will print the usage of the last request
# (This aggregates usage from all retries if there was usage emitted)
print(collector.last.usage)
# This will print the raw response of the last request
print(collector.last.calls[-1].http_response)
# This will print the raw text we used to run the parser.
print(collector.last.raw_llm_response)
```

</Tab>
<Tab title="TypeScript" language="typescript">
```typescript
import {b} from 'baml_client'
import {Collector} from '@boundaryml/baml'
async function run() {
// Create a collector instance with an optional name
const collector = new Collector("my-collector")
// collector will be modified by the function to include all internal state
const res = await b.ExtractResume("...", { collector })
// This will print the return type of the function
console.log(res)
// This is guaranteed to be set by the function
assert(collector.last)
// This will print the id of the last request
console.log(collector.last.id)
// This will print the usage of the last request
// (This aggregates usage from all retries if there was usage emitted)
console.log(collector.last.usage)
// This will print the raw response of the last request
console.log(collector.last.calls.at(-1)?.httpResponse)
// This will print the raw text we used to run the parser.
console.log(collector.last.rawLlmResponse)
}
```
</Tab>
<Tab title="Go" language="go">
```go
package main
import (
"context"
"fmt"
"log"
b "example.com/myproject/baml_client"
)
func run() {
ctx := context.Background()
// Create a collector instance with an optional name
collector, err := b.NewCollector("my-collector")
if err != nil {
log.Fatalf("Failed to create collector: %v", err)
}
// collector will be modified by the function to include all internal state
res, err := b.ExtractResume(ctx, "...", b.WithCollector(collector))
if err != nil {
log.Fatalf("Function call failed: %v", err)
}
// This will print the return type of the function
fmt.Printf("Result: %+v\n", res)
// Get all logs from the collector
logs, err := collector.Logs()
if err != nil {
log.Fatalf("Failed to get logs: %v", err)
}
// This is guaranteed to be set by the function
if len(logs) == 0 {
log.Fatal("Expected at least one log entry")
}
lastLog := logs[len(logs)-1]
// This will print the id of the last request
id, err := lastLog.ID()
if err != nil {
log.Fatalf("Failed to get log ID: %v", err)
}
fmt.Printf("Request ID: %s\n", id)
// This will print the usage of the last request
// (This aggregates usage from all retries if there was usage emitted)
usage, err := lastLog.Usage()
if err != nil {
log.Fatalf("Failed to get usage: %v", err)
}
inputTokens, err := usage.InputTokens()
if err != nil {
log.Fatalf("Failed to get input tokens: %v", err)
}
fmt.Printf("Input tokens: %d\n", inputTokens)
outputTokens, err := usage.OutputTokens()
if err != nil {
log.Fatalf("Failed to get output tokens: %v", err)
}
fmt.Printf("Output tokens: %d\n", outputTokens)
// This will print the raw response of the last request
calls, err := lastLog.Calls()
if err != nil {
log.Fatalf("Failed to get calls: %v", err)
}
if len(calls) > 0 {
lastCall := calls[len(calls)-1]
response, err := lastCall.HttpResponse()
if err != nil {
log.Fatalf("Failed to get HTTP response: %v", err)
}
if response != nil {
body, err := response.Body()
if err != nil {
log.Fatalf("Failed to get response body: %v", err)
}
text, err := body.Text()
if err != nil {
log.Fatalf("Failed to get response text: %v", err)
}
fmt.Printf("HTTP Response: %s\n", text)
}
}
// This will print the raw text we used to run the parser
rawResponse, err := lastLog.RawLLMResponse()
if err != nil {
log.Fatalf("Failed to get raw LLM response: %v", err)
}
if rawResponse != nil {
fmt.Printf("Raw LLM Response: %s\n", *rawResponse)
}
}
func main() {
run()
}
```

</Tab>
<Tab title="Ruby" language="ruby">
```ruby
require_relative "baml_client/client"
b = Baml.Client
def run
# Create a collector instance
collector = Baml::Collector.new(name: "my-collector")
# The function will now use the collector to track internal state
res = b.ExtractResume(input: 'hi there', baml_options: { collector: collector })
# This will print the return type of the function
print(res)
# This is guaranteed to be set by the function
raise "Assertion failed" unless collector.last
# This will print the id of the last request
print(collector.last.id)
# This will print the usage of the last request
# (This aggregates usage from all retries if there was usage emitted)
print(collector.last.usage)
# This will print the raw response of the last request
print(collector.last.calls[-1].http_response)
# This will print the raw text we used to run the parser.
print(collector.last.raw_llm_response)
end
run
```
</Tab>
<Tab title="Rust" language="rust">
```rust
use myproject::baml_client::sync_client::B;
use myproject::baml_client::new_collector;
fn run() {
// Create a collector instance with an optional name
let collector = new_collector("my-collector");
// collector will be modified by the function to include all internal state
let res = B.ExtractResume.with_collector(&collector).call("...").unwrap();
println!("Result: {:?}", res);
// Get all logs from the collector
let logs = collector.logs();
assert!(!logs.is_empty(), "Expected at least one log entry");
let log = &logs[0];
// Print the function name
println!("Function: {}", log.function_name());
// Print the usage of the request
let usage = log.usage();
println!("Input tokens: {}", usage.input_tokens());
println!("Output tokens: {}", usage.output_tokens());
// Print details about the calls
let calls = log.calls();
for call in &calls {
println!("Provider: {}, Client: {}", call.provider(), call.client_name());
}
}
```

</Tab>
</Tabs>
### Reusing a Collector
<Tabs>
<Tab title="Python" language="python">
```python
from baml_client import b
from baml_py import Collector
def run():
collector = Collector(name="reusable-collector")
res = b.ExtractResume("...", baml_options={"collector": collector})
# Reuse the same collector
res = b.TestOpenAIGPT4oMini("Second call", baml_options={"collector": collector})
```
</Tab>
<Tab title="TypeScript" language="typescript">
```typescript
import {b} from 'baml_client'
import {Collector} from '@boundaryml/baml'
async function run() {
const collector = new Collector("reusable-collector")
const res = await b.ExtractResume("...", { collector })
// Reuse the same collector
const res2 = await b.ExtractResume("...", { collector })
}
```

</Tab>
<Tab title="Go" language="go">
```go
package main
import (
"context"
"log"
b "example.com/myproject/baml_client"
)
func run() {
ctx := context.Background()
collector, err := b.NewCollector("reusable-collector")
if err != nil {
log.Fatalf("Failed to create collector: %v", err)
}
res, err := b.ExtractResume(ctx, "...", b.WithCollector(collector))
if err != nil {
log.Fatalf("First call failed: %v", err)
}
// Reuse the same collector
res2, err := b.TestOpenAIGPT4oMini(ctx, "Second call", b.WithCollector(collector))
if err != nil {
log.Fatalf("Second call failed: %v", err)
}
// Both results are now available
_ = res
_ = res2
}
```
</Tab>
<Tab title="Ruby" language="ruby">
```ruby
require_relative "baml_client/client"
b = Baml.Client
def run
collector = Baml::Collector.new(name: "reusable-collector")
res = b.ExtractResume(input: 'First call', baml_options: { collector: collector })
# Reuse the same collector
res = b.ExtractResume(input: 'Second call', baml_options: { collector: collector })
end
```

</Tab>
<Tab title="Rust" language="rust">
```rust
let collector = new_collector("reusable-collector");
let res = B.ExtractResume.with_collector(&collector).call("...").unwrap();
// Reuse the same collector
let res2 = B.ExtractResume.with_collector(&collector).call("...").unwrap();
// collector now has 2 logs
assert_eq!(collector.logs().len(), 2);
```
</Tab>
</Tabs>
### Using Multiple Collectors
You can use multiple collectors to track different aspects of your application:
<Tabs>
<Tab title="Python" language="python">
```python
from baml_client import b
from baml_py import Collector
def run():
# Create separate collectors for different parts of your application
collector_a = Collector(name="collector-a")
collector_b = Collector(name="collector-b")
# Use both collectors for the same function call
res = b.ExtractResume("...", baml_options={"collector": [collector_a, collector_b]})
# Both collectors will have the same logs
assert collector_a.last.usage.input_tokens == collector_b.last.usage.input_tokens
# Use only collector_a for another call
res2 = b.TestOpenAIGPT4oMini("another call", baml_options={"collector": collector_a})
# collector_a will have 2 logs, collector_b will still have 1
assert len(collector_a.logs) == 2
assert len(collector_b.logs) == 1
```

</Tab>
<Tab title="TypeScript" language="typescript">
```typescript
import {b} from 'baml_client'
import {Collector} from '@boundaryml/baml'
async function run() {
// Create separate collectors for different parts of your application
const collector_a = new Collector("collector-a")
const collector_b = new Collector("collector-b")
// Use both collectors for the same function call
const res = await b.ExtractResume("...", { collector: [collector_a, collector_b] })
// Both collectors will have the same logs
assert(collector_a.last?.usage.inputTokens === collector_b.last?.usage.inputTokens)
// Use only collector_a for another call
const res2 = await b.ExtractResume("...", { collector: collector_a })
// collector_a will have 2 logs, collector_b will still have 1
assert(collector_a.logs.length === 2)
assert(collector_b.logs.length === 1)
}
```
</Tab>
<Tab title="Go" language="go">
```go
package main
import (
"context"
"log"
b "example.com/myproject/baml_client"
)
func run() {
ctx := context.Background()
// Create separate collectors for different parts of your application
collectorA, err := b.NewCollector("collector-a")
if err != nil {
log.Fatalf("Failed to create collector A: %v", err)
}
collectorB, err := b.NewCollector("collector-b")
if err != nil {
log.Fatalf("Failed to create collector B: %v", err)
}
// Use both collectors for the same function call
res, err := b.ExtractResume(ctx, "...", b.WithCollectors([]b.Collector{collectorA, collectorB}))
if err != nil {
log.Fatalf("Function call failed: %v", err)
}
// Both collectors will have the same logs
logsA, err := collectorA.Logs()
if err != nil {
log.Fatalf("Failed to get logs A: %v", err)
}
logsB, err := collectorB.Logs()
if err != nil {
log.Fatalf("Failed to get logs B: %v", err)
}
if len(logsA) != len(logsB) {
log.Fatalf("Expected same number of logs, got %d vs %d", len(logsA), len(logsB))
}
// Use only collector_a for another call
res2, err := b.TestOpenAIGPT4oMini(ctx, "another call", b.WithCollector(collectorA))
if err != nil {
log.Fatalf("Second call failed: %v", err)
}
// collector_a will have 2 logs, collector_b will still have 1
logsA, err = collectorA.Logs()
if err != nil {
log.Fatalf("Failed to get logs A: %v", err)
}
logsB, err = collectorB.Logs()
if err != nil {
log.Fatalf("Failed to get logs B: %v", err)
}
if len(logsA) != 2 {
log.Fatalf("Expected 2 logs in collector A, got %d", len(logsA))
}
if len(logsB) != 1 {
log.Fatalf("Expected 1 log in collector B, got %d", len(logsB))
}
_ = res
_ = res2
}
```

</Tab>
<Tab title="Ruby" language="ruby">
```ruby
require_relative "baml_client/client"
b = Baml.Client
def run
# Create separate collectors for different parts of your application
collector_a = Baml::Collector.new(name: "collector-a")
collector_b = Baml::Collector.new(name: "collector-b")
# Use both collectors for the same function call
res = b.ExtractResume(input: 'hi there', baml_options: { collector: [collector_a, collector_b] })
# Both collectors will have the same logs
raise "Assertion failed" unless collector_a.last.usage.input_tokens == collector_b.last.usage.input_tokens
# Use only collector_a for another call
res2 = b.ExtractResume(input: 'another call', baml_options: { collector: collector_a })
# collector_a will have 2 logs, collector_b will still have 1
raise "Assertion failed" unless collector_a.logs.length == 2
raise "Assertion failed" unless collector_b.logs.length == 1
end
```
</Tab>
<Tab title="Rust" language="rust">
```rust
use myproject::baml_client::sync_client::B;
use myproject::baml_client::new_collector;
let collector1 = new_collector("collector-a");
let collector2 = new_collector("collector-b");
// Use both collectors for the same function call
let res = B.ExtractResume
.with_collectors(&[collector1.clone(), collector2.clone()])
.call("...")
.unwrap();
// Both collectors will have the same logs
assert_eq!(collector1.logs().len(), 1);
assert_eq!(collector2.logs().len(), 1);
// Use only collector1 for another call
let res2 = B.ExtractResume.with_collector(&collector1).call("...").unwrap();
// collector1 will have 2 logs, collector2 will still have 1
assert_eq!(collector1.logs().len(), 2);
assert_eq!(collector2.logs().len(), 1);
```

</Tab>
</Tabs>
### Tracking Usage Across Collectors
<Tabs>
<Tab title="Python" language="python">
```python
from baml_client import b
from baml_py import Collector
def run():
collector_a = Collector(name="collector-a")
res = b.ExtractResume("...", baml_options={"collector": collector_a})
collector_b = Collector(name="collector-b")
res = b.ExtractResume("...", baml_options={"collector": collector_b})
# The total usage of both logs is now available
print(collector_a.usage)
print(collector_b.usage)
```
</Tab>
<Tab title="TypeScript" language="typescript">
```typescript
import {b} from 'baml_client'
import {Collector} from '@boundaryml/baml'
async function run() {
const collector_a = new Collector("collector-a")
const res = await b.ExtractResume("...", { collector: collector_a })
const collector_b = new Collector("collector-b")
const res2 = await b.ExtractResume("...", { collector: collector_b })
// The total usage of both logs is now available
console.log(collector_a.usage)
console.log(collector_b.usage)
}
```

</Tab>
<Tab title="Go" language="go">
```go
package main
import (
"context"
"fmt"
"log"
b "example.com/myproject/baml_client"
)
func run() {
ctx := context.Background()
collectorA, err := b.NewCollector("collector-a")
if err != nil {
log.Fatalf("Failed to create collector A: %v", err)
}
res, err := b.ExtractResume(ctx, "...", b.WithCollector(collectorA))
if err != nil {
log.Fatalf("First call failed: %v", err)
}
collectorB, err := b.NewCollector("collector-b")
if err != nil {
log.Fatalf("Failed to create collector B: %v", err)
}
res2, err := b.ExtractResume(ctx, "...", b.WithCollector(collectorB))
if err != nil {
log.Fatalf("Second call failed: %v", err)
}
// The total usage of both collectors is now available
usageA, err := collectorA.Usage()
if err != nil {
log.Fatalf("Failed to get usage A: %v", err)
}
usageB, err := collectorB.Usage()
if err != nil {
log.Fatalf("Failed to get usage B: %v", err)
}
inputTokensA, err := usageA.InputTokens()
if err != nil {
log.Fatalf("Failed to get input tokens A: %v", err)
}
outputTokensA, err := usageA.OutputTokens()
if err != nil {
log.Fatalf("Failed to get output tokens A: %v", err)
}
inputTokensB, err := usageB.InputTokens()
if err != nil {
log.Fatalf("Failed to get input tokens B: %v", err)
}
outputTokensB, err := usageB.OutputTokens()
if err != nil {
log.Fatalf("Failed to get output tokens B: %v", err)
}
fmt.Printf("Collector A - Input: %d, Output: %d\n", inputTokensA, outputTokensA)
fmt.Printf("Collector B - Input: %d, Output: %d\n", inputTokensB, outputTokensB)
_ = res
_ = res2
}
```
</Tab>
<Tab title="Ruby" language="ruby">
```ruby
require_relative "baml_client/client"
def run
collector_a = Baml::Collector.new(name: "collector-a")
res = Baml.Client.ExtractResume(input: 'First call', baml_options: { collector: collector_a })
collector_b = Baml::Collector.new(name: "collector-b")
res = Baml.Client.ExtractResume(input: 'Second call', baml_options: { collector: collector_b })
# The total usage of both logs is now available
print(collector_a.usage)
print(collector_b.usage)
end
```

</Tab>
<Tab title="Rust" language="rust">
```rust
let collector_a = new_collector("collector-a");
let res = B.ExtractResume.with_collector(&collector_a).call("...").unwrap();
let collector_b = new_collector("collector-b");
let res2 = B.ExtractResume.with_collector(&collector_b).call("...").unwrap();
// The total usage of both collectors is now available
let usage_a = collector_a.usage();
let usage_b = collector_b.usage();
println!("Collector A - Input: {}, Output: {}", usage_a.input_tokens(), usage_a.output_tokens());
println!("Collector B - Input: {}, Output: {}", usage_b.input_tokens(), usage_b.output_tokens());
```
</Tab>
</Tabs>
### Cached Token Tracking
When using providers that support prompt caching (like Anthropic, OpenAI, Google, or Vertex), you can track cached input tokens via the `cached_input_tokens` field:
<Tabs>
<Tab title="Python" language="python">
```python
from baml_client import b
from baml_py import Collector
async def run():
collector = Collector(name="cache-tracker")
# First call - content will be cached by the provider
res = await b.TestCaching(large_content, "Question 1", baml_options={"collector": collector})
# Second call with same content - should use cached tokens
res2 = await b.TestCaching(large_content, "Question 2", baml_options={"collector": collector})
# Access cached token counts
first_log = collector.logs[0]
second_log = collector.logs[1]
print(f"First call cached tokens: {first_log.usage.cached_input_tokens}")
print(f"Second call cached tokens: {second_log.usage.cached_input_tokens}")
# Collector aggregates cached tokens across all calls
print(f"Total cached tokens: {collector.usage.cached_input_tokens}")
# You can also access cached tokens per LLM call (including retries)
print(f"Per-call cached tokens: {first_log.calls[0].usage.cached_input_tokens}")
```

</Tab>
<Tab title="TypeScript" language="typescript">
```typescript
import {b} from 'baml_client'
import {Collector} from '@boundaryml/baml'
async function run() {
const collector = new Collector("cache-tracker")
// First call - content will be cached by the provider
const res = await b.TestCaching(largeContent, "Question 1", { collector })
// Second call with same content - should use cached tokens
const res2 = await b.TestCaching(largeContent, "Question 2", { collector })
// Access cached token counts
const firstLog = collector.logs[0]
const secondLog = collector.logs[1]
console.log(`First call cached tokens: ${firstLog.usage.cachedInputTokens}`)
console.log(`Second call cached tokens: ${secondLog.usage.cachedInputTokens}`)
// Collector aggregates cached tokens across all calls
console.log(`Total cached tokens: ${collector.usage.cachedInputTokens}`)
// You can also access cached tokens per LLM call (including retries)
console.log(`Per-call cached tokens: ${firstLog.calls[0].usage?.cachedInputTokens}`)
}
```
</Tab>
<Tab title="Go" language="go">
```go
package main
import (
"context"
"fmt"
"log"
b "example.com/myproject/baml_client"
)
func run() {
ctx := context.Background()
collector, err := b.NewCollector("cache-tracker")
if err != nil {
log.Fatalf("Failed to create collector: %v", err)
}
// First call - content will be cached by the provider
_, err = b.TestCaching(ctx, largeContent, "Question 1", b.WithCollector(collector))
if err != nil {
log.Fatalf("First call failed: %v", err)
}
// Second call with same content - should use cached tokens
_, err = b.TestCaching(ctx, largeContent, "Question 2", b.WithCollector(collector))
if err != nil {
log.Fatalf("Second call failed: %v", err)
}
// Access cached token counts
logs, _ := collector.Logs()
firstLog := logs[0]
secondLog := logs[1]
firstUsage, _ := firstLog.Usage()
secondUsage, _ := secondLog.Usage()
firstCached, _ := firstUsage.CachedInputTokens()
secondCached, _ := secondUsage.CachedInputTokens()
fmt.Printf("First call cached tokens: %d\n", firstCached)
fmt.Printf("Second call cached tokens: %d\n", secondCached)
// Collector aggregates cached tokens across all calls
totalUsage, _ := collector.Usage()
totalCached, _ := totalUsage.CachedInputTokens()
fmt.Printf("Total cached tokens: %d\n", totalCached)
}
```

</Tab>
<Tab title="Ruby" language="ruby">
```ruby
def run
collector = Baml::Collector.new(name: "cache-tracker")
# First call - content will be cached by the provider
res = Baml.Client.TestCaching(
content: large_content,
question: "Question 1",
baml_options: { collector: collector }
)
# Second call with same content - should use cached tokens
res2 = Baml.Client.TestCaching(
content: large_content,
question: "Question 2",
baml_options: { collector: collector }
)
# Access cached token counts
first_log = collector.logs[0]
second_log = collector.logs[1]
puts "First call cached tokens: #{first_log.usage.cached_input_tokens}"
puts "Second call cached tokens: #{second_log.usage.cached_input_tokens}"
# Collector aggregates cached tokens across all calls
puts "Total cached tokens: #{collector.usage.cached_input_tokens}"
# You can also access cached tokens per LLM call (including retries)
puts "Per-call cached tokens: #{first_log.calls[0].usage.cached_input_tokens}"
end
```
</Tab>
</Tabs>
<Info>
Cached token tracking is supported for Anthropic, OpenAI, Google AI, and Vertex AI providers. AWS Bedrock does not currently support cached token reporting and will return `null` for this field.
</Info>
## API Reference
### Collector Class
The Collector class provides properties to introspect the internal state of BAML function calls.
| Property | Type | Description |
|--------|------|-------------|
| `logs` | `List[FunctionLog]` | A list of all function calls (ordered from oldest to newest) |
| `last` | `FunctionLog \| null` | The most recent function log. |
| `usage` | `Usage` | The cumulative total usage of all requests this collector has tracked. This includes all retries and fallbacks, if those did use any tokens. |
The Collector class provides the following methods:
| Method | Type | Description |
|--------|------|-------------|
| `id(id: string)` | `FunctionLog \| null` | Get the function log by id. |
| `clear()` | `void` | Clears all logs. |
### FunctionLog Class
The `FunctionLog` class has the following properties:
| Property | Type | Description |
|----------|------|-------------|
| `id` | `string` | The id of the request. |
| `function_name` | `string` | The name of the function. |
| `log_type` | `"call" \| "stream"` | The manner in which the function was called. |
| `timing` | `Timing` | The timing of the request. |
| `usage` | `Usage` | The usage of the request (aggregated from all calls). |
| `calls` | `(LLMCall \| LLMStreamCall)[]` | Every call made to the LLM (including fallbacks and retries). Sorted from oldest to newest. |
| `raw_llm_response` | `string \| null` | The raw text from the best matching LLM. |
| `tags` | `Map[str, any]` | Any user provided metadata. |
### Timing Class
The `Timing` class has the following properties:
| Property | Type | Description |
|----------|------|-------------|
| `start_time_utc_ms` | `int` | The start time of the request in milliseconds since epoch. |
| `duration_ms` | `int \| null` | The duration of the request in milliseconds. |
#### StreamTiming Class (extends Timing)
| Property | Type | Description |
|----------|------|-------------|
| `time_to_first_token_ms` | `int \| null` | The time to first token in milliseconds. |
### Usage Class
The `Usage` class has the following properties:
| Property | Type | Description |
|----------|------|-------------|
| `input_tokens` | `int \| null` | The cumulative number of tokens used in the inputs. |
| `output_tokens` | `int \| null` | The cumulative number of tokens used in the outputs. |
| `cached_input_tokens` | `int \| null` | The number of cached input tokens (e.g., Anthropic's `cache_read_input_tokens`). |
<Info>
Note: Usage may not include all provider-specific token types like "thinking_tokens" or "cache_creation_input_tokens". For those, you may need to look at the raw HTTP response and build your own adapters.
</Info>
### LLMCall Class
The `LLMCall` class has the following properties:
| Property | Type | Description |
|----------|------|-------------|
| `client_name` | `str` | The name of the client used. |
| `provider` | `str` | The provider of the client used. |
| `timing` | `Timing` | The timing of the request. |
| `http_request` | `HttpRequest` | The raw HTTP request sent to the client. |
| `http_response` | `HttpResponse \| null` | The raw HTTP response from the client (null for streaming). |
| `usage` | `Usage \| null` | The usage of the request (if available). |
| `selected` | `bool` | Whether this call was selected and used for parsing. |
### LLMStreamCall Class (extends LLMCall)
The `LLMStreamCall` includes the same properties as `LLMCall` plus the following:
| Property | Type | Description |
|----------|------|-------------|
| `timing` | `StreamTiming` | The timing of the request. |
|`chunks` | `string[]` | The chunks of the response (API coming soon). |
### HttpRequest Class
The `HttpRequest` class has the following properties:
| Property | Type | Description |
|----------|------|-------------|
| `url` | `str` | The URL of the request. |
| `method` | `str` | The HTTP method of the request. |
| `headers` | `object` | The request headers. |
| `body` | `HTTPBody` | The request body. |
### HttpResponse Class
The `HttpResponse` class has the following properties:
| Property | Type | Description |
|----------|------|-------------|
| `status` | `int` | The HTTP status code. |
| `headers` | `object` | The response headers. |
| `body` | `HTTPBody` | The response body. |
### HTTPBody Class
The `HTTPBody` class has the following properties:
| Property | Type | Description |
|----------|------|-------------|
| `text()` | `string` | The body as a string. |
| `json()` | `object` | The body as a JSON object. |
## Related Topics
- [Using with_options](/ref/baml_client/with-options) - Learn how to configure logging globally
- [TypeBuilder](/ref/baml_client/type-builder) - Build custom types for your BAML functions
- [Client Registry](/ref/baml_client/client-registry) - Manage LLM clients and their configurations
## Best Practices
1. Use a single collector instance when tracking related function calls in a chain.
2. Consider using multiple collectors to track different parts of your application.
3. Use function IDs when tracking specific calls in parallel operations.
4. For streaming calls, be aware that `http_response` will be null, but you can still access usage information.