Back to Baml

AsyncClient / SyncClient

fern/03-reference/baml_client/client.mdx

0.222.0 · 11.9 KB
Original Source

BAML generates both a sync client and an async client. They offer the exact same public API but methods are either synchronous or asynchronous.

## BAML Functions

The generated client exposes all the functions that you've defined in your BAML files as methods. Suppose we have this file named `baml_src/literature.baml`:

```baml
function TellMeAStory() -> string {
    client "openai/gpt-4o"
    prompt #"
      Tell me a story
    "#
}

function WriteAPoemAbout(input: string) -> string {
    client "openai/gpt-4o"
    prompt #"
      Write a poem about {{ input }}
    "#
}
```

After running `baml-cli generate` you can directly call these functions from your code. Here's an example using the async client:

<Tabs>
<Tab title="Python" language="python">
```python
from baml_client.async_client import b

async def example():
    # Call your BAML functions.
    story = await b.TellMeAStory()
    poem = await b.WriteAPoemAbout("Roses")
```

</Tab>

<Tab title="TypeScript" language="typescript">
```typescript
import { b } from '../baml_client/async_client'

async function example() {
    // Call your BAML functions.
    const story = await b.TellMeAStory()
    const poem = await b.WriteAPoemAbout("Roses")
}
```
</Tab>

<Tab title="Go" language="go">
```go
package main

import (
    "context"

    b "example.com/myproject/baml_client"
)

func example() error {
    ctx := context.Background()

    // Call your BAML functions.
    story, err := b.TellMeAStory(ctx)
    if err != nil {
        return err
    }

    poem, err := b.WriteAPoemAbout(ctx, "Roses")
    if err != nil {
        return err
    }

    return nil
}
```
</Tab>

<Tab title="Ruby" language="ruby">
```ruby
# Ruby doesn't have an async client.
require 'baml_client/client'

def example
  # Call your BAML functions.
  story = b.TellMeAStory()
  poem = b.WriteAPoemAbout("Roses")
end
```
</Tab>

<Tab title="Rust" language="rust">
```rust
use myproject::baml_client::async_client::B;

async fn example() -> Result<(), baml_client::Error> {
    // Call your BAML functions.
    let story = B.TellMeAStory.call().await?;
    let poem = B.WriteAPoemAbout.call("Roses").await?;
    Ok(())
}
```

</Tab>
</Tabs>

The sync client is exactly the same but it doesn't need an async runtime,
instead it just blocks.

<Tabs>
<Tab title="Python" language="python">
```python
from baml_client.sync_client import b

def example():
    # Call your BAML functions.
    story = b.TellMeAStory()
    poem = b.WriteAPoemAbout("Roses")
```
</Tab>

<Tab title="TypeScript" language="typescript">
```typescript
import { b } from '../baml_client/sync_client'

function example() {
    // Call your BAML functions.
    const story = b.TellMeAStory()
    const poem = b.WriteAPoemAbout("Roses")
}
```

</Tab>

<Tab title="Go" language="go">
```go
package main

import (
    "context"
    b "example.com/myproject/baml_client"
)

func example() error {
    ctx := context.Background()
    
    // Go client functions are always synchronous - they block until completion
    story, err := b.TellMeAStory(ctx)
    if err != nil {
        return err
    }
    
    poem, err := b.WriteAPoemAbout(ctx, "Roses")
    if err != nil {
        return err
    }
    
    return nil
}
```
</Tab>

<Tab title="Ruby" language="ruby">
```ruby
require 'baml_client/client'

b = Baml.Client

def example
  # Call your BAML functions.
  story = b.TellMeAStory()
  poem = b.WriteAPoemAbout("Roses")
end
```

</Tab>

<Tab title="Rust" language="rust">
```rust
use myproject::baml_client::sync_client::B;

fn example() -> Result<(), baml_client::Error> {
    // Call your BAML functions.
    let story = B.TellMeAStory.call()?;
    let poem = B.WriteAPoemAbout.call("Roses")?;
    Ok(())
}
```
</Tab>
</Tabs>

## Call Patterns

The client object exposes some references to other objects that call your functions in a different manner.

### `.stream`

The `.stream` object is used to stream the response from a function.

<Tabs>
<Tab title="Python" language="python">
```python
from baml_client.async_client import b

async def example():
    stream = b.stream.TellMeAStory()

    async for partial in stream:
        print(partial)

    print(await stream.get_final_response())
```
</Tab>

<Tab title="TypeScript" language="typescript">
```typescript
import { b } from '../baml_client/async_client'

async function example() {
    const stream = b.stream.TellMeAStory()

    for await (const partial of stream) {
        console.log(partial)
    }

    console.log(await stream.getFinalResponse())
}
```
</Tab>

<Tab title="Go" language="go">
```go
package main

import (
    "context"
    "fmt"

    b "example.com/myproject/baml_client"
)

func example() error {
    ctx := context.Background()

    stream, err := b.Stream.TellMeAStory(ctx)
    if err != nil {
        return err
    }

    for value := range stream {
        if value.IsError {
            return value.Error
        }

        if !value.IsFinal && value.Stream() != nil {
            partial := *value.Stream()
            fmt.Println(partial)
        }

        if value.IsFinal && value.Final() != nil {
            final := *value.Final()
            fmt.Println(final)
        }
    }

    return nil
}
```

</Tab>

<Tab title="Ruby" language="ruby">
```ruby
require 'baml_client/client'

b = Baml.Client

def example
  stream = b.stream.TellMeAStory

  stream.each do |partial|
    puts partial
  end

  puts stream.get_final_response
end
```
</Tab>

<Tab title="Rust" language="rust">
```rust
use myproject::baml_client::sync_client::B;

fn example() -> Result<(), baml_client::Error> {
    let mut stream = B.TellMeAStory.stream()?;

    for partial in stream.partials() {
        println!("{:?}", partial?);
    }

    let final_response = stream.get_final_response()?;
    println!("{:?}", final_response);
    Ok(())
}
```

</Tab>
</Tabs>

### `.request`

<Info>
  This feature was added in: v0.79.0
</Info>

The `.request` object returns the raw HTTP request but it **does not** send it.
However, the async client still returns an awaitable object because we might
need to resolve media types like images and convert them to base64 or the
required format in order to send them to the LLM.

<Tabs>
<Tab title="Python" language="python">
```python
from baml_client.async_client import b

async def example():
    request = await b.request.TellMeAStory()
    print(request.url)
    print(request.headers)
    print(request.body.json())
```
</Tab>

<Tab title="TypeScript" language="typescript">
```typescript
import { b } from '../baml_client/async_client'

async function example() {
    const request = await b.request.TellMeAStory()
    console.log(request.url)
    console.log(request.headers)
    console.log(request.body.json())
}
```

</Tab>

<Tab title="Ruby" language="ruby">
```ruby
require 'baml_client/client'

b = Baml.Client

def example
  request = b.request.TellMeAStory
  puts request.url
  puts request.headers
  puts request.body.json
end
```
</Tab>

<Tab title="Rust" language="rust">
```rust
// Rust modular API coming soon!
```
</Tab>
</Tabs>

### `.stream_request`

<Info> This feature was added in: v0.79.0 </Info>

Same as `.request` but sets the streaming options to `true`.

<Tabs>
<Tab title="Python" language="python">
```python
from baml_client.async_client import b

async def example():
    request = await b.stream_request.TellMeAStory()
    print(request.url)
    print(request.headers)
    print(request.body.json())
```

</Tab>

<Tab title="TypeScript" language="typescript">
```typescript
import { b } from '../baml_client/async_client'

async function example() {
    const request = await b.stream_request.TellMeAStory()
    console.log(request.url)
    console.log(request.headers)
    console.log(request.body.json())
}
```
</Tab>

<Tab title="Ruby" language="ruby">
```ruby
require 'baml_client/client'

b = Baml.Client

def example
  request = b.stream_request.TellMeAStory
  puts request.url
  puts request.headers
  puts request.body.json
end
```

</Tab>

<Tab title="Rust" language="rust">
```rust
// Rust modular API coming soon!
```
</Tab>
</Tabs>

### `.parse`

<Info> This feature was added in: v0.79.0 </Info>

The `.parse` object is used to parse the response returned by the LLM after the function call. It can be used in combination with `.request`.

<Tabs>
<Tab title="Python" language="python">
```python
import requests  # requests is not async so for simplicity we'll use the sync client.
from baml_client.sync_client import b

def example():
    # Get the HTTP request.
    request = b.request.TellMeAStory()

    # Send the HTTP request.
    response = requests.post(request.url, headers=request.headers, json=request.body.json())

    # Parse the LLM response.
    parsed = b.parse.TellMeAStory(response.json()["choices"][0]["message"]["content"])

    # Fully parsed response.
    print(parsed)
```
</Tab>

<Tab title="TypeScript" language="typescript">
```typescript
import { b } from '../baml_client/async_client'

async function example() {
    // Get the HTTP request.
    const request = await b.request.TellMeAStory()

    // Send the HTTP request.
    const response = await fetch(request.url, {
        method: request.method,
        headers: request.headers,
        body: JSON.stringify(request.body.json())
    })

    // Parse the HTTP body.
    const body = await response.json() as any

    // Parse the LLM response.
    const parsed = await b.parse.TellMeAStory(body.choices[0].message.content)

    // Fully parsed response.
    console.log(parsed)
}
```
</Tab>

<Tab title="Ruby" language="ruby">
```ruby
require 'net/http'
require 'uri'
require 'json'

require_relative 'baml_client'

b = Baml.Client

def run
  # Get the HTTP request object.
  baml_req = b.request.TellMeAStory()

  # Construct the Ruby HTTP client.
  uri = URI.parse(baml_req.url)
  http = Net::HTTP.new(uri.host, uri.port)
  http.use_ssl = uri.scheme == 'https'

  # Construct the Ruby HTTP request.
  req = Net::HTTP::Post.new(uri.path)
  req.initialize_http_header(baml_req.headers)
  req.body = baml_req.body.json.to_json

  # Send the HTTP request.
  response = http.request(req)

  # Parse the LLM response.
  parsed = b.parse.TellMeAStory(
    llm_response: JSON.parse(response.body)["choices"][0]["message"]["content"]
  )

  # Fully parsed Resume type.
  puts parsed
end
```

</Tab>

<Tab title="Rust" language="rust">
```rust
use myproject::baml_client::sync_client::B;

fn example() -> Result<(), baml_client::Error> {
    // Parse an LLM response string into the typed output.
    let parsed = B.TellMeAStory.parse("Once upon a time...")?;
    println!("{:?}", parsed);
    Ok(())
}
```
</Tab>
</Tabs>

### `.parse_stream`

<Info> This feature was added in: v0.79.0 </Info>

Same as `.parse` but for streaming responses. It can be used in combination with `.stream_request`.

<Tabs>
<Tab title="Python" language="python">
```python
from openai import AsyncOpenAI
from baml_client.async_client import b

async def example():
    client = AsyncOpenAI()

    request = await b.stream_request.TellMeAStory()
    stream = await client.chat.completions.create(**request.body.json())

    llm_response: list[str] = []
    async for chunk in stream:
        if len(chunk.choices) > 0 and chunk.choices[0].delta.content is not None:
            llm_response.append(chunk.choices[0].delta.content)
            print(b.parse_stream.TellMeAStory("".join(llm_response)))
```

</Tab>

<Tab title="TypeScript" language="typescript">
```typescript
import OpenAI from 'openai'
import { b } from '../baml_client/async_client'

async function example() {
    const client = new OpenAI()

    const request = await b.stream_request.TellMeAStory()
    const stream = await client.chat.completions.create(request.body.json())

    let llmResponse: string[] = []
    for await (const chunk of stream) {
        if (chunk.choices.length > 0 && chunk.choices[0].delta.content) {
            llmResponse.push(chunk.choices[0].delta.content)
            console.log(b.parse_stream.TellMeAStory(llmResponse.join('')))
        }
    }
}
```
</Tab>
</Tabs>