docs/docs/API-Reference/api-openai-responses.mdx
import CodeSnippet from '@site/src/components/CodeSnippet'; import exampleApiOpenaiResponsesExampleRequest from '!!raw-loader!@site/docs/API-Reference/curl-examples/api-openai-responses/example-request.sh'; import exampleApiOpenaiResponsesExampleStreamingRequest from '!!raw-loader!@site/docs/API-Reference/curl-examples/api-openai-responses/example-streaming-request.sh'; import resultApiOpenaiResponsesResultExampleStreamingRequest from '!!raw-loader!@site/docs/API-Reference/curl-examples/api-openai-responses/result-example-streaming-request.json'; import exampleApiOpenaiResponsesContinueConversationsWithResponseAndSessionIds from '!!raw-loader!@site/docs/API-Reference/curl-examples/api-openai-responses/continue-conversations-with-response-and-session-ids.sh'; import resultApiOpenaiResponsesResultContinueConversationsWithResponseAndSessionIds from '!!raw-loader!@site/docs/API-Reference/curl-examples/api-openai-responses/result-continue-conversations-with-response-and-session-ids.json'; import exampleApiOpenaiResponsesContinueConversationsWithResponseAndSessionIds2 from '!!raw-loader!@site/docs/API-Reference/curl-examples/api-openai-responses/continue-conversations-with-response-and-session-ids-2.sh'; import resultApiOpenaiResponsesResultContinueConversationsWithResponseAndSessionIds2 from '!!raw-loader!@site/docs/API-Reference/curl-examples/api-openai-responses/result-continue-conversations-with-response-and-session-ids-2.json'; import exampleApiOpenaiResponsesContinueConversationsWithResponseAndSessionIds3 from '!!raw-loader!@site/docs/API-Reference/curl-examples/api-openai-responses/continue-conversations-with-response-and-session-ids-3.sh'; import exampleApiOpenaiResponsesRetrieveToolCallResults from '!!raw-loader!@site/docs/API-Reference/curl-examples/api-openai-responses/retrieve-tool-call-results.sh'; import resultApiOpenaiResponsesResultRetrieveToolCallResults from 
'!!raw-loader!@site/docs/API-Reference/curl-examples/api-openai-responses/result-retrieve-tool-call-results.json'; import exampleApiOpenaiResponsesPassGlobalVariablesToYourFlowsInHeaders from '!!raw-loader!@site/docs/API-Reference/curl-examples/api-openai-responses/pass-global-variables-to-your-flows-in-headers.sh'; import exampleApiOpenaiResponsesTokenUsageTracking from '!!raw-loader!@site/docs/API-Reference/curl-examples/api-openai-responses/token-usage-tracking.sh'; import examplePythonApiOpenaiResponsesAdditionalConfigurationForOpenaiClientLibraries from '!!raw-loader!@site/docs/API-Reference/python-examples/api-openai-responses/additional-configuration-for-openai-client-libraries.py'; import exampleJavascriptApiOpenaiResponsesAdditionalConfigurationForOpenaiClientLibraries from '!!raw-loader!@site/docs/API-Reference/javascript-examples/api-openai-responses/additional-configuration-for-openai-client-libraries.ts'; import examplePythonApiOpenaiResponsesTokenUsageTracking from '!!raw-loader!@site/docs/API-Reference/python-examples/api-openai-responses/token-usage-tracking.py'; import examplePythonApiOpenaiResponsesExampleRequest from '!!raw-loader!@site/docs/API-Reference/python-examples/api-openai-responses/example-request.py'; import exampleJavascriptApiOpenaiResponsesExampleRequest from '!!raw-loader!@site/docs/API-Reference/javascript-examples/api-openai-responses/example-request.js'; import examplePythonApiOpenaiResponsesExampleStreamingRequest from '!!raw-loader!@site/docs/API-Reference/python-examples/api-openai-responses/example-streaming-request.py'; import exampleJavascriptApiOpenaiResponsesExampleStreamingRequest from '!!raw-loader!@site/docs/API-Reference/javascript-examples/api-openai-responses/example-streaming-request.js'; import examplePythonApiOpenaiResponsesContinueConversationsWithResponseAndSessionIds from '!!raw-loader!@site/docs/API-Reference/python-examples/api-openai-responses/continue-conversations-with-response-and-session-ids.py'; 
import exampleJavascriptApiOpenaiResponsesContinueConversationsWithResponseAndSessionIds from '!!raw-loader!@site/docs/API-Reference/javascript-examples/api-openai-responses/continue-conversations-with-response-and-session-ids.js'; import examplePythonApiOpenaiResponsesContinueConversationsWithResponseAndSessionIds2 from '!!raw-loader!@site/docs/API-Reference/python-examples/api-openai-responses/continue-conversations-with-response-and-session-ids-2.py'; import exampleJavascriptApiOpenaiResponsesContinueConversationsWithResponseAndSessionIds2 from '!!raw-loader!@site/docs/API-Reference/javascript-examples/api-openai-responses/continue-conversations-with-response-and-session-ids-2.js'; import examplePythonApiOpenaiResponsesContinueConversationsWithResponseAndSessionIds3 from '!!raw-loader!@site/docs/API-Reference/python-examples/api-openai-responses/continue-conversations-with-response-and-session-ids-3.py'; import exampleJavascriptApiOpenaiResponsesContinueConversationsWithResponseAndSessionIds3 from '!!raw-loader!@site/docs/API-Reference/javascript-examples/api-openai-responses/continue-conversations-with-response-and-session-ids-3.js'; import examplePythonApiOpenaiResponsesRetrieveToolCallResults from '!!raw-loader!@site/docs/API-Reference/python-examples/api-openai-responses/retrieve-tool-call-results.py'; import exampleJavascriptApiOpenaiResponsesRetrieveToolCallResults from '!!raw-loader!@site/docs/API-Reference/javascript-examples/api-openai-responses/retrieve-tool-call-results.js'; import examplePythonApiOpenaiResponsesPassGlobalVariablesToYourFlowsInHeaders from '!!raw-loader!@site/docs/API-Reference/python-examples/api-openai-responses/pass-global-variables-to-your-flows-in-headers.py'; import exampleJavascriptApiOpenaiResponsesPassGlobalVariablesToYourFlowsInHeaders from '!!raw-loader!@site/docs/API-Reference/javascript-examples/api-openai-responses/pass-global-variables-to-your-flows-in-headers.js'; import 
exampleJavascriptApiOpenaiResponsesTokenUsageTracking from '!!raw-loader!@site/docs/API-Reference/javascript-examples/api-openai-responses/token-usage-tracking.js';
import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem';
Langflow includes an endpoint that is compatible with the OpenAI Responses API.
It is available at POST /api/v1/responses.
This endpoint allows you to use existing OpenAI client libraries with minimal code changes.
You only need to replace the model name, such as gpt-4, with your flow_id.
You can find Flow IDs in the code snippets on the API access pane or in a flow's URL.
To be compatible with Langflow's OpenAI Responses API endpoint, your flow and request must adhere to the following requirements:
- Chat input components (ChatInput and Chat Input) are recognized as chat inputs.
- The tools parameter isn't supported, and the endpoint returns an error if it is provided.
- The model field must contain a valid flow ID or endpoint name.
- Requests must be authenticated with an x-api-key header.
For more information, see API keys and authentication.

This endpoint is compatible with OpenAI's API, but requires special configuration when using OpenAI client libraries.
Langflow uses x-api-key headers for authentication, while OpenAI uses Authorization: Bearer headers.
When sending requests to Langflow with OpenAI client libraries, you must configure custom headers and include an api_key configuration.
The api_key parameter can have any value, such as "dummy-api-key" in the client examples, as the actual authentication is handled through the default_headers configuration.
In the following examples, replace the values for LANGFLOW_SERVER_URL, LANGFLOW_API_KEY, and FLOW_ID with values from your deployment.
<Tabs groupId="client">
<TabItem value="Python" label="OpenAI Python Client" default>
| Header | Required | Description | Example |
|---|---|---|---|
x-api-key | Yes | Your Langflow API key for authentication | "sk-..." |
Content-Type | Yes | Specifies the JSON format | "application/json" |
X-LANGFLOW-GLOBAL-VAR-* | No | Global variables for the flow | "X-LANGFLOW-GLOBAL-VAR-API_KEY: sk-..." For more, see Pass global variables to your flows in headers. |
| Field | Type | Required | Default | Description |
|---|---|---|---|---|
model | string | Yes | - | The flow ID or endpoint name to execute. |
input | string | Yes | - | The input text to process. |
stream | boolean | No | false | Whether to stream the response. |
background | boolean | No | false | Whether to process in background. |
tools | list[Any] | No | null | Tools are not supported yet. |
previous_response_id | string | No | null | ID of previous response to continue conversation. For more, see Continue conversations with response and session IDs. |
include | list[string] | No | null | Additional response data to include, such as ['tool_call.results']. For more, see Retrieve tool call results. |
{
"id": "e5e8ef8a-7efd-4090-a110-6aca082bceb7",
"object": "response",
"created_at": 1756837941,
"status": "completed",
"model": "ced2ec91-f325-4bf0-8754-f3198c2b1563",
"output": [
{
"type": "message",
"id": "msg_e5e8ef8a-7efd-4090-a110-6aca082bceb7",
"status": "completed",
"role": "assistant",
"content": [
{
"type": "output_text",
"text": "Hello! I'm here and ready to help. How can I assist you today?",
"annotations": []
}
]
}
],
"parallel_tool_calls": true,
"previous_response_id": null,
"reasoning": {"effort": null, "summary": null},
"store": true,
"temperature": 1.0,
"text": {"format": {"type": "text"}},
"tool_choice": "auto",
"tools": [],
"top_p": 1.0,
"truncation": "disabled",
"usage": null,
"user": null,
"metadata": {}
}
The response contains fields that Langflow sets dynamically and fields that use OpenAI-compatible defaults.
The OpenAI-compatible default values shown above are currently fixed and cannot be modified via the request. They are included to maintain API compatibility and provide a consistent response format.
For your requests, you will only be setting the dynamic fields. The default values are documented here for completeness and to show the full response structure.
Fields set dynamically by Langflow:
| Field | Type | Description |
|---|---|---|
id | string | Unique response identifier. |
created_at | int | Unix timestamp of response creation. |
model | string | The flow ID that was executed. |
output | list[dict] | Array of output items (messages, tool calls, etc.). |
previous_response_id | string | ID of previous response if continuing conversation. |
usage | dict | Token usage statistics if the usage field is available. Contains prompt_tokens, completion_tokens, and total_tokens. |
| Field | Type | Default Value | Description |
|---|---|---|---|
object | string | "response" | Always "response". |
status | string | "completed" | Response status: "completed", "in_progress", or "failed". |
error | dict | null | Error details (if any). |
incomplete_details | dict | null | Incomplete response details (if any). |
instructions | string | null | Response instructions (if any). |
max_output_tokens | int | null | Maximum output tokens (if any). |
parallel_tool_calls | boolean | true | Whether parallel tool calls are enabled. |
reasoning | dict | {"effort": null, "summary": null} | Reasoning information with effort and summary. |
store | boolean | true | Whether response is stored. |
temperature | float | 1.0 | Temperature setting. |
text | dict | {"format": {"type": "text"}} | Text format configuration. |
tool_choice | string | "auto" | Tool choice setting. |
tools | list[dict] | [] | Available tools. |
top_p | float | 1.0 | Top-p setting. |
truncation | string | "disabled" | Truncation setting. |
usage | dict | null | Token usage statistics. Set dynamically when available from flow components, otherwise null. See Token usage tracking. |
user | string | null | User identifier (if any). |
metadata | dict | {} | Additional metadata. |
When you set "stream": true with your request, the API returns a stream where each chunk contains a small piece of the response as it's generated. This provides a real-time experience where users can see the AI's output appear word by word, similar to ChatGPT's typing effect.
| Field | Type | Description |
|---|---|---|
id | string | Unique response identifier. |
object | string | Always "response.chunk". |
created | int | Unix timestamp of chunk creation. |
model | string | The flow ID that was executed. |
delta | dict | The new content chunk. |
status | string | Response status: "completed", "in_progress", or "failed" (optional). |
The stream continues until a final chunk with "status": "completed" indicates the response is finished.
{
"id": "f7fcea36-f128-41c4-9ac1-e683137375d5",
"object": "response.chunk",
"created": 1756838094,
"model": "ced2ec91-f325-4bf0-8754-f3198c2b1563",
"delta": {},
"status": "completed"
}
Conversation continuity allows you to maintain context across multiple API calls, enabling multi-turn conversations with your flows. This is essential for building chat applications where users can have ongoing conversations.
When you make a request, the API returns a response with an id field. You can use this id as the previous_response_id in your next request to continue the conversation from where it left off.
First Message:
<Tabs> <TabItem value="Python" label="Python" default> <CodeSnippet source={examplePythonApiOpenaiResponsesContinueConversationsWithResponseAndSessionIds} language="python" /> </TabItem> <TabItem value="JavaScript" label="JavaScript"> <CodeSnippet source={exampleJavascriptApiOpenaiResponsesContinueConversationsWithResponseAndSessionIds} language="javascript" /> </TabItem> <TabItem value="curl" label="curl"> <CodeSnippet source={exampleApiOpenaiResponsesContinueConversationsWithResponseAndSessionIds} language="bash" /> </TabItem> </Tabs> <details> <summary>Result</summary> <CodeSnippet source={resultApiOpenaiResponsesResultContinueConversationsWithResponseAndSessionIds} language="json" /> </details>Follow-up message:
<Tabs> <TabItem value="Python" label="Python" default> <CodeSnippet source={examplePythonApiOpenaiResponsesContinueConversationsWithResponseAndSessionIds2} language="python" /> </TabItem> <TabItem value="JavaScript" label="JavaScript"> <CodeSnippet source={exampleJavascriptApiOpenaiResponsesContinueConversationsWithResponseAndSessionIds2} language="javascript" /> </TabItem> <TabItem value="curl" label="curl"> <CodeSnippet source={exampleApiOpenaiResponsesContinueConversationsWithResponseAndSessionIds2} language="bash" /> </TabItem> </Tabs> <details> <summary>Result</summary> <CodeSnippet source={resultApiOpenaiResponsesResultContinueConversationsWithResponseAndSessionIds2} language="json" /> </details>Optionally, you can use your own session ID values for the previous_response_id:
This example uses the same flow as the other previous_response_id examples, but the LLM had not yet been introduced to Alice in the specified session:
{
"id": "session-alice-1756839048",
"object": "response",
"created_at": 1756839048,
"status": "completed",
"model": "ced2ec91-f325-4bf0-8754-f3198c2b1563",
"output": [
{
"type": "message",
"id": "msg_session-alice-1756839048",
"status": "completed",
"role": "assistant",
"content": [
{
"type": "output_text",
"text": "I don't have access to your name unless you tell me. If you'd like, you can share your name, and I'll remember it for this conversation!",
"annotations": []
}
]
}
],
"previous_response_id": "session-alice-1756839048"
}
When you send a request to the /api/v1/responses endpoint to run a flow that includes tools or function calls, you can retrieve the raw tool execution details by adding "include": ["tool_call.results"] to the request payload.
Without the include parameter, tool calls return basic function call information, but not the raw tool results.
For example:
{
"id": "fc_1",
"type": "function_call",
"status": "completed",
"name": "evaluate_expression",
"arguments": "{\"expression\": \"15*23\"}"
},
To get the raw results of each tool execution, add include: ["tool_call.results"] to the request payload:
The response now includes the tool call's results. For example:
{
"id": "evaluate_expression_1",
"type": "tool_call",
"tool_name": "evaluate_expression",
"queries": ["15*23"],
"results": {"result": "345"}
}
Variables passed with X-LANGFLOW-GLOBAL-VAR-{VARIABLE_NAME} are always available to your flow, regardless of whether they exist in the database.
If your flow components reference variables that aren't provided in headers or your Langflow database, the flow fails by default.
To avoid this, you can set the FALLBACK_TO_ENV_VARS environment variable to true, which allows the flow to use values from the .env file if they aren't otherwise specified.
In the above example, OPENAI_API_KEY will fall back to the database variable if not provided in the header.
USER_ID and ENVIRONMENT will fall back to environment variables if FALLBACK_TO_ENV_VARS is enabled.
Otherwise, the flow fails.
The OpenAI Responses API endpoint tracks token usage when your flow uses language model components that provide token usage information. The usage field in the response contains statistics about the number of tokens used for the request and response.
Token usage is automatically extracted from the flow execution results when the usage field is available.
The usage field follows OpenAI's format with prompt_tokens, completion_tokens, and total_tokens fields.
If token usage information is not available from the flow components, the usage field is null.
The usage field is always present in the response, either with token counts or as null. The conditional checks shown in the examples below are optional defensive programming to handle cases where usage might not be available.
{
"id": "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
"object": "response",
"created_at": 1756837941,
"status": "completed",
"model": "ced2ec91-f325-4bf0-8754-f3198c2b1563",
"output": [
{
"type": "message",
"id": "msg_a1b2c3d4-e5f6-7890-abcd-ef1234567890",
"status": "completed",
"role": "assistant",
"content": [
{
"type": "output_text",
"text": "Quantum computing is a type of computing that uses quantum mechanical phenomena...",
"annotations": []
}
]
}
],
"usage": {
"prompt_tokens": 12,
"completion_tokens": 145,
"total_tokens": 157
},
"previous_response_id": null
}