docs/src/content/en/reference/processors/processor-interface.mdx
The Processor interface defines the contract for all processors in Mastra. Processors can implement one or more methods to handle different stages of the agent execution pipeline.
The five processor methods run at different points in the agent execution lifecycle:
┌─────────────────────────────────────────────────────────────────┐
│ Agent Execution Flow │
├─────────────────────────────────────────────────────────────────┤
│ │
│ User Input │
│ │ │
│ ▼ │
│ ┌─────────────────┐ │
│ │ processInput │ ← Runs ONCE at start │
│ └────────┬────────┘ │
│ │ │
│ ▼ │
│ ┌─────────────────────────────────────────────────────────┐ │
│ │ Agentic Loop │ │
│ │ ┌─────────────────────┐ │ │
│ │ │ processInputStep │ ← Runs at EACH step │ │
│ │ └──────────┬──────────┘ │ │
│ │ │ │ │
│ │ ▼ │ │
│ │ LLM Execution │ │
│ │ │ │ │
│ │ ▼ │ │
│ │ ┌──────────────────────┐ │ │
│ │ │ processOutputStream │ ← Runs on EACH stream chunk │ │
│ │ └──────────┬───────────┘ │ │
│ │ │ │ │
│ │ ▼ │ │
│ │ ┌──────────────────────┐ │ │
│ │ │ processOutputStep │ ← Runs after EACH LLM step │ │
│ │ └──────────┬───────────┘ │ │
│ │ │ │ │
│ │ ▼ │ │
│ │ Tool Execution (if needed) │ │
│ │ │ │ │
│ │ └──────── Loop back if tools called ────────│ │
│ └─────────────────────────────────────────────────────────┘ │
│ │ │
│ ▼ │
│ ┌─────────────────────┐ │
│ │ processOutputResult │ ← Runs ONCE after completion │
│ └─────────────────────┘ │
│ │ │
│ ▼ │
│ Final Response │
│ │
└─────────────────────────────────────────────────────────────────┘
| Method | When it runs | Use case |
|---|---|---|
| `processInput` | Once at the start, before the agentic loop | Validate/transform initial user input, add context |
| `processInputStep` | At each step of the agentic loop, before each LLM call | Transform messages between steps, handle tool results |
| `processOutputStream` | On each streaming chunk during LLM response | Filter/modify streaming content, detect patterns in real-time |
| `processOutputStep` | After each LLM response, before tool execution | Validate output quality, implement guardrails with retry |
| `processOutputResult` | Once after generation completes | Post-process final response, log results |
interface Processor<TId extends string = string> {
  /** Unique identifier for the processor. Used for tracing and debugging. */
  readonly id: TId
  /** Optional display name. Falls back to `id` if not provided. */
  readonly name?: string
  /** Runs once at the start of agent execution, before the agentic loop. */
  processInput?(args: ProcessInputArgs): Promise<ProcessInputResult> | ProcessInputResult
  /** Runs at each step of the agentic loop, before each LLM call. */
  processInputStep?(args: ProcessInputStepArgs): ProcessorMessageResult
  /** Runs on each streaming chunk; resolve to null/undefined to skip emitting the chunk. */
  processOutputStream?(args: ProcessOutputStreamArgs): Promise<ChunkType | null | undefined>
  /** Runs after each LLM response, before tool execution. */
  processOutputStep?(args: ProcessOutputStepArgs): ProcessorMessageResult
  /** Runs once after generation completes. */
  processOutputResult?(args: ProcessOutputResultArgs): ProcessorMessageResult
}
<PropertiesTable
content={[
{
name: 'id',
type: 'string',
description: 'Unique identifier for the processor. Used for tracing and debugging.',
isOptional: false,
},
{
name: 'name',
type: 'string',
description: 'Optional display name for the processor. Falls back to id if not provided.',
isOptional: true,
},
{
name: 'processDataParts',
type: 'boolean',
description:
'When true, the processOutputStream method also receives data-* chunks emitted by tools via writer.custom(). Defaults to false.',
isOptional: true,
},
]}
/>
### processInput

Processes input messages before they're sent to the LLM. Runs once at the start of agent execution.
processInput?(args: ProcessInputArgs): Promise<ProcessInputResult> | ProcessInputResult;
#### ProcessInputArgs

<PropertiesTable
content={[
{
name: 'messages',
type: 'MastraDBMessage[]',
description: 'User and assistant messages to process (excludes system messages).',
isOptional: false,
},
{
name: 'systemMessages',
type: 'CoreMessage[]',
description:
'All system messages (agent instructions, memory context, user-provided). Can be modified and returned.',
isOptional: false,
},
{
name: 'messageList',
type: 'MessageList',
description: 'Full MessageList instance for advanced message management.',
isOptional: false,
},
{
name: 'abort',
type: '(reason?: string, options?: { retry?: boolean; metadata?: unknown }) => never',
description:
'Function to abort processing. Throws a TripWire error that stops execution. Pass retry: true to request the LLM retry the step with feedback.',
isOptional: false,
},
{
name: 'retryCount',
type: 'number',
description:
'Number of times processors have triggered retry for this generation. Use this to limit retry attempts.',
isOptional: true,
},
{
name: 'tracingContext',
type: 'TracingContext',
description: 'Tracing context for observability.',
isOptional: true,
},
{
name: 'requestContext',
type: 'RequestContext',
description: 'Request-scoped context with execution metadata like threadId and resourceId.',
isOptional: true,
},
]}
/>
#### ProcessInputResult

The method can return one of three types:
<PropertiesTable content={[ { name: 'MastraDBMessage[]', type: 'array', description: 'Transformed messages array. System messages remain unchanged.', }, { name: 'MessageList', type: 'MessageList', description: "The same messageList instance passed in. Indicates you've mutated it directly.", }, { name: '{ messages, systemMessages }', type: 'object', description: 'Object with both transformed messages and modified system messages.', }, ]} />
### processInputStep

Processes input messages at each step of the agentic loop, before they're sent to the LLM. Unlike processInput which runs once at the start, this runs at every step including tool call continuations.
processInputStep?(args: ProcessInputStepArgs): ProcessorMessageResult;
Execution order:

1. `processInput` (once at start)
2. `processInputStep` from `inputProcessors` (at each step, before LLM call)
3. `prepareStep` callback (runs as part of the `processInputStep` pipeline, after `inputProcessors`)

#### ProcessInputStepArgs

<PropertiesTable
  content={[
    { name: 'messages', type: 'MastraDBMessage[]', description: 'All messages including tool calls and results from previous steps (read-only snapshot).', isOptional: false },
    { name: 'messageList', type: 'MessageList', description: 'MessageList instance for managing messages. Can mutate directly or return in result.', isOptional: false },
    { name: 'stepNumber', type: 'number', description: 'Current step number (0-indexed). Step 0 is the initial LLM call.', isOptional: false },
    { name: 'steps', type: 'StepResult[]', description: 'Results from previous steps, including text, toolCalls, and toolResults.', isOptional: false },
    { name: 'systemMessages', type: 'CoreMessage[]', description: 'All system messages (read-only snapshot). Return in result to replace.', isOptional: false },
    { name: 'model', type: 'MastraLanguageModelV2', description: 'Current model being used. Return a different model in result to switch.', isOptional: false },
    { name: 'toolChoice', type: 'ToolChoice', description: "Current tool choice setting ('auto', 'none', 'required', or specific tool).", isOptional: true },
    { name: 'activeTools', type: 'string[]', description: 'Currently active tool names. Return filtered array to limit tools.', isOptional: true },
    { name: 'tools', type: 'ToolSet', description: 'Current tools available for this step. Return in result to add/replace tools.', isOptional: true },
    { name: 'providerOptions', type: 'SharedV2ProviderOptions', description: 'Provider-specific options (e.g., Anthropic cacheControl, OpenAI reasoningEffort).', isOptional: true },
    { name: 'modelSettings', type: 'CallSettings', description: 'Model settings like temperature, maxTokens, topP.', isOptional: true },
    { name: 'structuredOutput', type: 'StructuredOutputOptions', description: 'Structured output configuration (schema, output mode). Return in result to modify.', isOptional: true },
    { name: 'abort', type: '(reason?: string) => never', description: 'Function to abort processing.', isOptional: false },
    { name: 'tracingContext', type: 'TracingContext', description: 'Tracing context for observability.', isOptional: true },
    { name: 'requestContext', type: 'RequestContext', description: 'Request-scoped context with execution metadata.', isOptional: true },
  ]}
/>
#### ProcessInputStepResult

The method can return any combination of these properties:
<PropertiesTable content={[ { name: 'model', type: 'LanguageModelV2 | string', description: "Change the model for this step. Can be a model instance or router ID like 'openai/gpt-5.4'.", isOptional: true, }, { name: 'toolChoice', type: 'ToolChoice', description: 'Change tool selection behavior for this step.', isOptional: true, }, { name: 'activeTools', type: 'string[]', description: 'Filter which tools are available for this step.', isOptional: true, }, { name: 'tools', type: 'ToolSet', description: 'Replace or modify tools for this step. Use spread to merge: { tools: { ...tools, newTool } }.', isOptional: true, }, { name: 'messages', type: 'MastraDBMessage[]', description: 'Replace all messages. Cannot be used with messageList.', isOptional: true, }, { name: 'messageList', type: 'MessageList', description: 'Return the same messageList instance (indicates you mutated it). Cannot be used with messages.', isOptional: true, }, { name: 'systemMessages', type: 'CoreMessage[]', description: 'Replace all system messages for this step only.', isOptional: true, }, { name: 'providerOptions', type: 'SharedV2ProviderOptions', description: 'Change provider-specific options for this step.', isOptional: true, }, { name: 'modelSettings', type: 'CallSettings', description: 'Change model settings for this step.', isOptional: true, }, { name: 'structuredOutput', type: 'StructuredOutputOptions', description: 'Change structured output configuration for this step.', isOptional: true, }, ]} />
When multiple processors implement processInputStep, they run in order and changes chain through:
Processor 1: receives { model: 'gpt-4o' } → returns { model: 'gpt-4o-mini' }
Processor 2: receives { model: 'gpt-4o-mini' } → returns { toolChoice: 'none' }
Final: model = 'gpt-4o-mini', toolChoice = 'none'
System messages are reset to their original values at the start of each step. Modifications made in processInputStep only affect the current step, not subsequent steps.
(e.g., `reasoning` → `thinking` for Anthropic)

### processOutputStream

Processes streaming output chunks with built-in state management. Allows processors to accumulate chunks and make decisions based on larger context.
processOutputStream?(args: ProcessOutputStreamArgs): Promise<ChunkType | null | undefined>;
#### ProcessOutputStreamArgs

<PropertiesTable
  content={[
    { name: 'part', type: 'ChunkType', description: 'The current stream chunk being processed.', isOptional: false },
    { name: 'streamParts', type: 'ChunkType[]', description: 'All chunks seen so far in the stream.', isOptional: false },
    { name: 'state', type: 'Record<string, unknown>', description: 'Mutable state object that persists across chunks within a single stream.', isOptional: false },
    { name: 'abort', type: '(reason?: string) => never', description: 'Function to abort the stream.', isOptional: false },
    { name: 'messageList', type: 'MessageList', description: 'MessageList instance for accessing conversation history.', isOptional: true },
    { name: 'tracingContext', type: 'TracingContext', description: 'Tracing context for observability.', isOptional: true },
    { name: 'requestContext', type: 'RequestContext', description: 'Request-scoped context with execution metadata.', isOptional: true },
    { name: 'writer', type: 'ProcessorStreamWriter', description: 'Stream writer for emitting custom data chunks back to the client. Call writer.custom() to emit data-* typed chunks. Available during streaming.', isOptional: true },
  ]}
/>
Return the `ChunkType` to emit it (possibly modified), or return `null` or `undefined` to skip emitting the chunk.

### processOutputResult

Processes the complete output result after streaming or generation is finished.
processOutputResult?(args: ProcessOutputResultArgs): ProcessorMessageResult;
#### ProcessOutputResultArgs

<PropertiesTable
content={[
{
name: 'messages',
type: 'MastraDBMessage[]',
description: 'The generated response messages.',
isOptional: false,
},
{
name: 'messageList',
type: 'MessageList',
description: 'MessageList instance for managing messages.',
isOptional: false,
},
{
name: 'state',
type: 'Record<string, unknown>',
description:
'Per-processor state that persists across all method calls within this request. Shared with processOutputStream and other methods.',
isOptional: false,
},
{
name: 'result',
type: 'OutputResult',
description:
'Resolved generation result containing text (accumulated text), usage (token usage with inputTokens, outputTokens, totalTokens), finishReason (why generation ended), and steps (all LLM step results, each with toolCalls, toolResults, reasoning, sources, files, etc.).',
isOptional: false,
},
{
name: 'abort',
type: '(reason?: string) => never',
description: 'Function to abort processing.',
isOptional: false,
},
{
name: 'tracingContext',
type: 'TracingContext',
description: 'Tracing context for observability.',
isOptional: true,
},
{
name: 'requestContext',
type: 'RequestContext',
description: 'Request-scoped context with execution metadata.',
isOptional: true,
},
{
name: 'writer',
type: 'ProcessorStreamWriter',
description:
'Stream writer for emitting custom data chunks back to the client. Call writer.custom() to emit data-* typed chunks. Available during streaming.',
isOptional: true,
},
]}
/>
### processOutputStep

Processes output after each LLM response in the agentic loop, before tool execution. Unlike processOutputResult which runs once at the end, this runs at every step. This is the ideal method for implementing guardrails that can trigger retries.
processOutputStep?(args: ProcessOutputStepArgs): ProcessorMessageResult;
#### ProcessOutputStepArgs

<PropertiesTable
content={[
{
name: 'messages',
type: 'MastraDBMessage[]',
description: 'All messages including the latest LLM response.',
isOptional: false,
},
{
name: 'messageList',
type: 'MessageList',
description: 'MessageList instance for managing messages.',
isOptional: false,
},
{
name: 'stepNumber',
type: 'number',
description: 'Current step number (0-indexed).',
isOptional: false,
},
{
name: 'finishReason',
type: 'string',
description: 'The finish reason from the LLM (stop, tool-use, length, etc.).',
isOptional: true,
},
{
name: 'toolCalls',
type: 'ToolCallInfo[]',
description: 'Tool calls made in this step (if any).',
isOptional: true,
},
{
name: 'text',
type: 'string',
description: 'Generated text from this step.',
isOptional: true,
},
{
name: 'systemMessages',
type: 'CoreMessage[]',
description: 'All system messages for read/modify access.',
isOptional: true,
},
{
name: 'abort',
type: '(reason?: string, options?: { retry?: boolean; metadata?: unknown }) => never',
description: 'Function to abort processing. Pass retry: true to request the LLM retry the step.',
isOptional: false,
},
{
name: 'retryCount',
type: 'number',
description: 'Number of times processors have triggered retry. Use this to limit retry attempts.',
isOptional: true,
},
{
name: 'tracingContext',
type: 'TracingContext',
description: 'Tracing context for observability.',
isOptional: true,
},
{
name: 'requestContext',
type: 'RequestContext',
description: 'Request-scoped context with execution metadata.',
isOptional: true,
},
]}
/>
import type { Processor } from '@mastra/core'
export class QualityGuardrail implements Processor {
  id = 'quality-guardrail'

  /**
   * Guardrail for each LLM step: scores the generated text and, when the
   * score falls below 0.7, asks the LLM to retry the step (up to 3
   * processor-triggered retries) before hard-blocking the response.
   * Returns [] — it never rewrites messages itself.
   *
   * NOTE(review): `retryCount` is optional per ProcessOutputStepArgs;
   * an undefined count routes to the hard-block branch — confirm intended.
   */
  async processOutputStep({ text, abort, retryCount }) {
    const score = await evaluateResponseQuality(text)
    if (score < 0.7) {
      const canRetry = retryCount < 3
      canRetry
        ? // Request retry, giving the LLM feedback plus the score as metadata
          abort('Response quality too low. Please provide more detail.', {
            retry: true,
            metadata: { qualityScore: score },
          })
        : // Retry budget exhausted — block the response outright
          abort('Response quality too low after multiple attempts.')
    }
    return []
  }
}
Mastra provides type aliases to ensure processors implement the required methods:
// Must implement processInput OR processInputStep (or both)
// NOTE(review): `required` below is illustrative pseudo-notation, not valid
// TypeScript — confirm the exact alias definitions against @mastra/core.
type InputProcessor = Processor & ({ processInput: required } | { processInputStep: required })
// Must implement processOutputStream, processOutputStep, OR processOutputResult (or any combination)
type OutputProcessor = Processor &
  (
    | { processOutputStream: required }
    | { processOutputStep: required }
    | { processOutputResult: required }
  )
import type { Processor, MastraDBMessage } from '@mastra/core'
export class LowercaseProcessor implements Processor {
  id = 'lowercase'

  /**
   * Lowercases the text of every `text` part in the incoming messages.
   * Non-text parts and messages without parts are passed through unchanged;
   * messages are copied, not mutated.
   */
  async processInput({ messages }): Promise<MastraDBMessage[]> {
    const lowered: MastraDBMessage[] = []
    for (const msg of messages) {
      const parts = msg.content.parts?.map(part => {
        if (part.type !== 'text') return part
        return { ...part, text: part.text.toLowerCase() }
      })
      lowered.push({ ...msg, content: { ...msg.content, parts } })
    }
    return lowered
  }
}
Example: switching models dynamically with `processInputStep`:

import type { Processor, ProcessInputStepArgs, ProcessInputStepResult } from '@mastra/core'
export class DynamicModelProcessor implements Processor {
  id = 'dynamic-model'

  /**
   * Chooses per-step model/tool settings for the agentic loop:
   * - step 0: a fast model for the initial response
   * - after step 5: disable tools to force completion
   * - after a step that called tools: switch to a more capable model
   *
   * The step-limit guard runs BEFORE the tool-call guard; in the previous
   * ordering the `toolChoice: 'none'` branch was unreachable whenever the
   * prior step called tools, so a tool-looping run could never be forced
   * to complete.
   */
  async processInputStep({
    stepNumber,
    steps,
  }: ProcessInputStepArgs): Promise<ProcessInputStepResult> {
    // Use a fast model for initial response
    if (stepNumber === 0) {
      return { model: 'openai/gpt-5-mini' }
    }
    // Disable tools after 5 steps to force completion
    if (stepNumber > 5) {
      return { toolChoice: 'none' }
    }
    // Switch to powerful model after tool calls
    if (steps.length > 0 && steps[steps.length - 1].toolCalls?.length) {
      return { model: 'openai/gpt-5.4' }
    }
    return {}
  }
}
Example: transforming message parts with `processInputStep`:

import type { Processor, MastraDBMessage } from '@mastra/core'
export class ReasoningTransformer implements Processor {
  id = 'reasoning-transformer'

  /**
   * Rewrites `reasoning` parts as `thinking` parts on every assistant
   * message at each step — useful when switching between model providers
   * that name these parts differently. Mutates the messages in place and
   * returns the messageList to signal the direct mutation.
   */
  async processInputStep({ messages, messageList }) {
    messages
      .filter(msg => msg.role === 'assistant' && msg.content.parts)
      .forEach(msg => {
        msg.content.parts.forEach(part => {
          if (part.type === 'reasoning') {
            ;(part as any).type = 'thinking'
          }
        })
      })
    return messageList
  }
}
import type { Processor, MastraDBMessage, ChunkType } from '@mastra/core'
export class ContentFilter implements Processor {
  id = 'content-filter'

  /** @param blockedWords substrings that trigger an abort when detected */
  constructor(private readonly blockedWords: string[]) {}

  /**
   * Scans the joined text parts of each incoming message and aborts the
   * run if any blocked word appears. Messages are returned unchanged.
   */
  async processInput({ messages, abort }): Promise<MastraDBMessage[]> {
    for (const msg of messages) {
      const joined = msg.content.parts
        ?.filter(p => p.type === 'text')
        .map(p => p.text)
        .join(' ')
      const blocked = this.blockedWords.some(word => joined?.includes(word))
      if (blocked) {
        abort('Blocked content detected in input')
      }
    }
    return messages
  }

  /**
   * Aborts the stream if a text-delta chunk contains a blocked word;
   * otherwise forwards the chunk untouched.
   */
  async processOutputStream({ part, abort }): Promise<ChunkType | null> {
    const isBlockedDelta =
      part.type === 'text-delta' &&
      this.blockedWords.some(word => part.textDelta.includes(word))
    if (isBlockedDelta) {
      abort('Blocked content detected in output')
    }
    return part
  }
}
import type { Processor, ChunkType } from '@mastra/core'
export class WordCounter implements Processor {
  id = 'word-counter'

  /**
   * Counts whitespace-separated words across all text-delta chunks,
   * accumulating the running total in the per-stream `state` object, and
   * logs the total when the finish chunk arrives. Every chunk is passed
   * through unchanged.
   */
  async processOutputStream({ part, state }): Promise<ChunkType> {
    // Lazily initialize the running total on the first chunk
    state.wordCount = state.wordCount || 0
    switch (part.type) {
      case 'text-delta': {
        const tokens = part.textDelta.split(/\s+/).filter(Boolean)
        state.wordCount += tokens.length
        break
      }
      case 'finish':
        console.log(`Total words: ${state.wordCount}`)
        break
    }
    return part
  }
}