Skip to main content

Per-Tool Governance

When a LangGraph tools node invokes multiple individual tools, each tool can be governed independently using ToolContext. This provides granular policy control — for example, allowing web_search but blocking code_executor within the same tools node.

How It Works

Instead of a single gate check for the entire tools node, you check each tool individually:

from axonflow.workflow import ToolContext, StepGateRequest, StepType

# Check gate with tool context
gate = await client.step_gate(
workflow_id=workflow.workflow_id,
step_id="step-tools-web_search",
request=StepGateRequest(
step_name="tools/web_search",
step_type=StepType.TOOL_CALL,
tool_context=ToolContext(
tool_name="web_search",
tool_type="function",
tool_input={"query": "latest AI research"},
),
),
)

The policy adapter propagates tool_name, tool_type, and tool_input.* keys into the policy evaluation context, enabling tool-aware rules.

All four SDKs provide a LangGraph adapter with convenience methods for per-tool governance.

Python

from axonflow import AxonFlow
from axonflow.adapters import AxonFlowLangGraphAdapter
from axonflow.workflow import WorkflowSource

async with AxonFlow(endpoint="http://localhost:8080") as client:
adapter = AxonFlowLangGraphAdapter(
client=client,
workflow_name="research-agent",
source=WorkflowSource.LANGGRAPH,
)

async with adapter:
await adapter.start_workflow(trace_id="langsmith-run-abc123")

# Standard LLM node gate
if await adapter.check_gate("plan_research", "llm_call", model="gpt-4"):
result = await plan_research(state)
await adapter.step_completed("plan_research", output=result)

# Per-tool governance within a tools node
if await adapter.check_tool_gate("web_search", "function",
tool_input={"query": "latest news"}):
search_result = await web_search(query="latest news")
await adapter.tool_completed("web_search", output=search_result)

if await adapter.check_tool_gate("sql_query", "mcp",
tool_input={"query": "SELECT * FROM users LIMIT 10"}):
db_result = await sql_query("SELECT * FROM users LIMIT 10")
await adapter.tool_completed("sql_query", output=db_result)

TypeScript

import { AxonFlow, AxonFlowLangGraphAdapter } from "@axonflow/sdk";

const client = new AxonFlow({
endpoint: "http://localhost:8080",
clientId: "my-app",
clientSecret: "your-secret",
});

const adapter = new AxonFlowLangGraphAdapter(client, "research-agent", {
source: "langgraph",
});

await adapter.startWorkflow(undefined, "langsmith-run-abc123");

// Standard LLM node gate
if (await adapter.checkGate("plan_research", "llm_call", { model: "gpt-4" })) {
const result = await planResearch(state);
await adapter.stepCompleted("plan_research", { output: result });
}

// Per-tool governance within a tools node
if (await adapter.checkToolGate("web_search", "function", {
toolInput: { query: "latest news" } })) {
const searchResult = await webSearch({ query: "latest news" });
await adapter.toolCompleted("web_search", { output: searchResult });
}

if (await adapter.checkToolGate("sql_query", "mcp", {
toolInput: { query: "SELECT * FROM users LIMIT 10" } })) {
const dbResult = await sqlQuery("SELECT * FROM users LIMIT 10");
await adapter.toolCompleted("sql_query", { output: dbResult });
}

await adapter.completeWorkflow();

Go

import axonflow "github.com/getaxonflow/axonflow-sdk-go/v5"

client := axonflow.NewClient(axonflow.AxonFlowConfig{
Endpoint: "http://localhost:8080",
ClientID: "my-app",
ClientSecret: "your-secret",
})

adapter := axonflow.NewLangGraphAdapter(client, "research-agent")

ctx := context.Background()
workflowID, _ := adapter.StartWorkflow(ctx, nil, "langsmith-run-abc123")

// Standard LLM node gate
allowed, _ := adapter.CheckGate(ctx, "plan_research", axonflow.StepTypeLLMCall,
&axonflow.CheckGateOptions{Model: "gpt-4"})
if allowed {
result := planResearch(state)
adapter.StepCompleted(ctx, "plan_research",
&axonflow.StepCompletedOptions{Output: result})
}

// Per-tool governance within a tools node
allowed, _ = adapter.CheckToolGate(ctx, "web_search", "function",
&axonflow.CheckToolGateOptions{ToolInput: map[string]interface{}{"query": "latest news"}})
if allowed {
searchResult := webSearch("latest news")
adapter.ToolCompleted(ctx, "web_search",
&axonflow.ToolCompletedOptions{Output: searchResult})
}

allowed, _ = adapter.CheckToolGate(ctx, "sql_query", "mcp",
&axonflow.CheckToolGateOptions{ToolInput: map[string]interface{}{"query": "SELECT * FROM users LIMIT 10"}})
if allowed {
dbResult := sqlQuery("SELECT * FROM users LIMIT 10")
adapter.ToolCompleted(ctx, "sql_query",
&axonflow.ToolCompletedOptions{Output: dbResult})
}

adapter.CompleteWorkflow(ctx)

Java

import com.getaxonflow.sdk.AxonFlow;
import com.getaxonflow.sdk.adapters.*;

AxonFlow client = AxonFlow.create(AxonFlowConfig.builder()
.endpoint("http://localhost:8080")
.clientId("my-app")
.clientSecret("your-secret")
.build());

LangGraphAdapter adapter = LangGraphAdapter.builder(client, "research-agent").build();

adapter.startWorkflow(null, "langsmith-run-abc123");

// Standard LLM node gate
if (adapter.checkGate("plan_research", "llm_call",
CheckGateOptions.builder().model("gpt-4").build())) {
Object result = planResearch(state);
adapter.stepCompleted("plan_research",
StepCompletedOptions.builder().output(Map.of("result", result)).build());
}

// Per-tool governance within a tools node
if (adapter.checkToolGate("web_search", "function",
CheckToolGateOptions.builder().toolInput(Map.of("query", "latest news")).build())) {
Object searchResult = webSearch("latest news");
adapter.toolCompleted("web_search",
ToolCompletedOptions.builder().output(Map.of("results", searchResult)).build());
}

if (adapter.checkToolGate("sql_query", "mcp",
CheckToolGateOptions.builder().toolInput(Map.of("query", "SELECT * FROM users LIMIT 10")).build())) {
Object dbResult = sqlQuery("SELECT * FROM users LIMIT 10");
adapter.toolCompleted("sql_query",
ToolCompletedOptions.builder().output(Map.of("rows", dbResult)).build());
}

adapter.completeWorkflow();

Raw HTTP

curl -X POST http://localhost:8080/api/v1/workflows/$WF_ID/steps/step-tools-web_search/gate \
-H "Content-Type: application/json" \
-H "Authorization: Basic $(echo -n 'client-id:client-secret' | base64)" \
-d '{
"step_name": "tools/web_search",
"step_type": "tool_call",
"tool_context": {
"tool_name": "web_search",
"tool_type": "function",
"tool_input": {"query": "latest news"}
}
}'

Phase 1 Scope

Per-tool governance is currently in Phase 1 (context enrichment). ToolContext is optional and fully backward compatible. Future Phase 2 will add dedicated tool_call_policy types with tool name/type matching, per-tool rate limits, and tool allowlists/blocklists.

MCP Tool Interceptor (MultiServerMCPClient)

When using LangGraph's MultiServerMCPClient from langchain-mcp-adapters, you can wrap every MCP tool call with AxonFlow policy enforcement using the mcp_tool_interceptor() factory method. This enforces the full mcp_check_input → handler → mcp_check_output pattern automatically. Note: mcp_tool_interceptor() is deprecated in favour of GovernedTool — see the "Migrating from mcp_tool_interceptor" section below.

Basic Usage

from langchain_mcp_adapters.client import MultiServerMCPClient
from axonflow import AxonFlow
from axonflow.adapters import AxonFlowLangGraphAdapter

async with AxonFlow(endpoint="http://localhost:8080") as client:
adapter = AxonFlowLangGraphAdapter(client, "my-workflow")

mcp_client = MultiServerMCPClient(
{"lookup": {"url": "http://localhost:8000/mcp", "transport": "http"}},
tool_interceptors=[adapter.mcp_tool_interceptor()],
)
tools = await mcp_client.get_tools()
# All tool calls through mcp_client are now policy-enforced

The interceptor:

  1. Derives connector_type from the incoming request (defaults to "{server_name}.{tool_name}")
  2. Calls mcp_check_input(...) — raises PolicyViolationError if the input is blocked
  3. Calls handler(request) to execute the tool
  4. Calls mcp_check_output(...) — raises PolicyViolationError if the result is hard-blocked; returns redacted_data in place of the original result if redaction was applied

MCPInterceptorOptions

Use MCPInterceptorOptions to customize the interceptor's behaviour:

from axonflow.adapters import AxonFlowLangGraphAdapter, MCPInterceptorOptions

opts = MCPInterceptorOptions(
# Custom connector type derivation — defaults to "{server_name}.{tool_name}"
connector_type_fn=lambda req: req.server_name,
# Operation type passed to mcp_check_input — defaults to "execute"
# Use "query" for known read-only tool calls
operation="query",
)

mcp_client = MultiServerMCPClient(
{"lookup": {"url": "http://localhost:8000/mcp", "transport": "http"}},
tool_interceptors=[adapter.mcp_tool_interceptor(opts)],
)
| Option | Type | Default | Description |
|---|---|---|---|
| connector_type_fn | Callable[[Any], str] \| None | None | Maps an MCP request to a connector type string. Receives the MCPToolCallRequest object. Defaults to "{request.server_name}.{request.name}". |
| operation | str | "execute" | Operation type passed to mcp_check_input. Use "query" for read-only tools. |

Redacted Output Passthrough

When the output policy applies redaction (rather than a hard block), the interceptor automatically substitutes the redacted_data returned by mcp_check_output for the original tool result. The calling LangGraph node receives the sanitised version transparently.

Automatic Per-Tool Governance with wrap_langgraph

If you are using the Python SDK's wrap_langgraph() wrapper, you can enable per-tool governance for all tools nodes in the graph without writing any manual gate code. Pass govern_tools=True:

from axonflow import AxonFlow
from axonflow.adapters import wrap_langgraph

async with AxonFlow(endpoint="http://localhost:8080") as client:
governed = wrap_langgraph(
graph, client,
workflow_name="research-agent",
govern_tools=True,
)

# Every tool call within tools nodes is automatically gate-checked.
# Blocked tools are skipped and the block reason is recorded.
result = await governed.ainvoke({"query": "Search for latest news"})

Under the hood, govern_tools=True calls check_tool_gate() before each individual tool execution and tool_completed() after, using the same ToolContext mechanism described above. This is equivalent to the manual adapter pattern shown in the LangGraph Adapter (Recommended) section, but without any per-tool boilerplate.

For full details on wrap_langgraph(), including NodeConfig options and error handling, see the LangGraph Wrapper page.

Governance Boundaries: Where Each Check Fires

A LangGraph agent with tool-calling has two boundaries where data flows between systems: the node boundary (when graph nodes execute) and the tool boundary (when tools interact with external systems). AxonFlow governs both, but the tool boundary is where content-level governance (PII detection, input validation) happens.

Execution Flow

User Query
|
v
+-----------------------------------------------+
| NODE BOUNDARY: check_gate() |
| "Should this node execute?" |
| Context: node name, step type, model, provider |
+-----------------------------------------------+
|
v
LLM executes, returns tool_call(name, args)
|
v (tool_call goes directly to ToolNode)
|
+-----------------------------------------------+
| TOOL BOUNDARY: mcp_check_input() |
| "Should this tool call be allowed?" |
| Context: tool name, args, connector type |
| Decision: ALLOW or BLOCK before execution |
+-----------------------------------------------+
|
v
Tool executes (database, API, email, etc.)
|
v
+-----------------------------------------------+
| TOOL BOUNDARY: mcp_check_output() |
| "Should this result reach the LLM?" |
| Context: tool name, result content |
| Decision: ALLOW, REDACT, or BLOCK |
+-----------------------------------------------+
|
v (clean result fed back to LLM)
|
LLM reasons over clean data
|
v
+-----------------------------------------------+
| NODE BOUNDARY: step_completed() |
| Audit: node output recorded |
+-----------------------------------------------+
|
v
Response to user

Why Tool-Boundary, Not LLM-Boundary

Two points in this flow look like they could provide equivalent governance:

  • Tool call as LLM output. When the LLM generates a tool_call, governance could inspect it before LangGraph executes it.
  • Tool result as LLM input. When the tool result is fed back to the LLM, governance could inspect it before the LLM reasons over it.

AxonFlow governs at the tool boundary instead. The reasons go beyond convenience — tool governance and LLM governance use different policy evaluation pipelines on the server.

1. Tool governance can redact. LLM governance cannot. mcp_check_output (used by tool_output_wrapper) returns redacted_data — it can strip SSNs from a database result while keeping the clean fields. LLM-level pre_check is binary: approve or block the entire request. When a database returns a customer record with SSN alongside order status, you want redaction, not a hard block on the entire follow-up LLM call.

2. Different policy sets. mcp_check_input evaluates dynamic policies (rate limits, tenant-specific rules, budget checks) via the Orchestrator. LLM-level pre_check evaluates static policies only and explicitly skips dynamic policies for latency. mcp_check_output adds exfiltration detection (row count and byte size limits) and response-phase SQLi scanning — neither available through pre_check.

3. Connector-scoped policies. Tool governance receives connector_type (e.g., "salesforce", "postgres.customers"), enabling connector-specific policy rules. LLM governance receives a flat query string with no connector context. Policies like "Salesforce queries must not return SSN" cannot be expressed without knowing which connector is being called.

4. No double governance. Tool output and LLM input are the same data in this flow. The result that tool_output_wrapper returns IS what LangGraph feeds to the LLM. Governing at both boundaries would create duplicate policy evaluations.

5. Clean framework hook. LangGraph provides ToolNode(awrap_tool_call=...) as a first-class interception point. There is no equivalent hook for "before feeding tool result to LLM."

What Each Layer Governs

| Layer | Hook | Governs | Does not govern |
|---|---|---|---|
| Node governance (wrap_langgraph) | on_chain_start / on_chain_end | Whether a node executes; audit trail | Content of individual tool calls |
| Tool gate (govern_tools=True) | on_tool_start / on_tool_end | Whether a specific tool is allowed (allowlist/blocklist) | Content of tool args or results |
| Tool content governance (tool_output_wrapper) | ToolNode(awrap_tool_call=...) | PII in tool args (input), PII in tool results (output) | LLM-to-user content |

For most LangGraph deployments, use wrap_langgraph() with tool_output_wrapper:

from axonflow import AxonFlow
from axonflow.adapters import AxonFlowLangGraphAdapter, wrap_langgraph
from langgraph.graph import StateGraph
from langgraph.prebuilt import ToolNode

async with AxonFlow(endpoint="http://localhost:8080") as client:
adapter = AxonFlowLangGraphAdapter(client, "my-agent")
wrapper = adapter.tool_output_wrapper()
tool_node = ToolNode(tools, awrap_tool_call=wrapper)

# Build graph with governed tool node
graph = StateGraph(State)
graph.add_node("llm", llm_node)
graph.add_node("tools", tool_node)
# ... add edges ...

governed = wrap_langgraph(
graph.compile(),
client=client,
workflow_name="my-agent",
govern_tools=True,
)

result = await governed.ainvoke({"query": "Check order status"})

This covers:

  • Node-level: check_gate() before each node, step_completed() after
  • Tool gate: check_tool_gate() before each tool (via govern_tools=True)
  • Tool content: mcp_check_input() (alias: check_tool_input()) before tool execution, mcp_check_output() (alias: check_tool_output()) after (via tool_output_wrapper)

Framework-Agnostic Alternative: GovernedTool

tool_output_wrapper is LangGraph-specific (hooks into ToolNode). For any framework that accepts tool instances (LangChain AgentExecutor, CrewAI, AutoGen, LangGraph, or custom pipelines), use GovernedTool:

Basic Usage

Wrap a single tool with governance:

from axonflow import AxonFlow
from axonflow.adapters import GovernedTool

async with AxonFlow(endpoint="http://localhost:8080") as client:
governed = GovernedTool(search_tool, client)
result = await governed.ainvoke({"query": "latest news"})

Under the hood, every invoke() call runs:

  1. check_tool_input() — evaluate tool arguments against policies (block if violated)
  2. Execute the wrapped tool
  3. check_tool_output() — evaluate tool result (redact PII, block exfiltration)

Batch Wrapping with govern_tools

Wrap multiple tools at once with the govern_tools() helper:

from axonflow import AxonFlow
from axonflow.adapters import govern_tools

async with AxonFlow(endpoint="http://localhost:8080") as client:
governed = govern_tools([search, calculator, send_email], client)

# Use with any framework
agent = AgentExecutor(agent=agent, tools=governed) # LangChain
tool_node = ToolNode(governed) # LangGraph

Custom Connector Type

By default, GovernedTool uses the tool name as the connector_type (e.g., a tool named "search" gets connector_type="search"). Override this with a custom function to match your policy connector rules:

from axonflow.adapters import GovernedTool

governed = GovernedTool(
search_tool, client,
connector_type_fn=lambda tool_name: f"custom.{tool_name}",
)

Custom Operation

Set the operation to control which tenant policies apply. Use "query" for read-only tools and "execute" (default) for tools with side effects:

governed = GovernedTool(search_tool, client, operation="query")

When to Use Which

| Approach | Best for | Integration |
|---|---|---|
| GovernedTool / govern_tools() | Any framework (LangChain, CrewAI, AutoGen, LangGraph) | Wrap tools before passing to framework |
| tool_output_wrapper() | LangGraph-only, when you need ToolNode-level interception | ToolNode(awrap_tool_call=wrapper) |
| wrap_langgraph() with govern_tools=True | LangGraph with node + tool governance | Callback-based, wraps compiled graph |

CrewAI and AutoGen

CrewAI and AutoGen have their own tool types but accept LangChain tools via adapters:

# CrewAI
from crewai.tools import BaseTool as CrewAIBaseTool
crewai_tool = CrewAIBaseTool.from_langchain(governed_tools[0])

# AutoGen
from autogen_ext.tools.langchain import LangChainToolAdapter
autogen_tool = LangChainToolAdapter(governed_tools[0])

Migrating from mcp_tool_interceptor

mcp_tool_interceptor() is deprecated. Replace with GovernedTool:

Before:

interceptor = adapter.mcp_tool_interceptor()
mcp_client = MultiServerMCPClient(
servers, tool_interceptors=[interceptor]
)

After:

governed = govern_tools(tools, client)
# Use governed tools directly — no MCP interceptor needed