
SDK Integration

All four AxonFlow SDKs support the Workflow Control Plane. The Python SDK also includes a dedicated LangGraph adapter with automatic workflow lifecycle management.

Python

from axonflow import AxonFlow
from axonflow.workflow import (
CreateWorkflowRequest,
StepGateRequest,
MarkStepCompletedRequest,
WorkflowSource,
StepType,
)

async with AxonFlow(
endpoint="http://localhost:8080",
client_id="my-workflow-app",
client_secret="your-secret",
) as client:
# Create workflow (total_steps is optional — auto-finalized at terminal state)
workflow = await client.create_workflow(
CreateWorkflowRequest(
workflow_name="code-review-pipeline",
source=WorkflowSource.EXTERNAL,
)
)

# Check gate before each step
gate = await client.step_gate(
workflow_id=workflow.workflow_id,
step_id="step-1",
request=StepGateRequest(
step_name="Generate Code",
step_type=StepType.LLM_CALL,
model="gpt-4",
provider="openai",
),
)

if gate.is_allowed():
# Execute your step, then mark completed
result = execute_step()
await client.mark_step_completed(
workflow_id=workflow.workflow_id,
step_id="step-1",
request=MarkStepCompletedRequest(output={"code": result}),
)
elif gate.is_blocked():
print(f"Blocked: {gate.reason}")
await client.abort_workflow(workflow.workflow_id, gate.reason)

# Complete workflow
await client.complete_workflow(workflow.workflow_id)

LangGraph Adapter

For LangGraph workflows, use the specialized adapter with automatic cleanup:

from axonflow import AxonFlow
from axonflow.adapters import AxonFlowLangGraphAdapter, WorkflowBlockedError
from axonflow.workflow import WorkflowSource

async with AxonFlow(
endpoint="http://localhost:8080",
client_id="my-langgraph-app",
client_secret="your-secret",
) as client:
# Create adapter with auto_block=True (raises exception on block)
adapter = AxonFlowLangGraphAdapter(
client=client,
workflow_name="my-langgraph-workflow",
source=WorkflowSource.LANGGRAPH,
auto_block=True, # Raises WorkflowBlockedError on block
)

try:
# Use context manager for automatic cleanup
async with adapter:
await adapter.start_workflow()

# Before each LangGraph node
if await adapter.check_gate(
step_name="generate_code",
step_type="llm_call",
model="gpt-4",
provider="openai",
):
result = await generate_code(state)
await adapter.step_completed(step_name="generate_code", output=result)

# Workflow completes automatically when context manager exits

except WorkflowBlockedError as e:
print(f"Blocked: {e.reason}")
# Workflow is automatically aborted by context manager

Go

import axonflow "github.com/getaxonflow/axonflow-sdk-go/v4"

client := axonflow.NewClient(axonflow.AxonFlowConfig{
	Endpoint:     "http://localhost:8080",
	ClientID:     "my-workflow-app",
	ClientSecret: "your-secret",
})

// Create workflow (total_steps auto-computed at terminal state)
workflow, err := client.CreateWorkflow(axonflow.CreateWorkflowRequest{
	WorkflowName: "code-review-pipeline",
	Source:       axonflow.WorkflowSourceExternal,
})
if err != nil {
	log.Fatal(err)
}

// Check gate
gate, err := client.StepGate(workflow.WorkflowID, "step-1", axonflow.StepGateRequest{
	StepName: "Generate Code",
	StepType: axonflow.StepTypeLLMCall,
	Model:    "gpt-4",
	Provider: "openai",
})
if err != nil {
	log.Fatal(err)
}

if gate.IsAllowed() {
	// Execute step, then mark completed
	result := executeStep()
	client.MarkStepCompleted(workflow.WorkflowID, "step-1", &axonflow.MarkStepCompletedRequest{
		Output: map[string]interface{}{"code": result},
	})
} else if gate.IsBlocked() {
	client.AbortWorkflow(workflow.WorkflowID, gate.Reason)
}

client.CompleteWorkflow(workflow.WorkflowID)

TypeScript

import { AxonFlow } from "@axonflow/sdk";

const axonflow = new AxonFlow({
  endpoint: "http://localhost:8080",
  clientId: "my-workflow-app",
  clientSecret: "your-secret",
});

// Create workflow (total_steps auto-computed at terminal state)
const workflow = await axonflow.createWorkflow({
  workflow_name: "code-review-pipeline",
  source: "external",
});

// Check gate
const gate = await axonflow.stepGate(workflow.workflow_id, "step-1", {
  step_name: "Generate Code",
  step_type: "llm_call",
  model: "gpt-4",
  provider: "openai",
});

if (gate.decision === "allow") {
  // Execute step, then mark completed
  const result = await executeStep();
  await axonflow.markStepCompleted(workflow.workflow_id, "step-1", {
    output: { code: result },
  });
} else if (gate.decision === "block") {
  await axonflow.abortWorkflow(workflow.workflow_id, gate.reason);
}

await axonflow.completeWorkflow(workflow.workflow_id);

Java

AxonFlow client = AxonFlow.create(AxonFlowConfig.builder()
        .endpoint("http://localhost:8080")
        .clientId("my-workflow-app")
        .clientSecret("your-secret")
        .build());

// Create workflow (total_steps auto-computed at terminal state)
CreateWorkflowResponse workflow = client.createWorkflow(
        CreateWorkflowRequest.builder()
                .workflowName("code-review-pipeline")
                .source(WorkflowSource.EXTERNAL)
                .build()
);

// Check gate
StepGateResponse gate = client.stepGate(
        workflow.getWorkflowId(),
        "step-1",
        StepGateRequest.builder()
                .stepName("Generate Code")
                .stepType(StepType.LLM_CALL)
                .model("gpt-4")
                .provider("openai")
                .build()
);

if (gate.isAllowed()) {
    // Execute step, then mark completed
    Object result = executeStep();
    client.markStepCompleted(
            workflow.getWorkflowId(),
            "step-1",
            MarkStepCompletedRequest.builder()
                    .output(Map.of("code", result))
                    .build()
    );
} else if (gate.isBlocked()) {
    client.abortWorkflow(workflow.getWorkflowId(), gate.getReason());
}

client.completeWorkflow(workflow.getWorkflowId());

Best Practices

1. Use Descriptive Step Names

# Good: names describe what the step does
await adapter.check_gate("generate_code", "llm_call")
await adapter.check_gate("review_code", "tool_call")
await adapter.check_gate("deploy_to_staging", "connector_call")

# Bad: opaque names make audit trails hard to read
await adapter.check_gate("step1", "llm_call")
await adapter.check_gate("step2", "tool_call")

2. Always Handle Block Decisions

gate = await client.step_gate(...)

if gate.is_blocked():
# Log the reason
logger.warning(f"Step blocked: {gate.reason}")
# Abort the workflow
await client.abort_workflow(workflow_id, gate.reason)
return

3. Use Context Manager for Cleanup

adapter = AxonFlowLangGraphAdapter(
client=client,
workflow_name="my-workflow",
source=WorkflowSource.LANGGRAPH,
)

async with adapter:
await adapter.start_workflow()
# If exception occurs, workflow is automatically aborted
# If successful, workflow is automatically completed

4. Include Relevant Metadata

workflow = await client.create_workflow(
CreateWorkflowRequest(
workflow_name="code-review-pipeline",
metadata={
"environment": "production",
"team": "engineering",
"triggered_by": "github-action"
}
)
)