Skip to main content

Java SDK - Getting Started

Current Version: 6.2.0

Add AxonFlow governance to Java services, agent backends, and internal platforms using the official Java SDK. The current SDK supports Proxy Mode, Gateway Mode, MCP connector operations, multi-agent planning, and Java interceptor wrappers for existing provider clients.

Installation

Maven

<dependency>
<groupId>com.getaxonflow</groupId>
<artifactId>axonflow-sdk</artifactId>
<version>6.2.0</version>
</dependency>

Gradle

implementation 'com.getaxonflow:axonflow-sdk:6.2.0'

Requirements

  • Java 11 or higher
  • Maven 3.6+ or Gradle 6.0+

Quick Start

Use Proxy Mode when you want AxonFlow to evaluate policy, route the LLM call, and return a governed response in one SDK call.

import com.getaxonflow.sdk.AxonFlow;
import com.getaxonflow.sdk.AxonFlowConfig;
import com.getaxonflow.sdk.types.ClientRequest;
import com.getaxonflow.sdk.types.ClientResponse;
import com.getaxonflow.sdk.types.RequestType;

public class QuickStart {
    public static void main(String[] args) {
        // Build the client once; credentials come from the environment.
        AxonFlowConfig config = AxonFlowConfig.builder()
                .endpoint("https://your-axonflow.example.com")
                .clientId(System.getenv("AXONFLOW_CLIENT_ID"))
                .clientSecret(System.getenv("AXONFLOW_CLIENT_SECRET"))
                .build();
        AxonFlow client = AxonFlow.create(config);

        // Proxy Mode: policy evaluation + LLM routing in a single call.
        ClientRequest request = ClientRequest.builder()
                .query("What is AI governance?")
                .userToken("user-123")
                .requestType(RequestType.CHAT)
                .model("gpt-4o-mini")
                .addContext("temperature", 0.2)
                .build();
        ClientResponse response = client.proxyLLMCall(request);

        // A blocked request is a policy decision, not an error.
        if (response.isBlocked()) {
            System.out.println("Blocked: " + response.getBlockReason());
            return;
        }

        if (!response.isSuccess()) {
            System.out.println("Request failed: " + response.getError());
            return;
        }

        System.out.println(String.valueOf(response.getData()));
    }
}

Gateway Mode

Gateway Mode is the right fit when you want AxonFlow to evaluate policy before the call, but you need to keep the LLM request inside your own process or framework integration.

See Choosing a Mode for the full trade-off discussion.

import com.getaxonflow.sdk.AxonFlow;
import com.getaxonflow.sdk.AxonFlowConfig;
import com.getaxonflow.sdk.types.AuditOptions;
import com.getaxonflow.sdk.types.PolicyApprovalRequest;
import com.getaxonflow.sdk.types.PolicyApprovalResult;
import com.getaxonflow.sdk.types.TokenUsage;

import java.util.List;
import java.util.Map;

// Create a shared client from environment-provided credentials.
AxonFlow axonflow = AxonFlow.create(AxonFlowConfig.builder()
        .endpoint(System.getenv("AXONFLOW_ENDPOINT"))
        .clientId(System.getenv("AXONFLOW_CLIENT_ID"))
        .clientSecret(System.getenv("AXONFLOW_CLIENT_SECRET"))
        .build());

// Step 1: ask AxonFlow for policy approval before making the LLM call yourself.
PolicyApprovalRequest approvalRequest = PolicyApprovalRequest.builder()
        .userToken("user-jwt")
        .query("Analyze customer support conversations for churn signals")
        .dataSources(List.of("postgres"))
        .context(Map.of("department", "customer-success"))
        .build();
PolicyApprovalResult preCheck = axonflow.getPolicyApprovedContext(approvalRequest);

if (!preCheck.isApproved()) {
    throw new RuntimeException("Blocked: " + preCheck.getBlockReason());
}

// Step 2: run the LLM call in your own process, timing it for the audit record.
long startTime = System.currentTimeMillis();
String llmResponse = callYourLlm(String.valueOf(preCheck.getApprovedData()));
long latencyMs = System.currentTimeMillis() - startTime;

// Step 3: report the outcome so the call is captured in the audit trail.
// The summary is capped at 100 characters; full responses stay in your process.
String summary = llmResponse.substring(0, Math.min(100, llmResponse.length()));
axonflow.auditLLMCall(AuditOptions.builder()
        .contextId(preCheck.getContextId())
        .responseSummary(summary)
        .provider("openai")
        .model("gpt-4o-mini")
        .tokenUsage(TokenUsage.of(150, 200))
        .latencyMs(latencyMs)
        .metadata(Map.of("workflow", "churn-analysis"))
        .build());

See Gateway Mode Deep Dive for provider-specific integration guidance and response semantics.

Framework Integration

Spring Boot

import com.getaxonflow.sdk.AxonFlow;
import com.getaxonflow.sdk.AxonFlowConfig;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.time.Duration;

@Configuration
public class AxonFlowConfiguration {

    // Endpoint is required; credential properties default to empty strings so
    // local profiles without credentials still start.
    @Value("${axonflow.endpoint}")
    private String endpoint;

    @Value("${axonflow.client-id:}")
    private String clientId;

    @Value("${axonflow.client-secret:}")
    private String clientSecret;

    /** Single shared client bean — the AxonFlow client is thread-safe. */
    @Bean
    public AxonFlow axonFlowClient() {
        AxonFlowConfig config = AxonFlowConfig.builder()
                .endpoint(endpoint)
                .clientId(clientId)
                .clientSecret(clientSecret)
                .timeout(Duration.ofSeconds(60))
                .build();
        return AxonFlow.create(config);
    }
}

Service Layer Example

import com.getaxonflow.sdk.AxonFlow;
import com.getaxonflow.sdk.types.AuditOptions;
import com.getaxonflow.sdk.types.PolicyApprovalRequest;
import com.getaxonflow.sdk.types.PolicyApprovalResult;

import java.util.List;

@Service
public class AIAssistantService {

private final AxonFlow axonFlow;

public AIAssistantService(AxonFlow axonFlow) {
this.axonFlow = axonFlow;
}

public String processQuery(String userToken, String query) {
PolicyApprovalResult preCheck = axonFlow.getPolicyApprovedContext(
PolicyApprovalRequest.builder()
.userToken(userToken)
.query(query)
.dataSources(List.of("postgres"))
.build()
);

if (!preCheck.isApproved()) {
throw new RuntimeException(preCheck.getBlockReason());
}

String response = callLLM(query);

axonFlow.auditLLMCall(AuditOptions.builder()
.contextId(preCheck.getContextId())
.responseSummary(response)
.provider("openai")
.model("gpt-4o-mini")
.build());

return response;
}
}

application.yml:

axonflow:
endpoint: ${AXONFLOW_ENDPOINT:http://localhost:8080}
client-id: ${AXONFLOW_CLIENT_ID:}
client-secret: ${AXONFLOW_CLIENT_SECRET:}

Plain Java

import com.getaxonflow.sdk.AxonFlow;
import com.getaxonflow.sdk.AxonFlowConfig;
import com.getaxonflow.sdk.types.ClientRequest;
import com.getaxonflow.sdk.types.ClientResponse;

public class AIService {

    /** One shared, thread-safe client for the whole process. */
    private static final AxonFlow client = AxonFlow.create(AxonFlowConfig.builder()
            .endpoint(System.getenv("AXONFLOW_ENDPOINT"))
            .clientId(System.getenv("AXONFLOW_CLIENT_ID"))
            .clientSecret(System.getenv("AXONFLOW_CLIENT_SECRET"))
            .build());

    /**
     * Sends a prompt through Proxy Mode and returns the governed response body.
     * Throws if the request was blocked by policy or failed outright.
     */
    public String query(String userToken, String prompt) {
        ClientRequest request = ClientRequest.builder()
                .query(prompt)
                .userToken(userToken)
                .build();
        ClientResponse response = client.proxyLLMCall(request);

        if (response.isBlocked()) {
            throw new RuntimeException("Blocked: " + response.getBlockReason());
        }
        if (!response.isSuccess()) {
            throw new RuntimeException("Request failed: " + response.getError());
        }
        return String.valueOf(response.getData());
    }
}

Configuration

import com.getaxonflow.sdk.AxonFlow;
import com.getaxonflow.sdk.AxonFlowConfig;
import com.getaxonflow.sdk.types.Mode;
import com.getaxonflow.sdk.util.CacheConfig;
import com.getaxonflow.sdk.util.RetryConfig;

import java.time.Duration;

// Full configuration surface; most fields are optional and have defaults.
AxonFlowConfig config = AxonFlowConfig.builder()
        .endpoint("https://your-axonflow.example.com")
        .clientId("your-org-id")
        .clientSecret("your-client-secret")
        .mode(Mode.PRODUCTION)
        .timeout(Duration.ofSeconds(60))     // per-request timeout
        .mapTimeout(Duration.ofSeconds(120)) // MAP plan timeout (SDK v5.5.0+)
        .debug(true)
        .insecureSkipVerify(false)           // keep TLS verification on outside dev
        .retryConfig(RetryConfig.builder()
                .maxAttempts(3)
                .initialDelay(Duration.ofMillis(250))
                .maxDelay(Duration.ofSeconds(5))
                .multiplier(2.0)
                .build())
        .cacheConfig(CacheConfig.builder()
                .enabled(true)
                .ttl(Duration.ofMinutes(5))
                .maxSize(1000)
                .build())
        .telemetry(true)
        .build();

AxonFlow client = AxonFlow.create(config);

MAP plan timeout

MAP (Multi-Agent Planning) plans chain multiple LLM calls end-to-end. A five-step plan at ~15s/step takes about 60-75s by itself, longer than the default timeout of 60s. The SDK uses a separate mapTimeout (default 120s, added in v5.5.0) for every plan-lifecycle call (generatePlan, executePlan, getPlanStatus, updatePlan, cancelPlan, resumePlan, rollbackPlan) so those calls aren't cut off by the single-request timeout.

For plans that push beyond 120s, raise mapTimeout on the builder and raise the matching knobs on the server side or the connection will be killed before the plan finishes:

// Give long-running plans a larger budget than the 120s default.
AxonFlowConfig config = AxonFlowConfig.builder()
        .endpoint(System.getenv("AXONFLOW_ENDPOINT"))
        .clientId(System.getenv("AXONFLOW_CLIENT_ID"))
        .clientSecret(System.getenv("AXONFLOW_CLIENT_SECRET"))
        .mapTimeout(Duration.ofSeconds(300)) // 5-minute plan budget
        .build();

Or via environment: AXONFLOW_MAP_TIMEOUT_SECONDS=300.

Keep these three values aligned — the smallest of them caps the effective plan runtime:

| Layer | Knob | Default |
| --- | --- | --- |
| SDK | `mapTimeout` | 120s |
| Orchestrator | `AXONFLOW_MAP_MAX_TIMEOUT_SECONDS` | 300s |
| Front-door ALB | `idle_timeout.timeout_seconds` (`AlbIdleTimeoutSeconds` CFN) | 300s |

The SDK's mapTimeout must be ≤ the orchestrator cap, which must be ≤ the ALB idle timeout. The orchestrator cap clamps to 60..1800s.

VPC Private Endpoint

For private network deployments, point the SDK at the internal AxonFlow endpoint exposed inside your environment:

// Point the SDK at the internal (VPC-private) AxonFlow endpoint.
AxonFlowConfig config = AxonFlowConfig.builder()
        .endpoint("https://YOUR_VPC_IP:8443")
        .clientId(System.getenv("AXONFLOW_CLIENT_ID"))
        .clientSecret(System.getenv("AXONFLOW_CLIENT_SECRET"))
        .build();

MCP Connector Integration

List Available Connectors

import com.getaxonflow.sdk.types.ConnectorInfo;

// Enumerate every connector known to the gateway, installed or not.
List<ConnectorInfo> connectors = client.listConnectors();

connectors.forEach(connector -> {
    System.out.printf("Connector: %s (%s)%n", connector.getName(), connector.getType());
    System.out.printf(" Installed: %s%n", connector.isInstalled());
    System.out.printf(" Capabilities: %s%n", connector.getCapabilities());
});

Install a Connector

// Install a connector by id with its environment-specific settings.
ConnectorInfo connector = client.installConnector(
        "amadeus-travel",
        Map.of("environment", "production", "region", "europe"));

System.out.println("Installed connector: " + connector.getName());

Query a Connector

import com.getaxonflow.sdk.types.ConnectorQuery;
import com.getaxonflow.sdk.types.ConnectorResponse;

// Run a governed query against an installed connector.
ConnectorQuery query = ConnectorQuery.builder()
        .connectorId("postgres")
        .operation("SELECT id, name FROM customers LIMIT 10")
        .userToken("user-jwt")
        .parameters(Map.of("schema", "public"))
        .timeoutMs(10_000)
        .build();
ConnectorResponse result = client.queryConnector(query);

if (result.isSuccess()) {
    System.out.println("Data: " + result.getData());
} else {
    System.out.println("Error: " + result.getError());
}

Multi-Agent Planning (MAP)

Generate a Plan

import com.getaxonflow.sdk.types.PlanRequest;
import com.getaxonflow.sdk.types.PlanResponse;
import com.getaxonflow.sdk.types.PlanStep;

// Ask the planner to break an objective into at most six steps.
PlanRequest planRequest = PlanRequest.builder()
        .objective("Research competitors and propose a rollout plan for an internal AI assistant")
        .domain("research")
        .userToken("user-123")
        .maxSteps(6)
        .parallel(true)
        .build();
PlanResponse plan = client.generatePlan(planRequest);

System.out.printf("Plan %s has %d steps%n", plan.getPlanId(), plan.getStepCount());
for (PlanStep step : plan.getSteps()) {
    System.out.printf(" - %s: %s%n", step.getName(), step.getDescription());
}

Execute a Plan

PlanResponse execution = client.executePlan(plan.getPlanId(), "user-123");

if (execution.isCompleted()) {
System.out.println("Result: " + execution.getResult());
} else {
System.out.println("Execution status: " + execution.getStatus());
}

Poll Plan Status

// Fetch the plan's latest lifecycle state without waiting for completion.
PlanResponse status = client.getPlanStatus(plan.getPlanId());
System.out.println("Current status: " + status.getStatus());

LLM Interceptors

Use interceptors when you already have provider client code and want to add governance with minimal application changes.

import com.getaxonflow.sdk.AxonFlow;
import com.getaxonflow.sdk.interceptors.ChatCompletionRequest;
import com.getaxonflow.sdk.interceptors.ChatCompletionResponse;
import com.getaxonflow.sdk.interceptors.OpenAIInterceptor;
import com.getaxonflow.sdk.exceptions.PolicyViolationException;

// Shared client used by the interceptor for policy checks and audits.
AxonFlow axonflow = AxonFlow.create(AxonFlowConfig.builder()
        .endpoint(System.getenv("AXONFLOW_ENDPOINT"))
        .clientId(System.getenv("AXONFLOW_CLIENT_ID"))
        .clientSecret(System.getenv("AXONFLOW_CLIENT_SECRET"))
        .build());

// Wrap your existing OpenAI client call; audits are written asynchronously.
OpenAIInterceptor interceptor = OpenAIInterceptor.builder()
        .axonflow(axonflow)
        .userToken("user-123")
        .asyncAudit(true)
        .build();

try {
    ChatCompletionRequest request = ChatCompletionRequest.builder()
            .model("gpt-4o-mini")
            .addUserMessage("Summarize these notes")
            .build();

    ChatCompletionResponse response = interceptor
            .wrap(req -> yourOpenAIClient.createChatCompletion(req))
            .apply(request);

    System.out.println(response.getSummary());
} catch (PolicyViolationException e) {
    // The interceptor raises when policy blocks the call before it reaches OpenAI.
    System.out.println("Blocked: " + e.getMessage());
}

Supported Java interceptors:

  • OpenAIInterceptor
  • AnthropicInterceptor
  • GeminiInterceptor
  • OllamaInterceptor
  • BedrockInterceptor

See LLM Interceptors for language-specific details.

Error Handling

import com.getaxonflow.sdk.exceptions.AxonFlowException;
import com.getaxonflow.sdk.exceptions.AuthenticationException;
import com.getaxonflow.sdk.exceptions.ConnectionException;
import com.getaxonflow.sdk.exceptions.PolicyViolationException;
import com.getaxonflow.sdk.exceptions.RateLimitException;

try {
    ClientResponse response = client.proxyLLMCall(
            ClientRequest.builder()
                    .query("Analyze this request")
                    .userToken("user-123")
                    .build());
} catch (PolicyViolationException e) {
    // Policy decision — typically surfaced to the user, not retried.
    System.out.println("Blocked by policy: " + e.getMessage());
} catch (AuthenticationException e) {
    System.out.println("Authentication failed: " + e.getMessage());
} catch (RateLimitException e) {
    System.out.println("Rate limited: " + e.getMessage());
} catch (ConnectionException e) {
    System.out.println("Connection failed: " + e.getMessage());
} catch (AxonFlowException e) {
    // Base type — catches any remaining SDK error.
    System.out.println("SDK error: " + e.getMessage());
}

Thread Safety

The AxonFlow client is thread-safe and designed to be shared across requests. Create it once at application startup and reuse it across your service.

Logging

The SDK uses SLF4J. Add a logging backend such as Logback:

<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>1.4.14</version>
</dependency>

Enable SDK debug logging through config when you need request and retry visibility during development:

// Development-only: debug(true) logs requests and retry attempts via SLF4J.
AxonFlowConfig config = AxonFlowConfig.builder()
.endpoint("https://your-axonflow.example.com")
.debug(true)
.build();

Production Best Practices

1. Load Credentials from the Environment

// Never hard-code credentials; read them from the environment at startup.
AxonFlow client = AxonFlow.create(AxonFlowConfig.builder()
.endpoint(System.getenv("AXONFLOW_ENDPOINT"))
.clientId(System.getenv("AXONFLOW_CLIENT_ID"))
.clientSecret(System.getenv("AXONFLOW_CLIENT_SECRET"))
.build());

2. Reuse a Singleton Client

// The client is thread-safe; one Spring singleton serves all requests.
@Bean
public AxonFlow axonFlowClient() {
return AxonFlow.create(config);
}

3. Tune Cache and Retry for Production Traffic

// Tune retry and cache explicitly for production traffic volumes.
AxonFlowConfig config = AxonFlowConfig.builder()
.endpoint(endpoint)
.retryConfig(RetryConfig.builder().maxAttempts(3).build())
.cacheConfig(CacheConfig.builder().ttl(Duration.ofMinutes(5)).build())
.build();

4. Keep insecureSkipVerify(false) Outside Development

Only disable TLS verification for local or controlled test environments.

Examples