@axlsdk/axl 0.6.0 → 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +253 -83
- package/dist/index.cjs +141 -124
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +69 -65
- package/dist/index.d.ts +69 -65
- package/dist/index.js +141 -124
- package/dist/index.js.map +1 -1
- package/package.json +4 -3
package/README.md
CHANGED
|
@@ -10,6 +10,120 @@ Core SDK for orchestrating agentic systems in TypeScript. Part of the [Axl](http
|
|
|
10
10
|
npm install @axlsdk/axl zod
|
|
11
11
|
```
|
|
12
12
|
|
|
13
|
+
## Project Structure
|
|
14
|
+
|
|
15
|
+
The recommended pattern separates config, tools, agents, workflows, and runtime into their own modules. Dependencies flow one direction: tools → agents → workflows → runtime.
|
|
16
|
+
|
|
17
|
+
```
|
|
18
|
+
src/
|
|
19
|
+
config.ts — defineConfig (providers, state, trace)
|
|
20
|
+
runtime.ts — creates AxlRuntime, registers everything
|
|
21
|
+
|
|
22
|
+
tools/
|
|
23
|
+
db.ts — tool wrapping database queries
|
|
24
|
+
email.ts — tool wrapping email service
|
|
25
|
+
|
|
26
|
+
agents/
|
|
27
|
+
support.ts — support agent (imports its tools)
|
|
28
|
+
billing.ts — billing agent
|
|
29
|
+
|
|
30
|
+
workflows/
|
|
31
|
+
handle-ticket.ts — orchestrates support + billing agents
|
|
32
|
+
|
|
33
|
+
axl.config.ts — re-exports runtime for Axl Studio
|
|
34
|
+
```
|
|
35
|
+
|
|
36
|
+
### Config
|
|
37
|
+
|
|
38
|
+
Use `defineConfig` to create a typed configuration. Keep this separate from your runtime so you can swap configs per environment:
|
|
39
|
+
|
|
40
|
+
```typescript
|
|
41
|
+
// src/config.ts
|
|
42
|
+
import { defineConfig } from '@axlsdk/axl';
|
|
43
|
+
|
|
44
|
+
export const config = defineConfig({
|
|
45
|
+
providers: {
|
|
46
|
+
openai: { apiKey: process.env.OPENAI_API_KEY },
|
|
47
|
+
anthropic: { apiKey: process.env.ANTHROPIC_API_KEY },
|
|
48
|
+
google: { apiKey: process.env.GOOGLE_API_KEY },
|
|
49
|
+
},
|
|
50
|
+
state: { store: 'sqlite', sqlite: { path: './data/axl.db' } },
|
|
51
|
+
trace: { enabled: true, level: 'steps' },
|
|
52
|
+
});
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
Provider API keys are also read automatically from environment variables (`OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, `GOOGLE_API_KEY`/`GEMINI_API_KEY`), so for local development you can skip the `providers` block entirely.
|
|
56
|
+
|
|
57
|
+
State store options: `'memory'` (default), `'sqlite'` (requires `better-sqlite3`), or a `RedisStore` instance for multi-process deployments. See [State Stores](#state-stores).
|
|
58
|
+
|
|
59
|
+
### Tools, Agents, and Workflows
|
|
60
|
+
|
|
61
|
+
Define each in its own module. Tools wrap your services, agents import the tools they need, workflows orchestrate agents:
|
|
62
|
+
|
|
63
|
+
```typescript
|
|
64
|
+
// src/tools/db.ts
|
|
65
|
+
import { tool } from '@axlsdk/axl';
|
|
66
|
+
import { z } from 'zod';
|
|
67
|
+
import { db } from '../services/db.js';
|
|
68
|
+
|
|
69
|
+
export const lookupOrder = tool({
|
|
70
|
+
name: 'lookup_order',
|
|
71
|
+
description: 'Look up an order by ID',
|
|
72
|
+
input: z.object({ orderId: z.string() }),
|
|
73
|
+
handler: async ({ orderId }) => db.orders.findById(orderId),
|
|
74
|
+
});
|
|
75
|
+
```
|
|
76
|
+
|
|
77
|
+
```typescript
|
|
78
|
+
// src/agents/support.ts
|
|
79
|
+
import { agent } from '@axlsdk/axl';
|
|
80
|
+
import { lookupOrder } from '../tools/db.js';
|
|
81
|
+
|
|
82
|
+
export const supportAgent = agent({
|
|
83
|
+
name: 'support',
|
|
84
|
+
model: 'openai-responses:gpt-5.4',
|
|
85
|
+
system: 'You are a customer support agent. Use tools to look up order information.',
|
|
86
|
+
tools: [lookupOrder],
|
|
87
|
+
});
|
|
88
|
+
```
|
|
89
|
+
|
|
90
|
+
```typescript
|
|
91
|
+
// src/workflows/handle-ticket.ts
|
|
92
|
+
import { workflow } from '@axlsdk/axl';
|
|
93
|
+
import { z } from 'zod';
|
|
94
|
+
import { supportAgent } from '../agents/support.js';
|
|
95
|
+
|
|
96
|
+
export const handleTicket = workflow({
|
|
97
|
+
name: 'handle-ticket',
|
|
98
|
+
input: z.object({ message: z.string() }),
|
|
99
|
+
handler: async (ctx) => ctx.ask(supportAgent, ctx.input.message),
|
|
100
|
+
});
|
|
101
|
+
```
|
|
102
|
+
|
|
103
|
+
### Runtime
|
|
104
|
+
|
|
105
|
+
The runtime is the composition root — it imports the config and registers all workflows, agents, and tools. Your application and [Axl Studio](https://github.com/axl-sdk/axl/tree/main/packages/axl-studio) both import this module:
|
|
106
|
+
|
|
107
|
+
```typescript
|
|
108
|
+
// src/runtime.ts
|
|
109
|
+
import { AxlRuntime } from '@axlsdk/axl';
|
|
110
|
+
import { config } from './config.js';
|
|
111
|
+
import { handleTicket } from './workflows/handle-ticket.js';
|
|
112
|
+
import { supportAgent } from './agents/support.js';
|
|
113
|
+
import { lookupOrder } from './tools/db.js';
|
|
114
|
+
|
|
115
|
+
export const runtime = new AxlRuntime(config);
|
|
116
|
+
runtime.register(handleTicket);
|
|
117
|
+
runtime.registerAgent(supportAgent);
|
|
118
|
+
runtime.registerTool(lookupOrder);
|
|
119
|
+
```
|
|
120
|
+
|
|
121
|
+
```typescript
|
|
122
|
+
// axl.config.ts — thin entry point for Axl Studio
|
|
123
|
+
import { runtime } from './src/runtime.js';
|
|
124
|
+
export default runtime;
|
|
125
|
+
```
|
|
126
|
+
|
|
13
127
|
## API
|
|
14
128
|
|
|
15
129
|
### `tool(config)`
|
|
@@ -69,9 +183,10 @@ Dynamic model and system prompt selection:
|
|
|
69
183
|
|
|
70
184
|
```typescript
|
|
71
185
|
const dynamicAgent = agent({
|
|
72
|
-
model: (ctx) =>
|
|
73
|
-
|
|
74
|
-
|
|
186
|
+
model: (ctx) =>
|
|
187
|
+
ctx.metadata?.tier === 'premium'
|
|
188
|
+
? 'openai-responses:gpt-5.4'
|
|
189
|
+
: 'openai-responses:gpt-5-nano',
|
|
75
190
|
system: (ctx) => `You are a ${ctx.metadata?.role ?? 'general'} assistant.`,
|
|
76
191
|
});
|
|
77
192
|
```
|
|
@@ -119,12 +234,12 @@ The `effort` parameter provides a unified way to control reasoning depth across
|
|
|
119
234
|
const reasoner = agent({
|
|
120
235
|
model: 'anthropic:claude-opus-4-6',
|
|
121
236
|
system: 'You are a careful analyst.',
|
|
122
|
-
effort: 'high',
|
|
237
|
+
effort: 'high', // 'none' | 'low' | 'medium' | 'high' | 'max'
|
|
123
238
|
});
|
|
124
239
|
|
|
125
|
-
// Explicit thinking budget (in tokens)
|
|
240
|
+
// Explicit thinking budget (in tokens — supported on Gemini 2.x and Anthropic)
|
|
126
241
|
const budgetReasoner = agent({
|
|
127
|
-
model: 'google:gemini-2.5-
|
|
242
|
+
model: 'google:gemini-2.5-pro',
|
|
128
243
|
system: 'Think step by step.',
|
|
129
244
|
thinkingBudget: 5000,
|
|
130
245
|
});
|
|
@@ -133,11 +248,11 @@ const budgetReasoner = agent({
|
|
|
133
248
|
const result = await reasoner.ask('Analyze this data', { effort: 'low' });
|
|
134
249
|
```
|
|
135
250
|
|
|
136
|
-
Each provider maps `effort` to its native API:
|
|
251
|
+
Each provider maps `effort` to its native API: reasoning effort (OpenAI), adaptive thinking (Anthropic), thinking level/budget (Gemini). See [docs/providers.md](../../docs/providers.md) for the full mapping table.
|
|
137
252
|
|
|
138
253
|
### `workflow(config)`
|
|
139
254
|
|
|
140
|
-
Define a named workflow with typed input
|
|
255
|
+
Define a named workflow with typed input:
|
|
141
256
|
|
|
142
257
|
```typescript
|
|
143
258
|
import { workflow } from '@axlsdk/axl';
|
|
@@ -146,10 +261,28 @@ import { z } from 'zod';
|
|
|
146
261
|
const myWorkflow = workflow({
|
|
147
262
|
name: 'my-workflow',
|
|
148
263
|
input: z.object({ query: z.string() }),
|
|
149
|
-
output: z.object({ answer: z.string() }),
|
|
150
264
|
handler: async (ctx) => {
|
|
151
|
-
|
|
152
|
-
|
|
265
|
+
return ctx.ask(researcher, ctx.input.query, {
|
|
266
|
+
schema: z.object({ answer: z.string() }),
|
|
267
|
+
});
|
|
268
|
+
},
|
|
269
|
+
});
|
|
270
|
+
```
|
|
271
|
+
|
|
272
|
+
For single-ask workflows, use `schema` on `ctx.ask()` — it instructs the LLM and retries automatically on invalid output. The optional `output` field validates your handler's return value *after* it runs (no LLM retry), which is useful for multi-step workflows where your orchestration logic (spawn, vote, transform) could assemble the wrong shape:
|
|
273
|
+
|
|
274
|
+
```typescript
|
|
275
|
+
const answerSchema = z.object({ answer: z.number() });
|
|
276
|
+
|
|
277
|
+
const reliable = workflow({
|
|
278
|
+
name: 'reliable',
|
|
279
|
+
input: z.object({ question: z.string() }),
|
|
280
|
+
output: answerSchema, // validates the spawn+vote result, not the LLM
|
|
281
|
+
handler: async (ctx) => {
|
|
282
|
+
const results = await ctx.spawn(3, async (_i) =>
|
|
283
|
+
ctx.ask(mathAgent, ctx.input.question, { schema: answerSchema }),
|
|
284
|
+
);
|
|
285
|
+
return ctx.vote(results, { strategy: 'majority', key: 'answer' });
|
|
153
286
|
},
|
|
154
287
|
});
|
|
155
288
|
```
|
|
@@ -159,9 +292,6 @@ const myWorkflow = workflow({
|
|
|
159
292
|
Register and execute workflows:
|
|
160
293
|
|
|
161
294
|
```typescript
|
|
162
|
-
import { AxlRuntime } from '@axlsdk/axl';
|
|
163
|
-
|
|
164
|
-
const runtime = new AxlRuntime();
|
|
165
295
|
runtime.register(myWorkflow);
|
|
166
296
|
|
|
167
297
|
// Execute
|
|
@@ -177,7 +307,12 @@ for await (const event of stream) {
|
|
|
177
307
|
const session = runtime.session('user-123');
|
|
178
308
|
await session.send('my-workflow', { query: 'Hello' });
|
|
179
309
|
await session.send('my-workflow', { query: 'Follow-up' });
|
|
180
|
-
|
|
310
|
+
|
|
311
|
+
// Stream a session turn
|
|
312
|
+
const sessionStream = await session.stream('my-workflow', { query: 'Hello' });
|
|
313
|
+
for await (const event of sessionStream) {
|
|
314
|
+
if (event.type === 'token') process.stdout.write(event.data);
|
|
315
|
+
}
|
|
181
316
|
```
|
|
182
317
|
|
|
183
318
|
### Context Primitives
|
|
@@ -185,32 +320,33 @@ const history = await session.history();
|
|
|
185
320
|
All available on `ctx` inside workflow handlers. See the [API Reference](../../docs/api-reference.md) for complete option types, valid values, and defaults.
|
|
186
321
|
|
|
187
322
|
```typescript
|
|
188
|
-
// Invoke an agent
|
|
323
|
+
// Invoke an agent (schema retries rebuild the call with the failed output + error in the prompt)
|
|
189
324
|
const answer = await ctx.ask(agent, 'prompt', { schema, retries });
|
|
190
325
|
|
|
191
326
|
// Run the agent 3 times in parallel — each call receives its own prompt
|
|
192
327
|
const results = await ctx.spawn(3, async (i) => ctx.ask(agent, prompts[i]));
|
|
193
328
|
|
|
194
|
-
// Pick the answer that appeared most often
|
|
195
|
-
const winner = ctx.vote(results, { strategy: 'majority', key: 'answer' });
|
|
329
|
+
// Pick the answer that appeared most often — also supports LLM-as-judge via scorer
|
|
330
|
+
const winner = await ctx.vote(results, { strategy: 'majority', key: 'answer' });
|
|
196
331
|
|
|
197
|
-
//
|
|
332
|
+
// Generic retry-until-valid loop (not conversation-aware — you decide how to use the error)
|
|
198
333
|
const valid = await ctx.verify(
|
|
199
|
-
async (lastOutput, error) => ctx.ask(agent, prompt),
|
|
334
|
+
async (lastOutput, error) => ctx.ask(agent, error ? `Fix: ${error}` : prompt),
|
|
200
335
|
schema,
|
|
201
336
|
{ retries: 3, fallback: defaultValue },
|
|
202
337
|
);
|
|
203
338
|
|
|
204
|
-
// Cost control
|
|
205
|
-
const
|
|
206
|
-
|
|
207
|
-
|
|
339
|
+
// Cost control — returns { value, budgetExceeded, totalCost }
|
|
340
|
+
const { value } = await ctx.budget(
|
|
341
|
+
{ cost: '$1.00', onExceed: 'hard_stop' },
|
|
342
|
+
async () => ctx.ask(agent, prompt),
|
|
343
|
+
);
|
|
208
344
|
|
|
209
345
|
// First to complete
|
|
210
|
-
const fastest = await ctx.race(
|
|
211
|
-
() => ctx.ask(agentA, prompt),
|
|
212
|
-
|
|
213
|
-
|
|
346
|
+
const fastest = await ctx.race(
|
|
347
|
+
[() => ctx.ask(agentA, prompt), () => ctx.ask(agentB, prompt)],
|
|
348
|
+
{ schema },
|
|
349
|
+
);
|
|
214
350
|
|
|
215
351
|
// Concurrent independent tasks
|
|
216
352
|
const [a, b] = await ctx.parallel([
|
|
@@ -224,14 +360,16 @@ const mapped = await ctx.map(items, async (item) => ctx.ask(agent, item), {
|
|
|
224
360
|
quorum: 3,
|
|
225
361
|
});
|
|
226
362
|
|
|
227
|
-
// Human-in-the-loop
|
|
363
|
+
// Human-in-the-loop — suspends until resolved via API or Studio
|
|
228
364
|
const decision = await ctx.awaitHuman({
|
|
229
|
-
channel: '
|
|
365
|
+
channel: 'approvals',
|
|
230
366
|
prompt: 'Approve this action?',
|
|
231
367
|
});
|
|
232
368
|
|
|
233
|
-
// Durable checkpoint
|
|
234
|
-
|
|
369
|
+
// Durable checkpoint — on first run, executes and saves the result.
|
|
370
|
+
// On replay after a restart, returns the saved result without re-executing,
|
|
371
|
+
// preventing duplicate side effects (double API calls, double charges, etc.)
|
|
372
|
+
const checkpointed = await ctx.checkpoint(async () => expensiveOperation());
|
|
235
373
|
```
|
|
236
374
|
|
|
237
375
|
### OpenTelemetry Observability
|
|
@@ -240,13 +378,18 @@ Automatic span emission for every `ctx.*` primitive with cost-per-span attributi
|
|
|
240
378
|
|
|
241
379
|
```typescript
|
|
242
380
|
import { defineConfig, AxlRuntime } from '@axlsdk/axl';
|
|
243
|
-
import {
|
|
381
|
+
import {
|
|
382
|
+
BasicTracerProvider,
|
|
383
|
+
SimpleSpanProcessor,
|
|
384
|
+
} from '@opentelemetry/sdk-trace-base';
|
|
244
385
|
import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-http';
|
|
245
386
|
|
|
246
387
|
const tracerProvider = new BasicTracerProvider();
|
|
247
|
-
tracerProvider.addSpanProcessor(
|
|
248
|
-
new
|
|
249
|
-
)
|
|
388
|
+
tracerProvider.addSpanProcessor(
|
|
389
|
+
new SimpleSpanProcessor(
|
|
390
|
+
new OTLPTraceExporter({ url: 'http://localhost:4318/v1/traces' }),
|
|
391
|
+
),
|
|
392
|
+
);
|
|
250
393
|
|
|
251
394
|
const config = defineConfig({
|
|
252
395
|
telemetry: {
|
|
@@ -270,7 +413,7 @@ import { createSpanManager, NoopSpanManager } from '@axlsdk/axl';
|
|
|
270
413
|
|
|
271
414
|
### Memory Primitives
|
|
272
415
|
|
|
273
|
-
Working memory backed by the
|
|
416
|
+
Working memory backed by the `StateStore` interface:
|
|
274
417
|
|
|
275
418
|
```typescript
|
|
276
419
|
// Store and retrieve structured state
|
|
@@ -283,18 +426,20 @@ await ctx.remember('user-profile', data, { scope: 'global' });
|
|
|
283
426
|
const profile = await ctx.recall('user-profile', { scope: 'global' });
|
|
284
427
|
```
|
|
285
428
|
|
|
286
|
-
Semantic recall requires a vector store and embedder
|
|
429
|
+
Semantic recall requires a vector store and embedder on the config:
|
|
287
430
|
|
|
288
431
|
```typescript
|
|
289
|
-
import { AxlRuntime, InMemoryVectorStore, OpenAIEmbedder } from '@axlsdk/axl';
|
|
432
|
+
import { defineConfig, AxlRuntime, InMemoryVectorStore, OpenAIEmbedder } from '@axlsdk/axl';
|
|
290
433
|
|
|
291
|
-
const
|
|
434
|
+
const config = defineConfig({
|
|
292
435
|
memory: {
|
|
293
436
|
vectorStore: new InMemoryVectorStore(),
|
|
294
437
|
embedder: new OpenAIEmbedder({ model: 'text-embedding-3-small' }),
|
|
295
438
|
},
|
|
296
439
|
});
|
|
297
440
|
|
|
441
|
+
const runtime = new AxlRuntime(config);
|
|
442
|
+
|
|
298
443
|
// In a workflow:
|
|
299
444
|
const relevant = await ctx.recall('knowledge-base', {
|
|
300
445
|
query: 'refund policy',
|
|
@@ -322,83 +467,108 @@ const safe = agent({
|
|
|
322
467
|
return { block: false };
|
|
323
468
|
},
|
|
324
469
|
output: async (response, ctx) => {
|
|
325
|
-
if (isOffTopic(response))
|
|
470
|
+
if (isOffTopic(response))
|
|
471
|
+
return { block: true, reason: 'Off-topic response' };
|
|
326
472
|
return { block: false };
|
|
327
473
|
},
|
|
328
|
-
onBlock: 'retry',
|
|
474
|
+
onBlock: 'retry', // 'retry' | 'throw' | (reason, ctx) => fallbackResponse
|
|
329
475
|
maxRetries: 2,
|
|
330
476
|
},
|
|
331
477
|
});
|
|
332
478
|
```
|
|
333
479
|
|
|
334
|
-
When `onBlock` is `'retry'`, the LLM
|
|
480
|
+
When `onBlock` is `'retry'`, the LLM's blocked output is appended to the conversation (as an assistant message) along with a system message containing the block reason, then the LLM is re-called so it can self-correct. These messages **accumulate** across retries — if the guardrail blocks multiple times, the LLM sees all prior failed attempts and corrections before its next try. All retry messages are ephemeral — they are **not** persisted to session history, so subsequent session turns never see the blocked attempts. Note: `ctx.ask()` schema retries work differently — each retry rebuilds the call from scratch and only includes the most recent failed output and error (previous failures do not accumulate). Input guardrails always throw since the prompt is user-supplied. Throws `GuardrailError` if retries are exhausted or `onBlock` is `'throw'`.
|
|
481
|
+
|
|
482
|
+
### State Stores
|
|
483
|
+
|
|
484
|
+
Three built-in implementations. All persist the same data: workflow execution checkpoints, `awaitHuman` decisions, session history, memory entries, and the execution state needed for suspend/resume.
|
|
485
|
+
|
|
486
|
+
**Memory** (default) — in-process, no persistence. Use for development and stateless workflows.
|
|
487
|
+
|
|
488
|
+
```typescript
|
|
489
|
+
const runtime = new AxlRuntime();
|
|
490
|
+
```
|
|
491
|
+
|
|
492
|
+
**SQLite** — file-based persistence. Use for single-process deployments that need durable state across restarts.
|
|
493
|
+
|
|
494
|
+
```bash
|
|
495
|
+
npm install better-sqlite3
|
|
496
|
+
```
|
|
497
|
+
|
|
498
|
+
```typescript
|
|
499
|
+
const runtime = new AxlRuntime({
|
|
500
|
+
state: { store: 'sqlite', sqlite: { path: './data/axl.db' } },
|
|
501
|
+
});
|
|
502
|
+
```
|
|
503
|
+
|
|
504
|
+
**Redis** — shared state across multiple processes. Use for multi-replica deployments or any setup where more than one process runs `AxlRuntime`.
|
|
505
|
+
|
|
506
|
+
```bash
|
|
507
|
+
npm install redis
|
|
508
|
+
```
|
|
509
|
+
|
|
510
|
+
```typescript
|
|
511
|
+
import { AxlRuntime, RedisStore } from '@axlsdk/axl';
|
|
512
|
+
|
|
513
|
+
const store = await RedisStore.create('redis://localhost:6379');
|
|
514
|
+
const runtime = new AxlRuntime({ state: { store } });
|
|
515
|
+
|
|
516
|
+
// Graceful shutdown — closes the Redis connection
|
|
517
|
+
await runtime.shutdown();
|
|
518
|
+
```
|
|
519
|
+
|
|
520
|
+
`RedisStore.create()` connects before returning, so any connection error surfaces at startup rather than on first use. The runtime's `shutdown()` closes the connection automatically.
|
|
335
521
|
|
|
336
522
|
### Session Options
|
|
337
523
|
|
|
338
524
|
```typescript
|
|
339
525
|
const session = runtime.session('user-123', {
|
|
340
526
|
history: {
|
|
341
|
-
maxMessages: 100,
|
|
342
|
-
summarize: true,
|
|
343
|
-
summaryModel: 'openai-responses:gpt-5-mini',
|
|
527
|
+
maxMessages: 100,
|
|
528
|
+
summarize: true,
|
|
529
|
+
summaryModel: 'openai-responses:gpt-5-mini',
|
|
344
530
|
},
|
|
345
|
-
persist: true,
|
|
531
|
+
persist: true,
|
|
346
532
|
});
|
|
347
533
|
```
|
|
348
534
|
|
|
349
|
-
`
|
|
535
|
+
When `maxMessages` is exceeded:
|
|
536
|
+
|
|
537
|
+
- **`summarize: false`** (default) — oldest messages beyond the limit are dropped. Only the most recent `maxMessages` are kept.
|
|
538
|
+
- **`summarize: true`** — before dropping, the overflow messages are sent to `summaryModel` for summarization. The summary is saved to session metadata and included as context on subsequent turns. Each time the limit is exceeded again, the new overflow is summarized together with the previous summary, so context accumulates incrementally.
|
|
350
539
|
|
|
351
|
-
| Option
|
|
352
|
-
|
|
353
|
-
| `history.maxMessages`
|
|
354
|
-
| `history.summarize`
|
|
355
|
-
| `history.summaryModel` | `string`
|
|
356
|
-
| `persist`
|
|
540
|
+
| Option | Type | Default | Description |
|
|
541
|
+
| ---------------------- | --------- | --------- | ------------------------------------------------------------- |
|
|
542
|
+
| `history.maxMessages` | `number` | unlimited | Max messages to retain in history |
|
|
543
|
+
| `history.summarize` | `boolean` | `false` | Summarize overflow messages instead of dropping them |
|
|
544
|
+
| `history.summaryModel` | `string` | — | Model URI for summarization (required when `summarize: true`) |
|
|
545
|
+
| `persist` | `boolean` | `true` | Persist history to StateStore |
|
|
357
546
|
|
|
358
547
|
### Error Hierarchy
|
|
359
548
|
|
|
360
549
|
```typescript
|
|
361
550
|
import {
|
|
362
|
-
AxlError,
|
|
363
|
-
VerifyError,
|
|
364
|
-
QuorumNotMet,
|
|
365
|
-
NoConsensus,
|
|
366
|
-
TimeoutError,
|
|
367
|
-
MaxTurnsError,
|
|
551
|
+
AxlError, // Base class
|
|
552
|
+
VerifyError, // Schema validation failed after retries
|
|
553
|
+
QuorumNotMet, // Quorum threshold not reached
|
|
554
|
+
NoConsensus, // Vote could not reach consensus
|
|
555
|
+
TimeoutError, // Operation exceeded timeout
|
|
556
|
+
MaxTurnsError, // Agent exceeded max tool-calling turns
|
|
368
557
|
BudgetExceededError, // Budget limit exceeded
|
|
369
|
-
GuardrailError,
|
|
370
|
-
ToolDenied,
|
|
558
|
+
GuardrailError, // Guardrail blocked input or output
|
|
559
|
+
ToolDenied, // Agent tried to call unauthorized tool
|
|
371
560
|
} from '@axlsdk/axl';
|
|
372
561
|
```
|
|
373
562
|
|
|
374
|
-
### State Stores
|
|
375
|
-
|
|
376
|
-
```typescript
|
|
377
|
-
import { MemoryStore, SQLiteStore, RedisStore } from '@axlsdk/axl';
|
|
378
|
-
|
|
379
|
-
// In-memory (default)
|
|
380
|
-
const runtime = new AxlRuntime();
|
|
381
|
-
|
|
382
|
-
// SQLite (requires better-sqlite3)
|
|
383
|
-
const runtime = new AxlRuntime({
|
|
384
|
-
state: { store: 'sqlite', sqlite: { path: './data/axl.db' } },
|
|
385
|
-
});
|
|
386
|
-
|
|
387
|
-
// Redis (requires ioredis)
|
|
388
|
-
const runtime = new AxlRuntime({
|
|
389
|
-
state: { store: 'redis', redis: { url: 'redis://localhost:6379' } },
|
|
390
|
-
});
|
|
391
|
-
```
|
|
392
|
-
|
|
393
563
|
### Provider URIs
|
|
394
564
|
|
|
395
565
|
Four built-in providers using the `provider:model` URI scheme:
|
|
396
566
|
|
|
397
567
|
```
|
|
398
|
-
openai-responses:gpt-5.4 # OpenAI Responses API (
|
|
568
|
+
openai-responses:gpt-5.4 # OpenAI Responses API (preferred over Chat Completions)
|
|
399
569
|
openai:gpt-5.4 # OpenAI Chat Completions
|
|
400
570
|
anthropic:claude-sonnet-4-6 # Anthropic
|
|
401
|
-
google:gemini-
|
|
571
|
+
google:gemini-3.1-pro-preview # Google Gemini
|
|
402
572
|
```
|
|
403
573
|
|
|
404
574
|
See [docs/providers.md](../../docs/providers.md) for the full model list including reasoning models.
|