@axlsdk/axl 0.4.0 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,6 +1,8 @@
1
- # axl
1
+ # @axlsdk/axl
2
2
 
3
- Core SDK for orchestrating agentic systems in TypeScript.
3
+ [![npm version](https://img.shields.io/npm/v/@axlsdk/axl)](https://www.npmjs.com/package/@axlsdk/axl)
4
+
5
+ Core SDK for orchestrating agentic systems in TypeScript. Part of the [Axl](https://github.com/axl-sdk/axl) monorepo.
4
6
 
5
7
  ## Installation
6
8
 
@@ -26,11 +28,23 @@ const calculator = tool({
26
28
  const result = new Function(`return (${expression})`)();
27
29
  return { result };
28
30
  },
31
+ // handler also accepts (input, ctx) for nested agent invocations — see below
29
32
  retry: { attempts: 3, backoff: 'exponential' },
30
33
  sensitive: false,
31
34
  });
32
35
  ```
33
36
 
37
+ Tool handlers receive a second parameter `ctx: WorkflowContext` (a child context), enabling the "agent-as-tool" composition pattern:
38
+
39
+ ```typescript
40
+ const researchTool = tool({
41
+ name: 'research',
42
+ description: 'Delegate to a specialist',
43
+ input: z.object({ question: z.string() }),
44
+ handler: async (input, ctx) => ctx.ask(researcher, input.question),
45
+ });
46
+ ```
47
+
34
48
  ### `agent(config)`
35
49
 
36
50
  Define an agent with model, system prompt, tools, and handoffs:
@@ -40,9 +54,10 @@ import { agent } from '@axlsdk/axl';
40
54
 
41
55
  const researcher = agent({
42
56
  name: 'researcher',
43
- model: 'openai:gpt-4o',
57
+ model: 'openai-responses:gpt-5.4',
44
58
  system: 'You are a research assistant.',
45
59
  tools: [calculator],
60
+ effort: 'high',
46
61
  maxTurns: 10,
47
62
  timeout: '30s',
48
63
  temperature: 0.7,
@@ -55,12 +70,71 @@ Dynamic model and system prompt selection:
55
70
  ```typescript
56
71
  const dynamicAgent = agent({
57
72
  model: (ctx) => ctx.metadata?.tier === 'premium'
58
- ? 'openai:gpt-4o'
59
- : 'openai:gpt-4.1-nano',
73
+ ? 'openai-responses:gpt-5.4'
74
+ : 'openai-responses:gpt-5-nano',
60
75
  system: (ctx) => `You are a ${ctx.metadata?.role ?? 'general'} assistant.`,
61
76
  });
62
77
  ```
63
78
 
79
+ #### Dynamic Handoffs
80
+
81
+ `handoffs` accepts a static array or a function for runtime-conditional routing:
82
+
83
+ ```typescript
84
+ const router = agent({
85
+ model: 'openai-responses:gpt-5-mini',
86
+ system: 'Route to the right specialist.',
87
+ handoffs: (ctx) => {
88
+ const base = [
89
+ { agent: billingAgent, description: 'Billing issues' },
90
+ { agent: shippingAgent, description: 'Shipping questions' },
91
+ ];
92
+ if (ctx.metadata?.tier === 'enterprise') {
93
+ base.push({ agent: priorityAgent, description: 'Priority support' });
94
+ }
95
+ return base;
96
+ },
97
+ });
98
+ ```
99
+
100
+ #### Workflow-Level Routing with `ctx.delegate()`
101
+
102
+ When your workflow code (rather than an agent's LLM) needs to pick the best agent:
103
+
104
+ ```typescript
105
+ const result = await ctx.delegate(
106
+ [billingAgent, shippingAgent, returnsAgent],
107
+ customerMessage,
108
+ );
109
+ ```
110
+
111
+ `ctx.delegate()` creates a temporary router agent that uses handoffs to select the best candidate. When the candidate list contains only a single agent, it calls `ctx.ask()` directly with no routing overhead.
112
+
113
+ #### Effort (cross-provider reasoning control)
114
+
115
+ The `effort` parameter provides a unified way to control reasoning depth across all providers:
116
+
117
+ ```typescript
118
+ // Simple levels — works on any provider
119
+ const reasoner = agent({
120
+ model: 'anthropic:claude-opus-4-6',
121
+ system: 'You are a careful analyst.',
122
+ effort: 'high', // 'none' | 'low' | 'medium' | 'high' | 'max'
123
+ });
124
+
125
+ // Explicit thinking budget (in tokens)
126
+ const budgetReasoner = agent({
127
+ model: 'google:gemini-2.5-flash',
128
+ system: 'Think step by step.',
129
+ thinkingBudget: 5000,
130
+ });
131
+
132
+ // Per-call override
133
+ const result = await reasoner.ask('Analyze this data', { effort: 'low' });
134
+ ```
135
+
136
+ Each provider maps `effort` to its native API: `reasoning_effort` (OpenAI), adaptive thinking + `output_config.effort` (Anthropic 4.6), `thinkingLevel` (Gemini 3.x), `thinkingBudget` (Gemini 2.x). See [docs/providers.md](../../docs/providers.md) for the full mapping table.
137
+
64
138
  ### `workflow(config)`
65
139
 
66
140
  Define a named workflow with typed input/output:
@@ -240,7 +314,7 @@ const containsPII = (text: string) => /\b\d{3}-\d{2}-\d{4}\b/.test(text);
240
314
  const isOffTopic = (text: string) => !text.toLowerCase().includes('support');
241
315
 
242
316
  const safe = agent({
243
- model: 'openai:gpt-4o',
317
+ model: 'openai-responses:gpt-5.4',
244
318
  system: 'You are a helpful assistant.',
245
319
  guardrails: {
246
320
  input: async (prompt, ctx) => {
@@ -266,7 +340,7 @@ const session = runtime.session('user-123', {
266
340
  history: {
267
341
  maxMessages: 100, // Trim oldest messages when exceeded
268
342
  summarize: true, // Auto-summarize trimmed messages
269
- summaryModel: 'openai:gpt-4o-mini', // Model for summarization
343
+ summaryModel: 'openai-responses:gpt-5-mini', // Model for summarization
270
344
  },
271
345
  persist: true, // Save to StateStore (default: true)
272
346
  });
@@ -321,9 +395,9 @@ const runtime = new AxlRuntime({
321
395
  Four built-in providers using the `provider:model` URI scheme:
322
396
 
323
397
  ```
324
- openai:gpt-4o # OpenAI Chat Completions
325
- openai-responses:gpt-4o # OpenAI Responses API
326
- anthropic:claude-sonnet-4-5 # Anthropic
398
+ openai-responses:gpt-5.4 # OpenAI Responses API (recommended)
399
+ openai:gpt-5.4 # OpenAI Chat Completions
400
+ anthropic:claude-sonnet-4-6 # Anthropic
327
401
  google:gemini-2.5-pro # Google Gemini
328
402
  ```
329
403