@axlsdk/axl 0.5.0 → 0.6.0

This diff shows the changes between publicly released versions of this package, as published to a supported public registry. It is provided for informational purposes only.
package/README.md CHANGED
@@ -1,6 +1,8 @@
1
- # axl
1
+ # @axlsdk/axl
2
2
 
3
- Core SDK for orchestrating agentic systems in TypeScript.
3
+ [![npm version](https://img.shields.io/npm/v/@axlsdk/axl)](https://www.npmjs.com/package/@axlsdk/axl)
4
+
5
+ Core SDK for orchestrating agentic systems in TypeScript. Part of the [Axl](https://github.com/axl-sdk/axl) monorepo.
4
6
 
5
7
  ## Installation
6
8
 
@@ -26,11 +28,23 @@ const calculator = tool({
26
28
  const result = new Function(`return (${expression})`)();
27
29
  return { result };
28
30
  },
31
+ // handler also accepts (input, ctx) for nested agent invocations — see below
29
32
  retry: { attempts: 3, backoff: 'exponential' },
30
33
  sensitive: false,
31
34
  });
32
35
  ```
33
36
 
37
+ Tool handlers receive a second parameter `ctx: WorkflowContext` (a child context), enabling the "agent-as-tool" composition pattern:
38
+
39
+ ```typescript
40
+ const researchTool = tool({
41
+ name: 'research',
42
+ description: 'Delegate to a specialist',
43
+ input: z.object({ question: z.string() }),
44
+ handler: async (input, ctx) => ctx.ask(researcher, input.question),
45
+ });
46
+ ```
47
+
34
48
  ### `agent(config)`
35
49
 
36
50
  Define an agent with model, system prompt, tools, and handoffs:
@@ -40,10 +54,10 @@ import { agent } from '@axlsdk/axl';
40
54
 
41
55
  const researcher = agent({
42
56
  name: 'researcher',
43
- model: 'openai:gpt-4o',
57
+ model: 'openai-responses:gpt-5.4',
44
58
  system: 'You are a research assistant.',
45
59
  tools: [calculator],
46
- thinking: 'high',
60
+ effort: 'high',
47
61
  maxTurns: 10,
48
62
  timeout: '30s',
49
63
  temperature: 0.7,
@@ -56,36 +70,70 @@ Dynamic model and system prompt selection:
56
70
  ```typescript
57
71
  const dynamicAgent = agent({
58
72
  model: (ctx) => ctx.metadata?.tier === 'premium'
59
- ? 'openai:gpt-4o'
60
- : 'openai:gpt-4.1-nano',
73
+ ? 'openai-responses:gpt-5.4'
74
+ : 'openai-responses:gpt-5-nano',
61
75
  system: (ctx) => `You are a ${ctx.metadata?.role ?? 'general'} assistant.`,
62
76
  });
63
77
  ```
64
78
 
65
- #### Thinking (cross-provider reasoning control)
79
+ #### Dynamic Handoffs
80
+
81
+ `handoffs` accepts a static array or a function for runtime-conditional routing:
82
+
83
+ ```typescript
84
+ const router = agent({
85
+ model: 'openai-responses:gpt-5-mini',
86
+ system: 'Route to the right specialist.',
87
+ handoffs: (ctx) => {
88
+ const base = [
89
+ { agent: billingAgent, description: 'Billing issues' },
90
+ { agent: shippingAgent, description: 'Shipping questions' },
91
+ ];
92
+ if (ctx.metadata?.tier === 'enterprise') {
93
+ base.push({ agent: priorityAgent, description: 'Priority support' });
94
+ }
95
+ return base;
96
+ },
97
+ });
98
+ ```
99
+
100
+ #### Workflow-Level Routing with `ctx.delegate()`
101
+
102
+ When your workflow (not an agent's LLM) needs to pick the best agent:
103
+
104
+ ```typescript
105
+ const result = await ctx.delegate(
106
+ [billingAgent, shippingAgent, returnsAgent],
107
+ customerMessage,
108
+ );
109
+ ```
110
+
111
+ `ctx.delegate()` creates a temporary router agent that uses handoffs to select the best candidate. When only a single agent is provided, routing is skipped entirely and `ctx.ask()` is called directly on that agent.
112
+
113
+ #### Effort (cross-provider reasoning control)
66
114
 
67
- The `thinking` parameter provides a unified way to control reasoning depth across all providers:
115
+ The `effort` parameter provides a unified way to control reasoning depth across all providers:
68
116
 
69
117
  ```typescript
70
118
  // Simple levels — works on any provider
71
119
  const reasoner = agent({
72
- model: 'anthropic:claude-sonnet-4-5',
120
+ model: 'anthropic:claude-opus-4-6',
73
121
  system: 'You are a careful analyst.',
74
- thinking: 'high', // 'low' | 'medium' | 'high' | 'max'
122
+ effort: 'high', // 'none' | 'low' | 'medium' | 'high' | 'max'
75
123
  });
76
124
 
77
- // Explicit budget (in tokens)
125
+ // Explicit thinking budget (in tokens)
78
126
  const budgetReasoner = agent({
79
127
  model: 'google:gemini-2.5-flash',
80
128
  system: 'Think step by step.',
81
- thinking: { budgetTokens: 5000 },
129
+ thinkingBudget: 5000,
82
130
  });
83
131
 
84
132
  // Per-call override
85
- const result = await reasoner.ask('Analyze this data', { thinking: 'low' });
133
+ const result = await reasoner.ask('Analyze this data', { effort: 'low' });
86
134
  ```
87
135
 
88
- Each provider maps `thinking` to its native API: `reasoning_effort` (OpenAI), `budget_tokens` (Anthropic), `thinkingBudget` (Gemini). See [docs/providers.md](../../docs/providers.md) for the full mapping table.
136
+ Each provider maps `effort` to its native API: `reasoning_effort` (OpenAI), adaptive thinking + `output_config.effort` (Anthropic 4.6), `thinkingLevel` (Gemini 3.x), `thinkingBudget` (Gemini 2.x). See [docs/providers.md](../../docs/providers.md) for the full mapping table.
89
137
 
90
138
  ### `workflow(config)`
91
139
 
@@ -266,7 +314,7 @@ const containsPII = (text: string) => /\b\d{3}-\d{2}-\d{4}\b/.test(text);
266
314
  const isOffTopic = (text: string) => !text.toLowerCase().includes('support');
267
315
 
268
316
  const safe = agent({
269
- model: 'openai:gpt-4o',
317
+ model: 'openai-responses:gpt-5.4',
270
318
  system: 'You are a helpful assistant.',
271
319
  guardrails: {
272
320
  input: async (prompt, ctx) => {
@@ -292,7 +340,7 @@ const session = runtime.session('user-123', {
292
340
  history: {
293
341
  maxMessages: 100, // Trim oldest messages when exceeded
294
342
  summarize: true, // Auto-summarize trimmed messages
295
- summaryModel: 'openai:gpt-4o-mini', // Model for summarization
343
+ summaryModel: 'openai-responses:gpt-5-mini', // Model for summarization
296
344
  },
297
345
  persist: true, // Save to StateStore (default: true)
298
346
  });
@@ -347,9 +395,9 @@ const runtime = new AxlRuntime({
347
395
  Four built-in providers using the `provider:model` URI scheme:
348
396
 
349
397
  ```
350
- openai:gpt-4o # OpenAI Chat Completions
351
- openai-responses:gpt-4o # OpenAI Responses API
352
- anthropic:claude-sonnet-4-5 # Anthropic
398
+ openai-responses:gpt-5.4 # OpenAI Responses API (recommended)
399
+ openai:gpt-5.4 # OpenAI Chat Completions
400
+ anthropic:claude-sonnet-4-6 # Anthropic
353
401
  google:gemini-2.5-pro # Google Gemini
354
402
  ```
355
403