@ax-llm/ax 16.1.12 → 17.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@ax-llm/ax",
3
- "version": "16.1.12",
3
+ "version": "17.0.0",
4
4
  "type": "module",
5
5
  "description": "The best library to work with LLMs",
6
6
  "repository": {
@@ -1,12 +1,12 @@
1
1
  ---
2
2
  name: ax-agent
3
3
  description: This skill helps with building AxAgent-based agents using @ax-llm/ax. Use when the user asks about agent(), AxAgent, child agents, tool functions, smart model routing, RLM mode, stopping agents, or composing multi-agent hierarchies.
4
- version: "16.1.12"
4
+ version: "17.0.0"
5
5
  ---
6
6
 
7
7
  # AxAgent Guide (@ax-llm/ax)
8
8
 
9
- AxAgent is the agent framework in Ax. It wraps AxGen with support for child agents, tool use, smart model routing, and RLM (Recursive Language Model) mode for processing long contexts through a code interpreter.
9
+ AxAgent is the agent framework in Ax. It wraps AxGen with support for child agents, tool use, smart model routing, and RLM (Recursive Language Model) mode for processing long contexts through runtime-backed code execution.
10
10
 
11
11
  ## Quick Reference
12
12
 
@@ -167,7 +167,7 @@ const result = await myAgent.forward(llm, values, {
167
167
 
168
168
  ### `stop()` method
169
169
 
170
- Call `stop()` from any context — a timer, event handler, or another async task — to halt the multi-step loop:
170
+ Call `stop()` from any context — a timer, event handler, or another async task — to halt the multi-step loop. `stop()` aborts all in-flight calls started by the same `AxAgent` instance (including retry backoff waits):
171
171
 
172
172
  ```typescript
173
173
  const myAgent = agent('question:string -> answer:string', {
@@ -266,7 +266,7 @@ const weatherAgent = agent('query:string -> response:string', {
266
266
  functions: [getCurrentWeather]
267
267
  });
268
268
 
269
- const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });
269
+ const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });
270
270
  const result = await weatherAgent.forward(llm, { query: 'Weather in Tokyo?' });
271
271
  ```
272
272
 
@@ -358,7 +358,7 @@ The LLM writes code to chunk, filter, and iterate over the document, using `llmQ
358
358
 
359
359
  ```typescript
360
360
  import { agent, ai } from '@ax-llm/ax';
361
- import { AxRLMJSInterpreter } from '@ax-llm/ax';
361
+ import { AxJSRuntime } from '@ax-llm/ax';
362
362
 
363
363
  const analyzer = agent(
364
364
  'context:string, query:string -> answer:string, evidence:string[]',
@@ -367,8 +367,8 @@ const analyzer = agent(
367
367
  description: 'Analyzes long documents using code interpreter and sub-LM queries',
368
368
  maxSteps: 15,
369
369
  rlm: {
370
- contextFields: ['context'], // Fields to load into interpreter
371
- interpreter: new AxRLMJSInterpreter(), // Code interpreter implementation
370
+ contextFields: ['context'], // Fields to load into runtime session
371
+ runtime: new AxJSRuntime(), // Code runtime implementation
372
372
  maxLlmCalls: 30, // Cap on sub-LM calls (default: 50)
373
373
  subModel: 'gpt-4o-mini', // Model for llmQuery (default: same as parent)
374
374
  },
@@ -378,15 +378,15 @@ const analyzer = agent(
378
378
 
379
379
  ### Sandbox Permissions
380
380
 
381
- By default, the `AxRLMJSInterpreter` sandbox blocks all dangerous Web APIs (network, storage, etc.). You can selectively grant access using the `AxRLMJSInterpreterPermission` enum:
381
+ By default, the `AxJSRuntime` sandbox blocks all dangerous Web APIs (network, storage, etc.). You can selectively grant access using the `AxJSRuntimePermission` enum:
382
382
 
383
383
  ```typescript
384
- import { AxRLMJSInterpreter, AxRLMJSInterpreterPermission } from '@ax-llm/ax';
384
+ import { AxJSRuntime, AxJSRuntimePermission } from '@ax-llm/ax';
385
385
 
386
- const interpreter = new AxRLMJSInterpreter({
386
+ const runtime = new AxJSRuntime({
387
387
  permissions: [
388
- AxRLMJSInterpreterPermission.NETWORK,
389
- AxRLMJSInterpreterPermission.STORAGE,
388
+ AxJSRuntimePermission.NETWORK,
389
+ AxJSRuntimePermission.STORAGE,
390
390
  ],
391
391
  });
392
392
  ```
@@ -410,7 +410,7 @@ Context fields aren't limited to plain strings. You can pass structured data —
410
410
 
411
411
  ```typescript
412
412
  import { agent, f, s } from '@ax-llm/ax';
413
- import { AxRLMJSInterpreter } from '@ax-llm/ax';
413
+ import { AxJSRuntime } from '@ax-llm/ax';
414
414
 
415
415
  const sig = s('query:string -> answer:string, evidence:string[]')
416
416
  .appendInputField('documents', f.object({
@@ -424,7 +424,7 @@ const analyzer = agent(sig, {
424
424
  description: 'Analyzes structured document collections using RLM',
425
425
  rlm: {
426
426
  contextFields: ['documents'],
427
- interpreter: new AxRLMJSInterpreter(),
427
+ runtime: new AxJSRuntime(),
428
428
  },
429
429
  });
430
430
  ```
@@ -470,7 +470,7 @@ In RLM mode, the agent gets a `codeInterpreter` tool. The LLM's typical workflow
470
470
 
471
471
  ### Custom Interpreters
472
472
 
473
- The built-in `AxRLMJSInterpreter` uses Web Workers for sandboxed code execution. For other environments, implement the `AxCodeInterpreter` interface:
473
+ The built-in `AxJSRuntime` uses Web Workers for sandboxed code execution. For other environments, implement the `AxCodeInterpreter` interface:
474
474
 
475
475
  ```typescript
476
476
  import type { AxCodeInterpreter, AxCodeSession } from '@ax-llm/ax';
@@ -507,7 +507,8 @@ RLM mode does not support true streaming. When using `streamingForward`, RLM run
507
507
  ```typescript
508
508
  interface AxRLMConfig {
509
509
  contextFields: string[]; // Input fields holding long context
510
- interpreter: AxCodeInterpreter; // Code interpreter implementation
510
+ runtime?: AxCodeInterpreter; // Preferred: code runtime implementation (use instead of `interpreter`)
511
+ interpreter?: AxCodeInterpreter; // Legacy alias (deprecated)
511
512
  maxLlmCalls?: number; // Cap on sub-LM calls (default: 50)
512
513
  subModel?: string; // Model for llmQuery sub-calls
513
514
  }
package/skills/ax-llm.md CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  name: ax-llm
3
3
  description: This skill helps with using the @ax-llm/ax TypeScript library for building LLM applications. Use when the user asks about ax(), ai(), f(), s(), agent(), flow(), AxGen, AxAgent, AxFlow, signatures, streaming, or mentions @ax-llm/ax.
4
- version: "16.1.12"
4
+ version: "17.0.0"
5
5
  ---
6
6
 
7
7
  # Ax Library (@ax-llm/ax) Usage Guide