@ax-llm/ax 16.0.11 → 16.0.12
- package/cli/index.mjs +265 -0
- package/index.cjs +42 -42
- package/index.cjs.map +1 -1
- package/index.d.cts +953 -28
- package/index.d.ts +953 -28
- package/index.global.js +54 -54
- package/index.global.js.map +1 -1
- package/index.js +42 -42
- package/index.js.map +1 -1
- package/package.json +7 -1
- package/scripts/postinstall.mjs +209 -0
- package/skills/ax-llm.md +1582 -0

package/skills/ax-llm.md
ADDED
@@ -0,0 +1,1582 @@

---
name: ax-llm
description: This skill helps with using the @ax-llm/ax TypeScript library for building LLM applications. Use when the user asks about ax(), ai(), f(), s(), agent(), flow(), AxGen, AxAgent, AxFlow, signatures, streaming, or mentions @ax-llm/ax.
version: "16.0.12"
---

# Ax Library (@ax-llm/ax) Usage Guide

Ax is a TypeScript library for building LLM-powered applications with type-safe signatures, streaming support, and multi-provider compatibility.

## Quick Reference

```typescript
import { ax, ai, s, f, agent, flow, AxGen, AxAgent, AxFlow } from '@ax-llm/ax';

// Create AI provider
const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

// Create typed generator
const gen = ax('question:string -> answer:string');
const result = await gen.forward(llm, { question: 'What is 2+2?' });
// result.answer is typed as string

// Create signature separately
const sig = s('text:string -> summary:string');

// Field builders for programmatic signatures
const customSig = f()
  .input('text', f.string('Input text'))
  .output('summary', f.string('Summary output'))
  .build();
```

## 1. AI Provider Setup

### Quick Setup (All Providers)

```typescript
import { ai } from '@ax-llm/ax';

// OpenAI
const openai = ai({ name: 'openai', apiKey: 'sk-...' });

// Anthropic Claude
const claude = ai({ name: 'anthropic', apiKey: 'sk-ant-...' });

// Google Gemini
const gemini = ai({ name: 'google-gemini', apiKey: 'AIza...' });

// Azure OpenAI
const azure = ai({
  name: 'azure-openai',
  apiKey: 'your-key',
  resourceName: 'your-resource',
  deploymentName: 'gpt-4'
});

// Groq
const groq = ai({ name: 'groq', apiKey: 'gsk_...' });

// DeepSeek
const deepseek = ai({ name: 'deepseek', apiKey: 'sk-...' });

// Mistral
const mistral = ai({ name: 'mistral', apiKey: 'your-key' });

// Cohere
const cohere = ai({ name: 'cohere', apiKey: 'your-key' });

// Together AI
const together = ai({ name: 'together', apiKey: 'your-key' });

// OpenRouter
const openrouter = ai({ name: 'openrouter', apiKey: 'your-key' });

// Ollama (local)
const ollama = ai({ name: 'ollama', url: 'http://localhost:11434' });

// HuggingFace
const hf = ai({ name: 'huggingface', apiKey: 'hf_...' });

// Reka
const reka = ai({ name: 'reka', apiKey: 'your-key' });

// xAI Grok
const grok = ai({ name: 'grok', apiKey: 'your-key' });
```

### Full Provider Example

```typescript
import { ai, ax } from '@ax-llm/ax';

// Create provider with options
const llm = ai({
  name: 'openai',
  apiKey: process.env.OPENAI_API_KEY!,
  config: {
    model: 'gpt-4o',
    temperature: 0.7,
    maxTokens: 1000
  }
});

// Use with generator
const gen = ax('topic:string -> essay:string "A short essay"');
const result = await gen.forward(llm, { topic: 'Climate change' });
console.log(result.essay);
```

## 2. Signatures & Generators

### String Signature Syntax

```
[description] inputField:type ["field desc"], ... -> outputField:type ["field desc"], ...
```

**Types:** `string`, `number`, `boolean`, `json`, `class`, `date`, `datetime`, `image`, `audio`, `file`, `code`, `url`

**Modifiers:**
- `field?:type` - Optional field
- `field:type[]` - Array type
- `field:class "opt1, opt2, opt3"` - Enum/classification

### Signature Examples

```typescript
import { ax, s } from '@ax-llm/ax';

// Basic signature
const gen1 = ax('question:string -> answer:string');

// With descriptions
const gen2 = ax('question:string "User question" -> answer:string "AI response"');

// Optional fields
const gen3 = ax('query:string, context?:string -> response:string');

// Arrays
const gen4 = ax('text:string -> keywords:string[]');

// Classification (enum)
const gen5 = ax('review:string -> sentiment:class "positive, negative, neutral"');

// Multiple outputs
const gen6 = ax('article:string -> title:string, summary:string, tags:string[]');

// Numbers and booleans
const gen7 = ax('text:string -> wordCount:number, isQuestion:boolean');

// JSON output
const gen8 = ax('data:string -> parsed:json');

// Dates
const gen9 = ax('text:string -> extractedDate:date');

// Code blocks
const gen10 = ax('task:string -> code:code "python"');

// Signature description
const gen11 = ax('"Translate text to French" text:string -> translation:string');

// Using s() for signature only
const sig = s('input:string -> output:string');
```

### Complete Generator Example

```typescript
import { ai, ax } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

// Create generator with options
const summarizer = ax('article:string -> summary:string, keyPoints:string[]', {
  description: 'Summarize articles and extract key points',
  maxRetries: 3,
  maxSteps: 5
});

// Forward (non-streaming)
const result = await summarizer.forward(llm, {
  article: 'Long article text here...'
});

console.log(result.summary);    // string
console.log(result.keyPoints);  // string[]

// With model override
const result2 = await summarizer.forward(llm, { article: 'text' }, {
  model: 'gpt-4o-mini'
});
```

## 3. Field Builders (f.xxx())

Use field builders for programmatic signature creation with full type safety.

### Basic Field Types

```typescript
import { f } from '@ax-llm/ax';

// Start a signature builder
const sig = f()
  .input('userQuery', f.string('The user question'))
  .input('context', f.string('Background context').optional())
  .output('response', f.string('AI response'))
  .output('confidence', f.number('Confidence score 0-1'))
  .output('isComplete', f.boolean('Whether response is complete'))
  .description('Answer questions with confidence scoring')
  .build();
```

### All Field Types

```typescript
import { f } from '@ax-llm/ax';

// String types
f.string('description')                           // Basic string
f.string().min(10).max(1000)                      // With length constraints
f.string().email()                                // Email validation
f.string().url()                                  // URL validation
f.string().regex('^[A-Z]', 'Start with capital')  // Pattern

// Numbers
f.number('description')
f.number().min(0).max(100)  // With range

// Boolean
f.boolean('description')

// Classification/Enum
f.class(['option1', 'option2', 'option3'], 'description')

// JSON (any structure)
f.json('description')

// Dates and times
f.date('description')
f.datetime('description')

// Media (input only)
f.image('description')
f.audio('description')
f.file('description')

// Code
f.code('python', 'description')

// URL
f.url('description')

// Nested objects
f.object({
  name: f.string('Person name'),
  age: f.number('Age in years'),
  email: f.string().email()
}, 'Person details')

// Arrays
f.string('Item description').array('List of items')
f.object({ id: f.number(), name: f.string() }).array('List of objects')

// Modifiers
f.string().optional()  // Optional field
f.string().internal()  // Internal (not shown to LLM)
f.string().cache()     // Enable caching
```

### Complete Field Builder Example

```typescript
import { ai, ax, f } from '@ax-llm/ax';

// Build a complex signature
const analysisSig = f()
  .input('document', f.string('Document to analyze'))
  .input('analysisType', f.class(['sentiment', 'entities', 'summary']))
  .output('result', f.object({
    score: f.number().min(0).max(1),
    label: f.string(),
    details: f.string().optional()
  }))
  .output('entities', f.object({
    name: f.string(),
    type: f.class(['person', 'org', 'location'])
  }).array().optional())
  .description('Analyze documents')
  .build();

// Create generator from signature
const analyzer = ax(analysisSig);

const llm = ai({ name: 'anthropic', apiKey: process.env.ANTHROPIC_API_KEY! });

const result = await analyzer.forward(llm, {
  document: 'Apple Inc. announced new products in Cupertino.',
  analysisType: 'entities'
});

// Fully typed result
console.log(result.result.score);
console.log(result.entities?.[0]?.name);
```

## 4. Streaming

### Basic Streaming

```typescript
import { ai, ax } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });
const gen = ax('topic:string -> content:string');

// Stream responses
for await (const chunk of gen.streamingForward(llm, { topic: 'AI' })) {
  // chunk.delta contains partial values
  if (chunk.delta.content) {
    process.stdout.write(chunk.delta.content);
  }
}
```

### Complete Streaming Example

```typescript
import { ai, ax } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

const writer = ax('prompt:string -> story:string, title:string');

async function streamStory() {
  let fullStory = '';
  let title = '';

  for await (const chunk of writer.streamingForward(
    llm,
    { prompt: 'Write a short story about a robot' },
    { stream: true }
  )) {
    // Handle story chunks
    if (chunk.delta.story) {
      process.stdout.write(chunk.delta.story);
      fullStory += chunk.delta.story;
    }

    // Handle title (usually comes early)
    if (chunk.delta.title) {
      title = chunk.delta.title;
    }
  }

  console.log('\n\nTitle:', title);
  return { story: fullStory, title };
}

await streamStory();
```

### Streaming with Field Processors

```typescript
import { ai, ax } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

const gen = ax('query:string -> response:string');

// Add streaming field processor
gen.addStreamingFieldProcessor('response', (chunk, context) => {
  console.log('Chunk received:', chunk);
  console.log('Full value so far:', context?.values?.response);
  console.log('Done:', context?.done);
});

await gen.forward(llm, { query: 'Hello' }, { stream: true });
```

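The Type Reference at the end of this guide also lists a non-streaming `addFieldProcessor`. A minimal sketch, assuming it post-processes a completed field value (its `(value: any) => any` shape suggests the return value replaces the field):

```typescript
import { ai, ax } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });
const gen = ax('query:string -> response:string');

// Normalize the finished field value once generation completes
gen.addFieldProcessor('response', (value) => String(value).trim());

const result = await gen.forward(llm, { query: 'Hello' });
console.log(result.response);
```
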
## 5. Agents with Tools

Agents can use functions (tools) to perform actions.

### Defining Functions

```typescript
import { ai, agent } from '@ax-llm/ax';

// Function definition
const getCurrentWeather = {
  name: 'getCurrentWeather',
  description: 'Get the current weather for a location',
  parameters: {
    type: 'object',
    properties: {
      location: { type: 'string', description: 'City name' },
      unit: { type: 'string', enum: ['celsius', 'fahrenheit'] }
    },
    required: ['location']
  },
  func: async ({ location, unit = 'celsius' }) => {
    // Implementation
    return JSON.stringify({ temp: 22, unit, location });
  }
};
```

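Because `func` is a plain async function, a tool defined this way can be exercised directly before it is wired into an agent:

```typescript
// Call the tool's implementation directly (no LLM involved)
const raw = await getCurrentWeather.func({ location: 'Tokyo' });
console.log(JSON.parse(raw)); // { temp: 22, unit: 'celsius', location: 'Tokyo' }
```
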
### Creating Agents

```typescript
import { ai, agent } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

// Create agent with functions
const weatherAgent = agent('query:string -> response:string', {
  name: 'weatherAssistant',
  description: 'An assistant that helps with weather queries',
  definition: 'You are a helpful weather assistant. Use the getCurrentWeather function to get weather data and provide friendly responses.',
  functions: [getCurrentWeather]
});

const result = await weatherAgent.forward(llm, {
  query: 'What is the weather in Tokyo?'
});

console.log(result.response);
```

### Complete Agent Example

```typescript
import { ai, agent } from '@ax-llm/ax';

// Define tools
const searchDatabase = {
  name: 'searchDatabase',
  description: 'Search the product database',
  parameters: {
    type: 'object',
    properties: {
      query: { type: 'string', description: 'Search query' },
      limit: { type: 'number', description: 'Max results' }
    },
    required: ['query']
  },
  func: async ({ query, limit = 5 }) => {
    // Simulate database search
    return JSON.stringify([
      { id: 1, name: 'Product A', price: 99 },
      { id: 2, name: 'Product B', price: 149 }
    ].slice(0, limit));
  }
};

const getProductDetails = {
  name: 'getProductDetails',
  description: 'Get details of a specific product',
  parameters: {
    type: 'object',
    properties: {
      productId: { type: 'number', description: 'Product ID' }
    },
    required: ['productId']
  },
  func: async ({ productId }) => {
    return JSON.stringify({
      id: productId,
      name: 'Product A',
      price: 99,
      description: 'A great product',
      stock: 50
    });
  }
};

// Create agent
const shopAssistant = agent(
  'userQuery:string -> response:string, recommendations:string[]',
  {
    name: 'shoppingAssistant',
    description: 'An AI assistant that helps users find and learn about products',
    definition: `You are a helpful shopping assistant. Use the available tools to:
1. Search for products when users ask about items
2. Get product details when they want more information
3. Provide helpful recommendations based on their needs

Always be friendly and provide clear, helpful responses.`,
    functions: [searchDatabase, getProductDetails]
  }
);

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

const result = await shopAssistant.forward(llm, {
  userQuery: 'Can you find me some products and tell me about the first one?'
});

console.log('Response:', result.response);
console.log('Recommendations:', result.recommendations);
```

### Nested Agents

```typescript
import { ai, agent } from '@ax-llm/ax';

// Child agent
const researcher = agent('topic:string -> findings:string', {
  name: 'researchAgent',
  description: 'Researches topics and provides detailed findings'
});

// Parent agent that can use child agent
const writer = agent('topic:string -> article:string', {
  name: 'writerAgent',
  description: 'Writes articles using research from the research agent',
  agents: [researcher]
});

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

const result = await writer.forward(llm, {
  topic: 'Benefits of meditation'
});
```

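The Type Reference below notes that `AxAgent` exposes `getFunction(): AxFunction`. A minimal sketch, assuming this returns the tool-style definition under which a child agent is exposed to its parent:

```typescript
// Inspect how the researcher agent would appear as a callable tool
const researcherFn = researcher.getFunction();
console.log(researcherFn.name, researcherFn.description);
```
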
## 6. Workflows (AxFlow)

AxFlow enables building complex, multi-step AI workflows with type safety. Each executed node's outputs appear on the flow state under `<nodeName>Result` (e.g., `summarizerResult`), which is how later steps read earlier results.

### Basic Flow

```typescript
import { ai, flow } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

const pipeline = flow<{ text: string }, { result: string }>()
  .node('summarizer', 'text:string -> summary:string')
  .node('translator', 'text:string -> translation:string')
  .execute('summarizer', (state) => ({ text: state.text }))
  .execute('translator', (state) => ({ text: state.summarizerResult.summary }))
  .map((state) => ({ result: state.translatorResult.translation }));

const result = await pipeline.forward(llm, { text: 'Long article...' });
console.log(result.result);
```

### Flow with Branching

```typescript
import { ai, flow } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

const workflow = flow<{ query: string; type: string }, { output: string }>()
  .node('technical', 'query:string -> answer:string')
  .node('creative', 'query:string -> answer:string')
  .branch(
    (state) => state.type === 'technical',
    (branch) => branch.execute('technical', (s) => ({ query: s.query })),
    (branch) => branch.execute('creative', (s) => ({ query: s.query }))
  )
  .map((state) => ({
    output: state.technicalResult?.answer || state.creativeResult?.answer || ''
  }));

const result = await workflow.forward(llm, {
  query: 'Explain quantum computing',
  type: 'technical'
});
```

### Flow with Parallel Execution

```typescript
import { ai, flow } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

const parallelFlow = flow<{ topic: string }, { combined: string }>()
  .node('pros', 'topic:string -> arguments:string')
  .node('cons', 'topic:string -> arguments:string')
  .node('summary', 'prosArgs:string, consArgs:string -> summary:string')
  .parallel([
    { branch: (b) => b.execute('pros', (s) => ({ topic: s.topic })) },
    { branch: (b) => b.execute('cons', (s) => ({ topic: s.topic })) }
  ])
  .execute('summary', (state) => ({
    prosArgs: state.prosResult.arguments,
    consArgs: state.consResult.arguments
  }))
  .map((state) => ({ combined: state.summaryResult.summary }));

const result = await parallelFlow.forward(llm, { topic: 'Remote work' });
```

### Complete Flow Example

```typescript
import { ai, flow, f } from '@ax-llm/ax';

// Define nodes with proper signatures
const researchNode = f()
  .input('topic', f.string())
  .output('research', f.string())
  .output('sources', f.string().array())
  .build();

const outlineNode = f()
  .input('research', f.string())
  .input('sources', f.string().array())
  .output('outline', f.string().array())
  .build();

const writeNode = f()
  .input('outline', f.string().array())
  .input('research', f.string())
  .output('draft', f.string())
  .build();

const editNode = f()
  .input('draft', f.string())
  .output('final', f.string())
  .output('wordCount', f.number())
  .build();

// Build the flow
const articlePipeline = flow<
  { topic: string },
  { article: string; wordCount: number }
>()
  .node('research', researchNode)
  .node('outline', outlineNode)
  .node('write', writeNode)
  .node('edit', editNode)
  .execute('research', (s) => ({ topic: s.topic }))
  .execute('outline', (s) => ({
    research: s.researchResult.research,
    sources: s.researchResult.sources
  }))
  .execute('write', (s) => ({
    outline: s.outlineResult.outline,
    research: s.researchResult.research
  }))
  .execute('edit', (s) => ({
    draft: s.writeResult.draft
  }))
  .map((s) => ({
    article: s.editResult.final,
    wordCount: s.editResult.wordCount
  }));

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

const result = await articlePipeline.forward(llm, {
  topic: 'The future of renewable energy'
});

console.log('Article:', result.article);
console.log('Word count:', result.wordCount);
```

## 7. Common Patterns

### Classification

```typescript
import { ai, ax } from '@ax-llm/ax';

const classifier = ax(
  'text:string -> category:class "spam, ham, uncertain", confidence:number'
);

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

const result = await classifier.forward(llm, {
  text: 'Congratulations! You won $1,000,000!'
});

console.log(result.category);    // 'spam'
console.log(result.confidence);  // 0.95
```

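Since the category values are fixed by the signature, downstream code can branch on them exhaustively. A small usage sketch based on the classifier above:

```typescript
switch (result.category) {
  case 'spam':
    console.log('Routing to spam folder');
    break;
  case 'ham':
    console.log('Delivering to inbox');
    break;
  default:
    console.log('Flagging for manual review'); // 'uncertain'
}
```
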
### Extraction

```typescript
import { ai, ax, f } from '@ax-llm/ax';

// Using string syntax
const extractor = ax(`
  text:string ->
  people:string[],
  organizations:string[],
  locations:string[],
  dates:date[]
`);

// Or with field builders for structured output
const structuredExtractor = ax(
  f()
    .input('text', f.string())
    .output('entities', f.object({
      people: f.string().array(),
      organizations: f.string().array(),
      locations: f.string().array()
    }))
    .build()
);

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

const result = await extractor.forward(llm, {
  text: 'Tim Cook announced that Apple will open a new store in Paris on January 15th.'
});

console.log(result.people);         // ['Tim Cook']
console.log(result.organizations);  // ['Apple']
console.log(result.locations);      // ['Paris']
```

### Multi-modal (Images)

```typescript
import { ai, ax, f } from '@ax-llm/ax';
import { readFileSync } from 'fs';

const imageAnalyzer = ax(
  f()
    .input('image', f.image('Image to analyze'))
    .input('question', f.string('Question about the image').optional())
    .output('description', f.string('Image description'))
    .output('objects', f.string().array())
    .build()
);

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

// From file
const imageData = readFileSync('./photo.jpg').toString('base64');
const result = await imageAnalyzer.forward(llm, {
  image: { mimeType: 'image/jpeg', data: imageData },
  question: 'What objects are in this image?'
});

// From URL (for providers that support it)
const result2 = await imageAnalyzer.forward(llm, {
  image: { mimeType: 'image/jpeg', url: 'https://example.com/image.jpg' }
});
```

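The field builders also accept audio and generic files as inputs (see `f.audio` and `f.file` in the field types above). A minimal sketch, assuming an audio payload uses the same `{ mimeType, data }` shape shown for images:

```typescript
import { ai, ax, f } from '@ax-llm/ax';
import { readFileSync } from 'fs';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

const transcriber = ax(
  f()
    .input('audio', f.audio('Audio clip to transcribe'))
    .output('transcript', f.string())
    .build()
);

// Assumed payload shape, mirroring the image example above
const audioData = readFileSync('./clip.wav').toString('base64');
const result = await transcriber.forward(llm, {
  audio: { mimeType: 'audio/wav', data: audioData }
});
console.log(result.transcript);
```
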
### Chaining Generators

```typescript
import { ai, ax } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

// Define generators
const researcher = ax('topic:string -> research:string, keyFacts:string[]');
const writer = ax('research:string, keyFacts:string[] -> article:string');
const editor = ax('article:string -> editedArticle:string, suggestions:string[]');

// Chain them
async function createArticle(topic: string) {
  const research = await researcher.forward(llm, { topic });

  const draft = await writer.forward(llm, {
    research: research.research,
    keyFacts: research.keyFacts
  });

  const final = await editor.forward(llm, {
    article: draft.article
  });

  return final;
}

const result = await createArticle('Artificial General Intelligence');
console.log(result.editedArticle);
```

### Error Handling

```typescript
import { ai, ax, AxGenerateError, AxAIServiceError } from '@ax-llm/ax';

const gen = ax('input:string -> output:string');
const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

try {
  const result = await gen.forward(llm, { input: 'test' });
  console.log(result.output);
} catch (error) {
  if (error instanceof AxGenerateError) {
    console.error('Generation failed:', error.message);
    console.error('Details:', error.details);
  } else if (error instanceof AxAIServiceError) {
    console.error('AI service error:', error.message);
  } else {
    throw error;
  }
}
```

### Examples and Few-Shot Learning

```typescript
import { ai, ax } from '@ax-llm/ax';

const classifier = ax('text:string -> sentiment:class "positive, negative, neutral"');

// Set examples for few-shot learning
classifier.setExamples([
  { text: 'I love this product!', sentiment: 'positive' },
  { text: 'This is terrible.', sentiment: 'negative' },
  { text: 'It works as expected.', sentiment: 'neutral' }
]);

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

const result = await classifier.forward(llm, {
  text: 'The quality exceeded my expectations!'
});
```

### Assertions and Validation

```typescript
import { ai, ax } from '@ax-llm/ax';

const gen = ax('number:number -> doubled:number');

// Add assertion
gen.addAssert(
  (output) => output.doubled === output.number * 2,
  'Output must be double the input'
);

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

// This will retry if assertion fails
const result = await gen.forward(llm, { number: 5 }, { maxRetries: 3 });
```

### Memory and Context

```typescript
import { ai, ax, AxMemory } from '@ax-llm/ax';

const chatbot = ax('userMessage:string -> response:string');
const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

// Create shared memory
const memory = new AxMemory();

// Conversation with memory
await chatbot.forward(llm, { userMessage: 'My name is Alice' }, { mem: memory });
const response = await chatbot.forward(llm, { userMessage: 'What is my name?' }, { mem: memory });
// response.response will reference "Alice"
```

## 8. Advanced Configuration

### Model Configuration

```typescript
import { ai, ax } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });
const gen = ax('input:string -> output:string');

const result = await gen.forward(llm, { input: 'test' }, {
  model: 'gpt-4o',
  modelConfig: {
    temperature: 0.7,
    maxTokens: 2000,
    topP: 0.9
  }
});
```

### Debugging

```typescript
import { ai, ax, axCreateDefaultColorLogger } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });
const gen = ax('input:string -> output:string');

// Enable debug logging
const result = await gen.forward(llm, { input: 'test' }, {
  debug: true,
  logger: axCreateDefaultColorLogger()
});
```

### Context Caching

```typescript
import { ai, ax } from '@ax-llm/ax';

const gen = ax('document:string, question:string -> answer:string');
const llm = ai({ name: 'anthropic', apiKey: process.env.ANTHROPIC_API_KEY! });

const longDocument = '... very long document ...';

// Enable context caching for long documents
const result = await gen.forward(llm, {
  document: longDocument,
  question: 'What is the main topic?'
}, {
  contextCache: {
    cacheBreakpoint: 'after-examples'
  }
});
```

## 9. Forward & AI Options

### Quick Reference Table

| Goal | Option | Example |
|------|--------|---------|
| Adjust creativity | `modelConfig.temperature` | `{ modelConfig: { temperature: 0.8 } }` |
| Limit response length | `modelConfig.maxTokens` | `{ modelConfig: { maxTokens: 500 } }` |
| Use different model | `model` | `{ model: 'gpt-4o-mini' }` |
| Enable caching | `contextCache` | `{ contextCache: { cacheBreakpoint: 'after-examples' } }` |
| Debug output | `debug` | `{ debug: true }` |
| Retry on failure | `maxRetries` | `{ maxRetries: 3 }` |
| Multi-sampling | `sampleCount` | `{ sampleCount: 5, resultPicker: bestResultPicker }` |
| Thinking models | `thinkingTokenBudget` | `{ thinkingTokenBudget: 10000 }` |
| Abort request | `abortSignal` | `{ abortSignal: controller.signal }` |
| Custom timeout | `timeout` | `{ timeout: 60000 }` |

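These options compose; a typical call combining several of the rows above (a minimal sketch using only options documented in this guide):

```typescript
import { ai, ax } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });
const gen = ax('question:string -> answer:string');

const result = await gen.forward(llm, { question: 'What is 2+2?' }, {
  model: 'gpt-4o-mini',                              // use a different model
  modelConfig: { temperature: 0.2, maxTokens: 500 }, // creativity + length
  maxRetries: 3,                                     // retry on failure
  debug: true                                        // debug output
});
```
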
### Execution Control Options

```typescript
import { ai, ax } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });
const gen = ax('input:string -> output:string');

const result = await gen.forward(llm, { input: 'test' }, {
  // Retry failed generations (validation failures, API errors)
  maxRetries: 3,

  // Maximum agentic steps (for agents with tools)
  maxSteps: 10,

  // Fail immediately on first error (don't retry)
  fastFail: true
});
```

### Model Configuration

```typescript
import { ai, ax } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });
const gen = ax('input:string -> output:string');

const result = await gen.forward(llm, { input: 'test' }, {
  // Override the model for this request
  model: 'gpt-4o-mini',

  // Model-specific configuration
  modelConfig: {
    // Sampling temperature (0.0 = deterministic, 2.0 = creative)
    temperature: 0.7,

    // Maximum tokens in response
    maxTokens: 2000,

    // Nucleus sampling threshold
    topP: 0.9,

    // Top-K sampling (not all providers support this)
    topK: 40,

    // Frequency penalty (-2.0 to 2.0)
    frequencyPenalty: 0.5,

    // Presence penalty (-2.0 to 2.0)
    presencePenalty: 0.5,

    // Stop sequences
    stopSequences: ['\n\n', 'END'],

    // Seed for reproducible outputs (when supported)
    seed: 12345,

    // Response format
    responseFormat: 'json_object'
  }
});
```

### Context Caching (Gemini/Anthropic)

Context caching saves costs when repeatedly querying with the same large context (documents, system prompts, examples).

```typescript
import { ai, ax } from '@ax-llm/ax';

const llm = ai({ name: 'anthropic', apiKey: process.env.ANTHROPIC_API_KEY! });
const gen = ax('document:string, question:string -> answer:string');

const longDocument = '... very long document ...';
const questions = ['What is the main topic?', 'Who are the key actors?'];

// Multiple questions about the same document - caching saves cost
for (const question of questions) {
  const result = await gen.forward(llm, { document: longDocument, question }, {
    contextCache: {
      // Cache name (required for identifying the cache)
      name: 'doc-analysis-cache',

      // Where to split the prompt for caching:
      // - 'system': Cache system prompt only
      // - 'after-functions': Cache system + function definitions
      // - 'after-examples': Cache system + functions + examples
      cacheBreakpoint: 'after-examples',

      // Cache time-to-live in seconds (default: provider-specific)
      ttlSeconds: 3600,

      // Minimum tokens to trigger caching (avoid caching small prompts)
      minTokens: 1000,

      // Refresh cache when within this window of expiry
      refreshWindowSeconds: 300,

      // Custom cache registry for external storage (Redis, etc.)
      registry: customCacheRegistry
    }
  });
}
```

### Thinking Models (o1, o3, Gemini 2.0 Flash Thinking)

For reasoning models that support extended thinking:

```typescript
import { ai, ax } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });
const gen = ax('problem:string -> solution:string');

const result = await gen.forward(llm, { problem: 'Complex math problem' }, {
  model: 'o1',

  // Token budget for thinking/reasoning (model-specific)
  thinkingTokenBudget: 10000,

  // Include thinking in output (when supported)
  showThoughts: true,

  // Custom field name for thoughts in output
  thoughtFieldName: 'reasoning'
});
```

### Multi-Sampling for Quality

Generate multiple samples and pick the best one:

```typescript
import { ai, ax } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });
const gen = ax('input:string -> output:string, confidence:number');

const result = await gen.forward(llm, { input: 'test' }, {
  // Generate multiple samples
  sampleCount: 5,

  // Pick the best result (custom function)
  resultPicker: (results) => {
    // Return the result with highest confidence
    return results.reduce((best, current) =>
      current.confidence > best.confidence ? current : best
    );
  }
});
```

### Function Calling Configuration

Control how agents use tools/functions:

```typescript
import { ai, agent } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });

const myAgent = agent('query:string -> answer:string', {
  functions: [searchTool, calculatorTool]
});

const result = await myAgent.forward(llm, { query: 'test' }, {
  // Function calling mode:
  // - 'auto': Model decides when to call functions
  // - 'none': Disable function calling
  // - 'required': Force at least one function call
  functionCallMode: 'auto',

  // Force a specific function to be called
  functionCall: 'searchTool',

  // Stop after this function is called
  stopFunction: 'finalAnswer',

  // Override available functions for this request
  functions: [searchTool],

  // Custom caching for function results
  cachingFunction: async (funcName, args) => {
    const cacheKey = `${funcName}:${JSON.stringify(args)}`;
    return await cache.get(cacheKey);
  }
});
```

### Debugging & Observability

```typescript
import { ai, ax, axCreateDefaultColorLogger } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });
const gen = ax('input:string -> output:string');

const result = await gen.forward(llm, { input: 'test' }, {
  // Enable debug logging
  debug: true,

  // Show verbose output (more details)
  verbose: true,

  // Hide system prompt in debug output (security)
  debugHideSystemPrompt: true,

  // Custom logger
  logger: axCreateDefaultColorLogger(),

  // OpenTelemetry tracer for distributed tracing
  tracer: openTelemetryTracer,

  // OpenTelemetry meter for metrics
  meter: openTelemetryMeter,

  // Parent trace context
  traceContext: parentSpan,

  // Custom labels for traces/metrics
  customLabels: { environment: 'production', version: '1.0' },

  // Exclude content from traces (privacy)
  excludeContentFromTrace: true
});
```

### Retry & Error Handling

```typescript
import { ai, ax } from '@ax-llm/ax';

const llm = ai({
  name: 'openai',
  apiKey: process.env.OPENAI_API_KEY!,
  options: {
    // Retry configuration for API calls
    retry: {
      // Maximum retry attempts
      maxRetries: 3,

      // Initial delay between retries (ms)
      initialDelayMs: 1000,

      // Maximum delay between retries (ms)
      maxDelayMs: 30000,

      // Backoff multiplier
      backoffMultiplier: 2,

      // Jitter factor (0-1) to randomize delays
      jitterFactor: 0.1,

      // HTTP status codes to retry on
      retryOnStatusCodes: [429, 500, 502, 503, 504]
    },

    // Rate limiter for API calls
    rateLimiter: customRateLimiter,

    // Request timeout (ms)
    timeout: 60000
  }
});
```

### Request Control

```typescript
import { ai, ax } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });
const gen = ax('input:string -> output:string');

// Create abort controller
const controller = new AbortController();

// Cancel after 5 seconds
setTimeout(() => controller.abort(), 5000);

try {
  const result = await gen.forward(llm, { input: 'test' }, {
    // Abort signal for cancellation
    abortSignal: controller.signal,

    // Request timeout (ms)
    timeout: 30000,

    // Custom fetch function (for proxies, etc.)
    fetch: customFetch,

    // CORS proxy URL (for browser environments)
    corsProxy: 'https://cors-proxy.example.com'
  });
} catch (error) {
  if (error instanceof Error && error.name === 'AbortError') {
    console.log('Request was cancelled');
  }
}
```

### Memory Configuration

```typescript
import { ai, ax, AxMemory } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });
const gen = ax('message:string -> response:string');

const memory = new AxMemory();

const result = await gen.forward(llm, { message: 'Hello' }, {
  // Use shared memory for conversation context
  mem: memory,

  // Disable automatic memory cleanup (keep all messages)
  disableMemoryCleanup: true
});
```

### Validation Options

```typescript
import { ai, ax } from '@ax-llm/ax';

const llm = ai({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });
const gen = ax('input:string -> output:string');

const result = await gen.forward(llm, { input: 'test' }, {
  // Strict mode: fail on any validation error
  strictMode: true,

  // Custom assertions (run after generation)
  asserts: [
    {
      fn: (output) => output.output.length > 10,
      message: 'Output must be longer than 10 characters'
    }
  ],

  // Streaming assertions (run during streaming)
  streamingAsserts: [
    {
      fn: (partial) => !partial.output?.includes('forbidden'),
      message: 'Output must not contain forbidden content'
    }
  ]
});
```

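Assertions pair naturally with `maxRetries` (see the Assertions and Validation pattern earlier: a failed assertion triggers a retry). A minimal sketch combining both options:

```typescript
const retried = await gen.forward(llm, { input: 'test' }, {
  maxRetries: 3, // regenerate up to 3 times if an assertion fails
  asserts: [
    { fn: (output) => output.output.length > 10, message: 'Too short' }
  ]
});
```
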
## 10. MCP Integration

MCP (Model Context Protocol) enables AxAgent to use external tools from MCP-compliant servers. This allows your agents to interact with databases, file systems, APIs, and other services through a standardized protocol.

### Quick Start

```typescript
import { AxAgent, AxAI, AxMCPClient } from '@ax-llm/ax';
import { AxMCPStdioTransport } from '@ax-llm/ax-tools';

// Create transport for local MCP server
const transport = new AxMCPStdioTransport({
  command: 'npx',
  args: ['-y', '@modelcontextprotocol/server-memory'],
});

// Initialize MCP client
const mcpClient = new AxMCPClient(transport, { debug: false });
await mcpClient.init();

// Create agent with MCP functions
const agent = new AxAgent({
  name: 'MyAssistant',
  description: 'An assistant with MCP capabilities',
  signature: 'userMessage -> response',
  functions: [mcpClient], // Pass client directly
});

const ai = new AxAI({ name: 'openai', apiKey: process.env.OPENAI_API_KEY! });
const result = await agent.forward(ai, { userMessage: 'Hello' });
```

### MCP Transports

#### AxMCPStdioTransport (Local Servers)

For MCP servers that run as local processes via stdin/stdout:

```typescript
import { AxMCPStdioTransport } from '@ax-llm/ax-tools';

const transport = new AxMCPStdioTransport({
  command: 'npx',
  args: ['-y', '@modelcontextprotocol/server-memory'],
});

// Clean up when done
await transport.terminate();
```

#### AxMCPStreambleHTTPTransport (Remote Servers)

For MCP servers accessible via HTTP (e.g., Pipedream, hosted services):

```typescript
import { AxMCPStreambleHTTPTransport } from '@ax-llm/ax/mcp/transports/httpStreamTransport.js';

const transport = new AxMCPStreambleHTTPTransport(
  'https://remote.mcp.pipedream.net',
  {
    headers: {
      'x-pd-project-id': projectId,
      'x-pd-environment': 'development',
      'x-pd-external-user-id': 'user123',
      'x-pd-app-slug': 'notion',
    },
    authorization: `Bearer ${accessToken}`,
  }
);
```

### Using MCP with Agents

Pass the MCP client directly to the agent's `functions` array:

```typescript
import { AxAgent, AxAI, AxMCPClient } from '@ax-llm/ax';
import { AxMCPStdioTransport } from '@ax-llm/ax-tools';

const transport = new AxMCPStdioTransport({
  command: 'npx',
  args: ['-y', '@modelcontextprotocol/server-memory'],
});

const mcpClient = new AxMCPClient(transport);
await mcpClient.init();

const memoryAgent = new AxAgent<
  { userMessage: string; userId: string },
  { assistantResponse: string }
>({
  name: 'MemoryAssistant',
  description: 'An assistant that remembers past conversations. Use the database functions to manage, search, and add memories.',
  signature: 'userMessage, userId -> assistantResponse',
  functions: [mcpClient],
});

const ai = new AxAI({
  name: 'openai',
  apiKey: process.env.OPENAI_API_KEY!,
  config: { model: 'gpt-4o-mini' }
});

// First interaction - stores memory
const first = await memoryAgent.forward(ai, {
  userMessage: 'My name is Alice and my favorite color is blue.',
  userId: 'user123',
});

// Later interaction - retrieves memory
const second = await memoryAgent.forward(ai, {
  userMessage: "What's my favorite color?",
  userId: 'user123',
});
```

### MCP Capabilities

MCP servers can provide three types of capabilities:

| Capability | Function Prefix | Description |
|------------|-----------------|-------------|
| **Tools** | *(none)* | Traditional function calls (e.g., `search`, `create`) |
| **Prompts** | `prompt_` | Prompt templates (e.g., `prompt_summarize`) |
| **Resources** | `resource_` | File/data access (e.g., `resource_config_json`) |

Check available capabilities:

```typescript
const mcpClient = new AxMCPClient(transport);
await mcpClient.init();

const caps = mcpClient.getCapabilities();
console.log('Tools:', caps.tools);          // true/false
console.log('Prompts:', caps.prompts);      // true/false
console.log('Resources:', caps.resources);  // true/false

// Or check individually
if (mcpClient.hasToolsCapability()) {
  console.log('Server supports tools');
}
```

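Combined with `toFunction()` (described under Getting Functions Directly below), the prefixes in the table make it possible to expose only a subset of a server's capabilities to an agent. A small sketch that keeps traditional tools only, filtering by the naming convention above:

```typescript
const allFunctions = mcpClient.toFunction();

// Drop prompt templates and resource accessors, keep plain tools
const toolsOnly = allFunctions.filter(
  (fn) => !fn.name.startsWith('prompt_') && !fn.name.startsWith('resource_')
);
```
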
### Function Overrides

Customize function names and descriptions while preserving functionality:

```typescript
const mcpClient = new AxMCPClient(transport, {
  functionOverrides: [
    {
      name: 'search_documents',
      updates: {
        name: 'findDocs',
        description: 'Search through all available documents'
      }
    },
    {
      name: 'prompt_summarize',
      updates: {
        name: 'getSummaryPrompt',
        description: 'Get a prompt template for summarization'
      }
    }
  ]
});
```

### Getting Functions Directly

If you need the function array instead of passing the client:

```typescript
const mcpClient = new AxMCPClient(transport);
await mcpClient.init();

// Get all functions (tools + prompts + resources)
const functions = mcpClient.toFunction();

// Use with agent
const agent = new AxAgent({
  name: 'MyAgent',
  signature: 'query -> answer',
  functions: functions, // Or spread: [...functions, otherFunction]
});
```

### Complete Example: Remote HTTP MCP Server

```typescript
import { AxAgent, AxAI, AxMCPClient } from '@ax-llm/ax';
import { AxMCPStreambleHTTPTransport } from '@ax-llm/ax/mcp/transports/httpStreamTransport.js';
import { createBackendClient } from '@pipedream/sdk/server';

// Initialize Pipedream SDK
const pd = createBackendClient({
  environment: 'development',
  credentials: {
    clientId: process.env.PIPEDREAM_CLIENT_ID!,
    clientSecret: process.env.PIPEDREAM_CLIENT_SECRET!,
  },
  projectId: process.env.PIPEDREAM_PROJECT_ID!,
});

// Get access token and app info
const accessToken = await pd.rawAccessToken();
const apps = await pd.getApps({ q: 'notion' });
const appSlug = apps.data[0]?.name_slug;

// Create HTTP transport for Pipedream MCP
const httpTransport = new AxMCPStreambleHTTPTransport(
  'https://remote.mcp.pipedream.net',
  {
    headers: {
      'x-pd-project-id': process.env.PIPEDREAM_PROJECT_ID!,
      'x-pd-environment': 'development',
      'x-pd-external-user-id': 'user123',
      'x-pd-app-slug': appSlug!,
    },
    authorization: `Bearer ${accessToken}`,
  }
);

// Initialize MCP client
const mcpClient = new AxMCPClient(httpTransport, { debug: false });
await mcpClient.init();

// Create Notion agent
const notionAgent = new AxAgent<
  { userRequest: string },
  { assistantResponse: string }
>({
  name: 'NotionAssistant',
  description: 'An assistant that can interact with Notion documents. Use the provided functions to read, search, and analyze Notion content.',
  signature: 'userRequest -> assistantResponse',
  functions: [mcpClient],
});

const ai = new AxAI({
  name: 'openai',
  apiKey: process.env.OPENAI_API_KEY!,
  config: { model: 'gpt-4o-mini' }
});

// Use the agent
const response = await notionAgent.forward(ai, {
  userRequest: 'Summarize my most recently created Notion doc'
});
console.log(response.assistantResponse);
```

### Example Files

Full working examples on GitHub:

- [Local Memory Server (stdio)](https://raw.githubusercontent.com/ax-llm/ax/refs/heads/main/src/examples/mcp-client-memory.ts) - Memory-augmented agent using local MCP server
- [Remote HTTP Server (Pipedream/Notion)](https://raw.githubusercontent.com/ax-llm/ax/refs/heads/main/src/examples/mcp-client-pipedream.ts) - Notion integration via Pipedream MCP

## Type Reference

```typescript
// Core types
type AxGenIn = Record<string, any>;
type AxGenOut = Record<string, any>;

// Generator
class AxGen<IN, OUT> {
  forward(ai: AxAIService, values: IN, options?: AxProgramForwardOptions): Promise<OUT>;
  streamingForward(ai: AxAIService, values: IN, options?: AxProgramStreamingForwardOptions): AsyncGenerator<{ delta: Partial<OUT> }>;
  setExamples(examples: Array<Partial<IN & OUT>>): void;
  addAssert(fn: (output: OUT) => boolean, message?: string): void;
  addFieldProcessor(field: keyof OUT, fn: (value: any) => any): void;
  addStreamingFieldProcessor(field: keyof OUT, fn: (chunk: string, ctx: any) => void): void;
}

// Agent
class AxAgent<IN, OUT> {
  forward(ai: AxAIService, values: IN, options?: AxAgentOptions): Promise<OUT>;
  streamingForward(ai: AxAIService, values: IN, options?: AxAgentOptions): AsyncGenerator<{ delta: Partial<OUT> }>;
  getFunction(): AxFunction;
}

// Flow
class AxFlow<IN, OUT, TNodes, TState> {
  node(name: string, signature: string | AxSignature): AxFlow;
  execute(nodeName: string, inputMapper: (state: TState) => any): AxFlow;
  map(transformer: (state: TState) => OUT): AxFlow;
  branch(condition: (state: TState) => boolean, ifTrue: FlowBranch, ifFalse: FlowBranch): AxFlow;
  parallel(branches: ParallelBranch[]): AxFlow;
  forward(ai: AxAIService, input: IN): Promise<OUT>;
}
```