ai-functions 2.0.2 → 2.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (130)
  1. package/.turbo/turbo-build.log +4 -5
  2. package/CHANGELOG.md +38 -0
  3. package/LICENSE +21 -0
  4. package/README.md +361 -159
  5. package/dist/ai-promise.d.ts +47 -0
  6. package/dist/ai-promise.d.ts.map +1 -1
  7. package/dist/ai-promise.js +291 -3
  8. package/dist/ai-promise.js.map +1 -1
  9. package/dist/ai.d.ts +17 -18
  10. package/dist/ai.d.ts.map +1 -1
  11. package/dist/ai.js +93 -39
  12. package/dist/ai.js.map +1 -1
  13. package/dist/batch-map.d.ts +46 -4
  14. package/dist/batch-map.d.ts.map +1 -1
  15. package/dist/batch-map.js +35 -2
  16. package/dist/batch-map.js.map +1 -1
  17. package/dist/batch-queue.d.ts +116 -12
  18. package/dist/batch-queue.d.ts.map +1 -1
  19. package/dist/batch-queue.js +47 -2
  20. package/dist/batch-queue.js.map +1 -1
  21. package/dist/budget.d.ts +272 -0
  22. package/dist/budget.d.ts.map +1 -0
  23. package/dist/budget.js +500 -0
  24. package/dist/budget.js.map +1 -0
  25. package/dist/cache.d.ts +272 -0
  26. package/dist/cache.d.ts.map +1 -0
  27. package/dist/cache.js +412 -0
  28. package/dist/cache.js.map +1 -0
  29. package/dist/context.d.ts +32 -1
  30. package/dist/context.d.ts.map +1 -1
  31. package/dist/context.js +16 -1
  32. package/dist/context.js.map +1 -1
  33. package/dist/eval/runner.d.ts +2 -1
  34. package/dist/eval/runner.d.ts.map +1 -1
  35. package/dist/eval/runner.js.map +1 -1
  36. package/dist/generate.d.ts.map +1 -1
  37. package/dist/generate.js +6 -10
  38. package/dist/generate.js.map +1 -1
  39. package/dist/index.d.ts +27 -20
  40. package/dist/index.d.ts.map +1 -1
  41. package/dist/index.js +72 -42
  42. package/dist/index.js.map +1 -1
  43. package/dist/primitives.d.ts +17 -0
  44. package/dist/primitives.d.ts.map +1 -1
  45. package/dist/primitives.js +19 -1
  46. package/dist/primitives.js.map +1 -1
  47. package/dist/retry.d.ts +303 -0
  48. package/dist/retry.d.ts.map +1 -0
  49. package/dist/retry.js +539 -0
  50. package/dist/retry.js.map +1 -0
  51. package/dist/schema.d.ts.map +1 -1
  52. package/dist/schema.js +1 -9
  53. package/dist/schema.js.map +1 -1
  54. package/dist/tool-orchestration.d.ts +391 -0
  55. package/dist/tool-orchestration.d.ts.map +1 -0
  56. package/dist/tool-orchestration.js +663 -0
  57. package/dist/tool-orchestration.js.map +1 -0
  58. package/dist/types.d.ts +50 -33
  59. package/dist/types.d.ts.map +1 -1
  60. package/evalite.config.js +14 -0
  61. package/evals/classification.eval.js +97 -0
  62. package/evals/marketing.eval.js +289 -0
  63. package/evals/math.eval.js +83 -0
  64. package/evals/run-evals.js +151 -0
  65. package/evals/structured-output.eval.js +131 -0
  66. package/evals/writing.eval.js +105 -0
  67. package/examples/batch-blog-posts.js +128 -0
  68. package/package.json +26 -26
  69. package/src/ai-promise.ts +359 -3
  70. package/src/ai.ts +155 -110
  71. package/src/batch/anthropic.js +256 -0
  72. package/src/batch/bedrock.js +584 -0
  73. package/src/batch/cloudflare.js +287 -0
  74. package/src/batch/google.js +359 -0
  75. package/src/batch/index.js +30 -0
  76. package/src/batch/memory.js +187 -0
  77. package/src/batch/openai.js +402 -0
  78. package/src/batch-map.ts +46 -4
  79. package/src/batch-queue.ts +116 -12
  80. package/src/budget.ts +727 -0
  81. package/src/cache.ts +653 -0
  82. package/src/context.ts +33 -1
  83. package/src/eval/index.js +7 -0
  84. package/src/eval/models.js +119 -0
  85. package/src/eval/runner.js +147 -0
  86. package/src/eval/runner.ts +3 -2
  87. package/src/generate.ts +7 -12
  88. package/src/index.ts +231 -53
  89. package/src/primitives.ts +19 -1
  90. package/src/retry.ts +776 -0
  91. package/src/schema.ts +1 -10
  92. package/src/tool-orchestration.ts +1008 -0
  93. package/src/types.ts +59 -41
  94. package/test/ai-proxy.test.js +157 -0
  95. package/test/async-iterators.test.js +261 -0
  96. package/test/backward-compat.test.ts +147 -0
  97. package/test/batch-autosubmit-errors.test.ts +598 -0
  98. package/test/batch-background.test.js +352 -0
  99. package/test/batch-blog-posts.test.js +293 -0
  100. package/test/blog-generation.test.js +390 -0
  101. package/test/browse-read.test.js +480 -0
  102. package/test/budget-tracking.test.ts +800 -0
  103. package/test/cache.test.ts +712 -0
  104. package/test/context-isolation.test.ts +687 -0
  105. package/test/core-functions.test.js +490 -0
  106. package/test/decide.test.js +260 -0
  107. package/test/define.test.js +232 -0
  108. package/test/e2e-bedrock-manual.js +136 -0
  109. package/test/e2e-bedrock.test.js +164 -0
  110. package/test/e2e-flex-gateway.js +131 -0
  111. package/test/e2e-flex-manual.js +156 -0
  112. package/test/e2e-flex.test.js +174 -0
  113. package/test/e2e-google-manual.js +150 -0
  114. package/test/e2e-google.test.js +181 -0
  115. package/test/embeddings.test.js +220 -0
  116. package/test/evals/define-function.eval.test.js +309 -0
  117. package/test/evals/deterministic.eval.test.ts +376 -0
  118. package/test/evals/primitives.eval.test.js +360 -0
  119. package/test/function-types.test.js +407 -0
  120. package/test/generate-core.test.js +213 -0
  121. package/test/generate.test.js +143 -0
  122. package/test/generic-order.test.ts +342 -0
  123. package/test/implicit-batch.test.js +326 -0
  124. package/test/json-parse-error-handling.test.ts +463 -0
  125. package/test/retry.test.ts +1016 -0
  126. package/test/schema.test.js +96 -0
  127. package/test/streaming.test.ts +316 -0
  128. package/test/tagged-templates.test.js +240 -0
  129. package/test/tool-orchestration.test.ts +770 -0
  130. package/vitest.config.js +39 -0
package/README.md CHANGED
@@ -1,254 +1,456 @@
1
1
  # ai-functions
2
2
 
3
- Call AI like you'd talk to a colleague. No prompts. No configuration. Just say what you need.
3
+ **Calling AI models shouldn't require 50 lines of boilerplate.**
4
+
5
+ You just want to get a response from Claude, GPT, or Gemini. Instead, you're drowning in SDK initialization, error handling, retry logic, JSON parsing, and type coercion. Every AI call becomes a small engineering project.
6
+
7
+ ```typescript
8
+ // What you're doing now
9
+ import Anthropic from '@anthropic-ai/sdk'
10
+ const client = new Anthropic()
11
+ try {
12
+ const response = await client.messages.create({
13
+ model: 'claude-sonnet-4-20250514',
14
+ max_tokens: 1024,
15
+ messages: [{ role: 'user', content: 'List 5 startup ideas' }],
16
+ })
17
+ const text = response.content[0].type === 'text'
18
+ ? response.content[0].text
19
+ : ''
20
+ const ideas = JSON.parse(text) // Pray it's valid JSON
21
+ } catch (e) {
22
+ if (e.status === 429) { /* rate limit logic */ }
23
+ if (e.status === 500) { /* retry logic */ }
24
+ // ... 30 more lines
25
+ }
26
+ ```
4
27
 
5
28
  ```typescript
6
- import { ai, list, is } from 'ai-functions'
29
+ // What you could be doing
30
+ import { list } from 'ai-functions'
7
31
 
8
- // Ask for anything - it reads like English
9
- const qualified = is`${lead} a good fit for our enterprise plan?`
10
- const ideas = list`blog posts that would resonate with ${persona}`
11
- const { summary, nextSteps } = ai`analyze this sales call: ${transcript}`
32
+ const ideas = await list`5 startup ideas`
12
33
  ```
13
34
 
14
35
  ## Installation
15
36
 
16
37
  ```bash
17
- pnpm add ai-functions
38
+ npm install ai-functions
18
39
  ```
19
40
 
20
- ## The Magic: Promise Pipelining
41
+ Set your API key:
21
42
 
22
- Chain operations naturally—no `await` needed until you actually need the result:
43
+ ```bash
44
+ export ANTHROPIC_API_KEY=sk-... # or OPENAI_API_KEY
45
+ ```
46
+
47
+ ## Quick Start
48
+
49
+ ### Template Literals for Natural AI Calls
23
50
 
24
51
  ```typescript
25
- // Destructure to get exactly what you need
26
- const { qualified, score, reason } = ai`qualify this lead: ${lead}`
52
+ import { ai, list, is, write } from 'ai-functions'
27
53
 
28
- // Chain functions together—dependencies resolve automatically
29
- const followUp = ai`write follow-up email based on: ${reason}`
30
- const subject = ai`subject line for: ${followUp}`
54
+ // Generate text
55
+ const poem = await write`a haiku about TypeScript`
31
56
 
32
- // Only await when you need the actual value
33
- if (await qualified) {
34
- await sendEmail({ to: lead.email, subject: await subject, body: await followUp })
35
- }
57
+ // Generate lists
58
+ const ideas = await list`10 startup ideas in healthcare`
59
+
60
+ // Yes/no decisions
61
+ const isValid = await is`"john@example" is a valid email`
62
+
63
+ // Structured objects with auto-inferred schema
64
+ const { title, summary, tags } = await ai`analyze this article: ${articleText}`
36
65
  ```
37
66
 
38
- ## Real-World Examples
67
+ ### The `list` Primitive with `.map()`
39
68
 
40
- ### Lead Qualification
69
+ Process lists with automatic batching - one prompt generates items, then each item is processed in parallel:
41
70
 
42
71
  ```typescript
43
- const { score, qualified, reasoning } = ai`
44
- qualify ${lead} for our product
45
- considering: ${idealCustomerProfile}
46
- `
72
+ const ideas = await list`5 startup ideas`.map(idea => ({
73
+ idea,
74
+ viable: is`${idea} is technically feasible`,
75
+ market: ai`estimate market size for ${idea}`,
76
+ }))
47
77
 
48
- if (await qualified) {
49
- await assignToSales(lead)
50
- }
78
+ // Result: Array of { idea, viable, market } objects
51
79
  ```
52
80
 
53
- ### Content Marketing
81
+ ### Boolean Checks with `is`
54
82
 
55
83
  ```typescript
56
- // Generate topic ideas for your audience
57
- const topics = list`content ideas for ${persona} in ${industry}`
84
+ // Simple validation
85
+ const isColor = await is`"turquoise" is a color` // true
58
86
 
59
- // Evaluate each in parallel—single LLM call!
60
- const evaluated = await topics.map(topic => ({
61
- topic,
62
- potential: is`${topic} would drive signups?`,
63
- difficulty: ai`content difficulty for: ${topic}`,
64
- }))
87
+ // Content moderation
88
+ const isSafe = await is`${userContent} is safe for work`
65
89
 
66
- // Pick the best
67
- const winner = evaluated.find(t => t.potential && t.difficulty === 'easy')
90
+ // Quality checks
91
+ const { conclusion } = await ai`write about ${topic}`
92
+ const isWellArgued = await is`${conclusion} is well-argued`
68
93
  ```
69
94
 
70
- ### Sales Intelligence
95
+ ### Task Execution with `do`
71
96
 
72
97
  ```typescript
73
- const { pros, cons, objections } = lists`
74
- competitive analysis: ${ourProduct} vs ${competitor}
75
- `
76
-
77
- const battleCard = ai`
78
- sales battlecard addressing: ${objections}
79
- highlighting: ${pros}
80
- `
98
+ const { summary, actions } = await do`send welcome email to ${user.email}`
99
+ // Returns: { summary: "...", actions: ["Created email", "Sent via SMTP", ...] }
81
100
  ```
82
101
 
83
- ### Customer Success
102
+ ## Features
103
+
104
+ ### Batch Processing (50% Cost Savings)
105
+
106
+ Process large workloads at half the cost using provider batch APIs:
84
107
 
85
108
  ```typescript
86
- // Analyze customer health
87
- const { healthy, churnRisk, opportunities } = ai`
88
- analyze customer health for ${customer}
89
- based on: ${usageData}
90
- `
109
+ import { createBatch, write } from 'ai-functions'
91
110
 
92
- if (await churnRisk) {
93
- const outreach = ai`retention outreach for ${customer} addressing ${churnRisk}`
94
- await scheduleCall(customer, await outreach)
95
- }
111
+ // Create a batch queue
112
+ const batch = createBatch({ provider: 'openai' })
113
+
114
+ // Add items (deferred, not executed)
115
+ const posts = titles.map(title =>
116
+ batch.add(`Write a blog post about ${title}`)
117
+ )
118
+
119
+ // Submit for batch processing
120
+ const { job } = await batch.submit()
121
+ console.log(job.id) // batch_abc123
122
+
123
+ // Wait for results (up to 24hr turnaround)
124
+ const results = await batch.wait()
96
125
  ```
97
126
 
98
- ### Recruiting
127
+ Or use the `withBatch` helper:
99
128
 
100
129
  ```typescript
101
- const candidates = list`source ${role} candidates from ${jobBoards}`
130
+ import { withBatch } from 'ai-functions'
102
131
 
103
- const evaluated = await candidates.map(candidate => ({
104
- candidate,
105
- fit: is`${candidate} matches ${requirements}?`,
106
- summary: ai`one-line summary of ${candidate}`,
107
- }))
108
-
109
- const shortlist = evaluated.filter(c => c.fit)
132
+ const results = await withBatch(async (batch) => {
133
+ return ['TypeScript', 'React', 'Next.js'].map(topic =>
134
+ batch.add(`Write tutorial about ${topic}`)
135
+ )
136
+ })
110
137
  ```
111
138
 
112
- ## API Reference
139
+ ### Retry & Circuit Breaker
113
140
 
114
- ### Generation
141
+ Built-in resilience for production workloads:
115
142
 
116
143
  ```typescript
117
- ai`anything you need` // flexible object/text
118
- write`blog post about ${topic}` // long-form content
119
- summarize`${document}` // condense to key points
120
- list`ideas for ${topic}` // array of items
121
- lists`pros and cons of ${topic}` // named lists
122
- extract`emails from ${text}` // structured extraction
123
- ```
144
+ import { withRetry, RetryPolicy, CircuitBreaker, FallbackChain } from 'ai-functions'
124
145
 
125
- ### Classification
146
+ // Simple retry wrapper
147
+ const reliableAI = withRetry(myAIFunction, {
148
+ maxRetries: 3,
149
+ baseDelay: 1000,
150
+ jitter: 0.2, // Prevent thundering herd
151
+ })
126
152
 
127
- ```typescript
128
- is`${email} spam?` // boolean
129
- decide`which converts better?`(optionA, optionB) // pick best
153
+ // Advanced retry policy
154
+ const policy = new RetryPolicy({
155
+ maxRetries: 5,
156
+ baseDelay: 1000,
157
+ maxDelay: 30000,
158
+ jitterStrategy: 'decorrelated',
159
+ respectRetryAfter: true, // Honor rate limit headers
160
+ })
161
+
162
+ await policy.execute(async () => {
163
+ return await ai`generate content`
164
+ })
165
+
166
+ // Circuit breaker for fail-fast
167
+ const breaker = new CircuitBreaker({
168
+ failureThreshold: 5,
169
+ resetTimeout: 30000,
170
+ })
171
+
172
+ await breaker.execute(async () => {
173
+ return await ai`generate content`
174
+ })
175
+
176
+ // Model fallback chain
177
+ const fallback = new FallbackChain([
178
+ { name: 'claude-sonnet', execute: () => generateWithClaude(prompt) },
179
+ { name: 'gpt-4o', execute: () => generateWithGPT(prompt) },
180
+ { name: 'gemini-pro', execute: () => generateWithGemini(prompt) },
181
+ ])
182
+
183
+ const result = await fallback.execute()
130
184
  ```
131
185
 
132
- ### Code & Visuals
186
+ ### Caching
187
+
188
+ Avoid redundant API calls with intelligent caching:
133
189
 
134
190
  ```typescript
135
- code`email validation function` // generate code
136
- diagram`user flow for ${feature}` // mermaid diagrams
137
- slides`pitch deck for ${startup}` // presentations
138
- image`hero image for ${brand}` // image generation
191
+ import { GenerationCache, EmbeddingCache, withCache, MemoryCache } from 'ai-functions'
192
+
193
+ // Generation cache
194
+ const cache = new GenerationCache({
195
+ defaultTTL: 3600000, // 1 hour
196
+ maxSize: 1000, // LRU eviction
197
+ })
198
+
199
+ // Check cache first
200
+ const cached = await cache.get({ prompt, model: 'sonnet' })
201
+ if (!cached) {
202
+ const result = await ai`${prompt}`
203
+ await cache.set({ prompt, model: 'sonnet' }, result)
204
+ }
205
+
206
+ // Embedding cache with batch support
207
+ const embedCache = new EmbeddingCache()
208
+ const { hits, misses } = await embedCache.getMany(texts, { model: 'text-embedding-3-small' })
209
+
210
+ // Wrap any function with caching
211
+ const cachedGenerate = withCache(
212
+ new MemoryCache(),
213
+ generateContent,
214
+ { keyFn: (prompt) => prompt, ttl: 3600000 }
215
+ )
139
216
  ```
140
217
 
141
- ### Research & Web
218
+ ### Budget Tracking
219
+
220
+ Monitor and limit spending:
142
221
 
143
222
  ```typescript
144
- research`${competitor} market position` // web research
145
- read`${url}` // url to markdown
146
- browse`${url}` // browser automation
223
+ import { BudgetTracker, withBudget, BudgetExceededError } from 'ai-functions'
224
+
225
+ // Create a budget tracker
226
+ const tracker = new BudgetTracker({
227
+ maxTokens: 100000,
228
+ maxCost: 10.00, // USD
229
+ alertThresholds: [0.5, 0.8, 0.95],
230
+ onAlert: (alert) => {
231
+ console.log(`Budget ${alert.type} at ${alert.threshold * 100}%`)
232
+ },
233
+ })
234
+
235
+ // Record usage
236
+ tracker.recordUsage({
237
+ inputTokens: 1500,
238
+ outputTokens: 500,
239
+ model: 'claude-sonnet-4-20250514',
240
+ })
241
+
242
+ // Check remaining budget
243
+ console.log(tracker.getRemainingBudget())
244
+ // { tokens: 98000, cost: 9.95 }
245
+
246
+ // Use with automatic tracking
247
+ const result = await withBudget({ maxCost: 5.00 }, async (tracker) => {
248
+ // All AI calls within this scope are tracked
249
+ return await ai`generate content`
250
+ })
147
251
  ```
148
252
 
149
- ### Human-in-the-Loop
253
+ ### Tool Orchestration
254
+
255
+ Build agentic loops with tool calling:
150
256
 
151
257
  ```typescript
152
- ask`what's the priority for ${feature}?` // free-form input
153
- approve`deploy ${version} to production?` // yes/no approval
154
- review`${document}` // detailed feedback
258
+ import { AgenticLoop, createTool, createToolset } from 'ai-functions'
259
+
260
+ // Define tools
261
+ const searchTool = createTool({
262
+ name: 'search',
263
+ description: 'Search the web',
264
+ parameters: { query: 'Search query' },
265
+ handler: async ({ query }) => await searchWeb(query),
266
+ })
267
+
268
+ const calculatorTool = createTool({
269
+ name: 'calculate',
270
+ description: 'Perform calculations',
271
+ parameters: { expression: 'Math expression' },
272
+ handler: async ({ expression }) => eval(expression),
273
+ })
274
+
275
+ // Create an agentic loop
276
+ const loop = new AgenticLoop({
277
+ model: 'claude-sonnet-4-20250514',
278
+ tools: [searchTool, calculatorTool],
279
+ maxIterations: 10,
280
+ })
281
+
282
+ const result = await loop.run('What is the population of Tokyo times 2?')
155
283
  ```
156
284
 
157
- ## The `lists` Function
285
+ ## Configuration
158
286
 
159
- Get exactly what you ask for through destructuring:
287
+ ### Global Configuration
160
288
 
161
289
  ```typescript
162
- // Just name what you want—the schema is inferred!
163
- const { pros, cons } = lists`pros and cons of ${decision}`
164
- const { strengths, weaknesses, opportunities, threats } = lists`SWOT for ${company}`
165
- const { mustHave, niceToHave, outOfScope } = lists`requirements for ${feature}`
290
+ import { configure } from 'ai-functions'
291
+
292
+ configure({
293
+ model: 'claude-sonnet-4-20250514',
294
+ provider: 'anthropic',
295
+ batchMode: 'auto', // 'auto' | 'immediate' | 'flex' | 'deferred'
296
+ flexThreshold: 5, // Use flex for 5+ items
297
+ batchThreshold: 500, // Use batch API for 500+ items
298
+ })
166
299
  ```
167
300
 
168
- ## Batch Processing with `.map()`
169
-
170
- Process arrays in a single LLM call:
301
+ ### Scoped Configuration
171
302
 
172
303
  ```typescript
173
- const leads = await list`leads from ${campaign}`
304
+ import { withContext } from 'ai-functions'
305
+
306
+ const results = await withContext(
307
+ { provider: 'openai', model: 'gpt-4o', batchMode: 'deferred' },
308
+ async () => {
309
+ const titles = await list`10 blog titles`
310
+ return titles.map(title => write`blog post: ${title}`)
311
+ }
312
+ )
313
+ ```
174
314
 
175
- // Each field evaluated for each lead—all in ONE call
176
- const qualified = await leads.map(lead => ({
177
- lead,
178
- score: ai`score 1-100: ${lead}`,
179
- qualified: is`${lead} matches ${icp}?`,
180
- nextStep: ai`recommended action for ${lead}`,
181
- }))
315
+ ### Environment Variables
182
316
 
183
- // Filter and act
184
- qualified
185
- .filter(l => l.qualified)
186
- .forEach(l => createTask(l.nextStep))
317
+ ```bash
318
+ AI_MODEL=claude-sonnet-4-20250514
319
+ AI_PROVIDER=anthropic
320
+ AI_BATCH_MODE=auto
321
+ AI_FLEX_THRESHOLD=5
322
+ AI_BATCH_THRESHOLD=500
323
+ AI_BATCH_WEBHOOK_URL=https://api.example.com/webhook
187
324
  ```
188
325
 
189
- ## Typed Schemas with `AI()`
326
+ ## Schema-Based Functions
190
327
 
191
- For reusable, typed functions:
328
+ Create typed AI functions with simple schemas:
192
329
 
193
330
  ```typescript
331
+ import { AI } from 'ai-functions'
332
+
194
333
  const ai = AI({
195
- qualifyLead: {
196
- score: 'Lead score 1-100 (number)',
197
- qualified: 'Whether to pursue (boolean)',
198
- reasoning: 'Explanation of score',
199
- nextSteps: ['Recommended actions'],
334
+ recipe: {
335
+ name: 'Recipe name',
336
+ servings: 'Number of servings (number)',
337
+ ingredients: ['List of ingredients'],
338
+ steps: ['Cooking steps'],
339
+ prepTime: 'Prep time in minutes (number)',
200
340
  },
201
-
202
- analyzeCompetitor: {
203
- positioning: 'How they position themselves',
204
- strengths: ['Their advantages'],
205
- weaknesses: ['Their disadvantages'],
206
- battleCard: 'Key talking points for sales',
341
+ storyBrand: {
342
+ hero: 'Who is the customer?',
343
+ problem: {
344
+ internal: 'Internal struggle',
345
+ external: 'External challenge',
346
+ philosophical: 'Why is this wrong?',
347
+ },
348
+ guide: 'Who helps them?',
349
+ plan: ['Steps to success'],
350
+ callToAction: 'What should they do?',
351
+ success: 'What success looks like',
352
+ failure: 'What failure looks like',
207
353
  },
208
354
  })
209
355
 
210
- // Fully typed!
211
- const result = await ai.qualifyLead('Enterprise CTO interested in AI automation')
212
- result.score // number
213
- result.qualified // boolean
214
- result.nextSteps // string[]
215
- ```
356
+ // Fully typed results
357
+ const recipe = await ai.recipe('Italian pasta for 4 people')
358
+ // { name: string, servings: number, ingredients: string[], ... }
216
359
 
217
- ## Schema Syntax
360
+ const brand = await ai.storyBrand('A developer tools startup')
361
+ // { hero: string, problem: { internal, external, philosophical }, ... }
362
+ ```
218
363
 
219
- | Syntax | Type | Example |
220
- |--------|------|---------|
221
- | `'description'` | string | `name: 'Company name'` |
222
- | `'desc (number)'` | number | `score: 'Score 1-100 (number)'` |
223
- | `'desc (boolean)'` | boolean | `qualified: 'Pursue? (boolean)'` |
224
- | `'opt1 \| opt2'` | enum | `priority: 'high \| medium \| low'` |
225
- | `['description']` | array | `steps: ['Action items']` |
226
- | `{ nested }` | object | `contact: { name, email }` |
364
+ ## Define Custom Functions
227
365
 
228
- ## Philosophy
366
+ ```typescript
367
+ import { define, defineFunction } from 'ai-functions'
229
368
 
230
- **Code should read like conversation.**
369
+ // Auto-define from name and example args
370
+ const planTrip = await define('planTrip', {
371
+ destination: 'Tokyo',
372
+ travelers: 2
373
+ })
231
374
 
232
- Compare:
233
- ```typescript
234
- // Traditional AI code
235
- const response = await openai.chat.completions.create({
236
- model: "gpt-4",
237
- messages: [{ role: "user", content: `Analyze this lead: ${JSON.stringify(lead)}` }],
238
- response_format: { type: "json_object" },
375
+ // Or define explicitly with full control
376
+ const summarize = defineFunction({
377
+ type: 'generative',
378
+ name: 'summarize',
379
+ args: { text: 'Text to summarize', maxLength: 'Max words (number)' },
380
+ output: 'string',
381
+ promptTemplate: 'Summarize in {{maxLength}} words: {{text}}',
239
382
  })
240
- const result = JSON.parse(response.choices[0].message.content)
241
- ```
242
383
 
243
- ```typescript
244
- // ai-functions
245
- const { qualified, score, nextStep } = ai`analyze lead: ${lead}`
384
+ // Use the defined functions
385
+ const trip = await planTrip.call({ destination: 'Paris', travelers: 4 })
386
+ const summary = await summarize.call({ text: longArticle, maxLength: 100 })
246
387
  ```
247
388
 
248
- The second version is what you'd say to a colleague. That's the goal.
389
+ ## API Reference
390
+
391
+ ### Core Primitives
392
+
393
+ | Function | Description | Returns |
394
+ |----------|-------------|---------|
395
+ | `ai` | General-purpose generation with dynamic schema | `Promise<T>` |
396
+ | `write` | Generate text content | `Promise<string>` |
397
+ | `list` | Generate a list of items | `Promise<string[]>` |
398
+ | `lists` | Generate multiple named lists | `Promise<Record<string, string[]>>` |
399
+ | `is` | Boolean yes/no checks | `Promise<boolean>` |
400
+ | `do` | Execute a task | `Promise<{ summary, actions }>` |
401
+ | `extract` | Extract structured data | `Promise<T[]>` |
402
+ | `summarize` | Summarize content | `Promise<string>` |
403
+ | `code` | Generate code | `Promise<string>` |
404
+ | `decide` | Choose between options | `(options) => Promise<T>` |
405
+
406
+ ### Batch Processing
407
+
408
+ | Export | Description |
409
+ |--------|-------------|
410
+ | `createBatch()` | Create a batch queue for deferred execution |
411
+ | `withBatch()` | Execute operations in batch mode |
412
+ | `BatchQueue` | Class for managing batch jobs |
413
+
414
+ ### Resilience
415
+
416
+ | Export | Description |
417
+ |--------|-------------|
418
+ | `withRetry()` | Wrap function with retry logic |
419
+ | `RetryPolicy` | Configurable retry policy |
420
+ | `CircuitBreaker` | Fail-fast circuit breaker |
421
+ | `FallbackChain` | Model failover chain |
422
+
423
+ ### Caching
424
+
425
+ | Export | Description |
426
+ |--------|-------------|
427
+ | `MemoryCache` | In-memory LRU cache |
428
+ | `GenerationCache` | Cache for generation results |
429
+ | `EmbeddingCache` | Cache for embeddings |
430
+ | `withCache()` | Wrap function with caching |
431
+
432
+ ### Budget & Tracking
433
+
434
+ | Export | Description |
435
+ |--------|-------------|
436
+ | `BudgetTracker` | Track token usage and costs |
437
+ | `withBudget()` | Execute with budget limits |
438
+ | `RequestContext` | Request tracing and isolation |
439
+
440
+ ### Configuration
441
+
442
+ | Export | Description |
443
+ |--------|-------------|
444
+ | `configure()` | Set global defaults |
445
+ | `withContext()` | Scoped configuration |
446
+ | `getContext()` | Get current context |
249
447
 
250
448
  ## Related Packages
251
449
 
252
- - [`ai-database`](../ai-database) — AI-powered database operations
253
- - [`ai-providers`](../ai-providers) Model provider abstraction
254
- - [`language-models`](../language-models) Model definitions
450
+ - [`ai-core`](../ai-core) - Lightweight core primitives (no batch/budget/retry)
451
+ - [`ai-providers`](../ai-providers) - Provider integrations
452
+ - [`language-models`](../language-models) - Model aliases and configuration
453
+
454
+ ## License
455
+
456
+ MIT
@@ -32,6 +32,27 @@
32
32
  import type { SimpleSchema } from './schema.js';
33
33
  import type { FunctionOptions } from './template.js';
34
34
  import { BatchMapPromise } from './batch-map.js';
35
+ /**
36
+ * Options for streaming
37
+ */
38
+ export interface StreamOptions {
39
+ /** Abort signal for cancellation */
40
+ abortSignal?: AbortSignal;
41
+ }
42
+ /**
43
+ * Streaming result wrapper that provides both AsyncIterable interface
44
+ * and access to the final result
45
+ */
46
+ export interface StreamingAIPromise<T> extends AsyncIterable<T extends string ? string : Partial<T>> {
47
+ /** Stream of text chunks (for text generation) */
48
+ textStream: AsyncIterable<string>;
49
+ /** Stream of partial objects (for object generation) */
50
+ partialObjectStream: AsyncIterable<Partial<T>>;
51
+ /** Promise that resolves to the final complete result */
52
+ result: Promise<T>;
53
+ /** Promise interface - then() */
54
+ then<TResult1 = T, TResult2 = never>(onfulfilled?: ((value: T) => TResult1 | PromiseLike<TResult1>) | null, onrejected?: ((reason: unknown) => TResult2 | PromiseLike<TResult2>) | null): Promise<TResult1 | TResult2>;
55
+ }
35
56
  /** Symbol to identify AIPromise instances */
36
57
  export declare const AI_PROMISE_SYMBOL: unique symbol;
37
58
  /** Symbol to get the raw AIPromise from a proxy */
@@ -159,6 +180,32 @@ export declare class AIPromise<T> implements PromiseLike<T> {
159
180
  * Async iterator support with smart batching
160
181
  */
161
182
  [Symbol.asyncIterator](): AsyncIterator<T extends (infer I)[] ? I : T>;
183
+ /**
184
+ * Stream the AI generation - returns chunks as they arrive
185
+ *
186
+ * For text generation, yields string chunks.
187
+ * For object generation, yields partial objects as they build up.
188
+ * For list generation, yields items as they're generated.
189
+ *
190
+ * @example
191
+ * ```ts
192
+ * // Text streaming
193
+ * const stream = write`Write a story`.stream()
194
+ * for await (const chunk of stream.textStream) {
195
+ * process.stdout.write(chunk)
196
+ * }
197
+ *
198
+ * // Object streaming with partial updates
199
+ * const stream = ai`Generate a recipe`.stream()
200
+ * for await (const partial of stream.partialObjectStream) {
201
+ * console.log('Building:', partial)
202
+ * }
203
+ *
204
+ * // Get final result after streaming
205
+ * const finalResult = await stream.result
206
+ * ```
207
+ */
208
+ stream(options?: StreamOptions): StreamingAIPromise<T>;
162
209
  /**
163
210
  * Promise interface - then()
164
211
  */