@abassey/aid 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53) hide show
  1. package/dist/agents/index.cjs +741 -0
  2. package/dist/agents/index.d.cts +78 -0
  3. package/dist/agents/index.d.ts +78 -0
  4. package/dist/agents/index.js +741 -0
  5. package/dist/ai-AWJOUXFM.js +9 -0
  6. package/dist/ai-DOAYJKKI.cjs +9 -0
  7. package/dist/chunk-2TNYBUNK.js +124 -0
  8. package/dist/chunk-3LGKZRGY.cjs +124 -0
  9. package/dist/chunk-AUR2BBB5.cjs +1436 -0
  10. package/dist/chunk-IJLTRQF4.cjs +276 -0
  11. package/dist/chunk-JPD7UBAZ.js +58 -0
  12. package/dist/chunk-M4RQALTT.js +276 -0
  13. package/dist/chunk-NB65IHJE.cjs +58 -0
  14. package/dist/chunk-YNIEOBDF.js +1436 -0
  15. package/dist/client/index.cjs +18 -0
  16. package/dist/client/index.d.cts +8 -0
  17. package/dist/client/index.d.ts +8 -0
  18. package/dist/client/index.js +18 -0
  19. package/dist/errors-CUVTnseb.d.ts +13 -0
  20. package/dist/errors-CgCce4cK.d.cts +158 -0
  21. package/dist/errors-CgCce4cK.d.ts +158 -0
  22. package/dist/errors-zAPbTlpe.d.cts +13 -0
  23. package/dist/eval/index.cjs +308 -0
  24. package/dist/eval/index.d.cts +106 -0
  25. package/dist/eval/index.d.ts +106 -0
  26. package/dist/eval/index.js +308 -0
  27. package/dist/index.cjs +35 -0
  28. package/dist/index.d.cts +107 -0
  29. package/dist/index.d.ts +107 -0
  30. package/dist/index.js +35 -0
  31. package/dist/middleware/index.cjs +201 -0
  32. package/dist/middleware/index.d.cts +36 -0
  33. package/dist/middleware/index.d.ts +36 -0
  34. package/dist/middleware/index.js +201 -0
  35. package/dist/observability/index.cjs +147 -0
  36. package/dist/observability/index.d.cts +30 -0
  37. package/dist/observability/index.d.ts +30 -0
  38. package/dist/observability/index.js +147 -0
  39. package/dist/react/index.cjs +253 -0
  40. package/dist/react/index.d.cts +64 -0
  41. package/dist/react/index.d.ts +64 -0
  42. package/dist/react/index.js +253 -0
  43. package/dist/serve/index.cjs +545 -0
  44. package/dist/serve/index.d.cts +69 -0
  45. package/dist/serve/index.d.ts +69 -0
  46. package/dist/serve/index.js +545 -0
  47. package/dist/types-BJReASS-.d.cts +196 -0
  48. package/dist/types-BJReASS-.d.ts +196 -0
  49. package/dist/types-CguX3F16.d.cts +173 -0
  50. package/dist/types-CrFH-_qp.d.cts +68 -0
  51. package/dist/types-DvdzPmW0.d.ts +173 -0
  52. package/dist/types-qfE32ADy.d.ts +68 -0
  53. package/package.json +144 -0
@@ -0,0 +1,1436 @@
1
+ import {
2
+ AidError,
3
+ DEFAULT_CONFIG,
4
+ executePipeline,
5
+ getGlobalConfig,
6
+ resolveProvider
7
+ } from "./chunk-2TNYBUNK.js";
8
+
9
// src/core/streaming.ts
/**
 * Event-emitting async-iterable wrapper around a chunk generator.
 *
 * The wrapped generator yields chunk objects of the shape
 * `{ text, delta, done, response? }`; consumers can either `for await` the
 * chunks directly or register callbacks via `on("text" | "done" | "error")`.
 * Fix vs. previous revision: removed a dead, empty `if (self._aborted) {}`
 * block inside `next()` — abort is already honored at the top of `next()`.
 */
var AiStream = class {
  _generatorFn;
  _aborted = false;
  _textCallbacks = [];
  _doneCallbacks = [];
  _errorCallbacks = [];
  /**
   * @param generatorFn Zero-arg factory returning the async generator to
   *   drive; it is invoked lazily, once per `[Symbol.asyncIterator]()` call.
   */
  constructor(generatorFn) {
    this._generatorFn = generatorFn;
  }
  /**
   * Register a listener. Supported events:
   *  - "text":  called with each incremental delta string
   *  - "done":  called with the final response (only when the terminal
   *             chunk carries a `response`)
   *  - "error": called with the Error thrown by the underlying generator
   * Unknown event names are silently ignored.
   */
  on(event, cb) {
    switch (event) {
      case "text":
        this._textCallbacks.push(cb);
        break;
      case "done":
        this._doneCallbacks.push(cb);
        break;
      case "error":
        this._errorCallbacks.push(cb);
        break;
    }
  }
  /**
   * Signal the stream to stop after the current chunk.
   * The for-await loop will end gracefully without throwing.
   */
  abort() {
    this._aborted = true;
  }
  /**
   * Implement the AsyncIterable protocol so this works with `for await`.
   * Callbacks registered via `on()` fire as a side effect of iteration.
   */
  [Symbol.asyncIterator]() {
    const generator = this._generatorFn();
    const self = this;
    return {
      async next() {
        // Honor abort() before pulling the next chunk; close the generator
        // so its finally blocks (resource cleanup) run.
        if (self._aborted) {
          await generator.return(void 0);
          return { value: void 0, done: true };
        }
        try {
          const result = await generator.next();
          if (result.done) {
            return { value: void 0, done: true };
          }
          const chunk = result.value;
          if (chunk.done) {
            // Terminal chunk: notify "done" listeners only if a final
            // response object was attached.
            if (chunk.response) {
              for (const cb of self._doneCallbacks) {
                cb(chunk.response);
              }
            }
          } else {
            for (const cb of self._textCallbacks) {
              cb(chunk.delta);
            }
          }
          return { value: chunk, done: false };
        } catch (err) {
          const error = err instanceof Error ? err : new Error(String(err));
          for (const cb of self._errorCallbacks) {
            cb(error);
          }
          throw error;
        }
      },
      async return(value) {
        // Early loop exit (break/return): propagate closure to the generator.
        await generator.return(value);
        return { value: void 0, done: true };
      },
      async throw(err) {
        await generator.throw(err);
        return { value: void 0, done: true };
      }
    };
  }
};
90
+
91
// src/utils/http.ts
/**
 * fetch() with a hard timeout and optional caller-supplied AbortSignal.
 *
 * @param url       Request URL.
 * @param init      fetch init options (its `signal`, if any, is replaced).
 * @param timeoutMs Milliseconds before the request is aborted as a timeout.
 * @param signal    Optional external AbortSignal merged with the timeout's.
 * @returns The fetch Response (including non-2xx responses — callers map status).
 * @throws {AidError} "timeout" when the timer fires, "network" on transport
 *   errors. A caller-initiated abort is re-thrown unchanged (AbortError) so
 *   cancellation is not misreported as a timeout — this was previously
 *   mapped to AidError("timeout", ...), which was wrong.
 */
async function fetchWithTimeout(url, init, timeoutMs, signal) {
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
  try {
    const mergedSignal = signal ? createMergedSignal(signal, controller.signal) : controller.signal;
    const response = await fetch(url, {
      ...init,
      signal: mergedSignal
    });
    return response;
  } catch (error) {
    if (error instanceof Error && error.name === "AbortError") {
      // Only our own timer aborting counts as a timeout; if the external
      // signal fired (and ours did not), propagate the caller's abort as-is.
      if (!controller.signal.aborted && signal?.aborted) {
        throw error;
      }
      throw new AidError(
        "timeout",
        `Request timed out after ${timeoutMs}ms`,
        { cause: error }
      );
    }
    // fetch reports DNS/connection failures as TypeError.
    if (error instanceof TypeError) {
      throw new AidError(
        "network",
        `Network error: ${error.message}`,
        { cause: error }
      );
    }
    if (error instanceof AidError) {
      throw error;
    }
    throw new AidError(
      "network",
      error instanceof Error ? error.message : "Unknown network error",
      { cause: error instanceof Error ? error : void 0 }
    );
  } finally {
    // Single cleanup point (was duplicated in both success and error paths).
    clearTimeout(timeoutId);
  }
}
129
/**
 * Build (not throw) an AidError for a non-2xx HTTP status.
 *
 * @param status   HTTP status code.
 * @param body     Raw response body text (kept verbatim in `raw`).
 * @param provider Provider name attached to the error for diagnostics.
 * @returns An AidError whose code reflects the status class.
 */
function mapHttpError(status, body, provider) {
  // Default classification; specific statuses override below.
  let code = "provider_error";
  let message;
  if (status === 401 || status === 403) {
    code = "auth_error";
    message = `Authentication failed: ${body}`;
  } else if (status === 429) {
    code = "rate_limit";
    message = `Rate limit exceeded: ${body}`;
  } else if (status === 400) {
    code = "invalid_request";
    message = `Invalid request: ${body}`;
  } else if (status === 413) {
    code = "context_length";
    message = `Context length exceeded: ${body}`;
  } else if (status >= 500) {
    message = `Provider error: ${body}`;
  } else {
    message = `HTTP ${status}: ${body}`;
  }
  return new AidError(code, message, {
    provider,
    statusCode: status,
    raw: body
  });
}
157
/**
 * Combine two AbortSignals into one that aborts when either source does.
 * If a source is already aborted the merged signal starts aborted.
 */
function createMergedSignal(signal1, signal2) {
  const merged = new AbortController();
  if (signal1.aborted || signal2.aborted) {
    merged.abort();
    return merged.signal;
  }
  const propagate = () => merged.abort();
  for (const source of [signal1, signal2]) {
    source.addEventListener("abort", propagate, { once: true });
  }
  return merged.signal;
}
168
+
169
// src/utils/cost.ts
// Prices are USD per ONE MILLION tokens (input / output) per model id.
var PRICING_TABLE = {
  "claude-sonnet-4-6-20250217": { input: 3, output: 15 },
  "claude-sonnet-4-5-20250929": { input: 3, output: 15 },
  "claude-opus-4-6-20250205": { input: 15, output: 75 },
  "claude-opus-4-5-20250120": { input: 15, output: 75 },
  "claude-haiku-4-5-20251001": { input: 0.8, output: 4 },
  "gpt-5.4-2026-03-05": { input: 2.5, output: 10 },
  "gpt-5.3-2025-12-01": { input: 2.5, output: 10 },
  "gpt-4o": { input: 2.5, output: 10 },
  "gpt-4o-2024-08-06": { input: 2.5, output: 10 },
  "gpt-4o-mini": { input: 0.15, output: 0.6 },
  "o3-2025-04-16": { input: 10, output: 40 },
  "o3-mini": { input: 1.1, output: 4.4 },
  "o4-mini": { input: 1.1, output: 4.4 },
  "o4-mini-2025-04-16": { input: 1.1, output: 4.4 }
};
/**
 * USD cost of a call; returns 0 for models missing from PRICING_TABLE.
 */
function calculateCost(model, inputTokens, outputTokens) {
  const pricing = PRICING_TABLE[model];
  return pricing ? (inputTokens * pricing.input + outputTokens * pricing.output) / 1e6 : 0;
}
/**
 * Cost range before a call is made: `min` assumes a single output token,
 * `max` assumes the full `maxOutputTokens` budget is used.
 */
function estimateCost(params) {
  const { model, inputTokens, maxOutputTokens } = params;
  return {
    min: calculateCost(model, inputTokens, 1),
    max: calculateCost(model, inputTokens, maxOutputTokens)
  };
}
199
+
200
// src/providers/anthropic.ts
/**
 * Provider adapter for the Anthropic Messages API (/v1/messages).
 *
 * Refactor vs. previous revision: `call()` and `stream()` previously
 * duplicated ~150 lines of request-building and error-mapping code verbatim
 * (including a redundant `status >= 500 || status === 529` branch that
 * assigned the same code as its else). The shared logic now lives in the
 * `buildBody`, `postMessages` and `throwApiError` helpers; observable
 * behavior is unchanged.
 */
var AnthropicAdapter = class {
  name = "anthropic";
  supportsImages = true;
  supportsTools = true;
  apiKey;
  baseUrl;
  apiVersion;
  /**
   * @param {{ apiKey: string, baseUrl?: string, apiVersion?: string }} options
   */
  constructor(options) {
    this.apiKey = options.apiKey;
    this.baseUrl = options.baseUrl ?? "https://api.anthropic.com";
    this.apiVersion = options.apiVersion ?? "2023-06-01";
  }
  /**
   * Build the JSON body for /v1/messages. Extracts any "system" message
   * into the top-level `system` field (Anthropic does not accept system
   * messages in the `messages` array). Shared by call() and stream().
   * @param streaming When true, sets `stream: true` on the body.
   */
  buildBody(request, streaming) {
    let systemPrompt;
    const filteredMessages = [];
    for (const msg of request.messages) {
      if (msg.role === "system") {
        systemPrompt = msg.content;
      } else {
        filteredMessages.push(msg);
      }
    }
    const body = {
      model: request.model,
      messages: filteredMessages.map((msg) => this.mapMessage(msg)),
      max_tokens: request.options.maxTokens
    };
    if (streaming) {
      body.stream = true;
    }
    if (systemPrompt) {
      body.system = systemPrompt;
    }
    // temperature takes precedence; topP is only sent when temperature is unset.
    if (request.options.temperature !== void 0) {
      body.temperature = request.options.temperature;
    } else if (request.options.topP !== void 0) {
      body.top_p = request.options.topP;
    }
    if (request.options.stop && request.options.stop.length > 0) {
      body.stop_sequences = request.options.stop;
    }
    if (request.tools && request.tools.length > 0) {
      body.tools = request.tools.map((tool) => ({
        name: tool.name,
        description: tool.description,
        input_schema: tool.parameters
      }));
    }
    return body;
  }
  /** POST the prepared body to /v1/messages with auth headers. */
  async postMessages(body, timeout) {
    return fetchWithTimeout(
      `${this.baseUrl}/v1/messages`,
      {
        method: "POST",
        headers: {
          "content-type": "application/json",
          "x-api-key": this.apiKey,
          "anthropic-version": this.apiVersion
        },
        body: JSON.stringify(body)
      },
      timeout
    );
  }
  /**
   * Translate a non-OK HTTP response into an AidError and throw it.
   * 429 gets a dedicated rate_limit error carrying the retry-after header;
   * 400 bodies are sniffed for context-overflow wording. Never returns.
   * @throws {AidError} always.
   */
  async throwApiError(response, model) {
    const responseBody = await response.text();
    const status = response.status;
    if (status === 429) {
      const retryAfterHeader = response.headers.get("retry-after");
      const retryAfter = retryAfterHeader ? parseInt(retryAfterHeader, 10) : void 0;
      throw new AidError("rate_limit", `Rate limit exceeded: ${responseBody}`, {
        provider: this.name,
        model,
        statusCode: status,
        retryAfter,
        raw: responseBody
      });
    }
    let code;
    if (status === 401 || status === 403) {
      code = "auth_error";
    } else if (status === 400) {
      // Anthropic reports context overflow as a plain 400; detect it from
      // the body text so callers can distinguish it from other bad requests.
      const lowerBody = responseBody.toLowerCase();
      if (lowerBody.includes("context") || lowerBody.includes("token") || lowerBody.includes("too long")) {
        code = "context_length";
      } else {
        code = "invalid_request";
      }
    } else {
      // 5xx, 529 (overloaded) and anything unclassified.
      code = "provider_error";
    }
    throw new AidError(code, `Anthropic API error (${status}): ${responseBody}`, {
      provider: this.name,
      model,
      statusCode: status,
      raw: responseBody
    });
  }
  /**
   * Non-streaming completion: POST and normalize the JSON reply.
   * @throws {AidError} on timeout, network failure, or non-2xx status.
   */
  async call(request) {
    const startTime = Date.now();
    const body = this.buildBody(request, false);
    const response = await this.postMessages(body, request.options.timeout);
    if (!response.ok) {
      await this.throwApiError(response, request.model);
    }
    const data = await response.json();
    const latencyMs = Date.now() - startTime;
    return this.normalizeResponse(data, request.model, latencyMs);
  }
  /**
   * Streaming completion as an AiStream of `{ text, delta, done, response? }`.
   * NOTE(review): the entire SSE payload is buffered via response.text()
   * before parsing, so chunks are only emitted after the HTTP response
   * completes — kept as-is to preserve behavior.
   */
  stream(request) {
    const self = this;
    return new AiStream(async function* () {
      const startTime = Date.now();
      const body = self.buildBody(request, true);
      const response = await self.postMessages(body, request.options.timeout);
      if (!response.ok) {
        await self.throwApiError(response, request.model);
      }
      const text = await response.text();
      const events = self.parseSSE(text);
      let accumulated = "";
      let inputTokens = 0;
      let outputTokens = 0;
      let stopReason = "end_turn";
      let modelName = request.model;
      for (const event of events) {
        if (event.type === "message_start" && event.data.message) {
          // Input token count and resolved model name arrive up front.
          const msg = event.data.message;
          if (msg.usage?.input_tokens !== void 0) {
            inputTokens = msg.usage.input_tokens;
          }
          if (msg.model) {
            modelName = msg.model;
          }
        } else if (event.type === "content_block_delta") {
          const delta = event.data.delta;
          if (delta?.type === "text_delta" && delta.text) {
            accumulated += delta.text;
            yield {
              text: accumulated,
              delta: delta.text,
              done: false
            };
          }
        } else if (event.type === "message_delta") {
          // Carries the stop reason and final output token count.
          if (event.data.delta?.stop_reason) {
            stopReason = event.data.delta.stop_reason;
          }
          if (event.data.usage?.output_tokens !== void 0) {
            outputTokens = event.data.usage.output_tokens;
          }
        } else if (event.type === "message_stop") {
          const latencyMs = Date.now() - startTime;
          const aiResponse = {
            text: accumulated,
            model: modelName,
            tokens: {
              input: inputTokens,
              output: outputTokens,
              total: inputTokens + outputTokens
            },
            cost: calculateCost(modelName, inputTokens, outputTokens),
            latencyMs,
            finishReason: self.mapFinishReason(stopReason),
            raw: null
          };
          yield {
            text: accumulated,
            delta: "",
            done: true,
            response: aiResponse
          };
        }
      }
    });
  }
  /**
   * Parse a full SSE payload into `{ type, data }` events.
   * Events are terminated by a blank line; a trailing unterminated event is
   * still emitted. Frames with unparseable JSON are silently dropped.
   */
  parseSSE(text) {
    const events = [];
    const lines = text.split("\n");
    let currentEvent = "";
    let currentData = "";
    for (const line of lines) {
      if (line.startsWith("event: ")) {
        currentEvent = line.slice(7).trim();
      } else if (line.startsWith("data: ")) {
        currentData = line.slice(6);
      } else if (line.trim() === "" && currentEvent) {
        try {
          const parsed = JSON.parse(currentData);
          events.push({ type: currentEvent, data: parsed });
        } catch {
          // malformed frame — skip
        }
        currentEvent = "";
        currentData = "";
      }
    }
    if (currentEvent && currentData) {
      try {
        const parsed = JSON.parse(currentData);
        events.push({ type: currentEvent, data: parsed });
      } catch {
        // malformed trailing frame — skip
      }
    }
    return events;
  }
  /**
   * Convert a provider-neutral message to the Anthropic wire format.
   * "tool" role messages become user messages carrying a tool_result block.
   * @throws {AidError} for roles other than user/assistant/tool.
   */
  mapMessage(msg) {
    if (msg.role === "user") {
      if (typeof msg.content === "string") {
        return { role: "user", content: msg.content };
      }
      return {
        role: "user",
        content: msg.content.map((block) => {
          if (block.type === "text") {
            return { type: "text", text: block.text };
          }
          if (block.type === "image") {
            return {
              type: "image",
              source: {
                type: block.source.type,
                media_type: block.source.mediaType,
                data: block.source.data
              }
            };
          }
          if (block.type === "tool_result") {
            return {
              type: "tool_result",
              tool_use_id: block.toolUseId,
              content: block.content,
              ...block.isError !== void 0 && { is_error: block.isError }
            };
          }
          // Unknown block types pass through untouched.
          return block;
        })
      };
    }
    if (msg.role === "assistant") {
      if (typeof msg.content === "string") {
        return { role: "assistant", content: msg.content };
      }
      return {
        role: "assistant",
        content: msg.content.map((block) => {
          if (block.type === "text") {
            return { type: "text", text: block.text };
          }
          if (block.type === "tool_use") {
            return {
              type: "tool_use",
              id: block.id,
              name: block.name,
              input: block.input
            };
          }
          return block;
        })
      };
    }
    if (msg.role === "tool") {
      return {
        role: "user",
        content: [
          {
            type: "tool_result",
            tool_use_id: msg.toolCallId,
            content: msg.content
          }
        ]
      };
    }
    throw new AidError("invalid_request", `Unsupported message role: ${msg.role}`, {
      provider: this.name
    });
  }
  /**
   * Normalize the Anthropic JSON response into the library's AiResponse
   * shape: joined text blocks, extracted tool calls, usage and cost.
   */
  normalizeResponse(data, model, latencyMs) {
    const textBlocks = data.content.filter((b) => b.type === "text");
    const text = textBlocks.map((b) => b.text ?? "").join("");
    const toolUseBlocks = data.content.filter((b) => b.type === "tool_use");
    const toolCalls = toolUseBlocks.map((b) => ({
      id: b.id,
      name: b.name,
      args: b.input ?? {}
    }));
    const finishReason = this.mapFinishReason(data.stop_reason);
    const inputTokens = data.usage.input_tokens;
    const outputTokens = data.usage.output_tokens;
    const cost = calculateCost(model, inputTokens, outputTokens);
    const response = {
      text,
      model: data.model,
      tokens: {
        input: inputTokens,
        output: outputTokens,
        total: inputTokens + outputTokens
      },
      cost,
      latencyMs,
      finishReason,
      raw: data
    };
    if (toolCalls.length > 0) {
      response.toolCalls = toolCalls;
    }
    return response;
  }
  /** Map Anthropic stop reasons to the library's finishReason vocabulary. */
  mapFinishReason(stopReason) {
    switch (stopReason) {
      case "end_turn":
        return "stop";
      case "max_tokens":
        return "length";
      case "tool_use":
        return "tool_use";
      default:
        return "error";
    }
  }
};
600
+
601
// src/providers/openai.ts
/**
 * Provider adapter for the OpenAI Chat Completions API.
 *
 * Fix vs. previous revision: `mapMessages` had no default case, so an
 * unknown role silently produced `undefined` (serialized as `null` and
 * surfacing later as an opaque API 400). It now fails fast with
 * AidError("invalid_request", ...), matching AnthropicAdapter.mapMessage.
 */
var OpenAIAdapter = class {
  name = "openai";
  supportsImages = true;
  supportsTools = true;
  apiKey;
  baseUrl;
  /**
   * @param {{ apiKey: string, baseUrl?: string }} config
   */
  constructor(config) {
    this.apiKey = config.apiKey;
    this.baseUrl = config.baseUrl ?? "https://api.openai.com/v1";
  }
  /**
   * Non-streaming chat completion against /chat/completions.
   * @throws {AidError} on timeout, network failure, or non-2xx status
   *   (via mapHttpError).
   */
  async call(request) {
    const startTime = Date.now();
    const messages = this.mapMessages(request.messages);
    const body = this.buildRequestBody(request, messages);
    const response = await fetchWithTimeout(
      `${this.baseUrl}/chat/completions`,
      {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          "Authorization": `Bearer ${this.apiKey}`
        },
        body: JSON.stringify(body)
      },
      request.options.timeout
    );
    if (!response.ok) {
      const errorBody = await response.text();
      throw mapHttpError(response.status, errorBody, this.name);
    }
    const data = await response.json();
    const latencyMs = Date.now() - startTime;
    return this.normalizeResponse(data, latencyMs);
  }
  /**
   * Streaming chat completion. Reads the SSE byte stream incrementally and
   * yields a delta chunk per content fragment plus a terminal chunk
   * (done: true, with the assembled response) on the "[DONE]" sentinel.
   */
  stream(request) {
    const adapter = this;
    return new AiStream(async function* () {
      const startTime = Date.now();
      const messages = adapter.mapMessages(request.messages);
      const body = adapter.buildRequestBody(request, messages);
      body.stream = true;
      // Ask OpenAI to append a usage chunk so token counts are available.
      body.stream_options = { include_usage: true };
      const response = await fetchWithTimeout(
        `${adapter.baseUrl}/chat/completions`,
        {
          method: "POST",
          headers: {
            "Content-Type": "application/json",
            "Authorization": `Bearer ${adapter.apiKey}`
          },
          body: JSON.stringify(body)
        },
        request.options.timeout
      );
      if (!response.ok) {
        const errorBody = await response.text();
        throw mapHttpError(response.status, errorBody, adapter.name);
      }
      if (!response.body) {
        throw new AidError("provider_error", "Response body is null", {
          provider: adapter.name
        });
      }
      const reader = response.body.getReader();
      const decoder = new TextDecoder();
      let buffer = "";
      let accumulated = "";
      let model = request.model;
      let inputTokens = 0;
      let outputTokens = 0;
      let totalTokens = 0;
      let finishReason = "stop";
      try {
        while (true) {
          const { done, value } = await reader.read();
          if (done) break;
          buffer += decoder.decode(value, { stream: true });
          const lines = buffer.split("\n");
          // Keep any partial trailing line in the buffer for the next read.
          buffer = lines.pop() ?? "";
          for (const line of lines) {
            const trimmed = line.trim();
            // Skip blanks and SSE comments/keep-alives.
            if (!trimmed || trimmed.startsWith(":")) continue;
            if (!trimmed.startsWith("data: ")) continue;
            const data = trimmed.slice(6);
            if (data === "[DONE]") {
              const latencyMs = Date.now() - startTime;
              const aiResponse = {
                text: accumulated,
                model,
                tokens: {
                  input: inputTokens,
                  output: outputTokens,
                  total: totalTokens
                },
                cost: calculateCost(model, inputTokens, outputTokens),
                latencyMs,
                finishReason,
                raw: { streaming: true }
              };
              yield {
                text: accumulated,
                delta: "",
                done: true,
                response: aiResponse
              };
              return;
            }
            let chunk;
            try {
              chunk = JSON.parse(data);
            } catch {
              // Tolerate malformed frames rather than killing the stream.
              continue;
            }
            if (chunk.model) {
              model = chunk.model;
            }
            if (chunk.usage) {
              inputTokens = chunk.usage.prompt_tokens ?? 0;
              outputTokens = chunk.usage.completion_tokens ?? 0;
              totalTokens = chunk.usage.total_tokens ?? 0;
            }
            const choice = chunk.choices?.[0];
            if (!choice) continue;
            if (choice.finish_reason) {
              finishReason = adapter.mapFinishReason(choice.finish_reason);
            }
            const delta = choice.delta?.content ?? "";
            if (delta) {
              accumulated += delta;
              yield {
                text: accumulated,
                delta,
                done: false
              };
            }
          }
        }
      } finally {
        reader.releaseLock();
      }
    });
  }
  /**
   * Convert provider-neutral messages to the OpenAI wire format.
   * Structured (non-string) content is JSON-stringified.
   * @throws {AidError} for roles other than system/user/assistant/tool.
   */
  mapMessages(messages) {
    return messages.map((msg) => {
      switch (msg.role) {
        case "system":
          return { role: "system", content: msg.content };
        case "user":
          return {
            role: "user",
            content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content)
          };
        case "assistant":
          return {
            role: "assistant",
            content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content)
          };
        case "tool":
          return {
            role: "tool",
            content: msg.content,
            tool_call_id: msg.toolCallId
          };
        default:
          // Fail fast on unknown roles instead of emitting `undefined`
          // (which serialized as null and produced an opaque API error).
          throw new AidError("invalid_request", `Unsupported message role: ${msg.role}`, {
            provider: this.name
          });
      }
    });
  }
  /**
   * Build the /chat/completions JSON body. Uses `max_completion_tokens`
   * (the current replacement for the deprecated `max_tokens`).
   */
  buildRequestBody(request, messages) {
    const body = {
      model: request.model,
      messages,
      // undefined values are dropped by JSON.stringify.
      temperature: request.options.temperature,
      top_p: request.options.topP
    };
    body.max_completion_tokens = request.options.maxTokens;
    if (request.options.stop && request.options.stop.length > 0) {
      body.stop = request.options.stop;
    }
    if (request.tools && request.tools.length > 0) {
      body.tools = request.tools.map((tool) => ({
        type: "function",
        function: {
          name: tool.name,
          description: tool.description,
          parameters: tool.parameters
        }
      }));
    }
    return body;
  }
  /**
   * Normalize the OpenAI JSON response (first choice only) into the
   * library's AiResponse shape, including usage and computed cost.
   */
  normalizeResponse(data, latencyMs) {
    const choice = data.choices[0];
    const text = choice.message.content ?? "";
    const finishReason = this.mapFinishReason(choice.finish_reason);
    const inputTokens = data.usage.prompt_tokens;
    const outputTokens = data.usage.completion_tokens;
    const toolCalls = this.extractToolCalls(choice);
    return {
      text,
      model: data.model,
      tokens: {
        input: inputTokens,
        output: outputTokens,
        total: data.usage.total_tokens
      },
      cost: calculateCost(data.model, inputTokens, outputTokens),
      latencyMs,
      finishReason,
      toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
      raw: data
    };
  }
  /** Map OpenAI finish reasons to the library's finishReason vocabulary. */
  mapFinishReason(reason) {
    switch (reason) {
      case "stop":
        return "stop";
      case "length":
        return "length";
      case "tool_calls":
        return "tool_use";
      default:
        return "error";
    }
  }
  /**
   * Extract tool calls from a choice; arguments arrive as a JSON string
   * and are parsed into an object. Returns [] when none are present.
   */
  extractToolCalls(choice) {
    if (!choice.message.tool_calls) {
      return [];
    }
    return choice.message.tool_calls.map((tc) => ({
      id: tc.id,
      name: tc.function.name,
      args: JSON.parse(tc.function.arguments)
    }));
  }
};
836
+
837
// src/core/tool-loop.ts
/**
 * Drive the model/tool round-trip loop: while the model keeps requesting
 * tools, execute them, append the results to the transcript, and call the
 * model again — up to `maxIterations` rounds.
 *
 * Returns `{ response, messages, iterations, toolCalls }`; throws an
 * AidError("tool_loop_limit") if the model still wants tools after the
 * final permitted round.
 */
async function executeToolLoop(options) {
  const {
    call,
    request,
    tools,
    maxIterations,
    initialResponse,
    onToolCall,
    onToolResult,
    onLlmCall
  } = options;
  // True when a response is asking for tools we still need to run.
  const hasPendingTools = (resp) => Boolean(
    resp && resp.finishReason === "tool_use" && resp.toolCalls && resp.toolCalls.length > 0
  );
  if (!hasPendingTools(initialResponse)) {
    // Nothing to do — hand back the transcript untouched.
    return {
      response: initialResponse ?? {},
      messages: [...request.messages],
      iterations: 0,
      toolCalls: []
    };
  }
  const handlerByName = new Map(tools.map((t) => [t.name, t.handler]));
  const transcript = [...request.messages];
  const invoked = [];
  let response = initialResponse;
  let rounds = 0;
  // Execute a single tool call, returning its tool_result block.
  // Handler errors and unknown tools become isError results, never throws.
  const runTool = async (tc) => {
    invoked.push(tc);
    onToolCall?.(tc);
    const handler = handlerByName.get(tc.name);
    if (!handler) {
      const content = `Error: unknown tool '${tc.name}'`;
      onToolResult?.(tc, content, 0);
      return { type: "tool_result", toolUseId: tc.id, content, isError: true };
    }
    const begin = Date.now();
    try {
      const content = await handler(tc.args);
      onToolResult?.(tc, content, Date.now() - begin);
      return { type: "tool_result", toolUseId: tc.id, content };
    } catch (err) {
      const content = `Error: ${err instanceof Error ? err.message : String(err)}`;
      onToolResult?.(tc, content, Date.now() - begin);
      return { type: "tool_result", toolUseId: tc.id, content, isError: true };
    }
  };
  while (hasPendingTools(response) && rounds < maxIterations) {
    rounds++;
    // Echo the assistant turn (text + tool_use blocks) into the transcript.
    const assistantContent = [];
    if (response.text) {
      assistantContent.push({ type: "text", text: response.text });
    }
    for (const tc of response.toolCalls) {
      assistantContent.push({ type: "tool_use", id: tc.id, name: tc.name, input: tc.args });
    }
    transcript.push({ role: "assistant", content: assistantContent });
    // Run each requested tool sequentially and collect the results.
    const results = [];
    for (const tc of response.toolCalls) {
      results.push(await runTool(tc));
    }
    transcript.push({ role: "user", content: results });
    response = await call({ ...request, messages: transcript });
    onLlmCall?.(response);
  }
  if (rounds >= maxIterations && response.finishReason === "tool_use") {
    throw new AidError("tool_loop_limit", `Tool loop exceeded maximum iterations (${maxIterations})`);
  }
  return {
    response,
    messages: transcript,
    iterations: rounds,
    toolCalls: invoked
  };
}
945
+
946
// src/core/conversation.ts
/**
 * Stateful multi-turn conversation wrapper over a raw `aiCall` function.
 * Maintains the message history, injects the system prompt on every turn,
 * and optionally trims history to `options.maxHistory` entries.
 */
var ConversationImpl = class _ConversationImpl {
  aiCall;
  systemPrompt;
  messages;
  options;
  /**
   * @param aiCall          (prompt, options) => Promise<AiResponse>
   * @param systemOrOptions Either a system-prompt string or an options
   *                        object ({ system?, model?, maxHistory?, ... }).
   * @param existingMessages Optional pre-seeded history (used by fork/load).
   */
  constructor(aiCall, systemOrOptions, existingMessages) {
    this.aiCall = aiCall;
    const isString = typeof systemOrOptions === "string";
    this.options = isString ? { system: systemOrOptions } : systemOrOptions ?? {};
    this.systemPrompt = isString ? systemOrOptions : systemOrOptions?.system ?? "";
    this.messages = existingMessages ?? [];
  }
  /**
   * Send a user message, record both it and the assistant's reply in
   * history, and return the full response. History is trimmed to the most
   * recent `maxHistory` entries when configured.
   */
  async say(message, options) {
    this.messages.push({ role: "user", content: message });
    const response = await this.aiCall(message, {
      ...options,
      model: this.options.model,
      system: this.systemPrompt || void 0,
      _messages: [...this.messages]
    });
    this.messages.push({ role: "assistant", content: response.text });
    const limit = this.options.maxHistory;
    if (limit && this.messages.length > limit) {
      this.messages = this.messages.slice(-limit);
    }
    return response;
  }
  /** Snapshot the conversation as a plain serializable object. */
  save() {
    return {
      system: this.systemPrompt,
      messages: [...this.messages],
      options: { ...this.options }
    };
  }
  /** Deep-enough copy: new instance with cloned options and messages. */
  fork() {
    const copiedMessages = this.messages.map((m) => ({ ...m }));
    return new _ConversationImpl(this.aiCall, { ...this.options }, copiedMessages);
  }
  /** Rehydrate a conversation from a `save()` snapshot. */
  static load(aiCall, serialized) {
    return new _ConversationImpl(aiCall, serialized.options, [...serialized.messages]);
  }
};
1000
+
1001
// src/core/ai.ts
// Merge call options across the four precedence layers:
// per-call > instance base options > global config > library defaults.
// Scalar settings take the first defined value; tools and middleware are
// additive, accumulating from defaults outward to the per-call layer.
function resolveOptions(baseOptions, perCallOptions) {
  const global = getGlobalConfig();
  const pick = (key) =>
    perCallOptions?.[key] ?? baseOptions?.[key] ?? global[key] ?? DEFAULT_CONFIG[key];
  const concat = (key) => [
    ...DEFAULT_CONFIG[key],
    ...global[key],
    ...baseOptions?.[key] ?? [],
    ...perCallOptions?.[key] ?? []
  ];
  return {
    model: pick("model"),
    temperature: pick("temperature"),
    maxTokens: pick("maxTokens"),
    topP: pick("topP"),
    stop: pick("stop"),
    timeout: pick("timeout"),
    tools: concat("tools"),
    middleware: concat("middleware")
  };
}
1033
// Assemble the provider-agnostic request object for one call.
// When an explicit message history (`_messages`) is supplied it is used
// verbatim; otherwise a single user message is built from prompt + context.
// A system message, when present, is always prepended. Tool definitions are
// included only when at least one tool is configured.
function buildProviderRequest(prompt, options, baseOptions, resolved, modelId) {
  // Per-call values take precedence over the instance-level base options.
  const system = options?.system ?? baseOptions?.system;
  const context = options?.context ?? baseOptions?.context;
  const existing = options?._messages ?? baseOptions?._messages;
  const messageList = existing
    ? [...existing]
    : [{ role: "user", content: context ? `${context}\n\n${prompt}` : prompt }];
  if (system) {
    messageList.unshift({ role: "system", content: system });
  }
  const request = {
    model: modelId,
    messages: messageList,
    options: resolved
  };
  // Strip tools down to their wire-format definition (no run() handler).
  const toolDefinitions = resolved.tools.map(({ name, description, parameters }) => ({
    name,
    description,
    parameters
  }));
  if (toolDefinitions.length > 0) {
    request.tools = toolDefinitions;
  }
  return request;
}
1064
// Look up (or lazily construct and cache) the adapter for a provider.
// API keys come from explicit config first, then from the environment.
// Throws AidError("auth_error") when no key is available and
// AidError("invalid_request") for an unrecognized provider name.
function getAdapter(providerName, config, adapterCache) {
  const hit = adapterCache.get(providerName);
  if (hit) {
    return hit;
  }
  let adapter;
  switch (providerName) {
    case "anthropic": {
      const apiKey = config.providers.anthropic?.apiKey ?? process.env.ANTHROPIC_API_KEY;
      if (!apiKey) {
        throw new AidError(
          "auth_error",
          "No API key found for Anthropic. Set ANTHROPIC_API_KEY or configure providers.anthropic.apiKey."
        );
      }
      adapter = new AnthropicAdapter({
        apiKey,
        baseUrl: config.providers.anthropic?.baseUrl
      });
      break;
    }
    case "openai": {
      const apiKey = config.providers.openai?.apiKey ?? process.env.OPENAI_API_KEY;
      if (!apiKey) {
        throw new AidError(
          "auth_error",
          "No API key found for OpenAI. Set OPENAI_API_KEY or configure providers.openai.apiKey."
        );
      }
      adapter = new OpenAIAdapter({
        apiKey,
        baseUrl: config.providers.openai?.baseUrl
      });
      break;
    }
    default:
      throw new AidError(
        "invalid_request",
        `Unknown provider: ${providerName}`
      );
  }
  adapterCache.set(providerName, adapter);
  return adapter;
}
1103
// Convert a (subset of a) zod object schema into a JSON Schema object for
// tool parameters. Supports string/number/boolean/array/enum fields plus
// optional wrappers; anything unrecognized falls back to "string".
// NOTE(review): relies on zod v3 internals (`_def.typeName`, `_def.values`,
// `_def.innerType`) — revisit if the zod dependency is upgraded.
function zodToJsonSchema(schema) {
  const shape = schema.shape ?? schema._def?.shape?.();
  if (!shape) {
    // Not an object schema (or unrecognized zod version): empty object schema.
    return { type: "object", properties: {}, required: [] };
  }
  const properties = {};
  const required = [];
  for (const [key, fieldSchema] of Object.entries(shape)) {
    let field = fieldSchema;
    let typeName = field._def?.typeName;
    const isOptional = typeName === "ZodOptional";
    if (isOptional) {
      // Fix: unwrap the optional so the inner type is reflected in the
      // schema (previously optional fields always came out as "string").
      field = field._def.innerType ?? field;
      typeName = field._def?.typeName;
    }
    if (typeName === "ZodEnum") {
      properties[key] = { type: "string", enum: field._def.values };
      if (!isOptional) {
        required.push(key);
      }
      continue;
    }
    let jsonType = "string";
    if (typeName === "ZodNumber") jsonType = "number";
    else if (typeName === "ZodBoolean") jsonType = "boolean";
    else if (typeName === "ZodArray") jsonType = "array";
    properties[key] = { type: jsonType };
    if (!isOptional) {
      required.push(key);
    }
  }
  return { type: "object", properties, required };
}
1129
/**
 * Build an `ai` callable bound to `baseOptions`.
 *
 * The returned value is the core call function itself, augmented (via
 * Object.assign at the bottom) with helpers: `with`, `safe`, `stream`,
 * `model`, `multi`, `race`, `all`, `batch`, `extract`, `classify`, `json`,
 * `summarize`, `translate`, `rewrite`, and `conversation`.
 *
 * @param baseOptions instance-level defaults layered under per-call options
 * @returns the augmented ai function
 */
function createAi(baseOptions) {
  // One adapter per provider name, shared by every call on this instance.
  const adapterCache = /* @__PURE__ */ new Map();
  // Core request path: resolve options, pick the provider adapter, build the
  // wire request, execute (through the middleware pipeline when configured),
  // then hand off to the tool loop when the model requested tool use.
  async function aiCall(prompt, options) {
    const resolved = resolveOptions(baseOptions, options);
    const { providerName, modelId } = resolveProvider(resolved.model);
    const config = getGlobalConfig();
    const adapter = getAdapter(providerName, config, adapterCache);
    const request = buildProviderRequest(prompt, options, baseOptions, resolved, modelId);
    let response;
    if (resolved.middleware.length > 0) {
      response = await executePipeline(
        resolved.middleware,
        request,
        (req) => adapter.call(req)
      );
    } else {
      response = await adapter.call(request);
    }
    if (resolved.tools.length > 0 && response.finishReason === "tool_use" && response.toolCalls && response.toolCalls.length > 0) {
      // Tool follow-up calls must run through the same middleware pipeline
      // as the initial call, so rebuild the call function accordingly.
      const callFn = resolved.middleware.length > 0 ? (req) => executePipeline(resolved.middleware, req, (r) => adapter.call(r)) : (req) => adapter.call(req);
      const loopResult = await executeToolLoop({
        call: callFn,
        request,
        tools: resolved.tools,
        maxIterations: 10,
        initialResponse: response
      });
      return loopResult.response;
    }
    return response;
  }
  // Returns a fresh ai instance with `options` layered over this instance's
  // base options (shallow per-key override, not a deep merge).
  function withOptions(options) {
    const merged = { ...baseOptions, ...options };
    return createAi(merged);
  }
  // Go-style error handling: resolves to [response, null] or [null, Error]
  // and never rejects.
  async function safe(prompt, options) {
    try {
      const response = await aiCall(prompt, options);
      return [response, null];
    } catch (err) {
      const error = err instanceof Error ? err : new Error(String(err));
      return [null, error];
    }
  }
  // Streaming variant. NOTE(review): middleware and the tool loop are not
  // applied on this path — confirm whether that is intentional.
  function stream(prompt, options) {
    const resolved = resolveOptions(baseOptions, options);
    const { providerName, modelId } = resolveProvider(resolved.model);
    const config = getGlobalConfig();
    const adapter = getAdapter(providerName, config, adapterCache);
    const request = buildProviderRequest(prompt, options, baseOptions, resolved, modelId);
    return adapter.stream(request);
  }
  // Entry point of the fluent builder: ai.model("...").system("...").run(p).
  function model(name) {
    return buildFluent({ model: name });
  }
  // Immutable fluent builder: every setter returns a new builder carrying
  // the accumulated per-call options; run()/stream() execute them.
  function buildFluent(opts) {
    return {
      system(prompt) {
        return buildFluent({ ...opts, system: prompt });
      },
      temperature(value) {
        return buildFluent({ ...opts, temperature: value });
      },
      maxTokens(value) {
        return buildFluent({ ...opts, maxTokens: value });
      },
      run(prompt) {
        return aiCall(prompt, opts);
      },
      stream(prompt) {
        const resolved = resolveOptions(baseOptions, opts);
        const result = resolveProvider(resolved.model);
        const config = getGlobalConfig();
        const adapter = getAdapter(result.providerName, config, adapterCache);
        const request = buildProviderRequest(prompt, opts, baseOptions, resolved, result.modelId);
        return adapter.stream(request);
      }
    };
  }
  // Fan the same prompt out to several models in parallel. Individual
  // failures are reported positionally as error-shaped responses; the whole
  // call throws only if every model failed.
  async function multi(prompt, models) {
    const promises = models.map(
      (m) => aiCall(prompt, { model: m })
    );
    const results = await Promise.allSettled(promises);
    const responses = results.map((result, index) => {
      if (result.status === "fulfilled") {
        return result.value;
      }
      const errorMessage = result.reason instanceof Error ? result.reason.message : String(result.reason);
      // Synthesize an error-shaped response so the array stays positional.
      return {
        text: errorMessage,
        model: models[index],
        tokens: { input: 0, output: 0, total: 0 },
        cost: 0,
        latencyMs: 0,
        finishReason: "error",
        raw: result.reason
      };
    });
    const allFailed = results.every((r) => r.status === "rejected");
    if (allFailed && results.length > 0) {
      const firstRejected = results[0];
      if (firstRejected.status === "rejected") {
        throw firstRejected.reason;
      }
    }
    return responses;
  }
  // First model to settle wins (Promise.race: a rejection can win too).
  async function race(prompt, models) {
    const promises = models.map((m) => aiCall(prompt, { model: m }));
    return Promise.race(promises);
  }
  // Thin wrapper over Promise.all for awaiting several ai calls at once.
  async function all(promises) {
    return Promise.all(promises);
  }
  // Run many prompts through a fixed-size pool of workers, preserving
  // result order by index.
  async function batch(prompts, options) {
    const concurrency = options?.concurrency ?? 10;
    const failFast = options?.failFast ?? false;
    const onProgress = options?.onProgress;
    const results = new Array(prompts.length);
    let completed = 0;
    let abortError = null;
    // Shared cursor; safe because workers only touch it synchronously
    // between awaits (single-threaded event loop).
    let nextIndex = 0;
    async function worker() {
      while (nextIndex < prompts.length) {
        if (failFast && abortError) {
          return;
        }
        const index = nextIndex;
        nextIndex++;
        try {
          results[index] = await aiCall(prompts[index]);
        } catch (err) {
          if (failFast) {
            abortError = err instanceof Error ? err : new Error(String(err));
            throw abortError;
          }
          // Non-failFast mode records an error-shaped response in place.
          results[index] = {
            text: err instanceof Error ? err.message : String(err),
            model: "unknown",
            tokens: { input: 0, output: 0, total: 0 },
            cost: 0,
            latencyMs: 0,
            finishReason: "error",
            raw: err
          };
        }
        completed++;
        if (onProgress) {
          onProgress(completed, prompts.length);
        }
      }
    }
    const workerCount = Math.min(concurrency, prompts.length);
    const workers = [];
    for (let i = 0; i < workerCount; i++) {
      workers.push(worker());
    }
    if (failFast) {
      await Promise.all(workers);
    } else {
      // allSettled keeps remaining workers' rejections from going unhandled.
      await Promise.allSettled(workers);
    }
    if (failFast && abortError) {
      throw abortError;
    }
    return results;
  }
  // Convenience: summarize `text`, optionally constrained by style/length.
  async function summarize(text, options) {
    const style = options?.style ?? "paragraph";
    const systemPrompt = `Summarize the following text. Style: ${style}.`;
    // Strip the helper-only keys before forwarding to aiCall.
    const { style: _style, maxLength: _maxLength, ...restOptions } = options ?? {};
    const callOptions = {
      ...restOptions,
      system: systemPrompt
    };
    if (options?.maxLength) {
      callOptions.system = `${systemPrompt} Keep it under ${options.maxLength} characters.`;
    }
    const response = await aiCall(text, callOptions);
    return response.text;
  }
  // Convenience: translate `text` into `language`, returning only the text.
  async function translate(text, language, options) {
    const response = await aiCall(text, {
      ...options,
      system: `Translate the following text to ${language}. Output only the translation.`
    });
    return response.text;
  }
  // Convenience: rewrite `text` in the requested style.
  async function rewrite(text, style, options) {
    const response = await aiCall(text, {
      ...options,
      system: `Rewrite the following text in a ${style} style.`
    });
    return response.text;
  }
  // Structured extraction: expose the (zod) schema as a single tool and
  // validate the model's tool arguments against it. Calls the adapter
  // directly (no middleware/tool loop).
  async function extract(text, schema, options) {
    const zodSchema = schema;
    const jsonSchema = zodToJsonSchema(zodSchema);
    const resolved = resolveOptions(baseOptions, options);
    const { providerName, modelId } = resolveProvider(resolved.model);
    const config = getGlobalConfig();
    const adapter = getAdapter(providerName, config, adapterCache);
    const messages = [{ role: "user", content: text }];
    if (options?.system) {
      messages.unshift({ role: "system", content: options.system });
    }
    const request = {
      model: modelId,
      messages,
      options: resolved,
      tools: [{
        name: "extract",
        description: "Extract structured data from the text",
        parameters: jsonSchema
      }]
    };
    const response = await adapter.call(request);
    if (response.toolCalls && response.toolCalls.length > 0) {
      return zodSchema.parse(response.toolCalls[0].args);
    }
    throw new AidError("parse_error", "Model did not return a tool_use response for extract()");
  }
  // Single-label classification via a forced tool call with an enum schema.
  async function classify(text, categories, options) {
    const resolved = resolveOptions(baseOptions, options);
    const { providerName, modelId } = resolveProvider(resolved.model);
    const config = getGlobalConfig();
    const adapter = getAdapter(providerName, config, adapterCache);
    const classifySchema = {
      type: "object",
      properties: {
        category: { type: "string", enum: [...categories] }
      },
      required: ["category"]
    };
    const systemPrompt = `Classify the following text into one of these categories: ${categories.join(", ")}. Use the classify tool.`;
    const messages = [
      { role: "system", content: systemPrompt },
      { role: "user", content: text }
    ];
    const request = {
      model: modelId,
      messages,
      options: resolved,
      tools: [{
        name: "classify",
        description: "Report the classification result",
        parameters: classifySchema
      }]
    };
    const response = await adapter.call(request);
    if (response.toolCalls && response.toolCalls.length > 0) {
      return response.toolCalls[0].args.category;
    }
    throw new AidError("parse_error", "Model did not return a tool_use response for classify()");
  }
  // Ask for raw JSON and parse it, tolerating a ```json code fence.
  async function json(prompt, options) {
    const response = await aiCall(prompt, {
      ...options,
      system: "Respond with valid JSON only. No markdown, no explanation."
    });
    let text = response.text.trim();
    if (text.startsWith("```")) {
      text = text.replace(/^```(?:json)?\s*\n?/, "").replace(/\n?```\s*$/, "");
    }
    try {
      return JSON.parse(text);
    } catch {
      throw new AidError("parse_error", `Failed to parse JSON from response: ${response.text.slice(0, 100)}`);
    }
  }
  // ai.conversation("system prompt") or ai.conversation({ ...options })
  function conversationFn(systemOrOptions) {
    return new ConversationImpl(aiCall, systemOrOptions);
  }
  // ai.conversation.load(saved) — rehydrate a serialized conversation.
  function conversationLoad(serialized) {
    return ConversationImpl.load(aiCall, serialized);
  }
  const conversation = Object.assign(conversationFn, {
    load: conversationLoad
  });
  // Attach every helper to the callable itself.
  const ai2 = Object.assign(aiCall, {
    with: withOptions,
    safe,
    stream,
    model,
    multi,
    race,
    all,
    batch,
    extract,
    classify,
    json,
    summarize,
    translate,
    rewrite,
    conversation
  });
  return ai2;
}
1428
// Default ai instance built with no base options; configuration comes from
// the global config / environment at call time.
var ai = createAi();

export {
  AiStream,
  estimateCost,
  executeToolLoop,
  createAi,
  ai
};