@threaded/ai 1.0.29 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112) hide show
  1. package/README.md +240 -0
  2. package/dist/approval.d.ts +18 -0
  3. package/dist/approval.d.ts.map +1 -0
  4. package/dist/approval.js +35 -0
  5. package/dist/approval.js.map +1 -0
  6. package/dist/composition/compose.d.ts +3 -0
  7. package/dist/composition/compose.d.ts.map +1 -0
  8. package/dist/composition/compose.js +38 -0
  9. package/dist/composition/compose.js.map +1 -0
  10. package/dist/composition/model.d.ts +9 -0
  11. package/dist/composition/model.d.ts.map +1 -0
  12. package/dist/composition/model.js +192 -0
  13. package/dist/composition/model.js.map +1 -0
  14. package/dist/composition/retry.d.ts +6 -0
  15. package/dist/composition/retry.d.ts.map +1 -0
  16. package/dist/composition/retry.js +18 -0
  17. package/dist/composition/retry.js.map +1 -0
  18. package/dist/composition/scope.d.ts +3 -0
  19. package/dist/composition/scope.d.ts.map +1 -0
  20. package/dist/composition/scope.js +83 -0
  21. package/dist/composition/scope.js.map +1 -0
  22. package/dist/composition/tap.d.ts +3 -0
  23. package/dist/composition/tap.d.ts.map +1 -0
  24. package/dist/composition/tap.js +7 -0
  25. package/dist/composition/tap.js.map +1 -0
  26. package/dist/composition/when.d.ts +3 -0
  27. package/dist/composition/when.d.ts.map +1 -0
  28. package/dist/composition/when.js +9 -0
  29. package/dist/composition/when.js.map +1 -0
  30. package/dist/embed.d.ts +16 -0
  31. package/dist/embed.d.ts.map +1 -0
  32. package/dist/embed.js +72 -0
  33. package/dist/embed.js.map +1 -0
  34. package/dist/examples.d.ts +2 -0
  35. package/dist/examples.d.ts.map +1 -0
  36. package/dist/examples.js +6 -0
  37. package/dist/examples.js.map +1 -0
  38. package/dist/helpers.d.ts +17 -0
  39. package/dist/helpers.d.ts.map +1 -0
  40. package/dist/helpers.js +104 -0
  41. package/dist/helpers.js.map +1 -0
  42. package/dist/image-model-schema.d.ts +19 -0
  43. package/dist/image-model-schema.d.ts.map +1 -0
  44. package/dist/image-model-schema.js +103 -0
  45. package/dist/image-model-schema.js.map +1 -0
  46. package/dist/image.d.ts +3 -0
  47. package/dist/image.d.ts.map +1 -0
  48. package/dist/image.js +120 -0
  49. package/dist/image.js.map +1 -0
  50. package/dist/index.d.ts +18 -350
  51. package/dist/index.d.ts.map +1 -0
  52. package/dist/index.js +17 -2061
  53. package/dist/index.js.map +1 -1
  54. package/dist/mcp.d.ts +3 -0
  55. package/dist/mcp.d.ts.map +1 -0
  56. package/dist/mcp.js +29 -0
  57. package/dist/mcp.js.map +1 -0
  58. package/dist/providers/anthropic.d.ts +3 -0
  59. package/dist/providers/anthropic.d.ts.map +1 -0
  60. package/dist/providers/anthropic.js +226 -0
  61. package/dist/providers/anthropic.js.map +1 -0
  62. package/dist/providers/google.d.ts +3 -0
  63. package/dist/providers/google.d.ts.map +1 -0
  64. package/dist/providers/google.js +244 -0
  65. package/dist/providers/google.js.map +1 -0
  66. package/dist/providers/huggingface.d.ts +3 -0
  67. package/dist/providers/huggingface.d.ts.map +1 -0
  68. package/dist/providers/huggingface.js +59 -0
  69. package/dist/providers/huggingface.js.map +1 -0
  70. package/dist/providers/index.d.ts +3 -0
  71. package/dist/providers/index.d.ts.map +1 -0
  72. package/dist/providers/index.js +29 -0
  73. package/dist/providers/index.js.map +1 -0
  74. package/dist/providers/local.d.ts +3 -0
  75. package/dist/providers/local.d.ts.map +1 -0
  76. package/dist/providers/local.js +152 -0
  77. package/dist/providers/local.js.map +1 -0
  78. package/dist/providers/openai.d.ts +3 -0
  79. package/dist/providers/openai.d.ts.map +1 -0
  80. package/dist/providers/openai.js +165 -0
  81. package/dist/providers/openai.js.map +1 -0
  82. package/dist/providers/xai.d.ts +3 -0
  83. package/dist/providers/xai.d.ts.map +1 -0
  84. package/dist/providers/xai.js +161 -0
  85. package/dist/providers/xai.js.map +1 -0
  86. package/dist/schema.d.ts +7 -0
  87. package/dist/schema.d.ts.map +1 -0
  88. package/dist/schema.js +44 -0
  89. package/dist/schema.js.map +1 -0
  90. package/dist/thread.d.ts +25 -0
  91. package/dist/thread.d.ts.map +1 -0
  92. package/dist/thread.js +87 -0
  93. package/dist/thread.js.map +1 -0
  94. package/dist/types.d.ts +193 -0
  95. package/dist/types.d.ts.map +1 -0
  96. package/dist/types.js +8 -0
  97. package/dist/types.js.map +1 -0
  98. package/dist/utils/rateLimited.d.ts +27 -0
  99. package/dist/utils/rateLimited.d.ts.map +1 -0
  100. package/dist/utils/rateLimited.js +74 -0
  101. package/dist/utils/rateLimited.js.map +1 -0
  102. package/dist/utils.d.ts +8 -0
  103. package/dist/utils.d.ts.map +1 -0
  104. package/dist/utils.js +78 -0
  105. package/dist/utils.js.map +1 -0
  106. package/package.json +34 -12
  107. package/.claude/settings.local.json +0 -15
  108. package/.lore +0 -65
  109. package/dist/index.cjs +0 -2137
  110. package/dist/index.cjs.map +0 -1
  111. package/dist/index.d.cts +0 -350
  112. package/tsconfig.json +0 -29
package/dist/index.cjs DELETED
@@ -1,2137 +0,0 @@
1
- "use strict";
2
- var __create = Object.create;
3
- var __defProp = Object.defineProperty;
4
- var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
5
- var __getOwnPropNames = Object.getOwnPropertyNames;
6
- var __getProtoOf = Object.getPrototypeOf;
7
- var __hasOwnProp = Object.prototype.hasOwnProperty;
8
- var __export = (target, all) => {
9
- for (var name in all)
10
- __defProp(target, name, { get: all[name], enumerable: true });
11
- };
12
- var __copyProps = (to, from, except, desc) => {
13
- if (from && typeof from === "object" || typeof from === "function") {
14
- for (let key of __getOwnPropNames(from))
15
- if (!__hasOwnProp.call(to, key) && key !== except)
16
- __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
17
- }
18
- return to;
19
- };
20
- var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
21
- // If the importer is in node compatibility mode or this is not an ESM
22
- // file that has been converted to a CommonJS file using a Babel-
23
- // compatible transform (i.e. "__esModule" has not been set), then set
24
- // "default" to the CommonJS "module.exports" for node compatibility.
25
- isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
26
- mod
27
- ));
28
- var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
29
-
30
- // src/index.ts
31
- var index_exports = {};
32
- __export(index_exports, {
33
- IMAGE_EDIT_MODEL_SCHEMA: () => IMAGE_EDIT_MODEL_SCHEMA,
34
- IMAGE_MODEL_SCHEMA: () => IMAGE_MODEL_SCHEMA,
35
- Inherit: () => Inherit,
36
- addUsage: () => addUsage,
37
- appendToLastRequest: () => appendToLastRequest,
38
- compose: () => compose,
39
- convertMCPSchemaToToolSchema: () => convertMCPSchemaToToolSchema,
40
- convertStandardSchemaToJsonSchema: () => convertStandardSchemaToJsonSchema,
41
- convertStandardSchemaToSchemaProperties: () => convertStandardSchemaToSchemaProperties,
42
- createMCPTools: () => createMCPTools,
43
- embed: () => embed,
44
- everyNMessages: () => everyNMessages,
45
- everyNTokens: () => everyNTokens,
46
- generateApprovalToken: () => generateApprovalToken,
47
- generateImage: () => generateImage,
48
- getDefaultConfig: () => getDefaultConfig,
49
- getKey: () => getKey,
50
- getModelConfig: () => getModelConfig,
51
- getOrCreateThread: () => getOrCreateThread,
52
- isStandardSchema: () => isStandardSchema,
53
- maxCalls: () => maxCalls,
54
- model: () => model,
55
- noToolsCalled: () => noToolsCalled,
56
- normalizeSchema: () => normalizeSchema,
57
- onApprovalRequested: () => onApprovalRequested,
58
- onApprovalResolved: () => onApprovalResolved,
59
- parseModelName: () => parseModelName,
60
- rateLimited: () => rateLimited,
61
- removeApprovalListener: () => removeApprovalListener,
62
- requestApproval: () => requestApproval,
63
- resolveApproval: () => resolveApproval,
64
- retry: () => retry,
65
- scope: () => scope,
66
- setKeys: () => setKeys,
67
- tap: () => tap,
68
- toolConfigToToolDefinition: () => toolConfigToToolDefinition,
69
- toolNotUsedInNTurns: () => toolNotUsedInNTurns,
70
- toolWasCalled: () => toolWasCalled,
71
- when: () => when
72
- });
73
- module.exports = __toCommonJS(index_exports);
74
-
75
- // src/schema.ts
76
- var import_zod = require("zod");
77
- var isStandardSchema = (schema) => {
78
- return schema && typeof schema === "object" && "~standard" in schema;
79
- };
80
- var convertStandardSchemaToJsonSchema = (standardSchema, name = "Schema") => {
81
- const jsonSchema = import_zod.z.toJSONSchema(standardSchema);
82
- return {
83
- name,
84
- schema: jsonSchema
85
- };
86
- };
87
- var convertMCPSchemaToToolSchema = (mcpSchema) => {
88
- if (!mcpSchema?.properties) return {};
89
- const convertProperty = (prop) => ({
90
- type: prop.type || "string",
91
- description: prop.description || "",
92
- ...prop.enum && { enum: prop.enum },
93
- ...prop.items && { items: convertProperty(prop.items) },
94
- ...prop.properties && {
95
- properties: Object.fromEntries(
96
- Object.entries(prop.properties).map(([k, v]) => [k, convertProperty(v)])
97
- )
98
- }
99
- });
100
- const result = {};
101
- for (const [key, value] of Object.entries(mcpSchema.properties)) {
102
- const prop = value;
103
- result[key] = {
104
- ...convertProperty(prop),
105
- optional: !mcpSchema.required?.includes(key)
106
- };
107
- }
108
- return result;
109
- };
110
- function normalizeSchema(schema, name) {
111
- if (isStandardSchema(schema)) {
112
- return convertStandardSchemaToJsonSchema(schema, name);
113
- }
114
- return schema;
115
- }
116
- var convertStandardSchemaToSchemaProperties = (standardSchema) => {
117
- const jsonSchema = import_zod.z.toJSONSchema(standardSchema);
118
- return convertMCPSchemaToToolSchema(jsonSchema);
119
- };
120
-
121
- // src/mcp.ts
122
- var createMCPTools = async (client) => {
123
- const serverInfo = client.getServerVersion();
124
- const serverName = serverInfo?.name;
125
- if (!serverName) {
126
- console.error("MCP server has no name? Skipping tool creation.");
127
- return [];
128
- }
129
- const toolsResponse = await client.listTools();
130
- return toolsResponse.tools.map((mcpTool) => {
131
- const prefixedName = `${serverName}_${mcpTool.name}`;
132
- return {
133
- name: prefixedName,
134
- description: `[${serverName}] ${mcpTool.description || ""}`,
135
- schema: convertMCPSchemaToToolSchema(mcpTool.inputSchema),
136
- execute: async (args) => {
137
- const result = await client.callTool({
138
- name: mcpTool.name,
139
- arguments: args
140
- });
141
- return result.content && Array.isArray(result.content) && result.content[0]?.text || JSON.stringify(result);
142
- }
143
- };
144
- });
145
- };
146
-
147
- // src/types.ts
148
- var Inherit = /* @__PURE__ */ ((Inherit2) => {
149
- Inherit2[Inherit2["Nothing"] = 0] = "Nothing";
150
- Inherit2[Inherit2["Conversation"] = 1] = "Conversation";
151
- Inherit2[Inherit2["Tools"] = 2] = "Tools";
152
- Inherit2[Inherit2["All"] = 3] = "All";
153
- return Inherit2;
154
- })(Inherit || {});
155
-
156
- // src/utils.ts
157
- var toolConfigToToolDefinition = (tool) => {
158
- const schema = isStandardSchema(tool.schema) ? convertStandardSchemaToSchemaProperties(tool.schema) : tool.schema;
159
- const properties = {};
160
- const required = [];
161
- for (const [key, prop] of Object.entries(schema)) {
162
- properties[key] = convertSchemaProperty(prop);
163
- if (!prop.optional) {
164
- required.push(key);
165
- }
166
- }
167
- return {
168
- type: "function",
169
- function: {
170
- name: tool.name,
171
- description: tool.description,
172
- parameters: {
173
- type: "object",
174
- properties,
175
- ...required.length > 0 && { required }
176
- }
177
- }
178
- };
179
- };
180
- var convertSchemaProperty = (prop) => {
181
- const result = {
182
- type: prop.type
183
- };
184
- if (prop.description) {
185
- result.description = prop.description;
186
- }
187
- if (prop.enum) {
188
- result.enum = prop.enum;
189
- }
190
- if (prop.items) {
191
- result.items = convertSchemaProperty(prop.items);
192
- }
193
- if (prop.properties) {
194
- result.properties = {};
195
- for (const [key, childProp] of Object.entries(prop.properties)) {
196
- result.properties[key] = convertSchemaProperty(childProp);
197
- }
198
- }
199
- return result;
200
- };
201
- var parseModelName = (model2) => {
202
- const parts = model2.split("/");
203
- if (parts.length === 1) {
204
- return { provider: "huggingface", model: parts[0] };
205
- }
206
- return {
207
- provider: parts[0],
208
- model: parts.slice(1).join("/")
209
- };
210
- };
211
- var globalKeys = {};
212
- var setKeys = (keys) => {
213
- globalKeys = { ...globalKeys, ...keys };
214
- };
215
- var getKey = (provider) => {
216
- const key = globalKeys[provider.toLowerCase()];
217
- if (!key) {
218
- throw new Error(`No API key configured for provider: ${provider}`);
219
- }
220
- return key;
221
- };
222
- var maxCalls = (toolConfig, maxCalls2) => ({
223
- ...toolConfig,
224
- _maxCalls: maxCalls2
225
- });
226
- var addUsage = (existing, promptTokens, completionTokens, totalTokens) => ({
227
- promptTokens: (existing?.promptTokens || 0) + promptTokens,
228
- completionTokens: (existing?.completionTokens || 0) + completionTokens,
229
- totalTokens: (existing?.totalTokens || 0) + totalTokens
230
- });
231
-
232
- // src/embed.ts
233
- var modelCache = /* @__PURE__ */ new Map();
234
- var embed = async (model2, text, config) => {
235
- if (model2.startsWith("openai/")) {
236
- const modelName = model2.replace("openai/", "");
237
- const apiKey = getKey("openai") || process.env.OPENAI_API_KEY;
238
- if (!apiKey) {
239
- throw new Error("OpenAI API key not found");
240
- }
241
- const body = {
242
- model: modelName,
243
- input: text
244
- };
245
- if (config?.dimensions) {
246
- body.dimensions = config.dimensions;
247
- }
248
- const response = await fetch("https://api.openai.com/v1/embeddings", {
249
- method: "POST",
250
- headers: {
251
- "Content-Type": "application/json",
252
- Authorization: `Bearer ${apiKey}`
253
- },
254
- body: JSON.stringify(body)
255
- });
256
- if (!response.ok) {
257
- const error = await response.text();
258
- throw new Error(`OpenAI API error: ${error}`);
259
- }
260
- const data = await response.json();
261
- return data.data[0].embedding;
262
- }
263
- try {
264
- const { pipeline } = await import("@huggingface/transformers");
265
- if (!modelCache.has(model2)) {
266
- const extractor2 = await pipeline("feature-extraction", model2, {
267
- dtype: "fp32"
268
- });
269
- modelCache.set(model2, extractor2);
270
- }
271
- const extractor = modelCache.get(model2);
272
- const result = await extractor(text, { pooling: "mean", normalize: true });
273
- return Array.from(result.data);
274
- } catch (error) {
275
- throw new Error(
276
- `huggingface transformers failed to load. install system dependencies or use openai models instead. original error: ${error.message}`
277
- );
278
- }
279
- };
280
-
281
- // src/image.ts
282
- var providerKeyEnvVars = {
283
- openai: "OPENAI_API_KEY",
284
- xai: "XAI_API_KEY",
285
- google: "GEMINI_API_KEY"
286
- };
287
- var getApiKey = (provider) => {
288
- try {
289
- return getKey(provider);
290
- } catch {
291
- const envVar = providerKeyEnvVars[provider];
292
- const key = envVar ? process.env[envVar] || "" : "";
293
- if (!key) throw new Error(`No API key found for provider: ${provider}`);
294
- return key;
295
- }
296
- };
297
- var generateOpenAICompatible = async (endpoint, modelName, prompt, apiKey, config) => {
298
- const isGptImage = modelName.startsWith("gpt-image");
299
- const body = {
300
- model: modelName,
301
- prompt
302
- };
303
- if (!isGptImage) {
304
- body.response_format = config?.responseFormat || "b64_json";
305
- }
306
- if (config?.n) body.n = config.n;
307
- if (config?.size) body.size = config.size;
308
- if (config?.quality) body.quality = config.quality;
309
- if (config?.style && !isGptImage) body.style = config.style;
310
- if (isGptImage) {
311
- if (config?.outputFormat) body.output_format = config.outputFormat;
312
- if (config?.outputCompression != null) body.output_compression = config.outputCompression;
313
- if (config?.background) body.background = config.background;
314
- if (config?.moderation) body.moderation = config.moderation;
315
- }
316
- const response = await fetch(endpoint, {
317
- method: "POST",
318
- headers: {
319
- "Content-Type": "application/json",
320
- Authorization: `Bearer ${apiKey}`
321
- },
322
- body: JSON.stringify(body)
323
- });
324
- if (!response.ok) {
325
- const error = await response.text();
326
- throw new Error(`API error: ${error}`);
327
- }
328
- const data = await response.json();
329
- const image = data.data[0];
330
- return {
331
- data: image.b64_json || image.url,
332
- revisedPrompt: image.revised_prompt
333
- };
334
- };
335
- var generateGoogle = async (modelName, prompt, apiKey, config) => {
336
- const endpoint = `https://generativelanguage.googleapis.com/v1beta/models/${modelName}:generateContent`;
337
- const body = {
338
- contents: [{ parts: [{ text: prompt }] }],
339
- generationConfig: {
340
- responseModalities: ["TEXT", "IMAGE"]
341
- }
342
- };
343
- const imageConfig = {};
344
- if (config?.aspectRatio) imageConfig.aspectRatio = config.aspectRatio;
345
- if (config?.imageSize) imageConfig.imageSize = config.imageSize;
346
- if (Object.keys(imageConfig).length > 0) {
347
- body.generationConfig.imageConfig = imageConfig;
348
- }
349
- const response = await fetch(endpoint, {
350
- method: "POST",
351
- headers: {
352
- "Content-Type": "application/json",
353
- "x-goog-api-key": apiKey
354
- },
355
- body: JSON.stringify(body)
356
- });
357
- if (!response.ok) {
358
- const error = await response.text();
359
- throw new Error(`Google API error: ${error}`);
360
- }
361
- const data = await response.json();
362
- const parts = data.candidates?.[0]?.content?.parts || [];
363
- const imagePart = parts.find((p) => p.inlineData);
364
- const textPart = parts.find((p) => p.text);
365
- if (!imagePart?.inlineData?.data) {
366
- throw new Error("No image data in response");
367
- }
368
- return {
369
- data: imagePart.inlineData.data,
370
- revisedPrompt: textPart?.text
371
- };
372
- };
373
- var generateImage = async (model2, prompt, config) => {
374
- const { provider, model: modelName } = parseModelName(model2);
375
- const providerLower = provider.toLowerCase();
376
- const apiKey = getApiKey(providerLower);
377
- switch (providerLower) {
378
- case "openai":
379
- return generateOpenAICompatible(
380
- "https://api.openai.com/v1/images/generations",
381
- modelName,
382
- prompt,
383
- apiKey,
384
- config
385
- );
386
- case "xai":
387
- return generateOpenAICompatible(
388
- "https://api.x.ai/v1/images/generations",
389
- modelName,
390
- prompt,
391
- apiKey,
392
- config
393
- );
394
- case "google":
395
- return generateGoogle(modelName, prompt, apiKey, config);
396
- default:
397
- throw new Error(`Unsupported image generation provider: ${provider}`);
398
- }
399
- };
400
-
401
- // src/image-model-schema.ts
402
- var IMAGE_MODEL_SCHEMA = {
403
- openai: {
404
- "dall-e-3": {
405
- size: {
406
- values: ["1024x1024", "1024x1792", "1792x1024"],
407
- default: "1024x1024",
408
- description: "Image dimensions"
409
- },
410
- quality: {
411
- values: ["standard", "hd"],
412
- default: "standard",
413
- description: "Image quality level"
414
- },
415
- style: {
416
- values: ["vivid", "natural"],
417
- default: "vivid",
418
- description: "Image style"
419
- }
420
- },
421
- "gpt-image-1.5": {
422
- size: {
423
- values: ["1024x1024", "1536x1024", "1024x1536", "auto"],
424
- default: "auto",
425
- description: "Image dimensions"
426
- },
427
- quality: {
428
- values: ["low", "medium", "high", "auto"],
429
- default: "auto",
430
- description: "Image quality level"
431
- },
432
- background: {
433
- values: ["transparent", "opaque", "auto"],
434
- default: "auto",
435
- description: "Background type"
436
- },
437
- moderation: {
438
- values: ["auto", "low"],
439
- default: "auto",
440
- description: "Content moderation level"
441
- }
442
- }
443
- },
444
- google: {
445
- "gemini-2.5-flash-image": {
446
- aspectRatio: {
447
- values: ["1:1", "3:4", "4:3", "9:16", "16:9"],
448
- default: "1:1",
449
- description: "Image aspect ratio"
450
- }
451
- },
452
- "gemini-3-pro-image-preview": {
453
- aspectRatio: {
454
- values: ["1:1", "3:4", "4:3", "9:16", "16:9"],
455
- default: "1:1",
456
- description: "Image aspect ratio"
457
- },
458
- imageSize: {
459
- values: ["1K", "2K"],
460
- default: "1K",
461
- description: "Output image size"
462
- }
463
- }
464
- },
465
- xai: {}
466
- };
467
- var IMAGE_EDIT_MODEL_SCHEMA = {
468
- openai: {
469
- "gpt-image-1.5": {
470
- size: {
471
- values: ["1024x1024", "1536x1024", "1024x1536", "auto"],
472
- default: "auto",
473
- description: "Output image size"
474
- },
475
- quality: {
476
- values: ["low", "medium", "high", "auto"],
477
- default: "auto",
478
- description: "Image quality level"
479
- },
480
- background: {
481
- values: ["transparent", "opaque", "auto"],
482
- default: "auto",
483
- description: "Background type"
484
- }
485
- }
486
- },
487
- google: {
488
- "gemini-3-pro-image-preview": {}
489
- }
490
- };
491
- function getModelConfig(provider, model2) {
492
- return IMAGE_MODEL_SCHEMA[provider]?.[model2] || null;
493
- }
494
- function getDefaultConfig(provider, model2) {
495
- const schema = getModelConfig(provider, model2);
496
- if (!schema) return {};
497
- const defaults = {};
498
- for (const [key, option] of Object.entries(schema)) {
499
- defaults[key] = option.default;
500
- }
501
- return defaults;
502
- }
503
-
504
- // src/providers/openai.ts
505
- var getApiKey2 = (configApiKey) => {
506
- if (configApiKey) return configApiKey;
507
- try {
508
- return getKey("openai");
509
- } catch {
510
- const key = process.env.OPENAI_API_KEY || "";
511
- if (!key) throw new Error("OpenAI API key not found");
512
- return key;
513
- }
514
- };
515
- var appendToolCalls = (toolCalls, tcchunklist) => {
516
- for (const tcchunk of tcchunklist) {
517
- while (toolCalls.length <= tcchunk.index) {
518
- toolCalls.push({
519
- id: "",
520
- type: "function",
521
- function: { name: "", arguments: "" }
522
- });
523
- }
524
- const tc = toolCalls[tcchunk.index];
525
- tc.id += tcchunk.id || "";
526
- tc.function.name += tcchunk.function?.name || "";
527
- tc.function.arguments += tcchunk.function?.arguments || "";
528
- }
529
- return toolCalls;
530
- };
531
- var callOpenAI = async (config, ctx) => {
532
- const { model: model2, instructions, schema, apiKey: configApiKey } = config;
533
- const apiKey = getApiKey2(configApiKey);
534
- const messages = [];
535
- if (instructions) {
536
- messages.push({ role: "system", content: instructions });
537
- }
538
- messages.push(...ctx.history);
539
- const body = {
540
- model: model2,
541
- messages,
542
- stream: !!ctx.stream,
543
- ...ctx.stream && { stream_options: { include_usage: true } }
544
- };
545
- if (schema) {
546
- body.response_format = {
547
- type: "json_schema",
548
- json_schema: {
549
- name: schema.name,
550
- schema: { ...schema.schema, additionalProperties: false },
551
- strict: true
552
- }
553
- };
554
- }
555
- if (ctx.tools && ctx.tools.length > 0) {
556
- body.tools = ctx.tools;
557
- body.tool_choice = "auto";
558
- }
559
- const response = await fetch("https://api.openai.com/v1/chat/completions", {
560
- method: "POST",
561
- headers: {
562
- "Content-Type": "application/json",
563
- Authorization: `Bearer ${apiKey}`
564
- },
565
- body: JSON.stringify(body),
566
- signal: ctx.abortSignal
567
- });
568
- if (!response.ok) {
569
- const error = await response.text();
570
- throw new Error(`OpenAI API error: ${error}`);
571
- }
572
- if (ctx.stream) {
573
- return handleOpenAIStream(response, ctx);
574
- }
575
- const data = await response.json();
576
- const choice = data.choices[0];
577
- const { message } = choice;
578
- const msg = {
579
- role: "assistant",
580
- content: message.content || ""
581
- };
582
- if (message.tool_calls) {
583
- msg.tool_calls = message.tool_calls;
584
- }
585
- return {
586
- ...ctx,
587
- lastResponse: msg,
588
- history: [...ctx.history, msg],
589
- usage: addUsage(ctx.usage, data.usage?.prompt_tokens || 0, data.usage?.completion_tokens || 0, data.usage?.total_tokens || 0)
590
- };
591
- };
592
- var handleOpenAIStream = async (response, ctx) => {
593
- const reader = response.body.getReader();
594
- const decoder = new TextDecoder();
595
- let fullContent = "";
596
- let toolCalls = [];
597
- let buffer = "";
598
- let streamUsage = null;
599
- try {
600
- while (true) {
601
- if (ctx.abortSignal?.aborted) {
602
- break;
603
- }
604
- const { done, value } = await reader.read();
605
- if (done) break;
606
- buffer += decoder.decode(value, { stream: true });
607
- const lines = buffer.split("\n");
608
- buffer = lines.pop() || "";
609
- for (const line of lines) {
610
- if (line.startsWith("data: ")) {
611
- const data = line.slice(6).trim();
612
- if (data === "[DONE]") continue;
613
- if (!data) continue;
614
- try {
615
- const parsed = JSON.parse(data);
616
- if (parsed.usage) {
617
- streamUsage = parsed.usage;
618
- }
619
- const delta = parsed.choices?.[0]?.delta;
620
- if (delta?.content) {
621
- fullContent += delta.content;
622
- if (ctx.stream) {
623
- ctx.stream({ type: "content", content: delta.content });
624
- }
625
- }
626
- if (delta?.tool_calls) {
627
- toolCalls = appendToolCalls(toolCalls, delta.tool_calls);
628
- }
629
- } catch (e) {
630
- }
631
- }
632
- }
633
- }
634
- } finally {
635
- reader.releaseLock();
636
- }
637
- const msg = {
638
- role: "assistant",
639
- content: fullContent
640
- };
641
- if (toolCalls.length > 0) {
642
- msg.tool_calls = toolCalls;
643
- }
644
- const usage = addUsage(ctx.usage, streamUsage?.prompt_tokens || 0, streamUsage?.completion_tokens || 0, streamUsage?.total_tokens || 0);
645
- if (ctx.stream && streamUsage) {
646
- ctx.stream({ type: "usage", usage });
647
- }
648
- return {
649
- ...ctx,
650
- lastResponse: msg,
651
- history: [...ctx.history, msg],
652
- usage
653
- };
654
- };
655
-
656
- // src/providers/anthropic.ts
657
- var getApiKey3 = (configApiKey) => {
658
- if (configApiKey) return configApiKey;
659
- try {
660
- return getKey("anthropic");
661
- } catch {
662
- const key = process.env.ANTHROPIC_API_KEY || "";
663
- if (!key) throw new Error("Anthropic API key not found");
664
- return key;
665
- }
666
- };
667
- var convertToAnthropicFormat = (messages) => {
668
- const result = [];
669
- let i = 0;
670
- while (i < messages.length) {
671
- const msg = messages[i];
672
- if (msg.role === "system") {
673
- i++;
674
- continue;
675
- }
676
- if (msg.role === "assistant") {
677
- if (msg.tool_calls) {
678
- result.push({
679
- role: "assistant",
680
- content: msg.tool_calls.map((tc) => ({
681
- type: "tool_use",
682
- id: tc.id,
683
- name: tc.function.name,
684
- input: JSON.parse(tc.function.arguments)
685
- }))
686
- });
687
- } else {
688
- result.push({
689
- role: "assistant",
690
- content: msg.content
691
- });
692
- }
693
- i++;
694
- } else if (msg.role === "tool") {
695
- const toolResults = [];
696
- while (i < messages.length && messages[i].role === "tool") {
697
- const toolMsg = messages[i];
698
- toolResults.push({
699
- type: "tool_result",
700
- tool_use_id: toolMsg.tool_call_id,
701
- content: toolMsg.content
702
- });
703
- i++;
704
- }
705
- result.push({
706
- role: "user",
707
- content: toolResults
708
- });
709
- } else {
710
- result.push(msg);
711
- i++;
712
- }
713
- }
714
- return result;
715
- };
716
- var callAnthropic = async (config, ctx) => {
717
- const { model: model2, instructions, schema, apiKey: configApiKey } = config;
718
- const apiKey = getApiKey3(configApiKey);
719
- let system = instructions;
720
- if (ctx.history[0]?.role === "system") {
721
- system = ctx.history[0].content;
722
- }
723
- const messages = convertToAnthropicFormat(ctx.history);
724
- if (schema) {
725
- const schemaPrompt = `
726
-
727
- You must respond with valid JSON that matches this schema:
728
- ${JSON.stringify(
729
- schema.schema,
730
- null,
731
- 2
732
- )}
733
-
734
- Return only the JSON object, no other text or formatting.`;
735
- system = system ? system + schemaPrompt : schemaPrompt.slice(2);
736
- }
737
- const body = {
738
- model: model2,
739
- messages,
740
- max_tokens: 4096,
741
- stream: !!ctx.stream
742
- };
743
- if (system) {
744
- body.system = system;
745
- }
746
- if (ctx.tools && ctx.tools.length > 0) {
747
- body.tools = ctx.tools.map((tool) => ({
748
- name: tool.function.name,
749
- description: tool.function.description,
750
- input_schema: tool.function.parameters
751
- }));
752
- }
753
- const response = await fetch("https://api.anthropic.com/v1/messages", {
754
- method: "POST",
755
- headers: {
756
- "Content-Type": "application/json",
757
- "x-api-key": apiKey,
758
- "anthropic-version": "2023-06-01"
759
- },
760
- body: JSON.stringify(body),
761
- signal: ctx.abortSignal
762
- });
763
- if (!response.ok) {
764
- const error = await response.text();
765
- throw new Error(`Anthropic API error: ${error}`);
766
- }
767
- if (ctx.stream) {
768
- return handleAnthropicStream(response, ctx);
769
- }
770
- const data = await response.json();
771
- const content = data.content[0];
772
- const msg = {
773
- role: "assistant",
774
- content: content.type === "text" ? content.text : ""
775
- };
776
- if (content.type === "tool_use") {
777
- msg.tool_calls = [
778
- {
779
- id: content.id,
780
- type: "function",
781
- function: {
782
- name: content.name,
783
- arguments: JSON.stringify(content.input)
784
- }
785
- }
786
- ];
787
- }
788
- const inputTokens = data.usage?.input_tokens || 0;
789
- const outputTokens = data.usage?.output_tokens || 0;
790
- return {
791
- ...ctx,
792
- lastResponse: msg,
793
- history: [...ctx.history, msg],
794
- usage: addUsage(ctx.usage, inputTokens, outputTokens, inputTokens + outputTokens)
795
- };
796
- };
797
// Consume an Anthropic Messages SSE stream, accumulating text deltas and
// tool_use blocks, and return a new context with the assembled assistant
// message appended to history and token usage folded into ctx.usage.
var handleAnthropicStream = async (response, ctx) => {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let fullContent = "";
  const toolCalls = [];
  let buffer = "";
  let inputTokens = 0;
  let outputTokens = 0;
  try {
    while (true) {
      // Stop reading promptly on caller abort; partial content is kept.
      if (ctx.abortSignal?.aborted) {
        break;
      }
      const { done, value } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });
      // SSE events are newline-delimited; the trailing partial line stays
      // buffered until the next chunk completes it.
      const lines = buffer.split("\n");
      buffer = lines.pop() || "";
      for (const line of lines) {
        if (line.startsWith("data: ")) {
          const data = line.slice(6).trim();
          if (!data) continue;
          try {
            const parsed = JSON.parse(data);
            // Anthropic reports input tokens on message_start and output
            // tokens on message_delta.
            if (parsed.type === "message_start" && parsed.message?.usage) {
              inputTokens = parsed.message.usage.input_tokens || 0;
            }
            if (parsed.type === "message_delta" && parsed.usage) {
              outputTokens = parsed.usage.output_tokens || 0;
            }
            if (parsed.type === "content_block_delta" && parsed.delta?.text) {
              fullContent += parsed.delta.text;
              if (ctx.stream) {
                ctx.stream({ type: "content", content: parsed.delta.text });
              }
            }
            // A tool call opens with content_block_start; its JSON args
            // arrive incrementally as input_json_delta events keyed by index.
            if (parsed.type === "content_block_start" && parsed.content_block?.type === "tool_use") {
              const toolUse = parsed.content_block;
              toolCalls.push({
                id: toolUse.id,
                type: "function",
                function: {
                  name: toolUse.name,
                  arguments: ""
                },
                index: parsed.index
              });
            }
            if (parsed.type === "content_block_delta" && parsed.delta?.type === "input_json_delta") {
              const toolCall = toolCalls.find((tc) => tc.index === parsed.index);
              if (toolCall) {
                toolCall.function.arguments += parsed.delta.partial_json;
              }
            }
          } catch (e) {
            // Malformed or partial SSE payloads are deliberately ignored.
          }
        }
      }
    }
  } finally {
    reader.releaseLock();
  }
  const msg = {
    role: "assistant",
    content: fullContent
  };
  if (toolCalls.length > 0) {
    // Strip the streaming-only `index` field before persisting the calls.
    msg.tool_calls = toolCalls.map(({ index, ...tc }) => tc);
  }
  const usage = addUsage(ctx.usage, inputTokens, outputTokens, inputTokens + outputTokens);
  if (ctx.stream && (inputTokens || outputTokens)) {
    ctx.stream({ type: "usage", usage });
  }
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg],
    usage
  };
};
877
-
878
- // src/providers/google.ts
879
// Resolve the Google API key. Precedence: explicit config value, then the
// shared key store (getKey), then environment variables. Throws when no
// key can be located anywhere.
var getApiKey4 = (explicitKey) => {
  if (explicitKey) return explicitKey;
  try {
    return getKey("google");
  } catch {
    const envKey = process.env.GEMINI_API_KEY || process.env.GOOGLE_AI_API_KEY || "";
    if (envKey) return envKey;
    throw new Error("Google API key not found");
  }
};
889
// Call the Google Gemini generateContent API (or its streaming variant),
// translating the internal OpenAI-style history into Gemini `contents`.
// Returns a new context with the assistant reply appended and usage updated.
var callGoogle = async (config, ctx) => {
  const { model: model2, instructions, apiKey: configApiKey } = config;
  // NOTE(review): the key is passed as a URL query parameter below —
  // Google supports this, but it can leak into logs; confirm acceptable.
  const apiKey = getApiKey4(configApiKey);
  const contents = [];
  if (instructions) {
    // Gemini v1beta has no system role here; emulate system instructions
    // with a user/model handshake pair at the start of the conversation.
    contents.push({
      role: "user",
      parts: [{ text: instructions }]
    });
    contents.push({
      role: "model",
      parts: [{ text: "I understand." }]
    });
  }
  // Maps tool_call_id -> function name so tool results can be matched to
  // the functionCall that produced them (Gemini keys responses by name).
  const toolCallMap = /* @__PURE__ */ new Map();
  for (let i = 0; i < ctx.history.length; i++) {
    const msg2 = ctx.history[i];
    if (msg2.role === "assistant") {
      const parts2 = [];
      if (msg2.content) {
        parts2.push({ text: msg2.content });
      }
      if (msg2.tool_calls?.length) {
        for (const tc of msg2.tool_calls) {
          toolCallMap.set(tc.id, tc.function.name);
          const part = {
            functionCall: {
              name: tc.function.name,
              args: JSON.parse(tc.function.arguments)
            }
          };
          // Preserve Gemini "thought signatures" so multi-turn tool use
          // keeps the model's reasoning context.
          if (tc.thoughtSignature) {
            part.thoughtSignature = tc.thoughtSignature;
          }
          parts2.push(part);
        }
      }
      if (parts2.length > 0) {
        contents.push({ role: "model", parts: parts2 });
      }
    } else if (msg2.role === "tool") {
      // Collapse a run of consecutive tool messages into one user turn of
      // functionResponse parts, as Gemini expects.
      const responseParts = [];
      while (i < ctx.history.length && ctx.history[i].role === "tool") {
        const toolMsg = ctx.history[i];
        const functionName = toolCallMap.get(toolMsg.tool_call_id);
        if (functionName) {
          let responseData;
          try {
            responseData = JSON.parse(toolMsg.content);
          } catch {
            // Non-JSON tool output is wrapped so the response is an object.
            responseData = { result: toolMsg.content };
          }
          if (Array.isArray(responseData)) {
            responseData = { result: responseData };
          }
          responseParts.push({
            functionResponse: {
              name: functionName,
              response: responseData
            }
          });
        }
        i++;
      }
      // Step back one: the for-loop's i++ will land on the first
      // non-tool message.
      i--;
      if (responseParts.length > 0) {
        contents.push({ role: "user", parts: responseParts });
      }
    } else if (msg2.role === "user") {
      contents.push({
        role: "user",
        parts: [{ text: msg2.content }]
      });
    }
  }
  const body = {
    contents
  };
  if (ctx.tools && ctx.tools.length > 0) {
    body.tools = [
      {
        function_declarations: ctx.tools.map((tool) => ({
          name: tool.function.name,
          description: tool.function.description,
          parameters: tool.function.parameters
        }))
      }
    ];
  }
  const endpoint = ctx.stream ? "streamGenerateContent" : "generateContent";
  const response = await fetch(
    `https://generativelanguage.googleapis.com/v1beta/models/${model2}:${endpoint}?key=${apiKey}${ctx.stream ? "&alt=sse" : ""}`,
    {
      method: "POST",
      headers: {
        "Content-Type": "application/json"
      },
      body: JSON.stringify(body),
      signal: ctx.abortSignal
    }
  );
  if (!response.ok) {
    const error = await response.text();
    throw new Error(`Google API error: ${error}`);
  }
  if (ctx.stream) {
    return handleGoogleStream(response, ctx);
  }
  const data = await response.json();
  const candidate = data.candidates[0];
  const parts = candidate.content.parts || [];
  const msg = {
    role: "assistant",
    content: ""
  };
  const toolCalls = [];
  for (const part of parts) {
    if (part.text) {
      msg.content += part.text;
    }
    if (part.functionCall) {
      const tc = {
        // Gemini does not supply call ids; synthesize a short random one.
        id: Math.random().toString(36).substring(2, 9),
        type: "function",
        function: {
          name: part.functionCall.name,
          arguments: JSON.stringify(part.functionCall.args)
        }
      };
      if (part.thoughtSignature) {
        tc.thoughtSignature = part.thoughtSignature;
      }
      toolCalls.push(tc);
    }
  }
  if (toolCalls.length > 0) {
    msg.tool_calls = toolCalls;
  }
  const um = data.usageMetadata;
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg],
    usage: addUsage(ctx.usage, um?.promptTokenCount || 0, um?.candidatesTokenCount || 0, um?.totalTokenCount || 0)
  };
};
1035
// Consume a Gemini SSE stream (alt=sse), accumulating text and
// functionCall parts, and return an updated context with the assembled
// assistant message and usage metadata.
var handleGoogleStream = async (response, ctx) => {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let fullContent = "";
  const toolCalls = [];
  let buffer = "";
  let usageMetadata = null;
  try {
    while (true) {
      // Bail out early on caller abort; partial content is kept.
      if (ctx.abortSignal?.aborted) {
        break;
      }
      const { done, value } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });
      // Keep the trailing partial line buffered for the next chunk.
      const lines = buffer.split("\n");
      buffer = lines.pop() || "";
      for (const line of lines) {
        if (line.startsWith("data: ")) {
          const data = line.slice(6).trim();
          if (!data) continue;
          try {
            const parsed = JSON.parse(data);
            // usageMetadata is repeated across events; the last one wins.
            if (parsed.usageMetadata) {
              usageMetadata = parsed.usageMetadata;
            }
            const candidate = parsed.candidates?.[0];
            const parts = candidate?.content?.parts || [];
            for (const part of parts) {
              if (part?.text) {
                fullContent += part.text;
                if (ctx.stream) {
                  ctx.stream({ type: "content", content: part.text });
                }
              }
              if (part?.functionCall) {
                const tc = {
                  // Gemini supplies no call id; synthesize a random one.
                  id: Math.random().toString(36).substring(2, 9),
                  type: "function",
                  function: {
                    name: part.functionCall.name,
                    arguments: JSON.stringify(part.functionCall.args)
                  }
                };
                if (part.thoughtSignature) {
                  tc.thoughtSignature = part.thoughtSignature;
                }
                toolCalls.push(tc);
              }
            }
          } catch (e) {
            // Malformed or partial SSE payloads are deliberately ignored.
          }
        }
      }
    }
  } finally {
    reader.releaseLock();
  }
  const msg = {
    role: "assistant",
    content: fullContent
  };
  if (toolCalls.length > 0) {
    msg.tool_calls = toolCalls;
  }
  const um = usageMetadata;
  const usage = addUsage(ctx.usage, um?.promptTokenCount || 0, um?.candidatesTokenCount || 0, um?.totalTokenCount || 0);
  if (ctx.stream && um) {
    ctx.stream({ type: "usage", usage });
  }
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg],
    usage
  };
};
1112
-
1113
- // src/providers/huggingface.ts
1114
// Cache of loaded transformers.js pipelines keyed by model name, so each
// local model is instantiated at most once per process.
var modelCache2 = /* @__PURE__ */ new Map();
// Flatten optional system instructions plus the chat history into the
// plain { role, content } list expected by the transformers.js chat
// pipeline. Only role and content survive; other message fields are dropped.
var formatMessages = (instructions, history) => {
  const mapped = history.map(({ role, content }) => ({ role, content }));
  return instructions ? [{ role: "system", content: instructions }, ...mapped] : mapped;
};
1125
// Run a local Hugging Face transformers.js text-generation pipeline.
// The pipeline is lazily created per model and cached in modelCache2.
// Tool calling is not supported on this path; usage is reported as zeros
// because the pipeline exposes no token counts.
var callHuggingFace = async (config, ctx) => {
  const { model: model2, instructions, schema } = config;
  // Dynamic import keeps @huggingface/transformers optional until used.
  const { pipeline } = await import("@huggingface/transformers");
  if (!modelCache2.has(model2)) {
    const generator2 = await pipeline("text-generation", model2, {
      dtype: "q4"
    });
    modelCache2.set(model2, generator2);
  }
  const generator = modelCache2.get(model2);
  const messages = formatMessages(instructions, ctx.history);
  if (schema) {
    // No structured-output mode locally: inject JSON-schema instructions
    // into the system message (creating one if absent) instead.
    const schemaMsg = messages.find((m) => m.role === "system");
    const schemaInstructions = [
      "you must respond with valid JSON matching this schema:",
      JSON.stringify(schema.schema, null, 2),
      "respond ONLY with the JSON object, no other text."
    ].join("\n");
    if (schemaMsg) {
      schemaMsg.content += "\n\n" + schemaInstructions;
    } else {
      messages.unshift({ role: "system", content: schemaInstructions });
    }
  }
  const output = await generator(messages, {
    max_new_tokens: 2048,
    do_sample: false
  });
  // generated_text is the full message list; the last entry is the reply.
  const generatedMessages = output[0].generated_text;
  const lastMessage = generatedMessages.at(-1);
  const content = lastMessage?.content || "";
  const msg = {
    role: "assistant",
    content
  };
  if (ctx.stream) {
    // No incremental tokens from the pipeline; emit one content event.
    ctx.stream({ type: "content", content });
  }
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg],
    usage: addUsage(ctx.usage, 0, 0, 0)
  };
};
1170
-
1171
- // src/providers/xai.ts
1172
// Fold streamed tool-call delta chunks into the accumulated toolCalls
// array. Each chunk carries an `index` naming the slot it extends;
// id / name / arguments fragments are concatenated as they arrive.
// Mutates and returns `toolCalls`.
var appendToolCalls2 = (toolCalls, chunks) => {
  for (const chunk of chunks) {
    // Grow the array until the chunk's slot exists.
    while (toolCalls.length <= chunk.index) {
      toolCalls.push({
        id: "",
        type: "function",
        function: { name: "", arguments: "" }
      });
    }
    const target = toolCalls[chunk.index];
    target.id += chunk.id || "";
    target.function.name += chunk.function?.name || "";
    target.function.arguments += chunk.function?.arguments || "";
  }
  return toolCalls;
};
1188
// Resolve the xAI API key. Precedence: explicit config value, then the
// shared key store (getKey), then the XAI_API_KEY environment variable.
// Throws when no key can be located.
var getApiKey5 = (explicitKey) => {
  if (explicitKey) return explicitKey;
  try {
    return getKey("xai");
  } catch {
    const envKey = process.env.XAI_API_KEY || "";
    if (envKey) return envKey;
    throw new Error("xAI API key not found");
  }
};
1198
// Call the xAI chat completions API (OpenAI-compatible), optionally with
// a strict JSON schema and tools. Returns a new context with the reply
// appended to history and token usage accumulated.
var callXAI = async (config, ctx) => {
  const { model: model2, instructions, schema, apiKey: configApiKey } = config;
  const apiKey = getApiKey5(configApiKey);
  const messages = [];
  if (instructions) {
    messages.push({ role: "system", content: instructions });
  }
  messages.push(...ctx.history);
  const body = {
    model: model2,
    messages,
    stream: !!ctx.stream,
    // Ask the API to report usage on the final stream chunk.
    ...ctx.stream && { stream_options: { include_usage: true } }
  };
  if (schema) {
    body.response_format = {
      type: "json_schema",
      json_schema: {
        name: schema.name,
        // strict mode requires additionalProperties: false.
        schema: { ...schema.schema, additionalProperties: false },
        strict: true
      }
    };
  }
  if (ctx.tools && ctx.tools.length > 0) {
    body.tools = ctx.tools;
    body.tool_choice = "auto";
  }
  const response = await fetch("https://api.x.ai/v1/chat/completions", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${apiKey}`
    },
    body: JSON.stringify(body),
    signal: ctx.abortSignal
  });
  if (!response.ok) {
    const error = await response.text();
    throw new Error(`xAI API error: ${error}`);
  }
  if (ctx.stream) {
    return handleXAIStream(response, ctx);
  }
  const data = await response.json();
  const choice = data.choices[0];
  const { message } = choice;
  const msg = {
    role: "assistant",
    content: message.content || ""
  };
  if (message.tool_calls) {
    msg.tool_calls = message.tool_calls;
  }
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg],
    usage: addUsage(ctx.usage, data.usage?.prompt_tokens || 0, data.usage?.completion_tokens || 0, data.usage?.total_tokens || 0)
  };
};
1259
// Consume an xAI (OpenAI-style) SSE stream: accumulate content deltas and
// tool-call fragments, then return an updated context with the assembled
// assistant message and usage from the final stream chunk.
var handleXAIStream = async (response, ctx) => {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let fullContent = "";
  let toolCalls = [];
  let buffer = "";
  let streamUsage = null;
  try {
    while (true) {
      // Bail out early on caller abort; partial content is kept.
      if (ctx.abortSignal?.aborted) {
        break;
      }
      const { done, value } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });
      // Keep the trailing partial line buffered for the next chunk.
      const lines = buffer.split("\n");
      buffer = lines.pop() || "";
      for (const line of lines) {
        if (line.startsWith("data: ")) {
          const data = line.slice(6).trim();
          if (data === "[DONE]") continue;
          if (!data) continue;
          try {
            const parsed = JSON.parse(data);
            // Usage arrives once, on the final chunk (stream_options).
            if (parsed.usage) {
              streamUsage = parsed.usage;
            }
            const delta = parsed.choices?.[0]?.delta;
            if (delta?.content) {
              fullContent += delta.content;
              if (ctx.stream) {
                ctx.stream({ type: "content", content: delta.content });
              }
            }
            if (delta?.tool_calls) {
              toolCalls = appendToolCalls2(toolCalls, delta.tool_calls);
            }
          } catch (e) {
            // Malformed or partial SSE payloads are deliberately ignored.
          }
        }
      }
    }
  } finally {
    reader.releaseLock();
  }
  const msg = {
    role: "assistant",
    content: fullContent
  };
  if (toolCalls.length > 0) {
    msg.tool_calls = toolCalls;
  }
  const usage = addUsage(ctx.usage, streamUsage?.prompt_tokens || 0, streamUsage?.completion_tokens || 0, streamUsage?.total_tokens || 0);
  if (ctx.stream && streamUsage) {
    ctx.stream({ type: "usage", usage });
  }
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg],
    usage
  };
};
1322
-
1323
- // src/providers/local.ts
1324
// Default OpenAI-compatible endpoint for local models (Ollama).
var DEFAULT_BASE_URL = "http://localhost:11434/v1";
// Fold streamed tool-call delta chunks into the accumulated toolCalls
// array. Each chunk's `index` names the slot it extends; id / name /
// arguments fragments are concatenated. Mutates and returns `toolCalls`.
var appendToolCalls3 = (toolCalls, chunks) => {
  for (const chunk of chunks) {
    // Grow the array until the chunk's slot exists.
    while (toolCalls.length <= chunk.index) {
      toolCalls.push({
        id: "",
        type: "function",
        function: { name: "", arguments: "" }
      });
    }
    const target = toolCalls[chunk.index];
    target.id += chunk.id || "";
    target.function.name += chunk.function?.name || "";
    target.function.arguments += chunk.function?.arguments || "";
  }
  return toolCalls;
};
1341
// Call a local OpenAI-compatible server (Ollama by default, or any
// baseUrl). API key is optional. Returns a new context with the reply
// appended to history and usage accumulated.
var callLocal = async (config, ctx) => {
  const { model: model2, instructions, schema, apiKey, baseUrl } = config;
  const endpoint = baseUrl || DEFAULT_BASE_URL;
  const messages = [];
  if (instructions) {
    messages.push({ role: "system", content: instructions });
  }
  messages.push(...ctx.history);
  const body = {
    model: model2,
    messages,
    stream: !!ctx.stream,
    // Ask the server to report usage on the final stream chunk.
    ...ctx.stream && { stream_options: { include_usage: true } }
  };
  if (schema) {
    body.response_format = {
      type: "json_schema",
      json_schema: {
        name: schema.name,
        // strict mode requires additionalProperties: false.
        schema: { ...schema.schema, additionalProperties: false },
        strict: true
      }
    };
  }
  if (ctx.tools && ctx.tools.length > 0) {
    body.tools = ctx.tools;
    body.tool_choice = "auto";
  }
  const headers = {
    "Content-Type": "application/json"
  };
  // Local servers usually need no auth; send a bearer token only if given.
  if (apiKey) {
    headers["Authorization"] = `Bearer ${apiKey}`;
  }
  const response = await fetch(`${endpoint}/chat/completions`, {
    method: "POST",
    headers,
    body: JSON.stringify(body),
    signal: ctx.abortSignal
  });
  if (!response.ok) {
    const error = await response.text();
    throw new Error(`Local API error: ${error}`);
  }
  if (ctx.stream) {
    return handleLocalStream(response, ctx);
  }
  const data = await response.json();
  const choice = data.choices[0];
  const { message } = choice;
  const msg = {
    role: "assistant",
    content: message.content || ""
  };
  if (message.tool_calls) {
    msg.tool_calls = message.tool_calls;
  }
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg],
    usage: addUsage(ctx.usage, data.usage?.prompt_tokens || 0, data.usage?.completion_tokens || 0, data.usage?.total_tokens || 0)
  };
};
1405
// Consume an OpenAI-style SSE stream from a local server: accumulate
// content deltas and tool-call fragments, then return an updated context
// with the assembled assistant message and final usage.
var handleLocalStream = async (response, ctx) => {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let fullContent = "";
  let toolCalls = [];
  let buffer = "";
  let streamUsage = null;
  try {
    while (true) {
      // Bail out early on caller abort; partial content is kept.
      if (ctx.abortSignal?.aborted) {
        break;
      }
      const { done, value } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });
      // Keep the trailing partial line buffered for the next chunk.
      const lines = buffer.split("\n");
      buffer = lines.pop() || "";
      for (const line of lines) {
        if (line.startsWith("data: ")) {
          const data = line.slice(6).trim();
          if (data === "[DONE]") continue;
          if (!data) continue;
          try {
            const parsed = JSON.parse(data);
            // Usage arrives once, on the final chunk (stream_options).
            if (parsed.usage) {
              streamUsage = parsed.usage;
            }
            const delta = parsed.choices?.[0]?.delta;
            if (delta?.content) {
              fullContent += delta.content;
              if (ctx.stream) {
                ctx.stream({ type: "content", content: delta.content });
              }
            }
            if (delta?.tool_calls) {
              toolCalls = appendToolCalls3(toolCalls, delta.tool_calls);
            }
          } catch (e) {
            // Malformed or partial SSE payloads are deliberately ignored.
          }
        }
      }
    }
  } finally {
    reader.releaseLock();
  }
  const msg = {
    role: "assistant",
    content: fullContent
  };
  if (toolCalls.length > 0) {
    msg.tool_calls = toolCalls;
  }
  const usage = addUsage(ctx.usage, streamUsage?.prompt_tokens || 0, streamUsage?.completion_tokens || 0, streamUsage?.total_tokens || 0);
  if (ctx.stream && streamUsage) {
    ctx.stream({ type: "usage", usage });
  }
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg],
    usage
  };
};
1468
-
1469
- // src/providers/index.ts
1470
// Route a model call to the provider named in the "provider/model" string.
// Known providers receive the config with the provider prefix stripped;
// unknown providers fall back to Hugging Face with the original,
// unparsed model name.
var callProvider = async (config, ctx) => {
  const { provider, model: modelName } = parseModelName(config.model);
  const providerConfig = { ...config, model: modelName };
  const dispatch = {
    openai: callOpenAI,
    anthropic: callAnthropic,
    google: callGoogle,
    xai: callXAI,
    local: callLocal,
    huggingface: callHuggingFace
  };
  const handler = dispatch[provider.toLowerCase()];
  if (handler) {
    return handler(providerConfig, ctx);
  }
  // Unknown provider: keep the full model string intact for Hugging Face.
  return callHuggingFace({ ...config }, ctx);
};
1490
-
1491
- // src/approval.ts
1492
// src/approval.ts — human-in-the-loop tool approval plumbing. Pending
// approvals are tracked as promise resolvers keyed by a generated token,
// and lifecycle events fan out through a module-level EventEmitter.
var import_events = require("events");
var state = {
  resolvers: /* @__PURE__ */ new Map(),
  emitter: new import_events.EventEmitter()
};
// Build a unique-enough correlation token for one approval request.
// NOTE(review): Math.random is not cryptographically secure — presumably
// fine for correlation ids; confirm these are never used as credentials.
var generateApprovalToken = () => {
  return `approval_${Date.now()}_${Math.random().toString(36).substring(2, 9)}`;
};
// Announce a pending tool call and block until someone calls
// resolveApproval with the matching id. The promise never rejects;
// denial is expressed via the response's `approved` field.
var requestApproval = async (toolCall, approvalId) => {
  const id = generateApprovalToken();
  const request = { id, toolCall, approvalId };
  state.emitter.emit("approvalRequested", request);
  return new Promise((resolve) => {
    state.resolvers.set(id, resolve);
  });
};
// Resolve a pending approval by id. Returns false when the id is unknown
// (already resolved, or never requested).
var resolveApproval = (response) => {
  const resolver = state.resolvers.get(response.id);
  if (!resolver) return false;
  state.resolvers.delete(response.id);
  resolver(response);
  state.emitter.emit("approvalResolved", response);
  return true;
};
// Subscribe to approval lifecycle events.
var onApprovalRequested = (listener) => {
  state.emitter.on("approvalRequested", listener);
};
var onApprovalResolved = (listener) => {
  state.emitter.on("approvalResolved", listener);
};
// Detach a previously registered lifecycle listener.
var removeApprovalListener = (event, listener) => {
  state.emitter.removeListener(event, listener);
};
1525
-
1526
- // src/composition/model.ts
1527
// Build a composable model step. The returned async function accepts
// either a plain string (wrapped into a fresh single-message context) or
// an existing context, and loops: call the provider, execute any tool
// calls, repeat until the model stops calling tools or the caller aborts.
var model = ({
  model: model2 = "openai/gpt-4o-mini",
  schema,
  system,
  apiKey,
  baseUrl
} = {}) => {
  return async (ctxOrMessage) => {
    const ctx = typeof ctxOrMessage === "string" ? (
      // model()("hello!");
      {
        history: [{ role: "user", content: ctxOrMessage }],
        tools: []
      }
    ) : (
      // model()(/* few shot or history */);
      ctxOrMessage
    );
    const normalizedSchema = schema ? normalizeSchema(schema) : void 0;
    let currentCtx = ctx;
    if (system) {
      // `system` may be a function of the context (dynamic prompt).
      const systemContent = typeof system === "function" ? system(currentCtx) : system;
      const [first, ...rest] = currentCtx.history;
      if (first?.role === "system") {
        // Replace an existing leading system message.
        currentCtx = {
          ...currentCtx,
          history: [{ role: "system", content: systemContent }, ...rest]
        };
      } else {
        currentCtx = {
          ...currentCtx,
          history: [{ role: "system", content: systemContent }, ...currentCtx.history]
        };
      }
    }
    // Providers receive instructions separately from the history.
    const systemMessage = currentCtx.history.find((m) => m.role === "system");
    const instructions = systemMessage?.content;
    do {
      if (currentCtx.abortSignal?.aborted) {
        break;
      }
      currentCtx = await callProvider(
        { model: model2, instructions, schema: normalizedSchema, apiKey, baseUrl },
        currentCtx
      );
      // Tool round-trip: run the requested tools, then loop so the model
      // can consume their results.
      if (currentCtx.lastResponse?.tool_calls && currentCtx.tools?.length) {
        currentCtx = await executeTools(currentCtx);
      }
    } while (currentCtx.lastResponse?.tool_calls && currentCtx.tools?.length && !currentCtx.abortSignal?.aborted);
    return currentCtx;
  };
};
1579
// Execute the tool calls requested by the last assistant response.
// Honors per-tool call limits (ctx.toolLimits), optional human approval
// (callback or event-based requestApproval), retry (retryCount extra
// attempts), and parallel vs sequential execution. Results are appended
// to history as role:"tool" messages keyed by tool_call_id.
var executeTools = async (ctx) => {
  const calls = ctx.lastResponse?.tool_calls || [];
  if (!calls.length) return ctx;
  if (ctx.stream) {
    ctx.stream({ type: "tool_calls_ready", calls });
  }
  const toolConfig = ctx.toolConfig || {};
  const {
    requireApproval = false,
    approvalCallback,
    parallel = false,
    retryCount = 0,
    approvalId,
    executeOnApproval = false
  } = toolConfig;
  // Copied so limit counting for this batch never mutates ctx in place.
  const updatedCounts = { ...ctx.toolCallCounts || {} };
  // Run one call (already approved or denied) and produce {call, result}.
  const runCall = async (call, approved) => {
    if (!approved) {
      if (ctx.stream) {
        ctx.stream({
          type: "tool_error",
          call,
          error: "Tool execution denied by user"
        });
      }
      return {
        call,
        result: { error: "Tool execution denied by user" }
      };
    }
    const toolName = call.function.name;
    const limits = ctx.toolLimits || {};
    const maxCalls2 = limits[toolName];
    const currentCount = updatedCounts[toolName] || 0;
    // Enforce the per-tool call budget before executing.
    if (maxCalls2 && currentCount >= maxCalls2) {
      const error2 = `Tool ${toolName} has reached its limit of ${maxCalls2} calls`;
      if (ctx.stream) {
        ctx.stream({ type: "tool_error", call, error: error2 });
      }
      return {
        call,
        result: { error: error2 }
      };
    }
    updatedCounts[toolName] = currentCount + 1;
    if (ctx.stream) {
      ctx.stream({ type: "tool_executing", call });
    }
    let lastError;
    // One initial attempt plus retryCount retries.
    for (let i = 0; i <= retryCount; i++) {
      try {
        const executor = ctx.toolExecutors?.[call.function.name];
        if (!executor) {
          throw new Error(`Tool executor not found: ${call.function.name}`);
        }
        let args = {};
        try {
          args = call.function.arguments ? JSON.parse(call.function.arguments) : {};
        } catch (e) {
          throw new Error(
            `Invalid JSON arguments for tool ${call.function.name}: ${call.function.arguments}`
          );
        }
        const result = await executor(args);
        if (ctx.stream) {
          ctx.stream({ type: "tool_complete", call, result });
        }
        return { call, result };
      } catch (e) {
        lastError = e;
      }
    }
    // All attempts failed: surface the last error as the tool result so
    // the model can react instead of the whole run throwing.
    const error = lastError.message;
    if (ctx.stream) {
      ctx.stream({ type: "tool_error", call, error });
    }
    return { call, result: { error } };
  };
  if (executeOnApproval && requireApproval) {
    // Eager mode: each call starts executing the moment it is approved,
    // without waiting for the other approvals.
    const resultPromises = calls.map(async (call) => {
      let approved;
      if (approvalCallback) {
        approved = await approvalCallback(call);
      } else {
        const response = await requestApproval(call, approvalId);
        approved = response.approved;
      }
      return runCall(call, approved);
    });
    const results2 = await Promise.all(resultPromises);
    return {
      ...ctx,
      history: [
        ...ctx.history,
        ...results2.map(({ call, result }) => ({
          role: "tool",
          tool_call_id: call.id,
          content: JSON.stringify(result)
        }))
      ],
      toolCallCounts: updatedCounts
    };
  }
  // Default mode: gather all approvals first, then execute.
  const approvalPromises = calls.map(async (call) => {
    if (requireApproval) {
      let approved;
      if (approvalCallback) {
        approved = await approvalCallback(call);
      } else {
        const response = await requestApproval(call, approvalId);
        approved = response.approved;
      }
      return { call, approved };
    } else {
      return { call, approved: true };
    }
  });
  const approvals = await Promise.all(approvalPromises);
  const runCallWithApproval = async (call) => {
    const approval = approvals.find((a) => a.call.id === call.id);
    return runCall(call, approval?.approved ?? true);
  };
  const results = parallel ? await Promise.all(calls.map(runCallWithApproval)) : await runCallsSequentially(calls, runCallWithApproval);
  return {
    ...ctx,
    history: [
      ...ctx.history,
      ...results.map(({ call, result }) => ({
        role: "tool",
        tool_call_id: call.id,
        content: JSON.stringify(result)
      }))
    ],
    toolCallCounts: updatedCounts
  };
};
1715
// Execute tool calls strictly one after another, preserving input order
// in the returned results array.
var runCallsSequentially = async (calls, runCall) => {
  const collected = [];
  for (let i = 0; i < calls.length; i++) {
    const outcome = await runCall(calls[i]);
    collected.push(outcome);
  }
  return collected;
};
1722
-
1723
- // src/thread.ts
1724
// In-process thread store backed by a Map. Unknown thread ids read as an
// empty history. Both operations are async to match persistent stores.
var createMemoryStore = () => {
  const byThread = /* @__PURE__ */ new Map();
  return {
    async get(threadId) {
      return byThread.get(threadId) || [];
    },
    async set(threadId, messages) {
      byThread.set(threadId, messages);
    }
  };
};
1735
// Build a thread handle bound to a persistence store. `generate` runs a
// workflow over the stored history; `message` appends a user message
// first and defaults the workflow to a bare model(). On abort, the saved
// history is truncated to the pre-response state plus an interruption
// marker rather than the (possibly partial) workflow output.
var createThread = (id, store) => {
  return {
    id,
    store,
    async generate(workflow) {
      const history = await store.get(id);
      const initialContext = {
        history,
        tools: [],
        toolExecutors: {},
        toolLimits: {},
        toolCallCounts: {}
      };
      const finalContext = await workflow(initialContext);
      await store.set(id, finalContext.history);
      return finalContext;
    },
    async message(content, workflow, options) {
      const history = await store.get(id);
      const initialContext = {
        history: [...history, { role: "user", content }],
        tools: [],
        toolExecutors: {},
        toolLimits: {},
        toolCallCounts: {},
        abortSignal: options?.abortSignal
      };
      const finalContext = await (workflow || model())(initialContext);
      if (options?.abortSignal?.aborted) {
        // Persist the user's message plus an explicit interruption note;
        // the partial assistant output is not saved.
        const abortedHistory = [
          ...initialContext.history,
          { role: "assistant", content: "[Response interrupted]" }
        ];
        await store.set(id, abortedHistory);
        return { ...finalContext, history: abortedHistory };
      }
      await store.set(id, finalContext.history);
      return finalContext;
    }
  };
};
1776
// Cache of default (memory-store) threads, keyed by thread id.
var threads = /* @__PURE__ */ new Map();
// Threads built on caller-supplied stores, keyed by store identity so two
// distinct stores with the same thread id never collide. (The previous
// `${id}-${store}` key stringified every store to "[object Object]",
// making all custom stores share one cache slot per id.)
var threadsByStore = /* @__PURE__ */ new WeakMap();
// Return the cached thread for `id` (and `store`, when given), creating
// and caching it on first use. Without a store, a shared in-memory store
// is created for the thread.
var getOrCreateThread = (id, store) => {
  if (!store) {
    if (!threads.has(id)) {
      threads.set(id, createThread(id, createMemoryStore()));
    }
    return threads.get(id);
  }
  let perStore = threadsByStore.get(store);
  if (!perStore) {
    perStore = /* @__PURE__ */ new Map();
    threadsByStore.set(store, perStore);
  }
  if (!perStore.has(id)) {
    perStore.set(id, createThread(id, store));
  }
  return perStore.get(id);
};
1787
-
1788
- // src/composition/when.ts
1789
// Conditional step: run `action` only when `condition(ctx)` is truthy;
// otherwise pass the context through unchanged.
var when = (condition, action) => {
  return async (ctx) => condition(ctx) ? action(ctx) : ctx;
};
1797
-
1798
- // src/helpers.ts
1799
// Predicate factory: true when the last response requested no tool calls
// (missing, null, or empty tool_calls all count as "none").
var noToolsCalled = () => (ctx) => (ctx.lastResponse?.tool_calls?.length ?? 0) === 0;
1802
// Run `step` each time the history length crosses another multiple of n
// messages. Stateful closure: remembers the history length at the last
// trigger so each multiple fires at most once.
var everyNMessages = (n, step) => {
  let lastTriggeredAt = 0;
  return async (ctx) => {
    const crossed = Math.floor(ctx.history.length / n) > Math.floor(lastTriggeredAt / n);
    if (!crossed) return ctx;
    lastTriggeredAt = ctx.history.length;
    return await step(ctx);
  };
};
1812
// Run `step` each time the estimated token count of the history crosses
// another multiple of n. Tokens are approximated as ceil(content length / 4).
// Fixes: messages whose content is null/undefined (e.g. assistant messages
// carrying only tool_calls) previously threw a TypeError on
// `msg.content.length`; the estimate is now also computed once per
// invocation instead of twice.
var everyNTokens = (n, step) => {
  let lastTriggeredAt = 0;
  // Rough token estimate over the whole history; tolerant of messages
  // without string content.
  const estimateTokens = (history) => history.reduce(
    (acc, msg) => acc + Math.ceil((msg.content?.length ?? 0) / 4),
    0
  );
  return async (ctx) => {
    const totalTokens = estimateTokens(ctx.history);
    if (Math.floor(totalTokens / n) <= Math.floor(lastTriggeredAt / n)) {
      return ctx;
    }
    lastTriggeredAt = totalTokens;
    return await step(ctx);
  };
};
1832
// Append `content` to the most recent user message, returning a new
// context with a copied history (no in-place mutation). When the history
// contains no user message, the context is returned unchanged.
var appendToLastRequest = (content) => {
  return async (ctx) => {
    let targetIndex = ctx.history.length - 1;
    while (targetIndex >= 0 && ctx.history[targetIndex].role !== "user") {
      targetIndex--;
    }
    if (targetIndex < 0) return ctx;
    const history = ctx.history.map(
      (msg, i) => i === targetIndex ? { ...msg, content: msg.content + content } : msg
    );
    return { ...ctx, history };
  };
};
1853
// Trigger `step` once the named tool has gone unused for `times`
// consecutive user turns. Stateful closure: evaluates at most once per
// turn (guarded by lastProcessedTurn) and resets the streak whenever the
// tool is used in the current turn.
var toolNotUsedInNTurns = ({ toolName, times }, step) => {
  let turnsSinceLastUsed = 0;
  let lastProcessedTurn = -1;
  return when((ctx) => {
    const currentTurn = getCurrentTurn(ctx);
    // Only count each user turn once, however often the workflow runs.
    if (currentTurn === lastProcessedTurn) return false;
    lastProcessedTurn = currentTurn;
    const toolUsedInTurn = wasToolUsedInCurrentTurn(ctx, toolName);
    if (toolUsedInTurn) {
      turnsSinceLastUsed = 0;
      return false;
    } else {
      turnsSinceLastUsed++;
      return turnsSinceLastUsed >= times;
    }
  }, step);
};
1870
// A "turn" is one user message; the current turn number is simply the
// count of user messages in the history.
var getCurrentTurn = (ctx) => ctx.history.filter((msg) => msg.role === "user").length;
1877
// Report whether the named tool was called during the current turn
// (i.e. after the most recent user message).
// NOTE(review): the loop inspects ctx.lastResponse?.tool_calls for every
// assistant message it encounters, not that message's own tool_calls —
// so the answer is effectively "did the LAST response call the tool",
// gated on an assistant message existing after the last user message.
// Looks like it should read msg.tool_calls; confirm intent before changing.
var wasToolUsedInCurrentTurn = (ctx, toolName) => {
  // Locate the most recent user message; everything after it is the
  // current turn.
  let lastUserIndex = -1;
  for (let i = ctx.history.length - 1; i >= 0; i--) {
    if (ctx.history[i].role === "user") {
      lastUserIndex = i;
      break;
    }
  }
  if (lastUserIndex === -1) return false;
  for (let i = lastUserIndex + 1; i < ctx.history.length; i++) {
    const msg = ctx.history[i];
    if (msg.role === "assistant" && ctx.lastResponse?.tool_calls) {
      return ctx.lastResponse.tool_calls.some(
        (call) => call.function.name === toolName
      );
    }
  }
  return false;
};
1896
// Predicate factory: true when the last response contains a tool call with
// the given name. Always returns a boolean.
var toolWasCalled = (name) => (ctx) => {
  const calls = ctx.lastResponse?.tool_calls;
  if (!calls) return false;
  return calls.some((call) => call.function.name === name);
};
1899
-
1900
- // src/composition/tap.ts
1901
// Side-effect step: awaits `fn(ctx)` for its effects and passes the context
// through unchanged.
var tap = (fn) => async (ctx) => {
  await fn(ctx);
  return ctx;
};
1907
-
1908
- // src/composition/retry.ts
1909
// Wraps `step` so it is attempted up to `times` times; the last error is
// rethrown if every attempt fails.
// Fixes: with `times <= 0` the original loop never ran and `throw err`
// threw a bare `undefined` — now a clear RangeError is raised instead.
var retry = ({ times = 3 } = {}, step) => {
  return async (ctx) => {
    let lastError;
    for (let attempt = 0; attempt < times; attempt++) {
      try {
        return await step(ctx);
      } catch (err) {
        lastError = err;
      }
    }
    if (lastError === undefined) {
      throw new RangeError(`retry: times must be >= 1 (got ${times})`);
    }
    throw lastError;
  };
};
1922
-
1923
- // src/composition/compose.ts
1924
// Derives `lastRequest` (the most recent user message, or undefined) without
// copying/reversing the whole history.
var enrichContext = (ctx) => {
  let lastRequest;
  for (let i = ctx.history.length - 1; i >= 0; i--) {
    if (ctx.history[i].role === "user") {
      lastRequest = ctx.history[i];
      break;
    }
  }
  return { ...ctx, lastRequest };
};
1931
// Composes steps into a single async pipeline. Accepts either a context
// object or a bare string (treated as a fresh conversation with one user
// message); a nullish argument yields an empty context. The context is
// re-enriched (lastRequest recomputed) before every step.
var compose = (...steps) => {
  const emptyContext = () => ({
    history: [],
    tools: [],
    toolExecutors: {},
    toolLimits: {},
    toolCallCounts: {}
  });
  return async (ctxOrMessage) => {
    let ctx;
    if (typeof ctxOrMessage === "string") {
      ctx = {
        ...emptyContext(),
        history: [{ role: "user", content: ctxOrMessage }]
      };
    } else {
      ctx = ctxOrMessage || emptyContext();
    }
    ctx = enrichContext(ctx);
    for (const step of steps) {
      ctx = await step(enrichContext(ctx));
    }
    return ctx;
  };
};
1958
-
1959
- // src/composition/scope.ts
1960
// Builds a fresh context for a scoped sub-pipeline, copying selected pieces
// of the parent `ctx` according to the `config.inherit` bitmask
// (1 = Conversation, 2 = Tools; default is Conversation only).
var scopeContext = (config, ctx) => {
  const inherit = config.inherit ?? 1 /* Conversation */;
  // Start from an empty context; inherited/overridden fields are layered on.
  let scopedCtx = {
    history: [],
    tools: [],
    toolExecutors: {},
    toolLimits: {},
    toolCallCounts: {}
  };
  if (inherit & 1 /* Conversation */) {
    // NOTE(review): the history array is shared with the parent, not copied —
    // presumably downstream steps replace rather than mutate it; confirm.
    scopedCtx.history = ctx.history;
    scopedCtx.lastResponse = ctx.lastResponse;
    scopedCtx.lastRequest = ctx.lastRequest;
  }
  if (inherit & 2 /* Tools */) {
    // Tool state is shallow-copied so the scope can add/remove tools freely.
    scopedCtx.tools = [...ctx.tools || []];
    scopedCtx.toolExecutors = { ...ctx.toolExecutors || {} };
    scopedCtx.toolLimits = { ...ctx.toolLimits || {} };
    scopedCtx.toolCallCounts = { ...ctx.toolCallCounts || {} };
    scopedCtx.toolConfig = ctx.toolConfig ? { ...ctx.toolConfig } : void 0;
  }
  // Streaming/abort/usage always flow into the scope regardless of `inherit`.
  scopedCtx.stream = ctx.stream;
  scopedCtx.abortSignal = ctx.abortSignal;
  scopedCtx.usage = ctx.usage;
  if (config.tools) {
    // Explicit scope tools REPLACE whatever was inherited above.
    const toolDefinitions = config.tools.map(toolConfigToToolDefinition);
    const toolExecutors = config.tools.reduce(
      (acc, tool) => {
        acc[tool.name] = tool.execute;
        return acc;
      },
      {}
    );
    const toolLimits = config.tools.reduce(
      (acc, tool) => {
        // Only tools with a per-scope call cap get a limit entry.
        if (tool._maxCalls) {
          acc[tool.name] = tool._maxCalls;
        }
        return acc;
      },
      {}
    );
    scopedCtx.tools = toolDefinitions;
    scopedCtx.toolExecutors = toolExecutors;
    scopedCtx.toolLimits = toolLimits;
  }
  if (config.toolConfig) {
    scopedCtx.toolConfig = { ...config.toolConfig };
  }
  if (config.system) {
    // Install the scope's system prompt at the head of the history,
    // replacing an existing leading system message if present.
    const [first, ...rest] = scopedCtx.history;
    if (first?.role === "system") {
      scopedCtx.history = [{ role: "system", content: config.system }, ...rest];
    } else {
      scopedCtx.history = [{ role: "system", content: config.system }, ...scopedCtx.history];
    }
  }
  if (config.stream) {
    scopedCtx.stream = config.stream;
  }
  return scopedCtx;
};
2022
// Runs `steps` inside a scoped context (see scopeContext). With `config.until`
// the pipeline repeats until the predicate accepts the result; otherwise it
// runs once. With `config.silent` the scope's conversation changes are
// discarded on exit — only usage is always propagated back to the parent.
var scope = (config, ...steps) => {
  return async (ctx) => {
    const pipeline = compose(...steps);
    let inner = scopeContext(config, ctx);
    // Always run at least once; keep looping only while `until` rejects.
    do {
      inner = await pipeline(inner);
    } while (config.until && !config.until(inner));
    const expose = !config.silent;
    return {
      ...ctx,
      history: expose ? inner.history : ctx.history,
      lastResponse: expose ? inner.lastResponse : ctx.lastResponse,
      lastRequest: expose ? inner.lastRequest : ctx.lastRequest,
      stopReason: expose ? inner.stopReason : ctx.stopReason,
      usage: inner.usage
    };
  };
};
2042
-
2043
- // src/utils/rateLimited.ts
2044
// Wraps an async function with a token-bucket rate limiter:
//   rps         — sustained calls per second (one token refilled per tick)
//   burst       — bucket capacity (max tokens available at once)
//   concurrency — max calls executing simultaneously
// Calls beyond the limits are queued FIFO. The refill interval is started
// lazily and cleared once the queue drains, so the process can exit.
// Fixes: a synchronously-throwing `fn` escaped `item.fn().then(...)`,
// leaving `inFlight` permanently incremented and the interval running;
// the call is now started via Promise.resolve().then(...) so sync throws
// are routed to the rejection path.
var rateLimited = (config) => (fn) => {
  const { rps, burst, concurrency } = config;
  let tokens = burst; // token bucket, capped at `burst`
  let inFlight = 0; // calls currently executing
  const queue = []; // pending { fn, resolve, reject }
  let intervalId = null;
  const refillTokens = () => {
    tokens = Math.min(tokens + 1, burst);
    processQueue();
  };
  const startInterval = () => {
    if (!intervalId) {
      intervalId = setInterval(refillTokens, 1e3 / rps);
    }
  };
  // Stop ticking once nothing is queued or running.
  const stopInterval = () => {
    if (intervalId && queue.length === 0 && inFlight === 0) {
      clearInterval(intervalId);
      intervalId = null;
    }
  };
  const processQueue = () => {
    while (queue.length > 0 && tokens > 0 && inFlight < concurrency) {
      tokens--;
      inFlight++;
      const item = queue.shift();
      const settle = () => {
        inFlight--;
        processQueue();
        stopInterval();
      };
      // Promise.resolve().then(...) captures synchronous throws from fn too.
      Promise.resolve().then(() => item.fn()).then(
        (result) => {
          settle();
          item.resolve(result);
        },
        (error) => {
          settle();
          item.reject(error);
        }
      );
    }
  };
  return (async (...args) => {
    return new Promise((resolve, reject) => {
      queue.push({
        fn: () => fn(...args),
        resolve,
        reject
      });
      startInterval();
      processQueue();
    });
  });
};
2095
// Annotate the CommonJS export names for ESM import in node:
// (Bundler-emitted dead code: the `0 && (...)` expression never executes;
// it only lets Node's static CJS named-export detection discover the list.
// Do not edit by hand — regenerate via the build.)
0 && (module.exports = {
  IMAGE_EDIT_MODEL_SCHEMA,
  IMAGE_MODEL_SCHEMA,
  Inherit,
  addUsage,
  appendToLastRequest,
  compose,
  convertMCPSchemaToToolSchema,
  convertStandardSchemaToJsonSchema,
  convertStandardSchemaToSchemaProperties,
  createMCPTools,
  embed,
  everyNMessages,
  everyNTokens,
  generateApprovalToken,
  generateImage,
  getDefaultConfig,
  getKey,
  getModelConfig,
  getOrCreateThread,
  isStandardSchema,
  maxCalls,
  model,
  noToolsCalled,
  normalizeSchema,
  onApprovalRequested,
  onApprovalResolved,
  parseModelName,
  rateLimited,
  removeApprovalListener,
  requestApproval,
  resolveApproval,
  retry,
  scope,
  setKeys,
  tap,
  toolConfigToToolDefinition,
  toolNotUsedInNTurns,
  toolWasCalled,
  when
});
//# sourceMappingURL=index.cjs.map