@threaded/ai 1.0.29 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +240 -0
- package/dist/approval.d.ts +18 -0
- package/dist/approval.d.ts.map +1 -0
- package/dist/approval.js +35 -0
- package/dist/approval.js.map +1 -0
- package/dist/composition/compose.d.ts +3 -0
- package/dist/composition/compose.d.ts.map +1 -0
- package/dist/composition/compose.js +38 -0
- package/dist/composition/compose.js.map +1 -0
- package/dist/composition/model.d.ts +9 -0
- package/dist/composition/model.d.ts.map +1 -0
- package/dist/composition/model.js +192 -0
- package/dist/composition/model.js.map +1 -0
- package/dist/composition/retry.d.ts +6 -0
- package/dist/composition/retry.d.ts.map +1 -0
- package/dist/composition/retry.js +18 -0
- package/dist/composition/retry.js.map +1 -0
- package/dist/composition/scope.d.ts +3 -0
- package/dist/composition/scope.d.ts.map +1 -0
- package/dist/composition/scope.js +83 -0
- package/dist/composition/scope.js.map +1 -0
- package/dist/composition/tap.d.ts +3 -0
- package/dist/composition/tap.d.ts.map +1 -0
- package/dist/composition/tap.js +7 -0
- package/dist/composition/tap.js.map +1 -0
- package/dist/composition/when.d.ts +3 -0
- package/dist/composition/when.d.ts.map +1 -0
- package/dist/composition/when.js +9 -0
- package/dist/composition/when.js.map +1 -0
- package/dist/embed.d.ts +16 -0
- package/dist/embed.d.ts.map +1 -0
- package/dist/embed.js +72 -0
- package/dist/embed.js.map +1 -0
- package/dist/examples.d.ts +2 -0
- package/dist/examples.d.ts.map +1 -0
- package/dist/examples.js +6 -0
- package/dist/examples.js.map +1 -0
- package/dist/helpers.d.ts +17 -0
- package/dist/helpers.d.ts.map +1 -0
- package/dist/helpers.js +104 -0
- package/dist/helpers.js.map +1 -0
- package/dist/image-model-schema.d.ts +19 -0
- package/dist/image-model-schema.d.ts.map +1 -0
- package/dist/image-model-schema.js +103 -0
- package/dist/image-model-schema.js.map +1 -0
- package/dist/image.d.ts +3 -0
- package/dist/image.d.ts.map +1 -0
- package/dist/image.js +120 -0
- package/dist/image.js.map +1 -0
- package/dist/index.d.ts +18 -350
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +17 -2061
- package/dist/index.js.map +1 -1
- package/dist/mcp.d.ts +3 -0
- package/dist/mcp.d.ts.map +1 -0
- package/dist/mcp.js +29 -0
- package/dist/mcp.js.map +1 -0
- package/dist/providers/anthropic.d.ts +3 -0
- package/dist/providers/anthropic.d.ts.map +1 -0
- package/dist/providers/anthropic.js +226 -0
- package/dist/providers/anthropic.js.map +1 -0
- package/dist/providers/google.d.ts +3 -0
- package/dist/providers/google.d.ts.map +1 -0
- package/dist/providers/google.js +244 -0
- package/dist/providers/google.js.map +1 -0
- package/dist/providers/huggingface.d.ts +3 -0
- package/dist/providers/huggingface.d.ts.map +1 -0
- package/dist/providers/huggingface.js +59 -0
- package/dist/providers/huggingface.js.map +1 -0
- package/dist/providers/index.d.ts +3 -0
- package/dist/providers/index.d.ts.map +1 -0
- package/dist/providers/index.js +29 -0
- package/dist/providers/index.js.map +1 -0
- package/dist/providers/local.d.ts +3 -0
- package/dist/providers/local.d.ts.map +1 -0
- package/dist/providers/local.js +152 -0
- package/dist/providers/local.js.map +1 -0
- package/dist/providers/openai.d.ts +3 -0
- package/dist/providers/openai.d.ts.map +1 -0
- package/dist/providers/openai.js +165 -0
- package/dist/providers/openai.js.map +1 -0
- package/dist/providers/xai.d.ts +3 -0
- package/dist/providers/xai.d.ts.map +1 -0
- package/dist/providers/xai.js +161 -0
- package/dist/providers/xai.js.map +1 -0
- package/dist/schema.d.ts +7 -0
- package/dist/schema.d.ts.map +1 -0
- package/dist/schema.js +44 -0
- package/dist/schema.js.map +1 -0
- package/dist/thread.d.ts +25 -0
- package/dist/thread.d.ts.map +1 -0
- package/dist/thread.js +87 -0
- package/dist/thread.js.map +1 -0
- package/dist/types.d.ts +193 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +8 -0
- package/dist/types.js.map +1 -0
- package/dist/utils/rateLimited.d.ts +27 -0
- package/dist/utils/rateLimited.d.ts.map +1 -0
- package/dist/utils/rateLimited.js +74 -0
- package/dist/utils/rateLimited.js.map +1 -0
- package/dist/utils.d.ts +8 -0
- package/dist/utils.d.ts.map +1 -0
- package/dist/utils.js +78 -0
- package/dist/utils.js.map +1 -0
- package/package.json +34 -12
- package/.claude/settings.local.json +0 -15
- package/.lore +0 -65
- package/dist/index.cjs +0 -2137
- package/dist/index.cjs.map +0 -1
- package/dist/index.d.cts +0 -350
- package/tsconfig.json +0 -29
package/dist/index.js
CHANGED
|
@@ -1,2062 +1,18 @@
|
|
|
1
|
-
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
...prop.enum && { enum: prop.enum },
|
|
19
|
-
...prop.items && { items: convertProperty(prop.items) },
|
|
20
|
-
...prop.properties && {
|
|
21
|
-
properties: Object.fromEntries(
|
|
22
|
-
Object.entries(prop.properties).map(([k, v]) => [k, convertProperty(v)])
|
|
23
|
-
)
|
|
24
|
-
}
|
|
25
|
-
});
|
|
26
|
-
const result = {};
|
|
27
|
-
for (const [key, value] of Object.entries(mcpSchema.properties)) {
|
|
28
|
-
const prop = value;
|
|
29
|
-
result[key] = {
|
|
30
|
-
...convertProperty(prop),
|
|
31
|
-
optional: !mcpSchema.required?.includes(key)
|
|
32
|
-
};
|
|
33
|
-
}
|
|
34
|
-
return result;
|
|
35
|
-
};
|
|
36
|
-
function normalizeSchema(schema, name) {
|
|
37
|
-
if (isStandardSchema(schema)) {
|
|
38
|
-
return convertStandardSchemaToJsonSchema(schema, name);
|
|
39
|
-
}
|
|
40
|
-
return schema;
|
|
41
|
-
}
|
|
42
|
-
var convertStandardSchemaToSchemaProperties = (standardSchema) => {
|
|
43
|
-
const jsonSchema = z.toJSONSchema(standardSchema);
|
|
44
|
-
return convertMCPSchemaToToolSchema(jsonSchema);
|
|
45
|
-
};
|
|
46
|
-
|
|
47
|
-
// src/mcp.ts
|
|
48
|
-
var createMCPTools = async (client) => {
|
|
49
|
-
const serverInfo = client.getServerVersion();
|
|
50
|
-
const serverName = serverInfo?.name;
|
|
51
|
-
if (!serverName) {
|
|
52
|
-
console.error("MCP server has no name? Skipping tool creation.");
|
|
53
|
-
return [];
|
|
54
|
-
}
|
|
55
|
-
const toolsResponse = await client.listTools();
|
|
56
|
-
return toolsResponse.tools.map((mcpTool) => {
|
|
57
|
-
const prefixedName = `${serverName}_${mcpTool.name}`;
|
|
58
|
-
return {
|
|
59
|
-
name: prefixedName,
|
|
60
|
-
description: `[${serverName}] ${mcpTool.description || ""}`,
|
|
61
|
-
schema: convertMCPSchemaToToolSchema(mcpTool.inputSchema),
|
|
62
|
-
execute: async (args) => {
|
|
63
|
-
const result = await client.callTool({
|
|
64
|
-
name: mcpTool.name,
|
|
65
|
-
arguments: args
|
|
66
|
-
});
|
|
67
|
-
return result.content && Array.isArray(result.content) && result.content[0]?.text || JSON.stringify(result);
|
|
68
|
-
}
|
|
69
|
-
};
|
|
70
|
-
});
|
|
71
|
-
};
|
|
72
|
-
|
|
73
|
-
// src/types.ts
|
|
74
|
-
var Inherit = /* @__PURE__ */ ((Inherit2) => {
|
|
75
|
-
Inherit2[Inherit2["Nothing"] = 0] = "Nothing";
|
|
76
|
-
Inherit2[Inherit2["Conversation"] = 1] = "Conversation";
|
|
77
|
-
Inherit2[Inherit2["Tools"] = 2] = "Tools";
|
|
78
|
-
Inherit2[Inherit2["All"] = 3] = "All";
|
|
79
|
-
return Inherit2;
|
|
80
|
-
})(Inherit || {});
|
|
81
|
-
|
|
82
|
-
// src/utils.ts
|
|
83
|
-
var toolConfigToToolDefinition = (tool) => {
|
|
84
|
-
const schema = isStandardSchema(tool.schema) ? convertStandardSchemaToSchemaProperties(tool.schema) : tool.schema;
|
|
85
|
-
const properties = {};
|
|
86
|
-
const required = [];
|
|
87
|
-
for (const [key, prop] of Object.entries(schema)) {
|
|
88
|
-
properties[key] = convertSchemaProperty(prop);
|
|
89
|
-
if (!prop.optional) {
|
|
90
|
-
required.push(key);
|
|
91
|
-
}
|
|
92
|
-
}
|
|
93
|
-
return {
|
|
94
|
-
type: "function",
|
|
95
|
-
function: {
|
|
96
|
-
name: tool.name,
|
|
97
|
-
description: tool.description,
|
|
98
|
-
parameters: {
|
|
99
|
-
type: "object",
|
|
100
|
-
properties,
|
|
101
|
-
...required.length > 0 && { required }
|
|
102
|
-
}
|
|
103
|
-
}
|
|
104
|
-
};
|
|
105
|
-
};
|
|
106
|
-
var convertSchemaProperty = (prop) => {
|
|
107
|
-
const result = {
|
|
108
|
-
type: prop.type
|
|
109
|
-
};
|
|
110
|
-
if (prop.description) {
|
|
111
|
-
result.description = prop.description;
|
|
112
|
-
}
|
|
113
|
-
if (prop.enum) {
|
|
114
|
-
result.enum = prop.enum;
|
|
115
|
-
}
|
|
116
|
-
if (prop.items) {
|
|
117
|
-
result.items = convertSchemaProperty(prop.items);
|
|
118
|
-
}
|
|
119
|
-
if (prop.properties) {
|
|
120
|
-
result.properties = {};
|
|
121
|
-
for (const [key, childProp] of Object.entries(prop.properties)) {
|
|
122
|
-
result.properties[key] = convertSchemaProperty(childProp);
|
|
123
|
-
}
|
|
124
|
-
}
|
|
125
|
-
return result;
|
|
126
|
-
};
|
|
127
|
-
var parseModelName = (model2) => {
|
|
128
|
-
const parts = model2.split("/");
|
|
129
|
-
if (parts.length === 1) {
|
|
130
|
-
return { provider: "huggingface", model: parts[0] };
|
|
131
|
-
}
|
|
132
|
-
return {
|
|
133
|
-
provider: parts[0],
|
|
134
|
-
model: parts.slice(1).join("/")
|
|
135
|
-
};
|
|
136
|
-
};
|
|
137
|
-
var globalKeys = {};
|
|
138
|
-
var setKeys = (keys) => {
|
|
139
|
-
globalKeys = { ...globalKeys, ...keys };
|
|
140
|
-
};
|
|
141
|
-
var getKey = (provider) => {
|
|
142
|
-
const key = globalKeys[provider.toLowerCase()];
|
|
143
|
-
if (!key) {
|
|
144
|
-
throw new Error(`No API key configured for provider: ${provider}`);
|
|
145
|
-
}
|
|
146
|
-
return key;
|
|
147
|
-
};
|
|
148
|
-
var maxCalls = (toolConfig, maxCalls2) => ({
|
|
149
|
-
...toolConfig,
|
|
150
|
-
_maxCalls: maxCalls2
|
|
151
|
-
});
|
|
152
|
-
var addUsage = (existing, promptTokens, completionTokens, totalTokens) => ({
|
|
153
|
-
promptTokens: (existing?.promptTokens || 0) + promptTokens,
|
|
154
|
-
completionTokens: (existing?.completionTokens || 0) + completionTokens,
|
|
155
|
-
totalTokens: (existing?.totalTokens || 0) + totalTokens
|
|
156
|
-
});
|
|
157
|
-
|
|
158
|
-
// src/embed.ts
|
|
159
|
-
var modelCache = /* @__PURE__ */ new Map();
|
|
160
|
-
var embed = async (model2, text, config) => {
|
|
161
|
-
if (model2.startsWith("openai/")) {
|
|
162
|
-
const modelName = model2.replace("openai/", "");
|
|
163
|
-
const apiKey = getKey("openai") || process.env.OPENAI_API_KEY;
|
|
164
|
-
if (!apiKey) {
|
|
165
|
-
throw new Error("OpenAI API key not found");
|
|
166
|
-
}
|
|
167
|
-
const body = {
|
|
168
|
-
model: modelName,
|
|
169
|
-
input: text
|
|
170
|
-
};
|
|
171
|
-
if (config?.dimensions) {
|
|
172
|
-
body.dimensions = config.dimensions;
|
|
173
|
-
}
|
|
174
|
-
const response = await fetch("https://api.openai.com/v1/embeddings", {
|
|
175
|
-
method: "POST",
|
|
176
|
-
headers: {
|
|
177
|
-
"Content-Type": "application/json",
|
|
178
|
-
Authorization: `Bearer ${apiKey}`
|
|
179
|
-
},
|
|
180
|
-
body: JSON.stringify(body)
|
|
181
|
-
});
|
|
182
|
-
if (!response.ok) {
|
|
183
|
-
const error = await response.text();
|
|
184
|
-
throw new Error(`OpenAI API error: ${error}`);
|
|
185
|
-
}
|
|
186
|
-
const data = await response.json();
|
|
187
|
-
return data.data[0].embedding;
|
|
188
|
-
}
|
|
189
|
-
try {
|
|
190
|
-
const { pipeline } = await import("@huggingface/transformers");
|
|
191
|
-
if (!modelCache.has(model2)) {
|
|
192
|
-
const extractor2 = await pipeline("feature-extraction", model2, {
|
|
193
|
-
dtype: "fp32"
|
|
194
|
-
});
|
|
195
|
-
modelCache.set(model2, extractor2);
|
|
196
|
-
}
|
|
197
|
-
const extractor = modelCache.get(model2);
|
|
198
|
-
const result = await extractor(text, { pooling: "mean", normalize: true });
|
|
199
|
-
return Array.from(result.data);
|
|
200
|
-
} catch (error) {
|
|
201
|
-
throw new Error(
|
|
202
|
-
`huggingface transformers failed to load. install system dependencies or use openai models instead. original error: ${error.message}`
|
|
203
|
-
);
|
|
204
|
-
}
|
|
205
|
-
};
|
|
206
|
-
|
|
207
|
-
// src/image.ts
|
|
208
|
-
var providerKeyEnvVars = {
|
|
209
|
-
openai: "OPENAI_API_KEY",
|
|
210
|
-
xai: "XAI_API_KEY",
|
|
211
|
-
google: "GEMINI_API_KEY"
|
|
212
|
-
};
|
|
213
|
-
var getApiKey = (provider) => {
|
|
214
|
-
try {
|
|
215
|
-
return getKey(provider);
|
|
216
|
-
} catch {
|
|
217
|
-
const envVar = providerKeyEnvVars[provider];
|
|
218
|
-
const key = envVar ? process.env[envVar] || "" : "";
|
|
219
|
-
if (!key) throw new Error(`No API key found for provider: ${provider}`);
|
|
220
|
-
return key;
|
|
221
|
-
}
|
|
222
|
-
};
|
|
223
|
-
var generateOpenAICompatible = async (endpoint, modelName, prompt, apiKey, config) => {
|
|
224
|
-
const isGptImage = modelName.startsWith("gpt-image");
|
|
225
|
-
const body = {
|
|
226
|
-
model: modelName,
|
|
227
|
-
prompt
|
|
228
|
-
};
|
|
229
|
-
if (!isGptImage) {
|
|
230
|
-
body.response_format = config?.responseFormat || "b64_json";
|
|
231
|
-
}
|
|
232
|
-
if (config?.n) body.n = config.n;
|
|
233
|
-
if (config?.size) body.size = config.size;
|
|
234
|
-
if (config?.quality) body.quality = config.quality;
|
|
235
|
-
if (config?.style && !isGptImage) body.style = config.style;
|
|
236
|
-
if (isGptImage) {
|
|
237
|
-
if (config?.outputFormat) body.output_format = config.outputFormat;
|
|
238
|
-
if (config?.outputCompression != null) body.output_compression = config.outputCompression;
|
|
239
|
-
if (config?.background) body.background = config.background;
|
|
240
|
-
if (config?.moderation) body.moderation = config.moderation;
|
|
241
|
-
}
|
|
242
|
-
const response = await fetch(endpoint, {
|
|
243
|
-
method: "POST",
|
|
244
|
-
headers: {
|
|
245
|
-
"Content-Type": "application/json",
|
|
246
|
-
Authorization: `Bearer ${apiKey}`
|
|
247
|
-
},
|
|
248
|
-
body: JSON.stringify(body)
|
|
249
|
-
});
|
|
250
|
-
if (!response.ok) {
|
|
251
|
-
const error = await response.text();
|
|
252
|
-
throw new Error(`API error: ${error}`);
|
|
253
|
-
}
|
|
254
|
-
const data = await response.json();
|
|
255
|
-
const image = data.data[0];
|
|
256
|
-
return {
|
|
257
|
-
data: image.b64_json || image.url,
|
|
258
|
-
revisedPrompt: image.revised_prompt
|
|
259
|
-
};
|
|
260
|
-
};
|
|
261
|
-
var generateGoogle = async (modelName, prompt, apiKey, config) => {
|
|
262
|
-
const endpoint = `https://generativelanguage.googleapis.com/v1beta/models/${modelName}:generateContent`;
|
|
263
|
-
const body = {
|
|
264
|
-
contents: [{ parts: [{ text: prompt }] }],
|
|
265
|
-
generationConfig: {
|
|
266
|
-
responseModalities: ["TEXT", "IMAGE"]
|
|
267
|
-
}
|
|
268
|
-
};
|
|
269
|
-
const imageConfig = {};
|
|
270
|
-
if (config?.aspectRatio) imageConfig.aspectRatio = config.aspectRatio;
|
|
271
|
-
if (config?.imageSize) imageConfig.imageSize = config.imageSize;
|
|
272
|
-
if (Object.keys(imageConfig).length > 0) {
|
|
273
|
-
body.generationConfig.imageConfig = imageConfig;
|
|
274
|
-
}
|
|
275
|
-
const response = await fetch(endpoint, {
|
|
276
|
-
method: "POST",
|
|
277
|
-
headers: {
|
|
278
|
-
"Content-Type": "application/json",
|
|
279
|
-
"x-goog-api-key": apiKey
|
|
280
|
-
},
|
|
281
|
-
body: JSON.stringify(body)
|
|
282
|
-
});
|
|
283
|
-
if (!response.ok) {
|
|
284
|
-
const error = await response.text();
|
|
285
|
-
throw new Error(`Google API error: ${error}`);
|
|
286
|
-
}
|
|
287
|
-
const data = await response.json();
|
|
288
|
-
const parts = data.candidates?.[0]?.content?.parts || [];
|
|
289
|
-
const imagePart = parts.find((p) => p.inlineData);
|
|
290
|
-
const textPart = parts.find((p) => p.text);
|
|
291
|
-
if (!imagePart?.inlineData?.data) {
|
|
292
|
-
throw new Error("No image data in response");
|
|
293
|
-
}
|
|
294
|
-
return {
|
|
295
|
-
data: imagePart.inlineData.data,
|
|
296
|
-
revisedPrompt: textPart?.text
|
|
297
|
-
};
|
|
298
|
-
};
|
|
299
|
-
var generateImage = async (model2, prompt, config) => {
|
|
300
|
-
const { provider, model: modelName } = parseModelName(model2);
|
|
301
|
-
const providerLower = provider.toLowerCase();
|
|
302
|
-
const apiKey = getApiKey(providerLower);
|
|
303
|
-
switch (providerLower) {
|
|
304
|
-
case "openai":
|
|
305
|
-
return generateOpenAICompatible(
|
|
306
|
-
"https://api.openai.com/v1/images/generations",
|
|
307
|
-
modelName,
|
|
308
|
-
prompt,
|
|
309
|
-
apiKey,
|
|
310
|
-
config
|
|
311
|
-
);
|
|
312
|
-
case "xai":
|
|
313
|
-
return generateOpenAICompatible(
|
|
314
|
-
"https://api.x.ai/v1/images/generations",
|
|
315
|
-
modelName,
|
|
316
|
-
prompt,
|
|
317
|
-
apiKey,
|
|
318
|
-
config
|
|
319
|
-
);
|
|
320
|
-
case "google":
|
|
321
|
-
return generateGoogle(modelName, prompt, apiKey, config);
|
|
322
|
-
default:
|
|
323
|
-
throw new Error(`Unsupported image generation provider: ${provider}`);
|
|
324
|
-
}
|
|
325
|
-
};
|
|
326
|
-
|
|
327
|
-
// src/image-model-schema.ts
|
|
328
|
-
var IMAGE_MODEL_SCHEMA = {
|
|
329
|
-
openai: {
|
|
330
|
-
"dall-e-3": {
|
|
331
|
-
size: {
|
|
332
|
-
values: ["1024x1024", "1024x1792", "1792x1024"],
|
|
333
|
-
default: "1024x1024",
|
|
334
|
-
description: "Image dimensions"
|
|
335
|
-
},
|
|
336
|
-
quality: {
|
|
337
|
-
values: ["standard", "hd"],
|
|
338
|
-
default: "standard",
|
|
339
|
-
description: "Image quality level"
|
|
340
|
-
},
|
|
341
|
-
style: {
|
|
342
|
-
values: ["vivid", "natural"],
|
|
343
|
-
default: "vivid",
|
|
344
|
-
description: "Image style"
|
|
345
|
-
}
|
|
346
|
-
},
|
|
347
|
-
"gpt-image-1.5": {
|
|
348
|
-
size: {
|
|
349
|
-
values: ["1024x1024", "1536x1024", "1024x1536", "auto"],
|
|
350
|
-
default: "auto",
|
|
351
|
-
description: "Image dimensions"
|
|
352
|
-
},
|
|
353
|
-
quality: {
|
|
354
|
-
values: ["low", "medium", "high", "auto"],
|
|
355
|
-
default: "auto",
|
|
356
|
-
description: "Image quality level"
|
|
357
|
-
},
|
|
358
|
-
background: {
|
|
359
|
-
values: ["transparent", "opaque", "auto"],
|
|
360
|
-
default: "auto",
|
|
361
|
-
description: "Background type"
|
|
362
|
-
},
|
|
363
|
-
moderation: {
|
|
364
|
-
values: ["auto", "low"],
|
|
365
|
-
default: "auto",
|
|
366
|
-
description: "Content moderation level"
|
|
367
|
-
}
|
|
368
|
-
}
|
|
369
|
-
},
|
|
370
|
-
google: {
|
|
371
|
-
"gemini-2.5-flash-image": {
|
|
372
|
-
aspectRatio: {
|
|
373
|
-
values: ["1:1", "3:4", "4:3", "9:16", "16:9"],
|
|
374
|
-
default: "1:1",
|
|
375
|
-
description: "Image aspect ratio"
|
|
376
|
-
}
|
|
377
|
-
},
|
|
378
|
-
"gemini-3-pro-image-preview": {
|
|
379
|
-
aspectRatio: {
|
|
380
|
-
values: ["1:1", "3:4", "4:3", "9:16", "16:9"],
|
|
381
|
-
default: "1:1",
|
|
382
|
-
description: "Image aspect ratio"
|
|
383
|
-
},
|
|
384
|
-
imageSize: {
|
|
385
|
-
values: ["1K", "2K"],
|
|
386
|
-
default: "1K",
|
|
387
|
-
description: "Output image size"
|
|
388
|
-
}
|
|
389
|
-
}
|
|
390
|
-
},
|
|
391
|
-
xai: {}
|
|
392
|
-
};
|
|
393
|
-
var IMAGE_EDIT_MODEL_SCHEMA = {
|
|
394
|
-
openai: {
|
|
395
|
-
"gpt-image-1.5": {
|
|
396
|
-
size: {
|
|
397
|
-
values: ["1024x1024", "1536x1024", "1024x1536", "auto"],
|
|
398
|
-
default: "auto",
|
|
399
|
-
description: "Output image size"
|
|
400
|
-
},
|
|
401
|
-
quality: {
|
|
402
|
-
values: ["low", "medium", "high", "auto"],
|
|
403
|
-
default: "auto",
|
|
404
|
-
description: "Image quality level"
|
|
405
|
-
},
|
|
406
|
-
background: {
|
|
407
|
-
values: ["transparent", "opaque", "auto"],
|
|
408
|
-
default: "auto",
|
|
409
|
-
description: "Background type"
|
|
410
|
-
}
|
|
411
|
-
}
|
|
412
|
-
},
|
|
413
|
-
google: {
|
|
414
|
-
"gemini-3-pro-image-preview": {}
|
|
415
|
-
}
|
|
416
|
-
};
|
|
417
|
-
function getModelConfig(provider, model2) {
|
|
418
|
-
return IMAGE_MODEL_SCHEMA[provider]?.[model2] || null;
|
|
419
|
-
}
|
|
420
|
-
function getDefaultConfig(provider, model2) {
|
|
421
|
-
const schema = getModelConfig(provider, model2);
|
|
422
|
-
if (!schema) return {};
|
|
423
|
-
const defaults = {};
|
|
424
|
-
for (const [key, option] of Object.entries(schema)) {
|
|
425
|
-
defaults[key] = option.default;
|
|
426
|
-
}
|
|
427
|
-
return defaults;
|
|
428
|
-
}
|
|
429
|
-
|
|
430
|
-
// src/providers/openai.ts
|
|
431
|
-
var getApiKey2 = (configApiKey) => {
|
|
432
|
-
if (configApiKey) return configApiKey;
|
|
433
|
-
try {
|
|
434
|
-
return getKey("openai");
|
|
435
|
-
} catch {
|
|
436
|
-
const key = process.env.OPENAI_API_KEY || "";
|
|
437
|
-
if (!key) throw new Error("OpenAI API key not found");
|
|
438
|
-
return key;
|
|
439
|
-
}
|
|
440
|
-
};
|
|
441
|
-
var appendToolCalls = (toolCalls, tcchunklist) => {
|
|
442
|
-
for (const tcchunk of tcchunklist) {
|
|
443
|
-
while (toolCalls.length <= tcchunk.index) {
|
|
444
|
-
toolCalls.push({
|
|
445
|
-
id: "",
|
|
446
|
-
type: "function",
|
|
447
|
-
function: { name: "", arguments: "" }
|
|
448
|
-
});
|
|
449
|
-
}
|
|
450
|
-
const tc = toolCalls[tcchunk.index];
|
|
451
|
-
tc.id += tcchunk.id || "";
|
|
452
|
-
tc.function.name += tcchunk.function?.name || "";
|
|
453
|
-
tc.function.arguments += tcchunk.function?.arguments || "";
|
|
454
|
-
}
|
|
455
|
-
return toolCalls;
|
|
456
|
-
};
|
|
457
|
-
var callOpenAI = async (config, ctx) => {
|
|
458
|
-
const { model: model2, instructions, schema, apiKey: configApiKey } = config;
|
|
459
|
-
const apiKey = getApiKey2(configApiKey);
|
|
460
|
-
const messages = [];
|
|
461
|
-
if (instructions) {
|
|
462
|
-
messages.push({ role: "system", content: instructions });
|
|
463
|
-
}
|
|
464
|
-
messages.push(...ctx.history);
|
|
465
|
-
const body = {
|
|
466
|
-
model: model2,
|
|
467
|
-
messages,
|
|
468
|
-
stream: !!ctx.stream,
|
|
469
|
-
...ctx.stream && { stream_options: { include_usage: true } }
|
|
470
|
-
};
|
|
471
|
-
if (schema) {
|
|
472
|
-
body.response_format = {
|
|
473
|
-
type: "json_schema",
|
|
474
|
-
json_schema: {
|
|
475
|
-
name: schema.name,
|
|
476
|
-
schema: { ...schema.schema, additionalProperties: false },
|
|
477
|
-
strict: true
|
|
478
|
-
}
|
|
479
|
-
};
|
|
480
|
-
}
|
|
481
|
-
if (ctx.tools && ctx.tools.length > 0) {
|
|
482
|
-
body.tools = ctx.tools;
|
|
483
|
-
body.tool_choice = "auto";
|
|
484
|
-
}
|
|
485
|
-
const response = await fetch("https://api.openai.com/v1/chat/completions", {
|
|
486
|
-
method: "POST",
|
|
487
|
-
headers: {
|
|
488
|
-
"Content-Type": "application/json",
|
|
489
|
-
Authorization: `Bearer ${apiKey}`
|
|
490
|
-
},
|
|
491
|
-
body: JSON.stringify(body),
|
|
492
|
-
signal: ctx.abortSignal
|
|
493
|
-
});
|
|
494
|
-
if (!response.ok) {
|
|
495
|
-
const error = await response.text();
|
|
496
|
-
throw new Error(`OpenAI API error: ${error}`);
|
|
497
|
-
}
|
|
498
|
-
if (ctx.stream) {
|
|
499
|
-
return handleOpenAIStream(response, ctx);
|
|
500
|
-
}
|
|
501
|
-
const data = await response.json();
|
|
502
|
-
const choice = data.choices[0];
|
|
503
|
-
const { message } = choice;
|
|
504
|
-
const msg = {
|
|
505
|
-
role: "assistant",
|
|
506
|
-
content: message.content || ""
|
|
507
|
-
};
|
|
508
|
-
if (message.tool_calls) {
|
|
509
|
-
msg.tool_calls = message.tool_calls;
|
|
510
|
-
}
|
|
511
|
-
return {
|
|
512
|
-
...ctx,
|
|
513
|
-
lastResponse: msg,
|
|
514
|
-
history: [...ctx.history, msg],
|
|
515
|
-
usage: addUsage(ctx.usage, data.usage?.prompt_tokens || 0, data.usage?.completion_tokens || 0, data.usage?.total_tokens || 0)
|
|
516
|
-
};
|
|
517
|
-
};
|
|
518
|
-
var handleOpenAIStream = async (response, ctx) => {
|
|
519
|
-
const reader = response.body.getReader();
|
|
520
|
-
const decoder = new TextDecoder();
|
|
521
|
-
let fullContent = "";
|
|
522
|
-
let toolCalls = [];
|
|
523
|
-
let buffer = "";
|
|
524
|
-
let streamUsage = null;
|
|
525
|
-
try {
|
|
526
|
-
while (true) {
|
|
527
|
-
if (ctx.abortSignal?.aborted) {
|
|
528
|
-
break;
|
|
529
|
-
}
|
|
530
|
-
const { done, value } = await reader.read();
|
|
531
|
-
if (done) break;
|
|
532
|
-
buffer += decoder.decode(value, { stream: true });
|
|
533
|
-
const lines = buffer.split("\n");
|
|
534
|
-
buffer = lines.pop() || "";
|
|
535
|
-
for (const line of lines) {
|
|
536
|
-
if (line.startsWith("data: ")) {
|
|
537
|
-
const data = line.slice(6).trim();
|
|
538
|
-
if (data === "[DONE]") continue;
|
|
539
|
-
if (!data) continue;
|
|
540
|
-
try {
|
|
541
|
-
const parsed = JSON.parse(data);
|
|
542
|
-
if (parsed.usage) {
|
|
543
|
-
streamUsage = parsed.usage;
|
|
544
|
-
}
|
|
545
|
-
const delta = parsed.choices?.[0]?.delta;
|
|
546
|
-
if (delta?.content) {
|
|
547
|
-
fullContent += delta.content;
|
|
548
|
-
if (ctx.stream) {
|
|
549
|
-
ctx.stream({ type: "content", content: delta.content });
|
|
550
|
-
}
|
|
551
|
-
}
|
|
552
|
-
if (delta?.tool_calls) {
|
|
553
|
-
toolCalls = appendToolCalls(toolCalls, delta.tool_calls);
|
|
554
|
-
}
|
|
555
|
-
} catch (e) {
|
|
556
|
-
}
|
|
557
|
-
}
|
|
558
|
-
}
|
|
559
|
-
}
|
|
560
|
-
} finally {
|
|
561
|
-
reader.releaseLock();
|
|
562
|
-
}
|
|
563
|
-
const msg = {
|
|
564
|
-
role: "assistant",
|
|
565
|
-
content: fullContent
|
|
566
|
-
};
|
|
567
|
-
if (toolCalls.length > 0) {
|
|
568
|
-
msg.tool_calls = toolCalls;
|
|
569
|
-
}
|
|
570
|
-
const usage = addUsage(ctx.usage, streamUsage?.prompt_tokens || 0, streamUsage?.completion_tokens || 0, streamUsage?.total_tokens || 0);
|
|
571
|
-
if (ctx.stream && streamUsage) {
|
|
572
|
-
ctx.stream({ type: "usage", usage });
|
|
573
|
-
}
|
|
574
|
-
return {
|
|
575
|
-
...ctx,
|
|
576
|
-
lastResponse: msg,
|
|
577
|
-
history: [...ctx.history, msg],
|
|
578
|
-
usage
|
|
579
|
-
};
|
|
580
|
-
};
|
|
581
|
-
|
|
582
|
-
// src/providers/anthropic.ts
|
|
583
|
-
var getApiKey3 = (configApiKey) => {
|
|
584
|
-
if (configApiKey) return configApiKey;
|
|
585
|
-
try {
|
|
586
|
-
return getKey("anthropic");
|
|
587
|
-
} catch {
|
|
588
|
-
const key = process.env.ANTHROPIC_API_KEY || "";
|
|
589
|
-
if (!key) throw new Error("Anthropic API key not found");
|
|
590
|
-
return key;
|
|
591
|
-
}
|
|
592
|
-
};
|
|
593
|
-
var convertToAnthropicFormat = (messages) => {
|
|
594
|
-
const result = [];
|
|
595
|
-
let i = 0;
|
|
596
|
-
while (i < messages.length) {
|
|
597
|
-
const msg = messages[i];
|
|
598
|
-
if (msg.role === "system") {
|
|
599
|
-
i++;
|
|
600
|
-
continue;
|
|
601
|
-
}
|
|
602
|
-
if (msg.role === "assistant") {
|
|
603
|
-
if (msg.tool_calls) {
|
|
604
|
-
result.push({
|
|
605
|
-
role: "assistant",
|
|
606
|
-
content: msg.tool_calls.map((tc) => ({
|
|
607
|
-
type: "tool_use",
|
|
608
|
-
id: tc.id,
|
|
609
|
-
name: tc.function.name,
|
|
610
|
-
input: JSON.parse(tc.function.arguments)
|
|
611
|
-
}))
|
|
612
|
-
});
|
|
613
|
-
} else {
|
|
614
|
-
result.push({
|
|
615
|
-
role: "assistant",
|
|
616
|
-
content: msg.content
|
|
617
|
-
});
|
|
618
|
-
}
|
|
619
|
-
i++;
|
|
620
|
-
} else if (msg.role === "tool") {
|
|
621
|
-
const toolResults = [];
|
|
622
|
-
while (i < messages.length && messages[i].role === "tool") {
|
|
623
|
-
const toolMsg = messages[i];
|
|
624
|
-
toolResults.push({
|
|
625
|
-
type: "tool_result",
|
|
626
|
-
tool_use_id: toolMsg.tool_call_id,
|
|
627
|
-
content: toolMsg.content
|
|
628
|
-
});
|
|
629
|
-
i++;
|
|
630
|
-
}
|
|
631
|
-
result.push({
|
|
632
|
-
role: "user",
|
|
633
|
-
content: toolResults
|
|
634
|
-
});
|
|
635
|
-
} else {
|
|
636
|
-
result.push(msg);
|
|
637
|
-
i++;
|
|
638
|
-
}
|
|
639
|
-
}
|
|
640
|
-
return result;
|
|
641
|
-
};
|
|
642
|
-
var callAnthropic = async (config, ctx) => {
|
|
643
|
-
const { model: model2, instructions, schema, apiKey: configApiKey } = config;
|
|
644
|
-
const apiKey = getApiKey3(configApiKey);
|
|
645
|
-
let system = instructions;
|
|
646
|
-
if (ctx.history[0]?.role === "system") {
|
|
647
|
-
system = ctx.history[0].content;
|
|
648
|
-
}
|
|
649
|
-
const messages = convertToAnthropicFormat(ctx.history);
|
|
650
|
-
if (schema) {
|
|
651
|
-
const schemaPrompt = `
|
|
652
|
-
|
|
653
|
-
You must respond with valid JSON that matches this schema:
|
|
654
|
-
${JSON.stringify(
|
|
655
|
-
schema.schema,
|
|
656
|
-
null,
|
|
657
|
-
2
|
|
658
|
-
)}
|
|
659
|
-
|
|
660
|
-
Return only the JSON object, no other text or formatting.`;
|
|
661
|
-
system = system ? system + schemaPrompt : schemaPrompt.slice(2);
|
|
662
|
-
}
|
|
663
|
-
const body = {
|
|
664
|
-
model: model2,
|
|
665
|
-
messages,
|
|
666
|
-
max_tokens: 4096,
|
|
667
|
-
stream: !!ctx.stream
|
|
668
|
-
};
|
|
669
|
-
if (system) {
|
|
670
|
-
body.system = system;
|
|
671
|
-
}
|
|
672
|
-
if (ctx.tools && ctx.tools.length > 0) {
|
|
673
|
-
body.tools = ctx.tools.map((tool) => ({
|
|
674
|
-
name: tool.function.name,
|
|
675
|
-
description: tool.function.description,
|
|
676
|
-
input_schema: tool.function.parameters
|
|
677
|
-
}));
|
|
678
|
-
}
|
|
679
|
-
const response = await fetch("https://api.anthropic.com/v1/messages", {
|
|
680
|
-
method: "POST",
|
|
681
|
-
headers: {
|
|
682
|
-
"Content-Type": "application/json",
|
|
683
|
-
"x-api-key": apiKey,
|
|
684
|
-
"anthropic-version": "2023-06-01"
|
|
685
|
-
},
|
|
686
|
-
body: JSON.stringify(body),
|
|
687
|
-
signal: ctx.abortSignal
|
|
688
|
-
});
|
|
689
|
-
if (!response.ok) {
|
|
690
|
-
const error = await response.text();
|
|
691
|
-
throw new Error(`Anthropic API error: ${error}`);
|
|
692
|
-
}
|
|
693
|
-
if (ctx.stream) {
|
|
694
|
-
return handleAnthropicStream(response, ctx);
|
|
695
|
-
}
|
|
696
|
-
const data = await response.json();
|
|
697
|
-
const content = data.content[0];
|
|
698
|
-
const msg = {
|
|
699
|
-
role: "assistant",
|
|
700
|
-
content: content.type === "text" ? content.text : ""
|
|
701
|
-
};
|
|
702
|
-
if (content.type === "tool_use") {
|
|
703
|
-
msg.tool_calls = [
|
|
704
|
-
{
|
|
705
|
-
id: content.id,
|
|
706
|
-
type: "function",
|
|
707
|
-
function: {
|
|
708
|
-
name: content.name,
|
|
709
|
-
arguments: JSON.stringify(content.input)
|
|
710
|
-
}
|
|
711
|
-
}
|
|
712
|
-
];
|
|
713
|
-
}
|
|
714
|
-
const inputTokens = data.usage?.input_tokens || 0;
|
|
715
|
-
const outputTokens = data.usage?.output_tokens || 0;
|
|
716
|
-
return {
|
|
717
|
-
...ctx,
|
|
718
|
-
lastResponse: msg,
|
|
719
|
-
history: [...ctx.history, msg],
|
|
720
|
-
usage: addUsage(ctx.usage, inputTokens, outputTokens, inputTokens + outputTokens)
|
|
721
|
-
};
|
|
722
|
-
};
|
|
723
|
-
// Consume an Anthropic SSE (server-sent events) response body line by line,
// forwarding text deltas to ctx.stream as they arrive and accumulating any
// tool_use blocks. Returns a new context with the final assistant message
// appended to history and usage totals updated.
var handleAnthropicStream = async (response, ctx) => {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let fullContent = "";
  const toolCalls = [];
  // Carries any partial SSE line left over between network chunks.
  let buffer = "";
  let inputTokens = 0;
  let outputTokens = 0;
  try {
    while (true) {
      if (ctx.abortSignal?.aborted) {
        break;
      }
      const { done, value } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split("\n");
      // Last element may be an incomplete line; keep it for the next chunk.
      buffer = lines.pop() || "";
      for (const line of lines) {
        if (line.startsWith("data: ")) {
          const data = line.slice(6).trim();
          if (!data) continue;
          try {
            const parsed = JSON.parse(data);
            // message_start carries the prompt-side token count.
            if (parsed.type === "message_start" && parsed.message?.usage) {
              inputTokens = parsed.message.usage.input_tokens || 0;
            }
            // message_delta carries the running completion-side token count.
            if (parsed.type === "message_delta" && parsed.usage) {
              outputTokens = parsed.usage.output_tokens || 0;
            }
            if (parsed.type === "content_block_delta" && parsed.delta?.text) {
              fullContent += parsed.delta.text;
              if (ctx.stream) {
                ctx.stream({ type: "content", content: parsed.delta.text });
              }
            }
            // A new tool call starts; remember its block index so later
            // input_json_delta events can be routed to it.
            if (parsed.type === "content_block_start" && parsed.content_block?.type === "tool_use") {
              const toolUse = parsed.content_block;
              toolCalls.push({
                id: toolUse.id,
                type: "function",
                function: {
                  name: toolUse.name,
                  arguments: ""
                },
                index: parsed.index
              });
            }
            // Tool-call arguments stream in as JSON text fragments.
            if (parsed.type === "content_block_delta" && parsed.delta?.type === "input_json_delta") {
              const toolCall = toolCalls.find((tc) => tc.index === parsed.index);
              if (toolCall) {
                toolCall.function.arguments += parsed.delta.partial_json;
              }
            }
          } catch (e) {
            // Best-effort: skip SSE data lines that are not valid JSON.
          }
        }
      }
    }
  } finally {
    reader.releaseLock();
  }
  const msg = {
    role: "assistant",
    content: fullContent
  };
  if (toolCalls.length > 0) {
    // Strip the internal `index` routing field before exposing tool calls.
    msg.tool_calls = toolCalls.map(({ index, ...tc }) => tc);
  }
  const usage = addUsage(ctx.usage, inputTokens, outputTokens, inputTokens + outputTokens);
  if (ctx.stream && (inputTokens || outputTokens)) {
    ctx.stream({ type: "usage", usage });
  }
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg],
    usage
  };
};
|
|
803
|
-
|
|
804
|
-
// src/providers/google.ts
|
|
805
|
-
// Resolve the Google API key: explicit config value first, then the shared
// key store, then environment variables. Throws when nothing is configured.
var getApiKey4 = (configApiKey) => {
  if (configApiKey) {
    return configApiKey;
  }
  try {
    return getKey("google");
  } catch {
    const envKey = process.env.GEMINI_API_KEY || process.env.GOOGLE_AI_API_KEY;
    if (envKey) {
      return envKey;
    }
    throw new Error("Google API key not found");
  }
};
|
|
815
|
-
// Call Google's Gemini generateContent REST API, translating the internal
// OpenAI-style chat history into Gemini "contents" format. Handles tool
// declarations, streaming handoff, and usage accounting. Returns a new
// context with the assistant reply appended.
var callGoogle = async (config, ctx) => {
  const { model: model2, instructions, apiKey: configApiKey } = config;
  const apiKey = getApiKey4(configApiKey);
  const contents = [];
  // Gemini v1beta has no dedicated system role here; emulate a system prompt
  // with a user/model exchange at the start of the conversation.
  if (instructions) {
    contents.push({
      role: "user",
      parts: [{ text: instructions }]
    });
    contents.push({
      role: "model",
      parts: [{ text: "I understand." }]
    });
  }
  // Maps tool_call_id -> function name, needed because Gemini function
  // responses are keyed by name, not by call id.
  const toolCallMap = /* @__PURE__ */ new Map();
  for (let i = 0; i < ctx.history.length; i++) {
    const msg2 = ctx.history[i];
    if (msg2.role === "assistant") {
      const parts2 = [];
      if (msg2.content) {
        parts2.push({ text: msg2.content });
      }
      if (msg2.tool_calls?.length) {
        for (const tc of msg2.tool_calls) {
          toolCallMap.set(tc.id, tc.function.name);
          const part = {
            functionCall: {
              name: tc.function.name,
              // NOTE(review): throws if stored arguments are not valid JSON
              // — presumably guaranteed upstream; confirm.
              args: JSON.parse(tc.function.arguments)
            }
          };
          // Preserve Gemini thought signatures across turns when present.
          if (tc.thoughtSignature) {
            part.thoughtSignature = tc.thoughtSignature;
          }
          parts2.push(part);
        }
      }
      if (parts2.length > 0) {
        contents.push({ role: "model", parts: parts2 });
      }
    } else if (msg2.role === "tool") {
      // Collapse a consecutive run of tool messages into one user turn of
      // functionResponse parts, as Gemini expects.
      const responseParts = [];
      while (i < ctx.history.length && ctx.history[i].role === "tool") {
        const toolMsg = ctx.history[i];
        const functionName = toolCallMap.get(toolMsg.tool_call_id);
        if (functionName) {
          let responseData;
          try {
            responseData = JSON.parse(toolMsg.content);
          } catch {
            // Non-JSON tool output is wrapped so the response is an object.
            responseData = { result: toolMsg.content };
          }
          // Gemini requires an object, not a bare array, as the response.
          if (Array.isArray(responseData)) {
            responseData = { result: responseData };
          }
          responseParts.push({
            functionResponse: {
              name: functionName,
              response: responseData
            }
          });
        }
        i++;
      }
      // Step back one: the outer for-loop's i++ moves past the run.
      i--;
      if (responseParts.length > 0) {
        contents.push({ role: "user", parts: responseParts });
      }
    } else if (msg2.role === "user") {
      contents.push({
        role: "user",
        parts: [{ text: msg2.content }]
      });
    }
  }
  const body = {
    contents
  };
  if (ctx.tools && ctx.tools.length > 0) {
    body.tools = [
      {
        function_declarations: ctx.tools.map((tool) => ({
          name: tool.function.name,
          description: tool.function.description,
          parameters: tool.function.parameters
        }))
      }
    ];
  }
  // Streaming uses the SSE variant of the endpoint.
  const endpoint = ctx.stream ? "streamGenerateContent" : "generateContent";
  const response = await fetch(
    `https://generativelanguage.googleapis.com/v1beta/models/${model2}:${endpoint}?key=${apiKey}${ctx.stream ? "&alt=sse" : ""}`,
    {
      method: "POST",
      headers: {
        "Content-Type": "application/json"
      },
      body: JSON.stringify(body),
      signal: ctx.abortSignal
    }
  );
  if (!response.ok) {
    const error = await response.text();
    throw new Error(`Google API error: ${error}`);
  }
  if (ctx.stream) {
    return handleGoogleStream(response, ctx);
  }
  const data = await response.json();
  const candidate = data.candidates[0];
  const parts = candidate.content.parts || [];
  const msg = {
    role: "assistant",
    content: ""
  };
  const toolCalls = [];
  for (const part of parts) {
    if (part.text) {
      msg.content += part.text;
    }
    if (part.functionCall) {
      const tc = {
        // Gemini does not supply call ids; synthesize a short random one.
        id: Math.random().toString(36).substring(2, 9),
        type: "function",
        function: {
          name: part.functionCall.name,
          arguments: JSON.stringify(part.functionCall.args)
        }
      };
      if (part.thoughtSignature) {
        tc.thoughtSignature = part.thoughtSignature;
      }
      toolCalls.push(tc);
    }
  }
  if (toolCalls.length > 0) {
    msg.tool_calls = toolCalls;
  }
  const um = data.usageMetadata;
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg],
    usage: addUsage(ctx.usage, um?.promptTokenCount || 0, um?.candidatesTokenCount || 0, um?.totalTokenCount || 0)
  };
};
|
|
961
|
-
// Consume a Gemini SSE stream, forwarding text deltas to ctx.stream and
// collecting functionCall parts as tool calls. Returns a new context with
// the final assistant message and usage totals.
var handleGoogleStream = async (response, ctx) => {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let fullContent = "";
  const toolCalls = [];
  // Holds a partial SSE line carried across network chunks.
  let buffer = "";
  // Last usageMetadata seen wins (Gemini repeats it with running totals).
  let usageMetadata = null;
  try {
    while (true) {
      if (ctx.abortSignal?.aborted) {
        break;
      }
      const { done, value } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split("\n");
      buffer = lines.pop() || "";
      for (const line of lines) {
        if (line.startsWith("data: ")) {
          const data = line.slice(6).trim();
          if (!data) continue;
          try {
            const parsed = JSON.parse(data);
            if (parsed.usageMetadata) {
              usageMetadata = parsed.usageMetadata;
            }
            const candidate = parsed.candidates?.[0];
            const parts = candidate?.content?.parts || [];
            for (const part of parts) {
              if (part?.text) {
                fullContent += part.text;
                if (ctx.stream) {
                  ctx.stream({ type: "content", content: part.text });
                }
              }
              if (part?.functionCall) {
                const tc = {
                  // Gemini supplies no call id; synthesize a short random one.
                  id: Math.random().toString(36).substring(2, 9),
                  type: "function",
                  function: {
                    name: part.functionCall.name,
                    arguments: JSON.stringify(part.functionCall.args)
                  }
                };
                if (part.thoughtSignature) {
                  tc.thoughtSignature = part.thoughtSignature;
                }
                toolCalls.push(tc);
              }
            }
          } catch (e) {
            // Best-effort: skip SSE data lines that are not valid JSON.
          }
        }
      }
    }
  } finally {
    reader.releaseLock();
  }
  const msg = {
    role: "assistant",
    content: fullContent
  };
  if (toolCalls.length > 0) {
    msg.tool_calls = toolCalls;
  }
  const um = usageMetadata;
  const usage = addUsage(ctx.usage, um?.promptTokenCount || 0, um?.candidatesTokenCount || 0, um?.totalTokenCount || 0);
  if (ctx.stream && um) {
    ctx.stream({ type: "usage", usage });
  }
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg],
    usage
  };
};
|
|
1038
|
-
|
|
1039
|
-
// src/providers/huggingface.ts
|
|
1040
|
-
// Cache of loaded transformers.js text-generation pipelines, keyed by model id.
var modelCache2 = /* @__PURE__ */ new Map();
// Flatten an optional system prompt plus the chat history into plain
// { role, content } message objects (any extra fields are dropped).
var formatMessages = (instructions, history) => {
  const systemPart = instructions ? [{ role: "system", content: instructions }] : [];
  const turns = history.map(({ role, content }) => ({ role, content }));
  return [...systemPart, ...turns];
};
|
|
1051
|
-
// Run a local transformers.js text-generation pipeline as a provider.
// Pipelines are cached per model id. No real token accounting is available,
// so usage is recorded as zeros. Returns a new context with the reply.
var callHuggingFace = async (config, ctx) => {
  const { model: model2, instructions, schema } = config;
  // Lazy import so the heavy dependency is only loaded when actually used.
  const { pipeline } = await import("@huggingface/transformers");
  if (!modelCache2.has(model2)) {
    const generator2 = await pipeline("text-generation", model2, {
      // 4-bit quantization to keep memory use down.
      dtype: "q4"
    });
    modelCache2.set(model2, generator2);
  }
  const generator = modelCache2.get(model2);
  const messages = formatMessages(instructions, ctx.history);
  // Local models have no structured-output mode; emulate a JSON schema by
  // appending instructions to (or creating) the system message.
  if (schema) {
    const schemaMsg = messages.find((m) => m.role === "system");
    const schemaInstructions = [
      "you must respond with valid JSON matching this schema:",
      JSON.stringify(schema.schema, null, 2),
      "respond ONLY with the JSON object, no other text."
    ].join("\n");
    if (schemaMsg) {
      schemaMsg.content += "\n\n" + schemaInstructions;
    } else {
      messages.unshift({ role: "system", content: schemaInstructions });
    }
  }
  const output = await generator(messages, {
    max_new_tokens: 2048,
    // Greedy decoding for deterministic output.
    do_sample: false
  });
  // The pipeline returns the full conversation; the last entry is the reply.
  const generatedMessages = output[0].generated_text;
  const lastMessage = generatedMessages.at(-1);
  const content = lastMessage?.content || "";
  const msg = {
    role: "assistant",
    content
  };
  // No incremental generation here: emit the whole reply as one chunk.
  if (ctx.stream) {
    ctx.stream({ type: "content", content });
  }
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg],
    usage: addUsage(ctx.usage, 0, 0, 0)
  };
};
|
|
1096
|
-
|
|
1097
|
-
// src/providers/xai.ts
|
|
1098
|
-
// Merge a list of streamed tool-call delta chunks into the accumulated
// toolCalls array (mutated in place and also returned). Chunks address
// entries by `index`; id/name/arguments fragments are concatenated.
var appendToolCalls2 = (toolCalls, tcchunklist) => {
  const emptyCall = () => ({
    id: "",
    type: "function",
    function: { name: "", arguments: "" }
  });
  for (const chunk of tcchunklist) {
    // Grow the accumulator until chunk.index is addressable.
    while (toolCalls.length <= chunk.index) {
      toolCalls.push(emptyCall());
    }
    const target = toolCalls[chunk.index];
    target.id += chunk.id || "";
    target.function.name += chunk.function?.name || "";
    target.function.arguments += chunk.function?.arguments || "";
  }
  return toolCalls;
};
|
|
1114
|
-
// Resolve the xAI API key: explicit config value first, then the shared key
// store, then the XAI_API_KEY environment variable. Throws when absent.
var getApiKey5 = (configApiKey) => {
  if (configApiKey) {
    return configApiKey;
  }
  try {
    return getKey("xai");
  } catch {
    const envKey = process.env.XAI_API_KEY;
    if (envKey) {
      return envKey;
    }
    throw new Error("xAI API key not found");
  }
};
|
|
1124
|
-
// Call the xAI chat-completions endpoint (OpenAI-compatible wire format).
// Builds the request from ctx.history plus optional system instructions,
// JSON-schema response format, and tools; hands off to handleXAIStream when
// streaming. Returns a new context with the assistant reply appended.
var callXAI = async (config, ctx) => {
  const { model: model2, instructions, schema, apiKey: configApiKey } = config;
  const apiKey = getApiKey5(configApiKey);
  const messages = [];
  if (instructions) {
    messages.push({ role: "system", content: instructions });
  }
  messages.push(...ctx.history);
  const body = {
    model: model2,
    messages,
    stream: !!ctx.stream,
    // Ask the API to include usage in the final stream chunk.
    ...ctx.stream && { stream_options: { include_usage: true } }
  };
  if (schema) {
    body.response_format = {
      type: "json_schema",
      json_schema: {
        name: schema.name,
        // strict mode requires additionalProperties: false.
        schema: { ...schema.schema, additionalProperties: false },
        strict: true
      }
    };
  }
  if (ctx.tools && ctx.tools.length > 0) {
    body.tools = ctx.tools;
    body.tool_choice = "auto";
  }
  const response = await fetch("https://api.x.ai/v1/chat/completions", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${apiKey}`
    },
    body: JSON.stringify(body),
    signal: ctx.abortSignal
  });
  if (!response.ok) {
    const error = await response.text();
    throw new Error(`xAI API error: ${error}`);
  }
  if (ctx.stream) {
    return handleXAIStream(response, ctx);
  }
  const data = await response.json();
  const choice = data.choices[0];
  const { message } = choice;
  const msg = {
    role: "assistant",
    content: message.content || ""
  };
  if (message.tool_calls) {
    msg.tool_calls = message.tool_calls;
  }
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg],
    usage: addUsage(ctx.usage, data.usage?.prompt_tokens || 0, data.usage?.completion_tokens || 0, data.usage?.total_tokens || 0)
  };
};
|
|
1185
|
-
// Consume an xAI SSE stream (OpenAI-compatible chunks): forward content
// deltas to ctx.stream, merge tool-call deltas via appendToolCalls2, and
// capture the usage object from the final chunk. Returns a new context.
var handleXAIStream = async (response, ctx) => {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let fullContent = "";
  let toolCalls = [];
  // Holds a partial SSE line carried across network chunks.
  let buffer = "";
  let streamUsage = null;
  try {
    while (true) {
      if (ctx.abortSignal?.aborted) {
        break;
      }
      const { done, value } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split("\n");
      buffer = lines.pop() || "";
      for (const line of lines) {
        if (line.startsWith("data: ")) {
          const data = line.slice(6).trim();
          // Stream terminator sentinel.
          if (data === "[DONE]") continue;
          if (!data) continue;
          try {
            const parsed = JSON.parse(data);
            // Present only on the final chunk (stream_options.include_usage).
            if (parsed.usage) {
              streamUsage = parsed.usage;
            }
            const delta = parsed.choices?.[0]?.delta;
            if (delta?.content) {
              fullContent += delta.content;
              if (ctx.stream) {
                ctx.stream({ type: "content", content: delta.content });
              }
            }
            if (delta?.tool_calls) {
              toolCalls = appendToolCalls2(toolCalls, delta.tool_calls);
            }
          } catch (e) {
            // Best-effort: skip SSE data lines that are not valid JSON.
          }
        }
      }
    }
  } finally {
    reader.releaseLock();
  }
  const msg = {
    role: "assistant",
    content: fullContent
  };
  if (toolCalls.length > 0) {
    msg.tool_calls = toolCalls;
  }
  const usage = addUsage(ctx.usage, streamUsage?.prompt_tokens || 0, streamUsage?.completion_tokens || 0, streamUsage?.total_tokens || 0);
  if (ctx.stream && streamUsage) {
    ctx.stream({ type: "usage", usage });
  }
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg],
    usage
  };
};
|
|
1248
|
-
|
|
1249
|
-
// src/providers/local.ts
|
|
1250
|
-
// Default OpenAI-compatible endpoint (the address Ollama serves locally).
var DEFAULT_BASE_URL = "http://localhost:11434/v1";
// Fold streamed tool-call delta chunks into the toolCalls accumulator
// (mutated in place and returned). Each chunk targets an entry by `index`,
// and its id/name/arguments fragments are appended to that entry.
var appendToolCalls3 = (toolCalls, tcchunklist) => {
  for (const delta of tcchunklist) {
    // Pad with empty slots until delta.index exists.
    while (toolCalls.length <= delta.index) {
      toolCalls.push({ id: "", type: "function", function: { name: "", arguments: "" } });
    }
    const entry = toolCalls[delta.index];
    entry.id += delta.id || "";
    entry.function.name += delta.function?.name || "";
    entry.function.arguments += delta.function?.arguments || "";
  }
  return toolCalls;
};
|
|
1267
|
-
// Call a local OpenAI-compatible server (defaults to Ollama's endpoint).
// Mirrors callXAI's request shape; the Authorization header is only sent
// when an apiKey is configured. Returns a new context with the reply.
var callLocal = async (config, ctx) => {
  const { model: model2, instructions, schema, apiKey, baseUrl } = config;
  const endpoint = baseUrl || DEFAULT_BASE_URL;
  const messages = [];
  if (instructions) {
    messages.push({ role: "system", content: instructions });
  }
  messages.push(...ctx.history);
  const body = {
    model: model2,
    messages,
    stream: !!ctx.stream,
    // Ask the server to include usage in the final stream chunk.
    ...ctx.stream && { stream_options: { include_usage: true } }
  };
  if (schema) {
    body.response_format = {
      type: "json_schema",
      json_schema: {
        name: schema.name,
        // strict mode requires additionalProperties: false.
        schema: { ...schema.schema, additionalProperties: false },
        strict: true
      }
    };
  }
  if (ctx.tools && ctx.tools.length > 0) {
    body.tools = ctx.tools;
    body.tool_choice = "auto";
  }
  const headers = {
    "Content-Type": "application/json"
  };
  // Local servers usually need no auth; only attach the header when given.
  if (apiKey) {
    headers["Authorization"] = `Bearer ${apiKey}`;
  }
  const response = await fetch(`${endpoint}/chat/completions`, {
    method: "POST",
    headers,
    body: JSON.stringify(body),
    signal: ctx.abortSignal
  });
  if (!response.ok) {
    const error = await response.text();
    throw new Error(`Local API error: ${error}`);
  }
  if (ctx.stream) {
    return handleLocalStream(response, ctx);
  }
  const data = await response.json();
  const choice = data.choices[0];
  const { message } = choice;
  const msg = {
    role: "assistant",
    content: message.content || ""
  };
  if (message.tool_calls) {
    msg.tool_calls = message.tool_calls;
  }
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg],
    usage: addUsage(ctx.usage, data.usage?.prompt_tokens || 0, data.usage?.completion_tokens || 0, data.usage?.total_tokens || 0)
  };
};
|
|
1331
|
-
// Consume an SSE stream from a local OpenAI-compatible server: forward
// content deltas to ctx.stream, merge tool-call deltas via appendToolCalls3,
// and capture the final usage chunk. Returns a new context.
var handleLocalStream = async (response, ctx) => {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let fullContent = "";
  let toolCalls = [];
  // Holds a partial SSE line carried across network chunks.
  let buffer = "";
  let streamUsage = null;
  try {
    while (true) {
      if (ctx.abortSignal?.aborted) {
        break;
      }
      const { done, value } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split("\n");
      buffer = lines.pop() || "";
      for (const line of lines) {
        if (line.startsWith("data: ")) {
          const data = line.slice(6).trim();
          // Stream terminator sentinel.
          if (data === "[DONE]") continue;
          if (!data) continue;
          try {
            const parsed = JSON.parse(data);
            // Present only on the final chunk (stream_options.include_usage).
            if (parsed.usage) {
              streamUsage = parsed.usage;
            }
            const delta = parsed.choices?.[0]?.delta;
            if (delta?.content) {
              fullContent += delta.content;
              if (ctx.stream) {
                ctx.stream({ type: "content", content: delta.content });
              }
            }
            if (delta?.tool_calls) {
              toolCalls = appendToolCalls3(toolCalls, delta.tool_calls);
            }
          } catch (e) {
            // Best-effort: skip SSE data lines that are not valid JSON.
          }
        }
      }
    }
  } finally {
    reader.releaseLock();
  }
  const msg = {
    role: "assistant",
    content: fullContent
  };
  if (toolCalls.length > 0) {
    msg.tool_calls = toolCalls;
  }
  const usage = addUsage(ctx.usage, streamUsage?.prompt_tokens || 0, streamUsage?.completion_tokens || 0, streamUsage?.total_tokens || 0);
  if (ctx.stream && streamUsage) {
    ctx.stream({ type: "usage", usage });
  }
  return {
    ...ctx,
    lastResponse: msg,
    history: [...ctx.history, msg],
    usage
  };
};
|
|
1394
|
-
|
|
1395
|
-
// src/providers/index.ts
|
|
1396
|
-
// Route a model call to the matching provider implementation based on the
// "provider/model" prefix parsed from config.model. Unknown prefixes fall
// back to HuggingFace with the ORIGINAL (unstripped) model string, so plain
// HF model ids like "org/model" resolve correctly.
var callProvider = async (config, ctx) => {
  const { provider, model: model2 } = parseModelName(config.model);
  const providerConfig = { ...config, model: model2 };
  const key = provider.toLowerCase();
  if (key === "openai") return callOpenAI(providerConfig, ctx);
  if (key === "anthropic") return callAnthropic(providerConfig, ctx);
  if (key === "google") return callGoogle(providerConfig, ctx);
  if (key === "xai") return callXAI(providerConfig, ctx);
  if (key === "local") return callLocal(providerConfig, ctx);
  if (key === "huggingface") return callHuggingFace(providerConfig, ctx);
  return callHuggingFace({ ...config }, ctx);
};
|
|
1416
|
-
|
|
1417
|
-
// src/approval.ts
|
|
1418
|
-
import { EventEmitter } from "events";
|
|
1419
|
-
// Shared approval state: pending promise resolvers keyed by approval token,
// plus an EventEmitter used as a pub/sub bus for approval lifecycle events.
var state = {
  resolvers: /* @__PURE__ */ new Map(),
  emitter: new EventEmitter()
};
// Build a token unique enough to key one pending approval in this process.
// NOTE: Math.random is not cryptographically secure; the token is only a
// process-local correlation id, not a secret.
var generateApprovalToken = () => {
  return `approval_${Date.now()}_${Math.random().toString(36).substring(2, 9)}`;
};
// Emit "approvalRequested" for the given tool call and return a promise that
// settles when resolveApproval() is called with the matching token.
//
// Fix: the resolver is now registered BEFORE the event is emitted. Previously
// the event fired first, so a listener that answered synchronously (e.g. an
// auto-approve policy) called resolveApproval() while no resolver existed,
// got `false` back, and the returned promise never settled.
var requestApproval = async (toolCall, approvalId) => {
  const id = generateApprovalToken();
  const request = { id, toolCall, approvalId };
  return new Promise((resolve) => {
    state.resolvers.set(id, resolve);
    state.emitter.emit("approvalRequested", request);
  });
};
// Settle a pending approval. Returns false when response.id is unknown
// (already resolved or never requested); true when the promise was settled.
var resolveApproval = (response) => {
  const resolver = state.resolvers.get(response.id);
  if (!resolver) return false;
  state.resolvers.delete(response.id);
  resolver(response);
  state.emitter.emit("approvalResolved", response);
  return true;
};
// Subscribe to approval requests ({ id, toolCall, approvalId }).
var onApprovalRequested = (listener) => {
  state.emitter.on("approvalRequested", listener);
};
// Subscribe to approval resolutions (the response object passed to resolveApproval).
var onApprovalResolved = (listener) => {
  state.emitter.on("approvalResolved", listener);
};
// Detach a listener previously registered for either approval event.
var removeApprovalListener = (event, listener) => {
  state.emitter.removeListener(event, listener);
};
|
|
1451
|
-
|
|
1452
|
-
// src/composition/model.ts
|
|
1453
|
-
// Build a composable model step. The returned async function accepts either
// a plain string (treated as a single user message) or a full context, calls
// the configured provider, and loops executing tools until the model stops
// requesting tool calls (or the abort signal fires).
var model = ({
  model: model2 = "openai/gpt-4o-mini",
  schema,
  system,
  apiKey,
  baseUrl
} = {}) => {
  return async (ctxOrMessage) => {
    const ctx = typeof ctxOrMessage === "string" ? (
      // model()("hello!");
      {
        history: [{ role: "user", content: ctxOrMessage }],
        tools: []
      }
    ) : (
      // model()(/* few shot or history */);
      ctxOrMessage
    );
    const normalizedSchema = schema ? normalizeSchema(schema) : void 0;
    let currentCtx = ctx;
    // A `system` option replaces an existing leading system message or
    // prepends a new one; it may be a function computed from the context.
    if (system) {
      const systemContent = typeof system === "function" ? system(currentCtx) : system;
      const [first, ...rest] = currentCtx.history;
      if (first?.role === "system") {
        currentCtx = {
          ...currentCtx,
          history: [{ role: "system", content: systemContent }, ...rest]
        };
      } else {
        currentCtx = {
          ...currentCtx,
          history: [{ role: "system", content: systemContent }, ...currentCtx.history]
        };
      }
    }
    // Providers receive the system prompt separately as `instructions`.
    const systemMessage = currentCtx.history.find((m) => m.role === "system");
    const instructions = systemMessage?.content;
    // Tool loop: call the provider, execute any requested tools, and repeat
    // until the model answers without tool calls or we are aborted.
    do {
      if (currentCtx.abortSignal?.aborted) {
        break;
      }
      currentCtx = await callProvider(
        { model: model2, instructions, schema: normalizedSchema, apiKey, baseUrl },
        currentCtx
      );
      if (currentCtx.lastResponse?.tool_calls && currentCtx.tools?.length) {
        currentCtx = await executeTools(currentCtx);
      }
    } while (currentCtx.lastResponse?.tool_calls && currentCtx.tools?.length && !currentCtx.abortSignal?.aborted);
    return currentCtx;
  };
};
|
|
1505
|
-
// Execute the tool calls requested by the last assistant response, honoring
// per-tool call limits, optional approval (callback or event-based), retry
// counts, and parallel vs sequential execution. Appends one "tool" message
// per call to history and returns the updated context.
var executeTools = async (ctx) => {
  const calls = ctx.lastResponse?.tool_calls || [];
  if (!calls.length) return ctx;
  if (ctx.stream) {
    ctx.stream({ type: "tool_calls_ready", calls });
  }
  const toolConfig = ctx.toolConfig || {};
  const {
    requireApproval = false,
    approvalCallback,
    parallel = false,
    retryCount = 0,
    approvalId,
    executeOnApproval = false
  } = toolConfig;
  // Copy so the caller's counts object is never mutated.
  const updatedCounts = { ...ctx.toolCallCounts || {} };
  // Run one tool call. `approved === false` short-circuits with a denial
  // result; otherwise enforce limits, then execute with up to retryCount
  // additional attempts on failure.
  const runCall = async (call, approved) => {
    if (!approved) {
      if (ctx.stream) {
        ctx.stream({
          type: "tool_error",
          call,
          error: "Tool execution denied by user"
        });
      }
      return {
        call,
        result: { error: "Tool execution denied by user" }
      };
    }
    const toolName = call.function.name;
    const limits = ctx.toolLimits || {};
    const maxCalls2 = limits[toolName];
    const currentCount = updatedCounts[toolName] || 0;
    // Per-tool invocation cap; the error is surfaced as the tool result so
    // the model can see it and adjust.
    if (maxCalls2 && currentCount >= maxCalls2) {
      const error2 = `Tool ${toolName} has reached its limit of ${maxCalls2} calls`;
      if (ctx.stream) {
        ctx.stream({ type: "tool_error", call, error: error2 });
      }
      return {
        call,
        result: { error: error2 }
      };
    }
    updatedCounts[toolName] = currentCount + 1;
    if (ctx.stream) {
      ctx.stream({ type: "tool_executing", call });
    }
    let lastError;
    // retryCount is the number of EXTRA attempts after the first.
    for (let i = 0; i <= retryCount; i++) {
      try {
        const executor = ctx.toolExecutors?.[call.function.name];
        if (!executor) {
          throw new Error(`Tool executor not found: ${call.function.name}`);
        }
        let args = {};
        try {
          args = call.function.arguments ? JSON.parse(call.function.arguments) : {};
        } catch (e) {
          throw new Error(
            `Invalid JSON arguments for tool ${call.function.name}: ${call.function.arguments}`
          );
        }
        const result = await executor(args);
        if (ctx.stream) {
          ctx.stream({ type: "tool_complete", call, result });
        }
        return { call, result };
      } catch (e) {
        lastError = e;
      }
    }
    // All attempts failed; report the last error as the tool result.
    const error = lastError.message;
    if (ctx.stream) {
      ctx.stream({ type: "tool_error", call, error });
    }
    return { call, result: { error } };
  };
  // executeOnApproval mode: each call runs as soon as ITS approval arrives,
  // rather than waiting for all approvals first.
  if (executeOnApproval && requireApproval) {
    const resultPromises = calls.map(async (call) => {
      let approved;
      if (approvalCallback) {
        approved = await approvalCallback(call);
      } else {
        const response = await requestApproval(call, approvalId);
        approved = response.approved;
      }
      return runCall(call, approved);
    });
    const results2 = await Promise.all(resultPromises);
    return {
      ...ctx,
      history: [
        ...ctx.history,
        ...results2.map(({ call, result }) => ({
          role: "tool",
          tool_call_id: call.id,
          content: JSON.stringify(result)
        }))
      ],
      toolCallCounts: updatedCounts
    };
  }
  // Default mode: gather all approval decisions first, then execute.
  const approvalPromises = calls.map(async (call) => {
    if (requireApproval) {
      let approved;
      if (approvalCallback) {
        approved = await approvalCallback(call);
      } else {
        const response = await requestApproval(call, approvalId);
        approved = response.approved;
      }
      return { call, approved };
    } else {
      return { call, approved: true };
    }
  });
  const approvals = await Promise.all(approvalPromises);
  const runCallWithApproval = async (call) => {
    const approval = approvals.find((a) => a.call.id === call.id);
    return runCall(call, approval?.approved ?? true);
  };
  const results = parallel ? await Promise.all(calls.map(runCallWithApproval)) : await runCallsSequentially(calls, runCallWithApproval);
  return {
    ...ctx,
    history: [
      ...ctx.history,
      ...results.map(({ call, result }) => ({
        role: "tool",
        tool_call_id: call.id,
        content: JSON.stringify(result)
      }))
    ],
    toolCallCounts: updatedCounts
  };
};
|
|
1641
|
-
var runCallsSequentially = async (calls, runCall) => {
  // Execute tool calls strictly one after another (no parallelism),
  // preserving call order in the returned results.
  const results = [];
  for (let i = 0; i < calls.length; i++) {
    results.push(await runCall(calls[i]));
  }
  return results;
};
|
|
1648
|
-
|
|
1649
|
-
// src/thread.ts
|
|
1650
|
-
var createMemoryStore = () => {
  // In-process thread store backed by a Map; nothing is persisted across
  // process restarts.
  const byThread = /* @__PURE__ */ new Map();
  return {
    // Resolve to the saved history, or an empty list for unknown threads.
    get: async (threadId) => byThread.get(threadId) || [],
    // Replace the full message history for a thread.
    set: async (threadId, messages) => {
      byThread.set(threadId, messages);
    }
  };
};
|
|
1661
|
-
var createThread = (id, store) => {
  // Build a blank workflow context seeded with the given history.
  const baseContext = (history) => ({
    history,
    tools: [],
    toolExecutors: {},
    toolLimits: {},
    toolCallCounts: {}
  });
  return {
    id,
    store,
    // Run a workflow over the persisted history and save the result back.
    async generate(workflow) {
      const initialContext = baseContext(await store.get(id));
      const finalContext = await workflow(initialContext);
      await store.set(id, finalContext.history);
      return finalContext;
    },
    // Append a user message, run the workflow (defaulting to model()),
    // then persist the resulting history.
    async message(content, workflow, options) {
      const priorHistory = await store.get(id);
      const initialContext = {
        ...baseContext([...priorHistory, { role: "user", content }]),
        abortSignal: options?.abortSignal
      };
      const finalContext = await (workflow || model())(initialContext);
      if (options?.abortSignal?.aborted) {
        // Interrupted: keep the user's message but record a placeholder
        // assistant reply instead of whatever partial output came back.
        const abortedHistory = [
          ...initialContext.history,
          { role: "assistant", content: "[Response interrupted]" }
        ];
        await store.set(id, abortedHistory);
        return { ...finalContext, history: abortedHistory };
      }
      await store.set(id, finalContext.history);
      return finalContext;
    }
  };
};
|
|
1702
|
-
var threads = /* @__PURE__ */ new Map();
// Fix: the previous cache key used `${id}-${store}`, and stringifying any
// store object yields "[object Object]", so every distinct custom store with
// the same thread id collapsed onto one cached thread. Give each store
// object a stable numeric identity instead.
var storeIds = /* @__PURE__ */ new WeakMap();
var nextStoreId = 0;
/**
 * Return the cached thread for (id, store), creating it on first use.
 * Threads created without a store get a fresh in-memory store.
 *
 * @param {string} id - Thread identifier.
 * @param {object} [store] - Optional custom message store ({get, set}).
 * @returns {object} The cached or newly created thread.
 */
var getOrCreateThread = (id, store) => {
  let cacheKey = id;
  if (store) {
    if (!storeIds.has(store)) {
      storeIds.set(store, ++nextStoreId);
    }
    cacheKey = `${id}-${storeIds.get(store)}`;
  }
  if (threads.has(cacheKey)) {
    return threads.get(cacheKey);
  }
  const threadStore = store || createMemoryStore();
  const thread = createThread(id, threadStore);
  threads.set(cacheKey, thread);
  return thread;
};
|
|
1713
|
-
|
|
1714
|
-
// src/composition/when.ts
|
|
1715
|
-
var when = (condition, action) => {
  // Gate a step behind a predicate: contexts that fail the check pass
  // through unchanged; those that pass are handed to `action`.
  return async (ctx) => (condition(ctx) ? await action(ctx) : ctx);
};
|
|
1723
|
-
|
|
1724
|
-
// src/helpers.ts
|
|
1725
|
-
var noToolsCalled = () => (ctx) => {
  // True when the last model response requested no tool calls at all.
  const calls = ctx.lastResponse?.tool_calls;
  return !calls || calls.length === 0;
};
|
|
1728
|
-
var everyNMessages = (n, step) => {
  // Fire `step` each time the history crosses another multiple of n
  // messages. Closure state records the history length at the last firing,
  // so the step runs at most once per crossed boundary.
  let lastTriggeredAt = 0;
  const crossedBoundary = (ctx) =>
    Math.floor(ctx.history.length / n) > Math.floor(lastTriggeredAt / n);
  return when(crossedBoundary, async (ctx) => {
    lastTriggeredAt = ctx.history.length;
    return await step(ctx);
  });
};
|
|
1738
|
-
var everyNTokens = (n, step) => {
  // Fire `step` each time the estimated token count of the history crosses
  // another multiple of n. Tokens are approximated as ceil(chars / 4).
  let lastTriggeredAt = 0;
  const estimateTokens = (ctx) =>
    ctx.history.reduce(
      // Fix: messages may carry null content (e.g. assistant turns that only
      // issue tool_calls); treat those as zero length instead of throwing
      // on `.length` of null.
      (acc, msg) => acc + Math.ceil((msg.content?.length ?? 0) / 4),
      0
    );
  return when(
    (ctx) => Math.floor(estimateTokens(ctx) / n) > Math.floor(lastTriggeredAt / n),
    async (ctx) => {
      lastTriggeredAt = estimateTokens(ctx);
      return await step(ctx);
    }
  );
};
|
|
1758
|
-
var appendToLastRequest = (content) => {
  // Step that appends `content` to the most recent user message; a history
  // with no user message passes through untouched.
  return async (ctx) => {
    const { history } = ctx;
    let idx = history.length - 1;
    while (idx >= 0 && history[idx].role !== "user") {
      idx--;
    }
    if (idx < 0) return ctx;
    // Copy-on-write: only the targeted message object is replaced.
    const updated = history.map(
      (msg, i) => i === idx ? { ...msg, content: msg.content + content } : msg
    );
    return { ...ctx, history: updated };
  };
};
|
|
1779
|
-
var toolNotUsedInNTurns = ({ toolName, times }, step) => {
  // Run `step` once the named tool has gone unused for `times` consecutive
  // user turns. State lives in the closure, so each wrapper instance tracks
  // its own streak across invocations.
  let turnsSinceLastUsed = 0;
  let lastProcessedTurn = -1;
  return when((ctx) => {
    const currentTurn = getCurrentTurn(ctx);
    // Evaluate at most once per turn: repeated calls within the same turn
    // must not inflate the unused-turn counter.
    if (currentTurn === lastProcessedTurn) return false;
    lastProcessedTurn = currentTurn;
    const toolUsedInTurn = wasToolUsedInCurrentTurn(ctx, toolName);
    if (toolUsedInTurn) {
      // Tool was used this turn: reset the streak and do not fire.
      turnsSinceLastUsed = 0;
      return false;
    } else {
      turnsSinceLastUsed++;
      return turnsSinceLastUsed >= times;
    }
  }, step);
};
|
|
1796
|
-
var getCurrentTurn = (ctx) => {
  // A "turn" is delimited by user messages, so the current turn number is
  // simply the count of user messages in the history.
  return ctx.history.filter((msg) => msg.role === "user").length;
};
|
|
1803
|
-
var wasToolUsedInCurrentTurn = (ctx, toolName) => {
  // Whether the named tool was invoked in the current turn, i.e. after the
  // most recent user message in the history.
  // Scan backwards for the last user message; everything after it belongs
  // to the current turn.
  let lastUserIndex = -1;
  for (let i = ctx.history.length - 1; i >= 0; i--) {
    if (ctx.history[i].role === "user") {
      lastUserIndex = i;
      break;
    }
  }
  if (lastUserIndex === -1) return false;
  for (let i = lastUserIndex + 1; i < ctx.history.length; i++) {
    const msg = ctx.history[i];
    // NOTE(review): this inspects ctx.lastResponse.tool_calls rather than
    // the iterated assistant message `msg`, so the first assistant message
    // after the last user message answers based on the latest response
    // only — confirm that is intended.
    if (msg.role === "assistant" && ctx.lastResponse?.tool_calls) {
      return ctx.lastResponse.tool_calls.some(
        (call) => call.function.name === toolName
      );
    }
  }
  return false;
};
|
|
1822
|
-
var toolWasCalled = (name) => (ctx) => {
  // Predicate: the last model response requested a tool call with `name`.
  const calls = ctx.lastResponse?.tool_calls ?? [];
  return calls.some((call) => call.function.name === name);
};
|
|
1825
|
-
|
|
1826
|
-
// src/composition/tap.ts
|
|
1827
|
-
// Side-effect step: await `fn` for observation (logging, metrics, …),
// then forward the context unchanged.
var tap = (fn) => async (ctx) => {
  await fn(ctx);
  return ctx;
};
|
|
1833
|
-
|
|
1834
|
-
// src/composition/retry.ts
|
|
1835
|
-
/**
 * Retry wrapper: run `step` up to `times` times, returning the first
 * successful result and rethrowing the last error once attempts run out.
 *
 * @param {{times?: number}} [config] - Maximum attempts (default 3).
 * @param {Function} step - Async step to retry.
 * @returns {Function} Async step with retry behavior.
 */
var retry = ({ times = 3 } = {}, step) => {
  return async (ctx) => {
    if (times < 1) {
      // Fix: with zero attempts the original loop fell through and threw
      // `undefined`; fail loudly with a real Error instead.
      throw new Error(`retry: times must be >= 1, got ${times}`);
    }
    let lastError;
    for (let attempt = 0; attempt < times; attempt++) {
      try {
        return await step(ctx);
      } catch (e) {
        lastError = e;
      }
    }
    // All attempts failed: surface the most recent error.
    throw lastError;
  };
};
|
|
1848
|
-
|
|
1849
|
-
// src/composition/compose.ts
|
|
1850
|
-
var enrichContext = (ctx) => {
  // Attach `lastRequest` (the most recent user message, or undefined when
  // none exists) without mutating the incoming context.
  let latestUserMessage;
  for (let i = ctx.history.length - 1; i >= 0; i--) {
    if (ctx.history[i].role === "user") {
      latestUserMessage = ctx.history[i];
      break;
    }
  }
  return { ...ctx, lastRequest: latestUserMessage };
};
|
|
1857
|
-
var compose = (...steps) => {
  // Chain steps left-to-right. Accepts either a ready-made context or a
  // plain string (treated as a single user message on a fresh context).
  return async (ctxOrMessage) => {
    const blankContext = {
      history: [],
      tools: [],
      toolExecutors: {},
      toolLimits: {},
      toolCallCounts: {}
    };
    const initialContext = typeof ctxOrMessage === "string" ? { ...blankContext, history: [{ role: "user", content: ctxOrMessage }] } : ctxOrMessage || blankContext;
    // Re-derive lastRequest before every step so each one sees it current.
    let current = enrichContext(initialContext);
    for (const step of steps) {
      current = await step(enrichContext(current));
    }
    return current;
  };
};
|
|
1884
|
-
|
|
1885
|
-
// src/composition/scope.ts
|
|
1886
|
-
var scopeContext = (config, ctx) => {
  // Build a child context for a scoped sub-workflow, copying in only the
  // parts of the parent ctx selected by config.inherit — a bit mask where
  // 1 = Conversation and 2 = Tools; default is Conversation only.
  const inherit = config.inherit ?? 1 /* Conversation */;
  let scopedCtx = {
    history: [],
    tools: [],
    toolExecutors: {},
    toolLimits: {},
    toolCallCounts: {}
  };
  if (inherit & 1 /* Conversation */) {
    // History is shared by reference; request/response markers come along.
    scopedCtx.history = ctx.history;
    scopedCtx.lastResponse = ctx.lastResponse;
    scopedCtx.lastRequest = ctx.lastRequest;
  }
  if (inherit & 2 /* Tools */) {
    // Tool state is shallow-copied so the scope can change tool sets
    // without mutating the parent's collections.
    scopedCtx.tools = [...ctx.tools || []];
    scopedCtx.toolExecutors = { ...ctx.toolExecutors || {} };
    scopedCtx.toolLimits = { ...ctx.toolLimits || {} };
    scopedCtx.toolCallCounts = { ...ctx.toolCallCounts || {} };
    scopedCtx.toolConfig = ctx.toolConfig ? { ...ctx.toolConfig } : void 0;
  }
  // Streaming sink, abort signal and usage accounting always pass through.
  scopedCtx.stream = ctx.stream;
  scopedCtx.abortSignal = ctx.abortSignal;
  scopedCtx.usage = ctx.usage;
  if (config.tools) {
    // Scope-local tools replace (not merge into) any inherited tool set.
    const toolDefinitions = config.tools.map(toolConfigToToolDefinition);
    const toolExecutors = config.tools.reduce(
      (acc, tool) => {
        acc[tool.name] = tool.execute;
        return acc;
      },
      {}
    );
    const toolLimits = config.tools.reduce(
      (acc, tool) => {
        // Only tools that declared a max-call budget get a limit entry.
        if (tool._maxCalls) {
          acc[tool.name] = tool._maxCalls;
        }
        return acc;
      },
      {}
    );
    scopedCtx.tools = toolDefinitions;
    scopedCtx.toolExecutors = toolExecutors;
    scopedCtx.toolLimits = toolLimits;
  }
  if (config.toolConfig) {
    scopedCtx.toolConfig = { ...config.toolConfig };
  }
  if (config.system) {
    // Install the scope's system prompt: replace an existing leading system
    // message, otherwise prepend a new one.
    const [first, ...rest] = scopedCtx.history;
    if (first?.role === "system") {
      scopedCtx.history = [{ role: "system", content: config.system }, ...rest];
    } else {
      scopedCtx.history = [{ role: "system", content: config.system }, ...scopedCtx.history];
    }
  }
  if (config.stream) {
    // A scope-specific stream handler overrides the inherited one.
    scopedCtx.stream = config.stream;
  }
  return scopedCtx;
};
|
|
1948
|
-
var scope = (config, ...steps) => {
  // Run `steps` inside an isolated child context derived via scopeContext.
  // With config.until set, the steps repeat (do/while) until the predicate
  // holds for the scoped context — so they always run at least once.
  return async (ctx) => {
    let scopedCtx = scopeContext(config, ctx);
    if (config.until) {
      do {
        scopedCtx = await compose(...steps)(scopedCtx);
      } while (!config.until(scopedCtx));
    } else {
      scopedCtx = await compose(...steps)(scopedCtx);
    }
    // Merge back into the parent: silent scopes discard conversational
    // changes, but usage accounting from the scope always propagates.
    return {
      ...ctx,
      history: config.silent ? ctx.history : scopedCtx.history,
      lastResponse: config.silent ? ctx.lastResponse : scopedCtx.lastResponse,
      lastRequest: config.silent ? ctx.lastRequest : scopedCtx.lastRequest,
      stopReason: config.silent ? ctx.stopReason : scopedCtx.stopReason,
      usage: scopedCtx.usage
    };
  };
};
|
|
1968
|
-
|
|
1969
|
-
// src/utils/rateLimited.ts
|
|
1970
|
-
var rateLimited = (config) => (fn) => {
  // Wrap `fn` in a token-bucket rate limiter: at most `rps` starts per
  // second (with a `burst`-sized bucket) and at most `concurrency`
  // simultaneous in-flight calls. Pending calls queue FIFO.
  const { rps, burst, concurrency } = config;
  let tokens = burst; // available start-permits, capped at `burst`
  let inFlight = 0; // calls currently executing
  const queue = []; // FIFO of { fn, resolve, reject }
  let intervalId = null; // refill timer; null while the limiter is idle
  const refillTokens = () => {
    tokens = Math.min(tokens + 1, burst);
    processQueue();
  };
  const startInterval = () => {
    // One token per 1000/rps ms gives a steady refill rate of `rps`/second.
    if (!intervalId) {
      intervalId = setInterval(refillTokens, 1e3 / rps);
    }
  };
  const stopInterval = () => {
    // Tear the timer down only when fully idle, so the process can exit.
    if (intervalId && queue.length === 0 && inFlight === 0) {
      clearInterval(intervalId);
      intervalId = null;
    }
  };
  const processQueue = () => {
    // Dispatch while we hold both a token and a free concurrency slot.
    while (queue.length > 0 && tokens > 0 && inFlight < concurrency) {
      tokens--;
      inFlight++;
      const item = queue.shift();
      item.fn().then((result) => {
        inFlight--;
        item.resolve(result);
        // Re-check the queue before possibly stopping the refill timer.
        processQueue();
        stopInterval();
      }).catch((error) => {
        inFlight--;
        item.reject(error);
        processQueue();
        stopInterval();
      });
    }
  };
  return (async (...args) => {
    // Each call is enqueued and resolved/rejected when its turn completes.
    return new Promise((resolve, reject) => {
      queue.push({
        fn: () => fn(...args),
        resolve,
        reject
      });
      startInterval();
      processQueue();
    });
  });
};
|
|
2021
|
-
// Public API surface of the bundled entry point.
export {
  IMAGE_EDIT_MODEL_SCHEMA,
  IMAGE_MODEL_SCHEMA,
  Inherit,
  addUsage,
  appendToLastRequest,
  compose,
  convertMCPSchemaToToolSchema,
  convertStandardSchemaToJsonSchema,
  convertStandardSchemaToSchemaProperties,
  createMCPTools,
  embed,
  everyNMessages,
  everyNTokens,
  generateApprovalToken,
  generateImage,
  getDefaultConfig,
  getKey,
  getModelConfig,
  getOrCreateThread,
  isStandardSchema,
  maxCalls,
  model,
  noToolsCalled,
  normalizeSchema,
  onApprovalRequested,
  onApprovalResolved,
  parseModelName,
  rateLimited,
  removeApprovalListener,
  requestApproval,
  resolveApproval,
  retry,
  scope,
  setKeys,
  tap,
  toolConfigToToolDefinition,
  toolNotUsedInNTurns,
  toolWasCalled,
  when
};
|
|
1
|
+
// Barrel file: the package entry point re-exports every public module.
export * from "./mcp";
export * from "./types";
export * from "./utils";
export * from "./embed";
export * from "./image";
export * from "./image-model-schema";
export * from "./thread";
export * from "./schema";
export * from "./helpers";
export * from "./approval";
export * from "./composition/tap";
export * from "./composition/when";
export * from "./composition/model";
export * from "./composition/retry";
export * from "./composition/scope";
export * from "./utils/rateLimited";
export * from "./composition/compose";
|
|
2062
18
|
//# sourceMappingURL=index.js.map
|