@digilogiclabs/platform-core 1.14.0 → 1.16.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{ConsoleEmail-CqXhZmFN.d.mts → ConsoleEmail-CO0OM4UT.d.ts} +2 -718
- package/dist/{ConsoleEmail-CqXhZmFN.d.ts → ConsoleEmail-DDB28vyS.d.mts} +2 -718
- package/dist/IAI-D8wA_i8N.d.mts +718 -0
- package/dist/IAI-D8wA_i8N.d.ts +718 -0
- package/dist/agents-Cc65YUoW.d.ts +921 -0
- package/dist/agents-DGciJI27.d.mts +921 -0
- package/dist/agents.d.mts +2 -0
- package/dist/agents.d.ts +2 -0
- package/dist/agents.js +813 -0
- package/dist/agents.js.map +1 -0
- package/dist/agents.mjs +781 -0
- package/dist/agents.mjs.map +1 -0
- package/dist/auth.d.mts +28 -1
- package/dist/auth.d.ts +28 -1
- package/dist/auth.js +41 -8
- package/dist/auth.js.map +1 -1
- package/dist/auth.mjs +41 -8
- package/dist/auth.mjs.map +1 -1
- package/dist/index.d.mts +7 -563
- package/dist/index.d.ts +7 -563
- package/dist/index.js +736 -0
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +730 -0
- package/dist/index.mjs.map +1 -1
- package/dist/migrate.js +0 -0
- package/dist/testing.d.mts +3 -2
- package/dist/testing.d.ts +3 -2
- package/package.json +16 -11
package/dist/agents.js
ADDED
|
@@ -0,0 +1,813 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __defProp = Object.defineProperty;
|
|
3
|
+
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
4
|
+
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
5
|
+
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
6
|
+
var __export = (target, all) => {
|
|
7
|
+
for (var name in all)
|
|
8
|
+
__defProp(target, name, { get: all[name], enumerable: true });
|
|
9
|
+
};
|
|
10
|
+
var __copyProps = (to, from, except, desc) => {
|
|
11
|
+
if (from && typeof from === "object" || typeof from === "function") {
|
|
12
|
+
for (let key of __getOwnPropNames(from))
|
|
13
|
+
if (!__hasOwnProp.call(to, key) && key !== except)
|
|
14
|
+
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
|
|
15
|
+
}
|
|
16
|
+
return to;
|
|
17
|
+
};
|
|
18
|
+
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
19
|
+
|
|
20
|
+
// src/agents.ts
// Public surface of the agents entry point. Registered as lazy getters
// via __export, so the referenced bindings may be declared further down.
var agents_exports = {};
__export(agents_exports, {
  DEFAULT_AGENT_LOOP_OPTIONS: () => DEFAULT_AGENT_LOOP_OPTIONS,
  OllamaAdapter: () => OllamaAdapter,
  createAgentTracer: () => createAgentTracer,
  createAgentUsageTracker: () => createAgentUsageTracker,
  createTracedAI: () => createTracedAI,
  runAgentLoop: () => runAgentLoop
});
module.exports = __toCommonJS(agents_exports);
|
|
31
|
+
|
|
32
|
+
// src/interfaces/ILogger.ts
// Silent ILogger implementation: every level is a no-op and child()
// hands back the very same instance, so it can be dropped in anywhere a
// logger is required without ever producing output.
var NoopLogger = class {
  child() {
    // Child loggers share the singleton-like no-op behavior.
    return this;
  }
  debug() {}
  info() {}
  warn() {}
  error() {}
};
|
|
46
|
+
|
|
47
|
+
// src/agent/agent-loop.ts
// Baseline settings that runAgentLoop merges underneath caller options.
var DEFAULT_AGENT_LOOP_OPTIONS = {
  // Hard cap on model round-trips per loop invocation.
  maxIterations: 10,
  // Token and cost budgets are unbounded unless the caller sets them.
  maxTokens: Number.POSITIVE_INFINITY,
  maxCostUsd: Number.POSITIVE_INFINITY,
  // Tool results are cached for five minutes under this key prefix.
  cacheTtlSeconds: 300,
  cacheKeyPrefix: "agent:tool:"
};
|
|
55
|
+
// Translates a provider finishReason into the loop's stop reason.
// "tool_calls" is folded into "stop" because the loop only calls this
// after deciding no further tool round-trip is needed; anything
// unrecognized (including "error") maps to "error".
function mapFinishReason(reason) {
  const table = new Map([
    ["stop", "stop"],
    ["length", "length"],
    ["content_filter", "content_filter"],
    ["tool_calls", "stop"]
  ]);
  return table.get(reason) ?? "error";
}
|
|
71
|
+
// Runs a single tool call against its registered executor, consulting the
// optional cache first. Never throws: failures (unknown tool, malformed
// JSON arguments, executor errors) are reported in the result object so
// one bad tool call cannot abort a whole agent iteration.
//
// @param toolCall        provider tool-call ({ id, function: { name, arguments } })
// @param executorMap     Map of tool name -> executor with async execute(args, toolCall)
// @param cache           optional async get/set cache; cache errors are best-effort ignored
// @param cacheKeyPrefix  prefix prepended to cache keys
// @param cacheTtlSeconds TTL forwarded to cache.set
// @returns { toolCall, result, success, error?, durationMs, cached }
async function executeToolCall(toolCall, executorMap, cache, cacheKeyPrefix, cacheTtlSeconds) {
  const start = Date.now();
  const executor = executorMap.get(toolCall.function.name);
  if (!executor) {
    return {
      toolCall,
      result: `Error: Unknown tool "${toolCall.function.name}"`,
      success: false,
      error: `Unknown tool: ${toolCall.function.name}`,
      durationMs: Date.now() - start,
      cached: false
    };
  }
  // Key includes the raw argument string so distinct inputs never collide.
  // Computed once here (previously duplicated in the read and write paths).
  const cacheKey = cacheKeyPrefix + toolCall.function.name + ":" + toolCall.function.arguments;
  if (cache) {
    try {
      const cached = await cache.get(cacheKey);
      // Fix: also treat `undefined` as a miss. Some cache implementations
      // resolve undefined (not null) for absent keys; the old `!== null`
      // check turned that into a bogus hit with `result: undefined`.
      if (cached !== null && cached !== void 0) {
        return {
          toolCall,
          result: cached,
          success: true,
          durationMs: Date.now() - start,
          cached: true
        };
      }
    } catch {
      // Cache read failures are deliberately non-fatal; fall through and execute.
    }
  }
  try {
    const parsed = JSON.parse(toolCall.function.arguments);
    const result = await executor.execute(parsed, toolCall);
    if (cache) {
      // Best-effort write; a failing cache must not fail the tool call.
      await cache.set(cacheKey, result, cacheTtlSeconds).catch(() => {
      });
    }
    return {
      toolCall,
      result,
      success: true,
      durationMs: Date.now() - start,
      cached: false
    };
  } catch (err) {
    // Covers malformed JSON arguments as well as executor failures.
    const errorMsg = err instanceof Error ? err.message : String(err);
    return {
      toolCall,
      result: `Error: ${errorMsg}`,
      success: false,
      error: errorMsg,
      durationMs: Date.now() - start,
      cached: false
    };
  }
}
|
|
127
|
+
// Core tool-use agent loop: repeatedly calls the model, executes any
// requested tool calls (concurrently), feeds the results back as "tool"
// messages, and stops on a terminal finish reason, budget exhaustion,
// abort signal, callback veto, or the iteration cap.
//
// @param options caller options merged over DEFAULT_AGENT_LOOP_OPTIONS;
//   must provide `ai` (chat client), `tools`, `messages`; may provide
//   logger, cache, signal, onIteration, chatRequestOverrides and the
//   maxIterations/maxTokens/maxCostUsd budget fields.
// @returns { response, messages, stopReason, iterations, usage }
async function runAgentLoop(options) {
  const opts = { ...DEFAULT_AGENT_LOOP_OPTIONS, ...options };
  const logger = opts.logger ?? new NoopLogger();
  // Definitions go to the model; executors are looked up by tool name.
  const toolDefs = opts.tools.map((t) => t.definition);
  const executorMap = /* @__PURE__ */ new Map();
  for (const tool of opts.tools) {
    executorMap.set(tool.definition.function.name, tool);
  }
  // Local copy so the caller's message array is never mutated.
  const messages = [...opts.messages];
  const iterations = [];
  const cumulative = {
    promptTokens: 0,
    completionTokens: 0,
    totalTokens: 0,
    estimatedCostUsd: 0,
    iterations: 0,
    toolCallCount: 0
  };
  let lastResponse;
  let stopReason = "stop";
  for (let i = 1; i <= opts.maxIterations; i++) {
    // Budget/abort checks run BEFORE each model call, so the loop can
    // overshoot a budget by at most one response.
    if (opts.signal?.aborted) {
      stopReason = "aborted";
      break;
    }
    if (cumulative.totalTokens >= opts.maxTokens) {
      stopReason = "max_tokens";
      break;
    }
    if (cumulative.estimatedCostUsd >= opts.maxCostUsd) {
      stopReason = "max_cost";
      break;
    }
    logger.debug("Agent loop iteration", { iteration: i });
    const chatRequest = {
      ...opts.chatRequestOverrides,
      messages,
      // Omit the tools field entirely when no tools are registered.
      tools: toolDefs.length > 0 ? toolDefs : void 0
    };
    const response = await opts.ai.chat(chatRequest);
    lastResponse = response;
    cumulative.promptTokens += response.usage.promptTokens;
    cumulative.completionTokens += response.usage.completionTokens;
    cumulative.totalTokens += response.usage.totalTokens;
    cumulative.estimatedCostUsd += response.usage.estimatedCostUsd;
    cumulative.iterations = i;
    const assistantMessage = response.choices[0]?.message;
    if (!assistantMessage) {
      // Provider returned no choices — treat as a hard error.
      stopReason = "error";
      break;
    }
    messages.push(assistantMessage);
    if (response.finishReason !== "tool_calls") {
      // Terminal response: record the final iteration event and exit.
      stopReason = mapFinishReason(response.finishReason);
      const event2 = {
        iteration: i,
        response,
        toolCalls: [],
        cumulativeUsage: { ...cumulative },
        messages: [...messages]
      };
      iterations.push(event2);
      await opts.onIteration?.(event2);
      break;
    }
    const toolCalls = assistantMessage.toolCalls ?? [];
    cumulative.toolCallCount += toolCalls.length;
    // All tool calls of one iteration run concurrently; executeToolCall
    // never rejects, so Promise.all cannot fail here on tool errors.
    const toolResults = await Promise.all(
      toolCalls.map(
        (tc) => executeToolCall(
          tc,
          executorMap,
          opts.cache,
          opts.cacheKeyPrefix,
          opts.cacheTtlSeconds
        )
      )
    );
    // Feed each result back as a "tool" message for the next model call.
    for (const tr of toolResults) {
      messages.push({
        role: "tool",
        content: tr.result,
        toolCallId: tr.toolCall.id
      });
    }
    const event = {
      iteration: i,
      response,
      toolCalls: toolResults,
      cumulativeUsage: { ...cumulative },
      messages: [...messages]
    };
    iterations.push(event);
    // onIteration may veto continuation by returning exactly `false`.
    const callbackResult = await opts.onIteration?.(event);
    if (callbackResult === false) {
      stopReason = "callback_abort";
      break;
    }
    if (i === opts.maxIterations) {
      stopReason = "max_iterations";
    }
  }
  return {
    response: lastResponse,
    messages,
    stopReason,
    iterations,
    usage: cumulative
  };
}
|
|
237
|
+
|
|
238
|
+
// src/agent/agent-tracer.ts
// Builds a tracer that wraps the phases of an agent loop in spans via
// the injected tracing facade. Every async helper delegates to
// tracing.withSpanAsync with kind "internal" and stamps the agent id.
function createAgentTracer(options) {
  const { tracing, agentId } = options;
  // Wraps an entire loop run in an "agent.loop" span.
  const traceLoop = async (fn) =>
    tracing.withSpanAsync(
      "agent.loop",
      async (span) => {
        span.setAttribute("agent.id", agentId);
        return fn(span);
      },
      { kind: "internal" }
    );
  // Wraps one loop iteration, recording its ordinal.
  const traceIteration = async (iteration, fn) =>
    tracing.withSpanAsync(
      "agent.iteration",
      async (span) => {
        span.setAttributes({
          "agent.id": agentId,
          "agent.iteration": iteration
        });
        return fn(span);
      },
      { kind: "internal" }
    );
  // Wraps a single tool execution; annotates the span with the tool
  // result's duration/success/cached fields (and error when present).
  const traceToolCall = async (toolName, fn) =>
    tracing.withSpanAsync(
      `agent.tool.${toolName}`,
      async (span) => {
        span.setAttributes({
          "agent.id": agentId,
          "agent.tool.name": toolName
        });
        const result = await fn(span);
        span.setAttributes({
          "agent.tool.duration_ms": result.durationMs,
          "agent.tool.success": result.success,
          "agent.tool.cached": result.cached
        });
        if (result.error) {
          span.setAttribute("agent.tool.error", result.error);
        }
        return result;
      },
      { kind: "internal" }
    );
  // Records the loop's stop decision on an existing span (synchronous).
  const traceDecision = (span, attrs) => {
    span.setAttributes({
      "agent.decision.finish_reason": attrs.finishReason,
      "agent.decision.tool_count": attrs.toolCallCount,
      "agent.decision.total_tokens": attrs.cumulativeUsage.totalTokens,
      "agent.decision.cost_usd": attrs.cumulativeUsage.estimatedCostUsd,
      "agent.decision.iterations": attrs.cumulativeUsage.iterations
    });
    span.addEvent("agent.decision", {
      finish_reason: attrs.finishReason,
      tool_count: attrs.toolCallCount
    });
  };
  return { traceLoop, traceIteration, traceToolCall, traceDecision };
}
|
|
302
|
+
|
|
303
|
+
// src/agent/agent-usage.ts
// Wires agent-loop usage reporting into the platform usage service and
// provides an onIteration callback that enforces an optional budget.
function createAgentUsageTracker(options) {
  const { usage, agentId, tenantId, userId } = options;
  return {
    // Records a single model round-trip against the tenant/user.
    async recordIteration(params) {
      const u = params.usage;
      return usage.record({
        tenantId,
        userId,
        category: params.category ?? "chat",
        provider: params.provider,
        model: params.model,
        inputTokens: u.promptTokens,
        outputTokens: u.completionTokens,
        totalTokens: u.totalTokens,
        costUsd: u.estimatedCostUsd,
        latencyMs: params.latencyMs ?? 0,
        success: params.success ?? true,
        error: params.error,
        metadata: { agentId }
      });
    },
    // Returns an onIteration callback for runAgentLoop that records each
    // iteration's usage and yields `false` (abort) once the cumulative
    // cost or token budget is reached; otherwise resolves undefined.
    createBudgetCallback(budget) {
      return async (event) => {
        const { response, cumulativeUsage } = event;
        await usage.record({
          tenantId,
          userId,
          category: "chat",
          provider: response.provider,
          model: response.model,
          inputTokens: response.usage.promptTokens,
          outputTokens: response.usage.completionTokens,
          totalTokens: response.usage.totalTokens,
          costUsd: response.usage.estimatedCostUsd,
          latencyMs: 0,
          success: true,
          metadata: { agentId, iteration: event.iteration }
        });
        if (!budget) return;
        if (budget.maxCostUsd !== void 0 && cumulativeUsage.estimatedCostUsd >= budget.maxCostUsd) {
          return false;
        }
        if (budget.maxTokens !== void 0 && cumulativeUsage.totalTokens >= budget.maxTokens) {
          return false;
        }
      };
    }
  };
}
|
|
353
|
+
|
|
354
|
+
// src/agent/traced-ai.ts
// Decorates an IAI client so chat calls are wrapped in client-kind spans
// carrying token/cost/latency attributes; all other operations pass
// through untouched. The returned object has the same interface as `ai`.
function createTracedAI(options) {
  const { ai, tracing } = options;
  // Shared wrapper for the two traced chat entry points. Previously the
  // span/attribute/error handling was duplicated verbatim in `chat` and
  // `chatWithCallback`; extracting it keeps the two paths in lockstep.
  const traceChat = (spanName, request, invoke) =>
    tracing.withSpanAsync(
      spanName,
      async (span) => {
        const start = Date.now();
        if (request.model) {
          span.setAttribute("ai.model", request.model);
        }
        try {
          const response = await invoke();
          span.setAttributes({
            "ai.provider": response.provider,
            "ai.model": response.model,
            "ai.input_tokens": response.usage.promptTokens,
            "ai.output_tokens": response.usage.completionTokens,
            "ai.total_tokens": response.usage.totalTokens,
            "ai.cost_usd": response.usage.estimatedCostUsd,
            "ai.finish_reason": response.finishReason,
            "ai.latency_ms": Date.now() - start
          });
          span.setStatus({ code: "ok" });
          return response;
        } catch (err) {
          span.setAttributes({
            "ai.latency_ms": Date.now() - start
          });
          if (err instanceof Error) {
            span.recordException(err);
          }
          span.setStatus({
            code: "error",
            message: err instanceof Error ? err.message : String(err)
          });
          throw err;
        }
      },
      { kind: "client" }
    );
  return {
    async chat(request) {
      return traceChat("ai.chat", request, () => ai.chat(request));
    },
    // Streaming is intentionally untraced in v1 (span lifetime vs. lazy
    // generator consumption is ambiguous); chunks pass straight through.
    async *chatStream(request) {
      yield* ai.chatStream(request);
    },
    async chatWithCallback(request, callback) {
      return traceChat(
        "ai.chatWithCallback",
        request,
        () => ai.chatWithCallback(request, callback)
      );
    },
    // Pass-through methods (no tracing needed for non-chat operations in v1)
    async complete(request) {
      return ai.complete(request);
    },
    async *completeStream(request) {
      yield* ai.completeStream(request);
    },
    async embed(request) {
      return ai.embed(request);
    },
    async similarity(text1, text2, model) {
      return ai.similarity(text1, text2, model);
    },
    async listModels() {
      return ai.listModels();
    },
    async getModel(modelId) {
      return ai.getModel(modelId);
    },
    async supportsCapability(modelId, capability) {
      return ai.supportsCapability(modelId, capability);
    },
    async estimateTokens(text, model) {
      return ai.estimateTokens(text, model);
    },
    async estimateCost(request) {
      return ai.estimateCost(request);
    },
    async healthCheck() {
      return ai.healthCheck();
    }
  };
}
|
|
472
|
+
|
|
473
|
+
// src/interfaces/IAI.ts
// Human-readable fallback message for each AIError code.
var AIErrorMessages = {
  invalid_request: "The request was invalid or malformed",
  authentication_error: "Authentication failed - check your API key",
  rate_limit_exceeded: "Rate limit exceeded - try again later",
  quota_exceeded: "Usage quota exceeded for this billing period",
  model_not_found: "The specified model was not found",
  context_length_exceeded: "Input exceeds the model context length",
  content_filter: "Content was filtered due to policy violations",
  server_error: "The AI provider encountered an internal error",
  timeout: "The request timed out",
  network_error: "Network error connecting to AI provider",
  provider_unavailable: "The AI provider is currently unavailable",
  unknown: "An unknown error occurred"
};
// Builds an AIError: a plain Error enriched with provider metadata and a
// retryable flag. When the caller does not set `retryable` explicitly it
// defaults to true only for the transient failure codes below.
function createAIError(code, message, options) {
  const error = new Error(message || AIErrorMessages[code]);
  const transientCodes = [
    "rate_limit_exceeded",
    "server_error",
    "timeout",
    "network_error"
  ];
  Object.assign(error, {
    name: "AIError",
    code,
    provider: options?.provider,
    model: options?.model,
    statusCode: options?.statusCode,
    retryable: options?.retryable ?? transientCodes.includes(code),
    retryAfterMs: options?.retryAfterMs
  });
  if (options?.cause) {
    error.cause = options.cause;
  }
  return error;
}
|
|
507
|
+
|
|
508
|
+
// src/adapters/ollama/OllamaAdapter.ts
// IAI adapter backed by a local Ollama server's HTTP API. Reports
// provider "custom" and zero cost throughout (local inference is free).
// NOTE(review): tool calling is not supported here — "tool" role
// messages are downgraded to "assistant" before sending.
var OllamaAdapter = class {
  baseUrl;               // Ollama base URL, trailing slash stripped
  defaultModel;          // chat/completion model when the request omits one
  defaultEmbeddingModel; // embedding model when the request omits one
  timeoutMs;             // per-request abort timeout
  constructor(config) {
    // Strip a single trailing slash so the path joins below stay clean.
    this.baseUrl = (config?.baseUrl ?? "http://localhost:11434").replace(
      /\/$/,
      ""
    );
    this.defaultModel = config?.defaultModel ?? "qwen2.5:3b";
    this.defaultEmbeddingModel = config?.defaultEmbeddingModel ?? "nomic-embed-text";
    this.timeoutMs = config?.timeoutMs ?? 12e4;
  }
  // Single (non-streaming) chat completion via POST /api/chat.
  // Token counts come from Ollama's eval counters when present.
  async chat(request) {
    const model = request.model ?? this.defaultModel;
    const ollamaReq = {
      model,
      messages: request.messages.map((m) => ({
        // Downgrade "tool" to "assistant"; content passed through as-is.
        role: m.role === "tool" ? "assistant" : m.role,
        content: m.content
      })),
      stream: false,
      options: {
        temperature: request.temperature,
        num_predict: request.maxTokens,
        top_p: request.topP,
        stop: request.stop ? Array.isArray(request.stop) ? request.stop : [request.stop] : void 0
      }
    };
    const response = await this.fetch(
      "/api/chat",
      ollamaReq
    );
    const usage = {
      promptTokens: response.prompt_eval_count ?? 0,
      completionTokens: response.eval_count ?? 0,
      totalTokens: (response.prompt_eval_count ?? 0) + (response.eval_count ?? 0),
      estimatedCostUsd: 0
      // Local — zero cost
    };
    // finishReason is always "stop" — Ollama's actual done reason is not
    // inspected here.
    return {
      id: `ollama-${Date.now()}`,
      model: response.model ?? model,
      provider: "custom",
      choices: [
        {
          index: 0,
          message: {
            role: "assistant",
            content: response.message.content
          },
          finishReason: "stop"
        }
      ],
      usage,
      created: /* @__PURE__ */ new Date(),
      finishReason: "stop"
    };
  }
  // Streaming chat: POSTs with stream:true and yields one chunk per
  // NDJSON line. The abort timer spans the ENTIRE stream read, not just
  // the connection. NOTE(review): a malformed NDJSON line makes
  // JSON.parse throw out of the generator — confirm upstream handling.
  async *chatStream(request) {
    const model = request.model ?? this.defaultModel;
    const ollamaReq = {
      model,
      messages: request.messages.map((m) => ({
        role: m.role === "tool" ? "assistant" : m.role,
        content: m.content
      })),
      stream: true,
      options: {
        temperature: request.temperature,
        num_predict: request.maxTokens,
        top_p: request.topP
      }
    };
    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), this.timeoutMs);
    try {
      const res = await fetch(`${this.baseUrl}/api/chat`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(ollamaReq),
        signal: controller.signal
      });
      if (!res.ok) {
        throw createAIError("server_error", `Ollama error: ${res.status}`);
      }
      const reader = res.body?.getReader();
      if (!reader) return;
      const decoder = new TextDecoder();
      // Buffer holds the trailing partial line between reads.
      let buffer = "";
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        buffer = lines.pop() ?? "";
        for (const line of lines) {
          if (!line.trim()) continue;
          const chunk = JSON.parse(line);
          yield {
            id: `ollama-stream-${Date.now()}`,
            model,
            provider: "custom",
            delta: {
              content: chunk.message?.content ?? ""
            },
            finishReason: chunk.done ? "stop" : void 0
          };
        }
      }
    } finally {
      clearTimeout(timer);
    }
  }
  // Streams via chatStream, forwarding every chunk to `callback`, then
  // returns an assembled response. Usage is all zeros — the streaming
  // path does not surface token counts.
  async chatWithCallback(request, callback) {
    let fullContent = "";
    for await (const chunk of this.chatStream(request)) {
      // Callback is awaited before the content is accumulated.
      await callback(chunk);
      if (chunk.delta.content) {
        fullContent += chunk.delta.content;
      }
    }
    const model = request.model ?? this.defaultModel;
    return {
      id: `ollama-${Date.now()}`,
      model,
      provider: "custom",
      choices: [
        {
          index: 0,
          message: { role: "assistant", content: fullContent },
          finishReason: "stop"
        }
      ],
      usage: {
        promptTokens: 0,
        completionTokens: 0,
        totalTokens: 0,
        estimatedCostUsd: 0
      },
      created: /* @__PURE__ */ new Date(),
      finishReason: "stop"
    };
  }
  // Text completion implemented as a single-user-message chat call.
  async complete(request) {
    const response = await this.chat({
      messages: [{ role: "user", content: request.prompt }],
      model: request.model,
      temperature: request.temperature,
      maxTokens: request.maxTokens
    });
    return {
      id: response.id,
      model: response.model,
      provider: "custom",
      text: response.choices[0]?.message.content ?? "",
      usage: response.usage,
      created: response.created,
      finishReason: response.finishReason
    };
  }
  // Streaming completion: delegates to chatStream with the prompt as a
  // single user message (chunks keep the chat-chunk shape).
  async *completeStream(request) {
    yield* this.chatStream({
      messages: [{ role: "user", content: request.prompt }],
      model: request.model,
      temperature: request.temperature,
      maxTokens: request.maxTokens
    });
  }
  // Batch embeddings via POST /api/embed. Usage is zeroed — the
  // endpoint's token counts are not read here.
  async embed(request) {
    const model = request.model ?? this.defaultEmbeddingModel;
    const response = await this.fetch("/api/embed", {
      model,
      input: request.input
    });
    return {
      id: `ollama-emb-${Date.now()}`,
      model,
      provider: "custom",
      embeddings: response.embeddings,
      usage: {
        promptTokens: 0,
        completionTokens: 0,
        totalTokens: 0,
        estimatedCostUsd: 0
      },
      created: /* @__PURE__ */ new Date()
    };
  }
  // Cosine similarity of the two texts' embeddings; 0 when either
  // embedding is missing. NOTE(review): a zero-norm embedding would
  // divide by zero and yield NaN — confirm whether that can occur.
  async similarity(text1, text2, model) {
    const response = await this.embed({ input: [text1, text2], model });
    const [a, b] = response.embeddings;
    if (!a || !b) return 0;
    let dot = 0, normA = 0, normB = 0;
    for (let i = 0; i < a.length; i++) {
      dot += a[i] * b[i];
      normA += a[i] * a[i];
      normB += b[i] * b[i];
    }
    return dot / (Math.sqrt(normA) * Math.sqrt(normB));
  }
  // Lists installed models via GET /api/tags. Capability/limit fields
  // are hard-coded placeholders, not read from the server. Returns []
  // on any failure (server down, bad response).
  async listModels() {
    try {
      const response = await this.fetch(
        "/api/tags",
        null,
        "GET"
      );
      return response.models.map((m) => ({
        modelId: m.name,
        provider: "custom",
        capabilities: ["chat", "completion"],
        maxContextTokens: 4096,
        maxOutputTokens: 2048,
        inputCostPer1K: 0,
        outputCostPer1K: 0,
        supportsStreaming: true,
        supportsTools: false,
        supportsVision: false
      }));
    } catch {
      return [];
    }
  }
  // Finds a model by exact id from listModels, or null.
  async getModel(modelId) {
    const models = await this.listModels();
    return models.find((m) => m.modelId === modelId) ?? null;
  }
  // True when the model exists and lists the capability; false otherwise
  // (including when the model is unknown).
  async supportsCapability(modelId, capability) {
    const model = await this.getModel(modelId);
    return model?.capabilities.includes(capability) ?? false;
  }
  // Rough heuristic: ~4 characters per token. Model argument is ignored.
  async estimateTokens(text, _model) {
    return Math.ceil(text.length / 4);
  }
  // Local inference is free.
  async estimateCost() {
    return 0;
  }
  // Probes GET /api/tags and reports per-provider availability; only the
  // "custom" (Ollama) slot is ever live, the others are stubs.
  async healthCheck() {
    try {
      const start = Date.now();
      await this.fetch("/api/tags", null, "GET");
      const latency = Date.now() - start;
      return {
        healthy: true,
        providers: {
          custom: { available: true, latencyMs: latency },
          openai: { available: false, error: "Not configured" },
          anthropic: { available: false, error: "Not configured" },
          google: { available: false, error: "Not configured" },
          azure: { available: false, error: "Not configured" },
          bedrock: { available: false, error: "Not configured" }
        }
      };
    } catch (err) {
      return {
        healthy: false,
        providers: {
          custom: {
            available: false,
            error: err instanceof Error ? err.message : "Ollama unavailable"
          },
          openai: { available: false, error: "Not configured" },
          anthropic: { available: false, error: "Not configured" },
          google: { available: false, error: "Not configured" },
          azure: { available: false, error: "Not configured" },
          bedrock: { available: false, error: "Not configured" }
        }
      };
    }
  }
  // Private JSON HTTP helper with abort timeout. Non-OK responses raise
  // an AIError("server_error") carrying status and body text; the timer
  // is always cleared.
  async fetch(path, body, method = "POST") {
    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), this.timeoutMs);
    try {
      const res = await fetch(`${this.baseUrl}${path}`, {
        method,
        headers: body ? { "Content-Type": "application/json" } : void 0,
        body: body ? JSON.stringify(body) : void 0,
        signal: controller.signal
      });
      if (!res.ok) {
        const text = await res.text().catch(() => "");
        throw createAIError(
          "server_error",
          `Ollama ${method} ${path}: ${res.status} ${text}`
        );
      }
      return await res.json();
    } finally {
      clearTimeout(timer);
    }
  }
};
|
|
804
|
+
// Annotate the CommonJS export names for ESM import in node:
// (dead `0 &&` expression — never executed at runtime; Node's static
// CJS named-export detection parses it so `import { runAgentLoop }`
// works from ESM)
0 && (module.exports = {
  DEFAULT_AGENT_LOOP_OPTIONS,
  OllamaAdapter,
  createAgentTracer,
  createAgentUsageTracker,
  createTracedAI,
  runAgentLoop
});
//# sourceMappingURL=agents.js.map
|