@abassey/aid 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agents/index.cjs +741 -0
- package/dist/agents/index.d.cts +78 -0
- package/dist/agents/index.d.ts +78 -0
- package/dist/agents/index.js +741 -0
- package/dist/ai-AWJOUXFM.js +9 -0
- package/dist/ai-DOAYJKKI.cjs +9 -0
- package/dist/chunk-2TNYBUNK.js +124 -0
- package/dist/chunk-3LGKZRGY.cjs +124 -0
- package/dist/chunk-AUR2BBB5.cjs +1436 -0
- package/dist/chunk-IJLTRQF4.cjs +276 -0
- package/dist/chunk-JPD7UBAZ.js +58 -0
- package/dist/chunk-M4RQALTT.js +276 -0
- package/dist/chunk-NB65IHJE.cjs +58 -0
- package/dist/chunk-YNIEOBDF.js +1436 -0
- package/dist/client/index.cjs +18 -0
- package/dist/client/index.d.cts +8 -0
- package/dist/client/index.d.ts +8 -0
- package/dist/client/index.js +18 -0
- package/dist/errors-CUVTnseb.d.ts +13 -0
- package/dist/errors-CgCce4cK.d.cts +158 -0
- package/dist/errors-CgCce4cK.d.ts +158 -0
- package/dist/errors-zAPbTlpe.d.cts +13 -0
- package/dist/eval/index.cjs +308 -0
- package/dist/eval/index.d.cts +106 -0
- package/dist/eval/index.d.ts +106 -0
- package/dist/eval/index.js +308 -0
- package/dist/index.cjs +35 -0
- package/dist/index.d.cts +107 -0
- package/dist/index.d.ts +107 -0
- package/dist/index.js +35 -0
- package/dist/middleware/index.cjs +201 -0
- package/dist/middleware/index.d.cts +36 -0
- package/dist/middleware/index.d.ts +36 -0
- package/dist/middleware/index.js +201 -0
- package/dist/observability/index.cjs +147 -0
- package/dist/observability/index.d.cts +30 -0
- package/dist/observability/index.d.ts +30 -0
- package/dist/observability/index.js +147 -0
- package/dist/react/index.cjs +253 -0
- package/dist/react/index.d.cts +64 -0
- package/dist/react/index.d.ts +64 -0
- package/dist/react/index.js +253 -0
- package/dist/serve/index.cjs +545 -0
- package/dist/serve/index.d.cts +69 -0
- package/dist/serve/index.d.ts +69 -0
- package/dist/serve/index.js +545 -0
- package/dist/types-BJReASS-.d.cts +196 -0
- package/dist/types-BJReASS-.d.ts +196 -0
- package/dist/types-CguX3F16.d.cts +173 -0
- package/dist/types-CrFH-_qp.d.cts +68 -0
- package/dist/types-DvdzPmW0.d.ts +173 -0
- package/dist/types-qfE32ADy.d.ts +68 -0
- package/package.json +144 -0
|
@@ -0,0 +1,308 @@
|
|
|
1
|
+
// src/eval/suite.ts
/**
 * Fluent builder for an evaluation suite.
 *
 * Collects named cases, scorers, target models and run options, then
 * `run()` executes every (case, model) pair — sequentially or with a
 * bounded worker pool — and aggregates the results via buildResults.
 *
 * @param name - Suite name, echoed into the result object.
 * @returns A chainable builder with case/scorer/models/threshold/concurrency/ai/run.
 */
function evalSuite(name) {
  const cases = [];
  const scorers = [];
  let models = ["sonnet"];
  let threshold = 0.5;
  let concurrencyLimit = 1;
  let aiFn = null;
  const builder = {
    // Register a named test case ({ input, expected?, system? }).
    case(caseName, testCase) {
      cases.push({ name: caseName, ...testCase });
      return builder;
    },
    // Add a scorer ({ name, score(output, expected, input) }).
    scorer(scorer) {
      scorers.push(scorer);
      return builder;
    },
    // Replace the list of models each case runs against.
    models(m) {
      models = m;
      return builder;
    },
    // Minimum per-scorer score for a case to count as passing.
    threshold(value) {
      threshold = value;
      return builder;
    },
    // Max number of (case, model) jobs in flight at once.
    concurrency(limit) {
      concurrencyLimit = limit;
      return builder;
    },
    // Inject a custom ai function (also avoids the dynamic default import).
    ai(fn) {
      aiFn = fn;
      return builder;
    },
    async run() {
      // Fall back to the package's default ai only when none was injected.
      const ai = aiFn ?? (await import("../ai-AWJOUXFM.js")).ai;
      const jobs = [];
      for (const evalCase of cases) {
        for (const model of models) {
          jobs.push({ evalCase, model });
        }
      }
      const results = [];
      if (concurrencyLimit <= 1) {
        for (const job of jobs) {
          results.push(await runSingleCase(ai, job.evalCase, job.model, scorers, threshold));
        }
      } else {
        // Worker-pool pattern: a shared cursor hands each worker the next
        // job index. Safe without locks because JS is single-threaded and
        // `index++` happens synchronously between awaits.
        let index = 0;
        const workers = Array.from({ length: Math.min(concurrencyLimit, jobs.length) }, async () => {
          while (index < jobs.length) {
            const jobIndex = index++;
            const job = jobs[jobIndex];
            // Fix: write into the job's own slot so result order always
            // matches job order. The previous push() made ordering depend
            // on which worker finished first (nondeterministic output).
            results[jobIndex] = await runSingleCase(ai, job.evalCase, job.model, scorers, threshold);
          }
        });
        await Promise.all(workers);
      }
      return buildResults(name, results, scorers);
    }
  };
  return builder;
}
|
|
64
|
+
/**
 * Execute one eval case against one model and score the output.
 *
 * On success, every scorer is applied to the response text. On error,
 * every scorer is recorded as 0 (the suite keeps going rather than
 * aborting) and latency still reflects the elapsed time.
 *
 * @param ai        - Callable `(input, { model, system }) => { text, tokens, cost }`.
 * @param evalCase  - `{ name, input, expected?, system? }`.
 * @param model     - Model identifier passed through to `ai`.
 * @param scorers   - Array of `{ name, score(output, expected, input) }`.
 * @param threshold - A case passes when every score is >= threshold.
 * @returns Per-case result record (case, model, output, scores, tokens, cost, latencyMs, pass).
 */
async function runSingleCase(ai, evalCase, model, scorers, threshold) {
  let output = "";
  let tokens = { input: 0, output: 0, total: 0 };
  let cost = 0;
  let latencyMs = 0;
  const scores = {};
  // Fix: start timing before the try block so a failed call still records
  // its latency (previously latencyMs stayed 0 on error).
  const startTime = Date.now();
  try {
    const response = await ai(evalCase.input, {
      model,
      system: evalCase.system
    });
    latencyMs = Date.now() - startTime;
    output = response.text;
    tokens = response.tokens;
    cost = response.cost;
    for (const scorer of scorers) {
      scores[scorer.name] = await scorer.score(output, evalCase.expected, evalCase.input);
    }
  } catch {
    latencyMs = Date.now() - startTime;
    // A failed call scores 0 on every scorer rather than crashing the suite.
    for (const scorer of scorers) {
      scores[scorer.name] = 0;
    }
  }
  const pass = Object.values(scores).every((s) => s >= threshold);
  return {
    case: evalCase.name,
    model,
    output,
    scores,
    tokens,
    cost,
    latencyMs,
    pass
  };
}
|
|
100
|
+
/**
 * Aggregate per-case results into per-model, per-scorer and overall
 * summaries for the suite report.
 *
 * @param suiteName - Name echoed into the report.
 * @param cases     - Per-case records produced by runSingleCase.
 * @param scorers   - Scorers used, so every scorer gets a summary entry.
 * @returns `{ suite, timestamp, cases, summary: { byModel, byScorer, overall } }`.
 */
function buildResults(suiteName, cases, scorers) {
  const sum = (xs) => xs.reduce((acc, x) => acc + x, 0);
  const avg = (xs) => (xs.length > 0 ? sum(xs) / xs.length : 0);
  // Group cases by the model they ran against.
  const modelGroups = new Map();
  for (const c of cases) {
    const group = modelGroups.get(c.model);
    if (group) {
      group.push(c);
    } else {
      modelGroups.set(c.model, [c]);
    }
  }
  const byModel = {};
  for (const [model, group] of modelGroups) {
    byModel[model] = {
      avgScore: avg(group.flatMap((c) => Object.values(c.scores))),
      passRate: group.filter((c) => c.pass).length / group.length,
      totalCost: sum(group.map((c) => c.cost)),
      totalTokens: {
        input: sum(group.map((c) => c.tokens.input)),
        output: sum(group.map((c) => c.tokens.output)),
        total: sum(group.map((c) => c.tokens.total))
      },
      avgLatencyMs: sum(group.map((c) => c.latencyMs)) / group.length
    };
  }
  // Average each scorer across all cases; missing scores count as 0.
  const byScorer = {};
  for (const scorer of scorers) {
    byScorer[scorer.name] = {
      avgScore: avg(cases.map((c) => c.scores[scorer.name] ?? 0))
    };
  }
  const overall = {
    avgScore: avg(cases.flatMap((c) => Object.values(c.scores))),
    passRate: cases.length > 0 ? cases.filter((c) => c.pass).length / cases.length : 0,
    totalCost: sum(cases.map((c) => c.cost))
  };
  return {
    suite: suiteName,
    timestamp: new Date(),
    cases,
    summary: { byModel, byScorer, overall }
  };
}
|
|
141
|
+
|
|
142
|
+
// src/eval/scorers.ts
/**
 * Scorer: exact string equality after trimming surrounding whitespace.
 * Scores 0 when the case supplied no expected value.
 */
function exactMatch() {
  const score = (output, expected) =>
    expected === undefined ? 0 : output.trim() === expected.trim() ? 1 : 0;
  return { name: "exactMatch", score };
}
|
|
152
|
+
/**
 * Scorer: 1 when the output includes the given substring, else 0.
 */
function contains(substring) {
  const score = (output) => (output.includes(substring) ? 1 : 0);
  return { name: `contains(${substring})`, score };
}
|
|
160
|
+
/**
 * Scorer: 1 when the output matches the regular expression, else 0.
 */
function regex(pattern) {
  return {
    name: `regex(${pattern})`,
    score(output) {
      // Fix: RegExp.prototype.test is stateful for /g and /y flags (it
      // advances pattern.lastIndex), which made repeated scoring with a
      // global regex alternate between 1 and 0. Reset so every call
      // searches from the start of the output.
      pattern.lastIndex = 0;
      return pattern.test(output) ? 1 : 0;
    }
  };
}
|
|
168
|
+
/**
 * Wrap a user-provided scoring function, clamping its result to [0, 1].
 * Supports both sync and async scoring functions; a Promise result is
 * clamped after it resolves, a plain result is clamped synchronously.
 */
function custom(fn, name) {
  const clamp = (raw) => Math.max(0, Math.min(1, raw));
  return {
    name: name ?? "custom",
    score(output, expected, input) {
      const result = fn(output, expected, input);
      return result instanceof Promise ? result.then(clamp) : clamp(result);
    }
  };
}
|
|
180
|
+
|
|
181
|
+
// src/eval/llm-judge.ts
// Criteria descriptions for the built-in judge names.
var BUILT_IN_CRITERIA = {
  accuracy: "Does the output correctly and factually answer the input?",
  relevance: "Is the output relevant and on-topic for the input?",
  coherence: "Is the output well-structured, logical, and coherent?",
  helpfulness: "Is the output helpful, actionable, and complete?"
};
/**
 * Scorer that asks an LLM to grade the output on a 0.0-1.0 scale.
 *
 * `name` selects a built-in criteria (accuracy, relevance, coherence,
 * helpfulness); otherwise `options.criteria` or `name` itself is used as
 * the criteria text. Scores clamp to [0, 1]; any call failure or
 * unparsable judge response yields 0.
 */
function llmJudge(name, options) {
  const model = options?.model ?? "sonnet";
  const criteria = options?.criteria ?? BUILT_IN_CRITERIA[name] ?? name;
  const clamp = (raw) => Math.max(0, Math.min(1, raw));
  return {
    name: `llmJudge(${name})`,
    async score(output, expected, input) {
      // Use an injected ai when provided; otherwise lazily import the default.
      const ai = options?.ai ?? (await import("../ai-AWJOUXFM.js")).ai;
      const promptLines = [
        "You are evaluating an AI output. Score from 0.0 to 1.0.",
        "",
        `Criteria: ${criteria}`,
        `Input: ${input}`,
        `Expected: ${expected ?? "Not provided"}`,
        `Actual Output: ${output}`,
        "",
        'Respond with JSON only: { "score": <number 0.0-1.0>, "reasoning": "<brief explanation>" }'
      ];
      try {
        const response = await ai(promptLines.join("\n"), {
          model,
          system: "You are a precise evaluator. Respond with valid JSON only."
        });
        const parsed = JSON.parse(response.text);
        return typeof parsed.score === "number" ? clamp(parsed.score) : 0;
      } catch {
        return 0;
      }
    }
  };
}
|
|
219
|
+
|
|
220
|
+
// src/eval/agent-test.ts
/**
 * Run every test case against an agent, sequentially, and summarize
 * pass/fail counts.
 *
 * @param agent - Agent with `name` and `run(input)`.
 * @param cases - Test-case descriptors understood by runAgentTestCase.
 * @returns `{ agent, cases, summary: { passed, failed, total } }`.
 */
async function testAgent(agent, cases) {
  const results = [];
  for (const testCase of cases) {
    results.push(await runAgentTestCase(agent, testCase));
  }
  const passed = results.reduce((n, r) => (r.passed ? n + 1 : n), 0);
  return {
    agent: agent.name,
    cases: results,
    summary: { passed, failed: results.length - passed, total: results.length }
  };
}
|
|
235
|
+
/**
 * Run a single agent test case and collect assertion failures.
 *
 * Supported expectations on `testCase`: expectContains, expectNotContains,
 * expectToolCalled, expectToolsCalled, expectMaxSteps, expectMatch.
 * An agent that throws records one "Agent execution failed" failure
 * instead of aborting the test run.
 *
 * @param agent    - Agent with `run(input)` returning `{ text, toolCalls, steps, cost, latencyMs }`.
 * @param testCase - Case descriptor with `name`, `input` and expectations.
 * @returns `{ name, passed, text, toolsCalled, steps, cost, latencyMs, failures }`.
 */
async function runAgentTestCase(agent, testCase) {
  const failures = [];
  let text = "";
  let toolsCalled = [];
  let steps = 0;
  let cost = 0;
  let latencyMs = 0;
  try {
    const result = await agent.run(testCase.input);
    text = result.text;
    toolsCalled = result.toolCalls.map((tc) => tc.name);
    steps = result.steps.length;
    cost = result.cost;
    latencyMs = result.latencyMs;
    if (testCase.expectContains) {
      for (const expected of testCase.expectContains) {
        if (!text.includes(expected)) {
          failures.push(`Expected output to contain "${expected}"`);
        }
      }
    }
    if (testCase.expectNotContains) {
      for (const notExpected of testCase.expectNotContains) {
        if (text.includes(notExpected)) {
          failures.push(`Expected output to NOT contain "${notExpected}"`);
        }
      }
    }
    if (testCase.expectToolCalled) {
      if (!toolsCalled.includes(testCase.expectToolCalled)) {
        failures.push(`Expected tool "${testCase.expectToolCalled}" to be called but it was not`);
      }
    }
    if (testCase.expectToolsCalled) {
      for (const tool of testCase.expectToolsCalled) {
        if (!toolsCalled.includes(tool)) {
          failures.push(`Expected tool "${tool}" to be called but it was not`);
        }
      }
    }
    if (testCase.expectMaxSteps !== void 0) {
      if (steps > testCase.expectMaxSteps) {
        failures.push(`Expected at most ${testCase.expectMaxSteps} steps but agent took ${steps}`);
      }
    }
    if (testCase.expectMatch) {
      // Fix: RegExp.prototype.test is stateful for /g and /y flags (it
      // advances lastIndex), so a regex reused across cases could fail a
      // later case that actually matches. Reset before testing.
      testCase.expectMatch.lastIndex = 0;
      if (!testCase.expectMatch.test(text)) {
        failures.push(`Expected output to match ${testCase.expectMatch} but it did not`);
      }
    }
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    failures.push(`Agent execution failed: ${message}`);
  }
  return {
    name: testCase.name,
    passed: failures.length === 0,
    text,
    toolsCalled,
    steps,
    cost,
    latencyMs,
    failures
  };
}
|
|
300
|
+
// Public API of the eval module.
export {
  contains,
  custom,
  evalSuite,
  exactMatch,
  llmJudge,
  regex,
  testAgent
};
|
package/dist/index.cjs
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
"use strict";Object.defineProperty(exports, "__esModule", {value: true});
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
var _chunkAUR2BBB5cjs = require('./chunk-AUR2BBB5.cjs');
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
var _chunk3LGKZRGYcjs = require('./chunk-3LGKZRGY.cjs');
|
|
11
|
+
|
|
12
|
+
// src/utils/tokens.ts
/**
 * Estimate the number of tokens in `text`.
 *
 * Uses the ~4-characters-per-token heuristic. The original wrapped the
 * computation in a try/catch whose catch body returned the identical
 * expression; Math.ceil of a number cannot throw, so that dead branch
 * is removed.
 *
 * @param text  - Text to count tokens for.
 * @param model - Reserved for future model-specific encodings (currently unused).
 * @returns Estimated token count; 0 for empty input.
 */
function tokens(text, model) {
  if (text.length === 0) {
    return 0;
  }
  return Math.ceil(text.length / 4);
}
|
|
23
|
+
|
|
24
|
+
// src/index.ts
// Default shared AiFunction instance built from the global configuration.
var ai = _chunkAUR2BBB5cjs.createAi.call(void 0, );
// Re-export the public API (generated CommonJS interop for the ESM source).
exports.AiStream = _chunkAUR2BBB5cjs.AiStream; exports.AidError = _chunk3LGKZRGYcjs.AidError; exports.ai = ai; exports.configure = _chunk3LGKZRGYcjs.configure; exports.createAi = _chunkAUR2BBB5cjs.createAi; exports.estimateCost = _chunkAUR2BBB5cjs.estimateCost; exports.resetConfig = _chunk3LGKZRGYcjs.resetConfig; exports.tokens = tokens;
|
package/dist/index.d.cts
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
// Type declarations for the package's main entry point (CommonJS build).
import { A as AiOptions, a as AiFunction, T as Tool, M as Middleware, b as AiStreamChunk, c as AiResponse } from './types-BJReASS-.cjs';
export { B as BatchOptions, C as ConversationOptions, E as ErrorCode, F as FluentAi, d as Message, R as ResolvedOptions, e as RetryConfig, f as RewriteOptions, S as SummarizeOptions, g as TokenUsage, h as ToolCall } from './types-BJReASS-.cjs';
export { A as AidError } from './errors-zAPbTlpe.cjs';

/**
 * Creates an AiFunction - the core callable with all methods attached.
 */
declare function createAi(baseOptions?: AiOptions): AiFunction;

/**
 * Shape of the global configuration accepted by `configure`.
 */
interface Config {
    model: string;
    timeout: number;
    maxTokens: number;
    temperature: number;
    topP: number;
    stop: string[];
    tools: Tool[];
    middleware: Middleware[];
    aliases: Record<string, string>;
    providers: Record<string, {
        apiKey?: string;
        baseUrl?: string;
    }>;
}
/** Every Config field is optional when passed to `configure`. */
type PartialConfig = Partial<Config>;
/**
 * Configure global defaults for all AI calls.
 * Merges the provided options into the existing global configuration.
 */
declare function configure(options: PartialConfig): void;
/**
 * Reset the global configuration to defaults.
 */
declare function resetConfig(): void;

/**
 * Count the number of tokens in a text string.
 *
 * Attempts to use tiktoken for accurate token counting.
 * Falls back to a simple heuristic (text.length / 4) if tiktoken is unavailable.
 *
 * @param text - The text to count tokens for
 * @param model - Optional model name (for future tiktoken encoding selection)
 * @returns The estimated number of tokens
 */
declare function tokens(text: string, model?: string): number;

/**
 * Estimate the min and max cost of an LLM API call.
 *
 * @param params - Estimation parameters
 * @param params.model - The model name
 * @param params.inputTokens - Number of input tokens
 * @param params.maxOutputTokens - Maximum number of output tokens
 * @returns An object with min and max cost estimates in USD
 */
declare function estimateCost(params: {
    model: string;
    inputTokens: number;
    maxOutputTokens: number;
}): {
    min: number;
    max: number;
};

// Callback signatures for the three AiStream events.
type TextCallback = (delta: string) => void;
type DoneCallback = (response: AiResponse) => void;
type ErrorCallback = (error: Error) => void;
/**
 * AiStream wraps an AsyncGenerator of AiStreamChunk values, providing both
 * the AsyncIterable protocol (for `for await`) and event-based callbacks
 * (via `.on("text", cb)`, `.on("done", cb)`, `.on("error", cb)`).
 *
 * Provider adapters create AiStream instances by passing an async generator
 * function that yields chunks as they arrive from the upstream API.
 */
declare class AiStream implements AsyncIterable<AiStreamChunk> {
    private _generatorFn;
    private _aborted;
    private _textCallbacks;
    private _doneCallbacks;
    private _errorCallbacks;
    constructor(generatorFn: () => AsyncGenerator<AiStreamChunk>);
    /**
     * Register a callback for a stream event.
     *
     * - "text": fired for each non-done chunk, receives the delta string
     * - "done": fired when the final chunk arrives, receives the AiResponse
     * - "error": fired if the generator throws, receives the Error
     */
    on(event: "text", cb: TextCallback): void;
    on(event: "done", cb: DoneCallback): void;
    on(event: "error", cb: ErrorCallback): void;
    /**
     * Signal the stream to stop after the current chunk.
     * The for-await loop will end gracefully without throwing.
     */
    abort(): void;
    /**
     * Implement the AsyncIterable protocol so this works with `for await`.
     */
    [Symbol.asyncIterator](): AsyncIterator<AiStreamChunk>;
}

/** Default AiFunction instance built from the global configuration. */
declare const ai: AiFunction;

export { AiFunction, AiOptions, AiResponse, AiStream, AiStreamChunk, ai, configure, createAi, estimateCost, resetConfig, tokens };
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
// Type declarations for the package's main entry point (ESM build).
import { A as AiOptions, a as AiFunction, T as Tool, M as Middleware, b as AiStreamChunk, c as AiResponse } from './types-BJReASS-.js';
export { B as BatchOptions, C as ConversationOptions, E as ErrorCode, F as FluentAi, d as Message, R as ResolvedOptions, e as RetryConfig, f as RewriteOptions, S as SummarizeOptions, g as TokenUsage, h as ToolCall } from './types-BJReASS-.js';
export { A as AidError } from './errors-CUVTnseb.js';

/**
 * Creates an AiFunction - the core callable with all methods attached.
 */
declare function createAi(baseOptions?: AiOptions): AiFunction;

/**
 * Shape of the global configuration accepted by `configure`.
 */
interface Config {
    model: string;
    timeout: number;
    maxTokens: number;
    temperature: number;
    topP: number;
    stop: string[];
    tools: Tool[];
    middleware: Middleware[];
    aliases: Record<string, string>;
    providers: Record<string, {
        apiKey?: string;
        baseUrl?: string;
    }>;
}
/** Every Config field is optional when passed to `configure`. */
type PartialConfig = Partial<Config>;
/**
 * Configure global defaults for all AI calls.
 * Merges the provided options into the existing global configuration.
 */
declare function configure(options: PartialConfig): void;
/**
 * Reset the global configuration to defaults.
 */
declare function resetConfig(): void;

/**
 * Count the number of tokens in a text string.
 *
 * Attempts to use tiktoken for accurate token counting.
 * Falls back to a simple heuristic (text.length / 4) if tiktoken is unavailable.
 *
 * @param text - The text to count tokens for
 * @param model - Optional model name (for future tiktoken encoding selection)
 * @returns The estimated number of tokens
 */
declare function tokens(text: string, model?: string): number;

/**
 * Estimate the min and max cost of an LLM API call.
 *
 * @param params - Estimation parameters
 * @param params.model - The model name
 * @param params.inputTokens - Number of input tokens
 * @param params.maxOutputTokens - Maximum number of output tokens
 * @returns An object with min and max cost estimates in USD
 */
declare function estimateCost(params: {
    model: string;
    inputTokens: number;
    maxOutputTokens: number;
}): {
    min: number;
    max: number;
};

// Callback signatures for the three AiStream events.
type TextCallback = (delta: string) => void;
type DoneCallback = (response: AiResponse) => void;
type ErrorCallback = (error: Error) => void;
/**
 * AiStream wraps an AsyncGenerator of AiStreamChunk values, providing both
 * the AsyncIterable protocol (for `for await`) and event-based callbacks
 * (via `.on("text", cb)`, `.on("done", cb)`, `.on("error", cb)`).
 *
 * Provider adapters create AiStream instances by passing an async generator
 * function that yields chunks as they arrive from the upstream API.
 */
declare class AiStream implements AsyncIterable<AiStreamChunk> {
    private _generatorFn;
    private _aborted;
    private _textCallbacks;
    private _doneCallbacks;
    private _errorCallbacks;
    constructor(generatorFn: () => AsyncGenerator<AiStreamChunk>);
    /**
     * Register a callback for a stream event.
     *
     * - "text": fired for each non-done chunk, receives the delta string
     * - "done": fired when the final chunk arrives, receives the AiResponse
     * - "error": fired if the generator throws, receives the Error
     */
    on(event: "text", cb: TextCallback): void;
    on(event: "done", cb: DoneCallback): void;
    on(event: "error", cb: ErrorCallback): void;
    /**
     * Signal the stream to stop after the current chunk.
     * The for-await loop will end gracefully without throwing.
     */
    abort(): void;
    /**
     * Implement the AsyncIterable protocol so this works with `for await`.
     */
    [Symbol.asyncIterator](): AsyncIterator<AiStreamChunk>;
}

/** Default AiFunction instance built from the global configuration. */
declare const ai: AiFunction;

export { AiFunction, AiOptions, AiResponse, AiStream, AiStreamChunk, ai, configure, createAi, estimateCost, resetConfig, tokens };
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
import {
|
|
2
|
+
AiStream,
|
|
3
|
+
createAi,
|
|
4
|
+
estimateCost
|
|
5
|
+
} from "./chunk-YNIEOBDF.js";
|
|
6
|
+
import {
|
|
7
|
+
AidError,
|
|
8
|
+
configure,
|
|
9
|
+
resetConfig
|
|
10
|
+
} from "./chunk-2TNYBUNK.js";
|
|
11
|
+
|
|
12
|
+
// src/utils/tokens.ts
/**
 * Estimate the number of tokens in `text`.
 *
 * Uses the ~4-characters-per-token heuristic. The original wrapped the
 * computation in a try/catch whose catch body returned the identical
 * expression; Math.ceil of a number cannot throw, so that dead branch
 * is removed.
 *
 * @param text  - Text to count tokens for.
 * @param model - Reserved for future model-specific encodings (currently unused).
 * @returns Estimated token count; 0 for empty input.
 */
function tokens(text, model) {
  if (text.length === 0) {
    return 0;
  }
  return Math.ceil(text.length / 4);
}
|
|
23
|
+
|
|
24
|
+
// src/index.ts
// Default shared AiFunction instance built from the global configuration.
var ai = createAi();
// Public API of the package's main entry point.
export {
  AiStream,
  AidError,
  ai,
  configure,
  createAi,
  estimateCost,
  resetConfig,
  tokens
};
|