agentic-api 2.0.646 → 2.0.885

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/dist/src/agents/prompts.d.ts +2 -3
  2. package/dist/src/agents/prompts.js +21 -118
  3. package/dist/src/agents/reducer.loaders.d.ts +103 -1
  4. package/dist/src/agents/reducer.loaders.js +164 -2
  5. package/dist/src/agents/reducer.types.d.ts +34 -3
  6. package/dist/src/agents/simulator.d.ts +32 -2
  7. package/dist/src/agents/simulator.executor.d.ts +15 -5
  8. package/dist/src/agents/simulator.executor.js +134 -67
  9. package/dist/src/agents/simulator.js +251 -8
  10. package/dist/src/agents/simulator.prompts.d.ts +55 -10
  11. package/dist/src/agents/simulator.prompts.js +305 -61
  12. package/dist/src/agents/simulator.types.d.ts +62 -1
  13. package/dist/src/agents/simulator.types.js +5 -0
  14. package/dist/src/agents/subagent.d.ts +128 -0
  15. package/dist/src/agents/subagent.js +231 -0
  16. package/dist/src/agents/worker.executor.d.ts +48 -0
  17. package/dist/src/agents/worker.executor.js +152 -0
  18. package/dist/src/execute/helpers.d.ts +3 -0
  19. package/dist/src/execute/helpers.js +222 -16
  20. package/dist/src/execute/responses.js +81 -55
  21. package/dist/src/execute/shared.d.ts +5 -0
  22. package/dist/src/execute/shared.js +27 -0
  23. package/dist/src/index.d.ts +2 -1
  24. package/dist/src/index.js +3 -1
  25. package/dist/src/llm/openai.js +8 -1
  26. package/dist/src/llm/pricing.js +2 -0
  27. package/dist/src/llm/xai.js +11 -6
  28. package/dist/src/prompts.d.ts +14 -0
  29. package/dist/src/prompts.js +41 -1
  30. package/dist/src/rag/rag.manager.d.ts +18 -3
  31. package/dist/src/rag/rag.manager.js +114 -12
  32. package/dist/src/rag/types.d.ts +3 -1
  33. package/dist/src/rules/git/git.e2e.helper.js +51 -4
  34. package/dist/src/rules/git/git.health.js +89 -56
  35. package/dist/src/rules/git/index.d.ts +2 -2
  36. package/dist/src/rules/git/index.js +22 -5
  37. package/dist/src/rules/git/repo.d.ts +64 -6
  38. package/dist/src/rules/git/repo.js +572 -141
  39. package/dist/src/rules/git/repo.pr.d.ts +11 -18
  40. package/dist/src/rules/git/repo.pr.js +82 -94
  41. package/dist/src/rules/git/repo.tools.d.ts +5 -0
  42. package/dist/src/rules/git/repo.tools.js +6 -1
  43. package/dist/src/rules/types.d.ts +0 -2
  44. package/dist/src/rules/utils.matter.js +1 -5
  45. package/dist/src/scrapper.d.ts +138 -25
  46. package/dist/src/scrapper.js +538 -160
  47. package/dist/src/stategraph/stategraph.d.ts +6 -2
  48. package/dist/src/stategraph/stategraph.js +21 -6
  49. package/dist/src/stategraph/types.d.ts +14 -6
  50. package/dist/src/types.d.ts +22 -0
  51. package/dist/src/utils.d.ts +24 -0
  52. package/dist/src/utils.js +84 -86
  53. package/package.json +3 -2
  54. package/dist/src/agents/semantic.d.ts +0 -4
  55. package/dist/src/agents/semantic.js +0 -19
  56. package/dist/src/execute/legacy.d.ts +0 -46
  57. package/dist/src/execute/legacy.js +0 -460
  58. package/dist/src/pricing.llm.d.ts +0 -5
  59. package/dist/src/pricing.llm.js +0 -14
@@ -1,460 +0,0 @@
1
- "use strict";
2
- /**
3
- * Legacy Implementation - Chat Completions API (beta.chat.completions)
4
- *
5
- * ⚠️ Cette implémentation utilise openai.beta.chat.completions.stream (ancienne API)
6
- * Pour les nouveaux projets, Responses API (responses.ts) est recommandée.
7
- *
8
- * Code optimisé depuis execute.ts original avec les améliorations suivantes:
9
- * - OPTIM: Helpers centralisés (accumulateUsageTokens, stepsToActions)
10
- * - BUG FIX: executionResultMerge fusionne actions correctement (corrigé dans types.ts)
11
- * - BUG FIX: moreThinkin supprimé (obsolète, reasoning_effort fait le job)
12
- * - BUG FIX: Suppression de la boucle do...while(moreThinkin)
13
- * - BUG FIX: Suppression de la ligne reasoning_effort dupliquée (ligne 425 originale)
14
- *
15
- * TODO [Optimisation future]: Remplacer la boucle for séquentielle par batchProcessToolCalls
16
- * pour exploiter pleinement parallel_tool_calls et réduire la latence
17
- */
18
- Object.defineProperty(exports, "__esModule", { value: true });
19
- exports.readCompletionsStream = readCompletionsStream;
20
- exports.executeAgentSet = executeAgentSet;
21
- exports.executeAgent = executeAgent;
22
- exports.executeQuery = executeQuery;
23
- const types_1 = require("../types");
24
- const utils_1 = require("../utils");
25
- const stategraph_1 = require("../stategraph");
26
- const pricing_llm_1 = require("../pricing.llm");
27
- //
28
- // Import des utilitaires partagés et helpers optimisés
29
- const shared_1 = require("./shared");
30
- const modelconfig_1 = require("./modelconfig");
31
- const helpers_1 = require("./helpers");
32
- async function readCompletionsStream(params) {
33
- const openai = (0, utils_1.openaiInstance)();
34
- const { stateGraph, discussion, agentConfig, agents, discussionRootAgent, stdout, final, context, verbose } = params;
35
- const model = (0, modelconfig_1.modelConfig)(agentConfig.model).model;
36
- const accumulatedFunctionCall = final.choices[0]?.message.tool_calls || [];
37
- const content = final.choices[0]?.message.content;
38
- let localResult = (0, types_1.enrichExecutionResult)({
39
- runId: `${discussionRootAgent}-${Date.now()}`,
40
- startQuery: '',
41
- actions: [],
42
- lastMessage: '',
43
- usage: { prompt: 0, completion: 0, total: 0, cost: 0 },
44
- });
45
- //
46
- // OPTIM: Utiliser helper centralisé pour l'accumulation
47
- (0, helpers_1.accumulateUsageTokens)(stateGraph, discussion, discussionRootAgent, model, final.usage);
48
- if (content) {
49
- if (verbose)
50
- console.log("✅ Agent (1): save content:", content?.length);
51
- stateGraph.push(discussionRootAgent, { role: "assistant", content });
52
- }
53
- //
54
- // Si le modèle décide d'appeler une fonction (par exemple "transferAgents")
55
- for (const functionCall of accumulatedFunctionCall) {
56
- const args = JSON.parse(functionCall?.function?.arguments || '{}');
57
- if (args.justification) {
58
- (0, shared_1.sendFeedback)({
59
- agent: agentConfig.name,
60
- stdout,
61
- description: args.justification,
62
- usage: discussion.usage,
63
- state: '',
64
- verbose
65
- });
66
- }
67
- // Créer une référence mutable pour handleTransferCall
68
- const currentAgentRef = { name: agentConfig.name };
69
- const functionCallResult = await (0, utils_1.handleTransferCall)(discussion, currentAgentRef, agents, functionCall, context);
70
- if (functionCallResult.usage) {
71
- stateGraph.updateTokens(discussionRootAgent, {
72
- prompt: functionCallResult.usage.prompt || 0,
73
- completion: functionCallResult.usage.completion || 0,
74
- total: functionCallResult.usage.total || 0,
75
- cost: functionCallResult.usage.cost || 0
76
- });
77
- }
78
- if (functionCallResult.did_transfer) {
79
- if (verbose)
80
- console.log("✅ Agent transfer response:", functionCallResult.source_agent, '::to', functionCallResult.destination_agent, '::with', functionCallResult.content, ' transfer done ✅');
81
- // Mise à jour du message système avec les nouvelles instructions du nouvel agent courant
82
- const transferredAgent = agents.find(a => a.name === currentAgentRef.name) || agentConfig;
83
- const instructions = transferredAgent.instructions;
84
- const enrichedInstructions = (await params.enrichWithMemory?.("system", transferredAgent, context)) || '';
85
- // ✅ Set préserve le trail existant via updateSystemMessage()
86
- stateGraph.set(discussionRootAgent, instructions + '\n' + enrichedInstructions);
87
- stateGraph.push(discussionRootAgent, {
88
- role: "assistant",
89
- content: functionCallResult.content,
90
- name: functionCallResult.name
91
- });
92
- }
93
- else {
94
- // other function call have a result
95
- stateGraph.push(discussionRootAgent, {
96
- role: "assistant",
97
- content: functionCallResult.content,
98
- name: functionCallResult.name
99
- });
100
- }
101
- //
102
- // send user feedback (if not already sent via addStep for transfer)
103
- if (functionCallResult.feedback && !functionCallResult.did_transfer) {
104
- (0, shared_1.sendFeedback)({
105
- agent: agentConfig.name,
106
- stdout,
107
- description: functionCallResult.feedback,
108
- usage: discussion.usage,
109
- state: '',
110
- verbose
111
- });
112
- }
113
- // Réactualisation de la liste des outils pour le nouvel agent courant
114
- const currentAgent = agents.find(a => a.name === currentAgentRef.name) || agentConfig;
115
- const tools = currentAgent?.tools || [];
116
- const followUpOptions = Object.assign({}, (0, modelconfig_1.modelConfig)(currentAgent.model));
117
- followUpOptions.messages = discussion.messages;
118
- if (tools.length > 0) {
119
- followUpOptions.tools = tools;
120
- followUpOptions.tool_choice = "auto";
121
- }
122
- //
123
- // Legacy: utiliser beta.chat.completions.stream (ancienne API)
124
- // NOTE: Cette API est en beta, Responses API est recommandée pour les nouveaux projets
125
- const followUpStream = await openai.beta.chat.completions.stream(followUpOptions);
126
- for await (const chunk of followUpStream) {
127
- const delta = chunk.choices[0]?.delta;
128
- if (delta?.content) {
129
- stdout.write(delta?.content);
130
- }
131
- }
132
- const followUpFinal = await followUpStream.finalChatCompletion();
133
- //
134
- // ✅ addStep APRÈS la réponse de l'agent (pour avoir le trail complet au prochain tour)
135
- if (functionCallResult.name) {
136
- stateGraph.addStep(discussionRootAgent, {
137
- tool: functionCallResult.name,
138
- context: functionCallResult.context || '',
139
- reason: args.justification || '',
140
- id: functionCallResult.id
141
- });
142
- }
143
- //
144
- // when called a function, agent must continue the conversation
145
- if (followUpFinal.choices[0]?.message.tool_calls) {
146
- const partial = await readCompletionsStream({
147
- stateGraph,
148
- discussion,
149
- agentConfig: currentAgent,
150
- agents,
151
- discussionRootAgent,
152
- stdout,
153
- final: followUpFinal,
154
- context,
155
- verbose,
156
- enrichWithMemory: params.enrichWithMemory
157
- });
158
- localResult = (0, types_1.executionResultMerge)(localResult, partial);
159
- // ✅ OPTIM: Utiliser helper centralisé
160
- localResult.actions = (0, helpers_1.stepsToActions)(stateGraph, discussionRootAgent);
161
- return localResult;
162
- }
163
- //
164
- // OPTIM: Utiliser helper centralisé pour l'accumulation
165
- (0, helpers_1.accumulateUsageTokens)(stateGraph, discussion, discussionRootAgent, model, followUpFinal.usage);
166
- //
167
- // send the cost
168
- (0, shared_1.sendFeedback)({
169
- agent: currentAgent.name,
170
- stdout,
171
- description: '',
172
- usage: discussion.usage,
173
- state: followUpFinal.id || '',
174
- verbose
175
- });
176
- const followUpContent = followUpFinal.choices[0]?.message.content;
177
- //
178
- // capture new memory with the last message
179
- await params.enrichWithMemory?.("assistant", currentAgent, context);
180
- if (followUpContent) {
181
- stateGraph.push(discussionRootAgent, { role: "assistant", content: followUpContent });
182
- }
183
- }
184
- //
185
- // ✅ OPTIM: Utiliser helper centralisé
186
- localResult.actions = (0, helpers_1.stepsToActions)(stateGraph, discussionRootAgent);
187
- return localResult;
188
- }
189
- /**
190
- * Executes a set of agents to process a user query
191
- *
192
- * OPTIMIZED: Sans boucle do...while(moreThinkin), reasoning_effort fait le job
193
- */
194
- async function executeAgentSet(agentSet, context, params) {
195
- const { query, verbose } = params;
196
- const openai = (0, utils_1.openaiInstance)();
197
- const agents = (0, utils_1.injectTransferTools)(agentSet);
198
- // 🔑 CLÉ DE DISCUSSION: Agent racine qui sert de point d'entrée
199
- const discussionRootAgent = params.home || agents[0].name;
200
- // 🎯 Récupération du StateGraph depuis le context
201
- const stateGraph = (0, stategraph_1.sessionStateGraphGet)(context);
202
- // 📍 Créer ou restaurer la discussion pour cet agent racine
203
- const discussion = stateGraph.createOrRestore(discussionRootAgent);
204
- // 🔄 Agent courant: peut différer de l'agent racine après des transferts
205
- let currentAgent = (0, stategraph_1.getSpecializedAgent)(discussion) || discussionRootAgent;
206
- // Trouver la config de l'agent courant
207
- const currentAgentConfig = agents.find(a => a.name === currentAgent);
208
- discussion.description = currentAgentConfig?.publicDescription;
209
- if (!currentAgentConfig) {
210
- throw new Error(`Agent ${currentAgent} not found`);
211
- }
212
- if (!currentAgentConfig.instructions) {
213
- throw new Error(`Agent ${currentAgent} has no instructions`);
214
- }
215
- // 🔧 Setup system message si pas encore fait
216
- let enrichedQuery = query;
217
- if (!discussion.messages.length) {
218
- discussion.usage = { prompt: 0, completion: 0, total: 0, cost: 0 };
219
- const enrichedInstructions = await params.enrichWithMemory?.("system", currentAgentConfig, context);
220
- const instructions = currentAgentConfig.instructions + '\n' + enrichedInstructions;
221
- stateGraph.set(discussionRootAgent, instructions);
222
- }
223
- else {
224
- enrichedQuery = (await params.enrichWithMemory?.("user", currentAgentConfig, context)) || query;
225
- }
226
- stateGraph.push(discussionRootAgent, { role: "user", content: enrichedQuery });
227
- const tools = currentAgentConfig.tools;
228
- if (verbose) {
229
- console.log('--- DBG current agent', currentAgentConfig.name, `deep-thinking:${params.thinking}`, 'memory len:', discussion.messages.length);
230
- }
231
- let result = (0, types_1.enrichExecutionResult)({
232
- runId: `${discussionRootAgent}-${Date.now()}`,
233
- startQuery: query,
234
- actions: [],
235
- lastMessage: '',
236
- usage: { prompt: 0, completion: 0, total: 0, cost: 0 },
237
- });
238
- //
239
- // ✅ BUG FIX: Plus de boucle do...while(moreThinkin) - reasoning_effort fait le job
240
- const intialinfo = ["Analyse", "Analyse en cours…", "Réflexion"];
241
- const randomIndex = Math.floor(Math.random() * intialinfo.length);
242
- (0, shared_1.sendFeedback)({
243
- agent: currentAgentConfig.name,
244
- stdout: params.stdout,
245
- description: intialinfo[randomIndex],
246
- usage: result.usage,
247
- state: discussion.id
248
- });
249
- //
250
- // ✅ BUG FIX: modelConfig gère reasoning_effort, pas de duplication (ligne 425 originale supprimée)
251
- const model = (0, modelconfig_1.modelConfig)(currentAgentConfig.model, { thinking: params.thinking });
252
- const options = Object.assign({}, model);
253
- options.messages = discussion.messages;
254
- if (tools.length > 0) {
255
- options.tools = tools;
256
- options.tool_choice = "auto";
257
- options.parallel_tool_calls = true;
258
- }
259
- const stream = await openai.beta.chat.completions.stream(options);
260
- for await (const chunk of stream) {
261
- const delta = chunk.choices[0]?.delta;
262
- if (delta?.content) {
263
- params.stdout.write(delta?.content);
264
- }
265
- }
266
- const final = await stream.finalChatCompletion();
267
- const partial = await readCompletionsStream({
268
- stateGraph,
269
- discussion,
270
- agentConfig: currentAgentConfig,
271
- agents,
272
- discussionRootAgent,
273
- stdout: params.stdout,
274
- context,
275
- final,
276
- verbose,
277
- enrichWithMemory: params.enrichWithMemory,
278
- });
279
- result = (0, types_1.executionResultMerge)(result, partial);
280
- //
281
- // ✅ OPTIM: Utiliser helper centralisé
282
- result.actions = (0, helpers_1.stepsToActions)(stateGraph, discussionRootAgent);
283
- // 💾 Auto-save du StateGraph à la fin
284
- (0, stategraph_1.sessionStateGraphSet)(context, stateGraph);
285
- // finalize result
286
- result.lastMessage = discussion.messages?.[discussion.messages.length - 1]?.content || '';
287
- if (discussion?.usage) {
288
- result.usage = {
289
- prompt: discussion.usage.prompt || 0,
290
- completion: discussion.usage.completion || 0,
291
- total: discussion.usage.total || 0,
292
- cost: discussion.usage.cost || 0,
293
- };
294
- }
295
- result = (0, types_1.enrichExecutionResult)(result);
296
- return result;
297
- }
298
- async function executeAgent(agentSet, params) {
299
- const { query, verbose, debug } = params;
300
- const openai = (0, utils_1.openaiInstance)();
301
- const agent = agentSet.find(a => a.name === params.home);
302
- if (!agent) {
303
- throw new Error(`Agent ${params.home} not found`);
304
- }
305
- if (!agent.instructions) {
306
- throw new Error(`Agent ${agent.name} has no instructions`);
307
- }
308
- const messages = [
309
- { role: "system", content: agent.instructions },
310
- { role: "user", content: query }
311
- ];
312
- let usage = { prompt: 0, completion: 0, total: 0, cost: 0 };
313
- let state = '';
314
- let maxIterations = 10;
315
- let iterations = 0;
316
- if (verbose) {
317
- console.log('--- DBG executeAgent:', agent.name);
318
- console.log('--- DBG query:', `${query?.substring(0, 100)}...`);
319
- }
320
- if (debug) {
321
- console.log('--- DBG executeAgent-system:', agent.instructions);
322
- console.log('--- DBG executeAgent-user:', query);
323
- }
324
- while (iterations < maxIterations) {
325
- iterations++;
326
- const options = Object.assign({}, (0, modelconfig_1.modelConfig)(agent.model));
327
- options.messages = messages;
328
- const tools = agent.tools || [];
329
- if (tools.length > 0) {
330
- options.tools = tools;
331
- options.tool_choice = "auto";
332
- }
333
- if (verbose) {
334
- console.log('--- DBG executeAgent:', agent.name, 'iterations:', iterations, '\n', messages.length, '\n---', messages[messages.length - 1]?.content);
335
- }
336
- const stream = await openai.beta.chat.completions.stream(options);
337
- for await (const chunk of stream) {
338
- const delta = chunk.choices[0]?.delta;
339
- if (delta?.content) {
340
- params.stdout.write(delta?.content);
341
- }
342
- }
343
- const final = await stream.finalChatCompletion();
344
- const model = (0, modelconfig_1.modelConfig)(agent.model).model;
345
- (0, pricing_llm_1.accumulateCost)(usage, model, final.usage);
346
- state = final.id;
347
- const content = final.choices[0]?.message.content;
348
- if (content) {
349
- messages.push({ role: "assistant", content });
350
- }
351
- const toolCalls = final.choices[0]?.message.tool_calls;
352
- let hasToolCalls = false;
353
- if (toolCalls && toolCalls.length > 0) {
354
- hasToolCalls = true;
355
- const lastAssistant = messages[messages.length - 1];
356
- if (lastAssistant && lastAssistant.role === 'assistant') {
357
- lastAssistant.tool_calls = toolCalls;
358
- }
359
- else {
360
- messages.push({
361
- role: "assistant",
362
- content: content || null,
363
- tool_calls: toolCalls
364
- });
365
- }
366
- for (const toolCall of toolCalls) {
367
- const args = JSON.parse(toolCall.function.arguments || '{}');
368
- if (agent.toolLogic && agent.toolLogic[toolCall.function.name]) {
369
- try {
370
- const result = await agent.toolLogic[toolCall.function.name](args, { state });
371
- messages.push({
372
- role: "tool",
373
- tool_call_id: toolCall.id,
374
- content: typeof result === 'string' ? result : JSON.stringify(result)
375
- });
376
- }
377
- catch (error) {
378
- messages.push({
379
- role: "tool",
380
- tool_call_id: toolCall.id,
381
- content: `Error: ${error instanceof Error ? error.message : 'Unknown error'}`
382
- });
383
- }
384
- }
385
- }
386
- }
387
- if (!hasToolCalls) {
388
- break;
389
- }
390
- }
391
- return {
392
- usage,
393
- content: messages[messages.length - 1]?.content || '',
394
- messages,
395
- state
396
- };
397
- }
398
- /**
399
- * Executes a simple query without agent orchestration or tool handling
400
- */
401
- async function executeQuery(params) {
402
- const { query, verbose, model: modelName, instructions } = params;
403
- if (!modelName) {
404
- throw new Error('executeQuery requires "model" parameter');
405
- }
406
- const openai = (0, utils_1.openaiInstance)();
407
- const model = (0, modelconfig_1.modelConfig)(modelName);
408
- const more = {};
409
- if (params.json) {
410
- more.response_format = { type: "json_object" };
411
- }
412
- if (params.schema) {
413
- more.response_format = { type: "json_object", schema: params.schema };
414
- }
415
- if (verbose) {
416
- console.log('--- DBG query:', modelName, `${query?.substring(0, 100)}...`);
417
- }
418
- const messages = params.messages || [];
419
- // Ajouter le système prompt si fourni
420
- if (instructions) {
421
- messages.unshift({ role: "system", content: instructions });
422
- }
423
- messages.push({ role: "user", content: query });
424
- let usage = { prompt: 0, completion: 0, total: 0, cost: 0 };
425
- let state = '';
426
- try {
427
- const options = Object.assign({}, model, more);
428
- options.messages = messages;
429
- if (verbose) {
430
- console.log('--- DBG executeQuery options:', JSON.stringify(options, null, 2));
431
- }
432
- const stream = await openai.beta.chat.completions.stream(options);
433
- for await (const chunk of stream) {
434
- const delta = chunk.choices[0]?.delta;
435
- if (delta?.content && params.stdout) {
436
- params.stdout.write(delta?.content);
437
- }
438
- }
439
- const final = await stream.finalChatCompletion();
440
- (0, pricing_llm_1.accumulateCost)(usage, model.model, final.usage);
441
- state = final.id || '';
442
- const content = final.choices[0]?.message.content || '';
443
- if (verbose) {
444
- console.log('--- DBG executeQuery completed, usage:', usage);
445
- }
446
- return {
447
- usage,
448
- content,
449
- messages: [
450
- ...messages,
451
- { role: "assistant", content }
452
- ],
453
- state
454
- };
455
- }
456
- catch (error) {
457
- console.error('❌ executeQuery failed:', error);
458
- throw error;
459
- }
460
- }
@@ -1,5 +0,0 @@
1
- /**
2
- * @deprecated Ce fichier est conservé pour rétrocompatibilité.
3
- * Utiliser les imports depuis './llm/pricing' directement.
4
- */
5
- export { modelPricing, calculateCost, accumulateCost, LLM, LLMxai, LLMopenai, } from './llm/pricing';
@@ -1,14 +0,0 @@
1
- "use strict";
2
- Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.LLMopenai = exports.LLMxai = exports.LLM = exports.accumulateCost = exports.calculateCost = exports.modelPricing = void 0;
4
- /**
5
- * @deprecated Ce fichier est conservé pour rétrocompatibilité.
6
- * Utiliser les imports depuis './llm/pricing' directement.
7
- */
8
- var pricing_1 = require("./llm/pricing");
9
- Object.defineProperty(exports, "modelPricing", { enumerable: true, get: function () { return pricing_1.modelPricing; } });
10
- Object.defineProperty(exports, "calculateCost", { enumerable: true, get: function () { return pricing_1.calculateCost; } });
11
- Object.defineProperty(exports, "accumulateCost", { enumerable: true, get: function () { return pricing_1.accumulateCost; } });
12
- Object.defineProperty(exports, "LLM", { enumerable: true, get: function () { return pricing_1.LLM; } });
13
- Object.defineProperty(exports, "LLMxai", { enumerable: true, get: function () { return pricing_1.LLMxai; } });
14
- Object.defineProperty(exports, "LLMopenai", { enumerable: true, get: function () { return pricing_1.LLMopenai; } });