agentic-api 2.0.31 → 2.0.491

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (102)
  1. package/dist/src/agents/agents.example.js +21 -22
  2. package/dist/src/agents/authentication.js +1 -2
  3. package/dist/src/agents/prompts.d.ts +5 -4
  4. package/dist/src/agents/prompts.js +44 -87
  5. package/dist/src/agents/reducer.core.d.ts +24 -2
  6. package/dist/src/agents/reducer.core.js +125 -35
  7. package/dist/src/agents/reducer.loaders.d.ts +55 -1
  8. package/dist/src/agents/reducer.loaders.js +114 -1
  9. package/dist/src/agents/reducer.types.d.ts +45 -2
  10. package/dist/src/agents/semantic.js +1 -2
  11. package/dist/src/agents/simulator.d.ts +11 -3
  12. package/dist/src/agents/simulator.executor.d.ts +14 -4
  13. package/dist/src/agents/simulator.executor.js +81 -23
  14. package/dist/src/agents/simulator.js +128 -42
  15. package/dist/src/agents/simulator.prompts.d.ts +9 -7
  16. package/dist/src/agents/simulator.prompts.js +66 -86
  17. package/dist/src/agents/simulator.types.d.ts +23 -5
  18. package/dist/src/agents/simulator.utils.d.ts +7 -2
  19. package/dist/src/agents/simulator.utils.js +31 -11
  20. package/dist/src/agents/system.js +1 -2
  21. package/dist/src/execute/helpers.d.ts +75 -0
  22. package/dist/src/execute/helpers.js +139 -0
  23. package/dist/src/execute/index.d.ts +11 -0
  24. package/dist/src/execute/index.js +44 -0
  25. package/dist/src/execute/legacy.d.ts +46 -0
  26. package/dist/src/execute/legacy.js +460 -0
  27. package/dist/src/execute/modelconfig.d.ts +19 -0
  28. package/dist/src/execute/modelconfig.js +56 -0
  29. package/dist/src/execute/responses.d.ts +55 -0
  30. package/dist/src/execute/responses.js +594 -0
  31. package/dist/src/execute/shared.d.ts +83 -0
  32. package/dist/src/execute/shared.js +188 -0
  33. package/dist/src/index.d.ts +1 -1
  34. package/dist/src/index.js +2 -2
  35. package/dist/src/{princing.openai.d.ts → pricing.llm.d.ts} +6 -0
  36. package/dist/src/pricing.llm.js +255 -0
  37. package/dist/src/prompts.d.ts +13 -4
  38. package/dist/src/prompts.js +221 -114
  39. package/dist/src/rag/embeddings.d.ts +36 -18
  40. package/dist/src/rag/embeddings.js +131 -128
  41. package/dist/src/rag/index.d.ts +5 -5
  42. package/dist/src/rag/index.js +14 -17
  43. package/dist/src/rag/parser.d.ts +2 -1
  44. package/dist/src/rag/parser.js +11 -14
  45. package/dist/src/rag/rag.examples.d.ts +27 -0
  46. package/dist/src/rag/rag.examples.js +151 -0
  47. package/dist/src/rag/rag.manager.d.ts +383 -0
  48. package/dist/src/rag/rag.manager.js +1390 -0
  49. package/dist/src/rag/types.d.ts +128 -12
  50. package/dist/src/rag/types.js +100 -1
  51. package/dist/src/rag/usecase.d.ts +37 -0
  52. package/dist/src/rag/usecase.js +96 -7
  53. package/dist/src/rules/git/git.e2e.helper.js +22 -2
  54. package/dist/src/rules/git/git.health.d.ts +61 -2
  55. package/dist/src/rules/git/git.health.js +333 -11
  56. package/dist/src/rules/git/index.d.ts +2 -2
  57. package/dist/src/rules/git/index.js +13 -1
  58. package/dist/src/rules/git/repo.d.ts +160 -0
  59. package/dist/src/rules/git/repo.js +777 -0
  60. package/dist/src/rules/git/repo.pr.js +117 -13
  61. package/dist/src/rules/git/repo.tools.d.ts +22 -1
  62. package/dist/src/rules/git/repo.tools.js +50 -1
  63. package/dist/src/rules/types.d.ts +27 -14
  64. package/dist/src/rules/utils.matter.d.ts +0 -4
  65. package/dist/src/rules/utils.matter.js +35 -7
  66. package/dist/src/scrapper.d.ts +15 -22
  67. package/dist/src/scrapper.js +58 -110
  68. package/dist/src/stategraph/index.d.ts +1 -1
  69. package/dist/src/stategraph/stategraph.d.ts +56 -2
  70. package/dist/src/stategraph/stategraph.js +134 -6
  71. package/dist/src/stategraph/stategraph.storage.js +8 -0
  72. package/dist/src/stategraph/types.d.ts +27 -0
  73. package/dist/src/types.d.ts +46 -9
  74. package/dist/src/types.js +8 -7
  75. package/dist/src/usecase.d.ts +11 -2
  76. package/dist/src/usecase.js +27 -35
  77. package/dist/src/utils.d.ts +32 -18
  78. package/dist/src/utils.js +87 -129
  79. package/package.json +10 -3
  80. package/dist/src/agents/digestor.test.d.ts +0 -1
  81. package/dist/src/agents/digestor.test.js +0 -45
  82. package/dist/src/agents/reducer.example.d.ts +0 -28
  83. package/dist/src/agents/reducer.example.js +0 -118
  84. package/dist/src/agents/reducer.process.d.ts +0 -16
  85. package/dist/src/agents/reducer.process.js +0 -143
  86. package/dist/src/agents/reducer.tools.d.ts +0 -29
  87. package/dist/src/agents/reducer.tools.js +0 -157
  88. package/dist/src/agents/simpleExample.d.ts +0 -3
  89. package/dist/src/agents/simpleExample.js +0 -38
  90. package/dist/src/agents/system-review.d.ts +0 -5
  91. package/dist/src/agents/system-review.js +0 -181
  92. package/dist/src/agents/systemReview.d.ts +0 -4
  93. package/dist/src/agents/systemReview.js +0 -22
  94. package/dist/src/execute.d.ts +0 -49
  95. package/dist/src/execute.js +0 -564
  96. package/dist/src/princing.openai.js +0 -54
  97. package/dist/src/rag/tools.d.ts +0 -76
  98. package/dist/src/rag/tools.js +0 -196
  99. package/dist/src/rules/user.mapper.d.ts +0 -61
  100. package/dist/src/rules/user.mapper.js +0 -160
  101. package/dist/src/rules/utils/slug.d.ts +0 -22
  102. package/dist/src/rules/utils/slug.js +0 -35
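
The structural changes in this release are visible from the list alone: the monolithic dist/src/execute.js is replaced by a dist/src/execute/ directory (helpers, legacy, modelconfig, responses, shared plus an index barrel), the misspelled princing.openai module becomes pricing.llm, and rag/tools gives way to rag/rag.manager. For consumers that deep-import these compiled paths, a migration sketch follows; the 2.0.491 export surface is not shown in this diff, so the new import targets below are assumptions to verify against the published typings, not documented API.

// Hypothetical deep-import migration — 2.0.491 paths/exports are assumed, verify before relying on them.

// 2.0.31
//   const { modelConfig, executeAgent } = require("agentic-api/dist/src/execute");   // execute.js (removed below)
//   const { accumulateCost } = require("agentic-api/dist/src/princing.openai");      // removed below

// 2.0.491 (assumed)
//   const { modelConfig, executeAgent } = require("agentic-api/dist/src/execute");   // would now resolve to execute/index.js
//   const { accumulateCost } = require("agentic-api/dist/src/pricing.llm");          // renamed module
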
package/dist/src/execute.js
@@ -1,564 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.modelConfig = modelConfig;
- exports.sendFeedback = sendFeedback;
- exports.readCompletionsStream = readCompletionsStream;
- exports.executeAgentSet = executeAgentSet;
- exports.executeAgent = executeAgent;
- const princing_openai_1 = require("./princing.openai");
- const types_1 = require("./types");
- const utils_1 = require("./utils");
- const stategraph_1 = require("./stategraph");
- function modelConfig(model, custom) {
-     const defaultOptions = Object.assign({
-         stream_options: { "include_usage": true },
-     }, custom || {});
-     const mapping = {
-         "LOW-fast": {
-             temperature: 1,
-             frequency_penalty: 0.0,
-             presence_penalty: 0.0,
-             model: "gpt-5-nano",
-             reasoning_effort: "minimal",
-             verbosity: "low",
-             stream: true
-         },
-         "LOW": {
-             temperature: 1,
-             frequency_penalty: 0.0,
-             presence_penalty: 0.0,
-             model: "gpt-5-nano",
-             reasoning_effort: "medium",
-             verbosity: "low",
-             stream: true
-         },
-         "MEDIUM-fast": {
-             temperature: 1,
-             frequency_penalty: 0.0,
-             presence_penalty: 0.0,
-             model: "gpt-5-mini",
-             reasoning_effort: "minimal",
-             verbosity: "low",
-             stream: true
-         },
-         "LOW-4.1": {
-             temperature: .2,
-             frequency_penalty: 0.0,
-             presence_penalty: 0.0,
-             model: "gpt-4.1-nano",
-             stream: true
-         },
-         "MEDIUM-4.1-mini": {
-             temperature: .2,
-             frequency_penalty: 0.0,
-             presence_penalty: 0.0,
-             model: "gpt-4.1-mini",
-             stream: true
-         },
-         "MEDIUM-4.1": {
-             temperature: .2,
-             frequency_penalty: 0.0,
-             presence_penalty: 0.0,
-             model: "gpt-4.1",
-             stream: true
-         },
-         "MEDIUM": {
-             temperature: 1,
-             frequency_penalty: 0.0,
-             presence_penalty: 0.0,
-             model: "gpt-5-mini",
-             reasoning_effort: "low",
-             verbosity: "low",
-             stream: true
-         },
-         "HIGH-fast": {
-             model: "gpt-5",
-             reasoning_effort: "minimal",
-             verbosity: "low",
-             temperature: 1,
-             stream: true
-         },
-         "HIGH-low": {
-             model: "gpt-5",
-             reasoning_effort: "low",
-             verbosity: "low",
-             stream: true
-         },
-         "HIGH-medium": {
-             model: "gpt-5",
-             reasoning_effort: "medium",
-             verbosity: "low",
-             stream: true
-         },
-         "SEARCH": {
-             temperature: 0.2,
-             frequency_penalty: 0.0,
-             presence_penalty: 0.0,
-             model: "gpt-4o-mini-search-preview",
-             web_search_options: {
-                 user_location: {
-                     type: "approximate",
-                     approximate: {
-                         country: "CH",
-                         city: "Geneva",
-                         region: "Geneva",
-                     },
-                 },
-             },
-         },
-     };
-     const options = Object.assign(mapping[model], defaultOptions);
-     return options;
- }
- function sendFeedback(params) {
-     const { agent, stdout, description, usage, state, verbose } = params;
-     const feedback = {
-         agent,
-         description,
-         usage,
-         state
-     };
-     // if(verbose) {
-     //     console.log('--- DBG sendFeedback:',agent, description || '--', state);
-     // }
-     //
-     // send agent state and description
-     stdout.write(`\n<step>${JSON.stringify(feedback)}</step>\n`);
- }
- async function readCompletionsStream(params) {
-     const openai = (0, utils_1.openaiInstance)();
-     //
-     // set default context here
-     const { stateGraph, discussion, agentConfig, agents, agentName, stdout, final, session, verbose } = params;
-     const model = agentConfig.model.model;
-     const accumulatedFunctionCall = final.choices[0]?.message.tool_calls || [];
-     const content = final.choices[0]?.message.content;
-     let thinking = false;
-     let localResult = (0, types_1.enrichExecutionResult)({
-         runId: `${agentName}-${Date.now()}`,
-         startQuery: '',
-         actions: [],
-         lastMessage: '',
-         usage: { prompt: 0, completion: 0, total: 0, cost: 0 },
-         moreThinkin: false,
-     });
-     // Accumulate cost in the discussion usage
-     (0, princing_openai_1.accumulateCost)(discussion.usage, model, final.usage);
-     stateGraph.updateTokens(agentName, {
-         prompt: final.usage?.prompt_tokens || 0,
-         completion: final.usage?.completion_tokens || 0,
-         total: final.usage?.total_tokens || 0,
-         cost: 0 // Cost already accumulated directly in discussion.usage
-     });
-     // Store state (implementation can be added later if needed)
-     // discussion.state = final.id;
-     if (content) {
-         if (verbose)
-             console.log("✅ Agent (1): 🌶️🌶️🌶️ save content:", content?.length);
-         stateGraph.push(agentName, { role: "assistant", content });
-     }
-     // If the model decides to call a function (for example "transferAgents")
-     for (const functionCall of accumulatedFunctionCall) {
-         const args = JSON.parse(functionCall?.function?.arguments || '{}');
-         if (args.justification) {
-             sendFeedback({
-                 agent: agentConfig.name,
-                 stdout,
-                 description: args.justification,
-                 usage: discussion.usage,
-                 state: '', // State will be set later if needed,
-                 verbose
-             });
-         }
-         // Create a mutable reference for handleTransferCall
-         const currentAgentRef = { name: agentConfig.name };
-         const functionCallResult = await (0, utils_1.handleTransferCall)(discussion, currentAgentRef, agents, functionCall, session);
-         // result can be
-         // {content, usage} {did_transfer}
-         thinking = functionCallResult.thinking;
-         if (functionCallResult.usage) {
-             stateGraph.updateTokens(agentName, {
-                 prompt: functionCallResult.usage.prompt || 0,
-                 completion: functionCallResult.usage.completion || 0,
-                 total: functionCallResult.usage.total || 0,
-                 cost: functionCallResult.usage.cost || 0
-             });
-         }
-         if (functionCallResult.did_transfer) {
-             // record transfer action
-             localResult.actions.push({
-                 action: 'transfert',
-                 content: functionCallResult.destination_agent || currentAgentRef.name,
-                 feedback: functionCallResult.feedback,
-             });
-             if (verbose)
-                 console.log("✅ Agent transfer response:", functionCallResult.source_agent, ' call function ', functionCall.function.name, '::to', functionCallResult.destination_agent, ' transfer done ✅');
-             // Update the system message with the new current agent's instructions
-             const transferredAgent = agents.find(a => a.name === currentAgentRef.name) || agentConfig;
-             const instructions = transferredAgent.instructions;
-             const enrichedInstructions = await params.enrichWithMemory?.("system", transferredAgent, session);
-             stateGraph.set(agentName, instructions + '\n' + enrichedInstructions);
-             // Add the function-call response to the conversation context
-             const message = functionCallResult.content ?? `Le transfert vers l'agent "${transferredAgent.name}" a été effectué, Tu dois répondre immédiatement à la question.`;
-             stateGraph.push(agentName, {
-                 role: "assistant",
-                 content: message,
-                 name: functionCallResult.name
-             });
-             // Immediately surface the tool confirmation to the user output
-             // feedback or message?
-             if (message) {
-                 stdout.write(message + "\n");
-             }
-         }
-         //
-         // other function call have a result
-         else if (functionCallResult.content) {
-             // record tool action
-             localResult.actions.push({
-                 action: functionCall?.function?.name,
-                 content: functionCallResult.content || '',
-                 feedback: functionCallResult.feedback,
-             });
-             // console.log("✅ Agent tool response:",agentConfig.name,'::',functionCall.function.name, ' with content',functionCallResult.content);
-             stateGraph.push(agentName, {
-                 role: "assistant",
-                 content: functionCallResult.content,
-                 name: functionCallResult.name
-             });
-         }
-         //
-         // send user feedback
-         if (functionCallResult.feedback) {
-             sendFeedback({
-                 agent: agentConfig.name,
-                 stdout,
-                 description: functionCallResult.feedback,
-                 usage: discussion.usage,
-                 state: '', // State tracking can be added later if needed
-                 verbose
-             });
-         }
-         // Refresh the tool list for the new current agent
-         const currentAgent = agents.find(a => a.name === currentAgentRef.name) || agentConfig;
-         const tools = currentAgent?.tools || [];
-         const followUpOptions = Object.assign({}, currentAgent?.model);
-         followUpOptions.messages = discussion.messages;
-         //
-         // always force HIGH model for follow up
-         followUpOptions.model = currentAgent.model.model;
-         if (tools.length > 0) {
-             followUpOptions.tools = tools;
-             followUpOptions.tool_choice = "auto";
-         }
-         // if(!functionCallResult.did_transfer) {
-         //     followUpOptions.tool_choice = "none";
-         // }
-         // Continue the conversation with the updated context
-         const followUpStream = await openai.beta.chat.completions.stream(followUpOptions);
-         for await (const chunk of followUpStream) {
-             //process.stdout.write(chunk.choices[0]?.delta?.content || "");
-             const delta = chunk.choices[0]?.delta;
-             if (delta?.content) {
-                 stdout.write(delta?.content);
-             }
-         }
-         const final = await followUpStream.finalChatCompletion();
-         // console.log("✅ DEBUG followUpOptions (OUT content):",final.choices[0]?.message.content);
-         // console.log("✅ DEBUG followUpOptions (OUT tool_calls):",final.choices[0]?.message.tool_calls);
-         //
-         // when called a function, agent must continue the conversation
-         // if(verbose) console.log("✅ Agent ( followUp - OUT):",currentAgent.name, 'with tool_calls ',!!(final.choices[0]?.message.tool_calls),' and content:' ,!!(final.choices[0]?.message.content));
-         if (final.choices[0]?.message.tool_calls) {
-             const partial = await readCompletionsStream({
-                 stateGraph,
-                 discussion,
-                 agentConfig: currentAgent,
-                 agents,
-                 agentName,
-                 stdout,
-                 final,
-                 session,
-                 verbose,
-                 enrichWithMemory: params.enrichWithMemory
-             });
-             localResult = (0, types_1.executionResultMerge)(localResult, partial);
-             return localResult;
-         }
-         // Accumulate final cost
-         stateGraph.updateTokens(agentName, {
-             prompt: final.usage?.prompt_tokens || 0,
-             completion: final.usage?.completion_tokens || 0,
-             total: final.usage?.total_tokens || 0,
-             cost: 0 // Cost calculation handled internally
-         });
-         //
-         // send the cost
-         sendFeedback({
-             agent: currentAgent.name,
-             stdout,
-             description: '',
-             usage: discussion.usage,
-             state: final.id || '',
-             verbose
-         });
-         const content = final.choices[0]?.message.content;
-         //
-         // capture new memory with the last message
-         await params.enrichWithMemory?.("assistant", currentAgent, session);
-         // if(verbose) console.log("✅ Agent (OUT):",currentAgent.name, 'with content length',!!content);
-         if (content) {
-             stateGraph.push(agentName, { role: "assistant", content });
-         }
-         if (content?.includes('<continue>')) {
-             localResult.moreThinkin = true;
-             return localResult;
-         }
-         return localResult;
-     }
-     return localResult;
- }
- /**
-  * Executes a set of agents to process a user query
-  *
-  * This function initializes the agent memory, processes the user query through the appropriate
-  * agent, and handles any agent transfers or tool calls that occur during execution.
-  *
-  * @param {AgentConfig[]} agentSet - Array of agent configurations
-  * @param {AgenticContext} session - {memory, user, ...} - Session object to store/read conversation state
-  * @param {ExecuteAgentSetParams} params - Execution parameters
-  * @returns {Promise<void>}
-  */
- async function executeAgentSet(agentSet, context, params) {
-     const { query, verbose } = params;
-     const openai = (0, utils_1.openaiInstance)();
-     const agents = (0, utils_1.injectTransferTools)(agentSet);
-     const home = params.home || agents[0].name;
-     // 🎯 Retrieve the StateGraph from the context (which contains session, user, credential, etc.)
-     const stateGraph = (0, stategraph_1.sessionStateGraphGet)(context);
-     // 📍 Specialized agent (persistent) vs contextual agent (temporary)
-     const specializedAgent = home;
-     const discussion = stateGraph.createOrRestore(specializedAgent);
-     let currentAgent = (0, stategraph_1.getSpecializedAgent)(discussion) || specializedAgent;
-     // Mutable reference for handleTransferCall
-     const currentAgentRef = { name: currentAgent };
-     // Find the current agent's config
-     const currentAgentConfig = agents.find(a => a.name === currentAgent);
-     discussion.description = currentAgentConfig?.publicDescription;
-     if (!currentAgentConfig) {
-         throw new Error(`Agent ${currentAgent} not found`);
-     }
-     if (!currentAgentConfig.instructions) {
-         throw new Error(`Agent ${currentAgent} has no instructions`);
-     }
-     // 🔧 Set up the system message if not already done
-     let enrichedQuery = query;
-     if (!discussion.messages.length) {
-         discussion.usage = { prompt: 0, completion: 0, total: 0, cost: 0 };
-         //
-         // add the initial agent to his memory as System
-         // Handle two-shot prompting: if instructions is an array, use the first part as a system message
-         const enrichedInstructions = await params.enrichWithMemory?.("system", currentAgentConfig, context);
-         const instructions = currentAgentConfig.instructions + '\n' + enrichedInstructions;
-         stateGraph.set(specializedAgent, instructions);
-     }
-     else {
-         // enrich the user query with memory as User
-         enrichedQuery = (await params.enrichWithMemory?.("user", currentAgentConfig, context)) || query;
-     }
-     // Append the user's query to the session-specific messages
-     // input: `${getMemoryString(relevantMemories)}\n${input}`,
-     stateGraph.push(specializedAgent, { role: "user", content: enrichedQuery });
-     // The tools (function definitions) available to the current agent
-     const tools = currentAgentConfig.tools;
-     // console.log('--- DBG toolLogic (1)',currentAgentConfig, currentAgentConfig?.toolLogic);
-     if (verbose) {
-         console.log('--- DBG current agent', currentAgentConfig.name, 'memory len:', discussion.messages.length);
-     }
-     // let shots = 1;
-     let result = (0, types_1.enrichExecutionResult)({
-         runId: `${specializedAgent}-${Date.now()}`,
-         startQuery: query,
-         actions: [],
-         lastMessage: '',
-         usage: { prompt: 0, completion: 0, total: 0, cost: 0 },
-         moreThinkin: false,
-     });
-     do {
-         const options = Object.assign({}, currentAgentConfig?.model);
-         options.messages = discussion.messages;
-         if (tools.length > 0) {
-             options.tools = tools;
-             options.tool_choice = "auto";
-         }
-         const stream = await openai.beta.chat.completions.stream(options);
-         // const intialinfo = [
-         //     "Analyse",
-         //     "Analyse en cours…",
-         //     "Hummm"
-         // ];
-         // const randomIndex = Math.floor(Math.random() * intialinfo.length);
-         // //
-         // // initial feedback
-         // sendFeedback({
-         //     agent:memory.currentAgent.name,
-         //     stdout:params.stdout,
-         //     description:intialinfo[randomIndex],
-         //     usage:memory.usage,
-         //     state:memory.state!
-         // })
-         for await (const chunk of stream) {
-             const delta = chunk.choices[0]?.delta;
-             if (delta?.content) {
-                 params.stdout.write(delta?.content);
-             }
-         }
-         const final = await stream.finalChatCompletion();
-         const partial = await readCompletionsStream({
-             stateGraph,
-             discussion,
-             agentConfig: currentAgentConfig,
-             agents,
-             agentName: specializedAgent,
-             stdout: params.stdout,
-             session: context,
-             final,
-             verbose,
-             enrichWithMemory: params.enrichWithMemory,
-         });
-         result = (0, types_1.executionResultMerge)(result, partial);
-         // Handle two-shot prompting: if instructions is an array, send the second part as a user message
-         // This allows for more complex agent behavior by providing additional context or instructions
-         // after the initial response, similar to chain-of-thought prompting
-         // if(Array.isArray(currentAgent.instructions && shots < currentAgent.instructions.length)){
-         //     const instructions = currentAgent.instructions[shots];
-         //     memory.messages.push({ role: "user", content: instructions });
-         //     thinking = true;
-         //     shots++;
-         // }
-         if (result.moreThinkin)
-             console.log("🌶️🌶️🌶️ restart thinking:");
-     } while (result.moreThinkin);
-     // 💾 Auto-save the StateGraph at the end (context contains session, user, credential, etc.)
-     (0, stategraph_1.sessionStateGraphSet)(context, stateGraph);
-     // finalize result (usage accumulated via discussion.usage once here)
-     result.lastMessage = discussion.messages?.[discussion.messages.length - 1]?.content || '';
-     // Ensure usage reflects the aggregated discussion usage (prompt/completion/total)
-     if (discussion?.usage) {
-         result.usage = {
-             prompt: discussion.usage.prompt || 0,
-             completion: discussion.usage.completion || 0,
-             total: discussion.usage.total || 0,
-             cost: discussion.usage.cost || 0,
-         };
-     }
-     result = (0, types_1.enrichExecutionResult)(result);
-     return result;
- }
- async function executeAgent(agentSet, params) {
-     const { query, verbose } = params;
-     const openai = (0, utils_1.openaiInstance)();
-     const agent = agentSet.find(a => a.name === params.home);
-     if (!agent) {
-         throw new Error(`Agent ${params.home} not found`);
-     }
-     if (!agent.instructions) {
-         throw new Error(`Agent ${agent.name} has no instructions`);
-     }
-     // Simple message array without memory manager - but preserve conversation
-     const messages = [
-         { role: "system", content: agent.instructions },
-         { role: "user", content: query }
-     ];
-     let usage = { prompt: 0, completion: 0, total: 0, cost: 0 };
-     let state = '';
-     let maxIterations = 10; // Prevent infinite loops
-     let iterations = 0;
-     if (verbose) {
-         console.log('--- DBG executeAgent (simple):', agent.name);
-         console.log('--- DBG query:', `${query?.substring(0, 100)}...`);
-     }
-     // Execute the agent with tool call handling loop
-     while (iterations < maxIterations) {
-         iterations++;
-         const options = Object.assign({}, agent.model);
-         options.messages = messages;
-         const tools = agent.tools || [];
-         if (tools.length > 0) {
-             options.tools = tools;
-             options.tool_choice = "auto";
-         }
-         if (verbose) {
-             console.log('--- DBG executeAgent (simple):', agent.name, 'iterations:', iterations, '\n', messages.length, '\n---', messages[messages.length - 1]?.content);
-         }
-         const stream = await openai.beta.chat.completions.stream(options);
-         // Stream the response
-         for await (const chunk of stream) {
-             const delta = chunk.choices[0]?.delta;
-             if (delta?.content) {
-                 params.stdout.write(delta?.content);
-             }
-         }
-         const final = await stream.finalChatCompletion();
-         // Update usage and state
-         const model = agent.model?.model;
-         (0, princing_openai_1.accumulateCost)(usage, model, final.usage);
-         state = final.id;
-         // Add assistant response to messages
-         const content = final.choices[0]?.message.content;
-         if (content) {
-             messages.push({ role: "assistant", content });
-         }
-         // Handle tool calls if any
-         const toolCalls = final.choices[0]?.message.tool_calls;
-         let hasToolCalls = false;
-         if (toolCalls && toolCalls.length > 0) {
-             hasToolCalls = true;
-             // First, update the assistant message with tool_calls
-             const lastAssistant = messages[messages.length - 1];
-             if (lastAssistant && lastAssistant.role === 'assistant') {
-                 lastAssistant.tool_calls = toolCalls;
-             }
-             else {
-                 // If no assistant message, add one with tool calls
-                 messages.push({
-                     role: "assistant",
-                     content: content || null,
-                     tool_calls: toolCalls
-                 });
-             }
-             // Then execute tools and add tool responses
-             for (const toolCall of toolCalls) {
-                 const args = JSON.parse(toolCall.function.arguments || '{}');
-                 // Execute tool if it exists in agent's toolLogic
-                 if (agent.toolLogic && agent.toolLogic[toolCall.function.name]) {
-                     try {
-                         const result = await agent.toolLogic[toolCall.function.name](args, { state });
-                         messages.push({
-                             role: "tool",
-                             tool_call_id: toolCall.id,
-                             content: typeof result === 'string' ? result : JSON.stringify(result)
-                         });
-                     }
-                     catch (error) {
-                         messages.push({
-                             role: "tool",
-                             tool_call_id: toolCall.id,
-                             content: `Error: ${error instanceof Error ? error.message : 'Unknown error'}`
-                         });
-                     }
-                 }
-             }
-         }
-         // If no tool calls, we're done
-         if (!hasToolCalls) {
-             break;
-         }
-     }
-     return {
-         usage,
-         content: messages[messages.length - 1]?.content || '',
-         messages,
-         state
-     };
- }
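
The removed executeAgent above implements the standard Chat Completions tool-call loop: request a completion, keep the assistant turn together with its tool_calls, execute each call through agent.toolLogic, append one tool message per tool_call_id, and stop as soon as a turn arrives without tool calls (or maxIterations is reached). A minimal, self-contained sketch of that bookkeeping follows; the model call is stubbed so it runs offline, and the ChatMessage/ToolCall shapes are simplified stand-ins rather than the package's actual types.

// Minimal sketch of the tool-call loop used by the removed executeAgent (stubbed model, simplified types).
type ToolCall = { id: string; function: { name: string; arguments: string } };
type ChatMessage =
  | { role: "system" | "user" | "assistant"; content: string | null; tool_calls?: ToolCall[] }
  | { role: "tool"; tool_call_id: string; content: string };
type ToolLogic = Record<string, (args: any) => Promise<unknown> | unknown>;

// Stand-in for one completion: first turn asks for a tool, the next turn answers.
async function fakeCompletion(messages: ChatMessage[]): Promise<{ content: string | null; tool_calls?: ToolCall[] }> {
  const hasToolResult = messages.some(m => m.role === "tool");
  return hasToolResult
    ? { content: "It is 18°C in Geneva." }
    : { content: null, tool_calls: [{ id: "call_1", function: { name: "getWeather", arguments: '{"city":"Geneva"}' } }] };
}

async function runToolLoop(instructions: string, query: string, toolLogic: ToolLogic, maxIterations = 10) {
  const messages: ChatMessage[] = [
    { role: "system", content: instructions },
    { role: "user", content: query },
  ];
  for (let i = 0; i < maxIterations; i++) {
    const final = await fakeCompletion(messages);
    // Keep the assistant turn (including its tool_calls) so the follow-up request is well-formed.
    messages.push({ role: "assistant", content: final.content, tool_calls: final.tool_calls });
    if (!final.tool_calls?.length) break; // no tool calls -> done
    for (const call of final.tool_calls) {
      const args = JSON.parse(call.function.arguments || "{}");
      const result = await toolLogic[call.function.name]?.(args);
      // One tool message per tool_call_id, mirroring the removed code.
      messages.push({ role: "tool", tool_call_id: call.id, content: typeof result === "string" ? result : JSON.stringify(result ?? null) });
    }
  }
  return messages;
}

// Usage: runToolLoop("You are a weather bot.", "Weather in Geneva?", { getWeather: ({ city }) => `${city}: 18°C` })
//   .then(m => console.log(m[m.length - 1]?.content));
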
package/dist/src/princing.openai.js
@@ -1,54 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.modelPricing = void 0;
- exports.calculateCost = calculateCost;
- exports.accumulateCost = accumulateCost;
- //
- // get/update pricing from openai
- // - https://platform.openai.com/docs/pricing#latest-models
- exports.modelPricing = {
-     "gpt-4.5-preview": { input: 0.000075, cachedInput: 0.0000325, output: 0.000125 },
-     "gpt-4.1": { input: 0.000002, cachedInput: 0.0000005, output: 0.000008 },
-     "gpt-4.1-mini": { input: 0.0000004, cachedInput: 0.0000001, output: 0.0000016 },
-     "gpt-4.1-nano": { input: 0.0000001, cachedInput: 0.000000025, output: 0.0000004 },
-     "gpt-4o": { input: 0.0000025, cachedInput: 0.00000125, output: 0.00001 },
-     "gpt-4o-audio-preview": { input: 0.0000025, output: 0.00001 },
-     "gpt-4o-realtime-preview": { input: 0.000005, cachedInput: 0.0000025, output: 0.00002 },
-     "gpt-4o-search-preview": { input: 0.000005, cachedInput: 0.0000025, output: 0.00002 },
-     "gpt-4o-mini": { input: 0.00000015, cachedInput: 0.000000075, output: 0.0000006 },
-     "gpt-4o-mini-audio-preview": { input: 0.00000015, output: 0.0000006 },
-     "gpt-4o-mini-realtime-preview": { input: 0.0000006, cachedInput: 0.0000003, output: 0.0000024 },
-     "gpt-4o-mini-search-preview": { input: 0.0000015, cachedInput: 0.00000075, output: 0.000006 },
-     // GPT-5 family
-     "gpt-5": { input: 0.00000125, output: 0.00001 },
-     "gpt-5-mini": { input: 0.00000025, output: 0.000002 },
-     "gpt-5-nano": { input: 0.00000005, output: 0.0000004 },
-     "o1": { input: 0.000015, cachedInput: 0.0000075, output: 0.00006 },
-     "o4-mini": { input: 0.0000011, cachedInput: 0.00000055, output: 0.0000044 },
-     "o3-mini": { input: 0.0000011, cachedInput: 0.00000055, output: 0.0000044 },
-     "o1-mini": { input: 0.0000011, cachedInput: 0.00000055, output: 0.0000044 },
- };
- function calculateCost(model, usage) {
-     if (!usage) {
-         return 0;
-     }
-     if (!exports.modelPricing[model]) {
-         throw new Error("Unknown model");
-     }
-     const pricing = exports.modelPricing[model];
-     const cost = usage.prompt_tokens * pricing.input +
-         // usage.completion_tokens * (pricing.cachedInput || 0) +
-         usage.completion_tokens * pricing.output;
-     return cost;
- }
- function accumulateCost(currentUsage, model, usage) {
-     if (!usage) {
-         return 0;
-     }
-     currentUsage.prompt += usage.prompt_tokens || 0;
-     currentUsage.completion += usage.completion_tokens || 0;
-     currentUsage.total += usage.total_tokens || 0;
-     const cost = calculateCost(model, usage);
-     currentUsage.cost += cost;
-     return currentUsage.cost;
- }
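
The removed calculateCost above is a straight per-token product, usage.prompt_tokens * input + usage.completion_tokens * output, with prices expressed in USD per token (the cachedInput rates are listed but never applied), and accumulateCost adds the result to a running { prompt, completion, total, cost } counter. A quick worked check using the gpt-4o-mini rates from the table; the helper below restates the math locally for illustration and is not imported from the package.

// Local re-statement of the removed cost math, for illustration only.
const rates = {
  // USD per token, copied from the removed modelPricing table above.
  "gpt-4o-mini": { input: 0.00000015, output: 0.0000006 },
  "gpt-5-mini": { input: 0.00000025, output: 0.000002 },
} as const;

function costUSD(model: keyof typeof rates, usage: { prompt_tokens: number; completion_tokens: number }): number {
  const p = rates[model];
  return usage.prompt_tokens * p.input + usage.completion_tokens * p.output;
}

// 1,000 prompt tokens + 500 completion tokens on gpt-4o-mini:
// 1000 * 0.00000015 + 500 * 0.0000006 = 0.00015 + 0.0003 = 0.00045 USD
console.log(costUSD("gpt-4o-mini", { prompt_tokens: 1000, completion_tokens: 500 })); // ≈ 0.00045
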
package/dist/src/rag/tools.d.ts
@@ -1,76 +0,0 @@
- import OpenAI from 'openai';
- import { RAGConfig } from './types';
- /**
-  * Creates the RAG tools for an agent
-  */
- declare const createTools: (openai: OpenAI, config: RAGConfig) => {
-     toolsReferencesContent: {
-         type: "function";
-         function: {
-             name: string;
-             description: string;
-             parameters: {
-                 type: string;
-                 properties: {
-                     service: {
-                         type: string;
-                         description: string;
-                     };
-                     source: {
-                         type: string;
-                         description: string;
-                     };
-                 };
-                 required: never[];
-                 additionalProperties: boolean;
-             };
-         };
-     };
-     toolsKnowledge: {
-         type: "function";
-         function: {
-             name: string;
-             description: string;
-             parameters: {
-                 type: string;
-                 properties: {
-                     question: {
-                         type: string;
-                         description: string;
-                     };
-                     justification: {
-                         type: string;
-                         description: string;
-                     };
-                     action_suivante: {
-                         type: string;
-                         description: string;
-                     };
-                 };
-                 required: string[];
-                 additionalProperties: boolean;
-             };
-         };
-     };
-     lookupReferencesContent: ({ service, source }: {
-         service?: string;
-         source?: string;
-     }) => Promise<{
-         content: string;
-     }>;
-     lookupKnowledge: (args: any, debug?: boolean) => Promise<{
-         content: string;
-         documents?: undefined;
-         scores?: undefined;
-     } | {
-         documents: {
-             id: number;
-             ref: string;
-             score: number;
-             content: string;
-         }[];
-         scores: number[];
-         content?: undefined;
-     }>;
- };
- export { createTools };
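
The declaration above shows that lookupKnowledge resolves to one of two shapes: a plain { content } message when nothing useful is found, or a { documents, scores } hit list. A small consumer-side sketch of handling that union follows; the types are copied locally from the removed declaration so the snippet stands alone (in 2.0.491 this area appears to move to rag/rag.manager, whose API is not part of this hunk).

// Result union copied from the removed rag/tools.d.ts declaration (local stand-in types).
type KnowledgeMiss = { content: string; documents?: undefined; scores?: undefined };
type KnowledgeHits = {
  documents: { id: number; ref: string; score: number; content: string }[];
  scores: number[];
  content?: undefined;
};
type LookupKnowledgeResult = KnowledgeMiss | KnowledgeHits;

// Branch on the presence of `documents`, which is typed `undefined` in the miss branch.
function summarize(result: LookupKnowledgeResult): string {
  if (result.documents) {
    return result.documents
      .map(d => `[${d.ref}] score=${d.score.toFixed(2)} :: ${d.content.slice(0, 80)}`)
      .join("\n");
  }
  return result.content ?? "no relevant documents found"; // fallback message from the miss branch
}

// Example: summarize({ documents: [{ id: 1, ref: "doc-1", score: 0.87, content: "…" }], scores: [0.87] })
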