agentic-api 2.0.314 → 2.0.585

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/README.md +37 -34
  2. package/dist/src/agents/prompts.d.ts +1 -1
  3. package/dist/src/agents/prompts.js +9 -7
  4. package/dist/src/agents/reducer.core.js +2 -2
  5. package/dist/src/agents/simulator.d.ts +33 -4
  6. package/dist/src/agents/simulator.dashboard.d.ts +140 -0
  7. package/dist/src/agents/simulator.dashboard.js +344 -0
  8. package/dist/src/agents/simulator.executor.d.ts +9 -3
  9. package/dist/src/agents/simulator.executor.js +43 -17
  10. package/dist/src/agents/simulator.js +103 -19
  11. package/dist/src/agents/simulator.prompts.d.ts +9 -8
  12. package/dist/src/agents/simulator.prompts.js +68 -62
  13. package/dist/src/agents/simulator.types.d.ts +39 -4
  14. package/dist/src/agents/simulator.utils.d.ts +22 -1
  15. package/dist/src/agents/simulator.utils.js +27 -2
  16. package/dist/src/execute/helpers.d.ts +75 -0
  17. package/dist/src/execute/helpers.js +139 -0
  18. package/dist/src/execute/index.d.ts +11 -0
  19. package/dist/src/execute/index.js +44 -0
  20. package/dist/src/execute/legacy.d.ts +46 -0
  21. package/dist/src/{execute.js → execute/legacy.js} +130 -232
  22. package/dist/src/execute/modelconfig.d.ts +29 -0
  23. package/dist/src/execute/modelconfig.js +72 -0
  24. package/dist/src/execute/responses.d.ts +55 -0
  25. package/dist/src/execute/responses.js +595 -0
  26. package/dist/src/execute/shared.d.ts +83 -0
  27. package/dist/src/execute/shared.js +188 -0
  28. package/dist/src/index.d.ts +5 -1
  29. package/dist/src/index.js +21 -2
  30. package/dist/src/llm/config.d.ts +25 -0
  31. package/dist/src/llm/config.js +38 -0
  32. package/dist/src/llm/index.d.ts +48 -0
  33. package/dist/src/llm/index.js +115 -0
  34. package/dist/src/llm/openai.d.ts +6 -0
  35. package/dist/src/llm/openai.js +154 -0
  36. package/dist/src/llm/pricing.d.ts +26 -0
  37. package/dist/src/llm/pricing.js +129 -0
  38. package/dist/src/llm/xai.d.ts +17 -0
  39. package/dist/src/llm/xai.js +90 -0
  40. package/dist/src/pricing.llm.d.ts +3 -15
  41. package/dist/src/pricing.llm.js +10 -230
  42. package/dist/src/prompts.d.ts +0 -1
  43. package/dist/src/prompts.js +51 -118
  44. package/dist/src/rag/embeddings.d.ts +5 -1
  45. package/dist/src/rag/embeddings.js +23 -7
  46. package/dist/src/rag/parser.js +1 -1
  47. package/dist/src/rag/rag.manager.d.ts +33 -2
  48. package/dist/src/rag/rag.manager.js +159 -61
  49. package/dist/src/rag/types.d.ts +2 -0
  50. package/dist/src/rag/usecase.js +8 -11
  51. package/dist/src/rules/git/git.e2e.helper.js +21 -2
  52. package/dist/src/rules/git/git.health.d.ts +4 -2
  53. package/dist/src/rules/git/git.health.js +113 -16
  54. package/dist/src/rules/git/index.d.ts +1 -1
  55. package/dist/src/rules/git/index.js +3 -2
  56. package/dist/src/rules/git/repo.d.ts +57 -7
  57. package/dist/src/rules/git/repo.js +326 -39
  58. package/dist/src/rules/git/repo.pr.d.ts +8 -0
  59. package/dist/src/rules/git/repo.pr.js +161 -13
  60. package/dist/src/rules/git/repo.tools.d.ts +5 -1
  61. package/dist/src/rules/git/repo.tools.js +54 -7
  62. package/dist/src/rules/types.d.ts +25 -0
  63. package/dist/src/rules/utils.matter.d.ts +0 -20
  64. package/dist/src/rules/utils.matter.js +58 -81
  65. package/dist/src/scrapper.js +3 -2
  66. package/dist/src/stategraph/stategraph.d.ts +26 -1
  67. package/dist/src/stategraph/stategraph.js +43 -2
  68. package/dist/src/stategraph/stategraph.storage.js +4 -0
  69. package/dist/src/stategraph/types.d.ts +5 -0
  70. package/dist/src/types.d.ts +42 -7
  71. package/dist/src/types.js +8 -7
  72. package/dist/src/usecase.js +1 -1
  73. package/dist/src/utils.d.ts +0 -8
  74. package/dist/src/utils.js +26 -29
  75. package/package.json +9 -7
  76. package/dist/src/execute.d.ts +0 -63
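
The headline change is the breakup of the monolithic `dist/src/execute.js` into an `execute/` module (`legacy`, `responses`, `shared`, `modelconfig`, `helpers`, plus an `index` barrel) and a new `llm/` provider layer (`config`, `openai`, `xai`, `pricing`). A minimal sketch of what the `execute/index` barrel plausibly re-exports, based only on the names visible elsewhere in this diff; treat it as an assumption, not the shipped file:

```js
// Hypothetical sketch of execute/index.js; only the exported names shown
// elsewhere in this diff (executeAgentSet, executeAgent, executeQuery,
// readCompletionsStream, modelConfig, sendFeedback) are grounded.
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const legacy_1 = require("./legacy");
const modelconfig_1 = require("./modelconfig");
const shared_1 = require("./shared");
exports.executeAgentSet = legacy_1.executeAgentSet;
exports.executeAgent = legacy_1.executeAgent;
exports.executeQuery = legacy_1.executeQuery;
exports.readCompletionsStream = legacy_1.readCompletionsStream;
exports.modelConfig = modelconfig_1.modelConfig;
exports.sendFeedback = shared_1.sendFeedback;
```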
package/dist/src/{execute.js → execute/legacy.js}
@@ -1,94 +1,72 @@
  "use strict";
+ /**
+  * Legacy implementation - Chat Completions API (beta.chat.completions)
+  *
+  * ⚠️ This implementation uses openai.beta.chat.completions.stream (the old API).
+  * For new projects, the Responses API (responses.ts) is recommended.
+  *
+  * Code optimized from the original execute.ts, with the following improvements:
+  * - OPTIM: centralized helpers (accumulateUsageTokens, stepsToActions)
+  * - BUG FIX: executionResultMerge now merges actions correctly (fixed in types.ts)
+  * - BUG FIX: moreThinkin removed (obsolete; reasoning_effort does the job)
+  * - BUG FIX: removed the do...while(moreThinkin) loop
+  * - BUG FIX: removed the duplicated reasoning_effort line (line 425 in the original)
+  *
+  * TODO [future optimization]: replace the sequential for loop with batchProcessToolCalls
+  * to fully exploit parallel_tool_calls and cut latency
+  */
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.DummyWritable = void 0;
- exports.modelConfig = modelConfig;
- exports.sendFeedback = sendFeedback;
  exports.readCompletionsStream = readCompletionsStream;
  exports.executeAgentSet = executeAgentSet;
  exports.executeAgent = executeAgent;
  exports.executeQuery = executeQuery;
- const types_1 = require("./types");
- const utils_1 = require("./utils");
- const stategraph_1 = require("./stategraph");
- const pricing_llm_1 = require("./pricing.llm");
- exports.DummyWritable = {
-     write: () => { }
- };
- function modelConfig(model, custom) {
-     const defaultOptions = Object.assign({
-         stream_options: { "include_usage": true },
-     }, custom || {});
-     // Get mapping based on provider
-     const mapping = (0, pricing_llm_1.LLM)((0, utils_1.openaiInstance)());
-     const options = Object.assign(mapping[model], defaultOptions);
-     return options;
- }
- function sendFeedback(params) {
-     const { agent, stdout, description, usage, state, verbose } = params;
-     const feedback = {
-         agent,
-         description,
-         usage,
-         state
-     };
-     // if(verbose) {
-     //     console.log('--- DBG sendFeedback:',agent, description || '--', state);
-     // }
-     //
-     // send agent state and description
-     stdout.write(`\n<step>${JSON.stringify(feedback)}</step>\n`);
- }
+ const types_1 = require("../types");
+ const utils_1 = require("../utils");
+ const stategraph_1 = require("../stategraph");
+ const pricing_llm_1 = require("../pricing.llm");
+ //
+ // Import shared utilities and the optimized helpers
+ const shared_1 = require("./shared");
+ const modelconfig_1 = require("./modelconfig");
+ const helpers_1 = require("./helpers");
  async function readCompletionsStream(params) {
      const openai = (0, utils_1.openaiInstance)();
-     //
-     // set default context here
-     const { stateGraph, discussion, agentConfig, agents, discussionRootAgent, stdout, final, session, verbose } = params;
-     const model = modelConfig(agentConfig.model).model;
+     const { stateGraph, discussion, agentConfig, agents, discussionRootAgent, stdout, final, context, verbose } = params;
+     const model = (0, modelconfig_1.modelConfig)(agentConfig.model).model;
      const accumulatedFunctionCall = final.choices[0]?.message.tool_calls || [];
      const content = final.choices[0]?.message.content;
-     let thinking = false;
      let localResult = (0, types_1.enrichExecutionResult)({
          runId: `${discussionRootAgent}-${Date.now()}`,
          startQuery: '',
          actions: [],
          lastMessage: '',
          usage: { prompt: 0, completion: 0, total: 0, cost: 0 },
-         moreThinkin: false,
-     });
-     // Accumulate cost in the discussion usage
-     (0, pricing_llm_1.accumulateCost)(discussion.usage, model, final.usage);
-     stateGraph.updateTokens(discussionRootAgent, {
-         prompt: final.usage?.prompt_tokens || 0,
-         completion: final.usage?.completion_tokens || 0,
-         total: final.usage?.total_tokens || 0,
-         cost: 0 // Cost already accumulated directly in discussion.usage
      });
-     // Store state (implementation can be added later if needed)
-     // discussion.state = final.id;
+     //
+     // OPTIM: use the centralized helper for accumulation
+     (0, helpers_1.accumulateUsageTokens)(stateGraph, discussion, discussionRootAgent, model, final.usage);
      if (content) {
          if (verbose)
-             console.log("✅ Agent (1): 🌶️🌶️🌶️ save content:", content?.length);
+             console.log("✅ Agent (1): save content:", content?.length);
          stateGraph.push(discussionRootAgent, { role: "assistant", content });
      }
+     //
      // If the model decides to call a function (e.g. "transferAgents")
      for (const functionCall of accumulatedFunctionCall) {
          const args = JSON.parse(functionCall?.function?.arguments || '{}');
          if (args.justification) {
-             sendFeedback({
+             (0, shared_1.sendFeedback)({
                  agent: agentConfig.name,
                  stdout,
                  description: args.justification,
                  usage: discussion.usage,
-                 state: '', // State will be set later if needed,
+                 state: '',
                  verbose
              });
          }
          // Create a mutable reference for handleTransferCall
          const currentAgentRef = { name: agentConfig.name };
-         const functionCallResult = await (0, utils_1.handleTransferCall)(discussion, currentAgentRef, agents, functionCall, session);
-         // result can be
-         // {content, usage} {did_transfer}
-         thinking = functionCallResult.thinking;
+         const functionCallResult = await (0, utils_1.handleTransferCall)(discussion, currentAgentRef, agents, functionCall, context);
          if (functionCallResult.usage) {
              stateGraph.updateTokens(discussionRootAgent, {
                  prompt: functionCallResult.usage.prompt || 0,
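
The new `helpers_1` calls replace logic the old code inlined at several call sites. Below is a sketch of what `execute/helpers.js` plausibly contains, reconstructed from the removed `accumulateCost` + `updateTokens` block above and the removed `steps(...).map(...)` blocks further down; the shipped file is 139 lines, so treat this as a partial assumption:

```js
// Reconstructed sketch of the two helpers named in this diff, based on the
// inlined code they replace; not the actual helpers.js from the package.
"use strict";
const pricing_llm_1 = require("../pricing.llm");
function accumulateUsageTokens(stateGraph, discussion, rootAgent, model, usage) {
    // Accumulate cost into discussion.usage, then mirror raw token counts
    // into the StateGraph (cost: 0 because discussion.usage already holds it).
    (0, pricing_llm_1.accumulateCost)(discussion.usage, model, usage);
    stateGraph.updateTokens(rootAgent, {
        prompt: usage?.prompt_tokens || 0,
        completion: usage?.completion_tokens || 0,
        total: usage?.total_tokens || 0,
        cost: 0
    });
}
function stepsToActions(stateGraph, rootAgent) {
    // The step → action mapping the old code repeated at three call sites.
    return stateGraph.steps(rootAgent).map(step => ({
        action: step.tool,
        content: step.context,
        feedback: step.reason
    }));
}
exports.accumulateUsageTokens = accumulateUsageTokens;
exports.stepsToActions = stepsToActions;
```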
@@ -103,19 +81,17 @@ async function readCompletionsStream(params) {
              // Update the system message with the new current agent's instructions
              const transferredAgent = agents.find(a => a.name === currentAgentRef.name) || agentConfig;
              const instructions = transferredAgent.instructions;
-             const enrichedInstructions = (await params.enrichWithMemory?.("system", transferredAgent, session)) || '';
+             const enrichedInstructions = (await params.enrichWithMemory?.("system", transferredAgent, context)) || '';
              // ✅ set() preserves the existing trail via updateSystemMessage()
              stateGraph.set(discussionRootAgent, instructions + '\n' + enrichedInstructions);
-             //
-             // Display only explicit content to stdout (usually empty for silent transfers)
-             if (functionCallResult.content) {
-                 stdout.write(functionCallResult.content + "\n");
-             }
+             stateGraph.push(discussionRootAgent, {
+                 role: "assistant",
+                 content: functionCallResult.content,
+                 name: functionCallResult.name
+             });
          }
-         //
-         // other function call have a result
-         else if (functionCallResult.content) {
-             // console.log("✅ Agent tool response:",agentConfig.name,'::',functionCall.function.name, ' with content',functionCallResult.content);
+         else {
+             // other function calls have a result
              stateGraph.push(discussionRootAgent, {
                  role: "assistant",
                  content: functionCallResult.content,
@@ -125,135 +101,103 @@ async function readCompletionsStream(params) {
          //
          // send user feedback (if not already sent via addStep for transfer)
          if (functionCallResult.feedback && !functionCallResult.did_transfer) {
-             sendFeedback({
+             (0, shared_1.sendFeedback)({
                  agent: agentConfig.name,
                  stdout,
                  description: functionCallResult.feedback,
                  usage: discussion.usage,
-                 state: '', // State tracking can be added later if needed
+                 state: '',
                  verbose
              });
          }
          // Refresh the tool list for the new current agent
          const currentAgent = agents.find(a => a.name === currentAgentRef.name) || agentConfig;
          const tools = currentAgent?.tools || [];
-         const followUpOptions = Object.assign({}, modelConfig(currentAgent.model));
+         const followUpOptions = Object.assign({}, (0, modelconfig_1.modelConfig)(currentAgent.model));
          followUpOptions.messages = discussion.messages;
          if (tools.length > 0) {
              followUpOptions.tools = tools;
              followUpOptions.tool_choice = "auto";
          }
-         // console.log("✅ DEBUG followUpOptions (tools):",currentAgentRef.name,'\n',JSON.stringify(followUpOptions,null,2));
-         // console.log("✅ DEBUG followUpOptions (tools):",currentAgentRef.name,tools.length, tools[0]?.function?.parameters?.properties?.destination_agent);
-         // console.log("✅ DEBUG followUpOptions (system):",discussion.messages[0]?.content);
-         // if(!functionCallResult.did_transfer) {
-         //     followUpOptions.tool_choice = "none";
-         // }
-         // Continue the conversation with the updated context
+         //
+         // Legacy: use beta.chat.completions.stream (the old API)
+         // NOTE: this API is in beta; the Responses API is recommended for new projects
          const followUpStream = await openai.beta.chat.completions.stream(followUpOptions);
          for await (const chunk of followUpStream) {
-             //process.stdout.write(chunk.choices[0]?.delta?.content || "");
              const delta = chunk.choices[0]?.delta;
              if (delta?.content) {
                  stdout.write(delta?.content);
              }
          }
-         const final = await followUpStream.finalChatCompletion();
+         const followUpFinal = await followUpStream.finalChatCompletion();
          //
          // ✅ addStep AFTER the agent's reply (so the next turn has the full trail)
-         if (functionCallResult.feedback) {
+         if (functionCallResult.name) {
              stateGraph.addStep(discussionRootAgent, {
                  tool: functionCallResult.name,
                  context: functionCallResult.context || '',
-                 reason: functionCallResult.feedback,
+                 reason: args.justification || '',
                  id: functionCallResult.id
              });
          }
-         // console.log("✅ DEBUG followUpOptions (OUT content):",final.choices[0]?.message.content);
-         // console.log("✅ DEBUG followUpOptions (OUT tool_calls):",final.choices[0]?.message.tool_calls);
          //
          // after calling a function, the agent must continue the conversation
-         // if(verbose) console.log("✅ Agent ( followUp - OUT):",currentAgent.name, 'with tool_calls ',!!(final.choices[0]?.message.tool_calls),' and content:' ,!!(final.choices[0]?.message.content));
-         if (final.choices[0]?.message.tool_calls) {
+         if (followUpFinal.choices[0]?.message.tool_calls) {
              const partial = await readCompletionsStream({
                  stateGraph,
                  discussion,
                  agentConfig: currentAgent,
                  agents,
-                 discussionRootAgent, // ✅ renamed from agentName for clarity
+                 discussionRootAgent,
                  stdout,
-                 final,
-                 session,
+                 final: followUpFinal,
+                 context,
                  verbose,
                  enrichWithMemory: params.enrichWithMemory
              });
              localResult = (0, types_1.executionResultMerge)(localResult, partial);
-             // ✅ Convert steps to actions before returning
-             localResult.actions = stateGraph.steps(discussionRootAgent).map(step => ({
-                 action: step.tool,
-                 content: step.context,
-                 feedback: step.reason
-             }));
+             // ✅ OPTIM: use the centralized helper
+             localResult.actions = (0, helpers_1.stepsToActions)(stateGraph, discussionRootAgent);
              return localResult;
          }
-         // Accumulate final cost
-         stateGraph.updateTokens(discussionRootAgent, {
-             prompt: final.usage?.prompt_tokens || 0,
-             completion: final.usage?.completion_tokens || 0,
-             total: final.usage?.total_tokens || 0,
-             cost: 0 // Cost calculation handled internally
-         });
+         //
+         // OPTIM: use the centralized helper for accumulation
+         (0, helpers_1.accumulateUsageTokens)(stateGraph, discussion, discussionRootAgent, model, followUpFinal.usage);
          //
          // send the cost
-         sendFeedback({
+         (0, shared_1.sendFeedback)({
              agent: currentAgent.name,
              stdout,
              description: '',
              usage: discussion.usage,
-             state: final.id || '',
+             state: followUpFinal.id || '',
              verbose
          });
-         const content = final.choices[0]?.message.content;
+         const followUpContent = followUpFinal.choices[0]?.message.content;
          //
          // capture new memory with the last message
-         await params.enrichWithMemory?.("assistant", currentAgent, session);
-         // if(verbose) console.log("✅ Agent (OUT):",currentAgent.name, 'with content length',!!content);
-         if (content) {
-             stateGraph.push(discussionRootAgent, { role: "assistant", content });
-         }
-         if (content?.includes('<continue>')) {
-             localResult.moreThinkin = true;
+         await params.enrichWithMemory?.("assistant", currentAgent, context);
+         if (followUpContent) {
+             stateGraph.push(discussionRootAgent, { role: "assistant", content: followUpContent });
          }
      }
      //
-     // ✅ Convert steps to actions once before returning
-     const allSteps = stateGraph.steps(discussionRootAgent);
-     localResult.actions = allSteps.map(step => ({
-         action: step.tool,
-         content: step.context,
-         feedback: step.reason
-     }));
+     // ✅ OPTIM: use the centralized helper
+     localResult.actions = (0, helpers_1.stepsToActions)(stateGraph, discussionRootAgent);
      return localResult;
  }
  /**
   * Executes a set of agents to process a user query
   *
-  * This function initializes the agent memory, processes the user query through the appropriate
-  * agent, and handles any agent transfers or tool calls that occur during execution.
-  *
-  * @param {AgentConfig[]} agentSet - Array of agent configurations
-  * @param {AgenticContext} session - {memory, user, ...} - Session object to store/read conversation state
-  * @param {ExecuteAgentSetParams} params - Execution parameters
-  * @returns {Promise<void>}
+  * OPTIMIZED: no do...while(moreThinkin) loop; reasoning_effort does the job
   */
  async function executeAgentSet(agentSet, context, params) {
      const { query, verbose } = params;
      const openai = (0, utils_1.openaiInstance)();
      const agents = (0, utils_1.injectTransferTools)(agentSet);
      // 🔑 DISCUSSION KEY: root agent that serves as the entry point
-     // This key stays FIXED even after transfers, keeping a unified history
      const discussionRootAgent = params.home || agents[0].name;
-     // 🎯 Get the StateGraph from the context (which contains session, user, credential, etc.)
+     // 🎯 Get the StateGraph from the context
      const stateGraph = (0, stategraph_1.sessionStateGraphGet)(context);
      // 📍 Create or restore the discussion for this root agent
      const discussion = stateGraph.createOrRestore(discussionRootAgent);
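
One point worth noting for integrators: `sendFeedback` (now in `execute/shared`) frames each progress report as a `<step>…</step>` line interleaved with the token stream on stdout, as the removed implementation in the first hunk shows. A minimal consumer sketch; the frame fields (`agent`, `description`, `usage`, `state`) come from that implementation, everything else is illustrative:

```js
// Split <step> JSON frames out of the streamed text; the frame shape follows
// the sendFeedback body shown in the first hunk of this diff.
function extractSteps(streamedText) {
    const steps = [];
    for (const match of streamedText.matchAll(/<step>(.*?)<\/step>/g)) {
        steps.push(JSON.parse(match[1])); // { agent, description, usage, state }
    }
    return steps;
}
```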
@@ -272,101 +216,74 @@ async function executeAgentSet(agentSet, context, params) {
      let enrichedQuery = query;
      if (!discussion.messages.length) {
          discussion.usage = { prompt: 0, completion: 0, total: 0, cost: 0 };
-         //
-         // add the initial agent to his memory as System
-         // Handle two-shot prompting: if instructions is an array, use the first part as a system message
          const enrichedInstructions = await params.enrichWithMemory?.("system", currentAgentConfig, context);
          const instructions = currentAgentConfig.instructions + '\n' + enrichedInstructions;
          stateGraph.set(discussionRootAgent, instructions);
      }
      else {
-         // enrich the user query with memory as User
          enrichedQuery = (await params.enrichWithMemory?.("user", currentAgentConfig, context)) || query;
      }
-     // Append the user's query to the session-specific messages
-     // input: `${getMemoryString(relevantMemories)}\n${input}`,
      stateGraph.push(discussionRootAgent, { role: "user", content: enrichedQuery });
-     // The tools (function definitions) available to the current agent
      const tools = currentAgentConfig.tools;
-     // console.log('--- DBG toolLogic (1)',currentAgentConfig, currentAgentConfig?.toolLogic);
      if (verbose) {
-         console.log('--- DBG current agent', currentAgentConfig.name, 'memory len:', discussion.messages.length);
+         console.log('--- DBG current agent', currentAgentConfig.name, `deep-thinking:${params.thinking}`, 'memory len:', discussion.messages.length);
      }
-     // let shots = 1;
      let result = (0, types_1.enrichExecutionResult)({
          runId: `${discussionRootAgent}-${Date.now()}`,
          startQuery: query,
          actions: [],
          lastMessage: '',
          usage: { prompt: 0, completion: 0, total: 0, cost: 0 },
-         moreThinkin: false,
      });
-     do {
-         const model = modelConfig(currentAgentConfig.model);
-         const options = Object.assign({}, model);
-         options.messages = discussion.messages;
-         if (tools.length > 0) {
-             options.tools = tools;
-             options.tool_choice = "auto";
-         }
-         const stream = await openai.beta.chat.completions.stream(options);
-         // const intialinfo = [
-         //     "Analyse",
-         //     "Analyse en cours…",
-         //     "Hummm"
-         // ];
-         // const randomIndex = Math.floor(Math.random() * intialinfo.length);
-         // //
-         // // initial feedback
-         // sendFeedback({
-         //     agent:memory.currentAgent.name,
-         //     stdout:params.stdout,
-         //     description:intialinfo[randomIndex],
-         //     usage:memory.usage,
-         //     state:memory.state!
-         // })
-         for await (const chunk of stream) {
-             const delta = chunk.choices[0]?.delta;
-             if (delta?.content) {
-                 params.stdout.write(delta?.content);
-             }
+     //
+     // BUG FIX: no more do...while(moreThinkin) loop - reasoning_effort does the job
+     const intialinfo = ["Analyse", "Analyse en cours…", "Réflexion"];
+     const randomIndex = Math.floor(Math.random() * intialinfo.length);
+     (0, shared_1.sendFeedback)({
+         agent: currentAgentConfig.name,
+         stdout: params.stdout,
+         description: intialinfo[randomIndex],
+         usage: result.usage,
+         state: discussion.id
+     });
+     //
+     // ✅ BUG FIX: modelConfig handles reasoning_effort; no duplication (line 425 of the original removed)
+     const model = (0, modelconfig_1.modelConfig)(currentAgentConfig.model, { thinking: params.thinking });
+     const options = Object.assign({}, model);
+     options.messages = discussion.messages;
+     if (tools.length > 0) {
+         options.tools = tools;
+         options.tool_choice = "auto";
+         options.parallel_tool_calls = true;
+     }
+     const stream = await openai.beta.chat.completions.stream(options);
+     for await (const chunk of stream) {
+         const delta = chunk.choices[0]?.delta;
+         if (delta?.content) {
+             params.stdout.write(delta?.content);
          }
-         const final = await stream.finalChatCompletion();
-         const partial = await readCompletionsStream({
-             stateGraph,
-             discussion,
-             agentConfig: currentAgentConfig,
-             agents,
-             discussionRootAgent, // ✅ discussion KEY
-             stdout: params.stdout,
-             session: context,
-             final,
-             verbose,
-             enrichWithMemory: params.enrichWithMemory,
-         });
-         result = (0, types_1.executionResultMerge)(result, partial);
-         result.actions = stateGraph.steps(discussionRootAgent).map(step => ({
-             action: step.tool,
-             content: step.context,
-             feedback: step.reason
-         }));
-         // Handle two-shot prompting: if instructions is an array, send the second part as a user message
-         // This allows for more complex agent behavior by providing additional context or instructions
-         // after the initial response, similar to chain-of-thought prompting
-         // if(Array.isArray(currentAgent.instructions && shots < currentAgent.instructions.length)){
-         //     const instructions = currentAgent.instructions[shots];
-         //     memory.messages.push({ role: "user", content: instructions });
-         //     thinking = true;
-         //     shots++;
-         // }
-         if (result.moreThinkin)
-             console.log("🌶️🌶️🌶️ restart thinking:");
-     } while (result.moreThinkin);
-     // 💾 Auto-save the StateGraph at the end (context contains session, user, credential, etc.)
+     }
+     const final = await stream.finalChatCompletion();
+     const partial = await readCompletionsStream({
+         stateGraph,
+         discussion,
+         agentConfig: currentAgentConfig,
+         agents,
+         discussionRootAgent,
+         stdout: params.stdout,
+         context,
+         final,
+         verbose,
+         enrichWithMemory: params.enrichWithMemory,
+     });
+     result = (0, types_1.executionResultMerge)(result, partial);
+     //
+     // ✅ OPTIM: use the centralized helper
+     result.actions = (0, helpers_1.stepsToActions)(stateGraph, discussionRootAgent);
+     // 💾 Auto-save the StateGraph at the end
      (0, stategraph_1.sessionStateGraphSet)(context, stateGraph);
-     // finalize result (usage accumulated via discussion.usage once here)
+     // finalize result
      result.lastMessage = discussion.messages?.[discussion.messages.length - 1]?.content || '';
-     // Ensure usage reflects the aggregated discussion usage (prompt/completion/total)
      if (discussion?.usage) {
          result.usage = {
              prompt: discussion.usage.prompt || 0,
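
With the `do...while(moreThinkin)` loop gone, `executeAgentSet` makes a single pass: initial feedback, one streamed completion (now with `parallel_tool_calls`), then recursion through `readCompletionsStream` for any tool calls. A hypothetical calling sketch; the agent shape and param names are inferred from this diff, and the root-level re-export is an assumption:

```js
// Hypothetical usage; assumes executeAgentSet is re-exported from the package
// root and that context carries the session used by the StateGraph storage.
const { executeAgentSet } = require("agentic-api");

async function run(context) {
    const agents = [{
        name: "triage",
        instructions: "Route the user to the right specialist.",
        model: "MEDIUM",   // alias resolved by modelConfig
        tools: []
    }];
    const result = await executeAgentSet(agents, context, {
        query: "Summarize the last release",
        home: "triage",         // fixed discussion key across transfers
        thinking: true,         // forwarded to modelConfig (reasoning_effort)
        stdout: process.stdout,
        verbose: false
    });
    console.log(result.usage, result.actions);
}
```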
@@ -388,14 +305,13 @@ async function executeAgent(agentSet, params) {
      if (!agent.instructions) {
          throw new Error(`Agent ${agent.name} has no instructions`);
      }
-     // Simple message array without memory manager - but preserve conversation
      const messages = [
          { role: "system", content: agent.instructions },
          { role: "user", content: query }
      ];
      let usage = { prompt: 0, completion: 0, total: 0, cost: 0 };
      let state = '';
-     let maxIterations = 10; // Prevent infinite loops
+     let maxIterations = 10;
      let iterations = 0;
      if (verbose) {
          console.log('--- DBG executeAgent:', agent.name);
@@ -405,10 +321,9 @@ async function executeAgent(agentSet, params) {
          console.log('--- DBG executeAgent-system:', agent.instructions);
          console.log('--- DBG executeAgent-user:', query);
      }
-     // Execute the agent with tool call handling loop
      while (iterations < maxIterations) {
          iterations++;
-         const options = Object.assign({}, modelConfig(agent.model));
+         const options = Object.assign({}, (0, modelconfig_1.modelConfig)(agent.model));
          options.messages = messages;
          const tools = agent.tools || [];
          if (tools.length > 0) {
@@ -419,7 +334,6 @@
              console.log('--- DBG executeAgent:', agent.name, 'iterations:', iterations, '\n', messages.length, '\n---', messages[messages.length - 1]?.content);
          }
          const stream = await openai.beta.chat.completions.stream(options);
-         // Stream the response
          for await (const chunk of stream) {
              const delta = chunk.choices[0]?.delta;
              if (delta?.content) {
@@ -427,37 +341,30 @@
              }
          }
          const final = await stream.finalChatCompletion();
-         // Update usage and state
-         const model = modelConfig(agent.model).model;
+         const model = (0, modelconfig_1.modelConfig)(agent.model).model;
          (0, pricing_llm_1.accumulateCost)(usage, model, final.usage);
          state = final.id;
-         // Add assistant response to messages
          const content = final.choices[0]?.message.content;
          if (content) {
              messages.push({ role: "assistant", content });
          }
-         // Handle tool calls if any
          const toolCalls = final.choices[0]?.message.tool_calls;
          let hasToolCalls = false;
          if (toolCalls && toolCalls.length > 0) {
              hasToolCalls = true;
-             // First, update the assistant message with tool_calls
              const lastAssistant = messages[messages.length - 1];
              if (lastAssistant && lastAssistant.role === 'assistant') {
                  lastAssistant.tool_calls = toolCalls;
              }
              else {
-                 // If no assistant message, add one with tool calls
                  messages.push({
                      role: "assistant",
                      content: content || null,
                      tool_calls: toolCalls
                  });
              }
-             // Then execute tools and add tool responses
              for (const toolCall of toolCalls) {
                  const args = JSON.parse(toolCall.function.arguments || '{}');
-                 // Execute tool if it exists in agent's toolLogic
                  if (agent.toolLogic && agent.toolLogic[toolCall.function.name]) {
                      try {
                          const result = await agent.toolLogic[toolCall.function.name](args, { state });
477
384
  }
478
385
  }
479
386
  }
480
- // If no tool calls, we're done
481
387
  if (!hasToolCalls) {
482
388
  break;
483
389
  }
@@ -491,20 +397,14 @@
  }
  /**
   * Executes a simple query without agent orchestration or tool handling
-  *
-  * This is the simplest level of execution - just sends a query to the LLM
-  * and returns the response. No tools, no agent transfers, no complex logic.
-  *
-  * @param params - Execution parameters including query and model config
-  * @returns Promise<ExecuteAgentResult> - Simple result with usage and content
   */
  async function executeQuery(params) {
-     const { query, verbose } = params;
-     // Get OpenAI/Grok instance
+     const { query, verbose, model: modelName, instructions } = params;
+     if (!modelName) {
+         throw new Error('executeQuery requires "model" parameter');
+     }
      const openai = (0, utils_1.openaiInstance)();
-     // Use default model configuration if no home agent specified
-     const modelName = params.model || params.home || "LOW-fast";
-     const model = modelConfig(modelName);
+     const model = (0, modelconfig_1.modelConfig)(modelName);
      const more = {};
      if (params.json) {
          more.response_format = { type: "json_object" };
@@ -515,21 +415,21 @@
      if (verbose) {
          console.log('--- DBG query:', modelName, `${query?.substring(0, 100)}...`);
      }
-     // Simple message array - no system prompt, just the user query
      const messages = params.messages || [];
+     // Add the system prompt if provided
+     if (instructions) {
+         messages.unshift({ role: "system", content: instructions });
+     }
      messages.push({ role: "user", content: query });
      let usage = { prompt: 0, completion: 0, total: 0, cost: 0 };
      let state = '';
      try {
-         // Create completion options
          const options = Object.assign({}, model, more);
          options.messages = messages;
          if (verbose) {
              console.log('--- DBG executeQuery options:', JSON.stringify(options, null, 2));
          }
-         // Execute the query
          const stream = await openai.beta.chat.completions.stream(options);
-         // Stream the response
          for await (const chunk of stream) {
              const delta = chunk.choices[0]?.delta;
              if (delta?.content && params.stdout) {
@@ -537,10 +437,8 @@
              }
          }
          const final = await stream.finalChatCompletion();
-         // Update usage and state
          (0, pricing_llm_1.accumulateCost)(usage, model.model, final.usage);
          state = final.id || '';
-         // Get the response content
          const content = final.choices[0]?.message.content || '';
          if (verbose) {
              console.log('--- DBG executeQuery completed, usage:', usage);
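
`executeQuery` now fails fast without a `model` and gained an `instructions` option that is unshifted onto the message array as a system message. A calling sketch under the same root re-export assumption; the alias and strings are illustrative:

```js
// Hypothetical usage of the stricter executeQuery; "LOW" is one of the
// aliases this diff shows being resolved by modelConfig.
const { executeQuery } = require("agentic-api");

async function ask() {
    return executeQuery({
        model: "LOW",                        // required since this version
        instructions: "Answer in one sentence.",
        query: "What changed in 2.0.585?",
        json: false,
        stdout: process.stdout
    });
}
```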
package/dist/src/execute/modelconfig.d.ts
@@ -0,0 +1,29 @@
+ import { AgentModel } from '../types';
+ import { LLMProvider } from '../llm';
+ /**
+  * Model configuration for Chat Completions (legacy) and the Responses API
+  *
+  * Handles model configuration, with automatic migration of parameters
+  * between the two APIs.
+  *
+  * @param model - Model alias (e.g. "LOW", "MEDIUM", "HIGH", "EMBEDDING-small", "VISION")
+  * @param custom - Custom options (provider, thinking, temperature, etc.)
+  * @param custom.provider - Provider to use ('openai' | 'xai'), default: LLM_PROVIDER env
+  * @param custom.thinking - Enables reasoning mode (reasoning_effort: high)
+  * @param forResponses - If true, adapts the config for the Responses API (otherwise Chat Completions)
+  *
+  * @example
+  * // Default model
+  * const config = modelConfig("MEDIUM");
+  *
+  * // Force OpenAI for embeddings
+  * const config = modelConfig("EMBEDDING-small", { provider: 'openai' });
+  *
+  * // Vision with xAI
+  * const config = modelConfig("VISION", { provider: 'xai' });
+  */
+ export declare function modelConfig(model: string, custom?: {
+     provider?: LLMProvider;
+     thinking?: boolean;
+     [key: string]: any;
+ }, forResponses?: boolean): AgentModel;
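
Per the JSDoc above, the `thinking` flag maps to `reasoning_effort: high`, which is also why `executeAgentSet` could drop its retry loop. A sketch of the expected shape; `modelconfig.js` itself is not shown in this diff, so the exact output keys beyond those visible in the removed legacy `modelConfig` are assumptions:

```js
// Assumed behavior of modelConfig per its JSDoc; field names other than
// "model", "stream_options" and "reasoning_effort" are illustrative.
const { modelConfig } = require("agentic-api");

const base = modelConfig("HIGH");
// → e.g. { model: "<provider model id>", stream_options: { include_usage: true } }

const deep = modelConfig("HIGH", { thinking: true });
// → same config, plus reasoning_effort: "high"
```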