agentic-api 2.0.31 → 2.0.491
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/src/agents/agents.example.js +21 -22
- package/dist/src/agents/authentication.js +1 -2
- package/dist/src/agents/prompts.d.ts +5 -4
- package/dist/src/agents/prompts.js +44 -87
- package/dist/src/agents/reducer.core.d.ts +24 -2
- package/dist/src/agents/reducer.core.js +125 -35
- package/dist/src/agents/reducer.loaders.d.ts +55 -1
- package/dist/src/agents/reducer.loaders.js +114 -1
- package/dist/src/agents/reducer.types.d.ts +45 -2
- package/dist/src/agents/semantic.js +1 -2
- package/dist/src/agents/simulator.d.ts +11 -3
- package/dist/src/agents/simulator.executor.d.ts +14 -4
- package/dist/src/agents/simulator.executor.js +81 -23
- package/dist/src/agents/simulator.js +128 -42
- package/dist/src/agents/simulator.prompts.d.ts +9 -7
- package/dist/src/agents/simulator.prompts.js +66 -86
- package/dist/src/agents/simulator.types.d.ts +23 -5
- package/dist/src/agents/simulator.utils.d.ts +7 -2
- package/dist/src/agents/simulator.utils.js +31 -11
- package/dist/src/agents/system.js +1 -2
- package/dist/src/execute/helpers.d.ts +75 -0
- package/dist/src/execute/helpers.js +139 -0
- package/dist/src/execute/index.d.ts +11 -0
- package/dist/src/execute/index.js +44 -0
- package/dist/src/execute/legacy.d.ts +46 -0
- package/dist/src/execute/legacy.js +460 -0
- package/dist/src/execute/modelconfig.d.ts +19 -0
- package/dist/src/execute/modelconfig.js +56 -0
- package/dist/src/execute/responses.d.ts +55 -0
- package/dist/src/execute/responses.js +594 -0
- package/dist/src/execute/shared.d.ts +83 -0
- package/dist/src/execute/shared.js +188 -0
- package/dist/src/index.d.ts +1 -1
- package/dist/src/index.js +2 -2
- package/dist/src/{princing.openai.d.ts → pricing.llm.d.ts} +6 -0
- package/dist/src/pricing.llm.js +255 -0
- package/dist/src/prompts.d.ts +13 -4
- package/dist/src/prompts.js +221 -114
- package/dist/src/rag/embeddings.d.ts +36 -18
- package/dist/src/rag/embeddings.js +131 -128
- package/dist/src/rag/index.d.ts +5 -5
- package/dist/src/rag/index.js +14 -17
- package/dist/src/rag/parser.d.ts +2 -1
- package/dist/src/rag/parser.js +11 -14
- package/dist/src/rag/rag.examples.d.ts +27 -0
- package/dist/src/rag/rag.examples.js +151 -0
- package/dist/src/rag/rag.manager.d.ts +383 -0
- package/dist/src/rag/rag.manager.js +1390 -0
- package/dist/src/rag/types.d.ts +128 -12
- package/dist/src/rag/types.js +100 -1
- package/dist/src/rag/usecase.d.ts +37 -0
- package/dist/src/rag/usecase.js +96 -7
- package/dist/src/rules/git/git.e2e.helper.js +22 -2
- package/dist/src/rules/git/git.health.d.ts +61 -2
- package/dist/src/rules/git/git.health.js +333 -11
- package/dist/src/rules/git/index.d.ts +2 -2
- package/dist/src/rules/git/index.js +13 -1
- package/dist/src/rules/git/repo.d.ts +160 -0
- package/dist/src/rules/git/repo.js +777 -0
- package/dist/src/rules/git/repo.pr.js +117 -13
- package/dist/src/rules/git/repo.tools.d.ts +22 -1
- package/dist/src/rules/git/repo.tools.js +50 -1
- package/dist/src/rules/types.d.ts +27 -14
- package/dist/src/rules/utils.matter.d.ts +0 -4
- package/dist/src/rules/utils.matter.js +35 -7
- package/dist/src/scrapper.d.ts +15 -22
- package/dist/src/scrapper.js +58 -110
- package/dist/src/stategraph/index.d.ts +1 -1
- package/dist/src/stategraph/stategraph.d.ts +56 -2
- package/dist/src/stategraph/stategraph.js +134 -6
- package/dist/src/stategraph/stategraph.storage.js +8 -0
- package/dist/src/stategraph/types.d.ts +27 -0
- package/dist/src/types.d.ts +46 -9
- package/dist/src/types.js +8 -7
- package/dist/src/usecase.d.ts +11 -2
- package/dist/src/usecase.js +27 -35
- package/dist/src/utils.d.ts +32 -18
- package/dist/src/utils.js +87 -129
- package/package.json +10 -3
- package/dist/src/agents/digestor.test.d.ts +0 -1
- package/dist/src/agents/digestor.test.js +0 -45
- package/dist/src/agents/reducer.example.d.ts +0 -28
- package/dist/src/agents/reducer.example.js +0 -118
- package/dist/src/agents/reducer.process.d.ts +0 -16
- package/dist/src/agents/reducer.process.js +0 -143
- package/dist/src/agents/reducer.tools.d.ts +0 -29
- package/dist/src/agents/reducer.tools.js +0 -157
- package/dist/src/agents/simpleExample.d.ts +0 -3
- package/dist/src/agents/simpleExample.js +0 -38
- package/dist/src/agents/system-review.d.ts +0 -5
- package/dist/src/agents/system-review.js +0 -181
- package/dist/src/agents/systemReview.d.ts +0 -4
- package/dist/src/agents/systemReview.js +0 -22
- package/dist/src/execute.d.ts +0 -49
- package/dist/src/execute.js +0 -564
- package/dist/src/princing.openai.js +0 -54
- package/dist/src/rag/tools.d.ts +0 -76
- package/dist/src/rag/tools.js +0 -196
- package/dist/src/rules/user.mapper.d.ts +0 -61
- package/dist/src/rules/user.mapper.js +0 -160
- package/dist/src/rules/utils/slug.d.ts +0 -22
- package/dist/src/rules/utils/slug.js +0 -35
|
@@ -0,0 +1,594 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Responses API Implementation
|
|
4
|
+
*
|
|
5
|
+
* Migration de Chat Completions vers Responses API pour:
|
|
6
|
+
* - Meilleur support des modèles reasoning (o-series, gpt-5)
|
|
7
|
+
* - Tool calls préservés pendant le raisonnement via reasoning: { effort }
|
|
8
|
+
* - Events SSE typés (response.output_text.delta, response.function_call.*)
|
|
9
|
+
* - Plus de boucle moreThinkin (obsolète) : reasoning géré nativement
|
|
10
|
+
*
|
|
11
|
+
* Différences clés vs legacy:
|
|
12
|
+
* - `input` remplace `messages` (même format array, juste renommage du paramètre)
|
|
13
|
+
* - `reasoning: { effort: 'low' | 'medium' | 'high' }` remplace `reasoning_effort`
|
|
14
|
+
* - Events SSE structurés vs delta progressifs
|
|
15
|
+
* - `response.usage` vs `final.usage`
|
|
16
|
+
* - Tools/function calling: format identique, `parallel_tool_calls` supporté
|
|
17
|
+
*
|
|
18
|
+
* Optimisations appliquées (vs code original):
|
|
19
|
+
* - OPTIM: Helpers centralisés (accumulateUsageTokens, stepsToActions)
|
|
20
|
+
* - BUG FIX: executionResultMerge fusionne actions correctement (corrigé dans types.ts)
|
|
21
|
+
* - BUG FIX: moreThinkin supprimé (obsolète)
|
|
22
|
+
* - BUG FIX: reasoning_effort géré par modelConfig uniquement
|
|
23
|
+
*
|
|
24
|
+
* TODO [Phase 2]: Migration vers openai-agents-js
|
|
25
|
+
* https://openai.github.io/openai-agents-js/
|
|
26
|
+
* - Remplacer handleTransferCall par handoff natif du SDK
|
|
27
|
+
* - Utiliser swarm.run() pour orchestration multi-agents
|
|
28
|
+
* - Mapper downstreamAgents vers router policies
|
|
29
|
+
* - Intégration optionnelle avec Vercel AI SDK (multi-providers)
|
|
30
|
+
* - stateGraph restera compatible (AgentMessage générique, agnostic de l'API)
|
|
31
|
+
*/
|
|
32
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
33
|
+
exports.readCompletionsStream = readCompletionsStream;
|
|
34
|
+
exports.executeAgentSet = executeAgentSet;
|
|
35
|
+
exports.executeAgent = executeAgent;
|
|
36
|
+
exports.executeQuery = executeQuery;
|
|
37
|
+
const types_1 = require("../types");
|
|
38
|
+
const utils_1 = require("../utils");
|
|
39
|
+
const stategraph_1 = require("../stategraph");
|
|
40
|
+
const pricing_llm_1 = require("../pricing.llm");
|
|
41
|
+
//
|
|
42
|
+
// Import des utilitaires partagés et helpers optimisés
|
|
43
|
+
const shared_1 = require("./shared");
|
|
44
|
+
const modelconfig_1 = require("./modelconfig");
|
|
45
|
+
const helpers_1 = require("./helpers");
|
|
46
|
+
/**
 * Creates a Responses API stream and resolves to its final, normalized result.
 *
 * Follows the official SDK pattern: `.stream()` with `.on()` handlers, then
 * `.finalResponse()` to obtain the fully assembled response.
 *
 * Handled events:
 * - response.output_text.delta: streamed text (mirrored to `stdout` if given)
 * - response.function_call.arguments.delta / .done: intentional no-ops — the
 *   completed calls are read from the final response instead
 * - response.error: logged to the console
 *
 * @param openai - OpenAI client instance
 * @param options - Options forwarded to responses.stream() (normalized first)
 * @param stdout - Optional writable stream for real-time text output
 * @returns The final response, normalized to a Chat-Completions-like shape
 */
async function createResponseStream(openai, options, stdout) {
    // Translate Chat-Completions-style options (tools, etc.) into the shape
    // expected by the Responses API before opening the stream.
    const requestOptions = (0, shared_1.normalizeOptionsForResponses)(options);
    const onTextDelta = (event) => {
        // Mirror streamed text chunks to the caller-supplied sink, when present.
        if (stdout && event.delta) {
            stdout.write(event.delta);
        }
    };
    const onError = (event) => {
        console.error('Response error:', event.error);
    };
    const stream = openai.responses.stream(requestOptions);
    stream.on("response.output_text.delta", onTextDelta);
    // Function-call argument events need no incremental handling: the
    // completed calls are captured in the final response below.
    stream.on("response.function_call.arguments.delta", () => { });
    stream.on("response.function_call.arguments.done", () => { });
    stream.on("response.error", onError);
    // Block until the stream completes; the final response contains all
    // completed function calls.
    const finalResponse = await stream.finalResponse();
    // Convert back to the format the rest of this module consumes.
    return (0, shared_1.normalizeOutputFromResponses)(finalResponse);
}
|
|
89
|
+
/**
 * RESPONSES API: processes one completed (already-streamed) agent response,
 * executing its tool calls in batch and recursing on follow-up responses.
 *
 * Despite the legacy name, `final` here is the normalized output of
 * createResponseStream (Chat-Completions-like shape), driven by the typed
 * SSE events of the Responses API:
 * - response.output_text.delta → text
 * - response.function_call.* → tool calls
 * - response.completed → finalization
 *
 * Side effects: pushes assistant / function_call_output messages onto the
 * stateGraph, accumulates token usage, emits user-facing feedback, and may
 * switch the active agent when a transfer tool fired.
 *
 * @param params - { stateGraph, discussion, agentConfig, agents,
 *                   discussionRootAgent, stdout, final, context, verbose,
 *                   enrichWithMemory? }
 * @returns An enriched ExecutionResult (actions derived from stateGraph steps).
 */
async function readCompletionsStream(params) {
    const openai = (0, utils_1.openaiInstance)();
    const { stateGraph, discussion, agentConfig, agents, discussionRootAgent, stdout, final, context, verbose } = params;
    // forResponses=true: resolve the model name via Responses-specific config.
    const model = (0, modelconfig_1.modelConfig)(agentConfig.model, {}, true).model;
    // Tool calls and text were already normalized by createResponseStream.
    const accumulatedFunctionCall = final.choices[0]?.message.tool_calls || [];
    const content = final.choices[0]?.message.content;
    let localResult = (0, types_1.enrichExecutionResult)({
        runId: `${discussionRootAgent}-${Date.now()}`,
        startQuery: '',
        actions: [],
        lastMessage: '',
        usage: { prompt: 0, completion: 0, total: 0, cost: 0 },
    });
    //
    // OPTIM: centralized helper for usage/token accumulation.
    (0, helpers_1.accumulateUsageTokens)(stateGraph, discussion, discussionRootAgent, model, final.usage);
    //
    // Forward reasoning_text as feedback when present (reasoning models:
    // o-series, gpt-5).
    const reasoningText = final.choices[0]?.message.reasoning_text;
    if (reasoningText) {
        (0, shared_1.sendFeedback)({
            agent: agentConfig.name,
            stdout,
            description: `💭 Raisonnement: ${reasoningText}`,
            usage: discussion.usage,
            state: '',
            verbose
        });
    }
    //
    // OPTIM: batchProcessToolCalls handles all tool calls at once — a single
    // follow-up request for the whole batch instead of one per call.
    if (accumulatedFunctionCall && accumulatedFunctionCall.length > 0) {
        //
        // Surface each tool call's justification (if provided) as feedback.
        for (const functionCall of accumulatedFunctionCall) {
            const args = JSON.parse(functionCall?.function?.arguments || '{}');
            if (args.justification) {
                (0, shared_1.sendFeedback)({
                    agent: agentConfig.name,
                    stdout,
                    description: args.justification,
                    usage: discussion.usage,
                    state: '',
                    verbose
                });
            }
        }
        //
        // Normalize every function call to the Chat Completions shape.
        const normalizedFunctionCalls = accumulatedFunctionCall.map(shared_1.normalizedFunctionCallFromResponse);
        //
        // Push the assistant message carrying the tool_calls BEFORE any
        // function_call_output, so the Responses API can associate outputs
        // with their calls (doc: "input_list += response.output").
        stateGraph.push(discussionRootAgent, {
            role: "assistant",
            content: content || '',
            tool_calls: normalizedFunctionCalls
        });
        //
        // Expose discussionRootAgent / currentAgent on the session context
        // for tools that need them.
        // NOTE(review): `agents.find` may return undefined if agentConfig.name
        // is not in `agents`; `currentAgent.name` below would then throw —
        // confirm callers always pass a member of `agents`.
        const currentAgent = agents.find(a => a.name === agentConfig.name);
        context.discussionRootAgent = discussionRootAgent;
        context.currentAgent = currentAgent;
        //
        // Execute all tool calls in one batch.
        const { results, needsFollowUp, parsedCalls, transferAgentName, toolOutputs } = await (0, helpers_1.batchProcessToolCalls)(normalizedFunctionCalls, agents, stateGraph, discussionRootAgent, context);
        //
        // Append each function_call_output to the stateGraph for the follow-up.
        // Outputs may be plain text (content) or structured JSON (stringified).
        for (const output of toolOutputs) {
            stateGraph.push(discussionRootAgent, {
                type: "function_call_output",
                call_id: output.call_id,
                output: output.output // already formatted: content or JSON.stringify(result.rawResult || result)
            });
        }
        //
        // Accumulate the usage reported by each tool call.
        for (const result of results) {
            if (result.usage) {
                stateGraph.updateTokens(discussionRootAgent, {
                    prompt: result.usage.prompt || 0,
                    completion: result.usage.completion || 0,
                    total: result.usage.total || 0,
                    cost: result.usage.cost || 0
                });
            }
        }
        //
        // Handle agent transfers, if any tool performed one.
        const hasTransfer = results.some(r => r.did_transfer);
        if (hasTransfer) {
            const transferResult = results.find(r => r.did_transfer);
            if (verbose)
                console.log("✅ Agent transfer (Responses):", transferResult?.source_agent, '→', transferResult?.destination_agent);
            // Refresh the system message with the new current agent's instructions.
            const transferredAgent = agents.find(a => a.name === transferAgentName) || agentConfig;
            const instructions = transferredAgent.instructions;
            const enrichedInstructions = (await params.enrichWithMemory?.("system", transferredAgent, context)) || '';
            // `set` preserves the existing trail via updateSystemMessage().
            stateGraph.set(discussionRootAgent, instructions + '\n' + enrichedInstructions);
        }
        //
        // Emit feedback for non-transfer tool calls.
        for (const result of results) {
            if (result.feedback && !result.did_transfer) {
                (0, shared_1.sendFeedback)({
                    agent: agentConfig.name,
                    stdout,
                    description: result.feedback,
                    usage: discussion.usage,
                    state: '',
                    verbose
                });
            }
        }
        //
        // Single follow-up stream for ALL tool calls (instead of N sequential
        // streams).
        if (needsFollowUp) {
            // Use the transferred agent when a transfer happened, else the current one.
            const followUpAgentName = transferAgentName || currentAgent.name;
            const followUpAgent = agents.find(a => a.name === followUpAgentName) || currentAgent;
            const tools = followUpAgent.tools || [];
            //
            // Responses API: `input` replaces `messages`.
            const followUpOptions = Object.assign({}, (0, modelconfig_1.modelConfig)(followUpAgent.model, {}, true)); // forResponses=true
            followUpOptions.input = (0, shared_1.convertMessagesToResponsesInput)(discussion.messages);
            if (tools.length > 0) {
                followUpOptions.tools = tools;
                followUpOptions.tool_choice = "auto";
            }
            //
            // Responses API: responses.stream() with .on() handlers + .finalResponse().
            const followUpFinal = await createResponseStream(openai, followUpOptions, stdout);
            //
            // Record steps on the trail AFTER the follow-up, so the trail is
            // visible to subsequent iterations.
            for (let i = 0; i < results.length; i++) {
                const result = results[i];
                const toolCall = parsedCalls[i];
                if (result.name) {
                    stateGraph.addStep(discussionRootAgent, {
                        tool: result.name,
                        context: result.context || '',
                        reason: toolCall.args.justification || '',
                        id: result.id
                    });
                }
            }
            // Debug helper (disabled): verify the <context-trail> landed in the
            // system message.
            // if(verbose) {
            //     const systemMsg = discussion.messages.find((m: any) => m.role === 'system');
            //     const hasTrail = systemMsg?.content.includes('<context-trail>');
            //     const trailSteps = stateGraph.steps(discussionRootAgent);
            //     console.log('--- DBG trail added:', hasTrail, 'steps count:', trailSteps.length);
            //     if(hasTrail && trailSteps.length > 0) {
            //         console.log('--- DBG trail content:', systemMsg.content.match(/<context-trail>[\s\S]*?<\/context-trail>/)?.[0]);
            //     }
            // }
            //
            // If the follow-up itself requested tool calls, recurse until the
            // agent produces a final message.
            if (followUpFinal.choices[0]?.message.tool_calls && followUpFinal.choices[0].message.tool_calls.length > 0) {
                const partial = await readCompletionsStream({
                    stateGraph,
                    discussion,
                    agentConfig: currentAgent,
                    agents,
                    discussionRootAgent,
                    stdout,
                    final: followUpFinal,
                    context,
                    verbose,
                    enrichWithMemory: params.enrichWithMemory
                });
                localResult = (0, types_1.executionResultMerge)(localResult, partial);
                localResult.actions = (0, helpers_1.stepsToActions)(stateGraph, discussionRootAgent);
                return localResult;
            }
            //
            // OPTIM: centralized helper for usage/token accumulation.
            (0, helpers_1.accumulateUsageTokens)(stateGraph, discussion, discussionRootAgent, model, followUpFinal.usage);
            //
            // Report the accumulated cost to the caller.
            (0, shared_1.sendFeedback)({
                agent: currentAgent.name,
                stdout,
                description: '',
                usage: discussion.usage,
                state: followUpFinal.id || '',
                verbose
            });
            const followUpContent = followUpFinal.choices[0]?.message.content || '';
            //
            // Capture new memory from the last assistant message.
            await params.enrichWithMemory?.("assistant", currentAgent, context);
            if (followUpContent) {
                stateGraph.push(discussionRootAgent, { role: "assistant", content: followUpContent });
            }
        }
    }
    else if (content) {
        // No tool calls, but there is content → store the assistant message.
        if (verbose)
            console.log("✅ Agent (Responses): save content without tool calls:", content?.length);
        stateGraph.push(discussionRootAgent, { role: "assistant", content });
    }
    //
    // OPTIM: centralized helper to project stateGraph steps into actions.
    localResult.actions = (0, helpers_1.stepsToActions)(stateGraph, discussionRootAgent);
    return localResult;
}
|
|
309
|
+
/**
 * Executes a set of agents to process a user query.
 *
 * RESPONSES API VERSION.
 *
 * Restores (or creates) the discussion in the session StateGraph, enriches
 * the system/user messages with memory, runs one streamed Responses API call
 * and delegates tool-call handling / follow-ups to readCompletionsStream.
 * The StateGraph is auto-saved before returning.
 *
 * @param agentSet - Agent configurations; transfer tools are injected here.
 * @param context - Session context (StateGraph storage + tool context).
 * @param params - { query, verbose, debug?, home?, thinking?, stdout?, enrichWithMemory? }
 * @returns An enriched ExecutionResult (actions, lastMessage, usage).
 * @throws Error when the resolved agent is missing or has no instructions.
 */
async function executeAgentSet(agentSet, context, params) {
    const { query, verbose } = params;
    const openai = (0, utils_1.openaiInstance)();
    const agents = (0, utils_1.injectTransferTools)(agentSet);
    // The "home" agent anchors the discussion; defaults to the first agent.
    const discussionRootAgent = params.home || agents[0].name;
    const stateGraph = (0, stategraph_1.sessionStateGraphGet)(context);
    const discussion = stateGraph.createOrRestore(discussionRootAgent);
    // A previous turn may have transferred to a specialized agent; resume there.
    const currentAgent = (0, stategraph_1.getSpecializedAgent)(discussion) || discussionRootAgent;
    const currentAgentConfig = agents.find(a => a.name === currentAgent);
    discussion.description = currentAgentConfig?.publicDescription;
    if (!currentAgentConfig) {
        throw new Error(`Agent ${currentAgent} not found`);
    }
    if (!currentAgentConfig.instructions) {
        throw new Error(`Agent ${currentAgent} has no instructions`);
    }
    let enrichedQuery = query;
    if (!discussion.messages.length) {
        // Fresh discussion: reset usage and install the memory-enriched system prompt.
        discussion.usage = { prompt: 0, completion: 0, total: 0, cost: 0 };
        // BUG FIX: default to '' so a missing/void enrichWithMemory no longer
        // appends the literal string "undefined" to the system instructions
        // (matches the `|| ''` pattern used in the transfer branch).
        const enrichedInstructions = (await params.enrichWithMemory?.("system", currentAgentConfig, context)) || '';
        const instructions = currentAgentConfig.instructions + '\n' + enrichedInstructions;
        stateGraph.set(discussionRootAgent, instructions);
    }
    else {
        // Existing discussion: enrich the user query with memory instead.
        enrichedQuery = (await params.enrichWithMemory?.("user", currentAgentConfig, context)) || query;
    }
    stateGraph.push(discussionRootAgent, { role: "user", content: enrichedQuery });
    // BUG FIX: fall back to [] so agents declared without tools no longer
    // crash on `tools.length` below (consistent with `agent.tools || []`
    // used elsewhere in this module).
    const tools = currentAgentConfig.tools || [];
    if (verbose) {
        console.log('--- DBG current agent (Responses)', currentAgentConfig.name, `reasoning:${params.thinking}`, 'memory len:', discussion.messages.length);
    }
    let result = (0, types_1.enrichExecutionResult)({
        runId: `${discussionRootAgent}-${Date.now()}`,
        startQuery: query,
        actions: [],
        lastMessage: '',
        usage: { prompt: 0, completion: 0, total: 0, cost: 0 },
    });
    // Emit a short "working…" feedback line, picked at random for variety.
    const initialInfo = ["Analyse", "Analyse en cours…"];
    const randomIndex = Math.floor(Math.random() * initialInfo.length);
    (0, shared_1.sendFeedback)({
        agent: currentAgentConfig.name,
        stdout: params.stdout,
        description: initialInfo[randomIndex],
        usage: result.usage,
        state: discussion.id
    });
    //
    // Responses API: modelConfig with forResponses=true yields reasoning: { effort }.
    const model = (0, modelconfig_1.modelConfig)(currentAgentConfig.model, { thinking: params.thinking }, true);
    const options = Object.assign({}, model);
    //
    // TODO [Phase 3]: support previous_response_id here (currently only
    // implemented in executeAgent). Store response.id on the discussion /
    // stateGraph and replay it on subsequent calls:
    //   if (discussion.lastResponseId) options.previous_response_id = discussion.lastResponseId;
    //
    // Responses API: `input` replaces `messages`.
    options.input = (0, shared_1.convertMessagesToResponsesInput)(discussion.messages);
    if (tools.length > 0) {
        options.tools = tools;
        options.tool_choice = "auto";
        options.parallel_tool_calls = true;
    }
    if (params.debug) {
        console.log('--- DBG executeAgentSet:', JSON.stringify(options.input, null, 2));
    }
    //
    // Responses API: responses.stream() with .on() handlers + .finalResponse();
    // normalizeOutputFromResponses yields the Chat Completions shape directly.
    try {
        const final = await createResponseStream(openai, options, params.stdout);
        const partial = await readCompletionsStream({
            stateGraph,
            discussion,
            agentConfig: currentAgentConfig,
            agents,
            discussionRootAgent,
            stdout: params.stdout,
            context: context,
            final: final,
            verbose,
            enrichWithMemory: params.enrichWithMemory,
        });
        result = (0, types_1.executionResultMerge)(result, partial);
    }
    catch (error) {
        // Degrade gracefully on OpenAI transport errors (timeout, premature
        // close, …): surface the failure to the user instead of throwing.
        const errorMsg = `❌ Erreur technique: ${error.message || 'Problème de communication avec l\'IA'}. Veuillez réessayer.`;
        console.error('Error during OpenAI stream (executeAgentSet):', error);
        // Push the error message into the discussion so the user sees it.
        stateGraph.push(discussionRootAgent, {
            role: "assistant",
            content: errorMsg
        });
        result.lastMessage = errorMsg;
    }
    //
    // Project stateGraph steps into the returned actions.
    result.actions = (0, helpers_1.stepsToActions)(stateGraph, discussionRootAgent);
    // 💾 Auto-save the StateGraph at the end of the turn.
    (0, stategraph_1.sessionStateGraphSet)(context, stateGraph);
    // Finalize: last message of the discussion + accumulated usage.
    result.lastMessage = discussion.messages?.[discussion.messages.length - 1]?.content || '';
    if (discussion?.usage) {
        result.usage = {
            prompt: discussion.usage.prompt || 0,
            completion: discussion.usage.completion || 0,
            total: discussion.usage.total || 0,
            cost: discussion.usage.cost || 0,
        };
    }
    result = (0, types_1.enrichExecutionResult)(result);
    return result;
}
|
|
432
|
+
/**
 * Execute a single agent - Responses API version.
 *
 * Runs a stateless tool-calling loop (no StateGraph): repeatedly streams a
 * response, executes any requested tools via agent.toolLogic, and feeds the
 * function_call_output back using previous_response_id, until the agent stops
 * calling tools or maxIterations is reached.
 *
 * @param agentSet - Agent configurations to search for params.home.
 * @param params - { query, verbose?, debug?, home, stdout? }
 * @returns { usage, content, messages, state } where state is the last response id.
 * @throws Error when the agent is missing or has no instructions.
 */
async function executeAgent(agentSet, params) {
    const { query, verbose, debug } = params;
    const openai = (0, utils_1.openaiInstance)();
    const agent = agentSet.find(a => a.name === params.home);
    if (!agent) {
        throw new Error(`Agent ${params.home} not found`);
    }
    if (!agent.instructions) {
        throw new Error(`Agent ${agent.name} has no instructions`);
    }
    const messages = [
        { role: "system", content: agent.instructions },
        { role: "user", content: query }
    ];
    let usage = { prompt: 0, completion: 0, total: 0, cost: 0 };
    let state = '';
    // Hard cap on tool-calling rounds to avoid infinite loops.
    const maxIterations = 10;
    let iterations = 0;
    if (verbose) {
        console.log('--- DBG executeAgent (Responses):', agent.name);
    }
    if (debug) {
        console.log('--- DBG executeAgent-system (Responses):', agent.instructions);
        console.log('--- DBG executeAgent-user:', query);
    }
    while (iterations < maxIterations) {
        iterations++;
        const options = Object.assign({}, (0, modelconfig_1.modelConfig)(agent.model, {}, true)); // forResponses=true
        options.input = messages;
        // Use previous_response_id to keep function-call context across rounds.
        if (state) {
            options.previous_response_id = state;
        }
        const tools = agent.tools || [];
        if (tools.length > 0) {
            options.tools = tools;
            options.tool_choice = "auto";
        }
        if (verbose) {
            console.log('--- DBG executeAgent (Responses):', agent.name, 'iterations:', iterations);
        }
        // Responses API: responses.stream() with .on() handlers + .finalResponse();
        // normalizeOutputFromResponses yields the Chat Completions shape directly.
        const final = await createResponseStream(openai, options, params.stdout);
        const model = (0, modelconfig_1.modelConfig)(agent.model, {}, true).model;
        (0, pricing_llm_1.accumulateCost)(usage, model, final.usage);
        state = final.id;
        const content = final.choices[0]?.message.content || '';
        const toolCalls = final.choices[0]?.message.tool_calls || [];
        let hasToolCalls = false;
        // The Responses API input format differs from Chat Completions;
        // append the assistant content when present.
        if (content) {
            messages.push({ role: "assistant", content });
        }
        // Process tool calls (Chat Completions shape after normalization).
        if (toolCalls && toolCalls.length > 0) {
            hasToolCalls = true;
            for (const toolCall of toolCalls) {
                const name = toolCall.function.name;
                const logic = agent.toolLogic?.[name];
                let output;
                if (!logic) {
                    // BUG FIX: previously a tool call without matching toolLogic
                    // produced NO function_call_output, leaving an unanswered
                    // call_id in the Responses conversation and burning
                    // iterations. Always answer every call.
                    output = `Error: no tool logic registered for "${name}"`;
                }
                else {
                    try {
                        // BUG FIX: JSON.parse of the arguments is now inside the
                        // try, so malformed arguments become a tool error output
                        // instead of aborting the whole run.
                        const args = JSON.parse(toolCall.function.arguments || '{}');
                        const result = await logic(args, { state });
                        output = typeof result === 'string' ? result : JSON.stringify(result);
                    }
                    catch (error) {
                        output = `Error: ${error instanceof Error ? error.message : 'Unknown error'}`;
                    }
                }
                // Responses API: feed results back as function_call_output.
                messages.push({
                    type: "function_call_output",
                    call_id: toolCall.id,
                    output
                });
            }
        }
        if (!hasToolCalls) {
            break;
        }
    }
    // NOTE(review): the last message may be a function_call_output (no
    // `content`) when maxIterations is hit mid-loop; content is then ''.
    return {
        usage,
        content: messages[messages.length - 1]?.content || '',
        messages,
        state
    };
}
|
|
526
|
+
/**
 * Execute query - Responses API version.
 *
 * Appends `query` as a user message to `params.messages` (the array is
 * mutated in place), streams the model response, accumulates usage/cost,
 * and returns `{ usage, content, messages, state }` where `state` is the
 * final response id.
 *
 * @param {object} params - query, model (required), and optional verbose,
 *   instructions, messages, json, schema, stdout.
 * @throws {Error} when no model name is provided, or when the stream fails.
 */
async function executeQuery(params) {
    const { query, verbose, model: modelName, instructions } = params;
    if (!modelName)
        throw new Error('executeQuery requires "model" parameter');
    const client = (0, utils_1.openaiInstance)();
    // forResponses=true → configuration shaped for the Responses API
    const config = (0, modelconfig_1.modelConfig)(modelName, {}, true);
    // Responses API: response_format → text.format. Merge with any `text`
    // settings (e.g. verbosity) that modelConfig may already have set.
    if (params.json || params.schema) {
        const format = params.schema
            // Structured Outputs: "json_schema" with schema and strict mode
            ? { type: "json_schema", name: "response_schema", schema: params.schema, strict: true }
            // Plain JSON mode
            : { type: "json_object" };
        config.text = { ...(config.text || {}), format };
    }
    if (verbose) {
        console.log('--- DBG query (Responses):', modelName, `${query?.substring(0, 100)}...`);
    }
    const messages = params.messages || [];
    messages.push({ role: "user", content: query });
    let usage = { prompt: 0, completion: 0, total: 0, cost: 0 };
    let responseId = '';
    try {
        const options = { ...config, input: messages };
        // Responses API: prefer top-level `instructions` over a role:"system"
        // message when the caller supplies them.
        if (instructions)
            options.instructions = instructions;
        if (verbose) {
            console.log('--- DBG executeQuery options (Responses):', JSON.stringify(options, null, 2));
        }
        // Streams via responses.stream() with .on() handlers; the final
        // response comes back already normalized to the Chat Completions shape.
        const final = await createResponseStream(client, options, params.stdout);
        (0, pricing_llm_1.accumulateCost)(usage, config.model, final.usage);
        responseId = final.id || '';
        const content = final.choices[0]?.message.content || '';
        if (verbose) {
            console.log('--- DBG executeQuery completed (Responses), usage:', usage);
        }
        return {
            usage,
            content,
            messages: [...messages, { role: "assistant", content }],
            state: responseId
        };
    }
    catch (error) {
        console.error('❌ executeQuery failed (Responses):', error);
        throw error;
    }
}
|
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
import { Writable } from "stream";
|
|
2
|
+
import { Feedback } from "../types";
|
|
3
|
+
// NOTE(review): name suggests a no-op Writable sink used when no output
// stream is supplied — confirm against shared.js; typed `any` here.
export declare const DummyWritable: any;
// Delivers a Feedback payload to the consumer; side effects only (returns void).
export declare function sendFeedback(params: Feedback): void;
|
|
5
|
+
/**
 * Options for the Responses-API streaming loop that drives an agent
 * discussion (see readCompletionsStream in responses.ts).
 *
 * Most fields are typed `any` in this compiled declaration; the notes
 * below are inferred from field names and should be confirmed in source.
 */
export interface ReadCompletionsStreamOptions {
    /** Conversation/state graph shared across turns — presumably mutated in place. */
    stateGraph: any;
    /** Current discussion object — shape not visible here. */
    discussion: any;
    /** Configuration of the active agent. */
    agentConfig: any;
    /** All agents participating in the discussion. */
    agents: any[];
    /** Name/id of the root agent of the discussion. */
    discussionRootAgent: string;
    /** Stream that receives incremental output. */
    stdout: Writable;
    /** Arbitrary execution context passed to tools/hooks. */
    context: any;
    /** Accumulator for the final result — shape not visible here. */
    final: any;
    /** Enables debug logging when true. */
    verbose?: boolean;
    /** Optional hook to enrich the prompt with memory for a given role/agent. */
    enrichWithMemory?: (role: string, agent: any, context: any) => Promise<string>;
    /** Previous Responses-API response id, for conversation continuity. */
    responseId?: string;
}
|
|
18
|
+
/**
 * Parameters for executeAgentSet (responses.ts).
 *
 * Field notes are inferred from names and sibling usage; confirm in source
 * where marked.
 */
export interface ExecuteAgentSetParams {
    /** Optional hook to enrich the prompt with memory for a given role/agent. */
    enrichWithMemory?: (role: string, agent: any, context: any) => Promise<string>;
    /** User query sent to the agent set. */
    query: string;
    /** NOTE(review): presumably a home/working directory — confirm. */
    home?: string;
    /** NOTE(review): presumably toggles reasoning/thinking mode — confirm. */
    thinking?: boolean;
    /** Model name; executeQuery throws if absent there. */
    model?: string;
    /** Responses API top-level instructions (replaces a role:"system" message). */
    instructions?: string;
    /** Stream that receives incremental output. */
    stdout: Writable;
    /** Prior conversation messages; appended to in place by executeQuery. */
    messages?: any[];
    /** Enables debug logging when true. */
    verbose?: boolean;
    /** Request plain JSON mode (text.format = json_object). */
    json?: boolean;
    /** JSON schema for Structured Outputs (text.format = json_schema, strict). */
    schema?: any;
    /** Extra debug flag — usage not visible in this chunk. */
    debug?: boolean;
}
|
|
32
|
+
/**
 * Normalizes options for the Responses API.
 *
 * Transforms parameters from the Chat Completions format to the Responses API:
 * - Tools: { type: "function", function: {...} } → { type: "function", name, description, parameters, strict }
 * - Other transformations may be added in the future if needed
 *
 * @param options - Options to normalize
 * @returns Options normalized for the Responses API
 */
export declare function normalizeOptionsForResponses(options: any): any;
|
|
43
|
+
/**
 * Normalizes a function call from the Responses format to the Chat
 * Completions format.
 *
 * Responses API: { type: "function_call", name, arguments, call_id }
 * Chat Completions: { id, type: "function", function: { name, arguments } }
 *
 * If the input is already in Chat Completions format (has `.function`),
 * it is returned as-is.
 *
 * @param functionCall - Function call in Responses or Chat Completions format
 * @returns Function call in Chat Completions format
 */
export declare function normalizedFunctionCallFromResponse(functionCall: any): any;
|
|
55
|
+
/**
 * Normalizes Responses API output to the Chat Completions format.
 *
 * Transforms the Responses API structure into the shape expected by the
 * rest of the codebase.
 *
 * Responses API format (result.output array):
 * - { type: "reasoning", summary: [] }
 * - { type: "message", content: [{ type: "output_text", text: "..." }] }
 * - { type: "function_call", name, arguments, call_id }
 *
 * Chat Completions-compatible format:
 * - { choices: [{ message: { content, tool_calls, reasoning_text } }], usage: {...}, id: "..." }
 *
 * @param result - Result of stream.finalResponse()
 * @returns Result in Chat Completions format, with reasoning_text when present
 */
export declare function normalizeOutputFromResponses(result: any): any;
|
|
72
|
+
/**
 * Converts stateGraph messages to the Responses API `input` format.
 * Handles plain messages, tool_calls (Chat Completions format), and
 * function_call_output items (Responses format).
 *
 * Used by:
 * - readCompletionsStream (responses.ts) for the follow-up turn after tool calls
 * - executeAgentSet (responses.ts) to prepare the initial input
 *
 * @param messages Messages from the stateGraph
 * @returns Array of items in Responses API input format
 */
export declare function convertMessagesToResponsesInput(messages: any[]): any[];
|