agentic-api 2.0.31 → 2.0.491
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/src/agents/agents.example.js +21 -22
- package/dist/src/agents/authentication.js +1 -2
- package/dist/src/agents/prompts.d.ts +5 -4
- package/dist/src/agents/prompts.js +44 -87
- package/dist/src/agents/reducer.core.d.ts +24 -2
- package/dist/src/agents/reducer.core.js +125 -35
- package/dist/src/agents/reducer.loaders.d.ts +55 -1
- package/dist/src/agents/reducer.loaders.js +114 -1
- package/dist/src/agents/reducer.types.d.ts +45 -2
- package/dist/src/agents/semantic.js +1 -2
- package/dist/src/agents/simulator.d.ts +11 -3
- package/dist/src/agents/simulator.executor.d.ts +14 -4
- package/dist/src/agents/simulator.executor.js +81 -23
- package/dist/src/agents/simulator.js +128 -42
- package/dist/src/agents/simulator.prompts.d.ts +9 -7
- package/dist/src/agents/simulator.prompts.js +66 -86
- package/dist/src/agents/simulator.types.d.ts +23 -5
- package/dist/src/agents/simulator.utils.d.ts +7 -2
- package/dist/src/agents/simulator.utils.js +31 -11
- package/dist/src/agents/system.js +1 -2
- package/dist/src/execute/helpers.d.ts +75 -0
- package/dist/src/execute/helpers.js +139 -0
- package/dist/src/execute/index.d.ts +11 -0
- package/dist/src/execute/index.js +44 -0
- package/dist/src/execute/legacy.d.ts +46 -0
- package/dist/src/execute/legacy.js +460 -0
- package/dist/src/execute/modelconfig.d.ts +19 -0
- package/dist/src/execute/modelconfig.js +56 -0
- package/dist/src/execute/responses.d.ts +55 -0
- package/dist/src/execute/responses.js +594 -0
- package/dist/src/execute/shared.d.ts +83 -0
- package/dist/src/execute/shared.js +188 -0
- package/dist/src/index.d.ts +1 -1
- package/dist/src/index.js +2 -2
- package/dist/src/{princing.openai.d.ts → pricing.llm.d.ts} +6 -0
- package/dist/src/pricing.llm.js +255 -0
- package/dist/src/prompts.d.ts +13 -4
- package/dist/src/prompts.js +221 -114
- package/dist/src/rag/embeddings.d.ts +36 -18
- package/dist/src/rag/embeddings.js +131 -128
- package/dist/src/rag/index.d.ts +5 -5
- package/dist/src/rag/index.js +14 -17
- package/dist/src/rag/parser.d.ts +2 -1
- package/dist/src/rag/parser.js +11 -14
- package/dist/src/rag/rag.examples.d.ts +27 -0
- package/dist/src/rag/rag.examples.js +151 -0
- package/dist/src/rag/rag.manager.d.ts +383 -0
- package/dist/src/rag/rag.manager.js +1390 -0
- package/dist/src/rag/types.d.ts +128 -12
- package/dist/src/rag/types.js +100 -1
- package/dist/src/rag/usecase.d.ts +37 -0
- package/dist/src/rag/usecase.js +96 -7
- package/dist/src/rules/git/git.e2e.helper.js +22 -2
- package/dist/src/rules/git/git.health.d.ts +61 -2
- package/dist/src/rules/git/git.health.js +333 -11
- package/dist/src/rules/git/index.d.ts +2 -2
- package/dist/src/rules/git/index.js +13 -1
- package/dist/src/rules/git/repo.d.ts +160 -0
- package/dist/src/rules/git/repo.js +777 -0
- package/dist/src/rules/git/repo.pr.js +117 -13
- package/dist/src/rules/git/repo.tools.d.ts +22 -1
- package/dist/src/rules/git/repo.tools.js +50 -1
- package/dist/src/rules/types.d.ts +27 -14
- package/dist/src/rules/utils.matter.d.ts +0 -4
- package/dist/src/rules/utils.matter.js +35 -7
- package/dist/src/scrapper.d.ts +15 -22
- package/dist/src/scrapper.js +58 -110
- package/dist/src/stategraph/index.d.ts +1 -1
- package/dist/src/stategraph/stategraph.d.ts +56 -2
- package/dist/src/stategraph/stategraph.js +134 -6
- package/dist/src/stategraph/stategraph.storage.js +8 -0
- package/dist/src/stategraph/types.d.ts +27 -0
- package/dist/src/types.d.ts +46 -9
- package/dist/src/types.js +8 -7
- package/dist/src/usecase.d.ts +11 -2
- package/dist/src/usecase.js +27 -35
- package/dist/src/utils.d.ts +32 -18
- package/dist/src/utils.js +87 -129
- package/package.json +10 -3
- package/dist/src/agents/digestor.test.d.ts +0 -1
- package/dist/src/agents/digestor.test.js +0 -45
- package/dist/src/agents/reducer.example.d.ts +0 -28
- package/dist/src/agents/reducer.example.js +0 -118
- package/dist/src/agents/reducer.process.d.ts +0 -16
- package/dist/src/agents/reducer.process.js +0 -143
- package/dist/src/agents/reducer.tools.d.ts +0 -29
- package/dist/src/agents/reducer.tools.js +0 -157
- package/dist/src/agents/simpleExample.d.ts +0 -3
- package/dist/src/agents/simpleExample.js +0 -38
- package/dist/src/agents/system-review.d.ts +0 -5
- package/dist/src/agents/system-review.js +0 -181
- package/dist/src/agents/systemReview.d.ts +0 -4
- package/dist/src/agents/systemReview.js +0 -22
- package/dist/src/execute.d.ts +0 -49
- package/dist/src/execute.js +0 -564
- package/dist/src/princing.openai.js +0 -54
- package/dist/src/rag/tools.d.ts +0 -76
- package/dist/src/rag/tools.js +0 -196
- package/dist/src/rules/user.mapper.d.ts +0 -61
- package/dist/src/rules/user.mapper.js +0 -160
- package/dist/src/rules/utils/slug.d.ts +0 -22
- package/dist/src/rules/utils/slug.js +0 -35
package/dist/src/execute/legacy.js
@@ -0,0 +1,460 @@
+"use strict";
+/**
+ * Legacy Implementation - Chat Completions API (beta.chat.completions)
+ *
+ * ⚠️ This implementation uses openai.beta.chat.completions.stream (the older API).
+ * For new projects, the Responses API (responses.ts) is recommended.
+ *
+ * Code optimized from the original execute.ts with the following improvements:
+ * - OPTIM: centralized helpers (accumulateUsageTokens, stepsToActions)
+ * - BUG FIX: executionResultMerge now merges actions correctly (fixed in types.ts)
+ * - BUG FIX: moreThinkin removed (obsolete; reasoning_effort does the job)
+ * - BUG FIX: removed the do...while(moreThinkin) loop
+ * - BUG FIX: removed the duplicated reasoning_effort line (line 425 of the original)
+ *
+ * TODO [Future optimization]: replace the sequential for loop with batchProcessToolCalls
+ * to fully exploit parallel_tool_calls and reduce latency
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.readCompletionsStream = readCompletionsStream;
+exports.executeAgentSet = executeAgentSet;
+exports.executeAgent = executeAgent;
+exports.executeQuery = executeQuery;
+const types_1 = require("../types");
+const utils_1 = require("../utils");
+const stategraph_1 = require("../stategraph");
+const pricing_llm_1 = require("../pricing.llm");
+//
+// Shared utilities and optimized helpers
+const shared_1 = require("./shared");
+const modelconfig_1 = require("./modelconfig");
+const helpers_1 = require("./helpers");
+async function readCompletionsStream(params) {
+    const openai = (0, utils_1.openaiInstance)();
+    const { stateGraph, discussion, agentConfig, agents, discussionRootAgent, stdout, final, context, verbose } = params;
+    const model = (0, modelconfig_1.modelConfig)(agentConfig.model).model;
+    const accumulatedFunctionCall = final.choices[0]?.message.tool_calls || [];
+    const content = final.choices[0]?.message.content;
+    let localResult = (0, types_1.enrichExecutionResult)({
+        runId: `${discussionRootAgent}-${Date.now()}`,
+        startQuery: '',
+        actions: [],
+        lastMessage: '',
+        usage: { prompt: 0, completion: 0, total: 0, cost: 0 },
+    });
+    //
+    // OPTIM: use the centralized helper for accumulation
+    (0, helpers_1.accumulateUsageTokens)(stateGraph, discussion, discussionRootAgent, model, final.usage);
+    if (content) {
+        if (verbose)
+            console.log("✅ Agent (1): save content:", content?.length);
+        stateGraph.push(discussionRootAgent, { role: "assistant", content });
+    }
+    //
+    // If the model decides to call a function (for example "transferAgents")
+    for (const functionCall of accumulatedFunctionCall) {
+        const args = JSON.parse(functionCall?.function?.arguments || '{}');
+        if (args.justification) {
+            (0, shared_1.sendFeedback)({
+                agent: agentConfig.name,
+                stdout,
+                description: args.justification,
+                usage: discussion.usage,
+                state: '',
+                verbose
+            });
+        }
+        // Create a mutable reference for handleTransferCall
+        const currentAgentRef = { name: agentConfig.name };
+        const functionCallResult = await (0, utils_1.handleTransferCall)(discussion, currentAgentRef, agents, functionCall, context);
+        if (functionCallResult.usage) {
+            stateGraph.updateTokens(discussionRootAgent, {
+                prompt: functionCallResult.usage.prompt || 0,
+                completion: functionCallResult.usage.completion || 0,
+                total: functionCallResult.usage.total || 0,
+                cost: functionCallResult.usage.cost || 0
+            });
+        }
+        if (functionCallResult.did_transfer) {
+            if (verbose)
+                console.log("✅ Agent transfer response:", functionCallResult.source_agent, '::to', functionCallResult.destination_agent, '::with', functionCallResult.content, ' transfer done ✅');
+            // Update the system message with the new current agent's instructions
+            const transferredAgent = agents.find(a => a.name === currentAgentRef.name) || agentConfig;
+            const instructions = transferredAgent.instructions;
+            const enrichedInstructions = (await params.enrichWithMemory?.("system", transferredAgent, context)) || '';
+            // ✅ set() preserves the existing trail via updateSystemMessage()
+            stateGraph.set(discussionRootAgent, instructions + '\n' + enrichedInstructions);
+            stateGraph.push(discussionRootAgent, {
+                role: "assistant",
+                content: functionCallResult.content,
+                name: functionCallResult.name
+            });
+        }
+        else {
+            // other function calls return a result
+            stateGraph.push(discussionRootAgent, {
+                role: "assistant",
+                content: functionCallResult.content,
+                name: functionCallResult.name
+            });
+        }
+        //
+        // send user feedback (if not already sent via addStep for transfer)
+        if (functionCallResult.feedback && !functionCallResult.did_transfer) {
+            (0, shared_1.sendFeedback)({
+                agent: agentConfig.name,
+                stdout,
+                description: functionCallResult.feedback,
+                usage: discussion.usage,
+                state: '',
+                verbose
+            });
+        }
+        // Refresh the tool list for the new current agent
+        const currentAgent = agents.find(a => a.name === currentAgentRef.name) || agentConfig;
+        const tools = currentAgent?.tools || [];
+        const followUpOptions = Object.assign({}, (0, modelconfig_1.modelConfig)(currentAgent.model));
+        followUpOptions.messages = discussion.messages;
+        if (tools.length > 0) {
+            followUpOptions.tools = tools;
+            followUpOptions.tool_choice = "auto";
+        }
+        //
+        // Legacy: uses beta.chat.completions.stream (the older API)
+        // NOTE: this API is in beta; the Responses API is recommended for new projects
+        const followUpStream = await openai.beta.chat.completions.stream(followUpOptions);
+        for await (const chunk of followUpStream) {
+            const delta = chunk.choices[0]?.delta;
+            if (delta?.content) {
+                stdout.write(delta?.content);
+            }
+        }
+        const followUpFinal = await followUpStream.finalChatCompletion();
+        //
+        // ✅ addStep AFTER the agent's response (so the next turn has the complete trail)
+        if (functionCallResult.name) {
+            stateGraph.addStep(discussionRootAgent, {
+                tool: functionCallResult.name,
+                context: functionCallResult.context || '',
+                reason: args.justification || '',
+                id: functionCallResult.id
+            });
+        }
+        //
+        // after a function call, the agent must continue the conversation
+        if (followUpFinal.choices[0]?.message.tool_calls) {
+            const partial = await readCompletionsStream({
+                stateGraph,
+                discussion,
+                agentConfig: currentAgent,
+                agents,
+                discussionRootAgent,
+                stdout,
+                final: followUpFinal,
+                context,
+                verbose,
+                enrichWithMemory: params.enrichWithMemory
+            });
+            localResult = (0, types_1.executionResultMerge)(localResult, partial);
+            // ✅ OPTIM: use the centralized helper
+            localResult.actions = (0, helpers_1.stepsToActions)(stateGraph, discussionRootAgent);
+            return localResult;
+        }
+        //
+        // OPTIM: use the centralized helper for accumulation
+        (0, helpers_1.accumulateUsageTokens)(stateGraph, discussion, discussionRootAgent, model, followUpFinal.usage);
+        //
+        // send the cost
+        (0, shared_1.sendFeedback)({
+            agent: currentAgent.name,
+            stdout,
+            description: '',
+            usage: discussion.usage,
+            state: followUpFinal.id || '',
+            verbose
+        });
+        const followUpContent = followUpFinal.choices[0]?.message.content;
+        //
+        // capture new memory with the last message
+        await params.enrichWithMemory?.("assistant", currentAgent, context);
+        if (followUpContent) {
+            stateGraph.push(discussionRootAgent, { role: "assistant", content: followUpContent });
+        }
+    }
+    //
+    // ✅ OPTIM: use the centralized helper
+    localResult.actions = (0, helpers_1.stepsToActions)(stateGraph, discussionRootAgent);
+    return localResult;
+}
+/**
+ * Executes a set of agents to process a user query
+ *
+ * OPTIMIZED: no do...while(moreThinkin) loop; reasoning_effort does the job
+ */
+async function executeAgentSet(agentSet, context, params) {
+    const { query, verbose } = params;
+    const openai = (0, utils_1.openaiInstance)();
+    const agents = (0, utils_1.injectTransferTools)(agentSet);
+    // 🔑 DISCUSSION KEY: root agent that serves as the entry point
+    const discussionRootAgent = params.home || agents[0].name;
+    // 🎯 Retrieve the StateGraph from the context
+    const stateGraph = (0, stategraph_1.sessionStateGraphGet)(context);
+    // 📍 Create or restore the discussion for this root agent
+    const discussion = stateGraph.createOrRestore(discussionRootAgent);
+    // 🔄 Current agent: may differ from the root agent after transfers
+    let currentAgent = (0, stategraph_1.getSpecializedAgent)(discussion) || discussionRootAgent;
+    // Find the current agent's config
+    const currentAgentConfig = agents.find(a => a.name === currentAgent);
+    discussion.description = currentAgentConfig?.publicDescription;
+    if (!currentAgentConfig) {
+        throw new Error(`Agent ${currentAgent} not found`);
+    }
+    if (!currentAgentConfig.instructions) {
+        throw new Error(`Agent ${currentAgent} has no instructions`);
+    }
+    // 🔧 Set up the system message if not done yet
+    let enrichedQuery = query;
+    if (!discussion.messages.length) {
+        discussion.usage = { prompt: 0, completion: 0, total: 0, cost: 0 };
+        const enrichedInstructions = await params.enrichWithMemory?.("system", currentAgentConfig, context);
+        const instructions = currentAgentConfig.instructions + '\n' + enrichedInstructions;
+        stateGraph.set(discussionRootAgent, instructions);
+    }
+    else {
+        enrichedQuery = (await params.enrichWithMemory?.("user", currentAgentConfig, context)) || query;
+    }
+    stateGraph.push(discussionRootAgent, { role: "user", content: enrichedQuery });
+    const tools = currentAgentConfig.tools;
+    if (verbose) {
+        console.log('--- DBG current agent', currentAgentConfig.name, `deep-thinking:${params.thinking}`, 'memory len:', discussion.messages.length);
+    }
+    let result = (0, types_1.enrichExecutionResult)({
+        runId: `${discussionRootAgent}-${Date.now()}`,
+        startQuery: query,
+        actions: [],
+        lastMessage: '',
+        usage: { prompt: 0, completion: 0, total: 0, cost: 0 },
+    });
+    //
+    // ✅ BUG FIX: no more do...while(moreThinkin) loop - reasoning_effort does the job
+    const intialinfo = ["Analyse", "Analyse en cours…", "Réflexion"];
+    const randomIndex = Math.floor(Math.random() * intialinfo.length);
+    (0, shared_1.sendFeedback)({
+        agent: currentAgentConfig.name,
+        stdout: params.stdout,
+        description: intialinfo[randomIndex],
+        usage: result.usage,
+        state: discussion.id
+    });
+    //
+    // ✅ BUG FIX: modelConfig handles reasoning_effort, no duplication (line 425 of the original removed)
+    const model = (0, modelconfig_1.modelConfig)(currentAgentConfig.model, { thinking: params.thinking });
+    const options = Object.assign({}, model);
+    options.messages = discussion.messages;
+    if (tools.length > 0) {
+        options.tools = tools;
+        options.tool_choice = "auto";
+        options.parallel_tool_calls = true;
+    }
+    const stream = await openai.beta.chat.completions.stream(options);
+    for await (const chunk of stream) {
+        const delta = chunk.choices[0]?.delta;
+        if (delta?.content) {
+            params.stdout.write(delta?.content);
+        }
+    }
+    const final = await stream.finalChatCompletion();
+    const partial = await readCompletionsStream({
+        stateGraph,
+        discussion,
+        agentConfig: currentAgentConfig,
+        agents,
+        discussionRootAgent,
+        stdout: params.stdout,
+        context,
+        final,
+        verbose,
+        enrichWithMemory: params.enrichWithMemory,
+    });
+    result = (0, types_1.executionResultMerge)(result, partial);
+    //
+    // ✅ OPTIM: use the centralized helper
+    result.actions = (0, helpers_1.stepsToActions)(stateGraph, discussionRootAgent);
+    // 💾 Auto-save the StateGraph at the end
+    (0, stategraph_1.sessionStateGraphSet)(context, stateGraph);
+    // finalize result
+    result.lastMessage = discussion.messages?.[discussion.messages.length - 1]?.content || '';
+    if (discussion?.usage) {
+        result.usage = {
+            prompt: discussion.usage.prompt || 0,
+            completion: discussion.usage.completion || 0,
+            total: discussion.usage.total || 0,
+            cost: discussion.usage.cost || 0,
+        };
+    }
+    result = (0, types_1.enrichExecutionResult)(result);
+    return result;
+}
+async function executeAgent(agentSet, params) {
+    const { query, verbose, debug } = params;
+    const openai = (0, utils_1.openaiInstance)();
+    const agent = agentSet.find(a => a.name === params.home);
+    if (!agent) {
+        throw new Error(`Agent ${params.home} not found`);
+    }
+    if (!agent.instructions) {
+        throw new Error(`Agent ${agent.name} has no instructions`);
+    }
+    const messages = [
+        { role: "system", content: agent.instructions },
+        { role: "user", content: query }
+    ];
+    let usage = { prompt: 0, completion: 0, total: 0, cost: 0 };
+    let state = '';
+    let maxIterations = 10;
+    let iterations = 0;
+    if (verbose) {
+        console.log('--- DBG executeAgent:', agent.name);
+        console.log('--- DBG query:', `${query?.substring(0, 100)}...`);
+    }
+    if (debug) {
+        console.log('--- DBG executeAgent-system:', agent.instructions);
+        console.log('--- DBG executeAgent-user:', query);
+    }
+    while (iterations < maxIterations) {
+        iterations++;
+        const options = Object.assign({}, (0, modelconfig_1.modelConfig)(agent.model));
+        options.messages = messages;
+        const tools = agent.tools || [];
+        if (tools.length > 0) {
+            options.tools = tools;
+            options.tool_choice = "auto";
+        }
+        if (verbose) {
+            console.log('--- DBG executeAgent:', agent.name, 'iterations:', iterations, '\n', messages.length, '\n---', messages[messages.length - 1]?.content);
+        }
+        const stream = await openai.beta.chat.completions.stream(options);
+        for await (const chunk of stream) {
+            const delta = chunk.choices[0]?.delta;
+            if (delta?.content) {
+                params.stdout.write(delta?.content);
+            }
+        }
+        const final = await stream.finalChatCompletion();
+        const model = (0, modelconfig_1.modelConfig)(agent.model).model;
+        (0, pricing_llm_1.accumulateCost)(usage, model, final.usage);
+        state = final.id;
+        const content = final.choices[0]?.message.content;
+        if (content) {
+            messages.push({ role: "assistant", content });
+        }
+        const toolCalls = final.choices[0]?.message.tool_calls;
+        let hasToolCalls = false;
+        if (toolCalls && toolCalls.length > 0) {
+            hasToolCalls = true;
+            const lastAssistant = messages[messages.length - 1];
+            if (lastAssistant && lastAssistant.role === 'assistant') {
+                lastAssistant.tool_calls = toolCalls;
+            }
+            else {
+                messages.push({
+                    role: "assistant",
+                    content: content || null,
+                    tool_calls: toolCalls
+                });
+            }
+            for (const toolCall of toolCalls) {
+                const args = JSON.parse(toolCall.function.arguments || '{}');
+                if (agent.toolLogic && agent.toolLogic[toolCall.function.name]) {
+                    try {
+                        const result = await agent.toolLogic[toolCall.function.name](args, { state });
+                        messages.push({
+                            role: "tool",
+                            tool_call_id: toolCall.id,
+                            content: typeof result === 'string' ? result : JSON.stringify(result)
+                        });
+                    }
+                    catch (error) {
+                        messages.push({
+                            role: "tool",
+                            tool_call_id: toolCall.id,
+                            content: `Error: ${error instanceof Error ? error.message : 'Unknown error'}`
+                        });
+                    }
+                }
+            }
+        }
+        if (!hasToolCalls) {
+            break;
+        }
+    }
+    return {
+        usage,
+        content: messages[messages.length - 1]?.content || '',
+        messages,
+        state
+    };
+}
+/**
+ * Executes a simple query without agent orchestration or tool handling
+ */
+async function executeQuery(params) {
+    const { query, verbose, model: modelName, instructions } = params;
+    if (!modelName) {
+        throw new Error('executeQuery requires "model" parameter');
+    }
+    const openai = (0, utils_1.openaiInstance)();
+    const model = (0, modelconfig_1.modelConfig)(modelName);
+    const more = {};
+    if (params.json) {
+        more.response_format = { type: "json_object" };
+    }
+    if (params.schema) {
+        more.response_format = { type: "json_object", schema: params.schema };
+    }
+    if (verbose) {
+        console.log('--- DBG query:', modelName, `${query?.substring(0, 100)}...`);
+    }
+    const messages = params.messages || [];
+    // Add the system prompt if provided
+    if (instructions) {
+        messages.unshift({ role: "system", content: instructions });
+    }
+    messages.push({ role: "user", content: query });
+    let usage = { prompt: 0, completion: 0, total: 0, cost: 0 };
+    let state = '';
+    try {
+        const options = Object.assign({}, model, more);
+        options.messages = messages;
+        if (verbose) {
+            console.log('--- DBG executeQuery options:', JSON.stringify(options, null, 2));
+        }
+        const stream = await openai.beta.chat.completions.stream(options);
+        for await (const chunk of stream) {
+            const delta = chunk.choices[0]?.delta;
+            if (delta?.content && params.stdout) {
+                params.stdout.write(delta?.content);
+            }
+        }
+        const final = await stream.finalChatCompletion();
+        (0, pricing_llm_1.accumulateCost)(usage, model.model, final.usage);
+        state = final.id || '';
+        const content = final.choices[0]?.message.content || '';
+        if (verbose) {
+            console.log('--- DBG executeQuery completed, usage:', usage);
+        }
+        return {
+            usage,
+            content,
+            messages: [
+                ...messages,
+                { role: "assistant", content }
+            ],
+            state
+        };
+    }
+    catch (error) {
+        console.error('❌ executeQuery failed:', error);
+        throw error;
+    }
+}
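The legacy entry points above keep the Chat Completions call shape end to end. As a minimal usage sketch of executeQuery in TypeScript, assuming the package re-exports it (dist/src/execute/index.js exists in this release) and using 'HIGH-fast' as an illustrative alias from the modelConfig docs below:

// Hedged sketch: parameter and result names inferred from executeQuery above.
// The root re-export and the 'HIGH-fast' alias are assumptions, not documented API.
import { executeQuery } from 'agentic-api';

const res = await executeQuery({
    model: 'HIGH-fast',                      // required; alias resolved by modelConfig()
    instructions: 'Answer in one sentence.', // optional system prompt, unshifted into messages
    query: 'Summarize the latest release.',
    stdout: process.stdout,                  // content deltas are streamed here as they arrive
    verbose: false,
});
console.log(res.content, res.usage.cost, res.state);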
package/dist/src/execute/modelconfig.d.ts
@@ -0,0 +1,19 @@
+import { AgentModel } from '../types';
+/**
+ * Model configuration for Chat Completions (legacy) and the Responses API
+ *
+ * Handles model configuration with automatic parameter migration
+ * depending on the provider (OpenAI, xAI) and the API in use (legacy or Responses)
+ *
+ * @param model - Model alias (LOW-fast, MEDIUM-fast, HIGH-fast, HIGH-medium, etc.)
+ * @param custom - Custom options
+ * @param custom.thinking - If true, enables high reasoning_effort (via the LLM mapping)
+ * @param forResponses - If true, returns the Responses API format with these mappings:
+ *   - reasoning_effort → reasoning: { effort }
+ *   - verbosity → text.verbosity
+ * @returns The model configuration
+ *
+ * NOTE: For GPT-5, temperature is always fixed at 1
+ * NOTE: reasoning_effort is handled automatically by LLM() per provider
+ */
+export declare function modelConfig(model: string, custom?: any, forResponses?: boolean): AgentModel;
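A hedged call sketch for this declaration; the alias and the thinking flag are illustrative values, not documented defaults:

import { modelConfig } from './modelconfig';

// Chat Completions (legacy) shape: flat reasoning_effort / verbosity strings
const legacyOptions = modelConfig('HIGH-medium', { thinking: true });
// Responses API shape: same alias, remapped to reasoning: { effort } and text.verbosity
const responsesOptions = modelConfig('HIGH-medium', { thinking: true }, true);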
package/dist/src/execute/modelconfig.js
@@ -0,0 +1,56 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.modelConfig = modelConfig;
+const utils_1 = require("../utils");
+const pricing_llm_1 = require("../pricing.llm");
+/**
+ * Model configuration for Chat Completions (legacy) and the Responses API
+ *
+ * Handles model configuration with automatic parameter migration
+ * depending on the provider (OpenAI, xAI) and the API in use (legacy or Responses)
+ *
+ * @param model - Model alias (LOW-fast, MEDIUM-fast, HIGH-fast, HIGH-medium, etc.)
+ * @param custom - Custom options
+ * @param custom.thinking - If true, enables high reasoning_effort (via the LLM mapping)
+ * @param forResponses - If true, returns the Responses API format with these mappings:
+ *   - reasoning_effort → reasoning: { effort }
+ *   - verbosity → text.verbosity
+ * @returns The model configuration
+ *
+ * NOTE: For GPT-5, temperature is always fixed at 1
+ * NOTE: reasoning_effort is handled automatically by LLM() per provider
+ */
+function modelConfig(model, custom, forResponses = false) {
+    const thinking = custom?.thinking || false;
+    delete custom?.thinking;
+    const defaultOptions = Object.assign({
+        stream_options: { "include_usage": true },
+    }, custom || {});
+    //
+    // Get mapping based on provider (OpenAI vs xAI)
+    // LLM() applies reasoning_effort automatically when thinking=true
+    const mapping = (0, pricing_llm_1.LLM)((0, utils_1.openaiInstance)(), thinking);
+    const options = Object.assign({}, mapping[model], defaultOptions);
+    //
+    // For the Responses API: map to the new structure and filter out incompatible parameters
+    // Chat Completions uses reasoning_effort (string) and verbosity (string)
+    // The Responses API uses reasoning: { effort: string } and text.verbosity (string)
+    if (forResponses) {
+        // Map reasoning_effort → reasoning: { effort }
+        if (options.reasoning_effort) {
+            options.reasoning = { effort: options.reasoning_effort };
+            delete options.reasoning_effort;
+        }
+        // Map verbosity → text.verbosity
+        if (options.verbosity) {
+            options.text = { verbosity: options.verbosity };
+            delete options.verbosity;
+        }
+        // Remove parameters not supported by the Responses API
+        const unsupportedParams = ['frequency_penalty', 'presence_penalty', 'stream_options', 'web_search_options'];
+        unsupportedParams.forEach(param => {
+            delete options[param];
+        });
+    }
+    return options;
+}
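To make the forResponses branch concrete, a standalone TypeScript sketch of the same remapping; the input option values are hypothetical, not what mapping[model] actually returns:

// Hypothetical Chat Completions options before the forResponses remapping.
const options: Record<string, any> = {
    model: 'gpt-5',
    reasoning_effort: 'high',
    verbosity: 'low',
    frequency_penalty: 0.2,
    stream_options: { include_usage: true },
};
if (options.reasoning_effort) {
    options.reasoning = { effort: options.reasoning_effort }; // reasoning_effort → reasoning.effort
    delete options.reasoning_effort;
}
if (options.verbosity) {
    options.text = { verbosity: options.verbosity };          // verbosity → text.verbosity
    delete options.verbosity;
}
for (const p of ['frequency_penalty', 'presence_penalty', 'stream_options', 'web_search_options']) {
    delete options[p];                                        // dropped: unsupported by Responses API
}
console.log(options);
// → { model: 'gpt-5', reasoning: { effort: 'high' }, text: { verbosity: 'low' } }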
package/dist/src/execute/responses.d.ts
@@ -0,0 +1,55 @@
+/**
+ * Responses API Implementation
+ *
+ * Migration from Chat Completions to the Responses API for:
+ * - Better support for reasoning models (o-series, gpt-5)
+ * - Tool calls preserved during reasoning via reasoning: { effort }
+ * - Typed SSE events (response.output_text.delta, response.function_call.*)
+ * - No more moreThinkin loop (obsolete): reasoning is handled natively
+ *
+ * Key differences vs legacy:
+ * - `input` replaces `messages` (same array format, only the parameter is renamed)
+ * - `reasoning: { effort: 'low' | 'medium' | 'high' }` replaces `reasoning_effort`
+ * - Structured SSE events vs progressive deltas
+ * - `response.usage` vs `final.usage`
+ * - Tools/function calling: identical format, `parallel_tool_calls` supported
+ *
+ * Optimizations applied (vs the original code):
+ * - OPTIM: centralized helpers (accumulateUsageTokens, stepsToActions)
+ * - BUG FIX: executionResultMerge merges actions correctly (fixed in types.ts)
+ * - BUG FIX: moreThinkin removed (obsolete)
+ * - BUG FIX: reasoning_effort handled by modelConfig only
+ *
+ * TODO [Phase 2]: migrate to openai-agents-js
+ * https://openai.github.io/openai-agents-js/
+ * - Replace handleTransferCall with the SDK's native handoff
+ * - Use swarm.run() for multi-agent orchestration
+ * - Map downstreamAgents to router policies
+ * - Optional integration with the Vercel AI SDK (multi-provider)
+ * - stateGraph stays compatible (generic AgentMessage, API-agnostic)
+ */
+import { AgentConfig, AgenticContext, ExecuteAgentResult, ExecutionResult } from "../types";
+import { ReadCompletionsStreamOptions, ExecuteAgentSetParams } from "./shared";
+/**
+ * RESPONSES API: processes the response stream with tool calls
+ *
+ * Uses the Responses API's typed SSE events:
+ * - response.output_text.delta → text
+ * - response.function_call.* → tool calls
+ * - response.completed → finalization
+ */
+export declare function readCompletionsStream(params: ReadCompletionsStreamOptions): Promise<ExecutionResult>;
+/**
+ * Executes a set of agents to process a user query
+ *
+ * RESPONSES API VERSION
+ */
+export declare function executeAgentSet(agentSet: AgentConfig[], context: AgenticContext, params: ExecuteAgentSetParams): Promise<ExecutionResult>;
+/**
+ * Execute agent - Responses API version
+ */
+export declare function executeAgent(agentSet: AgentConfig[], params: ExecuteAgentSetParams): Promise<ExecuteAgentResult>;
+/**
+ * Execute query - Responses API version
+ */
+export declare function executeQuery(params: ExecuteAgentSetParams): Promise<ExecuteAgentResult>;
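As context for the events this header names, a minimal sketch of consuming them with the OpenAI Node SDK; the model, input, and effort values are placeholders, and how responses.js itself wires these events into stateGraph and sendFeedback is not shown in this diff:

// Minimal sketch of the typed SSE events listed above (OpenAI Node SDK).
// Model name and input are placeholders; error handling omitted.
import OpenAI from 'openai';

const openai = new OpenAI();
const stream = openai.responses.stream({
    model: 'gpt-5',
    input: [{ role: 'user', content: 'Say hello.' }], // `input` replaces `messages`
    reasoning: { effort: 'low' },                     // replaces reasoning_effort
});
for await (const event of stream) {
    if (event.type === 'response.output_text.delta') {
        process.stdout.write(event.delta);             // incremental text
    }
    else if (event.type === 'response.completed') {
        console.log('\nusage:', event.response.usage); // response.usage vs final.usage
    }
}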