agentic-api 2.0.491 → 2.0.592
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +37 -34
- package/dist/src/agents/job.runner.d.ts +130 -0
- package/dist/src/agents/job.runner.js +339 -0
- package/dist/src/agents/reducer.core.d.ts +11 -1
- package/dist/src/agents/reducer.core.js +76 -86
- package/dist/src/agents/reducer.d.ts +1 -0
- package/dist/src/agents/reducer.factory.d.ts +46 -0
- package/dist/src/agents/reducer.factory.js +154 -0
- package/dist/src/agents/reducer.js +1 -0
- package/dist/src/agents/simulator.d.ts +26 -1
- package/dist/src/agents/simulator.dashboard.d.ts +140 -0
- package/dist/src/agents/simulator.dashboard.js +344 -0
- package/dist/src/agents/simulator.js +56 -0
- package/dist/src/agents/simulator.types.d.ts +38 -6
- package/dist/src/agents/simulator.utils.d.ts +22 -1
- package/dist/src/agents/simulator.utils.js +27 -0
- package/dist/src/execute/helpers.js +2 -2
- package/dist/src/execute/modelconfig.d.ts +21 -11
- package/dist/src/execute/modelconfig.js +29 -13
- package/dist/src/execute/responses.js +8 -7
- package/dist/src/index.d.ts +6 -1
- package/dist/src/index.js +21 -1
- package/dist/src/llm/config.d.ts +25 -0
- package/dist/src/llm/config.js +38 -0
- package/dist/src/llm/index.d.ts +48 -0
- package/dist/src/llm/index.js +115 -0
- package/dist/src/llm/openai.d.ts +6 -0
- package/dist/src/llm/openai.js +154 -0
- package/dist/src/llm/pricing.d.ts +26 -0
- package/dist/src/llm/pricing.js +129 -0
- package/dist/src/llm/xai.d.ts +17 -0
- package/dist/src/llm/xai.js +90 -0
- package/dist/src/pricing.llm.d.ts +3 -15
- package/dist/src/pricing.llm.js +10 -251
- package/dist/src/prompts.d.ts +0 -1
- package/dist/src/prompts.js +51 -118
- package/dist/src/rag/embeddings.d.ts +5 -1
- package/dist/src/rag/embeddings.js +15 -5
- package/dist/src/rag/parser.js +1 -1
- package/dist/src/rag/rag.manager.d.ts +44 -6
- package/dist/src/rag/rag.manager.js +138 -49
- package/dist/src/rag/types.d.ts +2 -0
- package/dist/src/rag/usecase.js +8 -11
- package/dist/src/rules/git/git.health.js +59 -4
- package/dist/src/rules/git/repo.d.ts +11 -4
- package/dist/src/rules/git/repo.js +64 -18
- package/dist/src/rules/git/repo.pr.d.ts +8 -0
- package/dist/src/rules/git/repo.pr.js +45 -1
- package/dist/src/rules/git/repo.tools.d.ts +5 -1
- package/dist/src/rules/git/repo.tools.js +54 -7
- package/dist/src/rules/types.d.ts +14 -0
- package/dist/src/rules/utils.matter.d.ts +0 -20
- package/dist/src/rules/utils.matter.js +42 -74
- package/dist/src/scrapper.js +2 -2
- package/dist/src/utils.d.ts +0 -8
- package/dist/src/utils.js +1 -28
- package/package.json +1 -1
package/dist/src/agents/reducer.core.js
@@ -5,13 +5,17 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.MapLLM = void 0;
 const execute_1 = require("../execute");
-const utils_1 = require("../utils");
 /**
  * MapLLM - Main orchestrator for hierarchical reduce
  */
 class MapLLM {
-    constructor(loader) {
+    constructor(loader, options) {
         this.loader = loader;
+        // Default options
+        this.options = {
+            finalReduce: options?.finalReduce ?? true,
+            reduceThresholdBytes: options?.reduceThresholdBytes ?? 0
+        };
         //
         // ✅ If no agentConfig is provided, try to extract it from the loader
         if (this.hasAgents(loader)) {
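The new constructor options default to `finalReduce: true` and `reduceThresholdBytes: 0` (auto-reduce disabled). A minimal usage sketch, assuming `MapLLM` and `StringNativeLoader` are re-exported from the package root; the loader options mirror the ones used in `reducer.factory.js` further below:

```typescript
// Sketch only, not the package's documented API surface.
import { MapLLM, StringNativeLoader } from 'agentic-api';

const longDocument = '...'; // placeholder for the text to process

// Chunk by paragraphs, 10 per chunk.
const loader = new StringNativeLoader(longDocument, { type: 'paragraphs', size: 10 });

// finalReduce: run one last reduce pass over the accumulator (default: true).
// reduceThresholdBytes: force an intermediate reduce once the accumulator
// exceeds this size; the default 0 keeps the old, callback-driven behavior.
const mapper = new MapLLM(loader, { finalReduce: true, reduceThresholdBytes: 8192 });
```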
@@ -53,11 +57,7 @@ class MapLLM {
         let position = 0;
         let totalChunkSize = 0;
         let totalReduce = 0;
-        const
-        const openai = (0, utils_1.openaiInstance)();
-        const llm = Object.assign({}, model);
-        llm.stream = false;
-        delete llm.stream_options;
+        const modelName = result.model || 'LOW-fast';
         //
         // maxIterations is set by the callback
         while (!result.maxIterations) {
@@ -118,35 +118,23 @@
             }
             else {
                 //
-                //
-                // DOCUMENT MODE:
-                //
-                const messages = isFirstChunk ? [
-                    { role: "
-                    { role: "user", content: chunk.content }
-                ] : [
-                    { role: "system", content: config.digestPrompt },
-                    { role: "assistant", content: accContent },
-                    { role: "user", content: chunk.content }
+                // ══════════════════════════════════════════════════════════
+                // DOCUMENT MODE: executeQuery() with the unified Responses API
+                // ══════════════════════════════════════════════════════════
+                const messages = isFirstChunk ? [] : [
+                    { role: "assistant", content: accContent }
                 ];
-
-
-
-
-
-
-
-
-
-
-
-                };
-            }
-            const chat = await openai.chat.completions.create(llm);
-            const digestMessage = chat.choices[0]?.message;
-            //
-            // Parse JSON if structured output is enabled
-            digestContent = digestMessage.content || '';
+                const execResult = await (0, execute_1.executeQuery)({
+                    query: chunk.content,
+                    model: modelName,
+                    instructions: config.digestPrompt,
+                    messages,
+                    schema: result.format ? result.format.schema : undefined,
+                    verbose: verbose,
+                    stdout: init.stdout || execute_1.DummyWritable
+                });
+                // executeQuery returns content - parse if structured output is enabled
+                digestContent = execResult.content;
             if (result.format && digestContent) {
                 try {
                     digestContent = JSON.parse(digestContent);
@@ -169,31 +157,31 @@
                 }
                 break;
             }
-            //
-
+            // Auto-reduce if accumulator exceeds threshold (if configured)
+            const accSize = typeof result.acc === 'string' ? result.acc.length : JSON.stringify(result.acc).length;
+            const shouldAutoReduce = this.options.reduceThresholdBytes > 0 && accSize > this.options.reduceThresholdBytes;
+            // Reduce decision based on the callback or the auto-threshold
+            if (!result.continue && !shouldAutoReduce) {
                 continue;
             }
-
-
-                { role: "system", content: config.reducePrompt },
-                { role: "user", content: accForReduce }
-            ];
-            // Configure structured output if format is specified
-            if (result.format) {
-                llm.response_format = {
-                    type: "json_schema",
-                    json_schema: {
-                        name: result.format.name,
-                        schema: result.format.schema,
-                        strict: result.format.strict ?? true
-                    }
-                };
+            if (verbose && shouldAutoReduce) {
+                console.log(`🔄 Auto-reduce triggered: acc size ${accSize} > threshold ${this.options.reduceThresholdBytes}`);
             }
-            const
-
+            const accForReduce = typeof result.acc === 'string' ? result.acc : JSON.stringify(result.acc);
+            //
+            // Intermediate reduce with executeQuery
+            const reduceResult = await (0, execute_1.executeQuery)({
+                query: accForReduce,
+                model: modelName,
+                instructions: config.reducePrompt,
+                messages: [],
+                schema: result.format ? result.format.schema : undefined,
+                verbose: verbose,
+                stdout: init.stdout || execute_1.DummyWritable
+            });
             //
             // should not happen
-            if (!
+            if (!reduceResult.content) {
                 continue;
             }
             // 3. Reduce with system - Update result.acc (replace)
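The auto-reduce gate above only fires when a positive threshold is configured. A standalone sketch of the same size check, with illustrative values:

```typescript
// Illustrative values only; same gate as the hunk above.
const reduceThresholdBytes = 8192; // 0 disables auto-reduce entirely
const acc: string | object = { memory: 'x'.repeat(10_000) };

// Strings are measured directly, objects via their JSON serialization.
const accSize = typeof acc === 'string' ? acc.length : JSON.stringify(acc).length;
const shouldAutoReduce = reduceThresholdBytes > 0 && accSize > reduceThresholdBytes;
// accSize is 10013 here (> 8192), so an intermediate reduce runs even when
// the callback did not set result.continue.
```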
@@ -201,15 +189,15 @@
             // Parse JSON if structured output is enabled
             if (result.format) {
                 try {
-                    result.acc = JSON.parse(
+                    result.acc = JSON.parse(reduceResult.content);
                 }
                 catch (e) {
-                    console.warn('Failed to parse reduce result as JSON:',
-                    result.acc =
+                    console.warn('Failed to parse reduce result as JSON:', reduceResult.content);
+                    result.acc = reduceResult.content;
                 }
             }
             else {
-                result.acc =
+                result.acc = reduceResult.content;
             }
             if (verbose) {
                 console.log(`✅ Reduce ${result.metadata?.iterations} processed (${chunk.content.length} chars)`);
@@ -224,38 +212,40 @@
                 throw new Error(`Failed to process chunk ${result.metadata?.iterations}: ${error}`);
             }
         }
-        // Final reduce
-
-
-
-
-
-
-
-
-
-
-
-
-
+        // Final reduce (optional, controlled by options.finalReduce)
+        if (this.options.finalReduce) {
+            const finalAccContent = typeof result.acc === 'string' ? result.acc : JSON.stringify(result.acc);
+            //
+            // Final reduce with executeQuery
+            const finalResult = await (0, execute_1.executeQuery)({
+                query: finalAccContent,
+                model: modelName,
+                instructions: config.reducePrompt,
+                messages: [],
+                schema: result.format ? result.format.schema : undefined,
+                verbose: verbose,
+                stdout: init.stdout || execute_1.DummyWritable
+            });
+            const finalContent = finalResult.content || '';
+            // Parse JSON if structured output is enabled
+            if (result.format && finalContent) {
+                try {
+                    result.acc = JSON.parse(finalContent);
+                }
+                catch (e) {
+                    console.warn('Failed to parse final result as JSON:', finalContent);
+                    result.acc = finalContent;
             }
-        };
-        }
-        const reduce = await openai.chat.completions.create(llm);
-        const finalContent = reduce.choices[0]?.message.content || '';
-        // Parse JSON if structured output is enabled
-        if (result.format && finalContent) {
-            try {
-                result.acc = JSON.parse(finalContent);
             }
-
-            console.warn('Failed to parse final result as JSON:', finalContent);
+            else {
                 result.acc = finalContent;
             }
+            if (verbose) {
+                console.log('🎯 Final reduce completed');
+            }
         }
-        else {
-
+        else if (verbose) {
+            console.log('⏭️ Final reduce skipped (finalReduce=false)');
         }
         const endTime = Date.now();
         const processingTimeMs = endTime - startTime;
package/dist/src/agents/reducer.factory.d.ts
@@ -0,0 +1,46 @@
+/**
+ * Factory to create a ReducerFn compatible with JobRunner using MapLLM
+ */
+import type { StructuredOutputFormat } from './reducer.types';
+import type { ReducerFn } from './job.runner';
+/**
+ * Options for createMapLLMReducer factory
+ */
+export interface CreateMapLLMReducerOptions {
+    /** Prompt for digesting task + result into facts */
+    digestPrompt: string;
+    /** Prompt for reducing/fusing with previous memory */
+    reducePrompt: string;
+    /** Custom JSON schema for ReducedJobMemory (optional, uses default if not provided) */
+    format?: StructuredOutputFormat;
+    /** Model to use (default: 'LOW') */
+    model?: string;
+    /** Whether to execute final reduce pass (default: true) */
+    finalReduce?: boolean;
+    /** Threshold in bytes to trigger auto intermediate reduce (optional) */
+    reduceThresholdBytes?: number;
+    /** Enable verbose logging (default: false) */
+    verbose?: boolean;
+}
+/**
+ * Creates a ReducerFn compatible with JobRunner that uses MapLLM internally.
+ *
+ * This factory bridges JobRunner and MapLLM, allowing LLM-powered reduction
+ * with structured outputs while keeping both modules independent.
+ *
+ * @example
+ * ```typescript
+ * const reducer = createMapLLMReducer({
+ *   digestPrompt: "Analyze this task result and extract key facts...",
+ *   reducePrompt: "Merge with previous memory to produce updated canonical memory...",
+ *   model: 'LOW'
+ * });
+ *
+ * const runner = new JobRunner({
+ *   planner: myPlanner,
+ *   executor: myExecutor,
+ *   reducer: reducer // ← ReducerFn compatible
+ * });
+ * ```
+ */
+export declare function createMapLLMReducer(options: CreateMapLLMReducerOptions): ReducerFn;
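For a custom `format`, the `StructuredOutputFormat` shape can be inferred from `DEFAULT_MEMORY_SCHEMA` in `reducer.factory.js` below (`name`/`schema`/`strict`). A hedged sketch, assuming `createMapLLMReducer` is re-exported from the package root; the schema contents are illustrative:

```typescript
import { createMapLLMReducer } from 'agentic-api';

// Sketch only: a narrower memory schema than the default.
const reducer = createMapLLMReducer({
    digestPrompt: 'Extract key facts from this task result...',
    reducePrompt: 'Merge with previous memory...',
    format: {
        name: 'MinimalMemory',
        schema: {
            type: 'object',
            properties: { memory: { type: 'string' } },
            required: ['memory'],
            additionalProperties: false // required by strict mode, per the note below
        },
        strict: true
    },
    reduceThresholdBytes: 16384,
    verbose: true
});
```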
package/dist/src/agents/reducer.factory.js
@@ -0,0 +1,154 @@
+"use strict";
+/**
+ * Factory to create a ReducerFn compatible with JobRunner using MapLLM
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.createMapLLMReducer = createMapLLMReducer;
+const reducer_core_1 = require("./reducer.core");
+const reducer_loaders_1 = require("./reducer.loaders");
+/**
+ * Default JSON schema for ReducedJobMemory
+ * Note: For strict mode, all properties must be in required array and
+ * all nested objects need additionalProperties: false
+ */
+const DEFAULT_MEMORY_SCHEMA = {
+    name: 'ReducedJobMemory',
+    schema: {
+        type: 'object',
+        properties: {
+            memory: {
+                type: 'string',
+                description: 'Short canonical memory summarizing progress and key facts'
+            },
+            index: {
+                type: 'object',
+                description: 'Stable references: artifact IDs, data tables, error traces',
+                properties: {
+                    artifacts: {
+                        type: 'array',
+                        items: { type: 'string' },
+                        description: 'List of artifact references'
+                    },
+                    taskIds: {
+                        type: 'array',
+                        items: { type: 'string' },
+                        description: 'List of completed task IDs'
+                    },
+                    errors: {
+                        type: 'array',
+                        items: { type: 'string' },
+                        description: 'List of error messages'
+                    }
+                },
+                required: ['artifacts', 'taskIds', 'errors'],
+                additionalProperties: false
+            },
+            statusLine: {
+                type: 'string',
+                description: 'UI progress line'
+            }
+        },
+        required: ['memory', 'index', 'statusLine'],
+        additionalProperties: false
+    },
+    strict: true
+};
+/**
+ * Creates a ReducerFn compatible with JobRunner that uses MapLLM internally.
+ *
+ * This factory bridges JobRunner and MapLLM, allowing LLM-powered reduction
+ * with structured outputs while keeping both modules independent.
+ *
+ * @example
+ * ```typescript
+ * const reducer = createMapLLMReducer({
+ *   digestPrompt: "Analyze this task result and extract key facts...",
+ *   reducePrompt: "Merge with previous memory to produce updated canonical memory...",
+ *   model: 'LOW'
+ * });
+ *
+ * const runner = new JobRunner({
+ *   planner: myPlanner,
+ *   executor: myExecutor,
+ *   reducer: reducer // ← ReducerFn compatible
+ * });
+ * ```
+ */
+function createMapLLMReducer(options) {
+    const { digestPrompt, reducePrompt, format = DEFAULT_MEMORY_SCHEMA, model = 'LOW', finalReduce = true, reduceThresholdBytes, verbose = false } = options;
+    return async (previous, task, result) => {
+        // Serialize context for reduction
+        const context = JSON.stringify({
+            previousMemory: previous,
+            task: {
+                id: task.id,
+                title: task.title,
+                type: task.type
+            },
+            result: {
+                taskId: result.taskId,
+                ok: result.ok,
+                summary: result.summary,
+                error: result.error,
+                artifacts: result.artifacts,
+                // Include data if small enough, otherwise just note its presence
+                data: result.data && JSON.stringify(result.data).length < 2000
+                    ? result.data
+                    : (result.data ? '[data truncated]' : undefined)
+            }
+        }, null, 2);
+        // Create loader with single-chunk strategy (the context is already compact)
+        const loader = new reducer_loaders_1.StringNativeLoader(context, { type: 'paragraphs', size: 10 });
+        // Create MapLLM with options
+        const mapllmOptions = {
+            finalReduce,
+            reduceThresholdBytes
+        };
+        const mapper = new reducer_core_1.MapLLM(loader, mapllmOptions);
+        // Config for MapLLM
+        const config = {
+            digestPrompt,
+            reducePrompt
+        };
+        // Callback: accumulate structured output
+        const callback = (res, current) => {
+            // If current is already an object (structured output), use it directly
+            if (typeof current === 'object' && current !== null) {
+                res.acc = current;
+            }
+            else if (typeof current === 'string') {
+                // Try to parse as JSON
+                try {
+                    res.acc = JSON.parse(current);
+                }
+                catch {
+                    // Fallback: wrap in memory field
+                    res.acc = {
+                        memory: current,
+                        index: res.acc?.index || {}
+                    };
+                }
+            }
+            return res;
+        };
+        // Init with previous memory or empty
+        const init = {
+            acc: previous || { memory: '', index: {} },
+            config,
+            format,
+            model,
+            verbose
+        };
+        // Execute MapLLM reduce
+        const out = await mapper.reduce(callback, init);
+        // Validate and return
+        const result_acc = out.acc;
+        // Ensure required fields exist
+        const reducedMemory = {
+            memory: typeof result_acc.memory === 'string' ? result_acc.memory : JSON.stringify(result_acc.memory || ''),
+            index: typeof result_acc.index === 'object' ? result_acc.index : {},
+            statusLine: result_acc.statusLine
+        };
+        return reducedMemory;
+    };
+}
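An object that satisfies `DEFAULT_MEMORY_SCHEMA` (all three top-level fields required, no extra properties) would look like the following; the values are illustrative:

```typescript
// Illustrative ReducedJobMemory payload matching DEFAULT_MEMORY_SCHEMA.
const reduced = {
    memory: 'Fetched repo metadata; 2 of 3 tasks done, one retry pending.',
    index: {
        artifacts: ['report-42'],
        taskIds: ['task-1', 'task-2'],
        errors: ['task-3: request timed out']
    },
    statusLine: '2/3 tasks complete'
};
```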
package/dist/src/agents/reducer.js
@@ -17,3 +17,4 @@ Object.defineProperty(exports, "__esModule", { value: true });
 __exportStar(require("./reducer.core"), exports);
 __exportStar(require("./reducer.loaders"), exports);
 __exportStar(require("./reducer.types"), exports);
+__exportStar(require("./reducer.factory"), exports);
package/dist/src/agents/simulator.d.ts
@@ -1,10 +1,35 @@
-import { SimulatorConfig, SimulationOptions, SimulationResult } from './simulator.types';
+import { SimulatorConfig, SimulationOptions, SimulationResult, TestScenario, TestCaseInput } from './simulator.types';
 export declare class AgentSimulator {
     private config;
     private executor;
     private lastExecution?;
     constructor(config: SimulatorConfig);
     /**
+     * Run a test case with the scenario and the test parameters kept separate
+     *
+     * @param scenario - Stable context (goals, persona, result)
+     * @param testCase - Test parameters (query, maxExchanges, model, expectedTools)
+     * @returns SimulationResult
+     *
+     * @example
+     * ```typescript
+     * const scenario = {
+     *   goals: 'Obtenir le nombre secret 1942',
+     *   persona: PERSONA_PATIENT,
+     *   result: '{"success": boolean, "error": string}'
+     * };
+     *
+     * const result = await simulator.testCase(scenario, {
+     *   query: 'À quel nombre penses-tu?',
+     *   maxExchanges: 3, // default: 1 (oneshot)
+     *   expectedTools: { 'transferAgents': { equal: 1 } } // default: {}
+     * });
+     * ```
+     */
+    testCase(scenario: TestScenario, testCase: TestCaseInput): Promise<SimulationResult>;
+    /**
+     * @deprecated Use testCase(scenario, case) instead
+     *
     * Run the full simulation
     *
     * Architecture:
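The refactor separates the stable `TestScenario` from the per-run `TestCaseInput`, so one scenario can drive several cases. A hedged sketch reusing the names from the JSDoc example above; simulator construction and the persona constant are assumed:

```typescript
// Assumes an AgentSimulator instance and a persona value as in the JSDoc above.
declare const simulator: AgentSimulator;
declare const PERSONA_PATIENT: string;

const scenario = {
    goals: 'Obtenir le nombre secret 1942',
    persona: PERSONA_PATIENT,
    result: '{"success": boolean, "error": string}'
};

// Oneshot: maxExchanges defaults to 1 and expectedTools to {}.
const quick = await simulator.testCase(scenario, { query: 'À quel nombre penses-tu?' });

// Multi-turn with a tool-usage assertion.
const full = await simulator.testCase(scenario, {
    query: 'À quel nombre penses-tu?',
    maxExchanges: 3,
    expectedTools: { transferAgents: { equal: 1 } }
});
```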
package/dist/src/agents/simulator.dashboard.d.ts
@@ -0,0 +1,140 @@
+import { TestScenario, TestCaseInput, SimulatorConfig } from './simulator.types';
+import { AgentMessage } from '../stategraph';
+/**
+ * Test case combining scenario and case input
+ * Input JSON file format
+ */
+export interface DashboardTestCase {
+    id?: string;
+    name?: string;
+    scenario: TestScenario;
+    case: TestCaseInput;
+}
+/**
+ * Input JSON file format
+ */
+export interface DashboardInput {
+    name?: string;
+    description?: string;
+    config?: Partial<SimulatorConfig>;
+    tests: DashboardTestCase[];
+}
+/**
+ * Execution status of a test
+ */
+export type TestStatus = 'pending' | 'running' | 'completed' | 'failed' | 'error';
+/**
+ * JSONL line for a test result
+ */
+export interface DashboardOutputLine {
+    type: 'start' | 'result' | 'end' | 'error';
+    timestamp: string;
+    sessionId?: string;
+    totalTests?: number;
+    testId?: string;
+    testIndex?: number;
+    name?: string;
+    description?: string;
+    query?: string;
+    status?: TestStatus;
+    success?: boolean;
+    message?: string;
+    error?: string;
+    exchangeCount?: number;
+    messages?: AgentMessage[];
+    duration?: number;
+    summary?: {
+        total: number;
+        passed: number;
+        failed: number;
+        errors: number;
+        totalDuration: number;
+    };
+}
+/**
+ * Execution status for the backend controller
+ */
+export interface DashboardStatus {
+    isRunning: boolean;
+    sessionId: string | null;
+    currentTest: number;
+    totalTests: number;
+    passed: number;
+    failed: number;
+    errors: number;
+    startTime: Date | null;
+    lastUpdate: Date | null;
+}
+export declare class SimulatorDashboard {
+    private config;
+    private status;
+    private simulator;
+    private abortController;
+    private _currentOutputPath;
+    private _currentInputPath;
+    constructor(config: SimulatorConfig);
+    /**
+     * Path of the current output file (or of the last run)
+     */
+    get currentOutputPath(): string | null;
+    /**
+     * Path of the current input file (or of the last run)
+     */
+    get currentInputPath(): string | null;
+    private createInitialStatus;
+    /**
+     * Get the current status (for the backend controller)
+     */
+    getStatus(): DashboardStatus;
+    /**
+     * Check whether a run is in progress
+     */
+    isRunning(): boolean;
+    /**
+     * Abort the run in progress
+     */
+    abort(): void;
+    /**
+     * Load an input JSON file
+     */
+    loadInputFile(filePath: string): Promise<DashboardInput>;
+    /**
+     * Generate the output file path from the input file
+     * Example: tests/my-tests.json → tests/results.my-tests.jsonl
+     */
+    createOutputPath(inputPath: string): string;
+    /**
+     * Load existing JSONL results (in progress or finished)
+     * Supports partial streaming (file still being written)
+     *
+     * @param inputPath - Path of the input JSON file (the output path is derived automatically)
+     * @returns The parsed lines, or null if the file does not exist
+     */
+    loadResults(inputPath?: string): Promise<DashboardOutputLine[] | null>;
+    /**
+     * Get the results summary (last line with type='end')
+     * @param inputPath - Path of the input JSON file
+     */
+    getResultsSummary(inputPath?: string): Promise<DashboardOutputLine['summary'] | null>;
+    /**
+     * Check whether the results are complete (contains an 'end' line)
+     * @param inputPath - Path of the input JSON file
+     */
+    isResultsComplete(inputPath?: string): Promise<boolean>;
+    /**
+     * Run the tests and write the results as JSONL
+     *
+     * @param input - Input data (or path to a JSON file)
+     * @param outputPath - Path of the output JSONL file
+     * @param onLine - Optional callback for each JSONL line (streaming)
+     */
+    run(input: DashboardInput | string, outputPath?: string, onLine?: (line: DashboardOutputLine) => void): Promise<DashboardOutputLine[]>;
+    /**
+     * Run with a streaming callback (for SSE/WebSocket)
+     */
+    runWithStream(input: DashboardInput | string, onLine: (line: DashboardOutputLine) => void): Promise<DashboardOutputLine[]>;
+    /**
+     * Create an example input JSON file for tests
+     */
+    static createExampleInput(): DashboardInput;
+}
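A hedged end-to-end sketch using only the members declared above; the config value and input path are placeholders:

```typescript
// Assumes a valid SimulatorConfig; 'tests/my-tests.json' is a placeholder path.
declare const config: SimulatorConfig;
const dashboard = new SimulatorDashboard(config);

// Stream each JSONL line as it is written (suited to SSE/WebSocket relays).
const lines = await dashboard.runWithStream('tests/my-tests.json', (line) => {
    if (line.type === 'result') {
        console.log(`${line.testId}: ${line.success ? 'PASS' : 'FAIL'} (${line.duration}ms)`);
    }
});

// Later, possibly from another process: the output path is derived as
// tests/results.my-tests.jsonl (see createOutputPath above).
const complete = await dashboard.isResultsComplete('tests/my-tests.json');
const summary = await dashboard.getResultsSummary('tests/my-tests.json');
```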