opencode-swarm-plugin 0.20.0 → 0.22.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.beads/issues.jsonl +213 -0
- package/INTEGRATION_EXAMPLE.md +66 -0
- package/README.md +352 -522
- package/dist/index.js +2046 -984
- package/dist/plugin.js +2051 -1017
- package/docs/analysis/subagent-coordination-patterns.md +2 -0
- package/docs/semantic-memory-cli-syntax.md +123 -0
- package/docs/swarm-mail-architecture.md +1147 -0
- package/evals/README.md +116 -0
- package/evals/evalite.config.ts +15 -0
- package/evals/example.eval.ts +32 -0
- package/evals/fixtures/decomposition-cases.ts +105 -0
- package/evals/lib/data-loader.test.ts +288 -0
- package/evals/lib/data-loader.ts +111 -0
- package/evals/lib/llm.ts +115 -0
- package/evals/scorers/index.ts +200 -0
- package/evals/scorers/outcome-scorers.test.ts +27 -0
- package/evals/scorers/outcome-scorers.ts +349 -0
- package/evals/swarm-decomposition.eval.ts +112 -0
- package/package.json +8 -1
- package/scripts/cleanup-test-memories.ts +346 -0
- package/src/beads.ts +49 -0
- package/src/eval-capture.ts +487 -0
- package/src/index.ts +45 -3
- package/src/learning.integration.test.ts +19 -4
- package/src/output-guardrails.test.ts +438 -0
- package/src/output-guardrails.ts +381 -0
- package/src/schemas/index.ts +18 -0
- package/src/schemas/swarm-context.ts +115 -0
- package/src/storage.ts +117 -5
- package/src/streams/events.test.ts +296 -0
- package/src/streams/events.ts +93 -0
- package/src/streams/migrations.test.ts +24 -20
- package/src/streams/migrations.ts +51 -0
- package/src/streams/projections.ts +187 -0
- package/src/streams/store.ts +275 -0
- package/src/swarm-orchestrate.ts +771 -189
- package/src/swarm-prompts.ts +84 -12
- package/src/swarm.integration.test.ts +124 -0
- package/vitest.integration.config.ts +6 -0
- package/vitest.integration.setup.ts +48 -0
|
@@ -0,0 +1,487 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Eval Data Capture - Captures real swarm execution data for evals
|
|
3
|
+
*
|
|
4
|
+
* Records decomposition inputs, outputs, and outcomes to JSONL files
|
|
5
|
+
* that can be used as ground truth for Evalite evals.
|
|
6
|
+
*
|
|
7
|
+
* Data flow:
|
|
8
|
+
* 1. swarm_decompose captures: task, context, generated decomposition
|
|
9
|
+
* 2. swarm_complete captures: outcome signals per subtask
|
|
10
|
+
* 3. swarm_record_outcome captures: learning signals
|
|
11
|
+
* 4. Human feedback (optional): accept/reject/modify
|
|
12
|
+
*
|
|
13
|
+
* @module eval-capture
|
|
14
|
+
*/
|
|
15
|
+
import { z } from "zod";
|
|
16
|
+
import * as fs from "fs";
|
|
17
|
+
import * as path from "path";
|
|
18
|
+
|
|
19
|
+
// ============================================================================
|
|
20
|
+
// Schemas
|
|
21
|
+
// ============================================================================
|
|
22
|
+
|
|
23
|
+
/**
|
|
24
|
+
* Subtask outcome - what actually happened
|
|
25
|
+
*/
|
|
26
|
+
export const SubtaskOutcomeSchema = z.object({
|
|
27
|
+
/** Subtask bead ID */
|
|
28
|
+
bead_id: z.string(),
|
|
29
|
+
/** Subtask title */
|
|
30
|
+
title: z.string(),
|
|
31
|
+
/** Planned files */
|
|
32
|
+
planned_files: z.array(z.string()),
|
|
33
|
+
/** Actual files touched */
|
|
34
|
+
actual_files: z.array(z.string()),
|
|
35
|
+
/** Duration in ms */
|
|
36
|
+
duration_ms: z.number().int().min(0),
|
|
37
|
+
/** Error count */
|
|
38
|
+
error_count: z.number().int().min(0),
|
|
39
|
+
/** Retry count */
|
|
40
|
+
retry_count: z.number().int().min(0),
|
|
41
|
+
/** Success */
|
|
42
|
+
success: z.boolean(),
|
|
43
|
+
/** Failure mode if failed */
|
|
44
|
+
failure_mode: z.string().optional(),
|
|
45
|
+
});
|
|
46
|
+
export type SubtaskOutcome = z.infer<typeof SubtaskOutcomeSchema>;
|
|
47
|
+
|
|
48
|
+
/**
 * Complete eval record - input, output, and outcome.
 *
 * One record per decomposition, persisted as a single JSONL line. The INPUT
 * and OUTPUT sections are written by captureDecomposition; the OUTCOME,
 * HUMAN FEEDBACK, and COMPUTED METRICS sections are filled in later by
 * captureSubtaskOutcome / captureHumanFeedback / finalizeEvalRecord, which
 * is why those fields are optional.
 */
export const EvalRecordSchema = z.object({
  /** Unique ID for this eval record (the epic ID is used as the capture key) */
  id: z.string(),
  /** Timestamp when decomposition was generated */
  timestamp: z.string(), // ISO-8601
  /** Project path */
  project_path: z.string(),

  // INPUT
  /** Original task description */
  task: z.string(),
  /** Context provided (codebase info, CASS results, etc.) */
  context: z.string().optional(),
  /** Strategy used for decomposition */
  strategy: z.enum(["file-based", "feature-based", "risk-based", "auto"]),
  /** Max subtasks requested (1-10) */
  max_subtasks: z.number().int().min(1).max(10),

  // OUTPUT (the decomposition)
  /** Epic title */
  epic_title: z.string(),
  /** Epic description */
  epic_description: z.string().optional(),
  /** Generated subtasks */
  subtasks: z.array(
    z.object({
      title: z.string(),
      description: z.string().optional(),
      files: z.array(z.string()),
      // Presumably indices of prerequisite subtasks within this array —
      // TODO confirm against the decomposition producer.
      dependencies: z.array(z.number()).optional(),
      /** 1 = trivial .. 5 = most complex */
      estimated_complexity: z.number().int().min(1).max(5).optional(),
    }),
  ),

  // OUTCOME (what actually happened)
  /** Subtask outcomes, appended as subtasks complete */
  outcomes: z.array(SubtaskOutcomeSchema).optional(),
  /** Overall success (all subtasks succeeded) */
  overall_success: z.boolean().optional(),
  /** Total duration (sum of all subtasks) */
  total_duration_ms: z.number().int().min(0).optional(),
  /** Total errors across all subtasks */
  total_errors: z.number().int().min(0).optional(),

  // HUMAN FEEDBACK (optional)
  /** Human accepted the decomposition as-is */
  human_accepted: z.boolean().optional(),
  /** Human modified the decomposition */
  human_modified: z.boolean().optional(),
  /** Human feedback notes */
  human_notes: z.string().optional(),

  // COMPUTED METRICS
  /** File overlap between subtasks (should be 0) */
  file_overlap_count: z.number().int().min(0).optional(),
  /**
   * Scope accuracy: distinct actual files / distinct planned files.
   * NOTE(review): capped at 2 by validation — records where the agent
   * touched more than 2x the planned files would fail to parse; confirm
   * this is intended.
   */
  scope_accuracy: z.number().min(0).max(2).optional(),
  /** Time balance: max duration / min duration (lower is better, min 1) */
  time_balance_ratio: z.number().min(1).optional(),
});
export type EvalRecord = z.infer<typeof EvalRecordSchema>;
|
|
112
|
+
|
|
113
|
+
/**
|
|
114
|
+
* Partial record for in-progress capture
|
|
115
|
+
*/
|
|
116
|
+
export type PartialEvalRecord = Partial<EvalRecord> & {
|
|
117
|
+
id: string;
|
|
118
|
+
timestamp: string;
|
|
119
|
+
task: string;
|
|
120
|
+
};
|
|
121
|
+
|
|
122
|
+
// ============================================================================
|
|
123
|
+
// Storage
|
|
124
|
+
// ============================================================================
|
|
125
|
+
|
|
126
|
+
/**
 * Default path for eval data, relative to the project root.
 *
 * JSONL format: one (possibly partial) EvalRecord per line.
 */
export const DEFAULT_EVAL_DATA_PATH = ".opencode/eval-data.jsonl";
|
|
130
|
+
|
|
131
|
+
/**
|
|
132
|
+
* Get the eval data file path for a project
|
|
133
|
+
*/
|
|
134
|
+
export function getEvalDataPath(projectPath: string): string {
|
|
135
|
+
return path.join(projectPath, DEFAULT_EVAL_DATA_PATH);
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
/**
|
|
139
|
+
* Ensure the eval data directory exists
|
|
140
|
+
*/
|
|
141
|
+
export function ensureEvalDataDir(projectPath: string): void {
|
|
142
|
+
const evalPath = getEvalDataPath(projectPath);
|
|
143
|
+
const dir = path.dirname(evalPath);
|
|
144
|
+
if (!fs.existsSync(dir)) {
|
|
145
|
+
fs.mkdirSync(dir, { recursive: true });
|
|
146
|
+
}
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
/**
|
|
150
|
+
* Append an eval record to the JSONL file
|
|
151
|
+
*/
|
|
152
|
+
export function appendEvalRecord(
|
|
153
|
+
projectPath: string,
|
|
154
|
+
record: EvalRecord | PartialEvalRecord,
|
|
155
|
+
): void {
|
|
156
|
+
ensureEvalDataDir(projectPath);
|
|
157
|
+
const evalPath = getEvalDataPath(projectPath);
|
|
158
|
+
const line = JSON.stringify(record) + "\n";
|
|
159
|
+
fs.appendFileSync(evalPath, line, "utf-8");
|
|
160
|
+
}
|
|
161
|
+
|
|
162
|
+
/**
|
|
163
|
+
* Read all eval records from a project
|
|
164
|
+
*/
|
|
165
|
+
export function readEvalRecords(projectPath: string): EvalRecord[] {
|
|
166
|
+
const evalPath = getEvalDataPath(projectPath);
|
|
167
|
+
if (!fs.existsSync(evalPath)) {
|
|
168
|
+
return [];
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
const content = fs.readFileSync(evalPath, "utf-8");
|
|
172
|
+
const lines = content.trim().split("\n").filter(Boolean);
|
|
173
|
+
|
|
174
|
+
return lines.map((line) => {
|
|
175
|
+
const parsed = JSON.parse(line);
|
|
176
|
+
return EvalRecordSchema.parse(parsed);
|
|
177
|
+
});
|
|
178
|
+
}
|
|
179
|
+
|
|
180
|
+
/**
|
|
181
|
+
* Read partial records (for updating in-progress records)
|
|
182
|
+
*/
|
|
183
|
+
export function readPartialRecords(projectPath: string): PartialEvalRecord[] {
|
|
184
|
+
const evalPath = getEvalDataPath(projectPath);
|
|
185
|
+
if (!fs.existsSync(evalPath)) {
|
|
186
|
+
return [];
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
const content = fs.readFileSync(evalPath, "utf-8");
|
|
190
|
+
const lines = content.trim().split("\n").filter(Boolean);
|
|
191
|
+
|
|
192
|
+
return lines.map((line) => JSON.parse(line) as PartialEvalRecord);
|
|
193
|
+
}
|
|
194
|
+
|
|
195
|
+
/**
|
|
196
|
+
* Update an existing record by ID
|
|
197
|
+
*/
|
|
198
|
+
export function updateEvalRecord(
|
|
199
|
+
projectPath: string,
|
|
200
|
+
id: string,
|
|
201
|
+
updates: Partial<EvalRecord>,
|
|
202
|
+
): boolean {
|
|
203
|
+
const records = readPartialRecords(projectPath);
|
|
204
|
+
const index = records.findIndex((r) => r.id === id);
|
|
205
|
+
|
|
206
|
+
if (index === -1) {
|
|
207
|
+
return false;
|
|
208
|
+
}
|
|
209
|
+
|
|
210
|
+
records[index] = { ...records[index], ...updates };
|
|
211
|
+
|
|
212
|
+
// Rewrite the file
|
|
213
|
+
const evalPath = getEvalDataPath(projectPath);
|
|
214
|
+
const content = records.map((r) => JSON.stringify(r)).join("\n") + "\n";
|
|
215
|
+
fs.writeFileSync(evalPath, content, "utf-8");
|
|
216
|
+
|
|
217
|
+
return true;
|
|
218
|
+
}
|
|
219
|
+
|
|
220
|
+
// ============================================================================
|
|
221
|
+
// Capture Functions
|
|
222
|
+
// ============================================================================
|
|
223
|
+
|
|
224
|
+
/**
 * In-memory store for in-progress records (keyed by epic ID).
 *
 * Populated by captureDecomposition, mutated by captureSubtaskOutcome, and
 * drained by finalizeEvalRecord. Module-level state: entries are lost on
 * process restart — the JSONL file on disk is the durable copy.
 */
const inProgressRecords = new Map<string, PartialEvalRecord>();
|
|
228
|
+
|
|
229
|
+
/**
|
|
230
|
+
* Start capturing a decomposition
|
|
231
|
+
*
|
|
232
|
+
* Called when swarm_decompose generates a decomposition.
|
|
233
|
+
* Creates a partial record that will be completed when outcomes arrive.
|
|
234
|
+
*/
|
|
235
|
+
export function captureDecomposition(params: {
|
|
236
|
+
epicId: string;
|
|
237
|
+
projectPath: string;
|
|
238
|
+
task: string;
|
|
239
|
+
context?: string;
|
|
240
|
+
strategy: "file-based" | "feature-based" | "risk-based" | "auto";
|
|
241
|
+
maxSubtasks: number;
|
|
242
|
+
epicTitle: string;
|
|
243
|
+
epicDescription?: string;
|
|
244
|
+
subtasks: Array<{
|
|
245
|
+
title: string;
|
|
246
|
+
description?: string;
|
|
247
|
+
files: string[];
|
|
248
|
+
dependencies?: number[];
|
|
249
|
+
estimated_complexity?: number;
|
|
250
|
+
}>;
|
|
251
|
+
}): PartialEvalRecord {
|
|
252
|
+
const record: PartialEvalRecord = {
|
|
253
|
+
id: params.epicId,
|
|
254
|
+
timestamp: new Date().toISOString(),
|
|
255
|
+
project_path: params.projectPath,
|
|
256
|
+
task: params.task,
|
|
257
|
+
context: params.context,
|
|
258
|
+
strategy: params.strategy,
|
|
259
|
+
max_subtasks: params.maxSubtasks,
|
|
260
|
+
epic_title: params.epicTitle,
|
|
261
|
+
epic_description: params.epicDescription,
|
|
262
|
+
subtasks: params.subtasks,
|
|
263
|
+
outcomes: [],
|
|
264
|
+
};
|
|
265
|
+
|
|
266
|
+
// Store in memory for later updates
|
|
267
|
+
inProgressRecords.set(params.epicId, record);
|
|
268
|
+
|
|
269
|
+
// Also persist to disk (partial)
|
|
270
|
+
appendEvalRecord(params.projectPath, record);
|
|
271
|
+
|
|
272
|
+
return record;
|
|
273
|
+
}
|
|
274
|
+
|
|
275
|
+
/**
|
|
276
|
+
* Capture a subtask outcome
|
|
277
|
+
*
|
|
278
|
+
* Called when swarm_complete finishes a subtask.
|
|
279
|
+
* Updates the in-progress record with outcome data.
|
|
280
|
+
*/
|
|
281
|
+
export function captureSubtaskOutcome(params: {
|
|
282
|
+
epicId: string;
|
|
283
|
+
projectPath: string;
|
|
284
|
+
beadId: string;
|
|
285
|
+
title: string;
|
|
286
|
+
plannedFiles: string[];
|
|
287
|
+
actualFiles: string[];
|
|
288
|
+
durationMs: number;
|
|
289
|
+
errorCount: number;
|
|
290
|
+
retryCount: number;
|
|
291
|
+
success: boolean;
|
|
292
|
+
failureMode?: string;
|
|
293
|
+
}): void {
|
|
294
|
+
const outcome: SubtaskOutcome = {
|
|
295
|
+
bead_id: params.beadId,
|
|
296
|
+
title: params.title,
|
|
297
|
+
planned_files: params.plannedFiles,
|
|
298
|
+
actual_files: params.actualFiles,
|
|
299
|
+
duration_ms: params.durationMs,
|
|
300
|
+
error_count: params.errorCount,
|
|
301
|
+
retry_count: params.retryCount,
|
|
302
|
+
success: params.success,
|
|
303
|
+
failure_mode: params.failureMode,
|
|
304
|
+
};
|
|
305
|
+
|
|
306
|
+
// Update in-memory record
|
|
307
|
+
const record = inProgressRecords.get(params.epicId);
|
|
308
|
+
if (record) {
|
|
309
|
+
record.outcomes = record.outcomes || [];
|
|
310
|
+
record.outcomes.push(outcome);
|
|
311
|
+
}
|
|
312
|
+
|
|
313
|
+
// Update on disk
|
|
314
|
+
updateEvalRecord(params.projectPath, params.epicId, {
|
|
315
|
+
outcomes: record?.outcomes,
|
|
316
|
+
});
|
|
317
|
+
}
|
|
318
|
+
|
|
319
|
+
/**
|
|
320
|
+
* Finalize an eval record
|
|
321
|
+
*
|
|
322
|
+
* Called when all subtasks are complete.
|
|
323
|
+
* Computes aggregate metrics and marks record as complete.
|
|
324
|
+
*/
|
|
325
|
+
export function finalizeEvalRecord(params: {
|
|
326
|
+
epicId: string;
|
|
327
|
+
projectPath: string;
|
|
328
|
+
}): EvalRecord | null {
|
|
329
|
+
const record = inProgressRecords.get(params.epicId);
|
|
330
|
+
if (!record || !record.outcomes || record.outcomes.length === 0) {
|
|
331
|
+
return null;
|
|
332
|
+
}
|
|
333
|
+
|
|
334
|
+
// Compute aggregate metrics
|
|
335
|
+
const outcomes = record.outcomes;
|
|
336
|
+
|
|
337
|
+
const overallSuccess = outcomes.every((o) => o.success);
|
|
338
|
+
const totalDurationMs = outcomes.reduce((sum, o) => sum + o.duration_ms, 0);
|
|
339
|
+
const totalErrors = outcomes.reduce((sum, o) => sum + o.error_count, 0);
|
|
340
|
+
|
|
341
|
+
// File overlap: count files that appear in multiple subtasks
|
|
342
|
+
const allPlannedFiles = record.subtasks?.flatMap((s) => s.files) || [];
|
|
343
|
+
const fileOccurrences = new Map<string, number>();
|
|
344
|
+
for (const file of allPlannedFiles) {
|
|
345
|
+
fileOccurrences.set(file, (fileOccurrences.get(file) || 0) + 1);
|
|
346
|
+
}
|
|
347
|
+
const fileOverlapCount = Array.from(fileOccurrences.values()).filter(
|
|
348
|
+
(count) => count > 1,
|
|
349
|
+
).length;
|
|
350
|
+
|
|
351
|
+
// Scope accuracy: actual files / planned files
|
|
352
|
+
const plannedFileSet = new Set(allPlannedFiles);
|
|
353
|
+
const actualFileSet = new Set(outcomes.flatMap((o) => o.actual_files));
|
|
354
|
+
const scopeAccuracy =
|
|
355
|
+
plannedFileSet.size > 0 ? actualFileSet.size / plannedFileSet.size : 1;
|
|
356
|
+
|
|
357
|
+
// Time balance: max duration / min duration
|
|
358
|
+
const durations = outcomes.map((o) => o.duration_ms).filter((d) => d > 0);
|
|
359
|
+
const timeBalanceRatio =
|
|
360
|
+
durations.length > 1 ? Math.max(...durations) / Math.min(...durations) : 1;
|
|
361
|
+
|
|
362
|
+
// Update record with computed metrics
|
|
363
|
+
const finalRecord: EvalRecord = {
|
|
364
|
+
...(record as EvalRecord),
|
|
365
|
+
overall_success: overallSuccess,
|
|
366
|
+
total_duration_ms: totalDurationMs,
|
|
367
|
+
total_errors: totalErrors,
|
|
368
|
+
file_overlap_count: fileOverlapCount,
|
|
369
|
+
scope_accuracy: scopeAccuracy,
|
|
370
|
+
time_balance_ratio: timeBalanceRatio,
|
|
371
|
+
};
|
|
372
|
+
|
|
373
|
+
// Update on disk
|
|
374
|
+
updateEvalRecord(params.projectPath, params.epicId, finalRecord);
|
|
375
|
+
|
|
376
|
+
// Remove from in-progress
|
|
377
|
+
inProgressRecords.delete(params.epicId);
|
|
378
|
+
|
|
379
|
+
return finalRecord;
|
|
380
|
+
}
|
|
381
|
+
|
|
382
|
+
/**
|
|
383
|
+
* Capture human feedback on a decomposition
|
|
384
|
+
*/
|
|
385
|
+
export function captureHumanFeedback(params: {
|
|
386
|
+
epicId: string;
|
|
387
|
+
projectPath: string;
|
|
388
|
+
accepted: boolean;
|
|
389
|
+
modified: boolean;
|
|
390
|
+
notes?: string;
|
|
391
|
+
}): void {
|
|
392
|
+
updateEvalRecord(params.projectPath, params.epicId, {
|
|
393
|
+
human_accepted: params.accepted,
|
|
394
|
+
human_modified: params.modified,
|
|
395
|
+
human_notes: params.notes,
|
|
396
|
+
});
|
|
397
|
+
}
|
|
398
|
+
|
|
399
|
+
// ============================================================================
|
|
400
|
+
// Eval Data Export
|
|
401
|
+
// ============================================================================
|
|
402
|
+
|
|
403
|
+
/**
|
|
404
|
+
* Export eval records as Evalite-compatible test cases
|
|
405
|
+
*
|
|
406
|
+
* Filters to only complete records with outcomes.
|
|
407
|
+
*/
|
|
408
|
+
export function exportForEvalite(projectPath: string): Array<{
|
|
409
|
+
input: { task: string; context?: string };
|
|
410
|
+
expected: {
|
|
411
|
+
minSubtasks: number;
|
|
412
|
+
maxSubtasks: number;
|
|
413
|
+
requiredFiles?: string[];
|
|
414
|
+
overallSuccess?: boolean;
|
|
415
|
+
};
|
|
416
|
+
actual: EvalRecord;
|
|
417
|
+
}> {
|
|
418
|
+
const records = readEvalRecords(projectPath);
|
|
419
|
+
|
|
420
|
+
return records
|
|
421
|
+
.filter((r) => r.outcomes && r.outcomes.length > 0)
|
|
422
|
+
.map((record) => ({
|
|
423
|
+
input: {
|
|
424
|
+
task: record.task,
|
|
425
|
+
context: record.context,
|
|
426
|
+
},
|
|
427
|
+
expected: {
|
|
428
|
+
minSubtasks: 2,
|
|
429
|
+
maxSubtasks: record.max_subtasks,
|
|
430
|
+
requiredFiles: record.subtasks.flatMap((s) => s.files),
|
|
431
|
+
overallSuccess: record.overall_success,
|
|
432
|
+
},
|
|
433
|
+
actual: record,
|
|
434
|
+
}));
|
|
435
|
+
}
|
|
436
|
+
|
|
437
|
+
/**
|
|
438
|
+
* Get statistics about captured eval data
|
|
439
|
+
*/
|
|
440
|
+
export function getEvalDataStats(projectPath: string): {
|
|
441
|
+
totalRecords: number;
|
|
442
|
+
completeRecords: number;
|
|
443
|
+
successRate: number;
|
|
444
|
+
avgSubtasks: number;
|
|
445
|
+
avgDurationMs: number;
|
|
446
|
+
avgScopeAccuracy: number;
|
|
447
|
+
avgTimeBalance: number;
|
|
448
|
+
} {
|
|
449
|
+
const records = readEvalRecords(projectPath);
|
|
450
|
+
const complete = records.filter((r) => r.outcomes && r.outcomes.length > 0);
|
|
451
|
+
|
|
452
|
+
if (complete.length === 0) {
|
|
453
|
+
return {
|
|
454
|
+
totalRecords: records.length,
|
|
455
|
+
completeRecords: 0,
|
|
456
|
+
successRate: 0,
|
|
457
|
+
avgSubtasks: 0,
|
|
458
|
+
avgDurationMs: 0,
|
|
459
|
+
avgScopeAccuracy: 0,
|
|
460
|
+
avgTimeBalance: 0,
|
|
461
|
+
};
|
|
462
|
+
}
|
|
463
|
+
|
|
464
|
+
const successCount = complete.filter((r) => r.overall_success).length;
|
|
465
|
+
const avgSubtasks =
|
|
466
|
+
complete.reduce((sum, r) => sum + (r.outcomes?.length || 0), 0) /
|
|
467
|
+
complete.length;
|
|
468
|
+
const avgDurationMs =
|
|
469
|
+
complete.reduce((sum, r) => sum + (r.total_duration_ms || 0), 0) /
|
|
470
|
+
complete.length;
|
|
471
|
+
const avgScopeAccuracy =
|
|
472
|
+
complete.reduce((sum, r) => sum + (r.scope_accuracy || 1), 0) /
|
|
473
|
+
complete.length;
|
|
474
|
+
const avgTimeBalance =
|
|
475
|
+
complete.reduce((sum, r) => sum + (r.time_balance_ratio || 1), 0) /
|
|
476
|
+
complete.length;
|
|
477
|
+
|
|
478
|
+
return {
|
|
479
|
+
totalRecords: records.length,
|
|
480
|
+
completeRecords: complete.length,
|
|
481
|
+
successRate: successCount / complete.length,
|
|
482
|
+
avgSubtasks,
|
|
483
|
+
avgDurationMs,
|
|
484
|
+
avgScopeAccuracy,
|
|
485
|
+
avgTimeBalance,
|
|
486
|
+
};
|
|
487
|
+
}
|
package/src/index.ts
CHANGED
|
@@ -39,6 +39,11 @@ import { swarmTools } from "./swarm";
|
|
|
39
39
|
import { repoCrawlTools } from "./repo-crawl";
|
|
40
40
|
import { skillsTools, setSkillsProjectDirectory } from "./skills";
|
|
41
41
|
import { mandateTools } from "./mandates";
|
|
42
|
+
import {
|
|
43
|
+
guardrailOutput,
|
|
44
|
+
DEFAULT_GUARDRAIL_CONFIG,
|
|
45
|
+
type GuardrailResult,
|
|
46
|
+
} from "./output-guardrails";
|
|
42
47
|
|
|
43
48
|
/**
|
|
44
49
|
* OpenCode Swarm Plugin
|
|
@@ -160,14 +165,27 @@ export const SwarmPlugin: Plugin = async (
|
|
|
160
165
|
},
|
|
161
166
|
|
|
162
167
|
/**
|
|
163
|
-
* Hook after tool execution for automatic cleanup
|
|
168
|
+
* Hook after tool execution for automatic cleanup and guardrails
|
|
164
169
|
*
|
|
165
|
-
*
|
|
166
|
-
*
|
|
170
|
+
* - Applies output guardrails to prevent context blowout from MCP tools
|
|
171
|
+
* - Auto-releases file reservations after swarm:complete or beads:close
|
|
172
|
+
* - Auto-syncs beads after closing
|
|
167
173
|
*/
|
|
168
174
|
"tool.execute.after": async (input, output) => {
|
|
169
175
|
const toolName = input.tool;
|
|
170
176
|
|
|
177
|
+
// Apply output guardrails to prevent context blowout
|
|
178
|
+
// Skip if output is empty or tool is in skip list
|
|
179
|
+
if (output.output && typeof output.output === "string") {
|
|
180
|
+
const guardrailResult = guardrailOutput(toolName, output.output);
|
|
181
|
+
if (guardrailResult.truncated) {
|
|
182
|
+
output.output = guardrailResult.output;
|
|
183
|
+
console.log(
|
|
184
|
+
`[swarm-plugin] Guardrail truncated ${toolName}: ${guardrailResult.originalLength} → ${guardrailResult.truncatedLength} chars`,
|
|
185
|
+
);
|
|
186
|
+
}
|
|
187
|
+
}
|
|
188
|
+
|
|
171
189
|
// Track Agent Mail state for cleanup
|
|
172
190
|
if (toolName === "agentmail_init" && output.output) {
|
|
173
191
|
try {
|
|
@@ -559,3 +577,27 @@ export {
|
|
|
559
577
|
groupByTransition,
|
|
560
578
|
type PromotionResult,
|
|
561
579
|
} from "./mandate-promotion";
|
|
580
|
+
|
|
581
|
+
/**
|
|
582
|
+
* Re-export output-guardrails module
|
|
583
|
+
*
|
|
584
|
+
* Includes:
|
|
585
|
+
* - guardrailOutput - Main entry point for truncating tool output
|
|
586
|
+
* - truncateWithBoundaries - Smart truncation preserving structure
|
|
587
|
+
* - getToolLimit - Get character limit for a tool
|
|
588
|
+
* - DEFAULT_GUARDRAIL_CONFIG - Default configuration
|
|
589
|
+
*
|
|
590
|
+
* Types:
|
|
591
|
+
* - GuardrailConfig - Configuration interface
|
|
592
|
+
* - GuardrailResult - Result of guardrail processing
|
|
593
|
+
* - GuardrailMetrics - Analytics data
|
|
594
|
+
*/
|
|
595
|
+
export {
|
|
596
|
+
guardrailOutput,
|
|
597
|
+
truncateWithBoundaries,
|
|
598
|
+
createMetrics,
|
|
599
|
+
DEFAULT_GUARDRAIL_CONFIG,
|
|
600
|
+
type GuardrailConfig,
|
|
601
|
+
type GuardrailResult,
|
|
602
|
+
type GuardrailMetrics,
|
|
603
|
+
} from "./output-guardrails";
|
|
@@ -7,7 +7,7 @@
|
|
|
7
7
|
* These tests don't require external services - they test the learning
|
|
8
8
|
* algorithms and their integration with swarm tools.
|
|
9
9
|
*/
|
|
10
|
-
import { describe, it, expect, beforeEach, vi } from "vitest";
|
|
10
|
+
import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";
|
|
11
11
|
|
|
12
12
|
// Learning module
|
|
13
13
|
import {
|
|
@@ -1150,6 +1150,10 @@ describe("Storage Module", () => {
|
|
|
1150
1150
|
storage = new InMemoryStorage();
|
|
1151
1151
|
});
|
|
1152
1152
|
|
|
1153
|
+
afterEach(async () => {
|
|
1154
|
+
await storage.close();
|
|
1155
|
+
});
|
|
1156
|
+
|
|
1153
1157
|
it("stores and retrieves feedback", async () => {
|
|
1154
1158
|
const event = createFeedbackEvent("type_safe", "helpful");
|
|
1155
1159
|
await storage.storeFeedback(event);
|
|
@@ -1305,16 +1309,23 @@ describe("Storage Module", () => {
|
|
|
1305
1309
|
beforeEach(async () => {
|
|
1306
1310
|
isAvailable = await isSemanticMemoryAvailable();
|
|
1307
1311
|
if (isAvailable) {
|
|
1312
|
+
// Use unique collections per test run to ensure isolation
|
|
1308
1313
|
storage = new SemanticMemoryStorage({
|
|
1309
1314
|
collections: {
|
|
1310
|
-
feedback:
|
|
1311
|
-
patterns:
|
|
1312
|
-
maturity:
|
|
1315
|
+
feedback: `test-feedback-learning-${Date.now()}`,
|
|
1316
|
+
patterns: `test-patterns-learning-${Date.now()}`,
|
|
1317
|
+
maturity: `test-maturity-learning-${Date.now()}`,
|
|
1313
1318
|
},
|
|
1314
1319
|
});
|
|
1315
1320
|
}
|
|
1316
1321
|
});
|
|
1317
1322
|
|
|
1323
|
+
afterEach(async () => {
|
|
1324
|
+
if (storage) {
|
|
1325
|
+
await storage.close();
|
|
1326
|
+
}
|
|
1327
|
+
});
|
|
1328
|
+
|
|
1318
1329
|
it("skips tests if semantic-memory not available", async () => {
|
|
1319
1330
|
if (!isAvailable) {
|
|
1320
1331
|
expect(isAvailable).toBe(false);
|
|
@@ -1380,6 +1391,10 @@ describe("Storage Module", () => {
|
|
|
1380
1391
|
await resetStorage();
|
|
1381
1392
|
});
|
|
1382
1393
|
|
|
1394
|
+
afterEach(async () => {
|
|
1395
|
+
await resetStorage();
|
|
1396
|
+
});
|
|
1397
|
+
|
|
1383
1398
|
it("getStorage returns a storage instance", async () => {
|
|
1384
1399
|
const storage = await getStorage();
|
|
1385
1400
|
expect(storage).toBeDefined();
|