opencode-swarm-plugin 0.37.0 → 0.39.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. package/.env +2 -0
  2. package/.hive/eval-results.json +26 -0
  3. package/.hive/issues.jsonl +20 -5
  4. package/.hive/memories.jsonl +35 -1
  5. package/.opencode/eval-history.jsonl +12 -0
  6. package/.turbo/turbo-build.log +4 -4
  7. package/.turbo/turbo-test.log +319 -319
  8. package/CHANGELOG.md +258 -0
  9. package/README.md +50 -0
  10. package/bin/swarm.test.ts +475 -0
  11. package/bin/swarm.ts +385 -208
  12. package/dist/compaction-hook.d.ts +1 -1
  13. package/dist/compaction-hook.d.ts.map +1 -1
  14. package/dist/compaction-prompt-scoring.d.ts +124 -0
  15. package/dist/compaction-prompt-scoring.d.ts.map +1 -0
  16. package/dist/eval-capture.d.ts +81 -1
  17. package/dist/eval-capture.d.ts.map +1 -1
  18. package/dist/eval-gates.d.ts +84 -0
  19. package/dist/eval-gates.d.ts.map +1 -0
  20. package/dist/eval-history.d.ts +117 -0
  21. package/dist/eval-history.d.ts.map +1 -0
  22. package/dist/eval-learning.d.ts +216 -0
  23. package/dist/eval-learning.d.ts.map +1 -0
  24. package/dist/hive.d.ts +59 -0
  25. package/dist/hive.d.ts.map +1 -1
  26. package/dist/index.d.ts +87 -0
  27. package/dist/index.d.ts.map +1 -1
  28. package/dist/index.js +823 -131
  29. package/dist/plugin.js +655 -131
  30. package/dist/post-compaction-tracker.d.ts +133 -0
  31. package/dist/post-compaction-tracker.d.ts.map +1 -0
  32. package/dist/swarm-decompose.d.ts +30 -0
  33. package/dist/swarm-decompose.d.ts.map +1 -1
  34. package/dist/swarm-orchestrate.d.ts +23 -0
  35. package/dist/swarm-orchestrate.d.ts.map +1 -1
  36. package/dist/swarm-prompts.d.ts +25 -1
  37. package/dist/swarm-prompts.d.ts.map +1 -1
  38. package/dist/swarm.d.ts +19 -0
  39. package/dist/swarm.d.ts.map +1 -1
  40. package/evals/README.md +595 -94
  41. package/evals/compaction-prompt.eval.ts +149 -0
  42. package/evals/coordinator-behavior.eval.ts +8 -8
  43. package/evals/fixtures/compaction-prompt-cases.ts +305 -0
  44. package/evals/lib/compaction-loader.test.ts +248 -0
  45. package/evals/lib/compaction-loader.ts +320 -0
  46. package/evals/lib/data-loader.test.ts +345 -0
  47. package/evals/lib/data-loader.ts +107 -6
  48. package/evals/scorers/compaction-prompt-scorers.ts +145 -0
  49. package/evals/scorers/compaction-scorers.ts +13 -13
  50. package/evals/scorers/coordinator-discipline.evalite-test.ts +3 -2
  51. package/evals/scorers/coordinator-discipline.ts +13 -13
  52. package/examples/plugin-wrapper-template.ts +177 -8
  53. package/package.json +7 -2
  54. package/scripts/migrate-unknown-sessions.ts +349 -0
  55. package/src/compaction-capture.integration.test.ts +257 -0
  56. package/src/compaction-hook.test.ts +139 -2
  57. package/src/compaction-hook.ts +113 -2
  58. package/src/compaction-prompt-scorers.test.ts +299 -0
  59. package/src/compaction-prompt-scoring.ts +298 -0
  60. package/src/eval-capture.test.ts +422 -0
  61. package/src/eval-capture.ts +94 -2
  62. package/src/eval-gates.test.ts +306 -0
  63. package/src/eval-gates.ts +218 -0
  64. package/src/eval-history.test.ts +508 -0
  65. package/src/eval-history.ts +214 -0
  66. package/src/eval-learning.test.ts +378 -0
  67. package/src/eval-learning.ts +360 -0
  68. package/src/index.ts +61 -1
  69. package/src/post-compaction-tracker.test.ts +251 -0
  70. package/src/post-compaction-tracker.ts +237 -0
  71. package/src/swarm-decompose.test.ts +40 -47
  72. package/src/swarm-decompose.ts +2 -2
  73. package/src/swarm-orchestrate.test.ts +270 -7
  74. package/src/swarm-orchestrate.ts +100 -13
  75. package/src/swarm-prompts.test.ts +121 -0
  76. package/src/swarm-prompts.ts +297 -4
  77. package/src/swarm-research.integration.test.ts +157 -0
  78. package/src/swarm-review.ts +3 -3
  79. /package/evals/{evalite.config.ts → evalite.config.ts.bak} +0 -0
@@ -0,0 +1,214 @@
1
+ /**
2
+ * Eval History Tracker - Progressive gates based on run history
3
+ *
4
+ * Tracks eval run scores over time and calculates the current phase:
5
+ * - Bootstrap (<10 runs): No gates, just collect data
6
+ * - Stabilization (10-50 runs): Warn on >10% regression
7
+ * - Production (>50 runs + variance <0.1): Fail on >5% regression
8
+ *
9
+ * @module eval-history
10
+ */
11
+ import * as fs from "node:fs";
12
+ import * as path from "node:path";
13
+
14
/**
 * Progressive gate phases, ordered by data maturity.
 * Transition rules live in getPhase().
 */
export type Phase = "bootstrap" | "stabilization" | "production";

/**
 * Single eval run record (one JSONL line in the history file).
 */
export interface EvalRunRecord {
  /** ISO-8601 timestamp */
  timestamp: string;
  /** Name of the eval (e.g., "swarm-decomposition") */
  eval_name: string;
  /** Score (0-1 range typically) */
  score: number;
  /** Run count (monotonically increasing per eval) */
  run_count: number;
}

/**
 * Default path for eval history, relative to the project root.
 */
export const DEFAULT_EVAL_HISTORY_PATH = ".opencode/eval-history.jsonl";

/**
 * Score-variance ceiling for entering the production phase.
 */
export const VARIANCE_THRESHOLD = 0.1;

/**
 * Run-count thresholds for phase transitions:
 * below BOOTSTRAP_THRESHOLD → bootstrap; up to and including
 * STABILIZATION_THRESHOLD → stabilization; beyond that, variance decides.
 */
export const BOOTSTRAP_THRESHOLD = 10;
export const STABILIZATION_THRESHOLD = 50;
48
+
49
+ /**
50
+ * Get the eval history file path
51
+ */
52
+ export function getEvalHistoryPath(projectPath: string): string {
53
+ return path.join(projectPath, DEFAULT_EVAL_HISTORY_PATH);
54
+ }
55
+
56
+ /**
57
+ * Ensure the eval history directory exists
58
+ */
59
+ export function ensureEvalHistoryDir(projectPath: string): void {
60
+ const historyPath = getEvalHistoryPath(projectPath);
61
+ const dir = path.dirname(historyPath);
62
+ if (!fs.existsSync(dir)) {
63
+ fs.mkdirSync(dir, { recursive: true });
64
+ }
65
+ }
66
+
67
/**
 * Record an eval run to the JSONL history file.
 *
 * Appends one line to `.opencode/eval-history.jsonl`; each line is a
 * complete JSON object (timestamp, eval name, score, run count).
 *
 * **Auto-creates directory** via ensureEvalHistoryDir if `.opencode/`
 * doesn't exist.
 *
 * NOTE(review): the write uses `appendFileSync` (O_APPEND). Small
 * single-record appends are effectively atomic on POSIX, but treat
 * "safe for concurrent eval runs" as an assumption to confirm rather
 * than a cross-platform guarantee.
 *
 * @param projectPath - Absolute path to project root
 * @param run - Eval run record with timestamp, eval_name, score, run_count
 *
 * @example
 * ```typescript
 * import { recordEvalRun } from "./eval-history.js";
 *
 * recordEvalRun("/path/to/project", {
 *   timestamp: new Date().toISOString(),
 *   eval_name: "swarm-decomposition",
 *   score: 0.92,
 *   run_count: 15,
 * });
 * ```
 */
export function recordEvalRun(
  projectPath: string,
  run: EvalRunRecord,
): void {
  ensureEvalHistoryDir(projectPath);
  const historyPath = getEvalHistoryPath(projectPath);
  const line = `${JSON.stringify(run)}\n`;
  fs.appendFileSync(historyPath, line, "utf-8");
}
104
+
105
+ /**
106
+ * Read all eval run records from JSONL file
107
+ *
108
+ * Internal helper for parsing the history file
109
+ */
110
+ function readAllRecords(projectPath: string): EvalRunRecord[] {
111
+ const historyPath = getEvalHistoryPath(projectPath);
112
+
113
+ if (!fs.existsSync(historyPath)) {
114
+ return [];
115
+ }
116
+
117
+ const content = fs.readFileSync(historyPath, "utf-8");
118
+ const lines = content.trim().split("\n").filter(Boolean);
119
+
120
+ return lines.map((line) => JSON.parse(line) as EvalRunRecord);
121
+ }
122
+
123
+ /**
124
+ * Get score history for a specific eval
125
+ *
126
+ * Returns runs in chronological order (oldest first)
127
+ */
128
+ export function getScoreHistory(
129
+ projectPath: string,
130
+ evalName: string,
131
+ ): EvalRunRecord[] {
132
+ return readAllRecords(projectPath).filter(
133
+ (run) => run.eval_name === evalName,
134
+ );
135
+ }
136
+
137
+ /**
138
+ * Calculate statistical variance of scores
139
+ *
140
+ * Variance = mean of squared deviations from the mean
141
+ * Formula: Σ((x - μ)²) / n
142
+ */
143
+ export function calculateVariance(scores: number[]): number {
144
+ if (scores.length <= 1) {
145
+ return 0;
146
+ }
147
+
148
+ const mean = scores.reduce((sum, score) => sum + score, 0) / scores.length;
149
+
150
+ const variance = scores.reduce((sum, score) => {
151
+ const deviation = score - mean;
152
+ return sum + deviation * deviation;
153
+ }, 0) / scores.length;
154
+
155
+ return variance;
156
+ }
157
+
158
+ /**
159
+ * Get the current phase for an eval based on run count and score variance
160
+ *
161
+ * Progressive phase logic ensures quality gates adapt to data maturity:
162
+ *
163
+ * - **Bootstrap (<10 runs)**: No gates, just collect baseline data
164
+ * - **Stabilization (10-50 runs)**: Warn on >10% regression (but pass)
165
+ * - **Production (>50 runs AND variance <0.1)**: Fail on >5% regression
166
+ *
167
+ * **Variance check**: If >50 runs but variance ≥0.1, stays in stabilization.
168
+ * This prevents premature production gates when scores are still unstable.
169
+ *
170
+ * **Why variance matters**: An eval with wildly fluctuating scores isn't ready for
171
+ * strict gates. Variance threshold (0.1) ensures the eval is consistent before
172
+ * enforcing production-level quality control.
173
+ *
174
+ * @param projectPath - Absolute path to project root (contains `.opencode/eval-history.jsonl`)
175
+ * @param evalName - Name of the eval (e.g., "swarm-decomposition")
176
+ * @returns Current phase: "bootstrap" | "stabilization" | "production"
177
+ *
178
+ * @example
179
+ * ```typescript
180
+ * import { getPhase } from "./eval-history.js";
181
+ *
182
+ * const phase = getPhase("/path/to/project", "swarm-decomposition");
183
+ *
184
+ * if (phase === "production") {
185
+ * console.log("🚀 Production phase - strict gates enabled");
186
+ * } else if (phase === "stabilization") {
187
+ * console.log("⚙️ Stabilization phase - warnings only");
188
+ * } else {
189
+ * console.log("🌱 Bootstrap phase - collecting data");
190
+ * }
191
+ * ```
192
+ */
193
+ export function getPhase(projectPath: string, evalName: string): Phase {
194
+ const history = getScoreHistory(projectPath, evalName);
195
+
196
+ if (history.length < BOOTSTRAP_THRESHOLD) {
197
+ return "bootstrap";
198
+ }
199
+
200
+ if (history.length <= STABILIZATION_THRESHOLD) {
201
+ return "stabilization";
202
+ }
203
+
204
+ // >50 runs - check variance
205
+ const scores = history.map((run) => run.score);
206
+ const variance = calculateVariance(scores);
207
+
208
+ if (variance < VARIANCE_THRESHOLD) {
209
+ return "production";
210
+ }
211
+
212
+ // High variance - stay in stabilization
213
+ return "stabilization";
214
+ }
@@ -0,0 +1,378 @@
1
+ /**
2
+ * Tests for eval-learning.ts - Eval-to-Learning Feedback Loop
3
+ *
4
+ * TDD RED phase: Write failing tests first, then implement.
5
+ *
6
+ * Core behavior:
7
+ * - Detect significant eval score drops (>15% from rolling average)
8
+ * - Store failure context to semantic-memory with structured tags
9
+ * - Ignore minor fluctuations (<15% variance)
10
+ * - Configurable threshold for sensitivity tuning
11
+ */
12
+ import { describe, test, expect, beforeEach, mock } from "bun:test";
13
+ import {
14
+ learnFromEvalFailure,
15
+ type EvalLearningConfig,
16
+ calculateRollingAverage,
17
+ isSignificantDrop,
18
+ formatFailureContext,
19
+ createLearningConfig,
20
+ DEFAULT_EVAL_LEARNING_CONFIG,
21
+ } from "./eval-learning";
22
+ import type { EvalRunRecord } from "./eval-history";
23
+ import type { MemoryAdapter } from "./memory-tools";
24
+
25
// ============================================================================
// Mock Memory Adapter
// ============================================================================

/**
 * Create a mock memory adapter for testing.
 *
 * Tracks store() calls in a local array without hitting real storage.
 * The extra `getStoredMemories()` accessor is not part of the
 * MemoryAdapter interface — hence the `as any` cast — and exists only
 * so tests can inspect what was stored.
 */
function createMockMemoryAdapter(): MemoryAdapter {
  const storedMemories: Array<{
    information: string;
    tags?: string;
    metadata?: string;
  }> = [];

  return {
    store: mock(async (args) => {
      storedMemories.push(args);
      return {
        id: `mem_${Date.now()}`,
        message: "Stored successfully",
      };
    }),
    find: mock(async () => ({ results: [], total: 0 })),
    get: mock(async () => null),
    remove: mock(async () => ({ success: true, message: "Removed" })),
    validate: mock(async () => ({ success: true, message: "Validated" })),
    list: mock(async () => []),
    stats: mock(async () => ({
      total_memories: 0,
      total_embeddings: 0,
      collections: {},
    })),
    checkHealth: mock(async () => ({ ready: true, message: "OK" })),
    getStoredMemories: () => storedMemories,
  } as any;
}
63
+
64
// ============================================================================
// Tests: Rolling Average Calculation
// ============================================================================

// Covers the baseline computation: average of the last N scores
// (default window 5), with empty/short-history edge cases.
describe("calculateRollingAverage", () => {
  test("returns 0 for empty history", () => {
    const avg = calculateRollingAverage([]);
    expect(avg).toBe(0);
  });

  test("returns single score for history of 1", () => {
    const history: EvalRunRecord[] = [
      {
        eval_name: "test",
        score: 0.85,
        timestamp: "2024-12-01T00:00:00Z",
        run_count: 1,
      },
    ];

    const avg = calculateRollingAverage(history);
    expect(avg).toBe(0.85);
  });

  test("calculates average of last N runs (default 5)", () => {
    const history: EvalRunRecord[] = [
      { eval_name: "test", score: 0.8, timestamp: "2024-12-01", run_count: 1 },
      { eval_name: "test", score: 0.82, timestamp: "2024-12-02", run_count: 2 },
      { eval_name: "test", score: 0.84, timestamp: "2024-12-03", run_count: 3 },
      { eval_name: "test", score: 0.86, timestamp: "2024-12-04", run_count: 4 },
      { eval_name: "test", score: 0.88, timestamp: "2024-12-05", run_count: 5 },
      { eval_name: "test", score: 0.9, timestamp: "2024-12-06", run_count: 6 },
    ];

    const avg = calculateRollingAverage(history);
    // Last 5: 0.82, 0.84, 0.86, 0.88, 0.9 => avg = 0.86
    expect(avg).toBeCloseTo(0.86, 2);
  });

  test("uses custom window size", () => {
    const history: EvalRunRecord[] = [
      { eval_name: "test", score: 0.8, timestamp: "2024-12-01", run_count: 1 },
      { eval_name: "test", score: 0.85, timestamp: "2024-12-02", run_count: 2 },
      { eval_name: "test", score: 0.9, timestamp: "2024-12-03", run_count: 3 },
    ];

    const avg = calculateRollingAverage(history, 2);
    // Last 2: 0.85, 0.9 => avg = 0.875
    expect(avg).toBeCloseTo(0.875, 3);
  });

  test("handles window larger than history", () => {
    const history: EvalRunRecord[] = [
      { eval_name: "test", score: 0.8, timestamp: "2024-12-01", run_count: 1 },
      { eval_name: "test", score: 0.9, timestamp: "2024-12-02", run_count: 2 },
    ];

    const avg = calculateRollingAverage(history, 10);
    // Uses all available: (0.8 + 0.9) / 2 = 0.85
    expect(avg).toBeCloseTo(0.85, 2);
  });
});
126
+
127
// ============================================================================
// Tests: Significant Drop Detection
// ============================================================================

// A drop is "significant" when (baseline - current) / baseline exceeds
// the threshold (default 15%). Baseline 0 must never trigger.
describe("isSignificantDrop", () => {
  test("returns false when current equals baseline", () => {
    expect(isSignificantDrop(0.85, 0.85)).toBe(false);
  });

  test("returns false when current is higher than baseline", () => {
    expect(isSignificantDrop(0.9, 0.85)).toBe(false);
  });

  test("returns false for drop below threshold (default 15%)", () => {
    // Drop of 10%: 0.85 -> 0.765 (90% of 0.85)
    expect(isSignificantDrop(0.765, 0.85)).toBe(false);
  });

  test("returns true for drop at threshold (15%)", () => {
    // Drop of exactly 15%: 0.85 -> 0.7225 (85% of 0.85)
    // Use slightly lower to account for floating point precision
    expect(isSignificantDrop(0.722, 0.85)).toBe(true);
  });

  test("returns true for drop above threshold (20%)", () => {
    // Drop of 20%: 0.85 -> 0.68 (80% of 0.85)
    expect(isSignificantDrop(0.68, 0.85)).toBe(true);
  });

  test("uses custom threshold", () => {
    // Drop of 8%: 0.85 -> 0.782 (92% of 0.85)
    // Default (15%) => false
    expect(isSignificantDrop(0.782, 0.85)).toBe(false);

    // Custom threshold (5%) => true
    expect(isSignificantDrop(0.782, 0.85, 0.05)).toBe(true);
  });

  test("returns false when baseline is 0 (avoid division by zero)", () => {
    expect(isSignificantDrop(0, 0)).toBe(false);
    expect(isSignificantDrop(0.5, 0)).toBe(false);
  });
});
170
+
171
// ============================================================================
// Tests: Failure Context Formatting
// ============================================================================

// The formatted context is what gets stored to semantic memory, so it
// must carry the eval name, both scores, and a human-readable drop %.
describe("formatFailureContext", () => {
  test("includes eval name, scores, and drop percentage", () => {
    const context = formatFailureContext("compaction-test", 0.68, 0.85);

    expect(context).toContain("compaction-test");
    expect(context).toContain("0.68");
    expect(context).toContain("0.85");
    expect(context).toContain("20.0%"); // (0.85 - 0.68) / 0.85 = 20%
  });

  test("includes optional scorer context", () => {
    const scorerContext = "violationCount scorer failed: 5 violations detected";
    const context = formatFailureContext(
      "coordinator-behavior",
      0.5,
      0.8,
      scorerContext,
    );

    expect(context).toContain("coordinator-behavior");
    expect(context).toContain(scorerContext);
  });

  test("handles baseline of 0 gracefully", () => {
    const context = formatFailureContext("test", 0.5, 0);
    expect(context).not.toContain("NaN");
    expect(context).not.toContain("Infinity");
  });
});
204
+
205
// ============================================================================
// Tests: Main learnFromEvalFailure Function
// ============================================================================

// End-to-end behavior: compare current score against the rolling-average
// baseline and store a tagged memory only on significant drops.
// Each test gets a fresh mock adapter from beforeEach.
describe("learnFromEvalFailure", () => {
  let mockAdapter: MemoryAdapter;

  beforeEach(() => {
    mockAdapter = createMockMemoryAdapter();
  });

  test("stores memory when score drops significantly", async () => {
    const history: EvalRunRecord[] = [
      { eval_name: "test", score: 0.85, timestamp: "2024-12-01", run_count: 1 },
      { eval_name: "test", score: 0.84, timestamp: "2024-12-02", run_count: 2 },
      { eval_name: "test", score: 0.86, timestamp: "2024-12-03", run_count: 3 },
      { eval_name: "test", score: 0.85, timestamp: "2024-12-04", run_count: 4 },
      { eval_name: "test", score: 0.84, timestamp: "2024-12-05", run_count: 5 },
    ];
    const currentScore = 0.68; // Drop of ~20%

    const result = await learnFromEvalFailure(
      "test-eval",
      currentScore,
      history,
      mockAdapter,
    );

    expect(result.triggered).toBe(true);
    // Baseline = mean of the 5 scores above = 0.848
    expect(result.baseline).toBeCloseTo(0.848, 2);
    expect(result.drop_percentage).toBeCloseTo(0.198, 2); // ~20%

    // Verify memory was stored
    expect(mockAdapter.store).toHaveBeenCalledTimes(1);

    const storedMemory = (mockAdapter as any).getStoredMemories()[0];
    expect(storedMemory.information).toContain("test-eval");
    expect(storedMemory.information).toContain("0.68");
    expect(storedMemory.tags).toContain("eval-failure");
    expect(storedMemory.tags).toContain("test-eval");
  });

  test("does not store memory for minor fluctuations", async () => {
    const history: EvalRunRecord[] = [
      { eval_name: "test", score: 0.85, timestamp: "2024-12-01", run_count: 1 },
      { eval_name: "test", score: 0.84, timestamp: "2024-12-02", run_count: 2 },
    ];
    const currentScore = 0.8; // Drop of ~5%, below 15% threshold

    const result = await learnFromEvalFailure(
      "test-eval",
      currentScore,
      history,
      mockAdapter,
    );

    expect(result.triggered).toBe(false);
    expect(mockAdapter.store).not.toHaveBeenCalled();
  });

  test("includes scorer context in memory if provided", async () => {
    const history: EvalRunRecord[] = [
      { eval_name: "test", score: 0.9, timestamp: "2024-12-01", run_count: 1 },
    ];
    const currentScore = 0.7; // Drop of ~22%
    const scorerContext = "violationCount: 8 protocol violations";

    await learnFromEvalFailure(
      "coordinator-behavior",
      currentScore,
      history,
      mockAdapter,
      { scorerContext },
    );

    const storedMemory = (mockAdapter as any).getStoredMemories()[0];
    expect(storedMemory.information).toContain(scorerContext);
  });

  test("uses custom threshold when provided", async () => {
    const history: EvalRunRecord[] = [
      { eval_name: "test", score: 0.9, timestamp: "2024-12-01", run_count: 1 },
    ];
    const currentScore = 0.85; // Drop of ~5.5%

    const customConfig: EvalLearningConfig = {
      ...DEFAULT_EVAL_LEARNING_CONFIG,
      dropThreshold: 0.05, // 5% threshold
    };

    const result = await learnFromEvalFailure(
      "test-eval",
      currentScore,
      history,
      mockAdapter,
      { config: customConfig },
    );

    expect(result.triggered).toBe(true);
    expect(mockAdapter.store).toHaveBeenCalledTimes(1);
  });

  test("handles empty history gracefully", async () => {
    const result = await learnFromEvalFailure(
      "test-eval",
      0.5,
      [],
      mockAdapter,
    );

    expect(result.triggered).toBe(false);
    expect(result.baseline).toBe(0);
    expect(mockAdapter.store).not.toHaveBeenCalled();
  });

  test("generates structured tags for semantic search", async () => {
    const history: EvalRunRecord[] = [
      { eval_name: "test", score: 0.9, timestamp: "2024-12-01", run_count: 1 },
    ];
    const currentScore = 0.7; // Significant drop

    await learnFromEvalFailure(
      "compaction-test",
      currentScore,
      history,
      mockAdapter,
    );

    const storedMemory = (mockAdapter as any).getStoredMemories()[0];
    const tags = storedMemory.tags;

    expect(tags).toContain("eval-failure");
    expect(tags).toContain("compaction-test");
    expect(tags).toContain("regression");
  });

  test("stores metadata for future prompt generation", async () => {
    const history: EvalRunRecord[] = [
      { eval_name: "test", score: 0.9, timestamp: "2024-12-01", run_count: 1 },
    ];
    const currentScore = 0.7;

    await learnFromEvalFailure("test-eval", currentScore, history, mockAdapter);

    const storedMemory = (mockAdapter as any).getStoredMemories()[0];
    expect(storedMemory.metadata).toBeDefined();

    const metadata = JSON.parse(storedMemory.metadata!);
    expect(metadata.eval_name).toBe("test-eval");
    expect(metadata.baseline_score).toBeCloseTo(0.9, 2);
    expect(metadata.current_score).toBe(0.7);
    expect(metadata.drop_percentage).toBeCloseTo(0.222, 2); // (0.9 - 0.7) / 0.9
  });
});
359
+
360
// ============================================================================
// Tests: Convenience Helpers
// ============================================================================

// Config factory: custom dropThreshold required, windowSize optional
// (falls back to the default config's window size).
describe("createLearningConfig", () => {
  test("creates config with custom threshold", () => {
    const config = createLearningConfig(0.1);

    expect(config.dropThreshold).toBe(0.1);
    expect(config.windowSize).toBe(DEFAULT_EVAL_LEARNING_CONFIG.windowSize);
  });

  test("accepts custom window size", () => {
    const config = createLearningConfig(0.2, 10);

    expect(config.dropThreshold).toBe(0.2);
    expect(config.windowSize).toBe(10);
  });
});