opencode-swarm-plugin 0.17.1 → 0.19.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -19,10 +19,15 @@ export const PatternKindSchema = z.enum(["pattern", "anti_pattern"]);
19
19
  export type PatternKind = z.infer<typeof PatternKindSchema>;
20
20
 
21
21
  /**
22
- * A decomposition pattern that has been observed
22
+ * Decomposition pattern with success/failure tracking.
23
23
  *
24
- * Patterns are extracted from successful/failed decompositions and
25
- * tracked over time to learn what works and what doesn't.
24
+ * Field relationships:
25
+ * - `kind`: Tracks pattern lifecycle ("pattern" → "anti_pattern" when failure rate exceeds threshold)
26
+ * - `is_negative`: Derived boolean flag for quick filtering (true when kind === "anti_pattern")
27
+ *
28
+ * Both fields exist because:
29
+ * - `kind` is the source of truth for pattern status
30
+ * - `is_negative` enables efficient filtering without string comparison
26
31
  */
27
32
  export const DecompositionPatternSchema = z.object({
28
33
  /** Unique ID for this pattern */
@@ -69,6 +74,9 @@ export type PatternInversionResult = z.infer<
69
74
  // Configuration
70
75
  // ============================================================================
71
76
 
77
+ /** Maximum number of example beads to keep per pattern */
78
+ const MAX_EXAMPLE_BEADS = 10;
79
+
72
80
  /**
73
81
  * Configuration for anti-pattern detection
74
82
  */
@@ -186,7 +194,7 @@ export function recordPatternObservation(
186
194
  failure_count: success ? pattern.failure_count : pattern.failure_count + 1,
187
195
  updated_at: new Date().toISOString(),
188
196
  example_beads: beadId
189
- ? [...pattern.example_beads.slice(-9), beadId] // Keep last 10
197
+ ? [...pattern.example_beads.slice(-(MAX_EXAMPLE_BEADS - 1)), beadId]
190
198
  : pattern.example_beads,
191
199
  };
192
200
 
@@ -216,8 +224,16 @@ export function recordPatternObservation(
216
224
  export function extractPatternsFromDescription(description: string): string[] {
217
225
  const patterns: string[] = [];
218
226
 
219
- // Common decomposition strategies to detect
220
- const strategyPatterns = [
227
+ /**
228
+ * Regex patterns for detecting common decomposition strategies.
229
+ *
230
+ * Detection is keyword-based and not exhaustive - patterns can be
231
+ * manually created for novel strategies not covered here.
232
+ *
233
+ * Each pattern maps a regex to a strategy name that will be extracted
234
+ * from task descriptions during pattern observation.
235
+ */
236
+ const strategyPatterns: Array<{ regex: RegExp; pattern: string }> = [
221
237
  {
222
238
  regex: /split(?:ting)?\s+by\s+file\s+type/i,
223
239
  pattern: "Split by file type",
@@ -322,15 +338,17 @@ export function formatAntiPatternsForPrompt(
322
338
  }
323
339
 
324
340
  /**
325
- * Format successful patterns for inclusion in decomposition prompts
341
+ * Format successful patterns for inclusion in prompts.
326
342
  *
327
- * @param patterns - Patterns to format
328
- * @param minSuccessRate - Minimum success rate to include (0-1)
329
- * @returns Formatted string for prompt inclusion
343
+ * @param patterns - Array of decomposition patterns to filter and format
344
+ * @param minSuccessRate - Minimum success rate to include (default 0.7 = 70%).
345
+ * Chosen to filter out patterns with marginal track records - only patterns
346
+ * that succeed at least 70% of the time are recommended.
347
+ * @returns Formatted string of successful patterns for prompt injection
330
348
  */
331
349
  export function formatSuccessfulPatternsForPrompt(
332
350
  patterns: DecompositionPattern[],
333
- minSuccessRate: number = 0.7,
351
+ minSuccessRate = 0.7,
334
352
  ): string {
335
353
  const successful = patterns.filter((p) => {
336
354
  if (p.kind === "anti_pattern") return false;
package/src/learning.ts CHANGED
@@ -956,6 +956,112 @@ export class ErrorAccumulator {
956
956
  }
957
957
  }
958
958
 
959
+ // ============================================================================
960
+ // Semantic Memory Integration Helpers
961
+ // ============================================================================
962
+
963
+ /**
964
+ * Format memory store instruction for successful task completion
965
+ *
966
+ * @param beadId - Bead ID that completed
967
+ * @param summary - Completion summary
968
+ * @param filesTouched - Files modified
969
+ * @param strategy - Decomposition strategy used (if applicable)
970
+ * @returns Memory store instruction object
971
+ */
972
+ export function formatMemoryStoreOnSuccess(
973
+ beadId: string,
974
+ summary: string,
975
+ filesTouched: string[],
976
+ strategy?: DecompositionStrategy,
977
+ ): {
978
+ information: string;
979
+ metadata: string;
980
+ instruction: string;
981
+ } {
982
+ const strategyInfo = strategy ? ` using ${strategy} strategy` : "";
983
+
984
+ return {
985
+ information: `Task "${beadId}" completed successfully${strategyInfo}.
986
+ Key insight: ${summary}
987
+ Files touched: ${filesTouched.join(", ") || "none"}`,
988
+ metadata: `swarm, success, ${beadId}, ${strategy || "completion"}`,
989
+ instruction:
990
+ "Store this successful completion in semantic-memory for future reference",
991
+ };
992
+ }
993
+
994
+ /**
995
+ * Format memory store instruction for architectural problems (3-strike)
996
+ *
997
+ * @param beadId - Bead ID that struck out
998
+ * @param failures - Array of failure attempts
999
+ * @returns Memory store instruction object
1000
+ */
1001
+ export function formatMemoryStoreOn3Strike(
1002
+ beadId: string,
1003
+ failures: Array<{ attempt: string; reason: string }>,
1004
+ ): {
1005
+ information: string;
1006
+ metadata: string;
1007
+ instruction: string;
1008
+ } {
1009
+ const failuresList = failures
1010
+ .map((f, i) => `${i + 1}. ${f.attempt} - Failed: ${f.reason}`)
1011
+ .join("\n");
1012
+
1013
+ return {
1014
+ information: `Architecture problem detected in ${beadId}: Task failed after 3 attempts.
1015
+ Attempts:
1016
+ ${failuresList}
1017
+
1018
+ This indicates a structural issue requiring human decision, not another fix attempt.`,
1019
+ metadata: `architecture, 3-strike, ${beadId}, failure`,
1020
+ instruction:
1021
+ "Store this architectural problem in semantic-memory to avoid similar patterns in future",
1022
+ };
1023
+ }
1024
+
1025
+ /**
1026
+ * Format memory query instruction for task decomposition
1027
+ *
1028
+ * @param task - Task description
1029
+ * @param limit - Max results to return
1030
+ * @returns Memory query instruction object
1031
+ */
1032
+ export function formatMemoryQueryForDecomposition(
1033
+ task: string,
1034
+ limit: number = 3,
1035
+ ): {
1036
+ query: string;
1037
+ limit: number;
1038
+ instruction: string;
1039
+ } {
1040
+ return {
1041
+ query: task,
1042
+ limit,
1043
+ instruction:
1044
+ "Query semantic-memory for relevant past learnings about similar tasks before decomposition",
1045
+ };
1046
+ }
1047
+
1048
+ /**
1049
+ * Format memory validation hint when CASS history helped
1050
+ *
1051
+ * @param beadId - Bead ID that benefited from CASS
1052
+ * @returns Memory validation hint
1053
+ */
1054
+ export function formatMemoryValidationHint(beadId: string): {
1055
+ instruction: string;
1056
+ context: string;
1057
+ } {
1058
+ return {
1059
+ instruction:
1060
+ "If any semantic-memory entries helped with this task, validate them to reset decay timer",
1061
+ context: `Task ${beadId} completed successfully with assistance from past learnings`,
1062
+ };
1063
+ }
1064
+
959
1065
  // ============================================================================
960
1066
  // Exports
961
1067
  // ============================================================================
@@ -12,6 +12,16 @@
12
12
  import { z } from "zod";
13
13
  import { calculateDecayedValue } from "./learning";
14
14
 
15
+ // ============================================================================
16
+ // Constants
17
+ // ============================================================================
18
+
19
+ /**
20
+ * Tolerance for floating-point comparisons.
21
+ * Used when comparing success rates to avoid floating-point precision issues.
22
+ */
23
+ const FLOAT_EPSILON = 0.01;
24
+
15
25
  // ============================================================================
16
26
  // Schemas
17
27
  // ============================================================================
@@ -164,26 +174,26 @@ export function calculateMaturityState(
164
174
  );
165
175
 
166
176
  const total = decayedHelpful + decayedHarmful;
167
- const epsilon = 0.01; // Float comparison tolerance
168
- const safeTotal = total > epsilon ? total : 0;
177
+ // Use FLOAT_EPSILON constant (defined at module level)
178
+ const safeTotal = total > FLOAT_EPSILON ? total : 0;
169
179
  const harmfulRatio = safeTotal > 0 ? decayedHarmful / safeTotal : 0;
170
180
 
171
181
  // Deprecated: high harmful ratio with enough feedback
172
182
  if (
173
183
  harmfulRatio > config.deprecationThreshold &&
174
- safeTotal >= config.minFeedback - epsilon
184
+ safeTotal >= config.minFeedback - FLOAT_EPSILON
175
185
  ) {
176
186
  return "deprecated";
177
187
  }
178
188
 
179
189
  // Candidate: not enough feedback yet
180
- if (safeTotal < config.minFeedback - epsilon) {
190
+ if (safeTotal < config.minFeedback - FLOAT_EPSILON) {
181
191
  return "candidate";
182
192
  }
183
193
 
184
194
  // Proven: strong positive signal
185
195
  if (
186
- decayedHelpful >= config.minHelpful - epsilon &&
196
+ decayedHelpful >= config.minHelpful - FLOAT_EPSILON &&
187
197
  harmfulRatio < config.maxHarmful
188
198
  ) {
189
199
  return "proven";
@@ -210,14 +220,23 @@ export function createPatternMaturity(patternId: string): PatternMaturity {
210
220
  }
211
221
 
212
222
  /**
213
- * Update pattern maturity with new feedback
223
+ * Update pattern maturity with new feedback.
224
+ *
225
+ * Side Effects:
226
+ * - Sets `promoted_at` timestamp on first entry into 'proven' status
227
+ * - Sets `deprecated_at` timestamp on first entry into 'deprecated' status
228
+ * - Updates `helpful_count` and `harmful_count` based on feedback events
229
+ * - Recalculates `state` based on decayed feedback counts
214
230
  *
215
- * Records feedback, updates counts, and recalculates state.
231
+ * State Transitions:
232
+ * - candidate → established: After minFeedback observations (default 3)
233
+ * - established → proven: When decayedHelpful >= minHelpful (5) AND harmfulRatio < maxHarmful (15%)
234
+ * - any → deprecated: When harmfulRatio > deprecationThreshold (30%) AND total >= minFeedback
216
235
  *
217
236
  * @param maturity - Current maturity record
218
237
  * @param feedbackEvents - All feedback events for this pattern
219
238
  * @param config - Maturity configuration
220
- * @returns Updated maturity record
239
+ * @returns Updated maturity record with new state
221
240
  */
222
241
  export function updatePatternMaturity(
223
242
  maturity: PatternMaturity,
@@ -269,7 +288,16 @@ export function promotePattern(maturity: PatternMaturity): PatternMaturity {
269
288
  }
270
289
 
271
290
  if (maturity.state === "proven") {
272
- return maturity; // Already proven
291
+ console.warn(
292
+ `[PatternMaturity] Pattern already proven: ${maturity.pattern_id}`,
293
+ );
294
+ return maturity; // No-op but warn
295
+ }
296
+
297
+ if (maturity.state === "candidate" && maturity.helpful_count < 3) {
298
+ console.warn(
299
+ `[PatternMaturity] Promoting candidate with insufficient data: ${maturity.pattern_id} (${maturity.helpful_count} helpful observations)`,
300
+ );
273
301
  }
274
302
 
275
303
  const now = new Date().toISOString();
@@ -309,12 +337,16 @@ export function deprecatePattern(
309
337
  }
310
338
 
311
339
  /**
312
- * Get maturity score multiplier for pattern ranking
340
+ * Get weight multiplier based on pattern maturity status.
313
341
  *
314
- * Higher maturity patterns should be weighted more heavily.
342
+ * Multipliers chosen to:
343
+ * - Heavily penalize deprecated patterns (0x) - never recommend
344
+ * - Slightly boost proven patterns (1.5x) - reward validated success
345
+ * - Penalize unvalidated candidates (0.5x) - reduce impact until proven
346
+ * - Neutral for established (1.0x) - baseline weight
315
347
  *
316
- * @param state - Maturity state
317
- * @returns Score multiplier (0-1.5)
348
+ * @param state - Pattern maturity status
349
+ * @returns Multiplier to apply to pattern weight
318
350
  */
319
351
  export function getMaturityMultiplier(state: MaturityState): number {
320
352
  const multipliers: Record<MaturityState, number> = {
@@ -336,6 +368,12 @@ export function getMaturityMultiplier(state: MaturityState): number {
336
368
  */
337
369
  export function formatMaturityForPrompt(maturity: PatternMaturity): string {
338
370
  const total = maturity.helpful_count + maturity.harmful_count;
371
+
372
+ // Don't show percentages for insufficient data
373
+ if (total < 3) {
374
+ return `[LIMITED DATA - ${total} observation${total !== 1 ? "s" : ""}]`;
375
+ }
376
+
339
377
  const harmfulRatio =
340
378
  total > 0 ? Math.round((maturity.harmful_count / total) * 100) : 0;
341
379
  const helpfulRatio =
package/src/plugin.ts CHANGED
@@ -1,9 +1,21 @@
1
1
  /**
2
2
  * OpenCode Plugin Entry Point
3
3
  *
4
- * This file ONLY exports the plugin function.
5
- * The plugin loader iterates over all exports and calls them as functions,
6
- * so we cannot export anything else here (classes, constants, types, etc.)
4
+ * CRITICAL: Only export the plugin function from this file.
5
+ *
6
+ * OpenCode's plugin loader calls ALL exports as functions during initialization.
7
+ * Exporting classes, constants, or non-function values will cause the plugin
8
+ * to fail to load with cryptic errors.
9
+ *
10
+ * If you need to export utilities for external use, add them to src/index.ts instead.
11
+ *
12
+ * @example
13
+ * // ✅ CORRECT - only export the plugin function
14
+ * export default SwarmPlugin;
15
+ *
16
+ * // ❌ WRONG - will break plugin loading
17
+ * export const VERSION = "1.0.0";
18
+ * export class Helper {}
7
19
  */
8
20
  import { SwarmPlugin } from "./index";
9
21
 
@@ -35,12 +35,15 @@ import { homedir } from "node:os";
35
35
  // SQLite is optional - only available in Bun runtime
36
36
  // We use dynamic import to avoid breaking Node.js environments
37
37
  interface BunDatabase {
38
- run(sql: string, params?: unknown[]): void;
38
+ run(
39
+ sql: string,
40
+ params?: unknown[],
41
+ ): { changes: number; lastInsertRowid: number };
39
42
  query<T>(sql: string): {
40
43
  get(...params: unknown[]): T | null;
41
44
  };
42
45
  prepare(sql: string): {
43
- run(...params: unknown[]): void;
46
+ run(...params: unknown[]): { changes: number; lastInsertRowid: number };
44
47
  };
45
48
  close(): void;
46
49
  }
@@ -453,6 +456,47 @@ export class SqliteRateLimiter implements RateLimiter {
453
456
  return { allowed, remaining, resetAt };
454
457
  }
455
458
 
459
+ /**
460
+ * Clean up old rate limit entries in bounded batches
461
+ *
462
+ * Limits cleanup to prevent blocking recordRequest on large datasets:
463
+ * - BATCH_SIZE: 1000 rows per iteration
464
+ * - MAX_BATCHES: 10 (max 10k rows per cleanup invocation)
465
+ *
466
+ * Stops early if fewer than BATCH_SIZE rows deleted (no more to clean).
467
+ */
468
+ private cleanup(): void {
469
+ const BATCH_SIZE = 1000;
470
+ const MAX_BATCHES = 10;
471
+ const cutoff = Date.now() - 7_200_000; // 2 hours
472
+
473
+ let totalDeleted = 0;
474
+
475
+ // Run bounded batches
476
+ for (let i = 0; i < MAX_BATCHES; i++) {
477
+ const result = this.db.run(
478
+ `DELETE FROM rate_limits
479
+ WHERE rowid IN (
480
+ SELECT rowid FROM rate_limits
481
+ WHERE timestamp < ?
482
+ LIMIT ?
483
+ )`,
484
+ [cutoff, BATCH_SIZE],
485
+ );
486
+
487
+ totalDeleted += result.changes;
488
+
489
+ // Stop if we deleted less than batch size (no more to delete)
490
+ if (result.changes < BATCH_SIZE) break;
491
+ }
492
+
493
+ if (totalDeleted > 0) {
494
+ console.log("[RateLimiter] Cleanup completed:", {
495
+ deletedRows: totalDeleted,
496
+ });
497
+ }
498
+ }
499
+
456
500
  async recordRequest(agentName: string, endpoint: string): Promise<void> {
457
501
  const now = Date.now();
458
502
 
@@ -465,9 +509,9 @@ export class SqliteRateLimiter implements RateLimiter {
465
509
  stmt.run(agentName, endpoint, "hour", now);
466
510
 
467
511
  // Opportunistic cleanup of old entries (1% chance to avoid overhead)
512
+ // Now bounded to prevent blocking on large datasets
468
513
  if (Math.random() < 0.01) {
469
- const cutoff = Date.now() - 7_200_000;
470
- this.db.run(`DELETE FROM rate_limits WHERE timestamp < ?`, [cutoff]);
514
+ this.cleanup();
471
515
  }
472
516
  }
473
517
 
@@ -42,19 +42,42 @@ export type BeadDependency = z.infer<typeof BeadDependencySchema>;
42
42
  * - Custom subtask: `{project}-{custom-id}.{suffix}` (e.g., `migrate-egghead-phase-0.e2e-test`)
43
43
  */
44
44
  export const BeadSchema = z.object({
45
+ /**
46
+ * Bead ID format: project-slug-hash with optional subtask index.
47
+ *
48
+ * Pattern: `project-name-xxxxx` or `project-name-xxxxx.N`
49
+ * Examples:
50
+ * - `my-project-abc12` (main bead)
51
+ * - `my-project-abc12.1` (first subtask)
52
+ * - `my-project-abc12.2` (second subtask)
53
+ */
45
54
  id: z
46
55
  .string()
47
- .regex(/^[a-z0-9]+(-[a-z0-9]+)+(\.[\w-]+)?$/, "Invalid bead ID format"),
56
+ .regex(
57
+ /^[a-z0-9]+(-[a-z0-9]+)+(\.[\w-]+)?$/,
58
+ "Invalid bead ID format (expected: project-slug-hash or project-slug-hash.N)",
59
+ ),
48
60
  title: z.string().min(1, "Title required"),
49
61
  description: z.string().optional().default(""),
50
62
  status: BeadStatusSchema.default("open"),
51
63
  priority: z.number().int().min(0).max(3).default(2),
52
64
  issue_type: BeadTypeSchema.default("task"),
53
- created_at: z.string().datetime({ offset: true }), // ISO-8601 with timezone offset
54
- updated_at: z.string().datetime({ offset: true }).optional(),
65
+ created_at: z.string().datetime({
66
+ offset: true,
67
+ message:
68
+ "Must be ISO-8601 datetime with timezone (e.g., 2024-01-15T10:30:00Z)",
69
+ }),
70
+ updated_at: z
71
+ .string()
72
+ .datetime({
73
+ offset: true,
74
+ message:
75
+ "Must be ISO-8601 datetime with timezone (e.g., 2024-01-15T10:30:00Z)",
76
+ })
77
+ .optional(),
55
78
  closed_at: z.string().datetime({ offset: true }).optional(),
56
79
  parent_id: z.string().optional(),
57
- dependencies: z.array(BeadDependencySchema).optional().default([]),
80
+ dependencies: z.array(BeadDependencySchema).default([]),
58
81
  metadata: z.record(z.string(), z.unknown()).optional(),
59
82
  });
60
83
  export type Bead = z.infer<typeof BeadSchema>;
@@ -111,6 +134,14 @@ export const SubtaskSpecSchema = z.object({
111
134
  description: z.string().optional().default(""),
112
135
  files: z.array(z.string()).default([]),
113
136
  dependencies: z.array(z.number().int().min(0)).default([]), // Indices of other subtasks
137
+ /**
138
+ * Complexity estimate on 1-5 scale:
139
+ * 1 = trivial (typo fix, simple rename)
140
+ * 2 = simple (single function change)
141
+ * 3 = moderate (multi-file, some coordination)
142
+ * 4 = complex (significant refactoring)
143
+ * 5 = very complex (architectural change)
144
+ */
114
145
  estimated_complexity: z.number().int().min(1).max(5).default(3),
115
146
  });
116
147
  export type SubtaskSpec = z.infer<typeof SubtaskSpecSchema>;
@@ -12,9 +12,15 @@
12
12
  import { z } from "zod";
13
13
 
14
14
  /**
15
- * Single criterion evaluation
15
+ * Evaluation of a single criterion.
16
16
  *
17
- * Each criterion (type_safe, no_bugs, etc.) gets its own evaluation.
17
+ * @example
18
+ * // Passing criterion
19
+ * { passed: true, feedback: "All types validated", score: 0.95 }
20
+ *
21
+ * @example
22
+ * // Failing criterion
23
+ * { passed: false, feedback: "Missing error handling in auth flow", score: 0.3 }
18
24
  */
19
25
  export const CriterionEvaluationSchema = z.object({
20
26
  passed: z.boolean(),
@@ -31,7 +37,11 @@ export type CriterionEvaluation = z.infer<typeof CriterionEvaluationSchema>;
31
37
  */
32
38
  export const WeightedCriterionEvaluationSchema =
33
39
  CriterionEvaluationSchema.extend({
34
- /** Current weight after decay (0-1, lower = less reliable) */
40
+ /**
41
+ * Current weight after 90-day half-life decay.
42
+ * Range: 0-1 where 1 = recent/validated, 0 = old/unreliable.
43
+ * Weights decay over time unless revalidated via semantic-memory_validate.
44
+ */
35
45
  weight: z.number().min(0).max(1).default(1),
36
46
  /** Weighted score = score * weight */
37
47
  weighted_score: z.number().min(0).max(1).optional(),
@@ -75,9 +85,11 @@ export type DefaultCriterion = (typeof DEFAULT_CRITERIA)[number];
75
85
  * Evaluation request arguments
76
86
  */
77
87
  export const EvaluationRequestSchema = z.object({
78
- subtask_id: z.string(),
79
- criteria: z.array(z.string()).default([...DEFAULT_CRITERIA]),
80
- context: z.string().optional(),
88
+ bead_id: z.string(),
89
+ subtask_title: z.string(),
90
+ files_touched: z.array(z.string()),
91
+ /** ISO-8601 timestamp when evaluation was requested */
92
+ requested_at: z.string().datetime().optional(),
81
93
  });
82
94
  export type EvaluationRequest = z.infer<typeof EvaluationRequestSchema>;
83
95
 
@@ -1,7 +1,30 @@
1
1
  /**
2
- * Schema exports
2
+ * Schema Definitions - Central export point for all Zod schemas
3
3
  *
4
- * Re-export all schemas for convenient importing.
4
+ * This module re-exports all schema definitions used throughout the plugin.
5
+ * Schemas are organized by domain:
6
+ *
7
+ * ## Bead Schemas (Issue Tracking)
8
+ * - `BeadSchema` - Core bead/issue definition
9
+ * - `BeadStatusSchema` - Status enum (open, in_progress, blocked, closed)
10
+ * - `BeadTypeSchema` - Type enum (bug, feature, task, epic, chore)
11
+ * - `SubtaskSpecSchema` - Subtask specification for epic creation
12
+ *
13
+ * ## Task Schemas (Swarm Decomposition)
14
+ * - `TaskDecompositionSchema` - Full task breakdown
15
+ * - `DecomposedSubtaskSchema` - Individual subtask definition
16
+ * - `BeadTreeSchema` - Epic + subtasks structure
17
+ *
18
+ * ## Evaluation Schemas (Agent Self-Assessment)
19
+ * - `EvaluationSchema` - Complete evaluation with criteria
20
+ * - `CriterionEvaluationSchema` - Single criterion result
21
+ *
22
+ * ## Progress Schemas (Swarm Coordination)
23
+ * - `SwarmStatusSchema` - Overall swarm progress
24
+ * - `AgentProgressSchema` - Individual agent status
25
+ * - `SpawnedAgentSchema` - Spawned agent metadata
26
+ *
27
+ * @module schemas
5
28
  */
6
29
 
7
30
  // Bead schemas
@@ -7,13 +7,19 @@
7
7
  import { z } from "zod";
8
8
 
9
9
  /**
10
- * Effort estimation levels
10
+ * Effort estimation for subtasks.
11
+ *
12
+ * Time ranges:
13
+ * - `trivial`: < 5 minutes (simple rename, typo fix)
14
+ * - `small`: 5-30 minutes (single function, simple feature)
15
+ * - `medium`: 30 min - 2 hours (multi-file change, moderate complexity)
16
+ * - `large`: 2+ hours (significant feature, refactoring)
11
17
  */
12
18
  export const EffortLevelSchema = z.enum([
13
- "trivial", // < 5 min
14
- "small", // 5-30 min
15
- "medium", // 30 min - 2 hours
16
- "large", // 2+ hours
19
+ "trivial",
20
+ "small",
21
+ "medium",
22
+ "large",
17
23
  ]);
18
24
  export type EffortLevel = z.infer<typeof EffortLevelSchema>;
19
25
 
@@ -35,6 +41,7 @@ export const DecomposedSubtaskSchema = z.object({
35
41
  description: z.string(),
36
42
  files: z.array(z.string()), // File paths this subtask will modify
37
43
  estimated_effort: EffortLevelSchema,
44
+ /** Potential risks or complications (e.g., 'tight coupling', 'data migration required', 'breaking change') */
38
45
  risks: z.array(z.string()).optional().default([]),
39
46
  });
40
47
  export type DecomposedSubtask = z.infer<typeof DecomposedSubtaskSchema>;
@@ -43,8 +50,10 @@ export type DecomposedSubtask = z.infer<typeof DecomposedSubtaskSchema>;
43
50
  * Dependency between subtasks
44
51
  */
45
52
  export const SubtaskDependencySchema = z.object({
46
- from: z.number().int().min(0), // Subtask index
47
- to: z.number().int().min(0), // Subtask index
53
+ /** Zero-based index of the dependency source subtask */
54
+ from: z.number().int().min(0),
55
+ /** Zero-based index of the dependency target subtask */
56
+ to: z.number().int().min(0),
48
57
  type: DependencyTypeSchema,
49
58
  });
50
59
  export type SubtaskDependency = z.infer<typeof SubtaskDependencySchema>;
@@ -56,10 +65,15 @@ export type SubtaskDependency = z.infer<typeof SubtaskDependencySchema>;
56
65
  */
57
66
  export const TaskDecompositionSchema = z.object({
58
67
  task: z.string(), // Original task description
59
- reasoning: z.string().optional(), // Why this decomposition
68
+ /** Rationale for this decomposition strategy (why these subtasks, why this order) */
69
+ reasoning: z.string().optional(),
60
70
  subtasks: z.array(DecomposedSubtaskSchema).min(1),
61
71
  dependencies: z.array(SubtaskDependencySchema).optional().default([]),
62
- shared_context: z.string().optional(), // Context to pass to all agents
72
+ /**
73
+ * Context shared with all spawned agents.
74
+ * Examples: API contracts, shared types, project conventions, architectural decisions.
75
+ */
76
+ shared_context: z.string().optional(),
63
77
  });
64
78
  export type TaskDecomposition = z.infer<typeof TaskDecompositionSchema>;
65
79
 
@@ -78,11 +92,19 @@ export type DecomposeArgs = z.infer<typeof DecomposeArgsSchema>;
78
92
  */
79
93
  export const SpawnedAgentSchema = z.object({
80
94
  bead_id: z.string(),
81
- agent_name: z.string(), // Agent Mail name (e.g., "BlueLake")
95
+ /**
96
+ * Agent Mail assigned name (e.g., 'BlueLake', 'CrimsonRiver').
97
+ * Generated by Agent Mail on session init.
98
+ */
99
+ agent_name: z.string(),
82
100
  task_id: z.string().optional(), // OpenCode task ID
83
101
  status: z.enum(["pending", "running", "completed", "failed"]),
84
102
  files: z.array(z.string()), // Reserved files
85
- reservation_ids: z.array(z.number()).optional(), // Agent Mail reservation IDs
103
+ /**
104
+ * Agent Mail reservation IDs for file locking.
105
+ * Used to release locks on task completion via agentmail_release.
106
+ */
107
+ reservation_ids: z.array(z.number()).optional(),
86
108
  });
87
109
  export type SpawnedAgent = z.infer<typeof SpawnedAgentSchema>;
88
110
 
@@ -101,16 +123,22 @@ export type SwarmSpawnResult = z.infer<typeof SwarmSpawnResultSchema>;
101
123
  /**
102
124
  * Progress update from an agent
103
125
  */
104
- export const AgentProgressSchema = z.object({
105
- bead_id: z.string(),
106
- agent_name: z.string(),
107
- status: z.enum(["in_progress", "blocked", "completed", "failed"]),
108
- progress_percent: z.number().min(0).max(100).optional(),
109
- message: z.string().optional(),
110
- files_touched: z.array(z.string()).optional(),
111
- blockers: z.array(z.string()).optional(),
112
- timestamp: z.string().datetime({ offset: true }), // ISO-8601 with timezone
113
- });
126
+ export const AgentProgressSchema = z
127
+ .object({
128
+ bead_id: z.string(),
129
+ agent_name: z.string(),
130
+ status: z.enum(["in_progress", "blocked", "completed", "failed"]),
131
+ progress_percent: z.number().min(0).max(100).optional(),
132
+ message: z.string().optional(),
133
+ files_touched: z.array(z.string()).optional(),
134
+ blockers: z.array(z.string()).optional(),
135
+ timestamp: z.string().datetime({ offset: true }), // ISO-8601 with timezone
136
+ })
137
+ .refine(
138
+ (data) =>
139
+ data.status !== "blocked" || (data.blockers && data.blockers.length > 0),
140
+ { message: "blockers array required when status is 'blocked'" },
141
+ );
114
142
  export type AgentProgress = z.infer<typeof AgentProgressSchema>;
115
143
 
116
144
  /**