opencodekit 0.16.4 → 0.16.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. package/dist/index.js +1 -1
  2. package/dist/template/.opencode/AGENTS.md +106 -384
  3. package/dist/template/.opencode/README.md +170 -104
  4. package/dist/template/.opencode/agent/build.md +39 -32
  5. package/dist/template/.opencode/agent/explore.md +2 -0
  6. package/dist/template/.opencode/agent/review.md +3 -0
  7. package/dist/template/.opencode/agent/scout.md +22 -11
  8. package/dist/template/.opencode/command/create.md +164 -106
  9. package/dist/template/.opencode/command/design.md +5 -1
  10. package/dist/template/.opencode/command/handoff.md +6 -4
  11. package/dist/template/.opencode/command/init.md +1 -1
  12. package/dist/template/.opencode/command/plan.md +26 -23
  13. package/dist/template/.opencode/command/research.md +13 -6
  14. package/dist/template/.opencode/command/resume.md +8 -6
  15. package/dist/template/.opencode/command/ship.md +1 -1
  16. package/dist/template/.opencode/command/start.md +30 -25
  17. package/dist/template/.opencode/command/status.md +9 -42
  18. package/dist/template/.opencode/command/verify.md +11 -11
  19. package/dist/template/.opencode/memory/README.md +67 -37
  20. package/dist/template/.opencode/memory/_templates/prd.md +102 -18
  21. package/dist/template/.opencode/memory/project/gotchas.md +31 -0
  22. package/dist/template/.opencode/memory.db +0 -0
  23. package/dist/template/.opencode/memory.db-shm +0 -0
  24. package/dist/template/.opencode/memory.db-wal +0 -0
  25. package/dist/template/.opencode/opencode.json +0 -10
  26. package/dist/template/.opencode/package.json +1 -1
  27. package/dist/template/.opencode/skill/beads/SKILL.md +164 -380
  28. package/dist/template/.opencode/skill/beads/references/BOUNDARIES.md +23 -22
  29. package/dist/template/.opencode/skill/beads/references/DEPENDENCIES.md +23 -29
  30. package/dist/template/.opencode/skill/beads/references/RESUMABILITY.md +5 -8
  31. package/dist/template/.opencode/skill/beads/references/WORKFLOWS.md +43 -39
  32. package/dist/template/.opencode/skill/beads-bridge/SKILL.md +80 -53
  33. package/dist/template/.opencode/skill/brainstorming/SKILL.md +19 -5
  34. package/dist/template/.opencode/skill/context-engineering/SKILL.md +30 -63
  35. package/dist/template/.opencode/skill/context-management/SKILL.md +115 -0
  36. package/dist/template/.opencode/skill/deep-research/SKILL.md +4 -4
  37. package/dist/template/.opencode/skill/development-lifecycle/SKILL.md +305 -0
  38. package/dist/template/.opencode/skill/memory-system/SKILL.md +3 -3
  39. package/dist/template/.opencode/skill/prd/SKILL.md +47 -122
  40. package/dist/template/.opencode/skill/prd-task/SKILL.md +48 -4
  41. package/dist/template/.opencode/skill/prd-task/references/prd-schema.json +120 -24
  42. package/dist/template/.opencode/skill/swarm-coordination/SKILL.md +79 -61
  43. package/dist/template/.opencode/skill/tool-priority/SKILL.md +31 -22
  44. package/dist/template/.opencode/tool/context7.ts +183 -0
  45. package/dist/template/.opencode/tool/memory-admin.ts +445 -0
  46. package/dist/template/.opencode/tool/swarm.ts +572 -0
  47. package/package.json +1 -1
  48. package/dist/template/.opencode/memory/_templates/spec.md +0 -66
  49. package/dist/template/.opencode/tool/beads-sync.ts +0 -657
  50. package/dist/template/.opencode/tool/context7-query-docs.ts +0 -89
  51. package/dist/template/.opencode/tool/context7-resolve-library-id.ts +0 -113
  52. package/dist/template/.opencode/tool/memory-maintain.ts +0 -167
  53. package/dist/template/.opencode/tool/memory-migrate.ts +0 -319
  54. package/dist/template/.opencode/tool/swarm-delegate.ts +0 -180
  55. package/dist/template/.opencode/tool/swarm-monitor.ts +0 -388
  56. package/dist/template/.opencode/tool/swarm-plan.ts +0 -697
@@ -1,697 +0,0 @@
1
- import { tool } from "@opencode-ai/plugin";
2
-
3
/** One stage of an orchestration plan (explore → generate → reflect → …). */
interface Phase {
  name: string;
  // Agent role used for the workers executing this phase.
  role: "explore" | "general" | "review" | "plan";
  agent_count: number;
  // Names of phases that must finish before this phase may start.
  dependencies: string[];
}

/** A unit of work assigned to a single worker within a phase. */
interface TaskNode {
  id: string;
  content: string;
  phase: string;
  worker: string;
  status: "pending" | "in_progress" | "completed";
  priority: "high" | "medium" | "low";
  // Task ids that must complete before this task (incoming edges).
  blockedBy: string[];
  // Task ids that this task's completion unblocks (outgoing edges).
  blocks: string[];
  assignedFiles: string[];
}

/** DAG over TaskNodes plus derived scheduling information. */
interface DependencyGraph {
  nodes: TaskNode[];
  edges: Array<{ from: string; to: string; type: "blocks" | "blocked_by" }>;
  // Longest dependency chain, in execution order.
  critical_path: string[];
  // Generations of task ids that can run simultaneously.
  parallelizable_groups: string[][];
}

/** Result of the heuristic task classification. */
interface TaskClassification {
  type: "search" | "batch" | "writing" | "sequential" | "mixed";
  coupling: "high" | "medium" | "low";
  recommended_agents: number;
  phases: Phase[];
  reasoning: string;
  confidence: "high" | "medium" | "low";
}

/** Outcome of the anti-serial-collapse heuristic. */
interface SerialCollapseCheck {
  is_serial_collapse: boolean;
  warning_signs: string[];
  severity: "high" | "medium" | "none";
  recommendation: string;
}

/** Full plan returned by the "analyze" operation. */
interface OrchestrationPlan {
  task: string;
  file_count: number;
  classification: TaskClassification;
  serial_collapse_check: SerialCollapseCheck;
  allocation: {
    initial_agents: number;
    max_agents: number;
    strategy: string;
  };
  dependency_graph: DependencyGraph;
  recommendation: string;
}
58
-
59
// Tool entry point: registers the swarm-plan tool with the opencode plugin
// runtime. All heavy lifting is delegated to the operation helpers below;
// each one returns a JSON string for the model to consume.
export default tool({
  description: `Swarm planning and orchestration with Kimi K2.5 PARL patterns.

Combines task classification, anti-serial-collapse detection, and dynamic agent allocation.

Operations:
analyze: Full task analysis with classification and recommendations
classify: Quick task classification only
check: Check for serial collapse in current plan
allocate: Get dynamic agent allocation strategy

Usage (pass the user's actual request):
swarm-plan({ operation: "analyze", task: "<user's request>", files: "<detected files>" })
swarm-plan({ operation: "check", task: "<user's request>", files: "<file count>", recommended_agents: <current> })

Worker skills (load these in worker prompt):
skill({ name: "verification-before-completion" }) // Required: verify before claiming done
skill({ name: "test-driven-development" }) // Recommended: test first, implement, verify
skill({ name: "tool-priority" }) // Recommended: LSP before edits

Worker progress reporting (simplified flow - Task tool returns results directly):
swarm-monitor({ operation: "progress_update", team_name: "<team>", worker_id: "<id>", phase: "<phase>", progress: 0-100, status: "working|completed|error", file: "<current-file>" })`,

  args: {
    // Which planning operation to run; see the description above.
    operation: tool.schema
      .enum(["analyze", "classify", "check", "allocate"])
      .describe("Planning operation to perform"),
    task: tool.schema.string().describe("Task description"),
    // Either a comma-separated path list or a bare numeric count.
    files: tool.schema
      .string()
      .optional()
      .describe("Comma-separated file paths or file count"),
    recommended_agents: tool.schema
      .number()
      .optional()
      .describe("Current agent recommendation (for check operation)"),
    current_phases: tool.schema
      .string()
      .optional()
      .describe("Current phases JSON (for check operation)"),
    context: tool.schema
      .string()
      .optional()
      .describe("Additional context about the task"),
  },

  // Dispatch to the matching operation helper. Unknown operations return
  // an error string rather than throwing, so the caller always gets text.
  execute: async (args) => {
    switch (args.operation) {
      case "analyze":
        return await analyzeOperation(args.task, args.files);
      case "classify":
        return await classifyOperation(args.task, args.files);
      case "check":
        return await checkOperation(
          args.task,
          args.files,
          args.recommended_agents,
          args.current_phases,
        );
      case "allocate":
        return await allocateOperation(args.task, args.files);
      default:
        return `Error: Unknown operation: ${args.operation}`;
    }
  },
});
125
-
126
- async function analyzeOperation(task: string, files?: string): Promise<string> {
127
- const fileList = files?.split(",").filter(Boolean) || [];
128
- const fileCount = Number.parseInt(files || "0") || fileList.length;
129
-
130
- // Get classification
131
- const classification = classifyTask(task, fileList);
132
-
133
- // Check for serial collapse
134
- const collapseCheck = detectSerialCollapse(
135
- task,
136
- fileCount,
137
- classification.recommended_agents,
138
- );
139
-
140
- // Get allocation strategy
141
- const allocation = calculateAllocation(fileCount);
142
-
143
- // Build dependency graph
144
- const dependencyGraph = buildDependencyGraph(
145
- classification.phases,
146
- fileList,
147
- task,
148
- );
149
-
150
- // Build recommendation
151
- let recommendation: string;
152
- if (collapseCheck.is_serial_collapse) {
153
- recommendation = `Use swarm mode with ${Math.min(fileCount, 5)} agents (serial collapse detected)`;
154
- } else if (classification.recommended_agents > 1) {
155
- recommendation = `Use swarm mode with ${classification.recommended_agents} agents`;
156
- } else {
157
- recommendation = "Single agent sufficient";
158
- }
159
-
160
- const plan: OrchestrationPlan = {
161
- task,
162
- file_count: fileCount,
163
- classification,
164
- serial_collapse_check: collapseCheck,
165
- allocation,
166
- dependency_graph: dependencyGraph,
167
- recommendation,
168
- };
169
-
170
- return JSON.stringify(plan, null, 2);
171
- }
172
-
173
- async function classifyOperation(
174
- task: string,
175
- files?: string,
176
- ): Promise<string> {
177
- const fileList = files?.split(",").filter(Boolean) || [];
178
- const classification = classifyTask(task, fileList);
179
- return JSON.stringify(classification, null, 2);
180
- }
181
-
182
- async function checkOperation(
183
- task: string,
184
- files?: string,
185
- recommendedAgents?: number,
186
- currentPhases?: string,
187
- ): Promise<string> {
188
- const fileCount =
189
- Number.parseInt(files || "0") || files?.split(",").length || 0;
190
- const agents = recommendedAgents || 1;
191
-
192
- const collapseCheck = detectSerialCollapse(task, fileCount, agents);
193
-
194
- // Additional phase check
195
- let phaseCheck: { issue: string; suggestion: string } | null = null;
196
- if (currentPhases) {
197
- try {
198
- const phases = JSON.parse(currentPhases);
199
- const allSingleAgent = phases.every((p: any) => p.agent_count === 1);
200
- const noParallelPhases =
201
- phases.filter((p: any) => p.agent_count > 1).length === 0;
202
-
203
- if (allSingleAgent && noParallelPhases && fileCount > 3) {
204
- phaseCheck = {
205
- issue: "All phases use single agent despite multiple files",
206
- suggestion: "Add parallel exploration phase",
207
- };
208
- }
209
- } catch {
210
- // Invalid phases JSON
211
- }
212
- }
213
-
214
- return JSON.stringify(
215
- {
216
- task,
217
- file_count: fileCount,
218
- recommended_agents: agents,
219
- serial_collapse_detected: collapseCheck.is_serial_collapse,
220
- warning_signs: collapseCheck.warning_signs,
221
- severity: collapseCheck.severity,
222
- phase_check: phaseCheck,
223
- action_required: collapseCheck.is_serial_collapse || phaseCheck !== null,
224
- suggested_agents: collapseCheck.is_serial_collapse
225
- ? Math.min(fileCount, 5)
226
- : agents,
227
- },
228
- null,
229
- 2,
230
- );
231
- }
232
-
233
- async function allocateOperation(
234
- task: string,
235
- files?: string,
236
- ): Promise<string> {
237
- const fileCount =
238
- Number.parseInt(files || "0") || files?.split(",").length || 0;
239
- const allocation = calculateAllocation(fileCount);
240
-
241
- return JSON.stringify(
242
- {
243
- task,
244
- file_count: fileCount,
245
- allocation,
246
- scaling_rules: [
247
- "Scale up if: Bottleneck detected (1 agent at 100%, others idle)",
248
- "Scale up if: New files discovered during execution",
249
- "Scale down if: Error rate exceeds 20%",
250
- "Scale down if: File conflicts detected between workers",
251
- ],
252
- recommendation: `Start with ${allocation.initial_agents} agents, scale up to ${allocation.max_agents} if needed`,
253
- },
254
- null,
255
- 2,
256
- );
257
- }
258
-
259
// Heuristic task classifier. The task description is matched against
// category regexes in priority order (search → batch → writing →
// sequential → mixed); the first matching branch wins, so branch order
// is significant.
function classifyTask(task: string, files: string[]): TaskClassification {
  // Search indicators
  const searchPatterns =
    /research|find|search|explore|investigate|compare|analyze.*codebase|look.*for|discover/i;

  // Writing indicators
  const writingPatterns =
    /write|create|document|generate|draft|author|compose/i;

  // Batch indicators
  const batchPatterns =
    /refactor|update|migrate|convert.*all|batch|multiple.*files|every.*file|all.*components/i;
  const hasManyFiles = files.length > 3;

  // Sequential indicators
  const sequentialPatterns =
    /debug|fix.*issue|optimize|complex.*refactor|architecture|design.*pattern|troubleshoot/i;

  // Analyze coupling
  const coupling = analyzeCoupling(files);

  // Classification logic
  // Search: parallel fan-out of explorers, then one synthesis step.
  if (searchPatterns.test(task)) {
    return {
      type: "search",
      coupling: "low",
      recommended_agents: Math.min(Math.max(files.length, 3), 5),
      phases: [
        {
          name: "explore",
          role: "explore",
          // At least 3 explorers when no files were given; capped at 3.
          agent_count: Math.min(files.length || 3, 3),
          dependencies: [],
        },
        {
          name: "synthesize",
          role: "plan",
          agent_count: 1,
          dependencies: ["explore"],
        },
      ],
      reasoning:
        "Search tasks benefit from parallel exploration across multiple sources/domains",
      confidence: "high",
    };
  }

  // Batch: explore → generate (fan-out) → reflect → synthesize.
  if ((batchPatterns.test(task) || hasManyFiles) && files.length > 0) {
    return {
      type: "batch",
      coupling: coupling,
      recommended_agents: Math.min(files.length, 8),
      phases: [
        { name: "explore", role: "explore", agent_count: 2, dependencies: [] },
        {
          name: "generate",
          role: "general",
          agent_count: Math.min(files.length, 6),
          dependencies: ["explore"],
        },
        {
          name: "reflect",
          role: "review",
          agent_count: 2,
          dependencies: ["generate"],
        },
        {
          name: "synthesize",
          role: "plan",
          agent_count: 1,
          dependencies: ["reflect"],
        },
      ],
      reasoning: `Batch processing of ${files.length} files with ${coupling} coupling - parallel execution recommended`,
      confidence: coupling === "low" ? "high" : "medium",
    };
  }

  // Writing: parallelize by section, then a single review pass.
  if (writingPatterns.test(task) && files.length > 0) {
    return {
      type: "writing",
      coupling: coupling,
      recommended_agents: Math.min(Math.ceil(files.length / 2), 4),
      phases: [
        {
          name: "explore",
          role: "explore",
          agent_count: 1,
          dependencies: [],
        },
        {
          name: "generate",
          role: "general",
          agent_count: Math.min(files.length, 3),
          dependencies: ["explore"],
        },
        {
          name: "reflect",
          role: "review",
          agent_count: 1,
          dependencies: ["generate"],
        },
      ],
      reasoning:
        "Writing tasks parallelize by section when coupling is low-medium",
      confidence: coupling === "low" ? "high" : "medium",
    };
  }

  // Sequential: debugging or tightly coupled work stays single-agent.
  if (
    sequentialPatterns.test(task) ||
    coupling === "high" ||
    files.length <= 2
  ) {
    return {
      type: "sequential",
      coupling: "high",
      recommended_agents: 1,
      phases: [
        {
          name: "analyze",
          role: "explore",
          agent_count: 1,
          dependencies: [],
        },
        {
          name: "implement",
          role: "general",
          agent_count: 1,
          dependencies: ["analyze"],
        },
        {
          name: "verify",
          role: "review",
          agent_count: 1,
          dependencies: ["implement"],
        },
      ],
      reasoning:
        "High coupling or complex debugging requires sequential execution for coherence",
      confidence: "high",
    };
  }

  // Default: mixed approach
  return {
    type: "mixed",
    coupling: coupling,
    recommended_agents: Math.min(files.length || 2, 4),
    phases: [
      { name: "explore", role: "explore", agent_count: 2, dependencies: [] },
      {
        name: "implement",
        role: "general",
        agent_count: 2,
        dependencies: ["explore"],
      },
    ],
    reasoning:
      "Unclear task type - using conservative mixed approach with verification",
    confidence: "low",
  };
}
422
-
423
- function analyzeCoupling(files: string[]): "high" | "medium" | "low" {
424
- if (files.length <= 1) return "high";
425
- if (files.length <= 3) return "medium";
426
-
427
- // Check for shared directories (potential coupling)
428
- const dirs = files.map((f) => {
429
- const parts = f.split("/");
430
- return parts.slice(0, -1).join("/");
431
- });
432
- const uniqueDirs = new Set(dirs);
433
-
434
- if (uniqueDirs.size === 1) return "high"; // All in same directory
435
- if (uniqueDirs.size <= files.length / 2) return "medium";
436
- return "low";
437
- }
438
-
439
- function detectSerialCollapse(
440
- task: string,
441
- fileCount: number,
442
- recommendedAgents: number,
443
- ): SerialCollapseCheck {
444
- const warningSigns: string[] = [];
445
-
446
- // Sign 1: Many files but choosing 1 agent
447
- if (fileCount >= 5 && recommendedAgents === 1) {
448
- warningSigns.push(`${fileCount} files but single agent selected`);
449
- }
450
-
451
- // Sign 2: Search task with single agent
452
- if (/research|search|explore/i.test(task) && recommendedAgents === 1) {
453
- warningSigns.push("Search task with single agent");
454
- }
455
-
456
- // Sign 3: Batch operation with single agent
457
- if (
458
- /refactor.*all|update.*all|migrate.*all/i.test(task) &&
459
- recommendedAgents === 1
460
- ) {
461
- warningSigns.push("Batch operation with single agent");
462
- }
463
-
464
- // Sign 4: Explicit parallel keywords but 1 agent
465
- if (
466
- /parallel|concurrent|multiple.*at.*once/i.test(task) &&
467
- recommendedAgents === 1
468
- ) {
469
- warningSigns.push("Parallel keywords but single agent");
470
- }
471
-
472
- const isSerialCollapse =
473
- warningSigns.length >= 2 || (warningSigns.length === 1 && fileCount > 8);
474
-
475
- return {
476
- is_serial_collapse: isSerialCollapse,
477
- warning_signs: warningSigns,
478
- severity:
479
- warningSigns.length >= 2
480
- ? "high"
481
- : warningSigns.length === 1
482
- ? "medium"
483
- : "none",
484
- recommendation: isSerialCollapse
485
- ? `Increase to ${Math.min(fileCount, 5)} agents for parallel execution`
486
- : "Current allocation appropriate",
487
- };
488
- }
489
-
490
- function calculateAllocation(fileCount: number) {
491
- // Conservative start: 50% of what we might need, at least 1, at most 3
492
- const initialAgents = Math.max(1, Math.min(Math.ceil(fileCount * 0.5), 3));
493
-
494
- // Max based on reasonable limits
495
- const maxAgents = Math.min(fileCount, 10);
496
-
497
- return {
498
- initial_agents: initialAgents,
499
- max_agents: maxAgents,
500
- strategy: "Conservative start, scale based on bottlenecks",
501
- };
502
- }
503
-
504
- /**
505
- * Build a dependency graph from phases and files
506
- * This enables proper DAG-based task scheduling
507
- */
508
// Build a dependency graph from phases and files. One TaskNode is created
// per worker per phase; every task of a prerequisite phase blocks every
// task of the dependent phase, producing a layered DAG suitable for
// scheduling.
function buildDependencyGraph(
  phases: Phase[],
  files: string[],
  task: string,
): DependencyGraph {
  const nodes: TaskNode[] = [];
  const edges: Array<{
    from: string;
    to: string;
    type: "blocks" | "blocked_by";
  }> = [];

  // Distribute files across workers in each phase
  let taskIdCounter = 1;

  for (const phase of phases) {
    // NOTE(review): for "explore"/"synthesize" this is just the plain file
    // list and the assignedFiles branch below re-derives the same value;
    // phaseFiles is only actually consumed in the distributed (else) case.
    const phaseFiles =
      phase.name === "explore" || phase.name === "synthesize"
        ? files
        : distributeFiles(files, phase.agent_count);

    for (let i = 0; i < phase.agent_count; i++) {
      const workerId = `${phase.name}-worker-${i + 1}`;
      const taskId = `task-${taskIdCounter++}`;

      // Determine assigned files based on phase type
      let assignedFiles: string[];
      if (phase.name === "explore" || phase.name === "synthesize") {
        // These phases get all files
        assignedFiles = files;
      } else {
        // Other phases get distributed files
        const distributed = phaseFiles as string[][];
        assignedFiles = distributed[i] || [];
      }

      const node: TaskNode = {
        id: taskId,
        // Task text truncated to 50 chars so node labels stay readable.
        content: `${phase.name}: ${task.slice(0, 50)}${task.length > 50 ? "..." : ""}`,
        phase: phase.name,
        worker: workerId,
        status: "pending",
        priority: phase.name === "explore" ? "high" : "medium",
        blockedBy: [],
        blocks: [],
        assignedFiles,
      };

      // Add dependencies based on phase dependencies
      if (phase.dependencies.length > 0) {
        for (const dep of phase.dependencies) {
          // Find all tasks in the dependency phase
          const depTasks = nodes.filter((n) => n.phase === dep);
          for (const depTask of depTasks) {
            node.blockedBy.push(depTask.id);
            depTask.blocks.push(taskId);
            edges.push({ from: depTask.id, to: taskId, type: "blocks" });
          }
        }
      }

      nodes.push(node);
    }
  }

  // Calculate critical path (longest chain of dependencies)
  const criticalPath = calculateCriticalPath(nodes);

  // Find parallelizable groups (tasks that can run simultaneously)
  const parallelizableGroups = findParallelizableGroups(nodes);

  return {
    nodes,
    edges,
    critical_path: criticalPath,
    parallelizable_groups: parallelizableGroups,
  };
}
586
-
587
- /**
588
- * Distribute files across workers
589
- */
590
- function distributeFiles(files: string[], workerCount: number): string[][] {
591
- if (files.length === 0 || workerCount === 0) return [];
592
-
593
- const result: string[][] = Array.from({ length: workerCount }, () => []);
594
-
595
- files.forEach((file, index) => {
596
- result[index % workerCount].push(file);
597
- });
598
-
599
- return result;
600
- }
601
-
602
- /**
603
- * Calculate the critical path (longest dependency chain)
604
- */
605
- function calculateCriticalPath(nodes: TaskNode[]): string[] {
606
- const visited = new Set<string>();
607
- const pathLengths = new Map<string, number>();
608
- const pathParents = new Map<string, string | null>();
609
-
610
- // Find root nodes (no blockedBy)
611
- const roots = nodes.filter((n) => n.blockedBy.length === 0);
612
-
613
- // BFS to calculate distances
614
- const queue = [...roots];
615
- for (const root of roots) {
616
- pathLengths.set(root.id, 1);
617
- pathParents.set(root.id, null);
618
- }
619
-
620
- while (queue.length > 0) {
621
- const current = queue.shift()!;
622
- if (visited.has(current.id)) continue;
623
- visited.add(current.id);
624
-
625
- const currentLength = pathLengths.get(current.id) || 1;
626
-
627
- for (const blockedId of current.blocks) {
628
- const blocked = nodes.find((n) => n.id === blockedId);
629
- if (!blocked) continue;
630
-
631
- const newLength = currentLength + 1;
632
- const existingLength = pathLengths.get(blockedId) || 0;
633
-
634
- if (newLength > existingLength) {
635
- pathLengths.set(blockedId, newLength);
636
- pathParents.set(blockedId, current.id);
637
- }
638
-
639
- queue.push(blocked);
640
- }
641
- }
642
-
643
- // Find the node with longest path
644
- let maxLength = 0;
645
- let maxNode: string | null = null;
646
- for (const [nodeId, length] of pathLengths) {
647
- if (length > maxLength) {
648
- maxLength = length;
649
- maxNode = nodeId;
650
- }
651
- }
652
-
653
- // Reconstruct path
654
- const criticalPath: string[] = [];
655
- let current = maxNode;
656
- while (current) {
657
- criticalPath.unshift(current);
658
- current = pathParents.get(current) || null;
659
- }
660
-
661
- return criticalPath;
662
- }
663
-
664
- /**
665
- * Find groups of tasks that can run in parallel
666
- */
667
- function findParallelizableGroups(nodes: TaskNode[]): string[][] {
668
- const groups: string[][] = [];
669
- const assigned = new Set<string>();
670
-
671
- // Group by "generation" - tasks whose all dependencies are satisfied
672
- while (assigned.size < nodes.length) {
673
- const group: string[] = [];
674
-
675
- for (const node of nodes) {
676
- if (assigned.has(node.id)) continue;
677
-
678
- // Check if all blockedBy are already assigned
679
- const allDependenciesMet = node.blockedBy.every((dep) =>
680
- assigned.has(dep),
681
- );
682
-
683
- if (allDependenciesMet) {
684
- group.push(node.id);
685
- }
686
- }
687
-
688
- if (group.length === 0) break; // Prevent infinite loop on cycles
689
-
690
- for (const id of group) {
691
- assigned.add(id);
692
- }
693
- groups.push(group);
694
- }
695
-
696
- return groups;
697
- }