@kodrunhq/opencode-autopilot 1.12.1 → 1.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. package/assets/commands/oc-brainstorm.md +2 -0
  2. package/assets/commands/oc-new-agent.md +2 -0
  3. package/assets/commands/oc-new-command.md +2 -0
  4. package/assets/commands/oc-new-skill.md +2 -0
  5. package/assets/commands/oc-quick.md +2 -0
  6. package/assets/commands/oc-refactor.md +26 -0
  7. package/assets/commands/oc-review-agents.md +2 -0
  8. package/assets/commands/oc-review-pr.md +1 -0
  9. package/assets/commands/oc-security-audit.md +20 -0
  10. package/assets/commands/oc-stocktake.md +2 -0
  11. package/assets/commands/oc-tdd.md +2 -0
  12. package/assets/commands/oc-update-docs.md +2 -0
  13. package/assets/commands/oc-write-plan.md +2 -0
  14. package/assets/skills/api-design/SKILL.md +391 -0
  15. package/assets/skills/brainstorming/SKILL.md +1 -0
  16. package/assets/skills/code-review/SKILL.md +1 -0
  17. package/assets/skills/coding-standards/SKILL.md +3 -0
  18. package/assets/skills/csharp-patterns/SKILL.md +1 -0
  19. package/assets/skills/database-patterns/SKILL.md +270 -0
  20. package/assets/skills/docker-deployment/SKILL.md +326 -0
  21. package/assets/skills/e2e-testing/SKILL.md +1 -0
  22. package/assets/skills/frontend-design/SKILL.md +1 -0
  23. package/assets/skills/git-worktrees/SKILL.md +1 -0
  24. package/assets/skills/go-patterns/SKILL.md +1 -0
  25. package/assets/skills/java-patterns/SKILL.md +1 -0
  26. package/assets/skills/plan-executing/SKILL.md +1 -0
  27. package/assets/skills/plan-writing/SKILL.md +1 -0
  28. package/assets/skills/python-patterns/SKILL.md +1 -0
  29. package/assets/skills/rust-patterns/SKILL.md +1 -0
  30. package/assets/skills/security-patterns/SKILL.md +312 -0
  31. package/assets/skills/strategic-compaction/SKILL.md +1 -0
  32. package/assets/skills/systematic-debugging/SKILL.md +1 -0
  33. package/assets/skills/tdd-workflow/SKILL.md +1 -0
  34. package/assets/skills/typescript-patterns/SKILL.md +1 -0
  35. package/assets/skills/verification/SKILL.md +1 -0
  36. package/package.json +1 -1
  37. package/src/agents/autopilot.ts +4 -0
  38. package/src/agents/coder.ts +265 -0
  39. package/src/agents/db-specialist.ts +295 -0
  40. package/src/agents/debugger.ts +4 -0
  41. package/src/agents/devops.ts +352 -0
  42. package/src/agents/frontend-engineer.ts +541 -0
  43. package/src/agents/index.ts +31 -0
  44. package/src/agents/pipeline/oc-implementer.ts +4 -0
  45. package/src/agents/security-auditor.ts +348 -0
  46. package/src/hooks/anti-slop.ts +40 -1
  47. package/src/hooks/slop-patterns.ts +24 -4
  48. package/src/index.ts +2 -0
  49. package/src/installer.ts +29 -2
  50. package/src/memory/capture.ts +9 -4
  51. package/src/memory/decay.ts +11 -0
  52. package/src/memory/retrieval.ts +31 -2
  53. package/src/orchestrator/artifacts.ts +7 -2
  54. package/src/orchestrator/confidence.ts +3 -2
  55. package/src/orchestrator/handlers/architect.ts +11 -8
  56. package/src/orchestrator/handlers/build.ts +57 -16
  57. package/src/orchestrator/handlers/challenge.ts +9 -3
  58. package/src/orchestrator/handlers/plan.ts +5 -4
  59. package/src/orchestrator/handlers/recon.ts +9 -4
  60. package/src/orchestrator/handlers/retrospective.ts +3 -1
  61. package/src/orchestrator/handlers/ship.ts +8 -7
  62. package/src/orchestrator/handlers/types.ts +1 -0
  63. package/src/orchestrator/lesson-memory.ts +2 -1
  64. package/src/orchestrator/orchestration-logger.ts +40 -0
  65. package/src/orchestrator/phase.ts +14 -0
  66. package/src/orchestrator/schemas.ts +2 -0
  67. package/src/orchestrator/skill-injection.ts +11 -6
  68. package/src/orchestrator/state.ts +2 -1
  69. package/src/orchestrator/wave-assigner.ts +117 -0
  70. package/src/review/selection.ts +4 -32
  71. package/src/skills/adaptive-injector.ts +96 -5
  72. package/src/skills/loader.ts +4 -1
  73. package/src/tools/hashline-edit.ts +317 -0
  74. package/src/tools/orchestrate.ts +141 -18
  75. package/src/tools/review.ts +2 -1
@@ -0,0 +1,117 @@
1
+ /**
2
+ * Automatic wave assignment from task dependencies using Kahn's algorithm.
3
+ * Tasks declare depends_on arrays, this module computes optimal wave numbers.
4
+ * Reuses the cycle detection concept from src/skills/dependency-resolver.ts.
5
+ */
6
+
7
+ export interface TaskNode {
8
+ readonly id: number;
9
+ readonly depends_on: readonly number[];
10
+ }
11
+
12
+ export interface WaveAssignment {
13
+ readonly assignments: ReadonlyMap<number, number>; // taskId -> wave number
14
+ readonly cycles: readonly number[]; // task IDs participating in cycles
15
+ }
16
+
17
+ /** Hard cap on task count to prevent DoS via crafted dependency chains. */
18
+ const MAX_TASKS = 500;
19
+
20
+ /**
21
+ * Assign wave numbers to tasks based on their depends_on relationships.
22
+ * Uses Kahn's algorithm (BFS-based topological sort):
23
+ * 1. Build in-degree map from depends_on
24
+ * 2. All tasks with in-degree 0 -> Wave 1
25
+ * 3. Remove Wave 1, decrement in-degrees of dependents
26
+ * 4. Repeat for Wave 2, 3, etc.
27
+ * 5. Any remaining tasks are in cycles
28
+ *
29
+ * Tasks with empty depends_on arrays get wave 1 (backward compatible).
30
+ * Dependencies referencing non-existent task IDs are silently ignored.
31
+ */
32
+ export function assignWaves(tasks: readonly TaskNode[]): WaveAssignment {
33
+ if (tasks.length === 0) {
34
+ return Object.freeze({
35
+ assignments: Object.freeze(new Map<number, number>()),
36
+ cycles: Object.freeze([] as number[]),
37
+ });
38
+ }
39
+
40
+ if (tasks.length > MAX_TASKS) {
41
+ return Object.freeze({
42
+ assignments: Object.freeze(new Map<number, number>()),
43
+ cycles: Object.freeze(tasks.map((t) => t.id)),
44
+ });
45
+ }
46
+
47
+ // Build set of valid task IDs
48
+ const validIds = new Set(tasks.map((t) => t.id));
49
+
50
+ // Build adjacency list: for each task, which tasks depend on it
51
+ // (reverse of depends_on — "dependents" map)
52
+ const dependents = new Map<number, number[]>();
53
+ for (const id of validIds) {
54
+ dependents.set(id, []);
55
+ }
56
+
57
+ // Build in-degree map: count of valid dependencies per task
58
+ // Deduplicate depends_on and skip self-dependencies
59
+ const inDegree = new Map<number, number>();
60
+ for (const task of tasks) {
61
+ const uniqueDeps = [...new Set(task.depends_on)];
62
+ let degree = 0;
63
+ for (const dep of uniqueDeps) {
64
+ if (dep === task.id) continue; // Skip self-dependency
65
+ if (validIds.has(dep)) {
66
+ degree++;
67
+ const list = dependents.get(dep);
68
+ if (list) {
69
+ list.push(task.id);
70
+ }
71
+ }
72
+ }
73
+ inDegree.set(task.id, degree);
74
+ }
75
+
76
+ // BFS: process waves
77
+ const assignments = new Map<number, number>();
78
+ let currentQueue: number[] = [];
79
+
80
+ // Initialize with all tasks that have in-degree 0 (wave 1)
81
+ for (const task of tasks) {
82
+ if ((inDegree.get(task.id) ?? 0) === 0) {
83
+ currentQueue.push(task.id);
84
+ }
85
+ }
86
+
87
+ let wave = 1;
88
+ while (currentQueue.length > 0) {
89
+ const nextQueue: number[] = [];
90
+ for (const taskId of currentQueue) {
91
+ assignments.set(taskId, wave);
92
+ const deps = dependents.get(taskId) ?? [];
93
+ for (const dependent of deps) {
94
+ const newDegree = (inDegree.get(dependent) ?? 1) - 1;
95
+ inDegree.set(dependent, newDegree);
96
+ if (newDegree === 0) {
97
+ nextQueue.push(dependent);
98
+ }
99
+ }
100
+ }
101
+ currentQueue = nextQueue;
102
+ wave++;
103
+ }
104
+
105
+ // Any tasks not assigned a wave are in cycles
106
+ const cycleIds: number[] = [];
107
+ for (const task of tasks) {
108
+ if (!assignments.has(task.id)) {
109
+ cycleIds.push(task.id);
110
+ }
111
+ }
112
+
113
+ return Object.freeze({
114
+ assignments: Object.freeze(new Map(assignments)),
115
+ cycles: Object.freeze(cycleIds),
116
+ });
117
+ }
@@ -1,10 +1,8 @@
1
1
  /**
2
- * Two-pass deterministic agent selection for the review pipeline.
2
+ * Deterministic agent selection for the review pipeline.
3
3
  *
4
- * Pass 1: Stack gate -- agents with empty relevantStacks always pass;
5
- * agents with non-empty relevantStacks require at least one match.
6
- * Pass 2: Diff relevance scoring -- currently used for future ordering,
7
- * all stack-passing agents run regardless of score.
4
+ * Stack gate: agents with empty relevantStacks always pass;
5
+ * agents with non-empty relevantStacks require at least one match.
8
6
  */
9
7
 
10
8
  /** Minimal agent shape needed for selection (compatible with ReviewAgent from agents/). */
@@ -38,7 +36,7 @@ export interface SelectionResult {
38
36
  */
39
37
  export function selectAgents(
40
38
  detectedStacks: readonly string[],
41
- diffAnalysis: DiffAnalysisInput,
39
+ _diffAnalysis: DiffAnalysisInput,
42
40
  agents: readonly SelectableAgent[],
43
41
  ): SelectionResult {
44
42
  const stackSet = new Set(detectedStacks);
@@ -65,34 +63,8 @@ export function selectAgents(
65
63
  }
66
64
  }
67
65
 
68
- // Pass 2: Compute relevance scores (stored for future ordering, no filtering)
69
- // Scores are intentionally not used for filtering yet
70
- for (const agent of selected) {
71
- computeDiffRelevance(agent, diffAnalysis);
72
- }
73
-
74
66
  return Object.freeze({
75
67
  selected: Object.freeze(selected),
76
68
  excluded: Object.freeze(excluded),
77
69
  });
78
70
  }
79
-
80
- /**
81
- * Compute diff-based relevance score for an agent.
82
- * Base score of 1.0 with bonuses for specific agent-analysis matches.
83
- * Used for future prioritization/ordering, not for filtering.
84
- */
85
- export function computeDiffRelevance(agent: SelectableAgent, analysis: DiffAnalysisInput): number {
86
- let score = 1.0;
87
-
88
- if (agent.name === "security-auditor") {
89
- if (analysis.hasAuth) score += 0.5;
90
- if (analysis.hasConfig) score += 0.3;
91
- }
92
-
93
- if (agent.name === "test-interrogator") {
94
- if (!analysis.hasTests) score += 0.5;
95
- }
96
-
97
- return score;
98
- }
@@ -18,6 +18,24 @@ const DEFAULT_TOKEN_BUDGET = 8000;
18
18
  /** Rough estimate: 1 token ~ 4 chars */
19
19
  const CHARS_PER_TOKEN = 4;
20
20
 
21
+ /**
22
+ * Maps pipeline phases to the skill names relevant for that phase.
23
+ * Skills not in the list for the current phase are excluded from injection,
24
+ * preventing the full 13-19KB per-skill content from bloating every dispatch.
25
+ */
26
+ export const PHASE_SKILL_MAP: Readonly<Record<string, readonly string[]>> = Object.freeze({
27
+ RECON: ["plan-writing"],
28
+ CHALLENGE: ["plan-writing"],
29
+ ARCHITECT: ["plan-writing"],
30
+ PLAN: ["plan-writing", "plan-executing"],
31
+ BUILD: ["coding-standards", "tdd-workflow"],
32
+ SHIP: ["plan-executing"],
33
+ RETROSPECTIVE: [],
34
+ EXPLORE: [],
35
+ });
36
+
37
+ export type SkillMode = "summary" | "full";
38
+
21
39
  /**
22
40
  * Manifest files that indicate project stack.
23
41
  * Checks project root for these files to detect the stack.
@@ -121,14 +139,40 @@ export function filterSkillsByStack(
121
139
  return filtered;
122
140
  }
123
141
 
142
+ /**
143
+ * Build a compact summary for a single skill: frontmatter name + description
144
+ * (max 200 chars). Used in summary mode to avoid injecting full skill content.
145
+ */
146
+ export function buildSkillSummary(skill: LoadedSkill): string {
147
+ const { name, description } = skill.frontmatter;
148
+ const safeName = sanitizeTemplateContent(name);
149
+ const safeDesc = sanitizeTemplateContent((description ?? "").slice(0, 200));
150
+ return `[Skill: ${safeName}]\n${safeDesc}`;
151
+ }
152
+
153
+ /**
154
+ * In full mode, truncate skill content at the first `## ` heading boundary
155
+ * that exceeds the per-skill character budget. Preserves structure instead
156
+ * of collapsing all newlines.
157
+ */
158
+ function truncateAtSectionBoundary(content: string, maxChars: number): string {
159
+ if (content.length <= maxChars) return content;
160
+ const cutPoint = content.lastIndexOf("\n## ", maxChars);
161
+ if (cutPoint > 0) return content.slice(0, cutPoint);
162
+ return content.slice(0, maxChars);
163
+ }
164
+
124
165
  /**
125
166
  * Build multi-skill context string with dependency ordering and token budget.
126
167
  * Skills are ordered by dependency (prerequisites first), then concatenated
127
168
  * until the token budget is exhausted.
169
+ *
170
+ * @param mode - "summary" emits only name + description (compact); "full" preserves structure
128
171
  */
129
172
  export function buildMultiSkillContext(
130
173
  skills: ReadonlyMap<string, LoadedSkill>,
131
174
  tokenBudget: number = DEFAULT_TOKEN_BUDGET,
175
+ mode: SkillMode = "summary",
132
176
  ): string {
133
177
  if (skills.size === 0) return "";
134
178
 
@@ -151,17 +195,64 @@ export function buildMultiSkillContext(
151
195
  const skill = skills.get(name);
152
196
  if (!skill) continue;
153
197
 
154
- const collapsed = skill.content.replace(/[\r\n]+/g, " ");
155
- const header = `[Skill: ${name}]\n`;
198
+ let section: string;
199
+ if (mode === "summary") {
200
+ section = sanitizeTemplateContent(buildSkillSummary(skill));
201
+ } else {
202
+ // Full mode: preserve structure, truncate at section boundaries
203
+ const header = `[Skill: ${name}]\n`;
204
+ const perSkillBudget = Math.max(charBudget - totalChars - header.length, 0);
205
+ const truncated = truncateAtSectionBoundary(skill.content, perSkillBudget);
206
+ const sanitized = sanitizeTemplateContent(truncated);
207
+ section = `${header}${sanitized}`;
208
+ }
209
+
156
210
  const separator = sections.length > 0 ? 2 : 0; // "\n\n"
157
- const sectionCost = collapsed.length + header.length + separator;
211
+ const sectionCost = section.length + separator;
158
212
  if (totalChars + sectionCost > charBudget) break;
159
213
 
160
- const sanitized = sanitizeTemplateContent(collapsed);
161
- sections.push(`${header}${sanitized}`);
214
+ sections.push(section);
162
215
  totalChars += sectionCost;
163
216
  }
164
217
 
165
218
  if (sections.length === 0) return "";
166
219
  return `\n\nSkills context (follow these conventions and methodologies):\n${sections.join("\n\n")}`;
167
220
  }
221
+
222
+ /**
223
+ * Build adaptive skill context with optional phase filtering.
224
+ *
225
+ * When `phase` is provided, only skills listed in PHASE_SKILL_MAP for that
226
+ * phase are included (pipeline dispatch path). When omitted, all stack-filtered
227
+ * skills are included (direct chat injection path).
228
+ */
229
+ export function buildAdaptiveSkillContext(
230
+ skills: ReadonlyMap<string, LoadedSkill>,
231
+ options?: {
232
+ readonly phase?: string;
233
+ readonly budget?: number;
234
+ readonly mode?: SkillMode;
235
+ },
236
+ ): string {
237
+ const phase = options?.phase;
238
+ const budget = options?.budget ?? DEFAULT_TOKEN_BUDGET;
239
+ const mode = options?.mode ?? "summary";
240
+
241
+ if (phase !== undefined) {
242
+ const allowedNames = PHASE_SKILL_MAP[phase] ?? [];
243
+ if (allowedNames.length === 0) return "";
244
+
245
+ const allowedSet = new Set(allowedNames);
246
+ const filtered = new Map<string, LoadedSkill>();
247
+ for (const [name, skill] of skills) {
248
+ if (allowedSet.has(name)) {
249
+ filtered.set(name, skill);
250
+ }
251
+ }
252
+
253
+ return buildMultiSkillContext(filtered, budget, mode);
254
+ }
255
+
256
+ // No phase -- include all provided skills (caller already stack-filtered)
257
+ return buildMultiSkillContext(skills, budget, mode);
258
+ }
@@ -84,5 +84,8 @@ export async function loadAllSkills(skillsDir: string): Promise<ReadonlyMap<stri
84
84
  if (!isEnoentError(error)) throw error;
85
85
  }
86
86
 
87
- return skills;
87
+ // Sort alphabetically by name for deterministic ordering regardless of
88
+ // filesystem readdir order (which varies across OS and FS types).
89
+ const sorted = new Map([...skills.entries()].sort(([a], [b]) => a.localeCompare(b)));
90
+ return Object.freeze(sorted);
88
91
  }
@@ -0,0 +1,317 @@
1
+ import { readFile, writeFile } from "node:fs/promises";
2
+ import { isAbsolute, resolve } from "node:path";
3
+ import { tool } from "@opencode-ai/plugin";
4
+
5
+ /**
6
+ * CID alphabet from omo — 16 uppercase characters used for 2-char line hashes.
7
+ */
8
+ export const CID_ALPHABET = "ZPMQVRWSNKTXJBYH";
9
+
10
+ const CID_SET = new Set(CID_ALPHABET);
11
+
12
+ /**
13
+ * FNV-1a 32-bit hash.
14
+ */
15
+ function fnv1a(str: string): number {
16
+ let hash = 0x811c9dc5; // FNV offset basis
17
+ for (let i = 0; i < str.length; i++) {
18
+ hash ^= str.charCodeAt(i);
19
+ hash = Math.imul(hash, 0x01000193); // FNV prime
20
+ }
21
+ return hash >>> 0;
22
+ }
23
+
24
+ /**
25
+ * Compute a 2-character line hash using FNV-1a and CID alphabet.
26
+ */
27
+ export function computeLineHash(content: string): string {
28
+ const h = fnv1a(content);
29
+ return CID_ALPHABET[h & 0xf] + CID_ALPHABET[(h >> 4) & 0xf];
30
+ }
31
+
32
+ /**
33
+ * Parse a "LINE#HASH" anchor string into its components.
34
+ */
35
+ export function parseAnchor(
36
+ anchor: string,
37
+ ): { readonly line: number; readonly hash: string } | { readonly error: string } {
38
+ const idx = anchor.indexOf("#");
39
+ if (idx < 1) {
40
+ return { error: `Invalid anchor format: "${anchor}". Expected "LINE#HASH" (e.g. "42#VK").` };
41
+ }
42
+
43
+ const lineStr = anchor.slice(0, idx);
44
+ const hash = anchor.slice(idx + 1);
45
+
46
+ const line = Number.parseInt(lineStr, 10);
47
+ if (!Number.isFinite(line) || line < 1) {
48
+ return { error: `Invalid line number in anchor "${anchor}". Must be >= 1.` };
49
+ }
50
+
51
+ if (hash.length !== 2 || !CID_SET.has(hash[0]) || !CID_SET.has(hash[1])) {
52
+ return {
53
+ error: `Invalid hash "${hash}" in anchor "${anchor}". Must be 2 chars from CID alphabet.`,
54
+ };
55
+ }
56
+
57
+ return { line, hash };
58
+ }
59
+
60
// --- Types ---

/** A single hash-anchored edit operation against one file. */
interface HashlineEdit {
  readonly op: "replace" | "append" | "prepend";
  readonly pos: string; // "LINE#HASH" anchor
  readonly end?: string; // End anchor for range replace
  readonly lines: string | readonly string[] | null; // null = delete
}

/** Batch of edits for one file; all edits are validated before any is applied. */
interface HashlineEditArgs {
  readonly file: string; // absolute path (relative paths are rejected)
  readonly edits: readonly HashlineEdit[];
}
73
+
74
+ // --- Helpers ---
75
+
76
+ function formatAnchor(lineNum: number, content: string): string {
77
+ return `${lineNum}#${computeLineHash(content)}`;
78
+ }
79
+
80
+ function getSurroundingAnchors(
81
+ fileLines: readonly string[],
82
+ lineIdx: number,
83
+ radius: number,
84
+ ): string {
85
+ const anchors: string[] = [];
86
+ const start = Math.max(0, lineIdx - radius);
87
+ const end = Math.min(fileLines.length - 1, lineIdx + radius);
88
+ for (let i = start; i <= end; i++) {
89
+ anchors.push(` ${formatAnchor(i + 1, fileLines[i])} ${fileLines[i]}`);
90
+ }
91
+ return anchors.join("\n");
92
+ }
93
+
94
+ function toLineArray(lines: string | readonly string[] | null): readonly string[] | null {
95
+ if (lines === null) return null;
96
+ if (typeof lines === "string") return [lines];
97
+ return lines;
98
+ }
99
+
100
// --- Core function ---

/**
 * Apply a batch of hash-anchored edits to a file on disk.
 *
 * Pipeline: parse and validate all anchors -> verify every anchor's hash
 * against current file content -> reject overlapping edits -> apply edits
 * bottom-up -> write the result back. No mutation happens unless every edit
 * in the batch passes validation. All failures are returned as "Error: ..."
 * strings rather than thrown, keeping the tool surface message-based.
 *
 * @param args - Absolute file path plus the edit batch to apply.
 * @returns A human-readable success or error message.
 */
export async function hashlineEditCore(args: HashlineEditArgs): Promise<string> {
  // Path safety: require absolute paths to prevent relative path confusion
  if (!isAbsolute(args.file)) {
    return `Error: File path must be absolute. Got: "${args.file}"`;
  }
  const resolved = resolve(args.file);

  // Empty batch: succeed without touching the filesystem.
  if (args.edits.length === 0) {
    return "Applied 0 edit(s) — no changes made.";
  }

  let raw: string;
  try {
    raw = await readFile(resolved, "utf-8");
  } catch (err) {
    return `Error: Cannot read file "${resolved}": ${err instanceof Error ? err.message : String(err)}`;
  }

  // Split preserving trailing newline behavior
  const hasTrailingNewline = raw.endsWith("\n");
  const fileLines = raw.split("\n");
  // If file ends with newline, split produces an extra empty string at the end — remove it
  if (hasTrailingNewline && fileLines[fileLines.length - 1] === "") {
    fileLines.pop();
  }

  // Parse all anchors first and validate
  const parsedEdits: Array<{
    readonly op: "replace" | "append" | "prepend";
    readonly lineIdx: number;
    readonly hash: string;
    readonly endLineIdx?: number;
    readonly endHash?: string;
    readonly lines: readonly string[] | null;
  }> = [];

  // Collect every validation error so the caller sees all problems at once.
  const errors: string[] = [];

  for (const edit of args.edits) {
    const parsed = parseAnchor(edit.pos);
    if ("error" in parsed) {
      errors.push(parsed.error);
      continue;
    }

    const lineIdx = parsed.line - 1; // Convert to 0-based
    if (lineIdx >= fileLines.length) {
      errors.push(`Line ${parsed.line} is out of bounds (file has ${fileLines.length} lines).`);
      continue;
    }

    let endLineIdx: number | undefined;
    let endHash: string | undefined;

    // Optional end anchor turns a single-line replace into a range replace.
    if (edit.end) {
      const parsedEnd = parseAnchor(edit.end);
      if ("error" in parsedEnd) {
        errors.push(parsedEnd.error);
        continue;
      }
      endLineIdx = parsedEnd.line - 1;
      endHash = parsedEnd.hash;
      if (endLineIdx >= fileLines.length) {
        errors.push(
          `End line ${parsedEnd.line} is out of bounds (file has ${fileLines.length} lines).`,
        );
        continue;
      }
      if (endLineIdx < lineIdx) {
        errors.push(`End line ${parsedEnd.line} is before start line ${parsed.line}.`);
        continue;
      }
    }

    parsedEdits.push({
      op: edit.op,
      lineIdx,
      hash: parsed.hash,
      endLineIdx,
      endHash,
      lines: toLineArray(edit.lines),
    });
  }

  if (errors.length > 0) {
    return `Error: ${errors.join("\n")}`;
  }

  // Validate hashes against current file content
  const hashErrors: string[] = [];

  for (const edit of parsedEdits) {
    const actualHash = computeLineHash(fileLines[edit.lineIdx]);
    if (actualHash !== edit.hash) {
      // Include fresh anchors around the mismatch so the caller can retry.
      const surrounding = getSurroundingAnchors(fileLines, edit.lineIdx, 2);
      hashErrors.push(
        `Hash mismatch at line ${edit.lineIdx + 1}: expected ${edit.hash}, actual ${actualHash}.\nUpdated anchors:\n${surrounding}`,
      );
    }

    if (edit.endLineIdx !== undefined && edit.endHash !== undefined) {
      const actualEndHash = computeLineHash(fileLines[edit.endLineIdx]);
      if (actualEndHash !== edit.endHash) {
        const surrounding = getSurroundingAnchors(fileLines, edit.endLineIdx, 2);
        hashErrors.push(
          `Hash mismatch at end line ${edit.endLineIdx + 1}: expected ${edit.endHash}, actual ${actualEndHash}.\nUpdated anchors:\n${surrounding}`,
        );
      }
    }
  }

  if (hashErrors.length > 0) {
    return `Error: Stale edit(s) detected.\n${hashErrors.join("\n\n")}`;
  }

  // Detect overlapping edits — reject before applying any mutations
  // (O(n^2) pairwise interval check; batches are small in practice).
  for (let i = 0; i < parsedEdits.length; i++) {
    const a = parsedEdits[i];
    const aStart = a.lineIdx;
    const aEnd = a.endLineIdx ?? a.lineIdx;
    for (let j = i + 1; j < parsedEdits.length; j++) {
      const b = parsedEdits[j];
      const bStart = b.lineIdx;
      const bEnd = b.endLineIdx ?? b.lineIdx;
      if (aStart <= bEnd && bStart <= aEnd) {
        return `Error: Overlapping edits at lines ${aStart + 1}-${aEnd + 1} and ${bStart + 1}-${bEnd + 1}. Split into separate calls.`;
      }
    }
  }

  // Sort edits bottom-up (highest line index first) to prevent drift
  // (earlier edits would otherwise shift the line numbers of later ones).
  const sortedEdits = [...parsedEdits].sort((a, b) => {
    const aLine = a.endLineIdx ?? a.lineIdx;
    const bLine = b.endLineIdx ?? b.lineIdx;
    return bLine - aLine;
  });

  // Apply edits
  for (const edit of sortedEdits) {
    const newLines = edit.lines; // null means delete for replace ops

    switch (edit.op) {
      case "replace": {
        if (edit.endLineIdx !== undefined) {
          // Range replace: remove from lineIdx to endLineIdx (inclusive), insert newLines
          const count = edit.endLineIdx - edit.lineIdx + 1;
          if (newLines === null) {
            fileLines.splice(edit.lineIdx, count);
          } else {
            fileLines.splice(edit.lineIdx, count, ...newLines);
          }
        } else {
          // Single line replace
          if (newLines === null) {
            fileLines.splice(edit.lineIdx, 1);
          } else {
            fileLines.splice(edit.lineIdx, 1, ...newLines);
          }
        }
        break;
      }
      case "append": {
        // Insert after the anchored line; null content inserts nothing.
        const insertLines = newLines ?? [];
        fileLines.splice(edit.lineIdx + 1, 0, ...insertLines);
        break;
      }
      case "prepend": {
        // Insert before the anchored line; null content inserts nothing.
        const insertLines = newLines ?? [];
        fileLines.splice(edit.lineIdx, 0, ...insertLines);
        break;
      }
    }
  }

  // Write back, restoring the original trailing-newline state.
  const output = fileLines.join("\n") + (hasTrailingNewline ? "\n" : "");
  try {
    await writeFile(resolved, output, "utf-8");
  } catch (err) {
    return `Error: Cannot write file "${resolved}": ${err instanceof Error ? err.message : String(err)}`;
  }

  return `Applied ${sortedEdits.length} edit(s) to ${resolved}.`;
}
286
+
287
// --- Tool wrapper ---

/**
 * OpenCode tool definition exposing hashlineEditCore.
 * The args schema mirrors HashlineEditArgs; anchor validation and error
 * reporting are handled inside the core function, which returns messages
 * rather than throwing.
 */
export const ocHashlineEdit = tool({
  description:
    "Edit files using hash-anchored line references (LINE#ID format). Validates line content hasn't changed before applying edits. Supports replace, append, and prepend operations.",
  args: {
    file: tool.schema.string().describe("Absolute path to the file to edit"),
    edits: tool.schema
      .array(
        tool.schema.object({
          op: tool.schema.enum(["replace", "append", "prepend"]).describe("Edit operation type"),
          pos: tool.schema.string().describe("LINE#HASH anchor, e.g. '42#VK'"),
          end: tool.schema
            .string()
            .optional()
            .describe("End anchor for range replace, e.g. '48#SN'"),
          lines: tool.schema
            .union([
              tool.schema.string(),
              tool.schema.array(tool.schema.string()),
              tool.schema.null(),
            ])
            .describe("New content (string, string[], or null to delete)"),
        }),
      )
      .describe("Array of edit operations to apply"),
  },
  async execute(args) {
    return hashlineEditCore(args);
  },
});