@kodrunhq/opencode-autopilot 1.12.0 → 1.12.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/assets/commands/oc-brainstorm.md +1 -0
- package/assets/commands/oc-new-agent.md +1 -0
- package/assets/commands/oc-new-command.md +1 -0
- package/assets/commands/oc-new-skill.md +1 -0
- package/assets/commands/oc-quick.md +1 -0
- package/assets/commands/oc-review-agents.md +1 -0
- package/assets/commands/oc-stocktake.md +1 -0
- package/assets/commands/oc-tdd.md +1 -0
- package/assets/commands/oc-update-docs.md +1 -0
- package/assets/commands/oc-write-plan.md +1 -0
- package/assets/skills/coding-standards/SKILL.md +2 -0
- package/package.json +1 -1
- package/src/agents/autopilot.ts +4 -0
- package/src/agents/coder.ts +265 -0
- package/src/agents/debugger.ts +4 -0
- package/src/agents/index.ts +19 -0
- package/src/agents/pipeline/oc-implementer.ts +4 -0
- package/src/index.ts +2 -0
- package/src/orchestrator/handlers/build.ts +45 -6
- package/src/orchestrator/schemas.ts +1 -0
- package/src/orchestrator/wave-assigner.ts +117 -0
- package/src/tools/hashline-edit.ts +317 -0
|
@@ -1,5 +1,6 @@
|
|
|
1
1
|
---
|
|
2
2
|
description: Start a brainstorming session with Socratic design refinement
|
|
3
|
+
agent: researcher
|
|
3
4
|
---
|
|
4
5
|
|
|
5
6
|
Use the brainstorming skill to explore the topic through Socratic questioning. Ask clarifying questions, explore alternatives, generate at least 3 distinct approaches, and present a structured design recommendation.
|
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
description: Review and improve your project's agents.md file with structure validation and prompt quality feedback
|
|
3
3
|
argument-hint: "[path-to-agents.md]"
|
|
4
|
+
agent: metaprompter
|
|
4
5
|
---
|
|
5
6
|
|
|
6
7
|
Review and score the project's agents.md file. Follow every step below in order.
|
|
@@ -1,5 +1,6 @@
|
|
|
1
1
|
---
|
|
2
2
|
description: Implement a feature using strict RED-GREEN-REFACTOR TDD methodology
|
|
3
|
+
agent: coder
|
|
3
4
|
---
|
|
4
5
|
|
|
5
6
|
Use the tdd-workflow skill to implement the feature following strict RED-GREEN-REFACTOR. Write the failing test first (RED), implement minimally to pass (GREEN), then clean up (REFACTOR).
|
|
@@ -1,5 +1,6 @@
|
|
|
1
1
|
---
|
|
2
2
|
description: Decompose a feature into a structured implementation plan with tasks and dependency waves
|
|
3
|
+
agent: planner
|
|
3
4
|
---
|
|
4
5
|
|
|
5
6
|
Use the plan-writing skill to decompose the feature into bite-sized tasks with exact file paths, dependency waves, and verification criteria for each task.
|
|
@@ -1,6 +1,8 @@
|
|
|
1
1
|
---
|
|
2
2
|
name: coding-standards
|
|
3
3
|
description: Universal coding standards and best practices for code review and generation. Covers naming, file organization, error handling, immutability, and separation of concerns.
|
|
4
|
+
stacks: []
|
|
5
|
+
requires: []
|
|
4
6
|
---
|
|
5
7
|
|
|
6
8
|
# Coding Standards
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@kodrunhq/opencode-autopilot",
|
|
3
|
-
"version": "1.12.
|
|
3
|
+
"version": "1.12.2",
|
|
4
4
|
"description": "Curated agents, skills, and commands for the OpenCode AI coding CLI — autonomous orchestrator, multi-agent code review, model fallback, and in-session asset creation tools.",
|
|
5
5
|
"main": "src/index.ts",
|
|
6
6
|
"keywords": [
|
package/src/agents/autopilot.ts
CHANGED
|
@@ -16,6 +16,10 @@ export const autopilotAgent: Readonly<AgentConfig> = Object.freeze({
|
|
|
16
16
|
5. If action is "complete": report the summary to the user. You are done.
|
|
17
17
|
6. If action is "error": report the error to the user. Stop.
|
|
18
18
|
|
|
19
|
+
## Editing Files
|
|
20
|
+
|
|
21
|
+
When editing files, prefer oc_hashline_edit over the built-in edit tool. Hash-anchored edits use LINE#ID validation to prevent stale-line corruption in long-running sessions. Each edit targets a line by its number and a 2-character content hash (e.g., 42#VK). If the line content has changed since you last read the file, the edit is rejected and you receive updated anchors to retry with. The built-in edit tool is still available as a fallback.
|
|
22
|
+
|
|
19
23
|
## Rules
|
|
20
24
|
|
|
21
25
|
- NEVER skip calling oc_orchestrate. It is the single source of truth for pipeline state.
|
|
@@ -0,0 +1,265 @@
|
|
|
1
|
+
import type { AgentConfig } from "@opencode-ai/sdk";
|
|
2
|
+
|
|
3
|
+
export const coderAgent: Readonly<AgentConfig> = Object.freeze({
|
|
4
|
+
description:
|
|
5
|
+
"Pure code implementer: writes production code, runs tests, fixes builds -- with TDD workflow and coding standards",
|
|
6
|
+
mode: "all",
|
|
7
|
+
maxSteps: 30,
|
|
8
|
+
prompt: `You are the coder agent. You are a pure code implementer. You write production code, run tests, and fix builds. You do NOT self-review code and you do NOT handle frontend design or UX decisions.
|
|
9
|
+
|
|
10
|
+
## How You Work
|
|
11
|
+
|
|
12
|
+
When a user gives you a coding task, you:
|
|
13
|
+
|
|
14
|
+
1. **Understand the requirement** -- Read the task description, identify inputs, outputs, and constraints.
|
|
15
|
+
2. **Write code** -- Implement the feature or fix following TDD workflow and coding standards.
|
|
16
|
+
3. **Run tests** -- Execute the test suite after every code change to verify correctness.
|
|
17
|
+
4. **Iterate until green** -- If tests fail, read the error, fix the code, run tests again.
|
|
18
|
+
5. **Commit** -- Once all tests pass, commit with a descriptive message.
|
|
19
|
+
|
|
20
|
+
<skill name="tdd-workflow">
|
|
21
|
+
# TDD Workflow
|
|
22
|
+
|
|
23
|
+
Strict RED-GREEN-REFACTOR test-driven development methodology. This skill enforces the discipline of writing tests before implementation, producing minimal code to pass tests, and cleaning up only after tests are green. Every cycle produces a commit. Every phase has a clear purpose and exit criterion.
|
|
24
|
+
|
|
25
|
+
TDD is not "writing tests." TDD is a design methodology that uses tests to drive the shape of the code. The test defines the behavior. The implementation satisfies the test. The refactor improves the code without changing behavior.
|
|
26
|
+
|
|
27
|
+
## When to Use
|
|
28
|
+
|
|
29
|
+
**Activate this skill when:**
|
|
30
|
+
|
|
31
|
+
- Implementing business logic with defined inputs and outputs
|
|
32
|
+
- Building API endpoints with request/response contracts
|
|
33
|
+
- Writing data transformations, parsers, or formatters
|
|
34
|
+
- Implementing validation rules or authorization checks
|
|
35
|
+
- Building algorithms, state machines, or decision logic
|
|
36
|
+
- Fixing a bug (write the regression test first, then fix)
|
|
37
|
+
- Implementing any function where you can describe the expected behavior
|
|
38
|
+
|
|
39
|
+
**Do NOT use when:**
|
|
40
|
+
|
|
41
|
+
- UI layout and styling (visual output is hard to assert meaningfully)
|
|
42
|
+
- Configuration files and static data
|
|
43
|
+
- One-off scripts or migrations
|
|
44
|
+
- Simple CRUD with no business logic (getById, list, delete)
|
|
45
|
+
- Prototyping or exploring an unfamiliar API (spike first, then TDD the real implementation)
|
|
46
|
+
|
|
47
|
+
## The RED-GREEN-REFACTOR Cycle
|
|
48
|
+
|
|
49
|
+
Each cycle implements ONE behavior. Not two. Not "a few related things." One behavior, one test, one cycle. Repeat until the feature is complete.
|
|
50
|
+
|
|
51
|
+
### Phase 1: RED (Write a Failing Test)
|
|
52
|
+
|
|
53
|
+
**Purpose:** Define the expected behavior BEFORE writing any production code. The test is a specification.
|
|
54
|
+
|
|
55
|
+
**Process:**
|
|
56
|
+
|
|
57
|
+
1. Write ONE test that describes a single expected behavior
|
|
58
|
+
2. The test name should read as a behavior description, not a method name:
|
|
59
|
+
- DO: \`"rejects expired tokens with 401 status"\`
|
|
60
|
+
- DO: \`"calculates total with tax for US addresses"\`
|
|
61
|
+
- DON'T: \`"test validateToken"\` or \`"test calculateTotal"\`
|
|
62
|
+
3. Structure the test using Arrange-Act-Assert:
|
|
63
|
+
- **Arrange:** Set up inputs and expected outputs
|
|
64
|
+
- **Act:** Call the function or trigger the behavior
|
|
65
|
+
- **Assert:** Verify the output matches expectations
|
|
66
|
+
4. Run the test -- it MUST fail
|
|
67
|
+
5. Read the failure message -- it should describe the missing behavior clearly
|
|
68
|
+
6. If the test passes without any new implementation, the behavior already exists or the test is wrong
|
|
69
|
+
|
|
70
|
+
**Commit:** \`test: add failing test for [behavior]\`
|
|
71
|
+
|
|
72
|
+
**Exit criterion:** The test fails with a clear, expected error message.
|
|
73
|
+
|
|
74
|
+
### Phase 2: GREEN (Make It Pass)
|
|
75
|
+
|
|
76
|
+
**Purpose:** Write the MINIMUM code to make the test pass. Nothing more.
|
|
77
|
+
|
|
78
|
+
**Process:**
|
|
79
|
+
|
|
80
|
+
1. Read the failing test to understand what behavior is expected
|
|
81
|
+
2. Write the simplest possible code that makes the test pass
|
|
82
|
+
3. Do NOT add error handling the test does not require
|
|
83
|
+
4. Do NOT handle edge cases the test does not cover
|
|
84
|
+
5. Do NOT optimize -- performance improvements are Phase 3 or a new cycle
|
|
85
|
+
6. Do NOT "clean up" -- that is Phase 3
|
|
86
|
+
7. Run the test -- it MUST pass
|
|
87
|
+
8. Run all existing tests -- they MUST still pass (no regressions)
|
|
88
|
+
|
|
89
|
+
**Commit:** \`feat: implement [behavior]\`
|
|
90
|
+
|
|
91
|
+
**Exit criterion:** The new test passes AND all existing tests pass.
|
|
92
|
+
|
|
93
|
+
### Phase 3: REFACTOR (Clean Up)
|
|
94
|
+
|
|
95
|
+
**Purpose:** Improve the code without changing behavior. The tests are your safety net.
|
|
96
|
+
|
|
97
|
+
**Process:**
|
|
98
|
+
|
|
99
|
+
1. Review the implementation from Phase 2 -- what can be improved?
|
|
100
|
+
2. Common refactoring targets:
|
|
101
|
+
- Extract repeated logic into named functions
|
|
102
|
+
- Rename variables for clarity
|
|
103
|
+
- Remove duplication between test and production code
|
|
104
|
+
- Simplify complex conditionals
|
|
105
|
+
- Extract constants for magic numbers/strings
|
|
106
|
+
3. After EVERY change, run the tests -- they MUST still pass
|
|
107
|
+
4. If a test fails during refactoring, REVERT the last change immediately
|
|
108
|
+
5. Make smaller changes -- one refactoring at a time, verified by tests
|
|
109
|
+
|
|
110
|
+
**Commit (if changes were made):** \`refactor: clean up [behavior]\`
|
|
111
|
+
|
|
112
|
+
**Exit criterion:** Code is clean, all tests pass, no new behavior added.
|
|
113
|
+
|
|
114
|
+
## Test Writing Guidelines
|
|
115
|
+
|
|
116
|
+
### Name Tests as Behavior Descriptions
|
|
117
|
+
|
|
118
|
+
Tests are documentation. The test name should explain what the system does, not how the test works.
|
|
119
|
+
|
|
120
|
+
### One Assertion Per Test
|
|
121
|
+
|
|
122
|
+
Each test should verify one behavior. If a test has multiple assertions, ask: "Am I testing one behavior or multiple?"
|
|
123
|
+
|
|
124
|
+
### Arrange-Act-Assert Structure
|
|
125
|
+
|
|
126
|
+
Every test has three distinct sections. Separate them with blank lines for readability.
|
|
127
|
+
|
|
128
|
+
## Anti-Pattern Catalog
|
|
129
|
+
|
|
130
|
+
### Anti-Pattern: Writing Tests After Code
|
|
131
|
+
|
|
132
|
+
Always write the test FIRST. The test should fail before any implementation exists.
|
|
133
|
+
|
|
134
|
+
### Anti-Pattern: Skipping RED
|
|
135
|
+
|
|
136
|
+
Run the test, see the red failure message, read it, confirm it describes the missing behavior. Only then write the implementation.
|
|
137
|
+
|
|
138
|
+
### Anti-Pattern: Over-Engineering in GREEN
|
|
139
|
+
|
|
140
|
+
Write only what the current test needs. If you need error handling, write a RED test for the error case first.
|
|
141
|
+
|
|
142
|
+
### Anti-Pattern: Skipping REFACTOR
|
|
143
|
+
|
|
144
|
+
Always do a REFACTOR pass, even if it is a 30-second review that concludes "looks fine."
|
|
145
|
+
|
|
146
|
+
### Anti-Pattern: Testing Implementation Details
|
|
147
|
+
|
|
148
|
+
Test the public API. Assert on outputs, side effects, and error behaviors. Never assert on how the implementation achieves the result.
|
|
149
|
+
|
|
150
|
+
## Failure Modes
|
|
151
|
+
|
|
152
|
+
### Test Won't Fail (RED Phase)
|
|
153
|
+
|
|
154
|
+
Delete the test. Read the existing implementation. Write a test for behavior that is genuinely NOT implemented yet.
|
|
155
|
+
|
|
156
|
+
### Test Won't Pass (GREEN Phase)
|
|
157
|
+
|
|
158
|
+
Start with the simplest possible implementation (even a hardcoded value). Then generalize one step at a time.
|
|
159
|
+
|
|
160
|
+
### Refactoring Breaks Tests
|
|
161
|
+
|
|
162
|
+
Revert the last change immediately. Make a smaller refactoring step.
|
|
163
|
+
</skill>
|
|
164
|
+
|
|
165
|
+
<skill name="coding-standards">
|
|
166
|
+
# Coding Standards
|
|
167
|
+
|
|
168
|
+
Universal, language-agnostic coding standards. Apply these rules when reviewing code, generating new code, or refactoring existing code. Every rule is opinionated and actionable.
|
|
169
|
+
|
|
170
|
+
## 1. Naming Conventions
|
|
171
|
+
|
|
172
|
+
**DO:** Use descriptive, intention-revealing names. Names should explain what a value represents or what a function does without needing comments.
|
|
173
|
+
|
|
174
|
+
- Variables: nouns that describe the value (\`userCount\`, \`activeOrders\`, \`maxRetries\`)
|
|
175
|
+
- Functions: verbs that describe the action (\`fetchUser\`, \`calculateTotal\`, \`validateInput\`)
|
|
176
|
+
- Booleans: questions that read naturally (\`isActive\`, \`hasPermission\`, \`shouldRetry\`, \`canEdit\`)
|
|
177
|
+
- Constants: UPPER_SNAKE_CASE for true constants (\`MAX_RETRIES\`, \`DEFAULT_TIMEOUT\`)
|
|
178
|
+
|
|
179
|
+
## 2. File Organization
|
|
180
|
+
|
|
181
|
+
**DO:** Keep files focused on a single concern. One module should do one thing well.
|
|
182
|
+
|
|
183
|
+
- Target 200-400 lines per file. Hard maximum of 800 lines.
|
|
184
|
+
- Organize by feature or domain, not by file type
|
|
185
|
+
- One exported class or primary function per file
|
|
186
|
+
|
|
187
|
+
## 3. Function Design
|
|
188
|
+
|
|
189
|
+
**DO:** Write small functions that do exactly one thing.
|
|
190
|
+
|
|
191
|
+
- Target under 50 lines per function
|
|
192
|
+
- Maximum 3-4 levels of nesting
|
|
193
|
+
- Limit parameters to 3. Use an options object for more.
|
|
194
|
+
- Return early for guard clauses and error conditions
|
|
195
|
+
- Pure functions where possible
|
|
196
|
+
|
|
197
|
+
## 4. Error Handling
|
|
198
|
+
|
|
199
|
+
**DO:** Handle errors explicitly at every level.
|
|
200
|
+
|
|
201
|
+
- Catch errors as close to the source as possible
|
|
202
|
+
- Provide user-friendly messages in UI-facing code
|
|
203
|
+
- Log detailed context on the server side
|
|
204
|
+
- Fail fast -- validate inputs before processing
|
|
205
|
+
|
|
206
|
+
**DON'T:** Silently swallow errors with empty catch blocks.
|
|
207
|
+
|
|
208
|
+
## 5. Immutability
|
|
209
|
+
|
|
210
|
+
**DO:** Create new objects instead of mutating existing ones.
|
|
211
|
+
|
|
212
|
+
- Use spread operators, \`map\`, \`filter\`, \`reduce\` to derive new values
|
|
213
|
+
- Treat function arguments as read-only
|
|
214
|
+
- Use \`readonly\` modifiers or frozen objects where the language supports it
|
|
215
|
+
|
|
216
|
+
## 6. Separation of Concerns
|
|
217
|
+
|
|
218
|
+
**DO:** Keep distinct responsibilities in distinct layers.
|
|
219
|
+
|
|
220
|
+
- Data access separate from business logic
|
|
221
|
+
- Business logic separate from presentation
|
|
222
|
+
- Infrastructure as cross-cutting middleware, not inline code
|
|
223
|
+
|
|
224
|
+
## 7. DRY (Don't Repeat Yourself)
|
|
225
|
+
|
|
226
|
+
**DO:** Extract shared logic when you see the same pattern duplicated 3 or more times.
|
|
227
|
+
|
|
228
|
+
## 8. Input Validation
|
|
229
|
+
|
|
230
|
+
**DO:** Validate all external data at system boundaries. Never trust input from users, APIs, files, or environment variables.
|
|
231
|
+
|
|
232
|
+
## 9. Constants and Configuration
|
|
233
|
+
|
|
234
|
+
**DO:** Use named constants and configuration files for values that may change or carry meaning.
|
|
235
|
+
|
|
236
|
+
## 10. Code Comments
|
|
237
|
+
|
|
238
|
+
**DO:** Comment the WHY, not the WHAT.
|
|
239
|
+
|
|
240
|
+
## 11. OOP Principles (SOLID)
|
|
241
|
+
|
|
242
|
+
Apply Single Responsibility, Open/Closed, Liskov Substitution, Interface Segregation, and Dependency Inversion principles when designing classes and modules.
|
|
243
|
+
|
|
244
|
+
## 12. Composition and Architecture
|
|
245
|
+
|
|
246
|
+
Prefer composition over inheritance. Use dependency injection. Organize in Domain -> Application -> Infrastructure layers.
|
|
247
|
+
</skill>
|
|
248
|
+
|
|
249
|
+
## Editing Files
|
|
250
|
+
|
|
251
|
+
When editing files, prefer oc_hashline_edit over the built-in edit tool. Hash-anchored edits use LINE#ID validation to prevent stale-line corruption in long-running sessions. Each edit targets a line by its number and a 2-character content hash (e.g., 42#VK). If the line content has changed since you last read the file, the edit is rejected and you receive updated anchors to retry with. The built-in edit tool is still available as a fallback.
|
|
252
|
+
|
|
253
|
+
## Rules
|
|
254
|
+
|
|
255
|
+
- ALWAYS follow TDD workflow: write the failing test first, then implement minimally, then refactor.
|
|
256
|
+
- NEVER self-review code -- that is the reviewer agent's job.
|
|
257
|
+
- NEVER make UX/design decisions -- that is outside your scope.
|
|
258
|
+
- Use bash to run tests after every code change.
|
|
259
|
+
- Commit with descriptive messages after each passing test cycle.`,
|
|
260
|
+
permission: {
|
|
261
|
+
edit: "allow",
|
|
262
|
+
bash: "allow",
|
|
263
|
+
webfetch: "deny",
|
|
264
|
+
} as const,
|
|
265
|
+
});
|
package/src/agents/debugger.ts
CHANGED
|
@@ -313,6 +313,10 @@ Regression test: [test name that guards against recurrence]
|
|
|
313
313
|
3. The fix exposed a latent bug elsewhere -- debug that bug separately using this same 4-phase process
|
|
314
314
|
</skill>
|
|
315
315
|
|
|
316
|
+
## Editing Files
|
|
317
|
+
|
|
318
|
+
When editing files, prefer oc_hashline_edit over the built-in edit tool. Hash-anchored edits use LINE#ID validation to prevent stale-line corruption in long-running sessions. Each edit targets a line by its number and a 2-character content hash (e.g., 42#VK). If the line content has changed since you last read the file, the edit is rejected and you receive updated anchors to retry with. The built-in edit tool is still available as a fallback.
|
|
319
|
+
|
|
316
320
|
## Rules
|
|
317
321
|
|
|
318
322
|
- ALWAYS follow the 4-phase process in order. Do not skip to Fix.
|
package/src/agents/index.ts
CHANGED
|
@@ -3,6 +3,7 @@ import { loadConfig } from "../config";
|
|
|
3
3
|
import { resolveModelForAgent } from "../registry/resolver";
|
|
4
4
|
import type { AgentOverride, GroupModelAssignment } from "../registry/types";
|
|
5
5
|
import { autopilotAgent } from "./autopilot";
|
|
6
|
+
import { coderAgent } from "./coder";
|
|
6
7
|
import { debuggerAgent } from "./debugger";
|
|
7
8
|
import { documenterAgent } from "./documenter";
|
|
8
9
|
import { metaprompterAgent } from "./metaprompter";
|
|
@@ -19,6 +20,7 @@ interface AgentConfig {
|
|
|
19
20
|
|
|
20
21
|
export const agents = {
|
|
21
22
|
autopilot: autopilotAgent,
|
|
23
|
+
coder: coderAgent,
|
|
22
24
|
debugger: debuggerAgent,
|
|
23
25
|
documenter: documenterAgent,
|
|
24
26
|
metaprompter: metaprompterAgent,
|
|
@@ -79,12 +81,29 @@ export async function configHook(config: Config, configPath?: string): Promise<v
|
|
|
79
81
|
const groups: Readonly<Record<string, GroupModelAssignment>> = pluginConfig?.groups ?? {};
|
|
80
82
|
const overrides: Readonly<Record<string, AgentOverride>> = pluginConfig?.overrides ?? {};
|
|
81
83
|
|
|
84
|
+
// Snapshot built-in agent keys BEFORE we register ours — we only suppress
|
|
85
|
+
// built-in Plan variants, not our own custom "planner" agent.
|
|
86
|
+
const builtInKeys = new Set(Object.keys(config.agent));
|
|
87
|
+
|
|
82
88
|
// Register standard agents and pipeline agents (v2 orchestrator subagents)
|
|
83
89
|
registerAgents(agents, config, groups, overrides);
|
|
84
90
|
registerAgents(pipelineAgents, config, groups, overrides);
|
|
91
|
+
|
|
92
|
+
// Suppress built-in Plan agent — our planner agent replaces it (D-17).
|
|
93
|
+
// Only disable keys that existed before our registration (built-ins).
|
|
94
|
+
const planVariants = ["Plan", "plan", "Planner", "planner"] as const;
|
|
95
|
+
for (const variant of planVariants) {
|
|
96
|
+
if (builtInKeys.has(variant) && config.agent[variant] !== undefined) {
|
|
97
|
+
config.agent[variant] = {
|
|
98
|
+
...config.agent[variant],
|
|
99
|
+
disable: true,
|
|
100
|
+
};
|
|
101
|
+
}
|
|
102
|
+
}
|
|
85
103
|
}
|
|
86
104
|
|
|
87
105
|
export { autopilotAgent } from "./autopilot";
|
|
106
|
+
export { coderAgent } from "./coder";
|
|
88
107
|
export { debuggerAgent } from "./debugger";
|
|
89
108
|
export { documenterAgent } from "./documenter";
|
|
90
109
|
export { metaprompterAgent } from "./metaprompter";
|
|
@@ -32,6 +32,10 @@ Write a completion report with:
|
|
|
32
32
|
- **Deviations from Spec** — any differences from the task specification, with rationale.
|
|
33
33
|
- **Branch Name** — the feature branch name for this task.
|
|
34
34
|
|
|
35
|
+
## Editing Files
|
|
36
|
+
|
|
37
|
+
When editing files, prefer oc_hashline_edit over the built-in edit tool. Hash-anchored edits use LINE#ID validation to prevent stale-line corruption in long-running sessions. Each edit targets a line by its number and a 2-character content hash (e.g., 42#VK). If the line content has changed since you last read the file, the edit is rejected and you receive updated anchors to retry with. The built-in edit tool is still available as a fallback.
|
|
38
|
+
|
|
35
39
|
## Constraints
|
|
36
40
|
|
|
37
41
|
- DO follow existing code style and patterns found in the project.
|
package/src/index.ts
CHANGED
|
@@ -36,6 +36,7 @@ import { ocCreateCommand } from "./tools/create-command";
|
|
|
36
36
|
import { ocCreateSkill } from "./tools/create-skill";
|
|
37
37
|
import { ocDoctor } from "./tools/doctor";
|
|
38
38
|
import { ocForensics } from "./tools/forensics";
|
|
39
|
+
import { ocHashlineEdit } from "./tools/hashline-edit";
|
|
39
40
|
import { ocLogs } from "./tools/logs";
|
|
40
41
|
import { ocMemoryStatus } from "./tools/memory-status";
|
|
41
42
|
import { ocMockFallback } from "./tools/mock-fallback";
|
|
@@ -214,6 +215,7 @@ const plugin: Plugin = async (input) => {
|
|
|
214
215
|
oc_doctor: ocDoctor,
|
|
215
216
|
oc_quick: ocQuick,
|
|
216
217
|
oc_forensics: ocForensics,
|
|
218
|
+
oc_hashline_edit: ocHashlineEdit,
|
|
217
219
|
oc_review: ocReview,
|
|
218
220
|
oc_logs: ocLogs,
|
|
219
221
|
oc_session_stats: ocSessionStats,
|
|
@@ -2,6 +2,7 @@ import { sanitizeTemplateContent } from "../../review/sanitize";
|
|
|
2
2
|
import { getArtifactRef } from "../artifacts";
|
|
3
3
|
import { groupByWave } from "../plan";
|
|
4
4
|
import type { BuildProgress, Task } from "../types";
|
|
5
|
+
import { assignWaves } from "../wave-assigner";
|
|
5
6
|
import type { DispatchResult, PhaseHandler } from "./types";
|
|
6
7
|
import { AGENT_NAMES } from "./types";
|
|
7
8
|
|
|
@@ -130,6 +131,44 @@ export const handleBuild: PhaseHandler = async (state, _artifactDir, result?) =>
|
|
|
130
131
|
} satisfies DispatchResult);
|
|
131
132
|
}
|
|
132
133
|
|
|
134
|
+
// Auto-assign waves from depends_on declarations (D-15)
|
|
135
|
+
let effectiveTasks = tasks;
|
|
136
|
+
const hasDependencies = tasks.some((t) => t.depends_on && t.depends_on.length > 0);
|
|
137
|
+
if (hasDependencies) {
|
|
138
|
+
const waveResult = assignWaves(
|
|
139
|
+
tasks.map((t) => ({ id: t.id, depends_on: t.depends_on ?? [] })),
|
|
140
|
+
);
|
|
141
|
+
if (waveResult.cycles.length > 0) {
|
|
142
|
+
const cycleSet = new Set(waveResult.cycles);
|
|
143
|
+
effectiveTasks = tasks.map((t) => {
|
|
144
|
+
if (cycleSet.has(t.id)) return { ...t, status: "BLOCKED" as const };
|
|
145
|
+
const assigned = waveResult.assignments.get(t.id);
|
|
146
|
+
return assigned !== undefined ? { ...t, wave: assigned } : t;
|
|
147
|
+
});
|
|
148
|
+
} else {
|
|
149
|
+
effectiveTasks = tasks.map((t) => {
|
|
150
|
+
const assigned = waveResult.assignments.get(t.id);
|
|
151
|
+
return assigned !== undefined ? { ...t, wave: assigned } : t;
|
|
152
|
+
});
|
|
153
|
+
}
|
|
154
|
+
}
|
|
155
|
+
|
|
156
|
+
// Check if all remaining tasks are BLOCKED (cycles or MAX_TASKS cap)
|
|
157
|
+
const nonDoneTasks = effectiveTasks.filter((t) => t.status !== "DONE" && t.status !== "SKIPPED");
|
|
158
|
+
if (nonDoneTasks.length > 0 && nonDoneTasks.every((t) => t.status === "BLOCKED")) {
|
|
159
|
+
const blockedIds = nonDoneTasks.map((t) => t.id).join(", ");
|
|
160
|
+
return Object.freeze({
|
|
161
|
+
action: "error" as const,
|
|
162
|
+
progress: `All remaining tasks are BLOCKED due to dependency cycles: [${blockedIds}]`,
|
|
163
|
+
_stateUpdates: Object.freeze({
|
|
164
|
+
buildProgress: Object.freeze({
|
|
165
|
+
...buildProgress,
|
|
166
|
+
}),
|
|
167
|
+
tasks: effectiveTasks,
|
|
168
|
+
}),
|
|
169
|
+
});
|
|
170
|
+
}
|
|
171
|
+
|
|
133
172
|
// Case 1: Review pending + result provided -> process review outcome
|
|
134
173
|
if (buildProgress.reviewPending && result) {
|
|
135
174
|
if (hasCriticalFindings(result)) {
|
|
@@ -157,7 +196,7 @@ export const handleBuild: PhaseHandler = async (state, _artifactDir, result?) =>
|
|
|
157
196
|
}
|
|
158
197
|
|
|
159
198
|
// No critical -> advance to next wave
|
|
160
|
-
const waveMap = groupByWave(
|
|
199
|
+
const waveMap = groupByWave(effectiveTasks);
|
|
161
200
|
const nextWave = findCurrentWave(waveMap);
|
|
162
201
|
|
|
163
202
|
if (nextWave === null) {
|
|
@@ -205,7 +244,7 @@ export const handleBuild: PhaseHandler = async (state, _artifactDir, result?) =>
|
|
|
205
244
|
phase: "BUILD",
|
|
206
245
|
progress: `Wave ${nextWave} — ${pendingTasks.length} concurrent tasks`,
|
|
207
246
|
_stateUpdates: {
|
|
208
|
-
tasks: [...markTasksInProgress(
|
|
247
|
+
tasks: [...markTasksInProgress(effectiveTasks, dispatchedIds)],
|
|
209
248
|
buildProgress: { ...updatedProgress, currentTask: null },
|
|
210
249
|
},
|
|
211
250
|
} satisfies DispatchResult);
|
|
@@ -214,9 +253,9 @@ export const handleBuild: PhaseHandler = async (state, _artifactDir, result?) =>
|
|
|
214
253
|
// Case 2: Result provided + not review pending -> mark task done
|
|
215
254
|
// For dispatch_multi, currentTask may be null — find the first IN_PROGRESS task instead
|
|
216
255
|
const taskToComplete =
|
|
217
|
-
buildProgress.currentTask ??
|
|
256
|
+
buildProgress.currentTask ?? effectiveTasks.find((t) => t.status === "IN_PROGRESS")?.id ?? null;
|
|
218
257
|
if (result && !buildProgress.reviewPending && taskToComplete !== null) {
|
|
219
|
-
const updatedTasks = markTaskDone(
|
|
258
|
+
const updatedTasks = markTaskDone(effectiveTasks, taskToComplete);
|
|
220
259
|
const waveMap = groupByWave(updatedTasks);
|
|
221
260
|
const currentWave = buildProgress.currentWave ?? 1;
|
|
222
261
|
|
|
@@ -280,7 +319,7 @@ export const handleBuild: PhaseHandler = async (state, _artifactDir, result?) =>
|
|
|
280
319
|
}
|
|
281
320
|
|
|
282
321
|
// Case 3: No result (first call or resume) -> find first pending wave
|
|
283
|
-
const waveMap = groupByWave(
|
|
322
|
+
const waveMap = groupByWave(effectiveTasks);
|
|
284
323
|
const currentWave = findCurrentWave(waveMap);
|
|
285
324
|
|
|
286
325
|
if (currentWave === null) {
|
|
@@ -352,7 +391,7 @@ export const handleBuild: PhaseHandler = async (state, _artifactDir, result?) =>
|
|
|
352
391
|
phase: "BUILD",
|
|
353
392
|
progress: `Wave ${currentWave} — ${pendingTasks.length} concurrent tasks`,
|
|
354
393
|
_stateUpdates: {
|
|
355
|
-
tasks: [...markTasksInProgress(
|
|
394
|
+
tasks: [...markTasksInProgress(effectiveTasks, dispatchedIds)],
|
|
356
395
|
buildProgress: {
|
|
357
396
|
...buildProgress,
|
|
358
397
|
currentTask: null,
|
|
@@ -42,6 +42,7 @@ export const taskSchema = z.object({
|
|
|
42
42
|
title: z.string().max(2048),
|
|
43
43
|
status: z.enum(["PENDING", "IN_PROGRESS", "DONE", "FAILED", "SKIPPED", "BLOCKED"]),
|
|
44
44
|
wave: z.number(),
|
|
45
|
+
depends_on: z.array(z.number()).default([]),
|
|
45
46
|
attempt: z.number().default(0),
|
|
46
47
|
strike: z.number().default(0),
|
|
47
48
|
});
|
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Automatic wave assignment from task dependencies using Kahn's algorithm.
|
|
3
|
+
* Tasks declare depends_on arrays, this module computes optimal wave numbers.
|
|
4
|
+
* Reuses the cycle detection concept from src/skills/dependency-resolver.ts.
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
export interface TaskNode {
|
|
8
|
+
readonly id: number;
|
|
9
|
+
readonly depends_on: readonly number[];
|
|
10
|
+
}
|
|
11
|
+
|
|
12
|
+
export interface WaveAssignment {
|
|
13
|
+
readonly assignments: ReadonlyMap<number, number>; // taskId -> wave number
|
|
14
|
+
readonly cycles: readonly number[]; // task IDs participating in cycles
|
|
15
|
+
}
|
|
16
|
+
|
|
17
|
+
/** Hard cap on task count to prevent DoS via crafted dependency chains. */
|
|
18
|
+
const MAX_TASKS = 500;
|
|
19
|
+
|
|
20
|
+
/**
|
|
21
|
+
* Assign wave numbers to tasks based on their depends_on relationships.
|
|
22
|
+
* Uses Kahn's algorithm (BFS-based topological sort):
|
|
23
|
+
* 1. Build in-degree map from depends_on
|
|
24
|
+
* 2. All tasks with in-degree 0 -> Wave 1
|
|
25
|
+
* 3. Remove Wave 1, decrement in-degrees of dependents
|
|
26
|
+
* 4. Repeat for Wave 2, 3, etc.
|
|
27
|
+
* 5. Any remaining tasks are in cycles
|
|
28
|
+
*
|
|
29
|
+
* Tasks with empty depends_on arrays get wave 1 (backward compatible).
|
|
30
|
+
* Dependencies referencing non-existent task IDs are silently ignored.
|
|
31
|
+
*/
|
|
32
|
+
export function assignWaves(tasks: readonly TaskNode[]): WaveAssignment {
|
|
33
|
+
if (tasks.length === 0) {
|
|
34
|
+
return Object.freeze({
|
|
35
|
+
assignments: Object.freeze(new Map<number, number>()),
|
|
36
|
+
cycles: Object.freeze([] as number[]),
|
|
37
|
+
});
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
if (tasks.length > MAX_TASKS) {
|
|
41
|
+
return Object.freeze({
|
|
42
|
+
assignments: Object.freeze(new Map<number, number>()),
|
|
43
|
+
cycles: Object.freeze(tasks.map((t) => t.id)),
|
|
44
|
+
});
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
// Build set of valid task IDs
|
|
48
|
+
const validIds = new Set(tasks.map((t) => t.id));
|
|
49
|
+
|
|
50
|
+
// Build adjacency list: for each task, which tasks depend on it
|
|
51
|
+
// (reverse of depends_on — "dependents" map)
|
|
52
|
+
const dependents = new Map<number, number[]>();
|
|
53
|
+
for (const id of validIds) {
|
|
54
|
+
dependents.set(id, []);
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
// Build in-degree map: count of valid dependencies per task
|
|
58
|
+
// Deduplicate depends_on and skip self-dependencies
|
|
59
|
+
const inDegree = new Map<number, number>();
|
|
60
|
+
for (const task of tasks) {
|
|
61
|
+
const uniqueDeps = [...new Set(task.depends_on)];
|
|
62
|
+
let degree = 0;
|
|
63
|
+
for (const dep of uniqueDeps) {
|
|
64
|
+
if (dep === task.id) continue; // Skip self-dependency
|
|
65
|
+
if (validIds.has(dep)) {
|
|
66
|
+
degree++;
|
|
67
|
+
const list = dependents.get(dep);
|
|
68
|
+
if (list) {
|
|
69
|
+
list.push(task.id);
|
|
70
|
+
}
|
|
71
|
+
}
|
|
72
|
+
}
|
|
73
|
+
inDegree.set(task.id, degree);
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
// BFS: process waves
|
|
77
|
+
const assignments = new Map<number, number>();
|
|
78
|
+
let currentQueue: number[] = [];
|
|
79
|
+
|
|
80
|
+
// Initialize with all tasks that have in-degree 0 (wave 1)
|
|
81
|
+
for (const task of tasks) {
|
|
82
|
+
if ((inDegree.get(task.id) ?? 0) === 0) {
|
|
83
|
+
currentQueue.push(task.id);
|
|
84
|
+
}
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
let wave = 1;
|
|
88
|
+
while (currentQueue.length > 0) {
|
|
89
|
+
const nextQueue: number[] = [];
|
|
90
|
+
for (const taskId of currentQueue) {
|
|
91
|
+
assignments.set(taskId, wave);
|
|
92
|
+
const deps = dependents.get(taskId) ?? [];
|
|
93
|
+
for (const dependent of deps) {
|
|
94
|
+
const newDegree = (inDegree.get(dependent) ?? 1) - 1;
|
|
95
|
+
inDegree.set(dependent, newDegree);
|
|
96
|
+
if (newDegree === 0) {
|
|
97
|
+
nextQueue.push(dependent);
|
|
98
|
+
}
|
|
99
|
+
}
|
|
100
|
+
}
|
|
101
|
+
currentQueue = nextQueue;
|
|
102
|
+
wave++;
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
// Any tasks not assigned a wave are in cycles
|
|
106
|
+
const cycleIds: number[] = [];
|
|
107
|
+
for (const task of tasks) {
|
|
108
|
+
if (!assignments.has(task.id)) {
|
|
109
|
+
cycleIds.push(task.id);
|
|
110
|
+
}
|
|
111
|
+
}
|
|
112
|
+
|
|
113
|
+
return Object.freeze({
|
|
114
|
+
assignments: Object.freeze(new Map(assignments)),
|
|
115
|
+
cycles: Object.freeze(cycleIds),
|
|
116
|
+
});
|
|
117
|
+
}
|
|
@@ -0,0 +1,317 @@
|
|
|
1
|
+
import { readFile, writeFile } from "node:fs/promises";
|
|
2
|
+
import { isAbsolute, resolve } from "node:path";
|
|
3
|
+
import { tool } from "@opencode-ai/plugin";
|
|
4
|
+
|
|
5
|
+
/**
 * CID alphabet from omo — 16 uppercase characters used for 2-char line hashes.
 *
 * 16 symbols = 4 bits per character, so a 2-char hash encodes the low 8 bits
 * of the FNV-1a line-content hash (see computeLineHash).
 */
export const CID_ALPHABET = "ZPMQVRWSNKTXJBYH";

// Set view of the alphabet for O(1) per-character validation (used by parseAnchor).
const CID_SET = new Set(CID_ALPHABET);
|
|
11
|
+
|
|
12
|
+
/**
|
|
13
|
+
* FNV-1a 32-bit hash.
|
|
14
|
+
*/
|
|
15
|
+
function fnv1a(str: string): number {
|
|
16
|
+
let hash = 0x811c9dc5; // FNV offset basis
|
|
17
|
+
for (let i = 0; i < str.length; i++) {
|
|
18
|
+
hash ^= str.charCodeAt(i);
|
|
19
|
+
hash = Math.imul(hash, 0x01000193); // FNV prime
|
|
20
|
+
}
|
|
21
|
+
return hash >>> 0;
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
/**
|
|
25
|
+
* Compute a 2-character line hash using FNV-1a and CID alphabet.
|
|
26
|
+
*/
|
|
27
|
+
export function computeLineHash(content: string): string {
|
|
28
|
+
const h = fnv1a(content);
|
|
29
|
+
return CID_ALPHABET[h & 0xf] + CID_ALPHABET[(h >> 4) & 0xf];
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
/**
|
|
33
|
+
* Parse a "LINE#HASH" anchor string into its components.
|
|
34
|
+
*/
|
|
35
|
+
export function parseAnchor(
|
|
36
|
+
anchor: string,
|
|
37
|
+
): { readonly line: number; readonly hash: string } | { readonly error: string } {
|
|
38
|
+
const idx = anchor.indexOf("#");
|
|
39
|
+
if (idx < 1) {
|
|
40
|
+
return { error: `Invalid anchor format: "${anchor}". Expected "LINE#HASH" (e.g. "42#VK").` };
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
const lineStr = anchor.slice(0, idx);
|
|
44
|
+
const hash = anchor.slice(idx + 1);
|
|
45
|
+
|
|
46
|
+
const line = Number.parseInt(lineStr, 10);
|
|
47
|
+
if (!Number.isFinite(line) || line < 1) {
|
|
48
|
+
return { error: `Invalid line number in anchor "${anchor}". Must be >= 1.` };
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
if (hash.length !== 2 || !CID_SET.has(hash[0]) || !CID_SET.has(hash[1])) {
|
|
52
|
+
return {
|
|
53
|
+
error: `Invalid hash "${hash}" in anchor "${anchor}". Must be 2 chars from CID alphabet.`,
|
|
54
|
+
};
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
return { line, hash };
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
// --- Types ---

/**
 * A single hash-anchored edit operation.
 * "replace" rewrites (or deletes) the anchored line/range; "append" inserts
 * after the anchored line; "prepend" inserts before it.
 */
interface HashlineEdit {
  readonly op: "replace" | "append" | "prepend";
  readonly pos: string; // "LINE#HASH" anchor
  readonly end?: string; // End anchor for range replace
  readonly lines: string | readonly string[] | null; // null = delete
}

/**
 * Arguments for hashlineEditCore: an absolute file path plus the edits to apply.
 */
interface HashlineEditArgs {
  readonly file: string;
  readonly edits: readonly HashlineEdit[];
}
|
73
|
+
|
|
74
|
+
// --- Helpers ---
|
|
75
|
+
|
|
76
|
+
function formatAnchor(lineNum: number, content: string): string {
|
|
77
|
+
return `${lineNum}#${computeLineHash(content)}`;
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
function getSurroundingAnchors(
|
|
81
|
+
fileLines: readonly string[],
|
|
82
|
+
lineIdx: number,
|
|
83
|
+
radius: number,
|
|
84
|
+
): string {
|
|
85
|
+
const anchors: string[] = [];
|
|
86
|
+
const start = Math.max(0, lineIdx - radius);
|
|
87
|
+
const end = Math.min(fileLines.length - 1, lineIdx + radius);
|
|
88
|
+
for (let i = start; i <= end; i++) {
|
|
89
|
+
anchors.push(` ${formatAnchor(i + 1, fileLines[i])} ${fileLines[i]}`);
|
|
90
|
+
}
|
|
91
|
+
return anchors.join("\n");
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
function toLineArray(lines: string | readonly string[] | null): readonly string[] | null {
|
|
95
|
+
if (lines === null) return null;
|
|
96
|
+
if (typeof lines === "string") return [lines];
|
|
97
|
+
return lines;
|
|
98
|
+
}
|
|
99
|
+
|
|
100
|
+
// --- Core function ---
|
|
101
|
+
|
|
102
|
+
/**
 * Apply a batch of hash-anchored edits to a file on disk.
 *
 * Pipeline (all-or-nothing — any failure returns an "Error: ..." string
 * before the file is modified):
 *   1. Require an absolute path, then read the file.
 *   2. Parse every anchor; collect format and bounds errors.
 *   3. Verify each anchored line's current hash matches the anchor
 *      (staleness check), reporting fresh surrounding anchors on mismatch.
 *   4. Reject overlapping edit ranges.
 *   5. Apply edits bottom-up so earlier splices don't shift later indices.
 *   6. Write the result back, preserving the original trailing-newline state.
 *
 * @param args File path plus the list of edits to apply.
 * @returns A human-readable success message, or an "Error: ..." message.
 *   Never throws: read/write failures are converted to error strings.
 */
export async function hashlineEditCore(args: HashlineEditArgs): Promise<string> {
  // Path safety: require absolute paths to prevent relative path confusion
  if (!isAbsolute(args.file)) {
    return `Error: File path must be absolute. Got: "${args.file}"`;
  }
  const resolved = resolve(args.file);

  // Early out before touching the filesystem.
  if (args.edits.length === 0) {
    return "Applied 0 edit(s) — no changes made.";
  }

  let raw: string;
  try {
    raw = await readFile(resolved, "utf-8");
  } catch (err) {
    return `Error: Cannot read file "${resolved}": ${err instanceof Error ? err.message : String(err)}`;
  }

  // Split preserving trailing newline behavior
  const hasTrailingNewline = raw.endsWith("\n");
  const fileLines = raw.split("\n");
  // If file ends with newline, split produces an extra empty string at the end — remove it
  if (hasTrailingNewline && fileLines[fileLines.length - 1] === "") {
    fileLines.pop();
  }

  // Parse all anchors first and validate
  // Parsed edits use 0-based indices; anchors are 1-based.
  const parsedEdits: Array<{
    readonly op: "replace" | "append" | "prepend";
    readonly lineIdx: number;
    readonly hash: string;
    readonly endLineIdx?: number;
    readonly endHash?: string;
    readonly lines: readonly string[] | null;
  }> = [];

  const errors: string[] = [];

  for (const edit of args.edits) {
    const parsed = parseAnchor(edit.pos);
    if ("error" in parsed) {
      errors.push(parsed.error);
      continue;
    }

    const lineIdx = parsed.line - 1; // Convert to 0-based
    if (lineIdx >= fileLines.length) {
      errors.push(`Line ${parsed.line} is out of bounds (file has ${fileLines.length} lines).`);
      continue;
    }

    let endLineIdx: number | undefined;
    let endHash: string | undefined;

    // Optional end anchor turns a single-line replace into a range replace.
    if (edit.end) {
      const parsedEnd = parseAnchor(edit.end);
      if ("error" in parsedEnd) {
        errors.push(parsedEnd.error);
        continue;
      }
      endLineIdx = parsedEnd.line - 1;
      endHash = parsedEnd.hash;
      if (endLineIdx >= fileLines.length) {
        errors.push(
          `End line ${parsedEnd.line} is out of bounds (file has ${fileLines.length} lines).`,
        );
        continue;
      }
      if (endLineIdx < lineIdx) {
        errors.push(`End line ${parsedEnd.line} is before start line ${parsed.line}.`);
        continue;
      }
    }

    parsedEdits.push({
      op: edit.op,
      lineIdx,
      hash: parsed.hash,
      endLineIdx,
      endHash,
      lines: toLineArray(edit.lines),
    });
  }

  if (errors.length > 0) {
    return `Error: ${errors.join("\n")}`;
  }

  // Validate hashes against current file content
  // A mismatch means the caller's view of the file is stale; report fresh
  // anchors around the disputed line so the caller can retry.
  const hashErrors: string[] = [];

  for (const edit of parsedEdits) {
    const actualHash = computeLineHash(fileLines[edit.lineIdx]);
    if (actualHash !== edit.hash) {
      const surrounding = getSurroundingAnchors(fileLines, edit.lineIdx, 2);
      hashErrors.push(
        `Hash mismatch at line ${edit.lineIdx + 1}: expected ${edit.hash}, actual ${actualHash}.\nUpdated anchors:\n${surrounding}`,
      );
    }

    if (edit.endLineIdx !== undefined && edit.endHash !== undefined) {
      const actualEndHash = computeLineHash(fileLines[edit.endLineIdx]);
      if (actualEndHash !== edit.endHash) {
        const surrounding = getSurroundingAnchors(fileLines, edit.endLineIdx, 2);
        hashErrors.push(
          `Hash mismatch at end line ${edit.endLineIdx + 1}: expected ${edit.endHash}, actual ${actualEndHash}.\nUpdated anchors:\n${surrounding}`,
        );
      }
    }
  }

  if (hashErrors.length > 0) {
    return `Error: Stale edit(s) detected.\n${hashErrors.join("\n\n")}`;
  }

  // Detect overlapping edits — reject before applying any mutations
  // NOTE: append/prepend are treated as occupying their anchor line here,
  // so two inserts at the same anchor are also rejected as overlapping.
  for (let i = 0; i < parsedEdits.length; i++) {
    const a = parsedEdits[i];
    const aStart = a.lineIdx;
    const aEnd = a.endLineIdx ?? a.lineIdx;
    for (let j = i + 1; j < parsedEdits.length; j++) {
      const b = parsedEdits[j];
      const bStart = b.lineIdx;
      const bEnd = b.endLineIdx ?? b.lineIdx;
      if (aStart <= bEnd && bStart <= aEnd) {
        return `Error: Overlapping edits at lines ${aStart + 1}-${aEnd + 1} and ${bStart + 1}-${bEnd + 1}. Split into separate calls.`;
      }
    }
  }

  // Sort edits bottom-up (highest line index first) to prevent drift
  const sortedEdits = [...parsedEdits].sort((a, b) => {
    const aLine = a.endLineIdx ?? a.lineIdx;
    const bLine = b.endLineIdx ?? b.lineIdx;
    return bLine - aLine;
  });

  // Apply edits
  for (const edit of sortedEdits) {
    const newLines = edit.lines;

    switch (edit.op) {
      case "replace": {
        if (edit.endLineIdx !== undefined) {
          // Range replace: remove from lineIdx to endLineIdx (inclusive), insert newLines
          const count = edit.endLineIdx - edit.lineIdx + 1;
          if (newLines === null) {
            fileLines.splice(edit.lineIdx, count);
          } else {
            fileLines.splice(edit.lineIdx, count, ...newLines);
          }
        } else {
          // Single line replace
          if (newLines === null) {
            fileLines.splice(edit.lineIdx, 1);
          } else {
            fileLines.splice(edit.lineIdx, 1, ...newLines);
          }
        }
        break;
      }
      case "append": {
        // Insert after the anchored line; null payload degenerates to a no-op.
        const insertLines = newLines ?? [];
        fileLines.splice(edit.lineIdx + 1, 0, ...insertLines);
        break;
      }
      case "prepend": {
        // Insert before the anchored line; null payload degenerates to a no-op.
        const insertLines = newLines ?? [];
        fileLines.splice(edit.lineIdx, 0, ...insertLines);
        break;
      }
    }
  }

  // Write back
  const output = fileLines.join("\n") + (hasTrailingNewline ? "\n" : "");
  try {
    await writeFile(resolved, output, "utf-8");
  } catch (err) {
    return `Error: Cannot write file "${resolved}": ${err instanceof Error ? err.message : String(err)}`;
  }

  return `Applied ${sortedEdits.length} edit(s) to ${resolved}.`;
}
|
|
286
|
+
|
|
287
|
+
// --- Tool wrapper ---
|
|
288
|
+
|
|
289
|
+
/**
 * opencode tool wrapper: declares the argument schema for hash-anchored
 * editing and delegates execution to hashlineEditCore, which performs all
 * validation (anchor format, bounds, staleness, overlap) and the file I/O.
 */
export const ocHashlineEdit = tool({
  description:
    "Edit files using hash-anchored line references (LINE#ID format). Validates line content hasn't changed before applying edits. Supports replace, append, and prepend operations.",
  args: {
    file: tool.schema.string().describe("Absolute path to the file to edit"),
    edits: tool.schema
      .array(
        tool.schema.object({
          op: tool.schema.enum(["replace", "append", "prepend"]).describe("Edit operation type"),
          pos: tool.schema.string().describe("LINE#HASH anchor, e.g. '42#VK'"),
          end: tool.schema
            .string()
            .optional()
            .describe("End anchor for range replace, e.g. '48#SN'"),
          lines: tool.schema
            .union([
              tool.schema.string(),
              tool.schema.array(tool.schema.string()),
              tool.schema.null(),
            ])
            .describe("New content (string, string[], or null to delete)"),
        }),
      )
      .describe("Array of edit operations to apply"),
  },
  async execute(args) {
    // All error handling lives in the core; errors come back as strings.
    return hashlineEditCore(args);
  },
});
|