micode 0.8.2 → 0.8.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -2
- package/package.json +6 -3
- package/src/agents/artifact-searcher.ts +1 -1
- package/src/agents/brainstormer.ts +1 -1
- package/src/agents/codebase-analyzer.ts +1 -1
- package/src/agents/codebase-locator.ts +1 -1
- package/src/agents/commander.ts +1 -1
- package/src/agents/executor.ts +1 -1
- package/src/agents/implementer.ts +1 -1
- package/src/agents/ledger-creator.ts +1 -1
- package/src/agents/pattern-finder.ts +1 -1
- package/src/agents/planner.ts +1 -1
- package/src/agents/project-initializer.ts +1 -1
- package/src/agents/reviewer.ts +1 -1
- package/src/hooks/auto-compact.ts +20 -19
package/README.md
CHANGED

@@ -35,7 +35,7 @@ Transform designs into implementation plans with bite-sized tasks (2-5 min each)
 Execute in git worktree for isolation. The **Executor** orchestrates implementer→reviewer cycles with parallel execution via fire-and-check pattern.
 
 ### Session Continuity
-Maintain context across sessions with structured compaction. Run `/ledger` to create/update `thoughts/ledgers/CONTINUITY_{session}.md`.
+Maintain context across sessions with structured compaction. Run `/ledger` to create/update `thoughts/ledgers/CONTINUITY_{session}.md`.
 
 ## Commands
 
@@ -81,7 +81,7 @@ Maintain context across sessions with structured compaction. Run `/ledger` to cr
 
 - **Think Mode** - Keywords like "think hard" enable 32k token thinking budget
 - **Ledger Loader** - Injects continuity ledger into system prompt
-- **Auto-
+- **Auto-Compact** - At 50% context usage, automatically summarizes session to reduce context
 - **File Ops Tracker** - Tracks read/write/edit for deterministic logging
 - **Artifact Auto-Index** - Indexes artifacts in thoughts/ directories
 - **Context Injector** - Injects ARCHITECTURE.md, CODE_STYLE.md
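
The new **Auto-Compact** bullet corresponds to the `src/hooks/auto-compact.ts` changes further down in this diff: on each assistant message the hook inspects token usage and, past a threshold, asks the session to summarize itself. A minimal TypeScript sketch of that trigger check, assuming a fixed context-window size and a 50% threshold; the constants are illustrative, not values read from the plugin source, and how the hook actually combines the `input` and `cache.read` fields is not visible in this diff:

```ts
// Sketch of the Auto-Compact trigger condition; CONTEXT_LIMIT and
// COMPACT_THRESHOLD are assumed values, not taken from micode itself.
const CONTEXT_LIMIT = 200_000;
const COMPACT_THRESHOLD = 0.5;

interface TokenUsage {
  input?: number;
  cache?: { read?: number };
}

function shouldCompact(tokens: TokenUsage | undefined): boolean {
  // Count fresh input plus cached reads as "context in use".
  const used = (tokens?.input ?? 0) + (tokens?.cache?.read ?? 0);
  return used / CONTEXT_LIMIT >= COMPACT_THRESHOLD;
}
```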
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "micode",
-  "version": "0.8.2",
+  "version": "0.8.4",
   "description": "OpenCode plugin with Brainstorm-Research-Plan-Implement workflow",
   "module": "src/index.ts",
   "main": "src/index.ts",
@@ -11,6 +11,7 @@
     "INSTALL_CLAUDE.md"
   ],
   "scripts": {
+    "prepare": "lefthook install",
     "build": "tsc --noEmit",
     "typecheck": "tsc --noEmit",
     "prepublishOnly": "bun run typecheck",
@@ -38,12 +39,14 @@
     "url": "https://github.com/vtemian/micode/issues"
   },
   "dependencies": {
-    "@opencode-ai/plugin": "
-    "bun-pty": "^0.4.5"
+    "@opencode-ai/plugin": "1.1.6",
+    "bun-pty": "^0.4.5",
+    "valibot": "^1.2.0"
   },
   "devDependencies": {
     "@biomejs/biome": "^2.3.10",
     "bun-types": "latest",
+    "lefthook": "^2.0.13",
     "typescript": "^5.7.3"
   }
 }
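
Two of the `package.json` additions are worth a note. The new `prepare` script is the standard lifecycle hook that package managers typically run after installing dependencies, so a fresh clone gets its lefthook-managed git hooks installed automatically. The new runtime dependency, `valibot`, is a schema-validation library; this diff does not show where micode uses it, so the snippet below is only a generic illustration of the library the dependency pulls in:

```ts
// Generic valibot usage sketch - NOT code from micode; the plugin's actual
// schemas are not visible in this diff.
import * as v from "valibot";

const ConfigSchema = v.object({
  session: v.string(),
  threshold: v.optional(v.number()),
});

// v.parse throws on invalid input; v.safeParse returns a result object instead.
const config = v.parse(ConfigSchema, { session: "abc", threshold: 0.5 });
console.log(config.session);
```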
package/src/agents/artifact-searcher.ts
CHANGED

@@ -4,7 +4,7 @@ import type { AgentConfig } from "@opencode-ai/sdk";
 export const artifactSearcherAgent: AgentConfig = {
   description: "Searches past handoffs, plans, and ledgers for relevant precedent",
   mode: "subagent",
-  model: "
+  model: "openai/gpt-5.2-codex",
   temperature: 0.3,
   tools: {
     edit: false,
package/src/agents/brainstormer.ts
CHANGED

@@ -3,7 +3,7 @@ import type { AgentConfig } from "@opencode-ai/sdk";
 export const brainstormerAgent: AgentConfig = {
   description: "Refines rough ideas into fully-formed designs through collaborative questioning",
   mode: "primary",
-  model: "
+  model: "openai/gpt-5.2-codex",
   temperature: 0.7,
   tools: {
     spawn_agent: false, // Primary agents use built-in Task tool, not spawn_agent
package/src/agents/codebase-analyzer.ts
CHANGED

@@ -3,7 +3,7 @@ import type { AgentConfig } from "@opencode-ai/sdk";
 export const codebaseAnalyzerAgent: AgentConfig = {
   description: "Explains HOW code works with precise file:line references",
   mode: "subagent",
-  model: "
+  model: "openai/gpt-5.2-codex",
   temperature: 0.2,
   tools: {
     write: false,
package/src/agents/codebase-locator.ts
CHANGED

@@ -3,7 +3,7 @@ import type { AgentConfig } from "@opencode-ai/sdk";
 export const codebaseLocatorAgent: AgentConfig = {
   description: "Finds WHERE files live in the codebase",
   mode: "subagent",
-  model: "
+  model: "openai/gpt-5.2-codex",
   temperature: 0.1,
   tools: {
     write: false,
package/src/agents/commander.ts
CHANGED

@@ -141,7 +141,7 @@ Just do it - including obvious follow-up actions.
 export const primaryAgent: AgentConfig = {
   description: "Pragmatic orchestrator. Direct, honest, delegates to specialists.",
   mode: "primary",
-  model: "
+  model: "openai/gpt-5.2-codex",
   temperature: 0.2,
   thinking: {
     type: "enabled",
package/src/agents/executor.ts
CHANGED

@@ -3,7 +3,7 @@ import type { AgentConfig } from "@opencode-ai/sdk";
 export const executorAgent: AgentConfig = {
   description: "Executes plan task-by-task with parallel execution where possible",
   mode: "subagent",
-  model: "
+  model: "openai/gpt-5.2-codex",
   temperature: 0.2,
   prompt: `<environment>
 You are running as part of the "micode" OpenCode plugin (NOT Claude Code).
package/src/agents/implementer.ts
CHANGED

@@ -3,7 +3,7 @@ import type { AgentConfig } from "@opencode-ai/sdk";
 export const implementerAgent: AgentConfig = {
   description: "Executes implementation tasks from a plan",
   mode: "subagent",
-  model: "
+  model: "openai/gpt-5.2-codex",
   temperature: 0.1,
   prompt: `<environment>
 You are running as part of the "micode" OpenCode plugin (NOT Claude Code).
package/src/agents/ledger-creator.ts
CHANGED

@@ -4,7 +4,7 @@ import type { AgentConfig } from "@opencode-ai/sdk";
 export const ledgerCreatorAgent: AgentConfig = {
   description: "Creates and updates continuity ledgers for session state preservation",
   mode: "subagent",
-  model: "
+  model: "openai/gpt-5.2-codex",
   temperature: 0.2,
   tools: {
     edit: false,
package/src/agents/pattern-finder.ts
CHANGED

@@ -3,7 +3,7 @@ import type { AgentConfig } from "@opencode-ai/sdk";
 export const patternFinderAgent: AgentConfig = {
   description: "Finds existing patterns and examples to model after",
   mode: "subagent",
-  model: "
+  model: "openai/gpt-5.2-codex",
   temperature: 0.2,
   tools: {
     write: false,
package/src/agents/planner.ts
CHANGED

@@ -3,7 +3,7 @@ import type { AgentConfig } from "@opencode-ai/sdk";
 export const plannerAgent: AgentConfig = {
   description: "Creates detailed implementation plans with exact file paths, complete code examples, and TDD steps",
   mode: "subagent",
-  model: "
+  model: "openai/gpt-5.2-codex",
   temperature: 0.3,
   prompt: `<environment>
 You are running as part of the "micode" OpenCode plugin (NOT Claude Code).
package/src/agents/project-initializer.ts
CHANGED

@@ -218,7 +218,7 @@ Available micode agents: codebase-locator, codebase-analyzer, pattern-finder.
 
 export const projectInitializerAgent: AgentConfig = {
   mode: "subagent",
-  model: "
+  model: "openai/gpt-5.2-codex",
   temperature: 0.3,
   maxTokens: 32000,
   prompt: PROMPT,
package/src/agents/reviewer.ts
CHANGED

@@ -3,7 +3,7 @@ import type { AgentConfig } from "@opencode-ai/sdk";
 export const reviewerAgent: AgentConfig = {
   description: "Reviews implementation for correctness and style",
   mode: "subagent",
-  model: "
+  model: "openai/gpt-5.2-codex",
   temperature: 0.3,
   tools: {
     write: false,
package/src/hooks/auto-compact.ts
CHANGED

@@ -133,6 +133,10 @@ ${summaryText}
     })
     .catch(() => {});
 
+  // Set up listener BEFORE calling summarize to avoid race condition
+  // (summary message event could fire before we start listening)
+  const compactionPromise = waitForCompaction(sessionID);
+
   // Start the compaction - this returns immediately while compaction runs async
   await ctx.client.session.summarize({
     path: { id: sessionID },
@@ -140,8 +144,8 @@ ${summaryText}
     query: { directory: ctx.directory },
   });
 
-  // Wait for the
-  await
+  // Wait for the summary message to be created (message.updated with summary: true)
+  await compactionPromise;
 
   state.lastCompactTime.set(sessionID, Date.now());
 
@@ -179,20 +183,6 @@ ${summaryText}
   event: async ({ event }: { event: { type: string; properties?: unknown } }) => {
     const props = event.properties as Record<string, unknown> | undefined;
 
-    // Handle compaction completion
-    if (event.type === "session.compacted") {
-      const sessionID = props?.sessionID as string | undefined;
-      if (sessionID) {
-        const pending = state.pendingCompactions.get(sessionID);
-        if (pending) {
-          clearTimeout(pending.timeoutId);
-          state.pendingCompactions.delete(sessionID);
-          pending.resolve();
-        }
-      }
-      return;
-    }
-
     // Cleanup on session delete
     if (event.type === "session.deleted") {
       const sessionInfo = props?.info as { id?: string } | undefined;
@@ -209,15 +199,26 @@ ${summaryText}
       return;
     }
 
-    // Monitor
+    // Monitor message events
     if (event.type === "message.updated") {
       const info = props?.info as Record<string, unknown> | undefined;
       const sessionID = info?.sessionID as string | undefined;
 
       if (!sessionID || info?.role !== "assistant") return;
 
-      //
-      if (info?.summary === true)
+      // Check if this is a summary message - signals compaction complete
+      if (info?.summary === true) {
+        const pending = state.pendingCompactions.get(sessionID);
+        if (pending) {
+          clearTimeout(pending.timeoutId);
+          state.pendingCompactions.delete(sessionID);
+          pending.resolve();
+        }
+        return;
+      }
+
+      // Skip triggering compaction if we're already waiting for one
+      if (state.pendingCompactions.has(sessionID)) return;
 
       const tokens = info?.tokens as { input?: number; cache?: { read?: number } } | undefined;
       const inputTokens = tokens?.input || 0;
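
The net effect of the `auto-compact.ts` changes is an ordering fix: the completion waiter is now registered before `session.summarize` is called, and completion is detected from the `message.updated` event carrying `summary: true` rather than from `session.compacted`. A simplified sketch of that waiter pattern, assuming a timeout fallback; the plugin's real `waitForCompaction` body is not shown in this diff, so the details below are illustrative:

```ts
// Listener-before-call pattern: create the promise first, then fire the async
// operation, so an early completion event can never be missed.
type Pending = { resolve: () => void; timeoutId: ReturnType<typeof setTimeout> };
const pendingCompactions = new Map<string, Pending>();

function waitForCompaction(sessionID: string, timeoutMs = 60_000): Promise<void> {
  return new Promise((resolve) => {
    // Fallback: never hang the hook forever if the summary event is missed.
    const timeoutId = setTimeout(() => {
      pendingCompactions.delete(sessionID);
      resolve();
    }, timeoutMs);
    pendingCompactions.set(sessionID, { resolve, timeoutId });
  });
}

// Called from the message.updated handler when info.summary === true
// (settleCompaction is a hypothetical name for the inline logic in the diff).
function settleCompaction(sessionID: string): void {
  const pending = pendingCompactions.get(sessionID);
  if (!pending) return;
  clearTimeout(pending.timeoutId);
  pendingCompactions.delete(sessionID);
  pending.resolve();
}
```

Usage mirrors the diff above: first `const done = waitForCompaction(sessionID);`, then `await client.session.summarize(...)`, then `await done;`.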