@oh-my-pi/pi-coding-agent 4.2.0 → 4.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +26 -0
- package/package.json +5 -5
- package/src/core/agent-session.ts +3 -3
- package/src/core/export-html/index.ts +1 -33
- package/src/core/system-prompt.ts +42 -0
- package/src/core/tools/complete.ts +5 -2
- package/src/core/tools/task/commands.ts +2 -6
- package/src/discovery/builtin.ts +9 -54
- package/src/discovery/claude.ts +16 -69
- package/src/discovery/codex.ts +11 -36
- package/src/discovery/helpers.ts +52 -1
- package/src/modes/interactive/controllers/selector-controller.ts +3 -3
- package/src/modes/interactive/interactive-mode.ts +3 -0
- package/src/prompts/agents/planner.md +112 -0
- package/src/prompts/agents/task.md +5 -4
- package/src/prompts/system/system-prompt.md +5 -0
- package/src/prompts/tools/task.md +25 -19
- package/src/prompts/agents/architect-plan.md +0 -10
- package/src/prompts/agents/implement-with-critic.md +0 -11
- package/src/prompts/agents/implement.md +0 -11
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,32 @@
 
 ## [Unreleased]
 
+## [4.2.1] - 2026-01-11
+### Added
+
+- Added automatic discovery and listing of AGENTS.md files in the system prompt, providing agents with an authoritative list of project-specific instruction files without runtime searching
+- Added `planner` built-in agent for comprehensive implementation planning with slow model
+
+### Changed
+
+- Refactored skill discovery to use unified `loadSkillsFromDir` helper across all providers, reducing code duplication
+- Updated skill discovery to scan only `skills/*/SKILL.md` entries instead of recursive walks in Codex provider
+- Added guidance to Task tool documentation to isolate file scopes when assigning tasks to prevent agent conflicts
+- Updated Task tool documentation to emphasize that subagents have no access to conversation history and require all relevant context to be explicitly passed
+- Revised task agent prompt to clarify that subagents have full tool access and can make file edits, run commands, and create files
+- OpenAI Codex: updated to use bundled system prompt from upstream
+- Changed `complete` tool to make `data` parameter optional when aborting, while still requiring it for successful completions
+- Skills discovery now scans only `skills/*/SKILL.md` entries instead of recursive walks
+
+### Removed
+
+- Removed `architect-plan`, `implement`, and `implement-with-critic` built-in agent commands
+
+### Fixed
+
+- Fixed editor border rendering glitch after canceling slash command autocomplete
+- Fixed login/logout credential path message to reference agent.db
+
 ## [4.2.0] - 2026-01-10
 
 ### Added
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@oh-my-pi/pi-coding-agent",
-  "version": "4.2.0",
+  "version": "4.2.1",
   "description": "Coding agent CLI with read, bash, edit, write tools and session management",
   "type": "module",
   "ompConfig": {
@@ -39,10 +39,10 @@
     "prepublishOnly": "bun run generate-template && bun run clean && bun run build"
   },
   "dependencies": {
-    "@oh-my-pi/pi-ai": "4.2.0",
-    "@oh-my-pi/pi-agent-core": "4.2.0",
-    "@oh-my-pi/pi-git-tool": "4.2.0",
-    "@oh-my-pi/pi-tui": "4.2.0",
+    "@oh-my-pi/pi-ai": "4.2.1",
+    "@oh-my-pi/pi-agent-core": "4.2.1",
+    "@oh-my-pi/pi-git-tool": "4.2.1",
+    "@oh-my-pi/pi-tui": "4.2.1",
     "@openai/agents": "^0.3.7",
     "@sinclair/typebox": "^0.34.46",
     "ajv": "^8.17.1",
package/src/core/agent-session.ts
CHANGED

@@ -17,7 +17,7 @@ import type { Agent, AgentEvent, AgentMessage, AgentState, AgentTool, ThinkingLe
 import type { AssistantMessage, ImageContent, Message, Model, TextContent, Usage } from "@oh-my-pi/pi-ai";
 import { isContextOverflow, modelsAreEqual, supportsXhigh } from "@oh-my-pi/pi-ai";
 import type { Rule } from "../capability/rule";
-import {
+import { getAgentDbPath } from "../config";
 import { theme } from "../modes/interactive/theme/theme";
 import { type BashResult, executeBash as executeBashCommand, executeBashWithOperations } from "./bash-executor";
 import {
@@ -761,7 +761,7 @@ export class AgentSession {
     if (!this.model) {
       throw new Error(
         "No model selected.\n\n" +
-          `Use /login, set an API key environment variable, or create ${
+          `Use /login, set an API key environment variable, or create ${getAgentDbPath()}\n\n` +
           "Then use /model to select a model.",
       );
     }
@@ -771,7 +771,7 @@ export class AgentSession {
     if (!apiKey) {
       throw new Error(
         `No API key found for ${this.model.provider}.\n\n` +
-          `Use /login, set an API key environment variable, or create ${
+          `Use /login, set an API key environment variable, or create ${getAgentDbPath()}`,
       );
     }
 
package/src/core/export-html/index.ts
CHANGED

@@ -1,7 +1,6 @@
 import { existsSync, writeFileSync } from "node:fs";
 import { basename } from "node:path";
-import type { AgentState
-import { buildCodexPiBridge, getCodexInstructions } from "@oh-my-pi/pi-ai";
+import type { AgentState } from "@oh-my-pi/pi-agent-core";
 import { APP_NAME } from "../../config";
 import { getResolvedThemeColors, getThemeExportColors } from "../../modes/interactive/theme/theme";
 import { SessionManager } from "../session-manager";
@@ -14,33 +13,6 @@ export interface ExportOptions {
   themeName?: string;
 }
 
-/** Info about Codex injection to show inline with model_change entries. */
-interface CodexInjectionInfo {
-  /** Codex instructions text. */
-  instructions: string;
-  /** Bridge text (tool list). */
-  bridge: string;
-}
-
-/** Build Codex injection info for display inline with model_change entries. */
-async function buildCodexInjectionInfo(tools?: AgentTool[]): Promise<CodexInjectionInfo | undefined> {
-  let instructions: string | null = null;
-  try {
-    instructions = await getCodexInstructions("gpt-5.1-codex");
-  } catch {
-    // Cache miss is expected before the first Codex request.
-  }
-
-  const bridgeText = buildCodexPiBridge(tools);
-  const instructionsText =
-    instructions ?? "(Codex instructions not cached. Run a Codex request to populate the local cache.)";
-
-  return {
-    instructions: instructionsText,
-    bridge: bridgeText,
-  };
-}
-
 /** Parse a color string to RGB values. */
 function parseColor(color: string): { r: number; g: number; b: number } | undefined {
   const hexMatch = color.match(/^#([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/);
@@ -125,8 +97,6 @@ interface SessionData {
   entries: ReturnType<SessionManager["getEntries"]>;
   leafId: string | null;
   systemPrompt?: string;
-  /** Info for rendering Codex injection inline with model_change entries. */
-  codexInjectionInfo?: CodexInjectionInfo;
   tools?: { name: string; description: string }[];
 }
 
@@ -158,7 +128,6 @@ export async function exportSessionToHtml(
     entries: sm.getEntries(),
     leafId: sm.getLeafId(),
     systemPrompt: state?.systemPrompt,
-    codexInjectionInfo: await buildCodexInjectionInfo(state?.tools),
     tools: state?.tools?.map((t) => ({ name: t.name, description: t.description })),
   };
 
@@ -180,7 +149,6 @@ export async function exportFromFile(inputPath: string, options?: ExportOptions
     header: sm.getHeader(),
     entries: sm.getEntries(),
     leafId: sm.getLeafId(),
-    codexInjectionInfo: await buildCodexInjectionInfo(),
   };
 
   const html = generateHtml(sessionData, opts.themeName);
package/src/core/system-prompt.ts
CHANGED

@@ -148,6 +148,45 @@ function stripQuotes(value: string): string {
   return value.replace(/^"|"$/g, "");
 }
 
+const AGENTS_MD_PATTERN = "**/AGENTS.md";
+const AGENTS_MD_LIMIT = 200;
+
+interface AgentsMdSearch {
+  scopePath: string;
+  limit: number;
+  pattern: string;
+  files: string[];
+}
+
+function normalizePath(value: string): string {
+  return value.replace(/\\/g, "/");
+}
+
+function listAgentsMdFiles(root: string, limit: number): string[] {
+  try {
+    const entries = Array.from(
+      new Bun.Glob(AGENTS_MD_PATTERN).scanSync({ cwd: root, onlyFiles: true, dot: false, absolute: false }),
+    );
+    const normalized = entries
+      .map((entry) => normalizePath(entry))
+      .filter((entry) => entry.length > 0 && !entry.includes("node_modules"))
+      .sort();
+    return normalized.length > limit ? normalized.slice(0, limit) : normalized;
+  } catch {
+    return [];
+  }
+}
+
+function buildAgentsMdSearch(cwd: string): AgentsMdSearch {
+  const files = listAgentsMdFiles(cwd, AGENTS_MD_LIMIT);
+  return {
+    scopePath: ".",
+    limit: AGENTS_MD_LIMIT,
+    pattern: AGENTS_MD_PATTERN,
+    files,
+  };
+}
+
 function getOsName(): string {
   switch (process.platform) {
     case "win32":
@@ -625,6 +664,7 @@ export function buildSystemPrompt(options: BuildSystemPromptOptions = {}): strin
 
   // Resolve context files: use provided or discover
   const contextFiles = providedContextFiles ?? loadProjectContextFiles({ cwd: resolvedCwd });
+  const agentsMdSearch = buildAgentsMdSearch(resolvedCwd);
 
   // Build tool descriptions array
   // Priority: toolNames (explicit list) > tools (Map) > defaults
@@ -663,6 +703,7 @@ export function buildSystemPrompt(options: BuildSystemPromptOptions = {}): strin
     customPrompt: resolvedCustomPrompt,
     appendPrompt: resolvedAppendPrompt ?? "",
     contextFiles,
+    agentsMdSearch,
     toolDescriptions: toolDescriptionsArray,
     git,
     skills: filteredSkills,
@@ -678,6 +719,7 @@ export function buildSystemPrompt(options: BuildSystemPromptOptions = {}): strin
     environment: getEnvironmentInfo(),
     systemPromptCustomization: systemPromptCustomization ?? "",
     contextFiles,
+    agentsMdSearch,
     git,
     skills: filteredSkills,
     rules: rules ?? [],
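For orientation, here is a minimal sketch of the value `buildAgentsMdSearch` would produce for a hypothetical repository; the file paths below are made up for illustration, and only the shape and constants follow the hunk above.

```ts
// Hypothetical output of buildAgentsMdSearch("/repo") — the paths are illustrative only.
const example: AgentsMdSearch = {
  scopePath: ".",
  limit: 200,              // AGENTS_MD_LIMIT
  pattern: "**/AGENTS.md", // AGENTS_MD_PATTERN
  files: [
    "AGENTS.md",               // repository root
    "packages/core/AGENTS.md", // nested file; deeper files override higher ones per the prompt rules
  ],
};
```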
package/src/core/tools/complete.ts
CHANGED

@@ -79,7 +79,7 @@ export function createCompleteTool(session: ToolSession) {
     : Type.Any({ description: "Structured JSON output (no schema specified)" });
 
   const completeParams = Type.Object({
-    data: dataSchema,
+    data: Type.Optional(dataSchema),
     status: Type.Optional(
       Type.Union([Type.Literal("success"), Type.Literal("aborted")], {
         default: "success",
@@ -99,8 +99,11 @@ export function createCompleteTool(session: ToolSession) {
     execute: async (_toolCallId, params) => {
       const status = params.status ?? "success";
 
-      // Skip
+      // Skip validation when aborting - data is optional for aborts
       if (status === "success") {
+        if (params.data === undefined) {
+          throw new Error("data is required when status is 'success'");
+        }
         if (schemaError) {
           throw new Error(`Invalid output schema: ${schemaError}`);
         }
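A minimal sketch of the validation rule this hunk introduces (not the actual tool wiring; the parameter names and error message mirror the diff above):

```ts
// Sketch only: `data` may be omitted when aborting, but is still required on success.
type CompleteParams = { data?: unknown; status?: "success" | "aborted" };

function checkCompleteParams(params: CompleteParams): void {
  const status = params.status ?? "success";
  if (status === "success" && params.data === undefined) {
    throw new Error("data is required when status is 'success'");
  }
  // status === "aborted" skips the data requirement entirely.
}
```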
package/src/core/tools/task/commands.ts
CHANGED

@@ -9,17 +9,13 @@ import { type SlashCommand, slashCommandCapability } from "../../../capability/s
 import { loadSync } from "../../../discovery";
 
 // Embed command markdown files at build time
-import architectPlanMd from "../../../prompts/agents/architect-plan.md" with { type: "text" };
-import implementMd from "../../../prompts/agents/implement.md" with { type: "text" };
-import implementWithCriticMd from "../../../prompts/agents/implement-with-critic.md" with { type: "text" };
 import initMd from "../../../prompts/agents/init.md" with { type: "text" };
+import plannerMd from "../../../prompts/agents/planner.md" with { type: "text" };
 import { renderPromptTemplate } from "../../prompt-templates";
 
 const EMBEDDED_COMMANDS: { name: string; content: string }[] = [
-  { name: "architect-plan.md", content: renderPromptTemplate(architectPlanMd) },
-  { name: "implement-with-critic.md", content: renderPromptTemplate(implementWithCriticMd) },
-  { name: "implement.md", content: renderPromptTemplate(implementMd) },
   { name: "init.md", content: renderPromptTemplate(initMd) },
+  { name: "planner.md", content: renderPromptTemplate(plannerMd) },
 ];
 
 export const EMBEDDED_COMMAND_TEMPLATES: ReadonlyArray<{ name: string; content: string }> = EMBEDDED_COMMANDS;
package/src/discovery/builtin.ts
CHANGED
@@ -5,7 +5,7 @@
  * .pi is an alias for backwards compatibility.
  */
 
-import {
+import { dirname, isAbsolute, join, resolve } from "path";
 import { type ContextFile, contextFileCapability } from "../capability/context-file";
 import { type Extension, type ExtensionManifest, extensionCapability } from "../capability/extension";
 import { type ExtensionModule, extensionModuleCapability } from "../capability/extension-module";
@@ -16,7 +16,7 @@ import { type MCPServer, mcpCapability } from "../capability/mcp";
 import { type Prompt, promptCapability } from "../capability/prompt";
 import { type Rule, ruleCapability } from "../capability/rule";
 import { type Settings, settingsCapability } from "../capability/settings";
-import { type Skill,
+import { type Skill, skillCapability } from "../capability/skill";
 import { type SlashCommand, slashCommandCapability } from "../capability/slash-command";
 import { type SystemPrompt, systemPromptCapability } from "../capability/system-prompt";
 import { type CustomTool, toolCapability } from "../capability/tool";
@@ -27,6 +27,7 @@ import {
   expandEnvVarsDeep,
   getExtensionNameFromPath,
   loadFilesFromDir,
+  loadSkillsFromDir,
   parseFrontmatter,
   parseJSON,
   SOURCE_PATHS,
@@ -190,64 +191,18 @@ registerProvider<SystemPrompt>(systemPromptCapability.id, {
 });
 
 // Skills
-function loadSkillFromFile(ctx: LoadContext, path: string, level: "user" | "project"): Skill | null {
-  const content = ctx.fs.readFile(path);
-  if (!content) return null;
-
-  const { frontmatter, body } = parseFrontmatter(content);
-  const skillDir = dirname(path);
-  const parentDirName = basename(skillDir);
-  const name = (frontmatter.name as string) || parentDirName;
-
-  if (!frontmatter.description) return null;
-
-  return {
-    name,
-    path,
-    content: body,
-    frontmatter: frontmatter as SkillFrontmatter,
-    level,
-    _source: createSourceMeta(PROVIDER_ID, path, level),
-  };
-}
-
-function loadSkillsRecursive(ctx: LoadContext, dir: string, level: "user" | "project"): LoadResult<Skill> {
-  const items: Skill[] = [];
-  const warnings: string[] = [];
-
-  if (!ctx.fs.isDir(dir)) return { items, warnings };
-
-  for (const name of ctx.fs.readDir(dir)) {
-    if (name.startsWith(".") || name === "node_modules") continue;
-
-    const path = join(dir, name);
-
-    if (ctx.fs.isDir(path)) {
-      const skillFile = join(path, "SKILL.md");
-      if (ctx.fs.isFile(skillFile)) {
-        const skill = loadSkillFromFile(ctx, skillFile, level);
-        if (skill) items.push(skill);
-      }
-
-      const sub = loadSkillsRecursive(ctx, path, level);
-      items.push(...sub.items);
-      if (sub.warnings) warnings.push(...sub.warnings);
-    } else if (name === "SKILL.md") {
-      const skill = loadSkillFromFile(ctx, path, level);
-      if (skill) items.push(skill);
-    }
-  }
-
-  return { items, warnings };
-}
-
 function loadSkills(ctx: LoadContext): LoadResult<Skill> {
   const items: Skill[] = [];
   const warnings: string[] = [];
 
   for (const { dir, level } of getConfigDirs(ctx)) {
     const skillsDir = join(dir, "skills");
-    const result =
+    const result = loadSkillsFromDir(ctx, {
+      dir: skillsDir,
+      providerId: PROVIDER_ID,
+      level,
+      requireDescription: true,
+    });
     items.push(...result.items);
     if (result.warnings) warnings.push(...result.warnings);
   }
package/src/discovery/claude.ts
CHANGED
@@ -24,7 +24,7 @@ import {
   expandEnvVarsDeep,
   getExtensionNameFromPath,
   loadFilesFromDir,
-
+  loadSkillsFromDir,
   parseJSON,
 } from "./helpers";
 
@@ -218,78 +218,25 @@ function loadSkills(ctx: LoadContext): LoadResult<Skill> {
   const items: Skill[] = [];
   const warnings: string[] = [];
 
-
-  const
-
-
-
-
-
-
-    if (dirName.startsWith(".")) continue;
-
-    const skillDir = join(userSkillsDir, dirName);
-    if (!ctx.fs.isDir(skillDir)) continue;
-
-    const skillFile = join(skillDir, "SKILL.md");
-    if (!ctx.fs.isFile(skillFile)) continue;
-
-    const content = ctx.fs.readFile(skillFile);
-    if (!content) {
-      warnings.push(`Failed to read ${skillFile}`);
-      continue;
-    }
-
-    const { frontmatter, body } = parseFrontmatter(content);
-    const name = (frontmatter.name as string) || dirName;
-
-    items.push({
-      name,
-      path: skillFile,
-      content: body,
-      frontmatter,
-      level: "user",
-      _source: createSourceMeta(PROVIDER_ID, skillFile, "user"),
-    });
-  }
-}
+  const userSkillsDir = join(getUserClaude(ctx), "skills");
+  const userResult = loadSkillsFromDir(ctx, {
+    dir: userSkillsDir,
+    providerId: PROVIDER_ID,
+    level: "user",
+  });
+  items.push(...userResult.items);
+  if (userResult.warnings) warnings.push(...userResult.warnings);
 
-  // Project-level: <project>/.claude/skills/*/SKILL.md
   const projectBase = getProjectClaude(ctx);
   if (projectBase) {
     const projectSkillsDir = join(projectBase, "skills");
-
-
-
-
-
-
-
-      const skillDir = join(projectSkillsDir, dirName);
-      if (!ctx.fs.isDir(skillDir)) continue;
-
-      const skillFile = join(skillDir, "SKILL.md");
-      if (!ctx.fs.isFile(skillFile)) continue;
-
-      const content = ctx.fs.readFile(skillFile);
-      if (!content) {
-        warnings.push(`Failed to read ${skillFile}`);
-        continue;
-      }
-
-      const { frontmatter, body } = parseFrontmatter(content);
-      const name = (frontmatter.name as string) || dirName;
-
-      items.push({
-        name,
-        path: skillFile,
-        content: body,
-        frontmatter,
-        level: "project",
-        _source: createSourceMeta(PROVIDER_ID, skillFile, "project"),
-      });
-    }
-  }
+    const projectResult = loadSkillsFromDir(ctx, {
+      dir: projectSkillsDir,
+      providerId: PROVIDER_ID,
+      level: "project",
+    });
+    items.push(...projectResult.items);
+    if (projectResult.warnings) warnings.push(...projectResult.warnings);
   }
 
   return { items, warnings };
package/src/discovery/codex.ts
CHANGED
@@ -33,6 +33,7 @@ import {
   discoverExtensionModulePaths,
   getExtensionNameFromPath,
   loadFilesFromDir,
+  loadSkillsFromDir,
   parseFrontmatter,
   SOURCE_PATHS,
 } from "./helpers";
@@ -209,51 +210,25 @@ function loadSkills(ctx: LoadContext): LoadResult<Skill> {
   const items: Skill[] = [];
   const warnings: string[] = [];
 
-  // User level: ~/.codex/skills/
   const userSkillsDir = join(ctx.home, SOURCE_PATHS.codex.userBase, "skills");
-  const userResult =
-
-
-
-      const { frontmatter, body } = parseFrontmatter(content);
-      const skillName = frontmatter.name || name.replace(/\.md$/, "");
-
-      return {
-        name: String(skillName),
-        path,
-        content: body,
-        frontmatter,
-        level: "user" as const,
-        _source: source,
-      };
-    },
+  const userResult = loadSkillsFromDir(ctx, {
+    dir: userSkillsDir,
+    providerId: PROVIDER_ID,
+    level: "user",
   });
   items.push(...userResult.items);
-  warnings.push(...
+  if (userResult.warnings) warnings.push(...userResult.warnings);
 
-  // Project level: .codex/skills/
   const codexDir = ctx.fs.walkUp(".codex", { dir: true });
   if (codexDir) {
     const projectSkillsDir = join(codexDir, "skills");
-    const projectResult =
-
-
-
-        const { frontmatter, body } = parseFrontmatter(content);
-        const skillName = frontmatter.name || name.replace(/\.md$/, "");
-
-        return {
-          name: String(skillName),
-          path,
-          content: body,
-          frontmatter,
-          level: "project" as const,
-          _source: source,
-        };
-      },
+    const projectResult = loadSkillsFromDir(ctx, {
+      dir: projectSkillsDir,
+      providerId: PROVIDER_ID,
+      level: "project",
    });
     items.push(...projectResult.items);
-    warnings.push(...
+    if (projectResult.warnings) warnings.push(...projectResult.warnings);
   }
 
   return { items, warnings };
package/src/discovery/helpers.ts
CHANGED
@@ -4,6 +4,7 @@
 
 import { join, resolve } from "path";
 import { parse as parseYAML } from "yaml";
+import type { Skill, SkillFrontmatter } from "../capability/skill";
 import type { LoadContext, LoadResult, SourceMeta } from "../capability/types";
 
 /**
@@ -126,6 +127,56 @@ export function parseFrontmatter(content: string): {
   }
 }
 
+export function loadSkillsFromDir(
+  ctx: LoadContext,
+  options: {
+    dir: string;
+    providerId: string;
+    level: "user" | "project";
+    requireDescription?: boolean;
+  },
+): LoadResult<Skill> {
+  const items: Skill[] = [];
+  const warnings: string[] = [];
+  const { dir, level, providerId, requireDescription = false } = options;
+
+  if (!ctx.fs.isDir(dir)) {
+    return { items, warnings };
+  }
+
+  for (const name of ctx.fs.readDir(dir)) {
+    if (name.startsWith(".") || name === "node_modules") continue;
+
+    const skillDir = join(dir, name);
+    if (!ctx.fs.isDir(skillDir)) continue;
+
+    const skillFile = join(skillDir, "SKILL.md");
+    if (!ctx.fs.isFile(skillFile)) continue;
+
+    const content = ctx.fs.readFile(skillFile);
+    if (!content) {
+      warnings.push(`Failed to read ${skillFile}`);
+      continue;
+    }
+
+    const { frontmatter, body } = parseFrontmatter(content);
+    if (requireDescription && !frontmatter.description) {
+      continue;
+    }
+
+    items.push({
+      name: (frontmatter.name as string) || name,
+      path: skillFile,
+      content: body,
+      frontmatter: frontmatter as SkillFrontmatter,
+      level,
+      _source: createSourceMeta(providerId, skillFile, level),
+    });
+  }
+
+  return { items, warnings };
+}
+
 /**
  * Expand environment variables in a string.
  * Supports ${VAR} and ${VAR:-default} syntax.
@@ -286,7 +337,7 @@ export function discoverExtensionModulePaths(ctx: LoadContext, dir: string): str
   const discovered: string[] = [];
 
   for (const name of ctx.fs.readDir(dir)) {
-    if (name.startsWith(".")) continue;
+    if (name.startsWith(".") || name === "node_modules") continue;
 
     const entryPath = join(dir, name);
 
package/src/modes/interactive/controllers/selector-controller.ts
CHANGED

@@ -2,7 +2,7 @@ import type { ThinkingLevel } from "@oh-my-pi/pi-agent-core";
 import type { OAuthProvider } from "@oh-my-pi/pi-ai";
 import type { Component } from "@oh-my-pi/pi-tui";
 import { Input, Loader, Spacer, Text } from "@oh-my-pi/pi-tui";
-import {
+import { getAgentDbPath } from "../../../config";
 import { SessionManager } from "../../../core/session-manager";
 import { setPreferredImageProvider, setPreferredWebSearchProvider } from "../../../core/tools/index";
 import { disableProvider, enableProvider } from "../../../discovery";
@@ -546,7 +546,7 @@ export class SelectorController {
       ),
     );
     this.ctx.chatContainer.addChild(
-      new Text(theme.fg("dim", `Credentials saved to ${
+      new Text(theme.fg("dim", `Credentials saved to ${getAgentDbPath()}`), 1, 0),
     );
     this.ctx.ui.requestRender();
   } catch (error: unknown) {
@@ -566,7 +566,7 @@ export class SelectorController {
       ),
     );
     this.ctx.chatContainer.addChild(
-      new Text(theme.fg("dim", `Credentials removed from ${
+      new Text(theme.fg("dim", `Credentials removed from ${getAgentDbPath()}`), 1, 0),
     );
     this.ctx.ui.requestRender();
   } catch (error: unknown) {
package/src/modes/interactive/interactive-mode.ts
CHANGED

@@ -149,6 +149,9 @@ export class InteractiveMode implements InteractiveModeContext {
     this.statusContainer = new Container();
     this.editor = new CustomEditor(getEditorTheme());
     this.editor.setUseTerminalCursor(true);
+    this.editor.onAutocompleteCancel = () => {
+      this.ui.requestRender(true);
+    };
     try {
       this.historyStorage = HistoryStorage.open();
       this.editor.setHistoryStorage(this.historyStorage);
package/src/prompts/agents/planner.md
ADDED

@@ -0,0 +1,112 @@
+---
+name: planner
+description: Software architect that explores codebase and produces detailed implementation plans
+tools: read, grep, find, ls, bash
+spawns: explore
+model: pi/slow, gpt-5.2-codex, gpt-5.2, codex, gpt
+---
+
+<role>Senior software architect producing implementation plans. READ-ONLY — no file modifications, no state changes.</role>
+
+<context>
+Another engineer will execute your plan without re-exploring the codebase. Your plan must be specific enough to implement directly.
+</context>
+
+<process>
+## Phase 1: Understand
+
+1. Parse the task requirements precisely
+2. Identify ambiguities — list assumptions you're making
+3. Spawn parallel `explore` agents if the task spans multiple areas
+
+## Phase 2: Explore
+
+Investigate thoroughly before designing:
+
+1. Find existing patterns via grep/find
+2. Read key files to understand current architecture
+3. Trace data flow through relevant code paths
+4. Identify types, interfaces, and contracts involved
+5. Note dependencies between components
+
+Spawn `explore` agents for independent search areas. Synthesize findings.
+
+## Phase 3: Design
+
+Create implementation approach:
+
+1. List concrete changes required (files, functions, types)
+2. Define the sequence — what depends on what
+3. Identify edge cases and error conditions
+4. Consider alternatives; justify your choice
+5. Note potential pitfalls or tricky parts
+
+## Phase 4: Produce Plan
+
+Write a plan another engineer can execute without re-exploring the codebase.
+</process>
+
+<example>
+## Summary
+What we're building and why (one paragraph).
+
+## Changes
+1. **`path/to/file.ts`** — What to change
+   - Specific modifications
+2. **`path/to/other.ts`** — ...
+
+## Sequence
+1. X (no dependencies)
+2. Y (depends on X)
+3. Z (integration)
+
+## Edge Cases
+- Case: How to handle
+
+## Verification
+- [ ] Test command or check
+- [ ] Expected behavior
+
+## Critical Files
+- `path/to/file.ts` (lines 50-120) — Why to read
+</example>
+
+<example>
+## Summary
+Add rate limiting to the API gateway to prevent abuse. Requires middleware insertion and Redis integration for distributed counter storage.
+
+## Changes
+1. **`src/middleware/rate-limit.ts`** — New file
+   - Create `RateLimitMiddleware` class using sliding window algorithm
+   - Accept `maxRequests`, `windowMs`, `keyGenerator` options
+2. **`src/gateway/index.ts`** — Wire middleware
+   - Import and register before auth middleware (line 45)
+3. **`src/config/redis.ts`** — Add rate limit key prefix
+   - Add `RATE_LIMIT_PREFIX` constant
+
+## Sequence
+1. `rate-limit.ts` (standalone, no deps)
+2. `redis.ts` (config only)
+3. `gateway/index.ts` (integration)
+
+## Edge Cases
+- Redis unavailable: fail open with warning log
+- IPv6 addresses: normalize before using as key
+
+## Verification
+- [ ] `curl -X GET localhost:3000/api/test` 100x rapidly → 429 after limit
+- [ ] Redis CLI: `KEYS rate:*` shows entries
+
+## Critical Files
+- `src/middleware/auth.ts` (lines 20-50) — Pattern to follow
+- `src/types/middleware.ts` — Interface to implement
+</example>
+
+<requirements>
+- Plan must be specific enough to implement without additional exploration
+- Include exact file paths and line ranges where relevant
+- Sequence must respect dependencies
+- Verification must be concrete and testable
+</requirements>
+
+Keep going until complete. This matters — get it right.
package/src/prompts/agents/task.md
CHANGED

@@ -1,14 +1,15 @@
-You are a worker agent for delegated tasks
+You are a worker agent for delegated tasks. You have FULL access to all tools (edit, write, bash, grep, read, etc.) - use them as needed to complete your task.
+
+Finish only the assigned work and return the minimum useful result.
 
 Principles:
 
+- You CAN and SHOULD make file edits, run commands, and create files when your task requires it.
 - Be concise. No filler, repetition, or tool transcripts.
-- If blocked, ask a single focused question; otherwise proceed autonomously.
 - Prefer narrow search (grep/find) then read only needed ranges.
 - Avoid full-file reads unless necessary.
--
+- Prefer edits to existing files over creating new ones.
 - NEVER create documentation files (\*.md) unless explicitly requested.
-- Any file paths in your response MUST be absolute.
 - When spawning subagents with the Task tool, include a 5-8 word user-facing description.
 - Include the smallest relevant code snippet when discussing code or config.
 - Follow the main agent's instructions.
package/src/prompts/system/system-prompt.md
CHANGED

@@ -137,6 +137,11 @@ Before reading any file:
 
 ## Project Integration
 - Follow AGENTS.md by scope: nearest file applies, deeper overrides higher.
+- Do not search for AGENTS.md during execution; use this list as authoritative.
+{{#if agentsMdSearch.files.length}}
+Relevant files are:
+{{#list agentsMdSearch.files join="\n"}}- {{this}}{{/list}}
+{{/if}}
 - Resolve blockers before yielding.
 </instructions>
 
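Assuming a repository that contains two instruction files, the added template block would render roughly as follows; the paths are hypothetical and the exact output depends on the prompt template engine.

```
- Follow AGENTS.md by scope: nearest file applies, deeper overrides higher.
- Do not search for AGENTS.md during execution; use this list as authoritative.
Relevant files are:
- AGENTS.md
- packages/core/AGENTS.md
```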
package/src/prompts/tools/task.md
CHANGED

@@ -2,6 +2,13 @@ Launch a new agent to handle complex, multi-step tasks autonomously.
 
 The Task tool launches specialized agents (workers) that autonomously handle complex tasks. Each agent type has specific capabilities and tools available to it.
 
+**CRITICAL: Subagents have NO access to conversation history.** They only see:
+1. Their agent-specific system prompt
+2. The `context` string you provide
+3. The `task` string you provide
+
+If you discussed requirements, plans, schemas, or decisions with the user, you MUST include that information in `context`. Subagents cannot see prior messages - they start fresh with only what you explicitly pass them.
+
 ## Available Agents
 
 {{#list agents prefix="- " join="\n"}}
@@ -26,8 +33,9 @@ The Task tool launches specialized agents (workers) that autonomously handle com
 - **Minimize tool chatter**: Avoid repeating large context; use Output tool with output ids for full logs
 - **Structured completion**: If `output` is provided, subagents must call `complete` to finish
 - **Parallelize**: Launch multiple agents concurrently whenever possible
+- **Isolate file scopes**: Assign each task distinct files or directories so agents don't conflict
 - **Results are intermediate data**: Agent findings provide context for YOU to perform actual work. Do not treat agent reports as "task complete" signals.
-- **Stateless invocations**:
+- **Stateless invocations**: Subagents have zero memory of your conversation. Pass ALL relevant context: requirements discussed, decisions made, schemas agreed upon, file paths mentioned. If you reference something from earlier discussion without including it, the subagent will fail.
 - **Trust outputs**: Agent results should generally be trusted
 - **Clarify intent**: Tell the agent whether you expect code changes or just research (search, file reads, web fetches)
 - **Proactive use**: If an agent description says to use it proactively, do so without waiting for explicit user request
@@ -35,7 +43,7 @@ The Task tool launches specialized agents (workers) that autonomously handle com
 ## Parameters
 
 - `agent`: Agent type to use for all tasks
-- `context`:
+- `context`: **Required context from conversation** - include ALL relevant info: requirements, schemas, decisions, constraints. Subagents cannot see chat history.
 - `model`: (optional) Model override (fuzzy matching, e.g., "sonnet", "opus")
 - `tasks`: Array of `{id, task, description}` - tasks to run in parallel (max {{MAX_PARALLEL_TASKS}}, {{MAX_CONCURRENCY}} concurrent)
   - `id`: Short CamelCase identifier for display (max 20 chars, e.g., "SessionStore", "LspRefactor")
@@ -46,30 +54,28 @@ The Task tool launches specialized agents (workers) that autonomously handle com
 ## Example
 
 <example>
-user: "
-assistant: I'll
+user: "Looks good, execute the plan"
+assistant: I'll execute the refactoring plan.
 assistant: Uses the Task tool:
 {
-  "agent": "
-  "context": "
+  "agent": "task",
+  "context": "Refactoring the auth module into separate concerns.\n\nPlan:\n1. AuthProvider - Extract React context and provider from src/auth/index.tsx\n2. AuthApi - Extract API calls to src/auth/api.ts, use existing fetchJson helper\n3. AuthTypes - Move types to src/auth/types.ts, re-export from index\n\nConstraints:\n- Preserve all existing exports from src/auth/index.tsx\n- Use project's fetchJson (src/utils/http.ts), don't use raw fetch\n- No new dependencies",
   "output": {
     "properties": {
-      "
-
-
-      "file": { "type": "string" },
-      "line": { "type": "uint32" },
-      "text": { "type": "string" },
-      "suggestedKey": { "type": "string" }
-    }
-  }
-}
+      "summary": { "type": "string" },
+      "decisions": { "elements": { "type": "string" } },
+      "concerns": { "elements": { "type": "string" } }
     }
   },
   "tasks": [
-    { "id": "
-    { "id": "
-    { "id": "
+    { "id": "AuthProvider", "task": "Execute step 1: Extract AuthProvider and AuthContext", "description": "Extract React context" },
+    { "id": "AuthApi", "task": "Execute step 2: Extract API calls to api.ts", "description": "Extract API layer" },
+    { "id": "AuthTypes", "task": "Execute step 3: Move types to types.ts", "description": "Extract types" }
   ]
 }
 </example>
+
+Key points:
+- **Plan in context**: The full plan is written once; each task references its step without repeating shared constraints
+- **Parallel execution**: 3 agents run concurrently, each owning one step - no duplicated work
+- **Structured output**: JTD schema ensures consistent reporting across all agents
package/src/prompts/agents/architect-plan.md
REMOVED

@@ -1,10 +0,0 @@
----
-description: Explore gathers context, planner creates implementation plan (no implementation)
----
-
-Use the subagent tool with the chain parameter to execute this workflow:
-
-1. First, use the "explore" agent to find all code relevant to: $@
-2. Then, use the "planner" agent to create an implementation plan for "$@" using the context from the previous step (use {previous} placeholder)
-
-Execute this as a chain, passing output between steps via {previous}. Do NOT implement - just return the plan.
package/src/prompts/agents/implement-with-critic.md
REMOVED

@@ -1,11 +0,0 @@
----
-description: Task implements, reviewer reviews, task applies feedback
----
-
-Use the subagent tool with the chain parameter to execute this workflow:
-
-1. First, use the "task" agent to implement: $@
-2. Then, use the "reviewer" agent to review the implementation from the previous step (use {previous} placeholder)
-3. Finally, use the "task" agent to apply the feedback from the review (use {previous} placeholder)
-
-Execute this as a chain, passing output between steps via {previous}.
package/src/prompts/agents/implement.md
REMOVED

@@ -1,11 +0,0 @@
----
-description: Full implementation workflow - explore gathers context, planner creates plan, task implements
----
-
-Use the subagent tool with the chain parameter to execute this workflow:
-
-1. First, use the "explore" agent to find all code relevant to: $@
-2. Then, use the "planner" agent to create an implementation plan for "$@" using the context from the previous step (use {previous} placeholder)
-3. Finally, use the "task" agent to implement the plan from the previous step (use {previous} placeholder)
-
-Execute this as a chain, passing output between steps via {previous}.