@oh-my-pi/pi-coding-agent 4.2.3 → 4.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/CHANGELOG.md +23 -0
  2. package/package.json +5 -5
  3. package/src/cli/update-cli.ts +2 -2
  4. package/src/config.ts +5 -5
  5. package/src/core/auth-storage.ts +13 -1
  6. package/src/core/cursor/exec-bridge.ts +234 -0
  7. package/src/core/custom-commands/loader.ts +3 -1
  8. package/src/core/custom-tools/loader.ts +1 -18
  9. package/src/core/extensions/loader.ts +5 -21
  10. package/src/core/hooks/loader.ts +1 -18
  11. package/src/core/keybindings.ts +3 -1
  12. package/src/core/logger.ts +1 -2
  13. package/src/core/model-resolver.ts +1 -0
  14. package/src/core/prompt-templates.ts +5 -4
  15. package/src/core/sdk.ts +17 -4
  16. package/src/core/skills.ts +5 -4
  17. package/src/core/tools/edit-diff.ts +44 -21
  18. package/src/core/tools/exa/mcp-client.ts +2 -2
  19. package/src/core/tools/task/agents.ts +5 -64
  20. package/src/core/tools/task/commands.ts +7 -33
  21. package/src/core/tools/task/discovery.ts +4 -66
  22. package/src/core/tools/task/executor.ts +32 -3
  23. package/src/core/tools/task/index.ts +11 -2
  24. package/src/core/tools/task/render.ts +25 -15
  25. package/src/core/tools/task/types.ts +3 -0
  26. package/src/core/tools/task/worker-protocol.ts +2 -1
  27. package/src/core/tools/task/worker.ts +2 -1
  28. package/src/core/tools/web-scrapers/huggingface.ts +1 -1
  29. package/src/core/tools/web-scrapers/readthedocs.ts +1 -1
  30. package/src/core/tools/web-scrapers/types.ts +1 -1
  31. package/src/core/tools/web-search/auth.ts +5 -3
  32. package/src/discovery/codex.ts +3 -1
  33. package/src/discovery/helpers.ts +124 -3
  34. package/src/migrations.ts +11 -9
  35. package/src/modes/interactive/components/extensions/state-manager.ts +19 -18
  36. package/src/prompts/agents/frontmatter.md +1 -0
  37. package/src/prompts/agents/reviewer.md +32 -4
  38. package/src/prompts/tools/task.md +3 -1
@@ -29,6 +29,7 @@ import { slashCommandCapability } from "../capability/slash-command";
 import type { CustomTool } from "../capability/tool";
 import { toolCapability } from "../capability/tool";
 import type { LoadContext, LoadResult } from "../capability/types";
+import { logger } from "../core/logger";
 import {
   createSourceMeta,
   discoverExtensionModulePaths,
@@ -117,7 +118,8 @@ async function loadTomlConfig(_ctx: LoadContext, path: string): Promise<Record<s

   try {
     return parseToml(content) as Record<string, unknown>;
-  } catch (_err) {
+  } catch (error) {
+    logger.warn("Failed to parse TOML config", { path, error: String(error) });
     return null;
   }
 }
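The pattern above recurs throughout this release: catch blocks that previously swallowed exceptions now emit a structured `logger.warn` with contextual fields while keeping the existing fallback. A minimal self-contained sketch of the pattern (the inline logger stand-in, the `parseConfig` function, and the call site are illustrative, not package code):

```ts
// Minimal stand-in for the package's logger (illustrative only).
const logger = {
  warn: (msg: string, fields?: Record<string, unknown>) => console.warn(msg, fields),
};

// Illustrative stand-in for loadTomlConfig-style parsing with a logged fallback.
function parseConfig(text: string): Record<string, unknown> | null {
  try {
    return JSON.parse(text) as Record<string, unknown>;
  } catch (error) {
    // Before 4.3.x these catch blocks were empty; the failure is now logged with context.
    logger.warn("Failed to parse config", { error: String(error) });
    return null; // the fallback behavior itself is unchanged
  }
}

// Malformed input is logged and resolves to null rather than throwing.
console.log(parseConfig("{ not valid json"));
```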
package/src/discovery/helpers.ts CHANGED
@@ -2,11 +2,38 @@
  * Shared helpers for discovery providers.
  */

+import { homedir } from "node:os";
 import { join, resolve } from "node:path";
+import type { ThinkingLevel } from "@oh-my-pi/pi-agent-core";
 import { parse as parseYAML } from "yaml";
 import { readDirEntries, readFile } from "../capability/fs";
 import type { Skill, SkillFrontmatter } from "../capability/skill";
 import type { LoadContext, LoadResult, SourceMeta } from "../capability/types";
+import { logger } from "../core/logger";
+
+const VALID_THINKING_LEVELS: readonly string[] = ["off", "minimal", "low", "medium", "high", "xhigh"];
+const UNICODE_SPACES = /[\u00A0\u2000-\u200A\u202F\u205F\u3000]/g;
+
+/**
+ * Normalize unicode spaces to regular spaces.
+ */
+export function normalizeUnicodeSpaces(str: string): string {
+  return str.replace(UNICODE_SPACES, " ");
+}
+
+/**
+ * Expand ~ to home directory and normalize unicode spaces.
+ */
+export function expandPath(p: string): string {
+  const normalized = normalizeUnicodeSpaces(p);
+  if (normalized.startsWith("~/")) {
+    return join(homedir(), normalized.slice(2));
+  }
+  if (normalized.startsWith("~")) {
+    return join(homedir(), normalized.slice(1));
+  }
+  return normalized;
+}

 /**
  * Standard paths for each config source.
@@ -117,14 +144,108 @@ export function parseFrontmatter(content: string): {
   const body = normalized.slice(endIndex + 4).trim();

   try {
-    const frontmatter = parseYAML(raw) as Record<string, unknown> | null;
+    // Replace tabs with spaces for YAML compatibility, use failsafe mode for robustness
+    const frontmatter = parseYAML(raw.replaceAll("\t", " "), { compat: "failsafe" }) as Record<
+      string,
+      unknown
+    > | null;
     return { frontmatter: frontmatter ?? {}, body, raw };
-  } catch {
-    // Fallback to empty frontmatter on parse error
+  } catch (error) {
+    logger.warn("Failed to parse YAML frontmatter", { error: String(error) });
     return { frontmatter: {}, body, raw };
   }
 }

+/**
+ * Parse thinking level from frontmatter.
+ * Supports keys: thinkingLevel, thinking-level, thinking
+ */
+export function parseThinkingLevel(frontmatter: Record<string, unknown>): ThinkingLevel | undefined {
+  const raw = frontmatter.thinkingLevel ?? frontmatter["thinking-level"] ?? frontmatter.thinking;
+  if (typeof raw === "string" && VALID_THINKING_LEVELS.includes(raw)) {
+    return raw as ThinkingLevel;
+  }
+  return undefined;
+}
+
+/**
+ * Parse a comma-separated string into an array of trimmed, non-empty strings.
+ */
+export function parseCSV(value: string): string[] {
+  return value
+    .split(",")
+    .map((s) => s.trim())
+    .filter(Boolean);
+}
+
+/**
+ * Parse a value that may be an array of strings or a comma-separated string.
+ * Returns undefined if the result would be empty.
+ */
+export function parseArrayOrCSV(value: unknown): string[] | undefined {
+  if (Array.isArray(value)) {
+    const filtered = value.filter((item): item is string => typeof item === "string");
+    return filtered.length > 0 ? filtered : undefined;
+  }
+  if (typeof value === "string") {
+    const parsed = parseCSV(value);
+    return parsed.length > 0 ? parsed : undefined;
+  }
+  return undefined;
+}
+
+/** Parsed agent fields from frontmatter (excludes source/filePath/systemPrompt) */
+export interface ParsedAgentFields {
+  name: string;
+  description: string;
+  tools?: string[];
+  spawns?: string[] | "*";
+  model?: string;
+  output?: unknown;
+  thinkingLevel?: ThinkingLevel;
+}
+
+/**
+ * Parse agent fields from frontmatter.
+ * Returns null if required fields (name, description) are missing.
+ */
+export function parseAgentFields(frontmatter: Record<string, unknown>): ParsedAgentFields | null {
+  const name = typeof frontmatter.name === "string" ? frontmatter.name : undefined;
+  const description = typeof frontmatter.description === "string" ? frontmatter.description : undefined;
+
+  if (!name || !description) {
+    return null;
+  }
+
+  const tools = parseArrayOrCSV(frontmatter.tools);
+
+  // Parse spawns field (array, "*", or CSV)
+  let spawns: string[] | "*" | undefined;
+  if (frontmatter.spawns === "*") {
+    spawns = "*";
+  } else if (typeof frontmatter.spawns === "string") {
+    const trimmed = frontmatter.spawns.trim();
+    if (trimmed === "*") {
+      spawns = "*";
+    } else {
+      spawns = parseArrayOrCSV(trimmed);
+    }
+  } else {
+    spawns = parseArrayOrCSV(frontmatter.spawns);
+  }
+
+  // Backward compat: infer spawns: "*" when tools includes "task"
+  if (spawns === undefined && tools?.includes("task")) {
+    spawns = "*";
+  }
+
+  const output = frontmatter.output !== undefined ? frontmatter.output : undefined;
+  const model = typeof frontmatter.model === "string" ? frontmatter.model : undefined;
+  const thinkingLevel = parseThinkingLevel(frontmatter);
+
+  return { name, description, tools, spawns, model, output, thinkingLevel };
+}
+
 export async function loadSkillsFromDir(
   _ctx: LoadContext,
   options: {
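The helpers added above are shared by discovery providers. A minimal usage sketch, assuming only the exported signatures shown in this hunk (the consuming import path, the file location, and the sample agent definition are illustrative):

```ts
import { expandPath, parseAgentFields, parseFrontmatter } from "./discovery/helpers";

// Illustrative agent definition: YAML frontmatter followed by a prompt body.
const fileContents = [
  "---",
  "name: reviewer",
  "description: Code review agent",
  "tools: read, grep, bash",
  "thinking: high",
  "---",
  "Prompt body goes here.",
].join("\n");

// Resolve a user-supplied location such as "~/.pi/agents/reviewer.md" (path is hypothetical).
const agentPath = expandPath("~/.pi/agents/reviewer.md");

// Split frontmatter from the body, then extract typed agent fields.
const { frontmatter } = parseFrontmatter(fileContents);
const agent = parseAgentFields(frontmatter);

if (agent) {
  // tools/spawns accept either arrays or comma-separated strings;
  // thinkingLevel is only set when the value matches one of the recognized levels.
  console.log(agent.name, agent.tools, agent.thinkingLevel ?? "default", agentPath);
}
```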
package/src/migrations.ts CHANGED
@@ -8,6 +8,7 @@ import chalk from "chalk";
 import { getAgentDbPath, getAgentDir, getBinDir } from "./config";
 import { AgentStorage } from "./core/agent-storage";
 import type { AuthCredential } from "./core/auth-storage";
+import { logger } from "./core/logger";

 /**
  * Migrate PI_* environment variables to OMP_* equivalents.
@@ -55,8 +56,8 @@ export function migrateAuthToAgentDb(): string[] {
         providers.push(provider);
       }
       renameSync(oauthPath, `${oauthPath}.migrated`);
-    } catch {
-      // Skip on error
+    } catch (error) {
+      logger.warn("Failed to migrate oauth.json", { path: oauthPath, error: String(error) });
     }
   }

@@ -75,8 +76,8 @@ export function migrateAuthToAgentDb(): string[] {
         delete settings.apiKeys;
         writeFileSync(settingsPath, JSON.stringify(settings, null, 2));
       }
-    } catch {
-      // Skip on error
+    } catch (error) {
+      logger.warn("Failed to migrate settings.json apiKeys", { path: settingsPath, error: String(error) });
     }
   }

@@ -105,7 +106,8 @@ export function migrateSessionsFromAgentRoot(): void {
     files = readdirSync(agentDir)
       .filter((f) => f.endsWith(".jsonl"))
       .map((f) => join(agentDir, f));
-  } catch {
+  } catch (error) {
+    logger.warn("Failed to read agent directory for session migration", { path: agentDir, error: String(error) });
     return;
   }

@@ -137,8 +139,8 @@ export function migrateSessionsFromAgentRoot(): void {
       if (existsSync(newPath)) continue; // Skip if target exists

       renameSync(file, newPath);
-    } catch {
-      // Skip files that can't be migrated
+    } catch (error) {
+      logger.warn("Failed to migrate session file", { path: file, error: String(error) });
     }
   }
 }
@@ -168,8 +170,8 @@ function migrateToolsToBin(): void {
       try {
         renameSync(oldPath, newPath);
         movedAny = true;
-      } catch {
-        // Ignore errors
+      } catch (error) {
+        logger.warn("Failed to migrate binary", { from: oldPath, to: newPath, error: String(error) });
       }
     } else {
       // Target exists, just delete the old one
package/src/modes/interactive/components/extensions/state-manager.ts CHANGED
@@ -13,6 +13,7 @@ import type { Skill } from "../../../../capability/skill";
 import type { SlashCommand } from "../../../../capability/slash-command";
 import type { CustomTool } from "../../../../capability/tool";
 import type { SourceMeta } from "../../../../capability/types";
+import { logger } from "../../../../core/logger";
 import {
   disableProvider,
   enableProvider,
@@ -105,8 +106,8 @@ export async function loadAllExtensions(cwd?: string, disabledIds?: string[]): P
       getDescription: (s) => s.frontmatter?.description,
       getTrigger: (s) => s.frontmatter?.globs?.join(", "),
     });
-  } catch {
-    // Capability may not be registered
+  } catch (error) {
+    logger.warn("Failed to load skills capability", { error: String(error) });
   }

   // Load rules
@@ -116,8 +117,8 @@ export async function loadAllExtensions(cwd?: string, disabledIds?: string[]): P
       getDescription: (r) => r.description,
       getTrigger: (r) => r.globs?.join(", ") || (r.alwaysApply ? "always" : undefined),
     });
-  } catch {
-    // Capability may not be registered
+  } catch (error) {
+    logger.warn("Failed to load rules capability", { error: String(error) });
   }

   // Load custom tools
@@ -126,8 +127,8 @@ export async function loadAllExtensions(cwd?: string, disabledIds?: string[]): P
     addItems(tools.all, "tool", {
       getDescription: (t) => t.description,
     });
-  } catch {
-    // Capability may not be registered
+  } catch (error) {
+    logger.warn("Failed to load tools capability", { error: String(error) });
   }

   // Load extension modules
@@ -135,8 +136,8 @@ export async function loadAllExtensions(cwd?: string, disabledIds?: string[]): P
     const modules = await loadCapability<ExtensionModule>("extension-modules", loadOpts);
     const nativeModules = modules.all.filter((module) => module._source.provider === "native");
     addItems(nativeModules, "extension-module");
-  } catch {
-    // Capability may not be registered
+  } catch (error) {
+    logger.warn("Failed to load extension-modules capability", { error: String(error) });
   }

   // Load MCP servers
@@ -178,8 +179,8 @@ export async function loadAllExtensions(cwd?: string, disabledIds?: string[]): P
         raw: server,
       });
     }
-  } catch {
-    // Capability may not be registered
+  } catch (error) {
+    logger.warn("Failed to load mcps capability", { error: String(error) });
   }

   // Load prompts
@@ -189,8 +190,8 @@ export async function loadAllExtensions(cwd?: string, disabledIds?: string[]): P
       getDescription: () => undefined,
       getTrigger: (p) => `/prompts:${p.name}`,
     });
-  } catch {
-    // Capability may not be registered
+  } catch (error) {
+    logger.warn("Failed to load prompts capability", { error: String(error) });
   }

   // Load slash commands
@@ -200,8 +201,8 @@ export async function loadAllExtensions(cwd?: string, disabledIds?: string[]): P
       getDescription: () => undefined,
       getTrigger: (c) => `/${c.name}`,
     });
-  } catch {
-    // Capability may not be registered
+  } catch (error) {
+    logger.warn("Failed to load slash-commands capability", { error: String(error) });
   }

   // Load hooks
@@ -243,8 +244,8 @@ export async function loadAllExtensions(cwd?: string, disabledIds?: string[]): P
         raw: hook,
       });
     }
-  } catch {
-    // Capability may not be registered
+  } catch (error) {
+    logger.warn("Failed to load hooks capability", { error: String(error) });
   }

   // Load context files
@@ -288,8 +289,8 @@ export async function loadAllExtensions(cwd?: string, disabledIds?: string[]): P
         raw: file,
      });
    }
-  } catch {
-    // Capability may not be registered
+  } catch (error) {
+    logger.warn("Failed to load context-files capability", { error: String(error) });
   }

   return extensions;
package/src/prompts/agents/frontmatter.md CHANGED
@@ -3,5 +3,6 @@ name: {{name}}
 description: {{description}}
 {{#if spawns}}spawns: {{spawns}}
 {{/if}}{{#if model}}model: {{model}}
+{{/if}}{{#if thinkingLevel}}thinkingLevel: {{thinkingLevel}}
 {{/if}}---
 {{body}}
package/src/prompts/agents/reviewer.md CHANGED
@@ -4,6 +4,33 @@ description: Code review specialist for quality and security analysis
 tools: read, grep, find, ls, bash, report_finding
 spawns: explore, task
 model: pi/slow, gpt-5.2-codex, gpt-5.2, codex, gpt
+output:
+  properties:
+    overall_correctness:
+      enum: [correct, incorrect]
+    explanation:
+      type: string
+    confidence:
+      type: number
+  optionalProperties:
+    findings:
+      elements:
+        properties:
+          title:
+            type: string
+          body:
+            type: string
+          priority:
+            type: number
+          confidence:
+            type: number
+          file_path:
+            type: string
+          line_start:
+            type: number
+          line_end:
+            type: number
+  required: [overall_correctness, explanation, confidence]
 ---

 You are a senior engineer reviewing a proposed code change. Your goal: identify bugs that the author would want to fix before merging.
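Read as a type, the `output` schema declared above corresponds roughly to the shape below; the second snippet shows what a final payload might look like when sent under `data` in the `complete` call, per the instructions in the next hunk. Both are illustrative sketches, not package code: the field names come from the schema, the values are invented.

```ts
// Rough TypeScript mirror of the reviewer's declared output schema (illustrative only).
export interface ReviewerOutput {
  overall_correctness: "correct" | "incorrect";
  explanation: string;
  confidence: number;
  findings?: Array<{
    title: string;
    body: string;
    priority: number;
    confidence: number;
    file_path: string;
    line_start: number;
    line_end: number;
  }>;
}

// Example final payload; `findings` is omitted because it is populated from report_finding calls.
export const completeArgs: { data: ReviewerOutput } = {
  data: {
    overall_correctness: "incorrect",
    explanation: "One blocking bug in the changed error-handling path; details were reported via report_finding.",
    confidence: 0.8,
  },
};
```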
@@ -64,11 +91,12 @@ Each `report_finding` requires:
 - `file_path`: Absolute path
 - `line_start`, `line_end`: Range ≤10 lines, must overlap the diff

-Final `complete` call:
+Final `complete` call (payload goes under `data`):

-- `overall_correctness`: "correct" (no bugs/blockers) or "incorrect"
-- `explanation`: 1-3 sentences
-- `confidence`: 0.0-1.0
+- `data.overall_correctness`: "correct" (no bugs/blockers) or "incorrect"
+- `data.explanation`: Plain text, 1-3 sentences summarizing your verdict. Do NOT include JSON, do NOT repeat findings here (they're already captured via `report_finding`).
+- `data.confidence`: 0.0-1.0
+- `data.findings`: Optional; MUST omit (it is populated from `report_finding` calls)

 Correctness judgment ignores non-blocking issues (style, docs, nits).

package/src/prompts/tools/task.md CHANGED
@@ -12,12 +12,14 @@ If you discussed requirements, plans, schemas, or decisions with the user, you M
 ## Available Agents

 {{#list agents prefix="- " join="\n"}}
-{{name}}: {{description}} (Tools: {{default (join tools ", ") "All tools"}})
+{{name}}: {{description}} (Tools: {{default (join tools ", ") "All tools"}}{{#if output}}, Output: structured{{/if}})
 {{/list}}
 {{#if moreAgents}}
 ...and {{moreAgents}} more agents
 {{/if}}

+Agents with "Output: structured" have a fixed schema enforced via frontmatter; your `output` parameter will be ignored for these agents.
+
 ## When NOT to Use

 - Reading a specific file path → Use Read tool instead