@oh-my-pi/pi-coding-agent 6.8.1 → 6.8.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,40 @@
 
  ## [Unreleased]
 
+ ## [6.8.3] - 2026-01-21
+
+ ### Changed
+
+ - Updated keybinding system to normalize key IDs to lowercase
+ - Changed label edit shortcut from 'l' to 'Shift+L' in tree selector
+ - Changed output file extension from `.out.md` to `.md` for artifacts
+
+ ### Removed
+
+ - Removed bundled worktree command from custom commands loader
+
+ ### Fixed
+
+ - Fixed keybinding case sensitivity issues by normalizing all key IDs
+ - Fixed task artifact path handling and simplified file structure
+
+ ## [6.8.2] - 2026-01-21
+
+ ### Fixed
+
+ - Improved error messages when multiple text occurrences are found by showing line previews and context
+ - Enhanced patch application to better handle duplicate content in context lines
+ - Added occurrence previews to help users disambiguate between multiple matches
+ - Fixed cache invalidation for streaming edits to prevent stale data
+ - Fixed file existence check for prompt templates directory
+ - Fixed bash output streaming to prevent premature stream closure
+ - Fixed LSP client request handling when signal is already aborted
+ - Fixed git apply operations with stdin input handling
+
+ ### Security
+
+ - Updated Anthropic authentication to handle manual code input securely
+
  ## [6.8.1] - 2026-01-20
 
  ### Fixed
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@oh-my-pi/pi-coding-agent",
- "version": "6.8.1",
+ "version": "6.8.3",
  "description": "Coding agent CLI with read, bash, edit, write tools and session management",
  "type": "module",
  "ompConfig": {
@@ -40,11 +40,11 @@
  "prepublishOnly": "bun run generate-template && bun run clean && bun run build"
  },
  "dependencies": {
- "@oh-my-pi/pi-agent-core": "6.8.1",
- "@oh-my-pi/pi-ai": "6.8.1",
- "@oh-my-pi/pi-git-tool": "6.8.1",
- "@oh-my-pi/pi-tui": "6.8.1",
- "@oh-my-pi/pi-utils": "6.8.1",
+ "@oh-my-pi/pi-agent-core": "6.8.3",
+ "@oh-my-pi/pi-ai": "6.8.3",
+ "@oh-my-pi/pi-git-tool": "6.8.3",
+ "@oh-my-pi/pi-tui": "6.8.3",
+ "@oh-my-pi/pi-utils": "6.8.3",
  "@openai/agents": "^0.3.7",
  "@sinclair/typebox": "^0.34.46",
  "ajv": "^8.17.1",
@@ -63,7 +63,6 @@ import { unmountAll } from "./ssh/sshfs-mount";
  import type { BashOperations } from "./tools/bash";
  import { normalizeDiff, normalizeToLF, ParseError, previewPatch, stripBom } from "./tools/patch";
  import { resolveToCwd } from "./tools/path-utils";
- import { getArtifactsDir } from "./tools/task/artifacts";
  import type { TodoItem } from "./tools/todo-write";
  import type { TtsrManager } from "./ttsr";
 
@@ -454,15 +453,19 @@ export class AgentSession {
  }
 
  if (event.message.role === "toolResult") {
- const { $normative, toolCallId } = event.message as {
+ const { toolName, $normative, toolCallId, details } = event.message as {
  toolName?: string;
  toolCallId?: string;
- details?: unknown;
+ details?: { path?: string };
  $normative?: Record<string, unknown>;
  };
  if ($normative && toolCallId && this.settingsManager.getNormativeRewrite()) {
  await this._rewriteToolCallArgs(toolCallId, $normative);
  }
+ // Invalidate streaming edit cache when edit tool completes to prevent stale data
+ if (toolName === "edit" && details?.path) {
+ this._invalidateFileCacheForPath(details.path);
+ }
  }
  }
 
@@ -579,11 +582,16 @@ export class AgentSession {
  this._streamingEditFileCache.set(resolvedPath, normalizeToLF(text));
  }
  } catch {
- // Ignore errors - mark as empty string so we don't retry
- this._streamingEditFileCache.set(resolvedPath, "");
+ // Don't cache on read errors - let the edit tool handle them
  }
  }
 
+ /** Invalidate cache for a file after an edit completes to prevent stale data */
+ private _invalidateFileCacheForPath(path: string): void {
+ const resolvedPath = resolveToCwd(path, this.sessionManager.getCwd());
+ this._streamingEditFileCache.delete(resolvedPath);
+ }
+
  private _maybeAbortStreamingEdit(event: AgentEvent): void {
  if (!this.settingsManager.getEditStreamingAbort()) return;
  if (this._streamingEditAbortTriggered) return;
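
The two hunks above replace "cache an empty string on read failure" with "skip caching and invalidate explicitly once the edit tool reports a result for that path". A minimal sketch of that lifecycle, reduced to just the cache (class and callback names here are illustrative, not taken from the package):

```ts
// Sketch only: a path-keyed cache that is primed lazily while an edit streams
// and dropped when the corresponding "edit" toolResult arrives.
class StreamingEditCacheSketch {
  private cache = new Map<string, string>();

  async prime(resolvedPath: string, readFile: (p: string) => Promise<string>): Promise<void> {
    if (this.cache.has(resolvedPath)) return;
    try {
      this.cache.set(resolvedPath, await readFile(resolvedPath));
    } catch {
      // Don't cache on read errors - let the edit tool surface them itself.
    }
  }

  // Called when a toolResult for the "edit" tool carries a details.path.
  invalidate(resolvedPath: string): void {
    this.cache.delete(resolvedPath);
  }
}
```
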
@@ -1982,10 +1990,7 @@ export class AgentSession {
  const sessionFile = this.sessionManager.getSessionFile();
  if (!sessionFile) return;
 
- const artifactsDir = getArtifactsDir(sessionFile);
- if (!artifactsDir) return;
-
- const todoPath = `${artifactsDir}/todos.json`;
+ const todoPath = `${sessionFile.slice(0, -6)}/todos.json`;
  const file = Bun.file(todoPath);
  if (!(await file.exists())) {
  this._todoReminderCount = 0;
@@ -560,7 +560,11 @@ export class AuthStorage {
 
  switch (provider) {
  case "anthropic":
- credentials = await loginAnthropic(ctrl);
+ credentials = await loginAnthropic({
+ ...ctrl,
+ onManualCodeInput: async () =>
+ ctrl.onPrompt({ message: "Paste the authorization code (or full redirect URL):" }),
+ });
  break;
  case "github-copilot":
  credentials = await loginGitHubCopilot({
@@ -34,7 +34,7 @@ export async function executeBash(command: string, options?: BashExecutorOptions
  const prefixedCommand = prefix ? `${prefix} ${command}` : command;
  const finalCommand = `${snapshotPrefix}${prefixedCommand}`;
 
- const stream = new OutputSink({ onLine: options?.onChunk });
+ const stream = new OutputSink({ onChunk: options?.onChunk });
 
  const child = cspawn([shell, ...args, finalCommand], {
  cwd: options?.cwd,
@@ -44,6 +44,7 @@ export async function executeBash(command: string, options?: BashExecutorOptions
  });
 
  // Pump streams - errors during abort/timeout are expected
+ // Use preventClose to avoid closing the shared sink when either stream finishes
  await Promise.allSettled([
  child.stdout.pipeTo(stream.createWritable()),
  child.stderr.pipeTo(stream.createWritable()),
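
Both `executeBash` hunks rename the sink option from `onLine` to `onChunk`, and the new comment is about keeping the shared sink usable while stdout and stderr are pumped concurrently: `pipeTo` closes its destination by default when a source ends. The OutputSink internals are not part of this diff, so the sketch below is only an assumption about the general shape, a sink that hands each stream a writable which forwards chunks without tearing anything down:

```ts
// Illustrative shared sink: each createWritable() forwards decoded chunks to a
// single callback and never closes shared state when its own pipe ends.
class SharedSinkSketch {
  constructor(private readonly onChunk?: (chunk: string) => void) {}

  createWritable(): WritableStream<Uint8Array> {
    const decoder = new TextDecoder();
    return new WritableStream<Uint8Array>({
      write: (chunk) => {
        this.onChunk?.(decoder.decode(chunk, { stream: true }));
      },
      // No close/abort handling here: whichever of stdout/stderr finishes
      // first must not end output collection for the other stream.
    });
  }
}
```
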
@@ -92,7 +93,7 @@ export async function executeBashWithOperations(
  operations: BashOperations,
  options?: BashExecutorOptions,
  ): Promise<BashResult> {
- const stream = new OutputSink({ onLine: options?.onChunk });
+ const stream = new OutputSink({ onChunk: options?.onChunk });
  const writable = stream.createWritable();
  const writer = writable.getWriter();
 
@@ -13,7 +13,6 @@ import { getAgentDir, getConfigDirs } from "../../config";
  import * as piCodingAgent from "../../index";
  import { execCommand } from "../exec";
  import { ReviewCommand } from "./bundled/review";
- import { WorktreeCommand } from "./bundled/wt";
  import type {
  CustomCommand,
  CustomCommandAPI,
@@ -153,13 +152,6 @@ function loadBundledCommands(sharedApi: CustomCommandAPI): LoadedCustomCommand[]
  source: "bundled",
  });
 
- bundled.push({
- path: "bundled:wt",
- resolvedPath: "bundled:wt",
- command: new WorktreeCommand(sharedApi),
- source: "bundled",
- });
-
  return bundled;
  }
 
@@ -124,6 +124,8 @@ const KEY_LABELS: Record<string, string> = {
  right: "Right",
  };
 
+ const normalizeKeyId = (key: KeyId): KeyId => key.toLowerCase() as KeyId;
+
  function formatKeyPart(part: string): string {
  const lower = part.toLowerCase();
  const modifier = MODIFIER_LABELS[lower];
@@ -199,14 +201,20 @@ export class KeybindingsManager {
  // Set defaults for app actions
  for (const [action, keys] of Object.entries(DEFAULT_APP_KEYBINDINGS)) {
  const keyArray = Array.isArray(keys) ? keys : [keys];
- this.appActionToKeys.set(action as AppAction, [...keyArray]);
+ this.appActionToKeys.set(
+ action as AppAction,
+ keyArray.map((key) => normalizeKeyId(key as KeyId)),
+ );
  }
 
  // Override with user config (app actions only)
  for (const [action, keys] of Object.entries(this.config)) {
  if (keys === undefined || !isAppAction(action)) continue;
  const keyArray = Array.isArray(keys) ? keys : [keys];
- this.appActionToKeys.set(action, keyArray);
+ this.appActionToKeys.set(
+ action,
+ keyArray.map((key) => normalizeKeyId(key as KeyId)),
+ );
  }
  }
 
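
Normalizing key IDs once, at registration time, is what makes the case-sensitivity fix stick: a binding written as `Ctrl+S` in a user config and a `ctrl+s` key event must resolve to the same entry. A small illustration (the action name is made up):

```ts
const normalizeKeyId = (key: string): string => key.toLowerCase();

const appActionToKeys = new Map<string, string[]>(); // action -> normalized key IDs
appActionToKeys.set("saveSession", ["Ctrl+S"].map(normalizeKeyId));

function actionForKey(pressed: string): string | undefined {
  const wanted = normalizeKeyId(pressed);
  for (const [action, keys] of appActionToKeys) {
    if (keys.includes(wanted)) return action;
  }
  return undefined;
}

console.log(actionForKey("ctrl+s")); // "saveSession", regardless of config casing
```
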
@@ -432,7 +432,7 @@ async function loadTemplatesFromDir(
  }
  }
  } catch (error) {
- if (!Bun.file(dir).exists()) {
+ if (!(await Bun.file(dir).exists())) {
  return [];
  }
  logger.warn("Failed to scan prompt templates directory", { dir, error: String(error) });
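
The added `await` above matters because `Bun.file(dir).exists()` returns a `Promise<boolean>`; a Promise is always truthy, so the unawaited check could never take the early-return branch. The same pitfall in isolation:

```ts
const dir = Bun.file("./prompts"); // illustrative path

// Bug: exists() returns Promise<boolean>, which is truthy, so this branch is unreachable.
if (!dir.exists()) {
  console.log("never runs");
}

// Fix: resolve the promise before negating it.
if (!(await dir.exists())) {
  console.log("directory is missing");
}
```
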
@@ -741,7 +741,7 @@ export async function sendRequest(
  signal.addEventListener("abort", abortHandler, { once: true });
  if (signal.aborted) {
  abortHandler();
- return;
+ return promise;
  }
  }
 
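
Returning `promise` instead of falling through with a bare `return` means a caller that passes an already-aborted signal still receives the request promise and can observe the rejection installed by `abortHandler`, rather than getting `undefined`. A stripped-down reproduction of the pattern (the real request plumbing is omitted):

```ts
function sendRequestSketch(signal?: AbortSignal): Promise<never> {
  let reject!: (err: Error) => void;
  const promise = new Promise<never>((_, rej) => (reject = rej));
  const abortHandler = () => reject(new Error("Request aborted"));

  if (signal) {
    signal.addEventListener("abort", abortHandler, { once: true });
    if (signal.aborted) {
      abortHandler();
      return promise; // previously `return;`, which handed the caller undefined
    }
  }
  return promise;
}

const ctrl = new AbortController();
ctrl.abort();
sendRequestSketch(ctrl.signal).catch((err) => console.log(err.message)); // "Request aborted"
```
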
@@ -25,7 +25,6 @@ import {
  TRUNCATE_LENGTHS,
  truncate,
  } from "./render-utils";
- import { getArtifactsDir } from "./task/artifacts";
 
  const outputSchema = Type.Object({
  ids: Type.Array(Type.String(), {
@@ -164,7 +163,7 @@ function applyQuery(data: unknown, query: string): unknown {
  function listAvailableOutputs(artifactsDir: string): string[] {
  try {
  const files = fs.readdirSync(artifactsDir);
- return files.filter((f) => f.endsWith(".out.md")).map((f) => f.replace(".out.md", ""));
+ return files.filter((f) => f.endsWith(".md")).map((f) => f.replace(".md", ""));
  } catch {
  return [];
  }
@@ -274,8 +273,8 @@ export class OutputTool implements AgentTool<typeof outputSchema, OutputToolDeta
  };
  }
 
- const artifactsDir = getArtifactsDir(sessionFile);
- if (!artifactsDir || !fs.existsSync(artifactsDir)) {
+ const artifactsDir = sessionFile.slice(0, -6); // strip .jsonl extension
+ if (!fs.existsSync(artifactsDir)) {
  return {
  content: [{ type: "text", text: "No artifacts directory found" }],
  details: { outputs: [], notFound: params.ids },
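
Several hunks in this release replace the removed `getArtifactsDir` helper with `sessionFile.slice(0, -6)`: session files end in `.jsonl`, which is exactly six characters, so slicing it off yields the sibling artifacts directory with the same base name. For example (the path is illustrative):

```ts
const sessionFile = "/home/user/.omp/sessions/abc123.jsonl"; // illustrative
const artifactsDir = sessionFile.slice(0, -6); // "/home/user/.omp/sessions/abc123"

// Per this diff, artifacts now live directly in that directory:
//   <artifactsDir>/todos.json     - todo reminders
//   <artifactsDir>/<taskId>.jsonl - subtask session
//   <artifactsDir>/<taskId>.md    - subtask output (was <taskId>.out.md)
```
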
@@ -296,14 +295,14 @@ export class OutputTool implements AgentTool<typeof outputSchema, OutputToolDeta
  const queryResults: Array<{ id: string; value: unknown }> = [];
 
  for (const id of params.ids) {
- const outputPath = path.join(artifactsDir, `${id}.out.md`);
-
- if (!fs.existsSync(outputPath)) {
+ const outputPath = path.join(artifactsDir, `${id}.md`);
+ const file = Bun.file(outputPath);
+ if (!(await file.exists())) {
  notFound.push(id);
  continue;
  }
 
- const rawContent = fs.readFileSync(outputPath, "utf-8");
+ const rawContent = await file.text();
  const rawLines = rawContent.split("\n");
  const totalLines = rawLines.length;
  const totalChars = rawContent.length;
@@ -92,17 +92,17 @@ function adjustLinesIndentation(patternLines: string[], actualLines: string[], n
  }
  }
 
- // Build a map from trimmed content to available (pattern index, actual index) pairs
- // This lets us find context lines and their corresponding actual content
- const contentToIndices = new Map<string, Array<{ patternIdx: number; actualIdx: number }>>();
- for (let i = 0; i < Math.min(patternLines.length, actualLines.length); i++) {
- const trimmed = patternLines[i].trim();
+ // Build a map from trimmed content to actual lines (by content, not position)
+ // This handles fuzzy matches where pattern and actual may not be positionally aligned
+ const contentToActualLines = new Map<string, string[]>();
+ for (const line of actualLines) {
+ const trimmed = line.trim();
  if (trimmed.length === 0) continue;
- const arr = contentToIndices.get(trimmed);
+ const arr = contentToActualLines.get(trimmed);
  if (arr) {
- arr.push({ patternIdx: i, actualIdx: i });
+ arr.push(line);
  } else {
- contentToIndices.set(trimmed, [{ patternIdx: i, actualIdx: i }]);
+ contentToActualLines.set(trimmed, [line]);
  }
  }
 
@@ -119,8 +119,8 @@ function adjustLinesIndentation(patternLines: string[], actualLines: string[], n
  }
  const avgDelta = deltaCount > 0 ? Math.round(totalDelta / deltaCount) : 0;
 
- // Track which indices we've used to handle duplicate content correctly
- const usedIndices = new Set<number>();
+ // Track which actual lines we've used to handle duplicate content correctly
+ const usedActualLines = new Map<string, number>(); // trimmed content -> count used
 
  return newLines.map((newLine) => {
  if (newLine.trim().length === 0) {
@@ -128,16 +128,15 @@ function adjustLinesIndentation(patternLines: string[], actualLines: string[], n
  }
 
  const trimmed = newLine.trim();
- const indices = contentToIndices.get(trimmed);
-
- // Check if this is a context line (same trimmed content exists in pattern)
- if (indices) {
- for (const { patternIdx, actualIdx } of indices) {
- if (!usedIndices.has(patternIdx)) {
- usedIndices.add(patternIdx);
- // Use actual file content directly for context lines
- return actualLines[actualIdx];
- }
+ const matchingActualLines = contentToActualLines.get(trimmed);
+
+ // Check if this is a context line (same trimmed content exists in actual)
+ if (matchingActualLines && matchingActualLines.length > 0) {
+ const usedCount = usedActualLines.get(trimmed) ?? 0;
+ if (usedCount < matchingActualLines.length) {
+ usedActualLines.set(trimmed, usedCount + 1);
+ // Use actual file content directly for context lines
+ return matchingActualLines[usedCount];
  }
  }
 
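
The three hunks above switch `adjustLinesIndentation` from positional bookkeeping (pattern index mapped to actual index) to content-keyed bookkeeping: every actual line is bucketed by its trimmed content, and repeated context lines consume successive entries from the bucket. A condensed worked example of that consumption order:

```ts
// Bucket actual lines by trimmed content.
const actualLines = ["  }", "    }", "  return;"]; // illustrative file content
const contentToActualLines = new Map<string, string[]>();
for (const line of actualLines) {
  const trimmed = line.trim();
  if (trimmed.length === 0) continue;
  const bucket = contentToActualLines.get(trimmed) ?? [];
  bucket.push(line);
  contentToActualLines.set(trimmed, bucket);
}

// Hand duplicates out in order instead of always returning the first match.
const usedActualLines = new Map<string, number>();
function takeActual(trimmed: string): string | undefined {
  const matches = contentToActualLines.get(trimmed) ?? [];
  const used = usedActualLines.get(trimmed) ?? 0;
  if (used >= matches.length) return undefined;
  usedActualLines.set(trimmed, used + 1);
  return matches[used];
}

console.log(takeActual("}")); // "  }"   - first closing brace keeps its own indentation
console.log(takeActual("}")); // "    }" - second request gets the next occurrence, not a repeat
```
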
@@ -599,9 +598,11 @@ function applyCharacterMatch(
 
  // Check for multiple exact occurrences
  if (matchOutcome.occurrences && matchOutcome.occurrences > 1) {
+ const previews = matchOutcome.occurrencePreviews?.join("\n\n") ?? "";
+ const moreMsg = matchOutcome.occurrences > 5 ? ` (showing first 5 of ${matchOutcome.occurrences})` : "";
  throw new ApplyPatchError(
- `Found ${matchOutcome.occurrences} occurrences of the text in ${path}. ` +
- `The text must be unique. Please provide more context to make it unique.`,
+ `Found ${matchOutcome.occurrences} occurrences in ${path}${moreMsg}:\n\n${previews}\n\n` +
+ `Add more context lines to disambiguate.`,
  );
  }
 
@@ -857,9 +858,22 @@ function computeReplacements(
  if (hunk.changeContext === undefined && !hunk.hasContextLines && !hunk.isEndOfFile && lineHint === undefined) {
  const secondMatch = seekSequence(originalLines, pattern, found + 1, false, { allowFuzzy });
  if (secondMatch.index !== undefined) {
+ // Extract 3-line previews for each match
+ const formatPreview = (startIdx: number) => {
+ const lines = originalLines.slice(startIdx, startIdx + 3);
+ return lines
+ .map((line, i) => {
+ const num = startIdx + i + 1;
+ const truncated = line.length > 60 ? `${line.slice(0, 57)}...` : line;
+ return ` ${num} | ${truncated}`;
+ })
+ .join("\n");
+ };
+ const preview1 = formatPreview(found);
+ const preview2 = formatPreview(secondMatch.index);
  throw new ApplyPatchError(
- `Found 2 occurrences of the text in ${path}. ` +
- `The text must be unique. Please provide more context to make it unique.`,
+ `Found 2 occurrences in ${path}:\n\n${preview1}\n\n${preview2}\n\n` +
+ `Add more context lines to disambiguate.`,
  );
  }
  }
@@ -228,9 +228,11 @@ export function replaceText(content: string, oldText: string, newText: string, o
  });
 
  if (matchOutcome.occurrences && matchOutcome.occurrences > 1) {
+ const previews = matchOutcome.occurrencePreviews?.join("\n\n") ?? "";
+ const moreMsg = matchOutcome.occurrences > 5 ? ` (showing first 5 of ${matchOutcome.occurrences})` : "";
  throw new Error(
- `Found ${matchOutcome.occurrences} occurrences of the text. ` +
- `The text must be unique. Please provide more context to make it unique, or use all: true to replace all.`,
+ `Found ${matchOutcome.occurrences} occurrences${moreMsg}:\n\n${previews}\n\n` +
+ `Add more context lines to disambiguate.`,
  );
  }
 
@@ -307,8 +309,10 @@ export async function computeEditDiff(
  });
 
  if (matchOutcome.occurrences && matchOutcome.occurrences > 1) {
+ const previews = matchOutcome.occurrencePreviews?.join("\n\n") ?? "";
+ const moreMsg = matchOutcome.occurrences > 5 ? ` (showing first 5 of ${matchOutcome.occurrences})` : "";
  return {
- error: `Found ${matchOutcome.occurrences} occurrences of the text in ${path}. The text must be unique. Please provide more context to make it unique, or use all: true to replace all.`,
+ error: `Found ${matchOutcome.occurrences} occurrences in ${path}${moreMsg}:\n\n${previews}\n\nAdd more context lines to disambiguate.`,
  };
  }
 
@@ -215,7 +215,25 @@ export function findMatch(
  if (exactIndex !== -1) {
  const occurrences = content.split(target).length - 1;
  if (occurrences > 1) {
- return { occurrences };
+ // Find line numbers and previews for each occurrence (up to 5)
+ const contentLines = content.split("\n");
+ const occurrenceLines: number[] = [];
+ const occurrencePreviews: string[] = [];
+ let searchStart = 0;
+ for (let i = 0; i < 5; i++) {
+ const idx = content.indexOf(target, searchStart);
+ if (idx === -1) break;
+ const lineNumber = content.slice(0, idx).split("\n").length;
+ occurrenceLines.push(lineNumber);
+ // Extract 3 lines starting from match (0-indexed)
+ const previewLines = contentLines.slice(lineNumber - 1, lineNumber + 2);
+ const preview = previewLines
+ .map((line, i) => ` ${lineNumber + i} | ${line.length > 60 ? `${line.slice(0, 57)}...` : line}`)
+ .join("\n");
+ occurrencePreviews.push(preview);
+ searchStart = idx + 1;
+ }
+ return { occurrences, occurrenceLines, occurrencePreviews };
  }
  const startLine = content.slice(0, exactIndex).split("\n").length;
  return {
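
With `occurrenceLines` and `occurrencePreviews` populated, the edit and patch error paths can show where each duplicate sits instead of only asking for more context. The resulting message looks roughly like this (file name, line numbers, and content are invented for illustration):

```ts
// Approximate shape of the new error text for a duplicated `return cache.get(key);`:
const exampleMessage = [
  "Found 2 occurrences in src/example.ts:",
  "",
  " 42 | return cache.get(key);",
  " 43 | }",
  " 44 |",
  "",
  " 89 | return cache.get(key);",
  " 90 | }",
  " 91 |",
  "",
  "Add more context lines to disambiguate.",
].join("\n");
console.log(exampleMessage);
```
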
@@ -390,8 +390,11 @@ export class EditTool implements AgentTool<TInput> {
  });
 
  if (matchOutcome.occurrences && matchOutcome.occurrences > 1) {
+ const previews = matchOutcome.occurrencePreviews?.join("\n\n") ?? "";
+ const moreMsg = matchOutcome.occurrences > 5 ? ` (showing first 5 of ${matchOutcome.occurrences})` : "";
  throw new Error(
- `Found ${matchOutcome.occurrences} occurrences of the text in ${path}. The text must be unique. Please provide more context to make it unique, or use all: true to replace all.`,
+ `Found ${matchOutcome.occurrences} occurrences in ${path}${moreMsg}:\n\n${previews}\n\n` +
+ `Add more context lines to disambiguate.`,
  );
  }
 
@@ -40,6 +40,10 @@ export interface MatchOutcome {
  closest?: FuzzyMatch;
  /** Number of occurrences if multiple exact matches found */
  occurrences?: number;
+ /** Line numbers where occurrences were found (1-indexed) */
+ occurrenceLines?: number[];
+ /** Preview snippets for each occurrence (up to 5) */
+ occurrencePreviews?: string[];
  /** Number of fuzzy matches above threshold */
  fuzzyMatches?: number;
  }
@@ -4,19 +4,19 @@
  * Runs each subagent in a Bun Worker and forwards AgentEvents for progress tracking.
  */
 
+ import path from "node:path";
  import type { AgentEvent, ThinkingLevel } from "@oh-my-pi/pi-agent-core";
  import type { AuthStorage } from "../../auth-storage";
  import type { EventBus } from "../../event-bus";
  import { callTool } from "../../mcp/client";
  import type { MCPManager } from "../../mcp/manager";
  import type { ModelRegistry } from "../../model-registry";
+ import { formatModelString, parseModelPattern } from "../../model-resolver";
  import { checkPythonKernelAvailability } from "../../python-kernel";
  import type { ToolSession } from "..";
  import { LspTool } from "../lsp/index";
  import type { LspParams } from "../lsp/types";
  import { PythonTool } from "../python";
- import { ensureArtifactsDir, getArtifactPaths } from "./artifacts";
- import { resolveModelPattern } from "./model-resolver";
  import { subprocessToolRegistry } from "./subprocess-tool-registry";
  import {
  type AgentDefinition,
@@ -256,20 +256,9 @@ export async function runSubprocess(options: ExecutorOptions): Promise<SingleRes
  const fullTask = context ? `${context}\n\n${task}` : task;
 
  // Set up artifact paths and write input file upfront if artifacts dir provided
- let artifactPaths: { inputPath: string; outputPath: string; jsonlPath: string } | undefined;
  let subtaskSessionFile: string | undefined;
-
  if (options.artifactsDir) {
- ensureArtifactsDir(options.artifactsDir);
- artifactPaths = getArtifactPaths(options.artifactsDir, taskId);
- subtaskSessionFile = artifactPaths.jsonlPath;
-
- // Write input file immediately (real-time visibility)
- try {
- await Bun.write(artifactPaths.inputPath, fullTask);
- } catch {
- // Non-fatal, continue without input artifact
- }
+ subtaskSessionFile = path.join(options.artifactsDir, `${taskId}.jsonl`);
  }
 
  // Add tools if specified
@@ -296,10 +285,26 @@ export async function runSubprocess(options: ExecutorOptions): Promise<SingleRes
  }
 
  const serializedSettings = options.settingsManager?.serialize();
- const availableModels = options.modelRegistry?.getAvailable().map((model) => `${model.provider}/${model.id}`);
-
- // Resolve and add model
- const resolvedModel = await resolveModelPattern(modelOverride || agent.model, availableModels, serializedSettings);
+ const availableModels = options.modelRegistry?.getAvailable() ?? [];
+
+ // Resolve model pattern to provider/modelId string
+ const modelPattern = modelOverride ?? agent.model;
+ let resolvedModel: string | undefined;
+ if (modelPattern) {
+ // Handle omp/<role> or pi/<role> aliases (e.g., "omp/slow", "pi/fast")
+ let effectivePattern = modelPattern;
+ const lower = modelPattern.toLowerCase();
+ if (lower.startsWith("omp/") || lower.startsWith("pi/")) {
+ const role = lower.startsWith("omp/") ? modelPattern.slice(4) : modelPattern.slice(3);
+ const roles = serializedSettings?.modelRoles as Record<string, string> | undefined;
+ const configured = roles?.[role] ?? roles?.[role.toLowerCase()];
+ if (configured) {
+ effectivePattern = configured;
+ }
+ }
+ const { model } = parseModelPattern(effectivePattern, availableModels);
+ resolvedModel = model ? formatModelString(model) : undefined;
+ }
  const sessionFile = subtaskSessionFile ?? null;
  const spawnsEnv = agent.spawns === undefined ? "" : agent.spawns === "*" ? "*" : agent.spawns.join(",");
 
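
The inlined resolution above first expands `omp/<role>` and `pi/<role>` aliases into whatever model string is configured for that role, then falls back to ordinary pattern parsing via `parseModelPattern`. Just the alias step, pulled out as a standalone sketch (role names and model strings are illustrative):

```ts
// Expand "omp/<role>" / "pi/<role>" into the configured model string, if any;
// other patterns pass through untouched.
function resolveRoleAlias(pattern: string, modelRoles?: Record<string, string>): string {
  const lower = pattern.toLowerCase();
  if (!lower.startsWith("omp/") && !lower.startsWith("pi/")) return pattern;
  const role = lower.startsWith("omp/") ? pattern.slice(4) : pattern.slice(3);
  return modelRoles?.[role] ?? modelRoles?.[role.toLowerCase()] ?? pattern;
}

const roles = { fast: "provider-a/small-model", slow: "provider-b/large-model" };
console.log(resolveRoleAlias("omp/fast", roles)); // "provider-a/small-model"
console.log(resolveRoleAlias("provider-b/large-model", roles)); // unchanged
```
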
@@ -1026,9 +1031,11 @@ export async function runSubprocess(options: ExecutorOptions): Promise<SingleRes
  // Write output artifact (input and jsonl already written in real-time)
  // Compute output metadata for Output tool integration
  let outputMeta: { lineCount: number; charCount: number } | undefined;
- if (artifactPaths) {
+ let outputPath: string | undefined;
+ if (options.artifactsDir) {
+ outputPath = path.join(options.artifactsDir, `${taskId}.md`);
  try {
- await Bun.write(artifactPaths.outputPath, rawOutput);
+ await Bun.write(outputPath, rawOutput);
  outputMeta = {
  lineCount: rawOutput.split("\n").length,
  charCount: rawOutput.length,
@@ -1060,7 +1067,7 @@ export async function runSubprocess(options: ExecutorOptions): Promise<SingleRes
  error: exitCode !== 0 && stderr ? stderr : undefined,
  aborted: wasAborted,
  usage: hasUsage ? accumulatedUsage : undefined,
- artifactPaths,
+ outputPath,
  extractedToolData: progress.extractedToolData,
  outputMeta,
  };
@@ -13,13 +13,17 @@
  * - Session artifacts for debugging
  */
 
+ import { mkdir, rm } from "node:fs/promises";
+ import { tmpdir } from "node:os";
+ import path from "node:path";
  import type { AgentTool, AgentToolResult, AgentToolUpdateCallback } from "@oh-my-pi/pi-agent-core";
  import type { Usage } from "@oh-my-pi/pi-ai";
+ import { nanoid } from "nanoid";
  import type { Theme } from "../../../modes/interactive/theme/theme";
  import taskDescriptionTemplate from "../../../prompts/tools/task.md" with { type: "text" };
  import { renderPromptTemplate } from "../../prompt-templates";
+ import type { ToolSession } from "..";
  import { formatDuration } from "../render-utils";
- import { cleanupTempDir, createTempArtifactsDir, getArtifactsDir } from "./artifacts";
  import { discoverAgents, getAgent } from "./discovery";
  import { runSubprocess } from "./executor";
  import { mapWithConcurrencyLimit } from "./parallel";
@@ -36,7 +40,6 @@ import {
 
  // Import review tools for side effects (registers subagent tool handlers)
  import "../review";
- import type { ToolSession } from "..";
 
  /** Format byte count for display */
  function formatBytes(bytes: number): string {
@@ -276,9 +279,10 @@ export class TaskTool implements AgentTool<typeof taskSchema, TaskToolDetails, T
 
  // Derive artifacts directory
  const sessionFile = this.session.getSessionFile();
- const artifactsDir = sessionFile ? getArtifactsDir(sessionFile) : null;
- const tempArtifactsDir = artifactsDir ? null : createTempArtifactsDir();
+ const artifactsDir = sessionFile ? sessionFile.slice(0, -6) : null;
+ const tempArtifactsDir = artifactsDir ? null : path.join(tmpdir(), `omp-task-${nanoid()}`);
  const effectiveArtifactsDir = artifactsDir || tempArtifactsDir!;
+ await mkdir(effectiveArtifactsDir, { recursive: true });
 
  // Initialize progress tracking
  const progressMap = new Map<number, AgentProgress>();
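
With the `artifacts` module gone, `TaskTool` derives the directory itself: the session file's sibling directory when a session exists, otherwise a disposable temp directory, created up front with `mkdir(..., { recursive: true })`. A standalone sketch of the same derivation (for illustration only):

```ts
import { mkdir } from "node:fs/promises";
import { tmpdir } from "node:os";
import path from "node:path";
import { nanoid } from "nanoid";

// Mirrors the derivation above, outside the class.
async function deriveArtifactsDir(sessionFile: string | null): Promise<string> {
  const dir = sessionFile
    ? sessionFile.slice(0, -6) // strip ".jsonl"
    : path.join(tmpdir(), `omp-task-${nanoid()}`);
  await mkdir(dir, { recursive: true }); // idempotent when the directory already exists
  return dir;
}
```
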
@@ -435,8 +439,8 @@ export class TaskTool implements AgentTool<typeof taskSchema, TaskToolDetails, T
  // Collect output paths (artifacts already written by executor in real-time)
  const outputPaths: string[] = [];
  for (const result of results) {
- if (result.artifactPaths) {
- outputPaths.push(result.artifactPaths.outputPath);
+ if (result.outputPath) {
+ outputPaths.push(result.outputPath);
  }
  }
 
@@ -468,7 +472,7 @@ export class TaskTool implements AgentTool<typeof taskSchema, TaskToolDetails, T
 
  // Cleanup temp directory if used
  if (tempArtifactsDir) {
- await cleanupTempDir(tempArtifactsDir);
+ await rm(tempArtifactsDir, { recursive: true, force: true });
  }
 
  return {
@@ -482,11 +486,6 @@ export class TaskTool implements AgentTool<typeof taskSchema, TaskToolDetails, T
  },
  };
  } catch (err) {
- // Cleanup temp directory on error
- if (tempArtifactsDir) {
- await cleanupTempDir(tempArtifactsDir);
- }
-
  return {
  content: [{ type: "text", text: `Task execution failed: ${err}` }],
  details: {
@@ -153,7 +153,8 @@ export interface SingleResult {
  aborted?: boolean;
  /** Aggregated usage from the subprocess, accumulated incrementally from message_end events. */
  usage?: Usage;
- artifactPaths?: { inputPath: string; outputPath: string; jsonlPath?: string };
+ /** Output path for the task result */
+ outputPath?: string;
  /** Data extracted by registered subprocess tool handlers (keyed by tool name) */
  extractedToolData?: Record<string, unknown[]>;
  /** Output metadata for Output tool integration */