pi-prompt-template-model 0.6.0 → 0.6.1

package/CHANGELOG.md CHANGED
@@ -2,6 +2,25 @@
2
2
 
3
3
  ## [Unreleased]
4
4
 
5
+ ## [0.6.1] - 2026-03-20
6
+
7
+ ### Added
8
+ - Added delegated prompt execution via direct extension event bus communication with `subagent` (`prompt-template:subagent:*` channels), including delegated custom-message persistence for loop summaries and context carry-forward.
9
+ - Added prompt frontmatter support for `subagent` and `inheritContext`, with `inheritContext: true` mapped to delegated fork context.
10
+ - Fork-context preambles are now handled directly by the subagent extension (via `DEFAULT_FORK_PREAMBLE` in `types.ts`) and apply to all fork-context subagent runs.
11
+ - Added runtime delegation override flags: `--subagent`, `--subagent=<name>`, and `--subagent:<name>`.
12
+ - Added a live progress widget above the editor during delegated subagent runs, showing elapsed time, tool count, tokens, the current tool, and a task preview, matching the native subagent tool card layout.
13
+ - Added styled completion card with task preview, tool call history, expandable output (Ctrl+O), and usage stats footer.
14
+
15
+ ### Changed
16
+ - Updated provider priority for ambiguous bare model IDs to prefer `openai-codex` before `anthropic`, `github-copilot`, and `openrouter`.
17
+ - Updated loop convergence and fresh-summary analysis to account for delegated subagent message payloads.
18
+
19
+ ### Fixed
20
+ - Delegated runs that never emit a start signal now fail fast with an explicit timeout error instead of hanging at start.
21
+ - Delegated runs no longer treat arbitrary terminal input containing escape sequences as an Esc cancellation; only a literal Esc cancels.
22
+ - Chain and loop restore paths now use the live runtime model/thinking state during cleanup, so restore is no longer skipped on mid-step failures.
23
+
5
24
  ## [0.6.0] - 2026-03-19
6
25
 
7
26
  ### Changed
package/README.md CHANGED
@@ -47,6 +47,14 @@ pi install npm:pi-prompt-template-model
47
47
 
48
48
  Restart pi to load the extension.
49
49
 
50
+ For delegated subagent execution (`subagent` and `inheritContext` frontmatter), install [pi-subagents](https://github.com/nicobailon/pi-subagents/) separately:
51
+
52
+ ```bash
53
+ pi install npm:pi-subagents
54
+ ```
55
+
56
+ pi-subagents is optional — everything else works without it. If you use `subagent: true` in a prompt template without pi-subagents installed, execution fails fast with a clear error.
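One way to picture the fail-fast check, as a sketch under assumptions (the callback shape and the timeout value are illustrative, not the extension's actual API): wait briefly for the delegated run's start signal, as described in this release's changelog, and raise a clear error if none arrives.

```ts
// Illustrative sketch only: the subscribe callback shape and the 5s timeout are
// assumptions, not the extension's real API. It shows the fail-fast idea: wait
// for a start signal from the delegated run and raise a clear error if none
// arrives (for example, because pi-subagents is not installed).
function waitForStartSignal(
  onStart: (listener: () => void) => () => void, // subscribe; returns an unsubscribe function
  timeoutMs = 5_000,                             // assumed value
): Promise<void> {
  return new Promise((resolve, reject) => {
    const timer = setTimeout(() => {
      unsubscribe();
      reject(new Error("Delegated run never started. Is pi-subagents installed? (pi install npm:pi-subagents)"));
    }, timeoutMs);
    const unsubscribe = onStart(() => {
      clearTimeout(timer);
      resolve();
    });
  });
}
```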
57
+
50
58
  ## Quick Start
51
59
 
52
60
  Add `model` (or omit it to inherit the current session model) and optionally `skill` to any prompt template:
@@ -82,6 +90,43 @@ $@
82
90
 
83
91
  Here `skill: surf` loads `~/.pi/agent/skills/surf/SKILL.md` and injects its content as a context message on the next turn before the agent handles your task. No decision-making, no read tool, just immediate expertise. It's a forcing function for when you know exactly what workflow the agent needs.
84
92
 
93
+ ## Delegated Subagent Execution
94
+
95
+ You can delegate a prompt template directly to the `subagent` extension without metaprompted tool-call instructions.
96
+
97
+ ```markdown
98
+ ---
99
+ model: anthropic/claude-sonnet-4-20250514
100
+ subagent: true
101
+ ---
102
+ Review and simplify this code: $@
103
+ ```
104
+
105
+ `subagent: true` uses the default `worker` agent. To target a specific agent, set a string value:
106
+
107
+ ```markdown
108
+ ---
109
+ model: anthropic/claude-sonnet-4-20250514
110
+ subagent: reviewer
111
+ inheritContext: true
112
+ ---
113
+ Audit this diff for correctness and edge cases: $@
114
+ ```
115
+
116
+ `inheritContext: true` maps to delegated `context: "fork"`. It is valid only when `subagent` is configured.
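A minimal sketch of how the two fields combine (the `DelegationRequest` and `toDelegationRequest` names below are hypothetical, not the extension's types): `subagent` selects the agent, `inheritContext` adds fork context, and `inheritContext` without `subagent` is rejected.

```ts
// Illustrative only: names and shapes here are assumptions, not the extension's types.
interface DelegationRequest {
  agent: string;    // "worker" unless the template names a specific agent
  context?: "fork"; // present when inheritContext: true
}

function toDelegationRequest(frontmatter: {
  subagent?: true | string;
  inheritContext?: boolean;
}): DelegationRequest | undefined {
  if (frontmatter.subagent === undefined) {
    if (frontmatter.inheritContext) {
      throw new Error("inheritContext is only valid when subagent is configured");
    }
    return undefined; // no delegation: the prompt runs through the normal path
  }
  const agent = typeof frontmatter.subagent === "string" ? frontmatter.subagent : "worker";
  return frontmatter.inheritContext ? { agent, context: "fork" } : { agent };
}
```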
117
+
118
+ Forked subagents receive a default preamble (from the subagent extension's `DEFAULT_FORK_PREAMBLE`) that anchors them to the task and prevents them from continuing the parent conversation.
119
+
120
+ During execution, a live progress widget appears above the editor showing elapsed time, tool count, token usage, and the current/last tool, matching the native subagent tool card layout. The widget updates in real time and clears when the run completes, replaced by a styled completion card with task preview, tool call history, output, and usage stats.
121
+
122
+ You can override delegation at runtime per invocation:
123
+
124
+ - `--subagent`
125
+ - `--subagent=<name>`
126
+ - `--subagent:<name>`
127
+
128
+ Runtime flags take precedence for that invocation only. Bare `--subagent` keeps the agent named in the template's `subagent` field when one is set; otherwise it defaults to `worker`.
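For reference, these flags are parsed by the `extractSubagentOverride` helper added to `args.ts` in this release (shown in the diff further down); expected results are annotated as comments.

```ts
import { extractSubagentOverride } from "./args.js";

// Bare flag: delegation on, agent left to the template (or the default `worker`).
extractSubagentOverride("deslop --subagent");
// → { args: "deslop", override: { enabled: true } }

// Named agent via ":" (or "=").
extractSubagentOverride("--subagent:reviewer fix the failing tests");
// → { args: "fix the failing tests", override: { enabled: true, agent: "reviewer" } }

// Flag-like text inside quotes is left alone and produces no override.
extractSubagentOverride('say "--subagent is just text here"');
// → { args: 'say "--subagent is just text here"' }
```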
129
+
85
130
  ## Frontmatter Fields
86
131
 
87
132
  | Field | Required | Default | Description |
@@ -90,6 +135,9 @@ Here `skill: surf` loads `~/.pi/agent/skills/surf/SKILL.md` and injects its cont
90
135
  | `chain` | Conditional | - | Chain declaration (`step -> step --loop 2`) for orchestration templates; body is ignored |
91
136
  | `skill` | No | - | Skill name to inject as next-turn context message |
92
137
  | `thinking` | No | - | Thinking level: `off`, `minimal`, `low`, `medium`, `high`, `xhigh` |
138
+ | `subagent` | No | - | Delegate execution to subagent mode (`true` for default `worker`, or explicit agent name string) |
139
+ | `inheritContext` | No | `false` | Only with `subagent`; when `true`, delegates with subagent `context: "fork"` |
140
+
93
141
  | `description` | No | - | Shown in autocomplete |
94
142
  | `restore` | No | `true` | Restore previous model and thinking level after response |
95
143
  | `fresh` | No | `false` | Collapse context between loop iterations (applies when looping via `--loop` or frontmatter `loop`) |
@@ -469,6 +517,8 @@ Once enabled, the agent sees `run-prompt` in its tool list and can call it with
469
517
  ```
470
518
  run-prompt({ command: "deslop --loop 5 --fresh" })
471
519
  run-prompt({ command: "deslop --loop" })
520
+ run-prompt({ command: "deslop --subagent" })
521
+ run-prompt({ command: "deslop --subagent:reviewer" })
472
522
  run-prompt({ command: "chain-prompts analyze -> fix --loop 3" })
473
523
  ```
474
524
 
@@ -514,4 +564,5 @@ The model switches, a skill context message is injected, the agent responds, and
514
564
  - Model restore state is in-memory. Closing pi mid-response loses restore state.
515
565
  - Model-less templates are only managed by this extension when they use extension features (for example `skill`, `thinking`, loop flags, or inline `<if-model ...>`). Plain prompt templates without extension features stay with pi's default prompt loader to avoid command conflicts.
516
566
  - In chains, model-less steps inherit the chain-start model snapshot, not the immediately previous step model. This is intentional for deterministic behavior.
567
+ - Delegated `subagent` prompts require [pi-subagents](https://github.com/nicobailon/pi-subagents/) (`pi install npm:pi-subagents`).
517
568
  - The `run-prompt` tool must be explicitly enabled with `/prompt-tool on` before the agent can use it.
package/args.ts CHANGED
@@ -11,6 +11,16 @@ export interface LoopFlags {
11
11
  converge: boolean;
12
12
  }
13
13
 
14
+ export interface SubagentOverride {
15
+ enabled: true;
16
+ agent?: string;
17
+ }
18
+
19
+ export interface SubagentOverrideExtraction {
20
+ args: string;
21
+ override?: SubagentOverride;
22
+ }
23
+
14
24
  export function extractLoopCount(argsString: string): LoopExtraction | null {
15
25
  let loopCount: number | null = null;
16
26
  let loopFound = false;
@@ -152,6 +162,58 @@ export function extractLoopFlags(argsString: string): LoopFlags {
152
162
  return { args: cleaned.trim(), fresh, converge: !noConverge };
153
163
  }
154
164
 
165
+ export function extractSubagentOverride(argsString: string): SubagentOverrideExtraction {
166
+ let override: SubagentOverride | undefined;
167
+ const tokensToRemove: Array<{ start: number; end: number }> = [];
168
+
169
+ let i = 0;
170
+ while (i < argsString.length) {
171
+ const char = argsString[i];
172
+
173
+ if (char === '"' || char === "'") {
174
+ const quote = char;
175
+ i++;
176
+ while (i < argsString.length && argsString[i] !== quote) i++;
177
+ if (i < argsString.length) i++;
178
+ continue;
179
+ }
180
+
181
+ if (/\s/.test(char)) {
182
+ i++;
183
+ continue;
184
+ }
185
+
186
+ const tokenStart = i;
187
+ while (i < argsString.length && !/\s/.test(argsString[i])) i++;
188
+ const token = argsString.slice(tokenStart, i);
189
+
190
+ if (token === "--subagent") {
191
+ tokensToRemove.push({ start: tokenStart, end: i });
192
+ override = { enabled: true };
193
+ continue;
194
+ }
195
+
196
+ if (token.startsWith("--subagent=") || token.startsWith("--subagent:")) {
197
+ tokensToRemove.push({ start: tokenStart, end: i });
198
+ const value = token.includes("=") ? token.slice("--subagent=".length) : token.slice("--subagent:".length);
199
+ override = value ? { enabled: true, agent: value } : { enabled: true };
200
+ }
201
+ }
202
+
203
+ if (!override) return { args: argsString.trim() };
204
+
205
+ tokensToRemove.sort((a, b) => b.start - a.start);
206
+ let cleaned = argsString;
207
+ for (const { start, end } of tokensToRemove) {
208
+ cleaned = cleaned.slice(0, start) + cleaned.slice(end);
209
+ }
210
+
211
+ return {
212
+ args: cleaned.trim(),
213
+ override,
214
+ };
215
+ }
216
+
155
217
  export function splitByUnquotedSeparator(input: string, separator: string): string[] {
156
218
  const parts: string[] = [];
157
219
  let start = 0;
package/chain-parser.ts CHANGED
@@ -135,12 +135,13 @@ export function parseChainSteps(args: string): ParsedChainSteps {
135
135
  invalidSegments.push(rawSegment);
136
136
  continue;
137
137
  }
138
- const tokens = parseCommandArgs(segment);
138
+ const { cleanedSegment, loopCount } = extractStepLoopCount(segment);
139
+ const tokens = parseCommandArgs(cleanedSegment);
139
140
  if (tokens.length === 0) {
140
141
  invalidSegments.push(segment);
141
142
  continue;
142
143
  }
143
- steps.push({ name: tokens[0], args: tokens.slice(1) });
144
+ steps.push({ name: tokens[0], args: tokens.slice(1), loopCount });
144
145
  }
145
146
 
146
147
  return { steps, sharedArgs: parseCommandArgs(argsPart), invalidSegments };
package/index.ts CHANGED
@@ -1,7 +1,7 @@
1
1
  import type { Model } from "@mariozechner/pi-ai";
2
2
  import type { ExtensionAPI, ExtensionCommandContext, ExtensionContext } from "@mariozechner/pi-coding-agent";
3
3
  import type { ThinkingLevel } from "@mariozechner/pi-agent-core";
4
- import { extractLoopCount, extractLoopFlags, parseCommandArgs } from "./args.js";
4
+ import { extractLoopCount, extractLoopFlags, extractSubagentOverride, parseCommandArgs, type SubagentOverride } from "./args.js";
5
5
  import { parseChainSteps, parseChainDeclaration, type ChainStep } from "./chain-parser.js";
6
6
  import { generateIterationSummary, didIterationMakeChanges, getIterationEntries } from "./loop-utils.js";
7
7
  import { notify, summarizePromptDiagnostics, diagnosticsFingerprint } from "./notifications.js";
@@ -9,6 +9,9 @@ import { preparePromptExecution } from "./prompt-execution.js";
9
9
  import { buildPromptCommandDescription, loadPromptsWithModel, readSkillContent, resolveSkillPath, type PromptWithModel } from "./prompt-loader.js";
10
10
  import { renderSkillLoaded, type SkillLoadedDetails } from "./skill-loaded-renderer.js";
11
11
  import { createToolManager } from "./tool-manager.js";
12
+ import { executeSubagentPromptStep } from "./subagent-step.js";
13
+ import { PROMPT_TEMPLATE_SUBAGENT_MESSAGE_TYPE } from "./subagent-runtime.js";
14
+ import { renderDelegatedSubagentResult } from "./subagent-renderer.js";
12
15
 
13
16
  interface LoopState {
14
17
  currentIteration: number;
@@ -39,6 +42,10 @@ interface ExecutionErrorState {
39
42
  error: unknown;
40
43
  }
41
44
 
45
+ interface PromptStepResult {
46
+ changed: boolean;
47
+ }
48
+
42
49
  export default function promptModelExtension(pi: ExtensionAPI) {
43
50
  let prompts = new Map<string, PromptWithModel>();
44
51
  let previousModel: Model<any> | undefined;
@@ -72,6 +79,7 @@ export default function promptModelExtension(pi: ExtensionAPI) {
72
79
  }
73
80
 
74
81
  pi.registerMessageRenderer<SkillLoadedDetails>("skill-loaded", renderSkillLoaded);
82
+ pi.registerMessageRenderer(PROMPT_TEMPLATE_SUBAGENT_MESSAGE_TYPE, renderDelegatedSubagentResult);
75
83
 
76
84
  function registerPromptCommand(name: string) {
77
85
  pi.registerCommand(name, {
@@ -172,6 +180,78 @@ export default function promptModelExtension(pi: ExtensionAPI) {
172
180
  }
173
181
  }
174
182
 
183
+ function shouldDelegatePrompt(prompt: PromptWithModel, override?: SubagentOverride): boolean {
184
+ return prompt.subagent !== undefined || override?.enabled === true;
185
+ }
186
+
187
+ async function executePromptStep(
188
+ prompt: PromptWithModel,
189
+ args: string[],
190
+ ctx: ExtensionCommandContext,
191
+ currentModel: Model<any> | undefined,
192
+ override?: SubagentOverride,
193
+ inheritedModel?: Model<any>,
194
+ ): Promise<PromptStepResult | "aborted"> {
195
+ if (shouldDelegatePrompt(prompt, override)) {
196
+ const delegated = await executeSubagentPromptStep({
197
+ pi,
198
+ prompt,
199
+ args,
200
+ ctx,
201
+ currentModel,
202
+ override,
203
+ inheritedModel,
204
+ });
205
+ if (!delegated) {
206
+ throw new Error(`Prompt \`${prompt.name}\` is not configured for delegated execution.`);
207
+ }
208
+ return { changed: delegated.changed };
209
+ }
210
+
211
+ const prepared =
212
+ inheritedModel === undefined
213
+ ? await preparePromptExecution(prompt, args, currentModel, ctx.modelRegistry)
214
+ : await preparePromptExecution(prompt, args, currentModel, ctx.modelRegistry, { inheritedModel });
215
+ if (!prepared) {
216
+ notify(ctx, `No available model from: ${prompt.models.join(", ")}`, "error");
217
+ return "aborted";
218
+ }
219
+ if ("message" in prepared) {
220
+ if (prepared.warning) notify(ctx, prepared.warning, "warning");
221
+ notify(ctx, prepared.message, "error");
222
+ return "aborted";
223
+ }
224
+ if (prepared.warning) {
225
+ notify(ctx, prepared.warning, "warning");
226
+ }
227
+
228
+ const skillResolution = resolveSkillMessage(prompt.skill, ctx.cwd);
229
+ if (skillResolution.kind === "error") {
230
+ notify(ctx, skillResolution.error, "error");
231
+ return "aborted";
232
+ }
233
+
234
+ if (!prepared.selectedModel.alreadyActive) {
235
+ const switched = await pi.setModel(prepared.selectedModel.model);
236
+ if (!switched) {
237
+ notify(ctx, `Failed to switch to model ${prepared.selectedModel.model.provider}/${prepared.selectedModel.model.id}`, "error");
238
+ return "aborted";
239
+ }
240
+ runtimeModel = prepared.selectedModel.model;
241
+ }
242
+
243
+ if (prompt.thinking) {
244
+ pi.setThinkingLevel(prompt.thinking);
245
+ }
246
+ pendingSkillMessage = skillResolution.kind === "ready" ? skillResolution.message : undefined;
247
+
248
+ const startId = ctx.sessionManager.getLeafId();
249
+ pi.sendUserMessage(prepared.content);
250
+ await waitForTurnStart(ctx);
251
+ await ctx.waitForIdle();
252
+ return { changed: didIterationMakeChanges(getIterationEntries(ctx, startId)) };
253
+ }
254
+
175
255
  async function restoreSessionState(
176
256
  ctx: ExtensionContext,
177
257
  originalModel: Model<any> | undefined,
@@ -291,6 +371,7 @@ export default function promptModelExtension(pi: ExtensionAPI) {
291
371
  freshFlag: boolean,
292
372
  converge: boolean,
293
373
  ctx: ExtensionCommandContext,
374
+ subagentOverride?: SubagentOverride,
294
375
  ) {
295
376
  refreshPrompts(ctx.cwd, ctx);
296
377
  const initialPrompt = prompts.get(name);
@@ -331,51 +412,24 @@ export default function promptModelExtension(pi: ExtensionAPI) {
331
412
  break;
332
413
  }
333
414
 
334
- const prepared = await preparePromptExecution(prompt, parseCommandArgs(cleanedArgs), currentModel, ctx.modelRegistry);
335
- if (!prepared) {
336
- notify(ctx, `No available model from: ${prompt.models.join(", ")}`, "error");
337
- break;
338
- }
339
- if ("message" in prepared) {
340
- if (prepared.warning) notify(ctx, prepared.warning, "warning");
341
- notify(ctx, prepared.message, "error");
342
- break;
343
- }
344
- if (prepared.warning) {
345
- notify(ctx, prepared.warning, "warning");
346
- }
347
-
348
- const skillResolution = resolveSkillMessage(prompt.skill, ctx.cwd);
349
- if (skillResolution.kind === "error") {
350
- notify(ctx, skillResolution.error, "error");
351
- break;
352
- }
353
-
354
- if (!prepared.selectedModel.alreadyActive) {
355
- const switched = await pi.setModel(prepared.selectedModel.model);
356
- if (!switched) {
357
- notify(ctx, `Failed to switch to model ${prepared.selectedModel.model.provider}/${prepared.selectedModel.model.id}`, "error");
358
- break;
359
- }
360
- runtimeModel = prepared.selectedModel.model;
361
- }
362
- currentModel = prepared.selectedModel.model;
363
- currentThinking = pi.getThinkingLevel();
364
-
365
- if (prompt.thinking) {
366
- pi.setThinkingLevel(prompt.thinking);
367
- currentThinking = pi.getThinkingLevel();
368
- }
369
-
370
- pendingSkillMessage = skillResolution.kind === "ready" ? skillResolution.message : undefined;
371
415
  const iterationStartId = ctx.sessionManager.getLeafId();
416
+ const stepResult = await executePromptStep(
417
+ prompt,
418
+ parseCommandArgs(cleanedArgs),
419
+ ctx,
420
+ currentModel,
421
+ subagentOverride,
422
+ );
423
+ if (stepResult === "aborted") break;
372
424
 
373
- pi.sendUserMessage(prepared.content);
374
- await waitForTurnStart(ctx);
375
- await ctx.waitForIdle();
425
+ currentModel = getCurrentModel(ctx);
426
+ currentThinking = pi.getThinkingLevel();
376
427
  completedIterations++;
377
428
 
378
- if (useConverge && (isUnlimited || effectiveMax > 1) && !didIterationMakeChanges(getIterationEntries(ctx, iterationStartId))) {
429
+ const iterationChanged = shouldDelegatePrompt(prompt, subagentOverride)
430
+ ? stepResult.changed
431
+ : didIterationMakeChanges(getIterationEntries(ctx, iterationStartId));
432
+ if (useConverge && (isUnlimited || effectiveMax > 1) && !iterationChanged) {
379
433
  converged = true;
380
434
  break;
381
435
  }
@@ -398,8 +452,8 @@ export default function promptModelExtension(pi: ExtensionAPI) {
398
452
  shouldRestore,
399
453
  savedModel,
400
454
  savedThinking,
401
- currentModel,
402
- currentThinking,
455
+ getCurrentModel(ctx),
456
+ pi.getThinkingLevel(),
403
457
  loopErrorState,
404
458
  "loop",
405
459
  );
@@ -428,6 +482,7 @@ export default function promptModelExtension(pi: ExtensionAPI) {
428
482
  converge: boolean,
429
483
  shouldRestore: boolean,
430
484
  ctx: ExtensionCommandContext,
485
+ subagentOverride?: SubagentOverride,
431
486
  ) {
432
487
  const validateChainSteps = (): boolean => {
433
488
  const missingTemplates = steps.filter((step) => !prompts.has(step.name));
@@ -481,13 +536,13 @@ export default function promptModelExtension(pi: ExtensionAPI) {
481
536
  if (!validateChainSteps()) break;
482
537
  }
483
538
 
484
- const iterationStartId = ctx.sessionManager.getLeafId();
485
539
  const templates = steps.map((step) => ({
486
540
  ...prompts.get(step.name)!,
487
541
  stepArgs: step.args,
488
542
  stepLoop: step.loopCount ?? 1,
489
543
  }));
490
544
  let aborted = false;
545
+ let iterationChanged = false;
491
546
 
492
547
  for (const [index, template] of templates.entries()) {
493
548
  const stepNumber = index + 1;
@@ -515,63 +570,28 @@ export default function promptModelExtension(pi: ExtensionAPI) {
515
570
  const iterSuffix = stepLoop > 1 ? ` (iter ${stepIteration + 1}/${stepLoop})` : "";
516
571
  notify(ctx, `${loopPrefix}Step ${stepNumber}/${templates.length}: ${template.name}${iterSuffix} ${buildPromptCommandDescription(template)}`, "info");
517
572
 
518
- const prepared = await preparePromptExecution(template, effectiveArgs, currentModel, ctx.modelRegistry, {
519
- inheritedModel: chainInheritedModel,
520
- });
521
- if (!prepared) {
522
- notify(
523
- ctx,
524
- `Step ${stepNumber}/${templates.length} failed: no available model from ${template.models.join(", ")}`,
525
- "error",
526
- );
527
- aborted = true;
528
- break;
529
- }
530
- if ("message" in prepared) {
531
- if (prepared.warning) notify(ctx, prepared.warning, "warning");
532
- notify(ctx, `Step ${stepNumber}/${templates.length} failed: ${prepared.message}`, "error");
533
- aborted = true;
534
- break;
535
- }
536
- if (prepared.warning) {
537
- notify(ctx, prepared.warning, "warning");
538
- }
539
-
540
- const skillResolution = resolveSkillMessage(template.skill, ctx.cwd);
541
- if (skillResolution.kind === "error") {
542
- notify(ctx, `Step ${stepNumber}/${templates.length} failed: ${skillResolution.error}`, "error");
573
+ const stepIterationStartId = ctx.sessionManager.getLeafId();
574
+ const stepResult = await executePromptStep(
575
+ template,
576
+ effectiveArgs,
577
+ ctx,
578
+ currentModel,
579
+ subagentOverride,
580
+ chainInheritedModel,
581
+ );
582
+ if (stepResult === "aborted") {
543
583
  aborted = true;
544
584
  break;
545
585
  }
546
586
 
547
- if (!prepared.selectedModel.alreadyActive) {
548
- const switched = await pi.setModel(prepared.selectedModel.model);
549
- if (!switched) {
550
- notify(
551
- ctx,
552
- `Step ${stepNumber}/${templates.length} failed: could not switch to ${prepared.selectedModel.model.provider}/${prepared.selectedModel.model.id}`,
553
- "error",
554
- );
555
- aborted = true;
556
- break;
557
- }
558
- runtimeModel = prepared.selectedModel.model;
559
- }
560
-
561
- currentModel = prepared.selectedModel.model;
587
+ currentModel = getCurrentModel(ctx);
562
588
  currentThinking = pi.getThinkingLevel();
563
- if (template.thinking) {
564
- pi.setThinkingLevel(template.thinking);
565
- currentThinking = pi.getThinkingLevel();
566
- }
567
- pendingSkillMessage = skillResolution.kind === "ready" ? skillResolution.message : undefined;
568
-
569
- const stepIterationStartId = ctx.sessionManager.getLeafId();
570
- pi.sendUserMessage(prepared.content);
571
- await waitForTurnStart(ctx);
572
- await ctx.waitForIdle();
573
589
 
574
- if (stepLoop > 1 && template.converge !== false && !didIterationMakeChanges(getIterationEntries(ctx, stepIterationStartId))) {
590
+ const stepChanged = shouldDelegatePrompt(template, subagentOverride)
591
+ ? stepResult.changed
592
+ : didIterationMakeChanges(getIterationEntries(ctx, stepIterationStartId));
593
+ if (stepChanged) iterationChanged = true;
594
+ if (stepLoop > 1 && template.converge !== false && !stepChanged) {
575
595
  break;
576
596
  }
577
597
  }
@@ -588,7 +608,7 @@ export default function promptModelExtension(pi: ExtensionAPI) {
588
608
  if (aborted) break;
589
609
  completedIterations++;
590
610
 
591
- if (useConverge && (isUnlimited || effectiveMax > 1) && !didIterationMakeChanges(getIterationEntries(ctx, iterationStartId))) {
611
+ if (useConverge && (isUnlimited || effectiveMax > 1) && !iterationChanged) {
592
612
  converged = true;
593
613
  break;
594
614
  }
@@ -612,8 +632,8 @@ export default function promptModelExtension(pi: ExtensionAPI) {
612
632
  shouldRestore,
613
633
  originalModel,
614
634
  originalThinking,
615
- currentModel,
616
- currentThinking,
635
+ getCurrentModel(ctx),
636
+ pi.getThinkingLevel(),
617
637
  chainErrorState,
618
638
  "chain",
619
639
  );
@@ -644,12 +664,15 @@ export default function promptModelExtension(pi: ExtensionAPI) {
644
664
  return;
645
665
  }
646
666
 
667
+ const subagent = extractSubagentOverride(args);
668
+ const argsWithoutSubagent = subagent.args;
669
+
647
670
  if (prompt.chain) {
648
- const loop = extractLoopCount(args);
671
+ const loop = extractLoopCount(argsWithoutSubagent);
649
672
  let totalIterations: number | null = prompt.loop ?? 1;
650
673
  let fresh = false;
651
674
  let converge = true;
652
- let cleanedArgs = args;
675
+ let cleanedArgs = argsWithoutSubagent;
653
676
 
654
677
  if (loop) {
655
678
  totalIterations = loop.loopCount;
@@ -657,7 +680,7 @@ export default function promptModelExtension(pi: ExtensionAPI) {
657
680
  converge = loop.converge;
658
681
  cleanedArgs = loop.args;
659
682
  } else if (prompt.loop !== undefined) {
660
- const flags = extractLoopFlags(args);
683
+ const flags = extractLoopFlags(argsWithoutSubagent);
661
684
  fresh = flags.fresh;
662
685
  converge = flags.converge;
663
686
  cleanedArgs = flags.args;
@@ -681,68 +704,44 @@ export default function promptModelExtension(pi: ExtensionAPI) {
681
704
  converge && prompt.converge !== false,
682
705
  prompt.restore,
683
706
  ctx,
707
+ subagent.override,
684
708
  );
685
709
  return;
686
710
  }
687
711
 
688
- const loop = extractLoopCount(args);
712
+ const loop = extractLoopCount(argsWithoutSubagent);
689
713
  if (loop) {
690
- await runPromptLoop(name, loop.args, loop.loopCount, loop.fresh, loop.converge, ctx);
714
+ await runPromptLoop(name, loop.args, loop.loopCount, loop.fresh, loop.converge, ctx, subagent.override);
691
715
  return;
692
716
  }
693
717
 
694
718
  if (prompt.loop !== undefined) {
695
- const flags = extractLoopFlags(args);
696
- await runPromptLoop(name, flags.args, prompt.loop, flags.fresh, flags.converge, ctx);
719
+ const flags = extractLoopFlags(argsWithoutSubagent);
720
+ await runPromptLoop(name, flags.args, prompt.loop, flags.fresh, flags.converge, ctx, subagent.override);
697
721
  return;
698
722
  }
699
723
 
700
724
  const savedModel = getCurrentModel(ctx);
701
725
  const savedThinking = pi.getThinkingLevel();
702
- const prepared = await preparePromptExecution(prompt, parseCommandArgs(args), savedModel, ctx.modelRegistry);
703
- if (!prepared) {
704
- notify(ctx, `No available model from: ${prompt.models.join(", ")}`, "error");
705
- return;
706
- }
707
- if ("message" in prepared) {
708
- if (prepared.warning) notify(ctx, prepared.warning, "warning");
709
- notify(ctx, prepared.message, "error");
710
- return;
711
- }
712
- if (prepared.warning) {
713
- notify(ctx, prepared.warning, "warning");
714
- }
715
-
716
- const skillResolution = resolveSkillMessage(prompt.skill, ctx.cwd);
717
- if (skillResolution.kind === "error") {
718
- notify(ctx, skillResolution.error, "error");
719
- return;
720
- }
726
+ const stepResult = await executePromptStep(
727
+ prompt,
728
+ parseCommandArgs(argsWithoutSubagent),
729
+ ctx,
730
+ savedModel,
731
+ subagent.override,
732
+ );
733
+ if (stepResult === "aborted") return;
721
734
 
722
- if (!prepared.selectedModel.alreadyActive) {
723
- const switched = await pi.setModel(prepared.selectedModel.model);
724
- if (!switched) {
725
- notify(ctx, `Failed to switch to model ${prepared.selectedModel.model.provider}/${prepared.selectedModel.model.id}`, "error");
726
- return;
735
+ if (!shouldDelegatePrompt(prompt, subagent.override) && prompt.restore) {
736
+ const currentModel = getCurrentModel(ctx);
737
+ if (savedModel && currentModel && !sameModel(savedModel, currentModel)) {
738
+ previousModel = savedModel;
739
+ previousThinking = savedThinking;
727
740
  }
728
- runtimeModel = prepared.selectedModel.model;
729
- }
730
-
731
- if (prompt.restore && !prepared.selectedModel.alreadyActive) {
732
- previousModel = savedModel;
733
- previousThinking = savedThinking;
734
- }
735
- if (prompt.thinking) {
736
- if (prompt.restore && previousThinking === undefined && prompt.thinking !== savedThinking) {
741
+ if (prompt.thinking && previousThinking === undefined && prompt.thinking !== savedThinking) {
737
742
  previousThinking = savedThinking;
738
743
  }
739
- pi.setThinkingLevel(prompt.thinking);
740
744
  }
741
- pendingSkillMessage = skillResolution.kind === "ready" ? skillResolution.message : undefined;
742
-
743
- pi.sendUserMessage(prepared.content);
744
- await waitForTurnStart(ctx);
745
- await ctx.waitForIdle();
746
745
  }
747
746
 
748
747
  function resetSessionScopedState(ctx: ExtensionContext) {
@@ -840,8 +839,9 @@ export default function promptModelExtension(pi: ExtensionAPI) {
840
839
  storedCommandCtx = ctx;
841
840
  refreshPrompts(ctx.cwd, ctx);
842
841
 
843
- const loop = extractLoopCount(args);
844
- const cleanedArgs = loop ? loop.args : args;
842
+ const subagent = extractSubagentOverride(args);
843
+ const loop = extractLoopCount(subagent.args);
844
+ const cleanedArgs = loop ? loop.args : subagent.args;
845
845
 
846
846
  const { steps, sharedArgs, invalidSegments } = parseChainSteps(cleanedArgs);
847
847
  if (invalidSegments.length > 0) {
@@ -853,7 +853,16 @@ export default function promptModelExtension(pi: ExtensionAPI) {
853
853
  return;
854
854
  }
855
855
 
856
- await runSharedChainExecution(steps, sharedArgs, loop ? loop.loopCount : 1, loop?.fresh === true, loop?.converge ?? true, true, ctx);
856
+ await runSharedChainExecution(
857
+ steps,
858
+ sharedArgs,
859
+ loop ? loop.loopCount : 1,
860
+ loop?.fresh === true,
861
+ loop?.converge ?? true,
862
+ true,
863
+ ctx,
864
+ subagent.override,
865
+ );
857
866
  }
858
867
 
859
868
  refreshPrompts(process.cwd());