pi-prompt-template-model 0.6.4 → 0.6.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,6 +1,26 @@
1
1
  # Changelog
2
2
 
3
- ## [Unreleased]
3
+ ## [0.6.6] - 2026-03-28
4
+
5
+ ### Added
6
+ - Added `--model=provider/model-id` runtime flag to override a template's model for a single invocation. Works with single execution, loops, and delegation.
7
+ - Added `--fork` runtime flag to enable `inheritContext` (forked context) at invocation time. Implies `--subagent` if not already set.
8
+ - Inline loop iterations now include a `[Loop 2/5]` prefix so the agent knows it's in a loop and which iteration it's on. Delegated (subagent) loops are unaffected.
9
+ - Added `loop: unlimited` (and `loop: true`) frontmatter for open-ended loops that run until convergence (if enabled), user interrupt, or the 999-iteration safety cap.
10
+ - Added model rotation for loop iterations via `rotate: true` frontmatter. Cycles through comma-separated models and thinking levels each iteration instead of using fallback semantics.
11
+
12
+ ### Fixed
13
+ - Pressing Escape during a loop or chain iteration now stops the loop. Previously, aborted inline turns were treated as "no changes" and the loop continued.
14
+ - Delegation errors during loop iterations no longer abort the entire loop. The error is reported and the loop continues to the next iteration (useful with model rotation where one model may fail but others succeed).
15
+ - Per-step bare `--loop` in chain declarations (e.g., `double-check --loop -> deslop`) now correctly runs unlimited iterations instead of running once.
16
+
17
+ ### Changed
18
+ - Unlimited loops (`--loop` bare or `loop: unlimited`) no longer force convergence on. Convergence follows the `converge` field like bounded loops. Safety cap raised from 50 to 999.
19
+
20
+ ## [0.6.5] - 2026-03-24
21
+
22
+ ### Added
23
+ - Added delegated chain-step context summaries via `chainContext: summary` (chain frontmatter), `/chain-prompts ... --chain-context` (command-level), and per-step `--with-context` (single delegated chain steps).
4
24
 
5
25
  ## [0.6.4] - 2026-03-23
6
26
 
package/README.md CHANGED
@@ -62,13 +62,15 @@ All fields are optional. Templates that don't use any extension features (no `mo
62
62
  | `thinking` | — | Thinking level for the model: `off`, `minimal`, `low`, `medium`, `high`, or `xhigh`. |
63
63
  | `description` | — | Short text shown next to the command in autocomplete. |
64
64
  | `chain` | — | Declares a reusable pipeline of templates (`step -> step`). When set, the body is ignored. See [Chain Templates](#chain-templates). |
65
+ | `chainContext` | — | Chain templates only. Set to `summary` so delegated steps receive a compact summary of what previous steps did. Steps with `inheritContext: true` are excluded. See [Chain context for delegated steps](#chain-context-for-delegated-steps). |
65
66
 
66
67
  ### Execution Control
67
68
 
68
69
  | Field | Default | What it does |
69
70
  |-------|---------|--------------|
70
71
  | `restore` | `true` | After the command finishes, switch back to whatever model and thinking level were active before. Set `false` to stay on the new model. |
71
- | `loop` | — | Run this template multiple times by default (1–999). CLI `--loop` overrides this. See [Loop Execution](#loop-execution). |
72
+ | `loop` | — | Run this template multiple times by default (1–999, `true`, or `unlimited`). CLI `--loop` overrides this. See [Loop Execution](#loop-execution). |
73
+ | `rotate` | `false` | When `true` and looping, cycle through models in the `model` list instead of using fallback semantics. Thinking levels can also be comma-separated to pair with each model. |
72
74
  | `fresh` | `false` | When looping, collapse the conversation between iterations to a brief summary instead of carrying the full context forward. Saves tokens on long loops. |
73
75
  | `converge` | `true` | When looping, stop early if an iteration makes no file changes. Set `false` to always run every iteration. |
74
76
 
@@ -240,6 +242,17 @@ During execution, a live progress widget appears above the editor showing elapse
240
242
 
241
243
  You can override delegation at runtime per invocation with `--subagent`, `--subagent=<name>`, `--subagent:<name>`, or `--cwd=<path>`. `--cwd=<path>` must be absolute after optional `~/...` expansion. Runtime flags take precedence for that invocation only.
242
244
 
245
+ Two additional runtime flags work for any prompt (not just delegated ones):
246
+
247
+ - `--model=provider/model-id` — override the template's `model` for this invocation. Works with single execution, loops, and delegation.
248
+ - `--fork` — run with `inheritContext` (forked context). Implies `--subagent` if not already set.
249
+
250
+ ```
251
+ /double-check --model=anthropic/claude-opus-4-6
252
+ /double-check --fork --subagent:worker
253
+ /deslop --model=openai/gpt-5.4 --loop 3
254
+ ```
255
+
243
256
  ## Loop Execution
244
257
 
245
258
  Run a template multiple times with `--loop`:
@@ -247,7 +260,7 @@ Run a template multiple times with `--loop`:
247
260
  ```
248
261
  /deslop --loop 5
249
262
  /deslop --loop=5
250
- /deslop --loop # unlimited — runs until convergence (50-iteration cap)
263
+ /deslop --loop # unlimited — runs until convergence or cap (999)
251
264
  ```
252
265
 
253
266
  You can also set a default in frontmatter. CLI `--loop` always overrides:
@@ -258,11 +271,22 @@ loop: 5
258
271
  ---
259
272
  ```
260
273
 
274
+ Use `loop: unlimited` (or `loop: true`) for open-ended loops that run until convergence (when `converge` is enabled), user interrupt, or the safety cap of 999 iterations:
275
+
276
+ ```markdown
277
+ ---
278
+ loop: unlimited
279
+ converge: false
280
+ fresh: true
281
+ subagent: true
282
+ ---
283
+ ```
284
+
261
285
  ### How looping works
262
286
 
263
287
  Each iteration runs the same prompt. By default, context accumulates — iteration 3 sees the full conversation from iterations 1 and 2 and builds on that work.
264
288
 
265
- **Convergence**: If an iteration makes no file changes (no `write` or `edit` tool calls), the loop stops early. This is on by default. Use `--no-converge` or `converge: false` to always run every iteration. Bare `--loop` (unlimited) always forces convergence on, since its whole purpose is "run until nothing changes."
289
+ **Convergence**: If an iteration makes no file changes (no `write` or `edit` tool calls), the loop stops early. This is on by default. Use `--no-converge` or `converge: false` to always run every iteration.
266
290
 
267
291
  **Fresh context**: Add `--fresh` (or `fresh: true` in frontmatter) to collapse the conversation between iterations. Each iteration gets a clean slate with only brief summaries of what previous iterations did. Good for long loops where accumulated context would blow up the token count.
268
292
 
@@ -270,6 +294,60 @@ Each iteration runs the same prompt. By default, context accumulates — iterati
270
294
 
271
295
  Model, thinking level, and skill are maintained throughout. If `restore: true` (the default), everything is restored after the final iteration.
272
296
 
297
+ ## Model Rotation
298
+
299
+ `rotate: true` turns a comma-separated `model` list from a fallback chain into a cycling list. Each loop iteration uses the next model in the list, wrapping around:
300
+
301
+ ```markdown
302
+ ---
303
+ model: claude-opus-4-6, gpt-5.4, gpt-5.3-codex
304
+ thinking: high, xhigh, off
305
+ loop: 9
306
+ rotate: true
307
+ fresh: true
308
+ ---
309
+ Review and fix issues in this codebase.
310
+ ```
311
+
312
+ Iteration 1 runs Opus + `high`, iteration 2 runs GPT-5.4 + `xhigh`, iteration 3 runs Codex + `off`, then wraps back to Opus. The status bar shows which model is active: `loop 2/9 · gpt-5.4 xhigh`.
313
+
314
+ This is especially useful for [ralph-style loops](https://ghuntley.com/ralph/) where different models catch different things. The `subagent` examples below require [pi-subagents](https://github.com/nicobailon/pi-subagents/). For example, here is a single-model ralph loop that delegates with fresh context on each iteration:
315
+
316
+ ```markdown
317
+ ---
318
+ model: claude-sonnet-4-20250514
319
+ subagent: true
320
+ inheritContext: true
321
+ loop: 5
322
+ fresh: true
323
+ ---
324
+ Simplify this code: $@
325
+ ```
326
+
327
+ Add `rotate` and multiple models to cycle different perspectives on each pass:
328
+
329
+ ```markdown
330
+ ---
331
+ model: claude-opus-4-6, gpt-5.4, gpt-5.3-codex
332
+ thinking: xhigh, high, high
333
+ loop: 9
334
+ rotate: true
335
+ fresh: true
336
+ subagent: true
337
+ ---
338
+ Review and fix issues in this codebase.
339
+ ```
340
+
341
+ Each iteration gets fresh context, a different model, and its own thinking level. Convergence stops the loop when an iteration makes no file changes — use `converge: false` to guarantee every model gets at least one shot.
342
+
343
+ `thinking` pairing with `rotate: true`:
344
+
345
+ - Single value (`thinking: high`) — applied to every model.
346
+ - Comma-separated (`thinking: high, xhigh, off`) — positional, must match the number of models.
347
+ - Omitted — each iteration inherits the session default.
348
+
349
+ Without `loop`, `rotate` has no effect and comma-separated `model` keeps normal fallback behavior.
350
+
273
351
  ## Chaining Templates
274
352
 
275
353
  `/chain-prompts` runs multiple templates in sequence. Each step uses its own model, skill, and thinking level, while conversation context flows between them:
@@ -328,6 +406,33 @@ This runs the full analyze → fix chain 3 times, with fresh context between ite
328
406
 
329
407
  When a chain template sets `cwd`, it becomes the default delegated subprocess working directory for all delegated steps in that chain. Runtime `--cwd=<path>` overrides the chain template value.
330
408
 
409
+ ### Chain context for delegated steps
410
+
411
+ Delegated chain steps start fresh — they don't see what earlier steps did. Chain context prepends a compact summary of previous steps to each delegated task so later steps can build on earlier work.
412
+
413
+ Enable it chain-wide with `chainContext: summary` in frontmatter or `--chain-context` on the CLI:
414
+
415
+ ```markdown
416
+ ---
417
+ chain: analyze -> fix
418
+ chainContext: summary
419
+ ---
420
+ ```
421
+
422
+ ```
423
+ /chain-prompts analyze -> fix --chain-context
424
+ ```
425
+
426
+ To enable it for a single step, attach `--with-context` to that step name:
427
+
428
+ ```
429
+ /chain-prompts analyze -> reviewer --with-context -> summarize
430
+ ```
431
+
432
+ Here only `reviewer` receives the summary of `analyze`. The `summarize` step does not.
433
+
434
+ Steps using `inheritContext: true` already fork the full parent conversation and skip the summary preamble. `--with-context` is not supported inside `parallel(...)` groups. When a chain uses `loop`, summaries reset each iteration.
435
+
331
436
  ### Parallel and looping from the CLI
332
437
 
333
438
  Parallel groups work in `/chain-prompts` too:
@@ -359,6 +464,7 @@ Once enabled, the agent sees `run-prompt` in its tool list:
359
464
 
360
465
  ```
361
466
  run-prompt({ command: "deslop --loop 5 --fresh" })
467
+ run-prompt({ command: "chain-prompts analyze -> fix --chain-context" })
362
468
  run-prompt({ command: "chain-prompts analyze -> fix --loop 3" })
363
469
  run-prompt({ command: "deslop --subagent" })
364
470
  ```
package/args.ts CHANGED
@@ -20,6 +20,8 @@ export interface SubagentOverrideExtraction {
20
20
  args: string;
21
21
  override?: SubagentOverride;
22
22
  cwd?: string;
23
+ model?: string;
24
+ fork?: boolean;
23
25
  }
24
26
 
25
27
  export function extractLoopCount(argsString: string): LoopExtraction | null {
@@ -113,7 +115,7 @@ export function extractLoopCount(argsString: string): LoopExtraction | null {
113
115
  cleaned = cleaned.slice(0, start) + cleaned.slice(end);
114
116
  }
115
117
 
116
- const converge = loopCount === null ? true : !noConverge;
118
+ const converge = !noConverge;
117
119
  return { args: cleaned.trim(), loopCount, fresh, converge };
118
120
  }
119
121
 
@@ -163,9 +165,55 @@ export function extractLoopFlags(argsString: string): LoopFlags {
163
165
  return { args: cleaned.trim(), fresh, converge: !noConverge };
164
166
  }
165
167
 
168
+ export function extractChainContextFlag(argsString: string): { args: string; chainContext: boolean } {
169
+ let chainContext = false;
170
+ const tokensToRemove: Array<{ start: number; end: number }> = [];
171
+
172
+ let i = 0;
173
+ while (i < argsString.length) {
174
+ const char = argsString[i];
175
+
176
+ if (char === '"' || char === "'") {
177
+ const quote = char;
178
+ i++;
179
+ while (i < argsString.length && argsString[i] !== quote) i++;
180
+ if (i < argsString.length) i++;
181
+ continue;
182
+ }
183
+
184
+ if (/\s/.test(char)) {
185
+ i++;
186
+ continue;
187
+ }
188
+
189
+ const tokenStart = i;
190
+ while (i < argsString.length && !/\s/.test(argsString[i])) i++;
191
+ const token = argsString.slice(tokenStart, i);
192
+
193
+ if (token === "--chain-context") {
194
+ chainContext = true;
195
+ tokensToRemove.push({ start: tokenStart, end: i });
196
+ }
197
+ }
198
+
199
+ if (tokensToRemove.length === 0) {
200
+ return { args: argsString.trim(), chainContext: false };
201
+ }
202
+
203
+ tokensToRemove.sort((a, b) => b.start - a.start);
204
+ let cleaned = argsString;
205
+ for (const { start, end } of tokensToRemove) {
206
+ cleaned = cleaned.slice(0, start) + cleaned.slice(end);
207
+ }
208
+
209
+ return { args: cleaned.trim(), chainContext };
210
+ }
211
+
166
212
  export function extractSubagentOverride(argsString: string): SubagentOverrideExtraction {
167
213
  let override: SubagentOverride | undefined;
168
214
  let cwdRaw: string | undefined;
215
+ let modelRaw: string | undefined;
216
+ let fork = false;
169
217
  const tokensToRemove: Array<{ start: number; end: number }> = [];
170
218
 
171
219
  let i = 0;
@@ -206,6 +254,20 @@ export function extractSubagentOverride(argsString: string): SubagentOverrideExt
206
254
  tokensToRemove.push({ start: tokenStart, end: i });
207
255
  const value = token.slice("--cwd=".length);
208
256
  cwdRaw = value || undefined;
257
+ continue;
258
+ }
259
+
260
+ if (token.startsWith("--model=")) {
261
+ tokensToRemove.push({ start: tokenStart, end: i });
262
+ const value = token.slice("--model=".length);
263
+ modelRaw = value || undefined;
264
+ continue;
265
+ }
266
+
267
+ if (token === "--fork") {
268
+ tokensToRemove.push({ start: tokenStart, end: i });
269
+ fork = true;
270
+ continue;
209
271
  }
210
272
  }
211
273
 
@@ -217,10 +279,14 @@ export function extractSubagentOverride(argsString: string): SubagentOverrideExt
217
279
  cleaned = cleaned.slice(0, start) + cleaned.slice(end);
218
280
  }
219
281
 
282
+ if (fork && !override) override = { enabled: true };
283
+
220
284
  return {
221
285
  args: cleaned.trim(),
222
286
  ...(override ? { override } : {}),
223
287
  ...(cwdRaw !== undefined ? { cwd: cwdRaw } : {}),
288
+ ...(modelRaw !== undefined ? { model: modelRaw } : {}),
289
+ ...(fork ? { fork: true } : {}),
224
290
  };
225
291
  }
226
292
 
package/chain-parser.ts CHANGED
@@ -3,7 +3,8 @@ import { parseCommandArgs } from "./args.js";
3
3
  export interface ChainStep {
4
4
  name: string;
5
5
  args: string[];
6
- loopCount?: number;
6
+ loopCount?: number | null;
7
+ withContext?: boolean;
7
8
  }
8
9
 
9
10
  export interface ParallelChainStep {
@@ -80,15 +81,23 @@ function scanSegmentTokens(segment: string): SegmentToken[] {
80
81
  return tokens;
81
82
  }
82
83
 
83
- function extractStepLoopCount(segment: string): { cleanedSegment: string; loopCount?: number } {
84
+ function extractStepFlags(segment: string): { cleanedSegment: string; loopCount?: number | null; withContext: boolean } {
84
85
  const tokens = scanSegmentTokens(segment);
85
86
  const loopTokenRanges: Array<{ start: number; end: number }> = [];
86
- let loopCount: number | undefined;
87
+ const withContextTokenRanges: Array<{ start: number; end: number }> = [];
88
+ let loopCount: number | null | undefined;
89
+ let withContext = false;
87
90
 
88
91
  for (let i = 1; i < tokens.length; i++) {
89
92
  const token = tokens[i];
90
93
  if (token.quoted) continue;
91
94
 
95
+ if (token.value === "--with-context") {
96
+ withContext = true;
97
+ withContextTokenRanges.push({ start: token.start, end: token.end });
98
+ continue;
99
+ }
100
+
92
101
  if (token.value.startsWith("--loop=")) {
93
102
  loopTokenRanges.push({ start: token.start, end: token.end });
94
103
  const value = token.value.slice("--loop=".length);
@@ -100,31 +109,39 @@ function extractStepLoopCount(segment: string): { cleanedSegment: string; loopCo
100
109
  continue;
101
110
  }
102
111
 
103
- if (token.value === "--loop" && i + 1 < tokens.length) {
104
- const next = tokens[i + 1];
105
- if (!next.quoted && /^\d+$/.test(next.value)) {
106
- loopTokenRanges.push({ start: token.start, end: token.end }, { start: next.start, end: next.end });
107
- const parsed = parseInt(next.value, 10);
108
- if (parsed >= 1 && parsed <= 999 && loopCount === undefined) {
109
- loopCount = parsed;
112
+ if (token.value === "--loop") {
113
+ loopTokenRanges.push({ start: token.start, end: token.end });
114
+ if (i + 1 < tokens.length) {
115
+ const next = tokens[i + 1];
116
+ if (!next.quoted && /^\d+$/.test(next.value)) {
117
+ loopTokenRanges.push({ start: next.start, end: next.end });
118
+ const parsed = parseInt(next.value, 10);
119
+ if (parsed >= 1 && parsed <= 999 && loopCount === undefined) {
120
+ loopCount = parsed;
121
+ }
122
+ i++;
123
+ continue;
110
124
  }
111
- i++;
112
- continue;
113
125
  }
126
+ if (loopCount === undefined) {
127
+ loopCount = null;
128
+ }
129
+ continue;
114
130
  }
115
131
  }
116
132
 
117
- if (loopCount === undefined || loopTokenRanges.length === 0) {
118
- return { cleanedSegment: segment };
133
+ const loopRangesToRemove = loopCount !== undefined ? loopTokenRanges : [];
134
+ if (loopRangesToRemove.length === 0 && withContextTokenRanges.length === 0) {
135
+ return { cleanedSegment: segment, withContext: false };
119
136
  }
120
137
 
121
- loopTokenRanges.sort((a, b) => b.start - a.start);
138
+ const rangesToRemove = [...loopRangesToRemove, ...withContextTokenRanges].sort((a, b) => b.start - a.start);
122
139
  let cleanedSegment = segment;
123
- for (const { start, end } of loopTokenRanges) {
140
+ for (const { start, end } of rangesToRemove) {
124
141
  cleanedSegment = `${cleanedSegment.slice(0, start)}${cleanedSegment.slice(end)}`;
125
142
  }
126
143
 
127
- return { cleanedSegment: cleanedSegment.trim(), loopCount };
144
+ return { cleanedSegment: cleanedSegment.trim(), loopCount, withContext };
128
145
  }
129
146
 
130
147
  function splitByTopLevelSeparator(input: string, separator: string): string[] {
@@ -192,10 +209,10 @@ function findMatchingParen(segment: string, openIndex: number): number {
192
209
  }
193
210
 
194
211
  function parseSingleStepSegment(segment: string): ChainStep | undefined {
195
- const { cleanedSegment, loopCount } = extractStepLoopCount(segment);
212
+ const { cleanedSegment, loopCount, withContext } = extractStepFlags(segment);
196
213
  const tokens = parseCommandArgs(cleanedSegment);
197
214
  if (tokens.length === 0) return undefined;
198
- return { name: tokens[0], args: tokens.slice(1), loopCount };
215
+ return { name: tokens[0], args: tokens.slice(1), loopCount, ...(withContext ? { withContext: true } : {}) };
199
216
  }
200
217
 
201
218
  function parseParallelStepSegment(segment: string): ParallelChainStep | undefined {
package/index.ts CHANGED
@@ -1,9 +1,9 @@
1
1
  import type { Model } from "@mariozechner/pi-ai";
2
2
  import type { ExtensionAPI, ExtensionCommandContext, ExtensionContext } from "@mariozechner/pi-coding-agent";
3
3
  import type { ThinkingLevel } from "@mariozechner/pi-agent-core";
4
- import { extractLoopCount, extractLoopFlags, extractSubagentOverride, parseCommandArgs, type SubagentOverride } from "./args.js";
4
+ import { extractChainContextFlag, extractLoopCount, extractLoopFlags, extractSubagentOverride, parseCommandArgs, type SubagentOverride } from "./args.js";
5
5
  import { parseChainSteps, parseChainDeclaration, type ChainStep, type ChainStepOrParallel, type ParallelChainStep } from "./chain-parser.js";
6
- import { generateIterationSummary, didIterationMakeChanges, getIterationEntries } from "./loop-utils.js";
6
+ import { generateChainStepSummary, generateIterationSummary, didIterationMakeChanges, getIterationEntries, wasIterationAborted } from "./loop-utils.js";
7
7
  import { notify, summarizePromptDiagnostics, diagnosticsFingerprint } from "./notifications.js";
8
8
  import { preparePromptExecution } from "./prompt-execution.js";
9
9
  import { buildPromptCommandDescription, expandCwdPath, loadPromptsWithModel, readSkillContent, resolveSkillPath, type PromptWithModel } from "./prompt-loader.js";
@@ -16,6 +16,7 @@ import { renderDelegatedSubagentResult } from "./subagent-renderer.js";
16
16
  interface LoopState {
17
17
  currentIteration: number;
18
18
  totalIterations: number | null;
19
+ rotationLabel?: string;
19
20
  }
20
21
 
21
22
  interface FreshCollapse {
@@ -58,7 +59,7 @@ export default function promptModelExtension(pi: ExtensionAPI) {
58
59
  let accumulatedSummaries: string[] = [];
59
60
  let lastDiagnostics = "";
60
61
  let storedCommandCtx: ExtensionCommandContext | null = null;
61
- const UNLIMITED_LOOP_CAP = 50;
62
+ const UNLIMITED_LOOP_CAP = 999;
62
63
 
63
64
  const toolManager = createToolManager(pi, {
64
65
  isActive: () => !!(loopState || chainActive),
@@ -195,6 +196,8 @@ export default function promptModelExtension(pi: ExtensionAPI) {
195
196
  currentModel: Model<any> | undefined,
196
197
  override?: SubagentOverride,
197
198
  inheritedModel?: Model<any>,
199
+ taskPreamble?: string,
200
+ loopContext?: string,
198
201
  ): Promise<PromptStepResult | "aborted"> {
199
202
  if (shouldDelegatePrompt(prompt, override)) {
200
203
  try {
@@ -206,6 +209,7 @@ export default function promptModelExtension(pi: ExtensionAPI) {
206
209
  currentModel,
207
210
  override,
208
211
  inheritedModel,
212
+ taskPreamble,
209
213
  });
210
214
  if (!delegated) {
211
215
  notify(ctx, `Prompt \`${prompt.name}\` is not configured for delegated execution.`, "error");
@@ -214,7 +218,7 @@ export default function promptModelExtension(pi: ExtensionAPI) {
214
218
  return { changed: delegated.changed };
215
219
  } catch (error) {
216
220
  notify(ctx, error instanceof Error ? error.message : String(error), "error");
217
- return "aborted";
221
+ return { changed: false };
218
222
  }
219
223
  }
220
224
 
@@ -256,10 +260,14 @@ export default function promptModelExtension(pi: ExtensionAPI) {
256
260
  pendingSkillMessage = skillResolution.kind === "ready" ? skillResolution.message : undefined;
257
261
 
258
262
  const startId = ctx.sessionManager.getLeafId();
259
- pi.sendUserMessage(prepared.content);
263
+ const content = loopContext ? `[${loopContext}]\n\n${prepared.content}` : prepared.content;
264
+ pi.sendUserMessage(content);
260
265
  await waitForTurnStart(ctx);
261
266
  await ctx.waitForIdle();
262
- return { changed: didIterationMakeChanges(getIterationEntries(ctx, startId)) };
267
+
268
+ const entries = getIterationEntries(ctx, startId);
269
+ if (wasIterationAborted(entries)) return "aborted";
270
+ return { changed: didIterationMakeChanges(entries) };
263
271
  }
264
272
 
265
273
  async function restoreSessionState(
@@ -351,10 +359,11 @@ export default function promptModelExtension(pi: ExtensionAPI) {
351
359
  function updateLoopStatus(ctx: ExtensionContext) {
352
360
  if (!ctx.hasUI) return;
353
361
  if (loopState) {
362
+ const suffix = loopState.rotationLabel ? ` · ${loopState.rotationLabel}` : "";
354
363
  const label =
355
364
  loopState.totalIterations !== null
356
- ? `loop ${loopState.currentIteration}/${loopState.totalIterations}`
357
- : `loop ${loopState.currentIteration}`;
365
+ ? `loop ${loopState.currentIteration}/${loopState.totalIterations}${suffix}`
366
+ : `loop ${loopState.currentIteration}${suffix}`;
358
367
  ctx.ui.setStatus("prompt-loop", ctx.ui.theme.fg("warning", label));
359
368
  } else {
360
369
  ctx.ui.setStatus("prompt-loop", undefined);
@@ -383,6 +392,7 @@ export default function promptModelExtension(pi: ExtensionAPI) {
383
392
  ctx: ExtensionCommandContext,
384
393
  subagentOverride?: SubagentOverride,
385
394
  cwdOverride?: string,
395
+ promptOverrides?: Partial<Pick<PromptWithModel, "models" | "inheritContext">>,
386
396
  ) {
387
397
  refreshPrompts(ctx.cwd, ctx);
388
398
  const initialPrompt = prompts.get(name);
@@ -399,7 +409,7 @@ export default function promptModelExtension(pi: ExtensionAPI) {
399
409
  const useFresh = freshFlag || initialPrompt.fresh === true;
400
410
  const effectiveMax = totalIterations ?? UNLIMITED_LOOP_CAP;
401
411
  const isUnlimited = totalIterations === null;
402
- const useConverge = isUnlimited ? true : converge && initialPrompt.converge !== false;
412
+ const useConverge = converge && initialPrompt.converge !== false;
403
413
  const anchorId = useFresh ? ctx.sessionManager.getLeafId() : null;
404
414
 
405
415
  loopState = { currentIteration: 1, totalIterations };
@@ -412,9 +422,7 @@ export default function promptModelExtension(pi: ExtensionAPI) {
412
422
  try {
413
423
  for (let i = 0; i < effectiveMax; i++) {
414
424
  loopState.currentIteration = i + 1;
415
- updateLoopStatus(ctx);
416
425
  const iterationLabel = totalIterations !== null ? `${i + 1}/${totalIterations}` : `${i + 1}`;
417
- notify(ctx, `Loop ${iterationLabel}: ${name}`, "info");
418
426
 
419
427
  refreshPrompts(ctx.cwd, ctx);
420
428
  const prompt = prompts.get(name);
@@ -422,15 +430,40 @@ export default function promptModelExtension(pi: ExtensionAPI) {
422
430
  notify(ctx, `Prompt "${name}" no longer exists`, "error");
423
431
  break;
424
432
  }
425
- const effectivePrompt = cwdOverride ? { ...prompt, cwd: cwdOverride } : prompt;
433
+ const effectivePrompt = { ...prompt, ...(cwdOverride ? { cwd: cwdOverride } : {}), ...promptOverrides };
434
+ let iterationPrompt = effectivePrompt;
435
+ loopState!.rotationLabel = undefined;
436
+ if (effectivePrompt.rotate && effectivePrompt.models.length > 1) {
437
+ const rotationIndex = i % effectivePrompt.models.length;
438
+ const rotatedThinking = effectivePrompt.thinkingLevels
439
+ ? effectivePrompt.thinkingLevels[rotationIndex]
440
+ : effectivePrompt.thinking;
441
+ iterationPrompt = {
442
+ ...effectivePrompt,
443
+ models: [effectivePrompt.models[rotationIndex]],
444
+ thinking: rotatedThinking,
445
+ };
446
+ const shortModel = effectivePrompt.models[rotationIndex].split("/").pop() || effectivePrompt.models[rotationIndex];
447
+ const thinkingLabel = rotatedThinking ? ` ${rotatedThinking}` : "";
448
+ loopState!.rotationLabel = `${shortModel}${thinkingLabel}`;
449
+ }
450
+ updateLoopStatus(ctx);
451
+ const rotationSuffix = loopState!.rotationLabel ? ` [${loopState!.rotationLabel}]` : "";
452
+ notify(ctx, `Loop ${iterationLabel}: ${name}${rotationSuffix}`, "info");
426
453
 
454
+ const loopContext = loopState!.rotationLabel
455
+ ? `Loop ${iterationLabel} · ${loopState!.rotationLabel}`
456
+ : `Loop ${iterationLabel}`;
427
457
  const iterationStartId = ctx.sessionManager.getLeafId();
428
458
  const stepResult = await executePromptStep(
429
- effectivePrompt,
459
+ iterationPrompt,
430
460
  parseCommandArgs(cleanedArgs),
431
461
  ctx,
432
462
  currentModel,
433
463
  subagentOverride,
464
+ undefined,
465
+ undefined,
466
+ loopContext,
434
467
  );
435
468
  if (stepResult === "aborted") break;
436
469
 
@@ -438,7 +471,7 @@ export default function promptModelExtension(pi: ExtensionAPI) {
438
471
  currentThinking = pi.getThinkingLevel();
439
472
  completedIterations++;
440
473
 
441
- const iterationChanged = shouldDelegatePrompt(effectivePrompt, subagentOverride)
474
+ const iterationChanged = shouldDelegatePrompt(iterationPrompt, subagentOverride)
442
475
  ? stepResult.changed
443
476
  : didIterationMakeChanges(getIterationEntries(ctx, iterationStartId));
444
477
  if (useConverge && (isUnlimited || effectiveMax > 1) && !iterationChanged) {
@@ -496,6 +529,7 @@ export default function promptModelExtension(pi: ExtensionAPI) {
496
529
  ctx: ExtensionCommandContext,
497
530
  subagentOverride?: SubagentOverride,
498
531
  cwdOverride?: string,
532
+ chainContextEnabled = false,
499
533
  ) {
500
534
  const flattenChainSteps = (): ChainStep[] => {
501
535
  const flattened: ChainStep[] = [];
@@ -524,6 +558,10 @@ export default function promptModelExtension(pi: ExtensionAPI) {
524
558
  notify(ctx, `Step "${parallelStep.name}" in parallel() does not support per-task --loop.`, "error");
525
559
  return false;
526
560
  }
561
+ if (parallelStep.withContext === true) {
562
+ notify(ctx, `Step "${parallelStep.name}" in parallel() does not support per-task --with-context.`, "error");
563
+ return false;
564
+ }
527
565
  const stepPrompt = prompts.get(parallelStep.name);
528
566
  if (!stepPrompt) continue;
529
567
  if (stepPrompt.chain) {
@@ -560,7 +598,7 @@ export default function promptModelExtension(pi: ExtensionAPI) {
560
598
  pendingSkillMessage = undefined;
561
599
  const effectiveMax = totalIterations ?? UNLIMITED_LOOP_CAP;
562
600
  const isUnlimited = totalIterations === null;
563
- const useConverge = isUnlimited ? true : converge;
601
+ const useConverge = converge;
564
602
 
565
603
  const anchorId = fresh ? ctx.sessionManager.getLeafId() : null;
566
604
  const chainStepNames = steps
@@ -599,14 +637,18 @@ export default function promptModelExtension(pi: ExtensionAPI) {
599
637
  }
600
638
  : {
601
639
  kind: "single" as const,
602
- template: {
603
- ...prompts.get(step.name)!,
604
- ...(cwdOverride ? { cwd: cwdOverride } : {}),
640
+ singleStep: {
641
+ prompt: {
642
+ ...prompts.get(step.name)!,
643
+ ...(cwdOverride ? { cwd: cwdOverride } : {}),
644
+ },
605
645
  stepArgs: step.args,
606
- stepLoop: step.loopCount ?? 1,
646
+ stepLoop: step.loopCount !== undefined ? step.loopCount : 1,
647
+ stepWithContext: step.withContext === true,
607
648
  },
608
649
  },
609
650
  );
651
+ const chainStepSummaries: string[] = [];
610
652
  let aborted = false;
611
653
  let iterationChanged = false;
612
654
  let loopPrefix = "";
@@ -619,10 +661,17 @@ export default function promptModelExtension(pi: ExtensionAPI) {
619
661
  const stepNumber = index + 1;
620
662
  if (stepTemplate.kind === "parallel") {
621
663
  const stepNames = stepTemplate.tasks.map((task) => task.name).join(", ");
664
+ const stepLabel = `parallel(${stepNames})`;
622
665
  notify(ctx, `${loopPrefix}Step ${stepNumber}/${templates.length}: parallel(${stepNames})`, "info");
623
666
  if (ctx.hasUI) {
624
667
  ctx.ui.setStatus("prompt-chain", ctx.ui.theme.fg("warning", `step ${stepNumber}/${templates.length}: parallel(${stepNames})`));
625
668
  }
669
+ const stepStartId = ctx.sessionManager.getLeafId();
670
+ let taskPreamble: string | undefined;
671
+ const isForkedParallelContext = stepTemplate.tasks.some((task) => task.prompt.inheritContext === true);
672
+ if (chainContextEnabled && !isForkedParallelContext && chainStepSummaries.length > 0) {
673
+ taskPreamble = `[Previous chain steps]\n\n${chainStepSummaries.join("\n\n")}`;
674
+ }
626
675
 
627
676
  let delegated;
628
677
  try {
@@ -636,6 +685,7 @@ export default function promptModelExtension(pi: ExtensionAPI) {
636
685
  prompt: task.prompt,
637
686
  args: task.args.length > 0 ? task.args : sharedArgs,
638
687
  })),
688
+ taskPreamble,
639
689
  });
640
690
  } catch (error) {
641
691
  notify(ctx, error instanceof Error ? error.message : String(error), "error");
@@ -650,40 +700,65 @@ export default function promptModelExtension(pi: ExtensionAPI) {
650
700
 
651
701
  currentModel = getCurrentModel(ctx);
652
702
  currentThinking = pi.getThinkingLevel();
653
- if (delegated.changed) iterationChanged = true;
703
+ const stepEntries = getIterationEntries(ctx, stepStartId);
704
+ if (didIterationMakeChanges(stepEntries)) iterationChanged = true;
705
+ chainStepSummaries.push(generateChainStepSummary(stepEntries, stepLabel, stepNumber));
654
706
  continue;
655
707
  }
656
708
 
657
- const template = stepTemplate.template;
658
- const stepLoop = template.stepLoop;
659
- const effectiveArgs = template.stepArgs.length > 0 ? template.stepArgs : sharedArgs;
709
+ const singleStep = stepTemplate.singleStep;
710
+ const stepLoopTotal = singleStep.stepLoop;
711
+ const stepLoopMax = stepLoopTotal ?? UNLIMITED_LOOP_CAP;
712
+ const isStepLooping = stepLoopMax > 1;
713
+ const effectiveArgs = singleStep.stepArgs.length > 0 ? singleStep.stepArgs : sharedArgs;
714
+ const shouldInjectSummary =
715
+ shouldDelegatePrompt(singleStep.prompt, subagentOverride) &&
716
+ singleStep.prompt.inheritContext !== true &&
717
+ (chainContextEnabled || singleStep.stepWithContext === true);
660
718
  const outerLoopState = loopState ? { ...loopState } : null;
661
- if (stepLoop > 1) {
662
- loopState = { currentIteration: 1, totalIterations: stepLoop };
719
+ const stepStartId = ctx.sessionManager.getLeafId();
720
+ if (isStepLooping) {
721
+ loopState = { currentIteration: 1, totalIterations: stepLoopTotal };
663
722
  updateLoopStatus(ctx);
664
723
  }
665
724
 
666
725
  try {
667
- for (let stepIteration = 0; stepIteration < stepLoop; stepIteration++) {
668
- if (stepLoop > 1) {
669
- loopState = { currentIteration: stepIteration + 1, totalIterations: stepLoop };
726
+ for (let stepIteration = 0; stepIteration < stepLoopMax; stepIteration++) {
727
+ if (isStepLooping) {
728
+ loopState = { currentIteration: stepIteration + 1, totalIterations: stepLoopTotal };
670
729
  updateLoopStatus(ctx);
671
730
  }
672
731
 
673
- const iterSuffix = stepLoop > 1 ? ` (iter ${stepIteration + 1}/${stepLoop})` : "";
674
- notify(ctx, `${loopPrefix}Step ${stepNumber}/${templates.length}: ${template.name}${iterSuffix} ${buildPromptCommandDescription(template)}`, "info");
732
+ const iterSuffix = isStepLooping
733
+ ? stepLoopTotal !== null
734
+ ? ` (iter ${stepIteration + 1}/${stepLoopTotal})`
735
+ : ` (iter ${stepIteration + 1})`
736
+ : "";
737
+ notify(
738
+ ctx,
739
+ `${loopPrefix}Step ${stepNumber}/${templates.length}: ${singleStep.prompt.name}${iterSuffix} ${buildPromptCommandDescription(singleStep.prompt)}`,
740
+ "info",
741
+ );
675
742
  if (ctx.hasUI) {
676
- ctx.ui.setStatus("prompt-chain", ctx.ui.theme.fg("warning", `step ${stepNumber}/${templates.length}: ${template.name}`));
743
+ ctx.ui.setStatus("prompt-chain", ctx.ui.theme.fg("warning", `step ${stepNumber}/${templates.length}: ${singleStep.prompt.name}`));
677
744
  }
745
+ const taskPreamble = shouldInjectSummary && chainStepSummaries.length > 0
746
+ ? `[Previous chain steps]\n\n${chainStepSummaries.join("\n\n")}`
747
+ : undefined;
678
748
 
749
+ const stepLoopContext = isStepLooping
750
+ ? `Step ${stepNumber}/${templates.length}: ${singleStep.prompt.name}${iterSuffix}`
751
+ : undefined;
679
752
  const stepIterationStartId = ctx.sessionManager.getLeafId();
680
753
  const stepResult = await executePromptStep(
681
- template,
754
+ singleStep.prompt,
682
755
  effectiveArgs,
683
756
  ctx,
684
757
  currentModel,
685
758
  subagentOverride,
686
759
  chainInheritedModel,
760
+ taskPreamble,
761
+ stepLoopContext,
687
762
  );
688
763
  if (stepResult === "aborted") {
689
764
  aborted = true;
@@ -693,22 +768,23 @@ export default function promptModelExtension(pi: ExtensionAPI) {
693
768
  currentModel = getCurrentModel(ctx);
694
769
  currentThinking = pi.getThinkingLevel();
695
770
 
696
- const stepChanged = shouldDelegatePrompt(template, subagentOverride)
697
- ? stepResult.changed
698
- : didIterationMakeChanges(getIterationEntries(ctx, stepIterationStartId));
699
- if (stepChanged) iterationChanged = true;
700
- if (stepLoop > 1 && template.converge !== false && !stepChanged) {
771
+ const stepIterationEntries = getIterationEntries(ctx, stepIterationStartId);
772
+ const stepIterationChanged = didIterationMakeChanges(stepIterationEntries);
773
+ if (isStepLooping && singleStep.prompt.converge !== false && !stepIterationChanged) {
701
774
  break;
702
775
  }
703
776
  }
704
777
  } finally {
705
- if (stepLoop > 1) {
778
+ if (isStepLooping) {
706
779
  loopState = outerLoopState ? { ...outerLoopState } : null;
707
780
  updateLoopStatus(ctx);
708
781
  }
709
782
  }
710
783
 
711
784
  if (aborted) break;
785
+ const stepEntries = getIterationEntries(ctx, stepStartId);
786
+ if (didIterationMakeChanges(stepEntries)) iterationChanged = true;
787
+ chainStepSummaries.push(generateChainStepSummary(stepEntries, singleStep.prompt.name, stepNumber));
712
788
  }
713
789
 
714
790
  if (aborted) break;
@@ -782,11 +858,15 @@ export default function promptModelExtension(pi: ExtensionAPI) {
782
858
  const argsWithoutSubagent = subagent.args;
783
859
 
784
860
  if (prompt.chain) {
785
- const loop = extractLoopCount(argsWithoutSubagent);
786
- let totalIterations: number | null = prompt.loop ?? 1;
861
+ if (subagent.model) notify(ctx, `--model is not supported on chain prompts (ignored)`, "warning");
862
+ if (subagent.fork) notify(ctx, `--fork is not supported on chain prompts (ignored)`, "warning");
863
+ const extracted = extractChainContextFlag(argsWithoutSubagent);
864
+ const chainContextEnabled = extracted.chainContext || prompt.chainContext === "summary";
865
+ const loop = extractLoopCount(extracted.args);
866
+ let totalIterations: number | null = prompt.loop !== undefined ? prompt.loop : 1;
787
867
  let fresh = false;
788
868
  let converge = true;
789
- let cleanedArgs = argsWithoutSubagent;
869
+ let cleanedArgs = extracted.args;
790
870
 
791
871
  if (loop) {
792
872
  totalIterations = loop.loopCount;
@@ -794,7 +874,7 @@ export default function promptModelExtension(pi: ExtensionAPI) {
794
874
  converge = loop.converge;
795
875
  cleanedArgs = loop.args;
796
876
  } else if (prompt.loop !== undefined) {
797
- const flags = extractLoopFlags(argsWithoutSubagent);
877
+ const flags = extractLoopFlags(extracted.args);
798
878
  fresh = flags.fresh;
799
879
  converge = flags.converge;
800
880
  cleanedArgs = flags.args;
@@ -821,23 +901,29 @@ export default function promptModelExtension(pi: ExtensionAPI) {
821
901
  ctx,
822
902
  subagent.override,
823
903
  cwdOverride,
904
+ chainContextEnabled,
824
905
  );
825
906
  return;
826
907
  }
827
908
 
909
+ const promptOverrides: Partial<Pick<PromptWithModel, "models" | "inheritContext">> = {
910
+ ...(subagent.model ? { models: [subagent.model] } : {}),
911
+ ...(subagent.fork ? { inheritContext: true } : {}),
912
+ };
913
+
828
914
  const loop = extractLoopCount(argsWithoutSubagent);
829
915
  if (loop) {
830
- await runPromptLoop(name, loop.args, loop.loopCount, loop.fresh, loop.converge, ctx, subagent.override, runtimeCwd);
916
+ await runPromptLoop(name, loop.args, loop.loopCount, loop.fresh, loop.converge, ctx, subagent.override, runtimeCwd, promptOverrides);
831
917
  return;
832
918
  }
833
919
 
834
920
  if (prompt.loop !== undefined) {
835
921
  const flags = extractLoopFlags(argsWithoutSubagent);
836
- await runPromptLoop(name, flags.args, prompt.loop, flags.fresh, flags.converge, ctx, subagent.override, runtimeCwd);
922
+ await runPromptLoop(name, flags.args, prompt.loop, flags.fresh, flags.converge, ctx, subagent.override, runtimeCwd, promptOverrides);
837
923
  return;
838
924
  }
839
925
 
840
- const effectivePrompt = runtimeCwd ? { ...prompt, cwd: runtimeCwd } : prompt;
926
+ const effectivePrompt = { ...prompt, ...(runtimeCwd ? { cwd: runtimeCwd } : {}), ...promptOverrides };
841
927
  const savedModel = getCurrentModel(ctx);
842
928
  const savedThinking = pi.getThinkingLevel();
843
929
  const stepResult = await executePromptStep(
@@ -962,8 +1048,9 @@ export default function promptModelExtension(pi: ExtensionAPI) {
962
1048
  notify(ctx, `Invalid --cwd path: must be absolute`, "error");
963
1049
  return;
964
1050
  }
965
- const loop = extractLoopCount(subagent.args);
966
- const cleanedArgs = loop ? loop.args : subagent.args;
1051
+ const extracted = extractChainContextFlag(subagent.args);
1052
+ const loop = extractLoopCount(extracted.args);
1053
+ const cleanedArgs = loop ? loop.args : extracted.args;
967
1054
 
968
1055
  const { steps, sharedArgs, invalidSegments } = parseChainSteps(cleanedArgs);
969
1056
  if (invalidSegments.length > 0) {
@@ -985,6 +1072,7 @@ export default function promptModelExtension(pi: ExtensionAPI) {
985
1072
  ctx,
986
1073
  subagent.override,
987
1074
  runtimeCwd,
1075
+ extracted.chainContext,
988
1076
  );
989
1077
  }
990
1078
 
package/loop-utils.ts CHANGED
@@ -7,6 +7,13 @@ interface DelegatedMessageDetails {
7
7
  parallelResults?: Array<{ messages?: Message[] }>;
8
8
  }
9
9
 
10
+ interface CollectedSummaryData {
11
+ filesRead: Set<string>;
12
+ filesWritten: Set<string>;
13
+ commandCount: number;
14
+ lastAssistantText: string;
15
+ }
16
+
10
17
  function collectAssistantActions(messages: Message[], filesRead: Set<string>, filesWritten: Set<string>): { commandCount: number; lastText: string } {
11
18
  let commandCount = 0;
12
19
  let lastText = "";
@@ -39,7 +46,7 @@ function delegatedDetails(entry: SessionEntry): DelegatedMessageDetails | undefi
39
46
  return entry.details as DelegatedMessageDetails;
40
47
  }
41
48
 
42
- export function generateIterationSummary(entries: SessionEntry[], task: string, iteration: number, totalIterations: number | null): string {
49
+ function collectSummaryData(entries: SessionEntry[]): CollectedSummaryData {
43
50
  const filesRead = new Set<string>();
44
51
  const filesWritten = new Set<string>();
45
52
  let commandCount = 0;
@@ -68,7 +75,18 @@ export function generateIterationSummary(entries: SessionEntry[], task: string,
68
75
  }
69
76
  }
70
77
 
71
- let summary = totalIterations !== null ? `[Loop iteration ${iteration}/${totalIterations}]\nTask: "${task}"` : `[Loop iteration ${iteration}]\nTask: "${task}"`;
78
+ return {
79
+ filesRead,
80
+ filesWritten,
81
+ commandCount,
82
+ lastAssistantText,
83
+ };
84
+ }
85
+
86
+ function formatSummary(header: string, entries: SessionEntry[]): string {
87
+ const { filesRead, filesWritten, commandCount, lastAssistantText } = collectSummaryData(entries);
88
+
89
+ let summary = header;
72
90
 
73
91
  const actionParts: string[] = [];
74
92
  if (filesRead.size > 0) actionParts.push(`read ${filesRead.size} file(s)`);
@@ -87,6 +105,17 @@ export function generateIterationSummary(entries: SessionEntry[], task: string,
87
105
  return summary;
88
106
  }
89
107
 
108
+ export function generateIterationSummary(entries: SessionEntry[], task: string, iteration: number, totalIterations: number | null): string {
109
+ const header = totalIterations !== null
110
+ ? `[Loop iteration ${iteration}/${totalIterations}]\nTask: "${task}"`
111
+ : `[Loop iteration ${iteration}]\nTask: "${task}"`;
112
+ return formatSummary(header, entries);
113
+ }
114
+
115
+ export function generateChainStepSummary(entries: SessionEntry[], stepLabel: string, stepNumber: number): string {
116
+ return formatSummary(`Step ${stepNumber} — ${stepLabel}:`, entries);
117
+ }
118
+
90
119
  export function didIterationMakeChanges(entries: SessionEntry[]): boolean {
91
120
  for (const entry of entries) {
92
121
  if (entry.type === "message") {
@@ -124,3 +153,11 @@ export function getIterationEntries(ctx: Pick<ExtensionContext, "sessionManager"
124
153
  if (startIdx < 0) return branch;
125
154
  return branch.slice(startIdx + 1);
126
155
  }
156
+
157
+ export function wasIterationAborted(entries: SessionEntry[]): boolean {
158
+ for (const entry of entries) {
159
+ if (entry.type !== "message" || entry.message.role !== "assistant") continue;
160
+ if ((entry.message as AssistantMessage).stopReason === "aborted") return true;
161
+ }
162
+ return false;
163
+ }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "pi-prompt-template-model",
3
- "version": "0.6.4",
3
+ "version": "0.6.6",
4
4
  "type": "module",
5
5
  "description": "Prompt template model selector extension for pi coding agent",
6
6
  "author": "Nico Bailon",
package/prompt-loader.ts CHANGED
@@ -38,11 +38,14 @@ export interface PromptWithModel {
38
38
  content: string;
39
39
  models: string[];
40
40
  chain?: string;
41
+ chainContext?: "summary";
41
42
  restore: boolean;
42
43
  skill?: string;
43
44
  thinking?: ThinkingLevel;
45
+ thinkingLevels?: ThinkingLevel[];
46
+ rotate?: boolean;
44
47
  fresh?: boolean;
45
- loop?: number;
48
+ loop?: number | null;
46
49
  converge?: boolean;
47
50
  subagent?: true | string;
48
51
  inheritContext?: boolean;
@@ -242,14 +245,43 @@ function normalizeFresh(
242
245
  return false;
243
246
  }
244
247
 
248
+ function normalizeRotate(
249
+ value: unknown,
250
+ filePath: string,
251
+ source: PromptSource,
252
+ diagnostics: PromptLoaderDiagnostic[],
253
+ ): boolean {
254
+ if (value === undefined) return false;
255
+ if (typeof value === "boolean") return value;
256
+ if (typeof value === "string") {
257
+ const normalized = value.trim().toLowerCase();
258
+ if (normalized === "true") return true;
259
+ if (normalized === "false") return false;
260
+ }
261
+
262
+ diagnostics.push(
263
+ createDiagnostic(
264
+ "invalid-rotate",
265
+ filePath,
266
+ source,
267
+ `Using default rotate=false for ${filePath}: frontmatter field "rotate" must be true or false.`,
268
+ ),
269
+ );
270
+ return false;
271
+ }
272
+
245
273
  function normalizeLoop(
246
274
  value: unknown,
247
275
  filePath: string,
248
276
  source: PromptSource,
249
277
  diagnostics: PromptLoaderDiagnostic[],
250
- ): number | undefined {
278
+ ): number | null | undefined {
251
279
  if (value === undefined) return undefined;
252
280
 
281
+ if (value === true || (typeof value === "string" && value.trim().toLowerCase() === "unlimited")) {
282
+ return null;
283
+ }
284
+
253
285
  let normalizedValue: number | undefined;
254
286
  if (typeof value === "number") {
255
287
  normalizedValue = value;
@@ -266,7 +298,7 @@ function normalizeLoop(
266
298
  "invalid-loop",
267
299
  filePath,
268
300
  source,
269
- `Ignoring invalid loop value in ${filePath}: frontmatter field "loop" must be an integer between 1 and 999.`,
301
+ `Ignoring invalid loop value in ${filePath}: frontmatter field "loop" must be an integer between 1 and 999, true, or "unlimited".`,
270
302
  ),
271
303
  );
272
304
  return undefined;
@@ -432,6 +464,29 @@ function normalizeChain(
432
464
  return undefined;
433
465
  }
434
466
 
467
+ function normalizeChainContext(
468
+ value: unknown,
469
+ filePath: string,
470
+ source: PromptSource,
471
+ diagnostics: PromptLoaderDiagnostic[],
472
+ ): "summary" | undefined {
473
+ if (value === undefined) return undefined;
474
+ if (typeof value === "string") {
475
+ const normalized = value.trim().toLowerCase();
476
+ if (normalized === "summary") return "summary";
477
+ }
478
+
479
+ diagnostics.push(
480
+ createDiagnostic(
481
+ "invalid-chain-context",
482
+ filePath,
483
+ source,
484
+ `Ignoring invalid chainContext value in ${filePath}: frontmatter field "chainContext" must be "summary".`,
485
+ ),
486
+ );
487
+ return undefined;
488
+ }
489
+
435
490
  function normalizeThinking(
436
491
  value: unknown,
437
492
  filePath: string,
@@ -457,6 +512,48 @@ function normalizeThinking(
457
512
  return undefined;
458
513
  }
459
514
 
515
+ function normalizeThinkingLevels(
516
+ value: unknown,
517
+ modelCount: number,
518
+ filePath: string,
519
+ source: PromptSource,
520
+ diagnostics: PromptLoaderDiagnostic[],
521
+ ): ThinkingLevel[] | undefined {
522
+ if (typeof value !== "string") return undefined;
523
+
524
+ const levels = value
525
+ .split(",")
526
+ .map((item) => item.trim())
527
+ .filter(Boolean);
528
+
529
+ const invalidLevel = levels.find((level) => !(VALID_THINKING_LEVELS as readonly string[]).includes(level.toLowerCase()));
530
+ if (invalidLevel) {
531
+ diagnostics.push(
532
+ createDiagnostic(
533
+ "invalid-thinking-levels",
534
+ filePath,
535
+ source,
536
+ `Ignoring invalid thinking level in ${filePath}: ${JSON.stringify(invalidLevel)}.`,
537
+ ),
538
+ );
539
+ return undefined;
540
+ }
541
+
542
+ if (levels.length !== modelCount) {
543
+ diagnostics.push(
544
+ createDiagnostic(
545
+ "invalid-thinking-level-count",
546
+ filePath,
547
+ source,
548
+ `Ignoring comma-separated thinking levels in ${filePath}: expected ${modelCount} entries to match frontmatter field "model".`,
549
+ ),
550
+ );
551
+ return undefined;
552
+ }
553
+
554
+ return levels.map((level) => level.toLowerCase() as ThinkingLevel);
555
+ }
556
+
460
557
  function loadPromptsWithModelFromDir(
461
558
  dir: string,
462
559
  source: PromptSource,
@@ -542,6 +639,7 @@ function loadPromptsWithModelFromDir(
542
639
  if (!frontmatter) continue;
543
640
  const { body } = parsed;
544
641
  const chain = normalizeChain(frontmatter.chain, fullPath, source, diagnostics);
642
+ const chainContext = chain ? normalizeChainContext(frontmatter.chainContext, fullPath, source, diagnostics) : undefined;
545
643
  if (chain && /\bparallel\s*\(/.test(chain)) {
546
644
  const parsedChain = parseChainDeclaration(chain);
547
645
  if (parsedChain.invalidSegments.length > 0 || parsedChain.steps.length === 0) {
@@ -594,6 +692,7 @@ function loadPromptsWithModelFromDir(
594
692
  const parsedModels = chain ? [] : normalizeModelSpecs(frontmatter.model, fullPath, source, diagnostics);
595
693
  if (!chain && hasModelField && !parsedModels) continue;
596
694
  const models = chain ? [] : (parsedModels ?? []);
695
+ const rotate = chain ? false : normalizeRotate(frontmatter.rotate, fullPath, source, diagnostics);
597
696
 
598
697
  const name = entry.name.slice(0, -3);
599
698
  if (RESERVED_COMMAND_NAMES.has(name)) {
@@ -612,7 +711,15 @@ function loadPromptsWithModelFromDir(
612
711
  const safeCwd = (chain || subagent !== undefined) ? cwd : undefined;
613
712
  const description = normalizeStringField("description", frontmatter.description, fullPath, source, diagnostics) ?? "";
614
713
  const skill = chain ? undefined : normalizeStringField("skill", frontmatter.skill, fullPath, source, diagnostics);
615
- const thinking = chain ? undefined : normalizeThinking(frontmatter.thinking, fullPath, source, diagnostics);
714
+ let thinking: ThinkingLevel | undefined;
715
+ let thinkingLevels: ThinkingLevel[] | undefined;
716
+ if (!chain) {
717
+ if (rotate && typeof frontmatter.thinking === "string" && frontmatter.thinking.includes(",")) {
718
+ thinkingLevels = normalizeThinkingLevels(frontmatter.thinking, models.length, fullPath, source, diagnostics);
719
+ } else {
720
+ thinking = normalizeThinking(frontmatter.thinking, fullPath, source, diagnostics);
721
+ }
722
+ }
616
723
  const restore = normalizeRestore(frontmatter.restore, fullPath, source, diagnostics);
617
724
  const fresh = normalizeFresh(frontmatter.fresh, fullPath, source, diagnostics);
618
725
  const loop = normalizeLoop(frontmatter.loop, fullPath, source, diagnostics);
@@ -637,11 +744,14 @@ function loadPromptsWithModelFromDir(
637
744
  content: body,
638
745
  models,
639
746
  chain: chain || undefined,
747
+ chainContext,
640
748
  restore,
641
749
  skill,
642
750
  thinking,
751
+ thinkingLevels,
752
+ rotate: rotate || undefined,
643
753
  fresh: fresh || undefined,
644
- loop: loop || undefined,
754
+ loop: loop !== undefined ? loop : undefined,
645
755
  converge: converge === false ? false : undefined,
646
756
  subagent,
647
757
  inheritContext: safeInheritContext || undefined,
@@ -721,18 +831,21 @@ export function loadPromptsWithModel(cwd: string): LoadPromptsWithModelResult {
721
831
  export function buildPromptCommandDescription(prompt: PromptWithModel): string {
722
832
  const sourceLabel = prompt.subdir ? `(${prompt.source}:${prompt.subdir})` : `(${prompt.source})`;
723
833
  if (prompt.chain) {
834
+ const chainContextLabel = prompt.chainContext ? ` ${prompt.chainContext}` : "";
724
835
  const cwdLabel = prompt.cwd ? ` cwd:${prompt.cwd}` : "";
725
- const details = `[chain: ${prompt.chain}${cwdLabel}] ${sourceLabel}`;
836
+ const details = `[chain: ${prompt.chain}${chainContextLabel}${cwdLabel}] ${sourceLabel}`;
726
837
  return prompt.description ? `${prompt.description} ${details}` : details;
727
838
  }
728
839
  const modelLabel = prompt.models.length > 0 ? prompt.models.map((model) => model.split("/").pop() || model).join("|") : "current";
840
+ const rotateLabel = prompt.rotate ? " rotate" : "";
729
841
  const skillLabel = prompt.skill ? ` +${prompt.skill}` : "";
730
- const thinkingLabel = prompt.thinking ? ` ${prompt.thinking}` : "";
731
- const loopLabel = prompt.loop ? ` loop:${prompt.loop}` : "";
842
+ const thinkingValue = prompt.thinkingLevels ? prompt.thinkingLevels.join(",") : prompt.thinking;
843
+ const thinkingLabel = thinkingValue ? ` ${thinkingValue}` : "";
844
+ const loopLabel = prompt.loop !== undefined ? ` loop:${prompt.loop === null ? "unlimited" : prompt.loop}` : "";
732
845
  const subagentLabel = prompt.subagent ? ` subagent:${prompt.subagent === true ? "delegate" : prompt.subagent}` : "";
733
846
  const cwdLabel = prompt.cwd ? ` cwd:${prompt.cwd}` : "";
734
847
  const inheritContextLabel = prompt.inheritContext ? " fork" : "";
735
- const details = `[${modelLabel}${thinkingLabel}${skillLabel}${loopLabel}${subagentLabel}${cwdLabel}${inheritContextLabel}] ${sourceLabel}`;
848
+ const details = `[${modelLabel}${rotateLabel}${thinkingLabel}${skillLabel}${loopLabel}${subagentLabel}${cwdLabel}${inheritContextLabel}] ${sourceLabel}`;
736
849
  return prompt.description ? `${prompt.description} ${details}` : details;
737
850
  }
738
851
 
package/subagent-step.ts CHANGED
@@ -38,6 +38,7 @@ interface DelegatedPromptBaseOptions {
38
38
  override?: SubagentOverride;
39
39
  signal?: AbortSignal;
40
40
  inheritedModel?: Model<any>;
41
+ taskPreamble?: string;
41
42
  }
42
43
 
43
44
  interface DelegatedSinglePromptOptions extends DelegatedPromptBaseOptions {
@@ -156,6 +157,7 @@ async function prepareDelegatedTask(
156
157
  currentModel: Model<any> | undefined,
157
158
  override: SubagentOverride | undefined,
158
159
  inheritedModel: Model<any> | undefined,
160
+ taskPreamble: string | undefined,
159
161
  runtime: Awaited<ReturnType<typeof ensureSubagentRuntime>>,
160
162
  ): Promise<PreparedDelegatedTask> {
161
163
  const requestedAgent = resolveDelegationName(task.prompt, override);
@@ -183,11 +185,15 @@ async function prepareDelegatedTask(
183
185
  if (effectiveCwd !== ctx.cwd && !existsSync(effectiveCwd)) {
184
186
  throw new Error(`cwd directory does not exist: ${effectiveCwd}`);
185
187
  }
188
+ let taskText = prepared.content;
189
+ if (!task.prompt.inheritContext && taskPreamble) {
190
+ taskText = `${taskPreamble}\n\n---\n\n${prepared.content}`;
191
+ }
186
192
 
187
193
  return {
188
194
  promptName: task.prompt.name,
189
195
  agent,
190
- task: prepared.content,
196
+ task: taskText,
191
197
  context: task.prompt.inheritContext ? "fork" : "fresh",
192
198
  model: `${prepared.selectedModel.model.provider}/${prepared.selectedModel.model.id}`,
193
199
  cwd: effectiveCwd,
@@ -429,7 +435,7 @@ async function requestDelegatedRun(
429
435
  }
430
436
 
431
437
  export async function executeSubagentPromptStep(options: DelegatedPromptOptions): Promise<DelegatedPromptOutcome | undefined> {
432
- const { pi, ctx, currentModel, override, signal, inheritedModel } = options;
438
+ const { pi, ctx, currentModel, override, signal, inheritedModel, taskPreamble } = options;
433
439
  const runtime = await ensureSubagentRuntime(ctx.cwd);
434
440
  const isParallelRequest = "parallel" in options;
435
441
 
@@ -440,7 +446,7 @@ export async function executeSubagentPromptStep(options: DelegatedPromptOptions)
440
446
 
441
447
  const preparedTasks: PreparedDelegatedTask[] = [];
442
448
  for (const task of tasks) {
443
- const preparedTask = await prepareDelegatedTask(task, ctx, currentModel, override, inheritedModel, runtime);
449
+ const preparedTask = await prepareDelegatedTask(task, ctx, currentModel, override, inheritedModel, taskPreamble, runtime);
444
450
  preparedTasks.push(preparedTask);
445
451
  }
446
452
 
package/tool-manager.ts CHANGED
@@ -61,10 +61,10 @@ export function createToolManager(pi: ExtensionAPI, deps: ToolManagerDeps) {
61
61
  "Supports --loop for loops (e.g. 'deslop --loop 5', 'deslop --loop=5', 'deslop --loop' for unlimited until convergence with a 50-iteration cap), " +
62
62
  "--fresh for context collapse between iterations, and --no-converge to disable early stopping for bounded loops. " +
63
63
  "Supports runtime delegation override via --subagent, --subagent=<name>, or --subagent:<name>. " +
64
- "Use 'chain-prompts template1 -> template2' for chaining.",
64
+ "Use 'chain-prompts template1 -> template2' for chaining and add --chain-context to pass previous step summaries into delegated steps.",
65
65
  parameters: Type.Object({
66
66
  command: Type.String({
67
- description: "Template name and arguments (e.g. 'deslop --loop 5 --fresh', 'deslop --subagent:worker', 'deslop --subagent', 'chain-prompts analyze -> fix --loop=3')",
67
+ description: "Template name and arguments (e.g. 'deslop --loop 5 --fresh', 'deslop --subagent:worker', 'deslop --subagent', 'chain-prompts analyze -> fix --chain-context', 'chain-prompts analyze -> fix --loop=3')",
68
68
  }),
69
69
  }),
70
70
  execute: async (_id, params) => {