pi-prompt-template-model 0.5.0 → 0.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,55 @@
2
2
 
3
3
  ## [Unreleased]
4
4
 
5
+ ## [0.6.1] - 2026-03-20
6
+
7
+ ### Added
8
+ - Added delegated prompt execution via direct extension event bus communication with `subagent` (`prompt-template:subagent:*` channels), including delegated custom-message persistence for loop summaries and context carry-forward.
9
+ - Added prompt frontmatter support for `subagent` and `inheritContext`, with `inheritContext: true` mapped to delegated fork context.
10
+ - Fork context preamble is handled by the subagent extension directly (via `DEFAULT_FORK_PREAMBLE` in `types.ts`), applying to all fork-context subagent runs universally.
11
+ - Added runtime delegation override flags: `--subagent`, `--subagent=<name>`, and `--subagent:<name>`.
12
+ - Added live progress widget above editor during delegated subagent runs showing elapsed time, tool count, tokens, current tool, and task preview — matching the native subagent tool card layout.
13
+ - Added styled completion card with task preview, tool call history, expandable output (Ctrl+O), and usage stats footer.
14
+
15
+ ### Changed
16
+ - Updated provider priority for ambiguous bare model IDs to prefer `openai-codex` before `anthropic`, `github-copilot`, and `openrouter`.
17
+ - Updated loop convergence and fresh-summary analysis to account for delegated subagent message payloads.
18
+
19
+ ### Fixed
20
+ - Delegated start-time hangs now fail fast with explicit timeout errors when a subagent run never emits a start signal.
21
+ - Delegated runs no longer treat arbitrary escape-sequence-bearing terminal input as an Esc cancellation signal; only literal Esc cancels.
22
+ - Chain and loop restore paths now use live runtime model/thinking state during cleanup, preventing skipped restore on mid-step failures.
23
+
24
+ ## [0.6.0] - 2026-03-19
25
+
26
+ ### Changed
27
+
28
+ - Clarified the README chain examples so the optional ` -- ` shared-args separator is clearly distinct from loop flags like `--loop`, `--fresh`, and `--no-converge`.
29
+ - Clarified in the README that chain frontmatter declarations support per-step `--loop N` inside the `chain:` value.
30
+ - Argument substitution now accepts `@$` as an alias for `$@` for compatibility with commonly-typed placeholder variants.
31
+ - Skill injection now uses a next-turn context message from `before_agent_start` instead of mutating the turn system prompt.
32
+ - Non-chain templates can now omit `model` and inherit the current session model, so inline `<if-model ...>` rendering and skill injection still work without explicit model frontmatter.
33
+ - Chain steps without `model` now inherit a fixed chain-start model snapshot, so model-less chain steps behave as if that model were declared in frontmatter while remaining deterministic across step switches.
34
+
35
+ ### Fixed
36
+
37
+ - Chain step execution now avoids implicit previous-step model bleed for model-less templates by resolving them against the chain-start model snapshot instead of whichever model was active after the prior step.
38
+ - Model-less prompt loading now skips plain templates that do not use extension features, preventing command collisions with other extension commands like `/review` and `/handover`.
39
+ - Model-less prompt loading now also ignores no-op/invalid-only extension metadata (for example `restore`-only or invalid loop flags), so ineffective frontmatter does not unnecessarily claim command names.
40
+ - Model-less prompt loading now recognizes invalid conditional closers like `</else>` as extension-relevant markup, so those templates stay in this extension path and surface proper conditional-parse warnings instead of silently bypassing extension handling.
41
+ - Model-less prompt execution now tracks runtime model changes (`model_select` + internal switches/restores) and uses that tracked model instead of potentially stale command-context snapshots.
42
+ - Prompt commands now fail fast when a configured `skill` file is missing or unreadable, instead of silently sending the prompt without skill context.
43
+ - Skill resolution now returns a typed success/error outcome that callers handle explicitly, rather than emitting notifications from inside the resolver and returning sentinel `null` values.
44
+ - Session start/switch now clear any queued skill context message so stale pending skill payloads cannot leak across session boundaries.
45
+ - Session start/switch now also clear pending single-command restore state (`previousModel`/`previousThinking`) so restore writes cannot leak into a different session.
46
+ - Skill frontmatter resolution now checks registered skill commands first (`pi.getCommands()` skill entries), accepts both `<name>` and `skill:<name>` values, searches additional standard pi skill locations (`.agents/skills` in project ancestors and `~/.agents/skills`), supports direct `<skill>.md` files alongside `SKILL.md` directories, and rejects traversal-like skill names for path fallback.
47
+ - `extractLoopCount()` now strips repeated unquoted `--loop` tokens once looping is active, preventing stray loop flags from leaking into prompt arguments.
48
+ - Chain frontmatter step parsing now strips repeated per-step `--loop` tokens once a valid per-step loop is resolved, and keeps the first valid value (including mixed invalid/valid numeric sequences like `--loop 1000 --loop 2`).
49
+ - Loop-mode restore now tracks runtime model/thinking state per iteration instead of relying on command-context model snapshots, so model restoration remains correct even when command context values are stale.
50
+ - Chain execution now restores model/thinking state in a `finally` path, so restore still runs after unexpected runtime errors during a chain step and chain cleanup state is still reset even when restore itself fails.
51
+ - Loop and chain executions no longer report `Loop finished`/`Loop converged` when runtime errors abort execution mid-loop.
52
+ - Loop and chain error propagation now preserves thrown falsy values (for example `throw 0`) instead of treating them as success, preventing swallowed errors and false completion notifications.
53
+
5
54
  ## [0.5.0] - 2026-03-17
6
55
 
7
56
  ### Added
package/README.md CHANGED
@@ -15,7 +15,7 @@
15
15
  │ /debug-python ──► Extension detects model + skill │
16
16
  │ │ │
17
17
  │ ▼ │
18
- │ Switches to Sonnet ──► Injects tmux skill into system prompt
18
+ │ Switches to Sonnet ──► Queues tmux skill context for next turn
19
19
  │ │ │
20
20
  │ ▼ │
21
21
  │ Agent responds with Sonnet + tmux expertise │
@@ -47,9 +47,17 @@ pi install npm:pi-prompt-template-model
47
47
 
48
48
  Restart pi to load the extension.
49
49
 
50
+ For delegated subagent execution (`subagent` and `inheritContext` frontmatter), install [pi-subagents](https://github.com/nicobailon/pi-subagents/) separately:
51
+
52
+ ```bash
53
+ pi install npm:pi-subagents
54
+ ```
55
+
56
+ pi-subagents is optional — everything else works without it. If you use `subagent: true` in a prompt template without pi-subagents installed, execution fails fast with a clear error.
57
+
50
58
  ## Quick Start
51
59
 
52
- Add `model` and optionally `skill` to any prompt template:
60
+ Add `model` (or omit it to inherit the current session model) and optionally `skill` to any prompt template:
53
61
 
54
62
  ```markdown
55
63
  ---
@@ -80,16 +88,56 @@ skill: surf
80
88
  $@
81
89
  ```
82
90
 
83
- Here `skill: surf` loads `~/.pi/agent/skills/surf/SKILL.md` and injects its content directly into the system prompt before the agent even sees your task. No decision-making, no read tool, just immediate expertise. It's a forcing function for when you know exactly what workflow the agent needs.
91
+ Here `skill: surf` loads `~/.pi/agent/skills/surf/SKILL.md` and injects its content as a context message on the next turn before the agent handles your task. No decision-making, no read tool, just immediate expertise. It's a forcing function for when you know exactly what workflow the agent needs.
92
+
93
+ ## Delegated Subagent Execution
94
+
95
+ You can delegate a prompt template directly to the `subagent` extension without metaprompted tool-call instructions.
96
+
97
+ ```markdown
98
+ ---
99
+ model: anthropic/claude-sonnet-4-20250514
100
+ subagent: true
101
+ ---
102
+ Review and simplify this code: $@
103
+ ```
104
+
105
+ `subagent: true` uses the default `worker` agent. To target a specific agent, set a string value:
106
+
107
+ ```markdown
108
+ ---
109
+ model: anthropic/claude-sonnet-4-20250514
110
+ subagent: reviewer
111
+ inheritContext: true
112
+ ---
113
+ Audit this diff for correctness and edge cases: $@
114
+ ```
115
+
116
+ `inheritContext: true` maps to delegated `context: "fork"`. It is valid only when `subagent` is configured.
117
+
118
+ Forked subagents receive a default preamble (from the subagent extension's `DEFAULT_FORK_PREAMBLE`) that anchors them to the task and prevents them from continuing the parent conversation.
119
+
120
+ During execution, a live progress widget appears above the editor showing elapsed time, tool count, token usage, and the current/last tool — matching the native subagent tool card layout. The widget updates in real time and clears when the run completes, replaced by a styled completion card with task preview, tool call history, output, and usage stats.
121
+
122
+ You can override delegation at runtime per invocation:
123
+
124
+ - `--subagent`
125
+ - `--subagent=<name>`
126
+ - `--subagent:<name>`
127
+
128
+ Runtime flags take precedence for that invocation only. Bare `--subagent` keeps the template's configured agent when one is present; otherwise it defaults to `worker`.
84
129
 
85
130
  ## Frontmatter Fields
86
131
 
87
132
  | Field | Required | Default | Description |
88
133
  |-------|----------|---------|-------------|
89
- | `model` | Conditional | - | Required for non-chain templates; ignored when `chain` is set |
134
+ | `model` | No | `current` | Target model(s). If omitted on a non-chain template, execution inherits the current session model. Ignored when `chain` is set. |
90
135
  | `chain` | Conditional | - | Chain declaration (`step -> step --loop 2`) for orchestration templates; body is ignored |
91
- | `skill` | No | - | Skill name to inject into system prompt |
136
+ | `skill` | No | - | Skill name to inject as next-turn context message |
92
137
  | `thinking` | No | - | Thinking level: `off`, `minimal`, `low`, `medium`, `high`, `xhigh` |
138
+ | `subagent` | No | - | Delegate execution to subagent mode (`true` for default `worker`, or explicit agent name string) |
139
+ | `inheritContext` | No | `false` | Only with `subagent`; when `true`, delegates with subagent `context: "fork"` |
140
+
93
141
  | `description` | No | - | Shown in autocomplete |
94
142
  | `restore` | No | `true` | Restore previous model and thinking level after response |
95
143
  | `fresh` | No | `false` | Collapse context between loop iterations (applies when looping via `--loop` or frontmatter `loop`) |
@@ -153,7 +201,7 @@ Do a deeper pass and call out subtle risks.
153
201
  </if-model>
154
202
  ```
155
203
 
156
- Conditionals are evaluated against the model that actually runs the command after fallback resolution. That means the same template can render differently depending on which candidate was selected.
204
+ Conditionals are evaluated against the model that actually runs the command. For fallback prompts, that means after candidate resolution; for prompts without `model`, that means the current session model. The same template can render differently depending on which model is active.
157
205
 
158
206
  Supported matches inside `is="..."`:
159
207
 
@@ -183,6 +231,7 @@ Prompt bodies support argument placeholders that expand to command arguments:
183
231
  |-------------|-------------|
184
232
  | `$1`, `$2`, ... | Positional argument (1-indexed) |
185
233
  | `$@` | All arguments joined with spaces |
234
+ | `@$` | Alias for `$@` |
186
235
  | `$ARGUMENTS` | Same as `$@` |
187
236
  | `${@:N}` | All arguments from position N onward |
188
237
  | `${@:N:L}` | L arguments starting from position N |
@@ -203,17 +252,22 @@ Running `/analyze src/main.ts performance edge cases error handling` expands to:
203
252
 
204
253
  ## Skill Resolution
205
254
 
206
- The `skill` field matches the skill's directory name:
255
+ The `skill` field accepts either a bare skill name or a slash-command style name:
207
256
 
208
257
  ```yaml
209
258
  skill: tmux
259
+ # also valid
260
+ skill: skill:tmux
210
261
  ```
211
262
 
212
- Resolves to (checked in order):
213
- 1. `<cwd>/.pi/skills/tmux/SKILL.md` (project)
214
- 2. `~/.pi/agent/skills/tmux/SKILL.md` (user)
263
+ Resolution order:
264
+ 1. Registered skill commands from `pi.getCommands()` (`source: "skill"`), matched by `skill:name` or `name`
265
+ 2. `<cwd>/.pi/skills/<name>/SKILL.md` or `<cwd>/.pi/skills/<name>.md`
266
+ 3. `.agents/skills` in `cwd` and ancestor directories (up to git repo root)
267
+ 4. `~/.pi/agent/skills/<name>/SKILL.md` or `~/.pi/agent/skills/<name>.md`
268
+ 5. `~/.agents/skills/<name>/SKILL.md` or `~/.agents/skills/<name>.md`
215
269
 
216
- This matches pi's precedence - project skills override user skills.
270
+ If the configured skill file is missing or unreadable, the command fails fast and does not send the prompt body to the model.
217
271
 
218
272
  ## Subdirectories
219
273
 
@@ -310,13 +364,13 @@ Switched to Haiku. How can I help?
310
364
 
311
365
  ## Chaining Templates
312
366
 
313
- The `/chain-prompts` command runs multiple templates sequentially. Each step switches to its own model, renders any inline model conditionals against that step’s resolved model, injects its own skill, and the conversation context carries forward between steps.
367
+ The `/chain-prompts` command runs multiple templates sequentially. Each step switches to its own model (or, if the step has no `model`, to the chain-start model snapshot), renders inline model conditionals against that resolved step model, injects its own skill context message, and conversation context carries forward between steps.
314
368
 
315
369
  ```
316
370
  /chain-prompts analyze-code -> fix-plan -> summarize -- src/main.ts
317
371
  ```
318
372
 
319
- This runs `analyze-code` first, then `fix-plan` (which sees the analysis in conversation context), then `summarize`. The `-- src/main.ts` provides shared args substituted into every template's `$@`.
373
+ This runs `analyze-code` first, then `fix-plan` (which sees the analysis in conversation context), then `summarize`. The ` -- src/main.ts` part is optional. The literal ` -- ` separator means "shared args start here": everything after it is passed to each step as `$@`, unless that step already has its own inline args.
320
374
 
321
375
  Each step can also receive its own args, overriding the shared args for that step:
322
376
 
@@ -348,7 +402,7 @@ chain: double-check --loop 2 -> deslop --loop 2
348
402
  ignored — chain templates don't use the body
349
403
  ```
350
404
 
351
- This registers `/review-then-clean` as a command that runs `double-check` twice, then `deslop` twice. Each step references a separate prompt template with its own `model`. The chain template itself doesn't need a `model` field each step uses whatever model its template specifies.
405
+ This registers `/review-then-clean` as a command that runs `double-check` twice, then `deslop` twice. Each step references a separate prompt template. Steps with `model` use their configured model; steps without `model` inherit the chain-start model snapshot (the model active when the chain command began), so behavior stays deterministic even if earlier steps switch models.
352
406
 
353
407
  Per-step `--loop N` repeats that step N times before moving to the next. Per-step convergence applies: if a step makes no file changes on an iteration, its inner loop stops early (unless the step's template has `converge: false`).
354
408
 
@@ -440,14 +494,15 @@ Review the codebase and improve code quality. $@
440
494
  Chains support the same looping forms:
441
495
 
442
496
  ```
497
+ /chain-prompts analyze -> fix --loop 3
498
+ /chain-prompts analyze -> fix --loop=3
499
+ /chain-prompts analyze -> fix --loop
500
+ /chain-prompts analyze -> fix --loop 3 --fresh
501
+ /chain-prompts analyze -> fix --loop 3 --no-converge
443
502
  /chain-prompts analyze -> fix --loop 3 -- src/main.ts
444
- /chain-prompts analyze -> fix --loop=3 -- src/main.ts
445
- /chain-prompts analyze -> fix --loop -- src/main.ts
446
- /chain-prompts analyze -> fix --loop 3 --fresh -- src/main.ts
447
- /chain-prompts analyze -> fix --loop 3 --no-converge -- src/main.ts
448
503
  ```
449
504
 
450
- This runs the full chain (analyze → fix) three times. Convergence detection applies across all steps in each iteration — if no step made file changes, the loop stops. Each iteration re-reads prompts from disk, so template edits take effect between iterations. The status bar shows `loop 2/3` during execution.
505
+ This runs the full chain (analyze → fix) three times. The final example adds optional shared args: ` -- src/main.ts` means "pass `src/main.ts` to any step that doesn't already have its own args." If you don't need shared args, leave that part out entirely. Convergence detection applies across all steps in each iteration — if no step made file changes, the loop stops. Each iteration re-reads prompts from disk, so template edits take effect between iterations. The status bar shows `loop 2/3` during execution. Chain frontmatter declarations also support per-step `--loop N` inside the `chain:` value (for example `chain: double-check --loop 3 -> simplify -> deslop`).
451
506
 
452
507
  ## Agent Tool
453
508
 
@@ -462,6 +517,8 @@ Once enabled, the agent sees `run-prompt` in its tool list and can call it with
462
517
  ```
463
518
  run-prompt({ command: "deslop --loop 5 --fresh" })
464
519
  run-prompt({ command: "deslop --loop" })
520
+ run-prompt({ command: "deslop --subagent" })
521
+ run-prompt({ command: "deslop --subagent:reviewer" })
465
522
  run-prompt({ command: "chain-prompts analyze -> fix --loop 3" })
466
523
  ```
467
524
 
@@ -499,11 +556,13 @@ These commands work in print mode too:
499
556
  pi -p "/debug-python my code crashes on line 42"
500
557
  ```
501
558
 
502
- The model switches, skill injects, agent responds, and output prints to stdout. Useful for scripting or piping to other tools.
559
+ The model switches, a skill context message is injected, the agent responds, and output prints to stdout. Useful for scripting or piping to other tools.
503
560
 
504
561
  ## Limitations
505
562
 
506
563
  - Prompt files are reloaded on session start and whenever an extension-owned prompt command runs. If you add a brand-new prompt file while already inside a session, run another extension-owned command such as `/chain-prompts`, start a new session, or reload pi so the new slash command is registered.
507
564
  - Model restore state is in-memory. Closing pi mid-response loses restore state.
508
- - Chain steps must reference templates with a `model` field. Chain templates themselves use `chain` and do not execute their own body.
565
+ - Model-less templates are only managed by this extension when they use extension features (for example `skill`, `thinking`, loop flags, or inline `<if-model ...>`). Plain prompt templates without extension features stay with pi's default prompt loader to avoid command conflicts.
566
+ - In chains, model-less steps inherit the chain-start model snapshot, not the immediately previous step model. This is intentional for deterministic behavior.
567
+ - Delegated `subagent` prompts require [pi-subagents](https://github.com/nicobailon/pi-subagents/) (`pi install npm:pi-subagents`).
509
568
  - The `run-prompt` tool must be explicitly enabled with `/prompt-tool on` before the agent can use it.
package/args.ts CHANGED
@@ -11,12 +11,23 @@ export interface LoopFlags {
11
11
  converge: boolean;
12
12
  }
13
13
 
14
+ export interface SubagentOverride {
15
+ enabled: true;
16
+ agent?: string;
17
+ }
18
+
19
+ export interface SubagentOverrideExtraction {
20
+ args: string;
21
+ override?: SubagentOverride;
22
+ }
23
+
14
24
  export function extractLoopCount(argsString: string): LoopExtraction | null {
15
25
  let loopCount: number | null = null;
16
26
  let loopFound = false;
17
27
  let fresh = false;
18
28
  let noConverge = false;
19
29
  const tokensToRemove: Array<{ start: number; end: number }> = [];
30
+ const loopTokenRanges: Array<{ start: number; end: number }> = [];
20
31
 
21
32
  let i = 0;
22
33
  while (i < argsString.length) {
@@ -39,45 +50,46 @@ export function extractLoopCount(argsString: string): LoopExtraction | null {
39
50
  while (i < argsString.length && !/\s/.test(argsString[i])) i++;
40
51
  const token = argsString.slice(tokenStart, i);
41
52
 
42
- if (!loopFound && (token === "--loop" || token.startsWith("--loop="))) {
43
- if (token.startsWith("--loop=")) {
44
- const value = token.slice("--loop=".length);
45
- if (/^\d+$/.test(value)) {
46
- const parsed = parseInt(value, 10);
47
- if (parsed >= 1 && parsed <= 999) {
48
- loopFound = true;
49
- loopCount = parsed;
50
- tokensToRemove.push({ start: tokenStart, end: i });
51
- }
53
+ if (token.startsWith("--loop=")) {
54
+ loopTokenRanges.push({ start: tokenStart, end: i });
55
+ const value = token.slice("--loop=".length);
56
+ if (/^\d+$/.test(value)) {
57
+ const parsed = parseInt(value, 10);
58
+ if (parsed >= 1 && parsed <= 999 && !loopFound) {
59
+ loopFound = true;
60
+ loopCount = parsed;
52
61
  }
53
- } else {
54
- let lookahead = i;
55
- while (lookahead < argsString.length && /\s/.test(argsString[lookahead])) lookahead++;
56
-
57
- if (lookahead < argsString.length && argsString[lookahead] !== '"' && argsString[lookahead] !== "'") {
58
- const nextTokenStart = lookahead;
59
- while (lookahead < argsString.length && !/\s/.test(argsString[lookahead])) lookahead++;
60
- const nextToken = argsString.slice(nextTokenStart, lookahead);
61
-
62
- if (/^\d+$/.test(nextToken)) {
63
- const parsed = parseInt(nextToken, 10);
64
- if (parsed >= 1 && parsed <= 999) {
65
- loopFound = true;
66
- loopCount = parsed;
67
- tokensToRemove.push({ start: tokenStart, end: i }, { start: nextTokenStart, end: lookahead });
68
- i = lookahead;
69
- }
70
- } else {
62
+ }
63
+ continue;
64
+ }
65
+
66
+ if (token === "--loop") {
67
+ let lookahead = i;
68
+ while (lookahead < argsString.length && /\s/.test(argsString[lookahead])) lookahead++;
69
+
70
+ if (lookahead < argsString.length && argsString[lookahead] !== '"' && argsString[lookahead] !== "'") {
71
+ const nextTokenStart = lookahead;
72
+ while (lookahead < argsString.length && !/\s/.test(argsString[lookahead])) lookahead++;
73
+ const nextToken = argsString.slice(nextTokenStart, lookahead);
74
+
75
+ if (/^\d+$/.test(nextToken)) {
76
+ loopTokenRanges.push({ start: tokenStart, end: i }, { start: nextTokenStart, end: lookahead });
77
+ const parsed = parseInt(nextToken, 10);
78
+ if (parsed >= 1 && parsed <= 999 && !loopFound) {
71
79
  loopFound = true;
72
- loopCount = null;
73
- tokensToRemove.push({ start: tokenStart, end: i });
80
+ loopCount = parsed;
74
81
  }
75
- } else {
76
- loopFound = true;
77
- loopCount = null;
78
- tokensToRemove.push({ start: tokenStart, end: i });
82
+ i = lookahead;
83
+ continue;
79
84
  }
80
85
  }
86
+
87
+ loopTokenRanges.push({ start: tokenStart, end: i });
88
+ if (!loopFound) {
89
+ loopFound = true;
90
+ loopCount = null;
91
+ }
92
+ continue;
81
93
  }
82
94
 
83
95
  if (token === "--fresh") {
@@ -91,15 +103,16 @@ export function extractLoopCount(argsString: string): LoopExtraction | null {
91
103
  }
92
104
  }
93
105
 
94
- if (loopCount === null && !loopFound) return null;
106
+ if (!loopFound) return null;
95
107
 
96
- tokensToRemove.sort((a, b) => b.start - a.start);
108
+ const allRanges = [...tokensToRemove, ...loopTokenRanges];
109
+ allRanges.sort((a, b) => b.start - a.start);
97
110
  let cleaned = argsString;
98
- for (const { start, end } of tokensToRemove) {
111
+ for (const { start, end } of allRanges) {
99
112
  cleaned = cleaned.slice(0, start) + cleaned.slice(end);
100
113
  }
101
114
 
102
- const converge = loopFound && loopCount === null ? true : !noConverge;
115
+ const converge = loopCount === null ? true : !noConverge;
103
116
  return { args: cleaned.trim(), loopCount, fresh, converge };
104
117
  }
105
118
 
@@ -149,6 +162,58 @@ export function extractLoopFlags(argsString: string): LoopFlags {
149
162
  return { args: cleaned.trim(), fresh, converge: !noConverge };
150
163
  }
151
164
 
165
+ export function extractSubagentOverride(argsString: string): SubagentOverrideExtraction {
166
+ let override: SubagentOverride | undefined;
167
+ const tokensToRemove: Array<{ start: number; end: number }> = [];
168
+
169
+ let i = 0;
170
+ while (i < argsString.length) {
171
+ const char = argsString[i];
172
+
173
+ if (char === '"' || char === "'") {
174
+ const quote = char;
175
+ i++;
176
+ while (i < argsString.length && argsString[i] !== quote) i++;
177
+ if (i < argsString.length) i++;
178
+ continue;
179
+ }
180
+
181
+ if (/\s/.test(char)) {
182
+ i++;
183
+ continue;
184
+ }
185
+
186
+ const tokenStart = i;
187
+ while (i < argsString.length && !/\s/.test(argsString[i])) i++;
188
+ const token = argsString.slice(tokenStart, i);
189
+
190
+ if (token === "--subagent") {
191
+ tokensToRemove.push({ start: tokenStart, end: i });
192
+ override = { enabled: true };
193
+ continue;
194
+ }
195
+
196
+ if (token.startsWith("--subagent=") || token.startsWith("--subagent:")) {
197
+ tokensToRemove.push({ start: tokenStart, end: i });
198
+ const value = token.includes("=") ? token.slice("--subagent=".length) : token.slice("--subagent:".length);
199
+ override = value ? { enabled: true, agent: value } : { enabled: true };
200
+ }
201
+ }
202
+
203
+ if (!override) return { args: argsString.trim() };
204
+
205
+ tokensToRemove.sort((a, b) => b.start - a.start);
206
+ let cleaned = argsString;
207
+ for (const { start, end } of tokensToRemove) {
208
+ cleaned = cleaned.slice(0, start) + cleaned.slice(end);
209
+ }
210
+
211
+ return {
212
+ args: cleaned.trim(),
213
+ override,
214
+ };
215
+ }
216
+
152
217
  export function splitByUnquotedSeparator(input: string, separator: string): string[] {
153
218
  const parts: string[] = [];
154
219
  let start = 0;
@@ -227,6 +292,7 @@ export function substituteArgs(content: string, args: string[]): string {
227
292
  const allArgs = args.join(" ");
228
293
  result = result.replace(/\$ARGUMENTS/g, allArgs);
229
294
  result = result.replace(/\$@/g, allArgs);
295
+ result = result.replace(/@\$/g, allArgs);
230
296
 
231
297
  return result;
232
298
  }
package/chain-parser.ts CHANGED
@@ -76,19 +76,20 @@ function scanSegmentTokens(segment: string): SegmentToken[] {
76
76
 
77
77
  function extractStepLoopCount(segment: string): { cleanedSegment: string; loopCount?: number } {
78
78
  const tokens = scanSegmentTokens(segment);
79
+ const loopTokenRanges: Array<{ start: number; end: number }> = [];
80
+ let loopCount: number | undefined;
79
81
 
80
82
  for (let i = 1; i < tokens.length; i++) {
81
83
  const token = tokens[i];
82
84
  if (token.quoted) continue;
83
85
 
84
86
  if (token.value.startsWith("--loop=")) {
87
+ loopTokenRanges.push({ start: token.start, end: token.end });
85
88
  const value = token.value.slice("--loop=".length);
86
- if (/^\d+$/.test(value)) {
87
- const parsed = parseInt(value, 10);
88
- if (parsed >= 1 && parsed <= 999) {
89
- const cleanedSegment = `${segment.slice(0, token.start)}${segment.slice(token.end)}`.trim();
90
- return { cleanedSegment, loopCount: parsed };
91
- }
89
+ if (!/^\d+$/.test(value)) continue;
90
+ const parsed = parseInt(value, 10);
91
+ if (parsed >= 1 && parsed <= 999 && loopCount === undefined) {
92
+ loopCount = parsed;
92
93
  }
93
94
  continue;
94
95
  }
@@ -96,16 +97,28 @@ function extractStepLoopCount(segment: string): { cleanedSegment: string; loopCo
96
97
  if (token.value === "--loop" && i + 1 < tokens.length) {
97
98
  const next = tokens[i + 1];
98
99
  if (!next.quoted && /^\d+$/.test(next.value)) {
100
+ loopTokenRanges.push({ start: token.start, end: token.end }, { start: next.start, end: next.end });
99
101
  const parsed = parseInt(next.value, 10);
100
- if (parsed >= 1 && parsed <= 999) {
101
- const cleanedSegment = `${segment.slice(0, token.start)}${segment.slice(next.end)}`.trim();
102
- return { cleanedSegment, loopCount: parsed };
102
+ if (parsed >= 1 && parsed <= 999 && loopCount === undefined) {
103
+ loopCount = parsed;
103
104
  }
105
+ i++;
106
+ continue;
104
107
  }
105
108
  }
106
109
  }
107
110
 
108
- return { cleanedSegment: segment };
111
+ if (loopCount === undefined || loopTokenRanges.length === 0) {
112
+ return { cleanedSegment: segment };
113
+ }
114
+
115
+ loopTokenRanges.sort((a, b) => b.start - a.start);
116
+ let cleanedSegment = segment;
117
+ for (const { start, end } of loopTokenRanges) {
118
+ cleanedSegment = `${cleanedSegment.slice(0, start)}${cleanedSegment.slice(end)}`;
119
+ }
120
+
121
+ return { cleanedSegment: cleanedSegment.trim(), loopCount };
109
122
  }
110
123
 
111
124
  export function parseChainSteps(args: string): ParsedChainSteps {
@@ -122,12 +135,13 @@ export function parseChainSteps(args: string): ParsedChainSteps {
122
135
  invalidSegments.push(rawSegment);
123
136
  continue;
124
137
  }
125
- const tokens = parseCommandArgs(segment);
138
+ const { cleanedSegment, loopCount } = extractStepLoopCount(segment);
139
+ const tokens = parseCommandArgs(cleanedSegment);
126
140
  if (tokens.length === 0) {
127
141
  invalidSegments.push(segment);
128
142
  continue;
129
143
  }
130
- steps.push({ name: tokens[0], args: tokens.slice(1) });
144
+ steps.push({ name: tokens[0], args: tokens.slice(1), loopCount });
131
145
  }
132
146
 
133
147
  return { steps, sharedArgs: parseCommandArgs(argsPart), invalidSegments };