pi-prompt-template-model 0.4.0 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,56 @@
1
1
  # Changelog
2
2
 
3
+ ## [Unreleased]
4
+
5
+ ## [0.6.0] - 2026-03-19
6
+
7
+ ### Changed
8
+
9
+ - Clarified the README chain examples so the optional ` -- ` shared-args separator is clearly distinct from loop flags like `--loop`, `--fresh`, and `--no-converge`.
10
+ - Clarified in the README that chain frontmatter declarations support per-step `--loop N` inside the `chain:` value.
11
+ - Argument substitution now accepts `@$` as an alias for `$@` for compatibility with commonly-typed placeholder variants.
12
+ - Skill injection now uses a next-turn context message from `before_agent_start` instead of mutating the turn system prompt.
13
+ - Non-chain templates can now omit `model` and inherit the current session model, so inline `<if-model ...>` rendering and skill injection still work without explicit model frontmatter.
14
+ - Chain steps without `model` now inherit a fixed chain-start model snapshot, so model-less chain steps behave as if that model were declared in frontmatter while remaining deterministic across step switches.
15
+
16
+ ### Fixed
17
+
18
+ - Chain step execution now avoids implicit previous-step model bleed for model-less templates by resolving them against the chain-start model snapshot instead of whichever model was active after the prior step.
19
+ - Model-less prompt loading now skips plain templates that do not use extension features, preventing command collisions with other extension commands like `/review` and `/handover`.
20
+ - Model-less prompt loading now also ignores no-op/invalid-only extension metadata (for example `restore`-only or invalid loop flags), so ineffective frontmatter does not unnecessarily claim command names.
21
+ - Model-less prompt loading now recognizes invalid conditional closers like `</else>` as extension-relevant markup, so those templates stay in this extension path and surface proper conditional-parse warnings instead of silently bypassing extension handling.
22
+ - Model-less prompt execution now tracks runtime model changes (`model_select` + internal switches/restores) and uses that tracked model instead of potentially stale command-context snapshots.
23
+ - Prompt commands now fail fast when a configured `skill` file is missing or unreadable, instead of silently sending the prompt without skill context.
24
+ - Skill resolution now returns a typed success/error outcome that callers handle explicitly, rather than emitting notifications from inside the resolver and returning sentinel `null` values.
25
+ - Session start/switch now clear any queued skill context message so stale pending skill payloads cannot leak across session boundaries.
26
+ - Session start/switch now also clear pending single-command restore state (`previousModel`/`previousThinking`) so restore writes cannot leak into a different session.
27
+ - Skill frontmatter resolution now checks registered skill commands first (`pi.getCommands()` skill entries), accepts both `<name>` and `skill:<name>` values, searches additional standard pi skill locations (`.agents/skills` in project ancestors and `~/.agents/skills`), supports direct `<skill>.md` files alongside `SKILL.md` directories, and rejects traversal-like skill names for path fallback.
28
+ - `extractLoopCount()` now strips repeated unquoted `--loop` tokens once looping is active, preventing stray loop flags from leaking into prompt arguments.
29
+ - Chain frontmatter step parsing now strips repeated per-step `--loop` tokens once a valid per-step loop is resolved, and keeps the first valid value (including mixed invalid/valid numeric sequences like `--loop 1000 --loop 2`).
30
+ - Loop-mode restore now tracks runtime model/thinking state per iteration instead of relying on command-context model snapshots, so model restoration remains correct even when command context values are stale.
31
+ - Chain execution now restores model/thinking state in a `finally` path, so restore still runs after unexpected runtime errors during a chain step and chain cleanup state is still reset even when restore itself fails.
32
+ - Loop and chain executions no longer report `Loop finished`/`Loop converged` when runtime errors abort execution mid-loop.
33
+ - Loop and chain error propagation now preserves thrown falsy values (for example `throw 0`) instead of treating them as success, preventing swallowed errors and false completion notifications.
34
+
35
+ ## [0.5.0] - 2026-03-17
36
+
37
+ ### Added
38
+
39
+ - Loop execution via `--loop` flag: `--loop N`, `--loop=N` to run a prompt N times (1-999), or bare `--loop` for unlimited until convergence with a 50-iteration safety cap. Bare `--loop` always forces convergence on.
40
+ - Frontmatter loop controls: templates can now set `loop: N` (1-999) and `converge: false` defaults; CLI `--loop` overrides frontmatter `loop`, and `--no-converge` disables convergence for bounded loops.
41
+ - Convergence detection: loops stop early when an iteration makes no file changes (`write`/`edit`). Enabled by default; `--no-converge` opts out.
42
+ - Fresh context mode: `--fresh` flag or `fresh: true` frontmatter collapses conversation between loop iterations, keeping only accumulated summaries. Saves tokens on long loops.
43
+ - Loop iteration context injected into the system prompt so the agent builds on previous work across iterations.
44
+ - Loop progress indicator in the TUI status bar.
45
+ - `run-prompt` agent tool: the agent can run prompt templates, chains, and loops on its own. Opt-in via `/prompt-tool on [guidance]`. Config persists in `~/.pi/agent/prompt-template-model.json`.
46
+ - Chain templates: new `chain` frontmatter field to declare reusable template pipelines (`chain: double-check --loop 2 -> deslop --loop 2`). Per-step `--loop N` loops each step independently. No `model` required — each step uses its own. Supports `loop`, `fresh`, `converge`, `restore` for overall execution control. Chain nesting is rejected at runtime.
47
+
48
+ ### Fixed
49
+
50
+ - `readSkillContent` no longer swallows read errors. The caller now sees the actual error message (e.g., permission denied) instead of a generic "Failed to read skill" notification.
51
+ - `restoreSessionState` no longer clears `pendingSkill` as a side effect unrelated to model/thinking restoration.
52
+ - Error diagnostics now consistently use `String(error)` instead of hardcoded fallback strings.
53
+
3
54
  ## [0.4.0] - 2026-03-13
4
55
 
5
56
  ### Added
package/README.md CHANGED
@@ -15,7 +15,7 @@
15
15
  │ /debug-python ──► Extension detects model + skill │
16
16
  │ │ │
17
17
  │ ▼ │
18
- │ Switches to Sonnet ──► Injects tmux skill into system prompt
18
+ │ Switches to Sonnet ──► Queues tmux skill context for next turn
19
19
  │ │ │
20
20
  │ ▼ │
21
21
  │ Agent responds with Sonnet + tmux expertise │
@@ -49,7 +49,7 @@ Restart pi to load the extension.
49
49
 
50
50
  ## Quick Start
51
51
 
52
- Add `model` and optionally `skill` to any prompt template:
52
+ Add `model` (or omit it to inherit the current session model) and optionally `skill` to any prompt template:
53
53
 
54
54
  ```markdown
55
55
  ---
@@ -80,17 +80,21 @@ skill: surf
80
80
  $@
81
81
  ```
82
82
 
83
- Here `skill: surf` loads `~/.pi/agent/skills/surf/SKILL.md` and injects its content directly into the system prompt before the agent even sees your task. No decision-making, no read tool, just immediate expertise. It's a forcing function for when you know exactly what workflow the agent needs.
83
+ Here `skill: surf` loads `~/.pi/agent/skills/surf/SKILL.md` and injects its content as a context message on the next turn before the agent handles your task. No decision-making, no read tool, just immediate expertise. It's a forcing function for when you know exactly what workflow the agent needs.
84
84
 
85
85
  ## Frontmatter Fields
86
86
 
87
87
  | Field | Required | Default | Description |
88
88
  |-------|----------|---------|-------------|
89
- | `model` | Yes | - | Model ID, `provider/model-id`, or comma-separated list for fallback |
90
- | `skill` | No | - | Skill name to inject into system prompt |
89
+ | `model` | No | `current` | Target model(s). If omitted on a non-chain template, execution inherits the current session model. Ignored when `chain` is set. |
90
+ | `chain` | Conditional | - | Chain declaration (`step -> step --loop 2`) for orchestration templates; body is ignored |
91
+ | `skill` | No | - | Skill name to inject as next-turn context message |
91
92
  | `thinking` | No | - | Thinking level: `off`, `minimal`, `low`, `medium`, `high`, `xhigh` |
92
93
  | `description` | No | - | Shown in autocomplete |
93
94
  | `restore` | No | `true` | Restore previous model and thinking level after response |
95
+ | `fresh` | No | `false` | Collapse context between loop iterations (applies when looping via `--loop` or frontmatter `loop`) |
96
+ | `loop` | No | - | Default loop count for this template (`1`-`999`) |
97
+ | `converge` | No | `true` | Loop convergence behavior; set `false` to always run all iterations |
94
98
 
95
99
  ## Model Format
96
100
 
@@ -149,7 +153,7 @@ Do a deeper pass and call out subtle risks.
149
153
  </if-model>
150
154
  ```
151
155
 
152
- Conditionals are evaluated against the model that actually runs the command after fallback resolution. That means the same template can render differently depending on which candidate was selected.
156
+ Conditionals are evaluated against the model that actually runs the command. For fallback prompts, that means after candidate resolution; for prompts without `model`, that means the current session model. The same template can render differently depending on which model is active.
153
157
 
154
158
  Supported matches inside `is="..."`:
155
159
 
@@ -179,6 +183,7 @@ Prompt bodies support argument placeholders that expand to command arguments:
179
183
  |-------------|-------------|
180
184
  | `$1`, `$2`, ... | Positional argument (1-indexed) |
181
185
  | `$@` | All arguments joined with spaces |
186
+ | `@$` | Alias for `$@` |
182
187
  | `$ARGUMENTS` | Same as `$@` |
183
188
  | `${@:N}` | All arguments from position N onward |
184
189
  | `${@:N:L}` | L arguments starting from position N |
@@ -199,17 +204,22 @@ Running `/analyze src/main.ts performance edge cases error handling` expands to:
199
204
 
200
205
  ## Skill Resolution
201
206
 
202
- The `skill` field matches the skill's directory name:
207
+ The `skill` field accepts either a bare skill name or a slash-command style name:
203
208
 
204
209
  ```yaml
205
210
  skill: tmux
211
+ # also valid
212
+ skill: skill:tmux
206
213
  ```
207
214
 
208
- Resolves to (checked in order):
209
- 1. `<cwd>/.pi/skills/tmux/SKILL.md` (project)
210
- 2. `~/.pi/agent/skills/tmux/SKILL.md` (user)
215
+ Resolution order:
216
+ 1. Registered skill commands from `pi.getCommands()` (`source: "skill"`), matched by `skill:name` or `name`
217
+ 2. `<cwd>/.pi/skills/<name>/SKILL.md` or `<cwd>/.pi/skills/<name>.md`
218
+ 3. `.agents/skills` in `cwd` and ancestor directories (up to git repo root)
219
+ 4. `~/.pi/agent/skills/<name>/SKILL.md` or `~/.pi/agent/skills/<name>.md`
220
+ 5. `~/.agents/skills/<name>/SKILL.md` or `~/.agents/skills/<name>.md`
211
221
 
212
- This matches pi's precedence - project skills override user skills.
222
+ If the configured skill file is missing or unreadable, the command fails fast and does not send the prompt body to the model.
213
223
 
214
224
  ## Subdirectories
215
225
 
@@ -306,13 +316,13 @@ Switched to Haiku. How can I help?
306
316
 
307
317
  ## Chaining Templates
308
318
 
309
- The `/chain-prompts` command runs multiple templates sequentially. Each step switches to its own model, renders any inline model conditionals against that step’s resolved model, injects its own skill, and the conversation context carries forward between steps.
319
+ The `/chain-prompts` command runs multiple templates sequentially. Each step switches to its own model (or, if the step has no `model`, to the chain-start model snapshot), renders inline model conditionals against that resolved step model, injects its own skill context message, and conversation context carries forward between steps.
310
320
 
311
321
  ```
312
322
  /chain-prompts analyze-code -> fix-plan -> summarize -- src/main.ts
313
323
  ```
314
324
 
315
- This runs `analyze-code` first, then `fix-plan` (which sees the analysis in conversation context), then `summarize`. The `-- src/main.ts` provides shared args substituted into every template's `$@`.
325
+ This runs `analyze-code` first, then `fix-plan` (which sees the analysis in conversation context), then `summarize`. The ` -- src/main.ts` part is optional. The literal ` -- ` separator means "shared args start here": everything after it is passed to each step as `$@`, unless that step already has its own inline args.
316
326
 
317
327
  Each step can also receive its own args, overriding the shared args for that step:
318
328
 
@@ -332,6 +342,150 @@ Step 1 uses its per-step args (`"error handling"`), steps 2 and 3 fall back to t
332
342
 
333
343
  The chain captures your current model and thinking level before starting, and restores them when the chain finishes (or if any step fails mid-chain). Individual template `restore` settings are ignored during chain execution.
334
344
 
345
+ ### Chain Templates
346
+
347
+ For reusable pipelines, define a chain in frontmatter instead of typing `/chain-prompts` every time:
348
+
349
+ ```markdown
350
+ ---
351
+ description: Review then clean up
352
+ chain: double-check --loop 2 -> deslop --loop 2
353
+ ---
354
+ ignored — chain templates don't use the body
355
+ ```
356
+
357
+ This registers `/review-then-clean` as a command that runs `double-check` twice, then `deslop` twice. Each step references a separate prompt template. Steps with `model` use their configured model; steps without `model` inherit the chain-start model snapshot (the model active when the chain command began), so behavior stays deterministic even if earlier steps switch models.
358
+
359
+ Per-step `--loop N` repeats that step N times before moving to the next. Per-step convergence applies: if a step makes no file changes on an iteration, its inner loop stops early (unless the step's template has `converge: false`).
360
+
361
+ Chain templates support `loop`, `fresh`, `converge`, and `restore` in their frontmatter for overall execution control:
362
+
363
+ ```markdown
364
+ ---
365
+ chain: analyze -> fix
366
+ loop: 3
367
+ fresh: true
368
+ converge: false
369
+ ---
370
+ ```
371
+
372
+ This runs the full analyze → fix chain 3 times, with fresh context between iterations and no early stopping. CLI `--loop` overrides frontmatter `loop` when invoking the command.
373
+
374
+ Chain nesting is not supported — a chain template's steps cannot reference other chain templates.
375
+
376
+ ## Loop Execution
377
+
378
+ Looping uses the `--loop` flag:
379
+
380
+ ```
381
+ /deslop --loop 5
382
+ /deslop --loop=5
383
+ /deslop "focus on performance" --loop 3
384
+ /deslop --loop
385
+ ```
386
+
387
+ `--loop` without a number means unlimited looping until convergence, with a built-in safety cap of 50 iterations.
388
+
389
+ You can also set a default loop count in frontmatter:
390
+
391
+ ```markdown
392
+ ---
393
+ model: claude-sonnet-4-20250514
394
+ loop: 5
395
+ ---
396
+ ...
397
+ ```
398
+
399
+ With that template, `/deslop` runs 5 iterations by default. CLI `--loop` overrides frontmatter (`/deslop --loop 3` runs 3 iterations).
400
+
401
+ The agent runs the same prompt N times. Context accumulates across iterations — by iteration 3, the agent sees the full conversation from iterations 1 and 2 and builds on that work. Use `--fresh` to collapse context between iterations instead (see below).
402
+
403
+ By default, the loop stops early if an iteration makes no file changes (no `write` or `edit` tool calls), since there's nothing left to improve. Add `--no-converge` to always run all iterations for bounded loops, or set `converge: false` in frontmatter:
404
+
405
+ ```
406
+ /deslop --loop 5 --no-converge
407
+ ```
408
+
409
+ ```markdown
410
+ ---
411
+ model: claude-sonnet-4-20250514
412
+ loop: 5
413
+ converge: false
414
+ ---
415
+ ...
416
+ ```
417
+
418
+ Bare `--loop` always forces convergence on (even with `--no-converge` or `converge: false`) because its intent is "run until no changes." `--loop N` and `--loop=N` support range 1-999. Quoted `"--loop"` is treated as a regular argument.
419
+
420
+ Model, thinking level, and skill are maintained throughout the loop. If the template has `restore: true` (the default), the original model and thinking level are restored after the final iteration (or if any iteration fails). If `restore: false`, the switched model persists after the loop ends.
421
+
422
+ ### Fresh Context
423
+
424
+ Add `--fresh` to collapse context between iterations:
425
+
426
+ ```
427
+ /deslop --loop 5 --fresh
428
+ /deslop --fresh # when frontmatter sets loop: N
429
+ ```
430
+
431
+ Each iteration's conversation is collapsed to a brief summary (files read, files modified, outcome) before the next iteration starts. The agent sees accumulated summaries from all previous iterations but not the full conversation. This saves tokens on long loops and gives each iteration a clean slate for reasoning.
432
+
433
+ You can also set `fresh: true` in the template frontmatter to make it the default when looped:
434
+
435
+ ```markdown
436
+ ---
437
+ description: Remove AI slop from code
438
+ model: claude-sonnet-4-20250514
439
+ fresh: true
440
+ ---
441
+ Review the codebase and improve code quality. $@
442
+ ```
443
+
444
+ ### Loop with Chains
445
+
446
+ Chains support the same looping forms:
447
+
448
+ ```
449
+ /chain-prompts analyze -> fix --loop 3
450
+ /chain-prompts analyze -> fix --loop=3
451
+ /chain-prompts analyze -> fix --loop
452
+ /chain-prompts analyze -> fix --loop 3 --fresh
453
+ /chain-prompts analyze -> fix --loop 3 --no-converge
454
+ /chain-prompts analyze -> fix --loop 3 -- src/main.ts
455
+ ```
456
+
457
+ This runs the full chain (analyze → fix) three times. The final example adds optional shared args: ` -- src/main.ts` means "pass `src/main.ts` to any step that doesn't already have its own args." If you don't need shared args, leave that part out entirely. Convergence detection applies across all steps in each iteration — if no step made file changes, the loop stops. Each iteration re-reads prompts from disk, so template edits take effect between iterations. The status bar shows `loop 2/3` during execution. Chain frontmatter declarations also support per-step `--loop N` inside the `chain:` value (for example `chain: double-check --loop 3 -> simplify -> deslop`).
458
+
459
+ ## Agent Tool
460
+
461
+ The agent can run prompt templates on its own via the `run-prompt` tool. Disabled by default — enable it with:
462
+
463
+ ```
464
+ /prompt-tool on
465
+ ```
466
+
467
+ Once enabled, the agent sees `run-prompt` in its tool list and can call it with any template command:
468
+
469
+ ```
470
+ run-prompt({ command: "deslop --loop 5 --fresh" })
471
+ run-prompt({ command: "deslop --loop" })
472
+ run-prompt({ command: "chain-prompts analyze -> fix --loop 3" })
473
+ ```
474
+
475
+ The tool queues the command for execution when the agent's current turn ends. All loop, fresh context, and convergence features work the same as when invoked via slash commands.
476
+
477
+ Add guidance to steer when the agent uses it:
478
+
479
+ ```
480
+ /prompt-tool on Use run-prompt for iterative code improvement tasks
481
+ /prompt-tool guidance Use sparingly, only for multi-pass refinement
482
+ /prompt-tool guidance clear
483
+ /prompt-tool off
484
+ /prompt-tool
485
+ ```
486
+
487
+ Config persists across sessions in `~/.pi/agent/prompt-template-model.json`.
488
+
335
489
  ## Autocomplete Display
336
490
 
337
491
  Commands show model, thinking level, and skill in the description:
@@ -352,10 +506,12 @@ These commands work in print mode too:
352
506
  pi -p "/debug-python my code crashes on line 42"
353
507
  ```
354
508
 
355
- The model switches, skill injects, agent responds, and output prints to stdout. Useful for scripting or piping to other tools.
509
+ The model switches, a skill context message is injected, the agent responds, and output prints to stdout. Useful for scripting or piping to other tools.
356
510
 
357
511
  ## Limitations
358
512
 
359
513
  - Prompt files are reloaded on session start and whenever an extension-owned prompt command runs. If you add a brand-new prompt file while already inside a session, run another extension-owned command such as `/chain-prompts`, start a new session, or reload pi so the new slash command is registered.
360
514
  - Model restore state is in-memory. Closing pi mid-response loses restore state.
361
- - Only templates with a `model` field can be chained. Templates without `model` are handled by pi core and invisible to this extension.
515
+ - Model-less templates are only managed by this extension when they use extension features (for example `skill`, `thinking`, loop flags, or inline `<if-model ...>`). Plain prompt templates without extension features stay with pi's default prompt loader to avoid command conflicts.
516
+ - In chains, model-less steps inherit the chain-start model snapshot, not the immediately previous step model. This is intentional for deterministic behavior.
517
+ - The `run-prompt` tool must be explicitly enabled with `/prompt-tool on` before the agent can use it.
package/args.ts CHANGED
@@ -1,3 +1,179 @@
1
/** Result of extractLoopCount(): loop-related flags parsed out of an argument string. */
export interface LoopExtraction {
  /** Argument string with every loop-related token removed, then trimmed. */
  args: string;
  /** Requested iteration count (1-999), or null for bare `--loop` (unlimited until convergence). */
  loopCount: number | null;
  /** True when an unquoted `--fresh` flag was present. */
  fresh: boolean;
  /** Convergence behavior; forced true for bare `--loop`, otherwise false only when `--no-converge` was present. */
  converge: boolean;
}

/** Result of extractLoopFlags(): fresh/converge flags parsed out of an argument string. */
export interface LoopFlags {
  /** Argument string with flag tokens removed, then trimmed. */
  args: string;
  /** True when an unquoted `--fresh` flag was present. */
  fresh: boolean;
  /** True unless an unquoted `--no-converge` flag was present. */
  converge: boolean;
}
13
+
14
+ export function extractLoopCount(argsString: string): LoopExtraction | null {
15
+ let loopCount: number | null = null;
16
+ let loopFound = false;
17
+ let fresh = false;
18
+ let noConverge = false;
19
+ const tokensToRemove: Array<{ start: number; end: number }> = [];
20
+ const loopTokenRanges: Array<{ start: number; end: number }> = [];
21
+
22
+ let i = 0;
23
+ while (i < argsString.length) {
24
+ const char = argsString[i];
25
+
26
+ if (char === '"' || char === "'") {
27
+ const quote = char;
28
+ i++;
29
+ while (i < argsString.length && argsString[i] !== quote) i++;
30
+ if (i < argsString.length) i++;
31
+ continue;
32
+ }
33
+
34
+ if (/\s/.test(char)) {
35
+ i++;
36
+ continue;
37
+ }
38
+
39
+ const tokenStart = i;
40
+ while (i < argsString.length && !/\s/.test(argsString[i])) i++;
41
+ const token = argsString.slice(tokenStart, i);
42
+
43
+ if (token.startsWith("--loop=")) {
44
+ loopTokenRanges.push({ start: tokenStart, end: i });
45
+ const value = token.slice("--loop=".length);
46
+ if (/^\d+$/.test(value)) {
47
+ const parsed = parseInt(value, 10);
48
+ if (parsed >= 1 && parsed <= 999 && !loopFound) {
49
+ loopFound = true;
50
+ loopCount = parsed;
51
+ }
52
+ }
53
+ continue;
54
+ }
55
+
56
+ if (token === "--loop") {
57
+ let lookahead = i;
58
+ while (lookahead < argsString.length && /\s/.test(argsString[lookahead])) lookahead++;
59
+
60
+ if (lookahead < argsString.length && argsString[lookahead] !== '"' && argsString[lookahead] !== "'") {
61
+ const nextTokenStart = lookahead;
62
+ while (lookahead < argsString.length && !/\s/.test(argsString[lookahead])) lookahead++;
63
+ const nextToken = argsString.slice(nextTokenStart, lookahead);
64
+
65
+ if (/^\d+$/.test(nextToken)) {
66
+ loopTokenRanges.push({ start: tokenStart, end: i }, { start: nextTokenStart, end: lookahead });
67
+ const parsed = parseInt(nextToken, 10);
68
+ if (parsed >= 1 && parsed <= 999 && !loopFound) {
69
+ loopFound = true;
70
+ loopCount = parsed;
71
+ }
72
+ i = lookahead;
73
+ continue;
74
+ }
75
+ }
76
+
77
+ loopTokenRanges.push({ start: tokenStart, end: i });
78
+ if (!loopFound) {
79
+ loopFound = true;
80
+ loopCount = null;
81
+ }
82
+ continue;
83
+ }
84
+
85
+ if (token === "--fresh") {
86
+ fresh = true;
87
+ tokensToRemove.push({ start: tokenStart, end: i });
88
+ }
89
+
90
+ if (token === "--no-converge") {
91
+ noConverge = true;
92
+ tokensToRemove.push({ start: tokenStart, end: i });
93
+ }
94
+ }
95
+
96
+ if (!loopFound) return null;
97
+
98
+ const allRanges = [...tokensToRemove, ...loopTokenRanges];
99
+ allRanges.sort((a, b) => b.start - a.start);
100
+ let cleaned = argsString;
101
+ for (const { start, end } of allRanges) {
102
+ cleaned = cleaned.slice(0, start) + cleaned.slice(end);
103
+ }
104
+
105
+ const converge = loopCount === null ? true : !noConverge;
106
+ return { args: cleaned.trim(), loopCount, fresh, converge };
107
+ }
108
+
109
+ export function extractLoopFlags(argsString: string): LoopFlags {
110
+ let fresh = false;
111
+ let noConverge = false;
112
+ const tokensToRemove: Array<{ start: number; end: number }> = [];
113
+
114
+ let i = 0;
115
+ while (i < argsString.length) {
116
+ const char = argsString[i];
117
+
118
+ if (char === '"' || char === "'") {
119
+ const quote = char;
120
+ i++;
121
+ while (i < argsString.length && argsString[i] !== quote) i++;
122
+ if (i < argsString.length) i++;
123
+ continue;
124
+ }
125
+
126
+ if (/\s/.test(char)) {
127
+ i++;
128
+ continue;
129
+ }
130
+
131
+ const tokenStart = i;
132
+ while (i < argsString.length && !/\s/.test(argsString[i])) i++;
133
+ const token = argsString.slice(tokenStart, i);
134
+
135
+ if (token === "--fresh") {
136
+ fresh = true;
137
+ tokensToRemove.push({ start: tokenStart, end: i });
138
+ }
139
+
140
+ if (token === "--no-converge") {
141
+ noConverge = true;
142
+ tokensToRemove.push({ start: tokenStart, end: i });
143
+ }
144
+ }
145
+
146
+ tokensToRemove.sort((a, b) => b.start - a.start);
147
+ let cleaned = argsString;
148
+ for (const { start, end } of tokensToRemove) {
149
+ cleaned = cleaned.slice(0, start) + cleaned.slice(end);
150
+ }
151
+
152
+ return { args: cleaned.trim(), fresh, converge: !noConverge };
153
+ }
154
+
155
+ export function splitByUnquotedSeparator(input: string, separator: string): string[] {
156
+ const parts: string[] = [];
157
+ let start = 0;
158
+ let inQuote: string | null = null;
159
+
160
+ for (let i = 0; i < input.length; i++) {
161
+ const char = input[i];
162
+ if (inQuote) {
163
+ if (char === inQuote) inQuote = null;
164
+ } else if (char === '"' || char === "'") {
165
+ inQuote = char;
166
+ } else if (i <= input.length - separator.length && input.startsWith(separator, i)) {
167
+ parts.push(input.slice(start, i));
168
+ start = i + separator.length;
169
+ i += separator.length - 1;
170
+ }
171
+ }
172
+
173
+ parts.push(input.slice(start));
174
+ return parts;
175
+ }
176
+
1
177
  export function parseCommandArgs(argsString: string): string[] {
2
178
  const args: string[] = [];
3
179
  let current = "";
@@ -14,7 +190,7 @@ export function parseCommandArgs(argsString: string): string[] {
14
190
  }
15
191
  } else if (char === '"' || char === "'") {
16
192
  inQuote = char;
17
- } else if (char === " " || char === "\t") {
193
+ } else if (/\s/.test(char)) {
18
194
  if (current) {
19
195
  args.push(current);
20
196
  current = "";
@@ -54,6 +230,7 @@ export function substituteArgs(content: string, args: string[]): string {
54
230
  const allArgs = args.join(" ");
55
231
  result = result.replace(/\$ARGUMENTS/g, allArgs);
56
232
  result = result.replace(/\$@/g, allArgs);
233
+ result = result.replace(/@\$/g, allArgs);
57
234
 
58
235
  return result;
59
236
  }