pi-prompt-template-model 0.3.1 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,60 @@
2
2
 
3
3
  ## [Unreleased]
4
4
 
5
+ ## [0.5.0] - 2026-03-17
6
+
7
+ ### Added
8
+
9
+ - Loop execution via `--loop` flag: `--loop N`, `--loop=N` to run a prompt N times (1-999), or bare `--loop` for unlimited until convergence with a 50-iteration safety cap. Bare `--loop` always forces convergence on.
10
+ - Frontmatter loop controls: templates can now set `loop: N` (1-999) and `converge: false` as defaults; CLI `--loop` overrides frontmatter `loop`, and `--no-converge` disables convergence for bounded loops.
11
+ - Convergence detection: loops stop early when an iteration makes no file changes (`write`/`edit`). Enabled by default; `--no-converge` opts out.
12
+ - Fresh context mode: `--fresh` flag or `fresh: true` frontmatter collapses conversation between loop iterations, keeping only accumulated summaries. Saves tokens on long loops.
13
+ - Loop iteration context injected into the system prompt so the agent builds on previous work across iterations.
14
+ - Loop progress indicator in the TUI status bar.
15
+ - `run-prompt` agent tool: the agent can run prompt templates, chains, and loops on its own. Opt-in via `/prompt-tool on [guidance]`. Config persists in `~/.pi/agent/prompt-template-model.json`.
16
+ - Chain templates: new `chain` frontmatter field to declare reusable template pipelines (`chain: double-check --loop 2 -> deslop --loop 2`). Per-step `--loop N` loops each step independently. No `model` required — each step uses its own. Supports `loop`, `fresh`, `converge`, `restore` for overall execution control. Chain nesting is rejected at runtime.
17
+
18
+ ### Fixed
19
+
20
+ - `readSkillContent` no longer swallows read errors. The caller now sees the actual error message (e.g., permission denied) instead of a generic "Failed to read skill" notification.
21
+ - `restoreSessionState` no longer clears `pendingSkill` as a side effect unrelated to model/thinking restoration.
22
+ - Error diagnostics now consistently use `String(error)` instead of hardcoded fallback strings.
23
+
24
+ ## [0.4.0] - 2026-03-13
25
+
26
+ ### Added
27
+
28
+ - Inline `<if-model is="...">...</if-model>` blocks with optional `<else>` branches inside prompt bodies.
29
+ - Provider wildcard matching in conditionals with syntax like `anthropic/*`.
30
+ - Conditional rendering now happens after model fallback resolution for both single prompt commands and `/chain-prompts`.
31
+ - Prompt argument substitution now mirrors pi core more closely, including `${@:N}` and `${@:N:L}` slice syntax. See README for full placeholder reference.
32
+
33
+ ### Fixed
34
+
35
+ - Model fallback now preserves the currently active model whenever it matches any listed fallback candidate, including ambiguous bare model IDs that would otherwise resolve through provider preference, instead of switching to an earlier candidate unnecessarily.
36
+ - Prompt templates that collide with reserved slash commands, including built-ins like `/model` and the extension’s own `/chain-prompts`, are now skipped with a warning instead of being silently shadowed.
37
+ - Prompt discovery is now deterministic in a locale-independent way, and duplicate model-enabled prompt names within the same source layer are skipped with a warning instead of silently depending on traversal order.
38
+ - Invalid `model` frontmatter declarations are now rejected during prompt loading with diagnostics instead of failing later at execution time.
39
+ - Literal tags like `<elsewhere>` and `</if-modeling>` no longer get misparsed as malformed conditional directives.
40
+ - Non-interactive notifications now go to stderr so print-mode stdout stays clean.
41
+ - Bare model IDs with multiple providers can now still resolve through OAuth-backed auth checks even when fast availability checks alone are inconclusive.
42
+ - Optional string frontmatter fields are now trimmed so quoted values like `thinking: " high "` and `skill: " tmux "` behave as expected.
43
+ - Existing prompt commands now refresh prompt files before execution, so edits made during a session take effect on the next run instead of waiting for a new session.
44
+ - Skill-loaded custom messages now fail safe if their details payload is missing instead of crashing the renderer.
45
+ - Frontmatter `model` specs and inline conditional `is` specs now reject internal whitespace like `anthropic /model` or `anthropic /*` instead of silently registering values that can never match.
46
+ - Recursive prompt discovery now detects already-visited directories and skips symlink loops instead of risking infinite recursion or duplicate traversal.
47
+ - Bare model IDs now honor provider priority across all auth-capable candidates, including OAuth-backed providers, instead of incorrectly favoring a lower-priority provider just because it appeared in the fast-available set.
48
+ - Prompt loading now rejects non-object YAML frontmatter roots, like lists, with a diagnostic instead of silently treating them as missing `model` fields.
49
+ - `/chain-prompts` now only restores model and thinking when they actually changed, avoiding redundant state writes and noisy restore notifications on no-op chains.
50
+ - `/chain-prompts` now tracks thinking changes caused by model switches even when a step does not set `thinking`, so final restoration stays correct when the runtime clamps or resets thinking during a model change.
51
+ - `/chain-prompts` now rejects empty or quote-only step segments explicitly instead of treating them as blank template names.
52
+ - Single-command auto-restore now also skips no-op thinking restores and notifications when the runtime is already back on the original thinking level.
53
+ - Removed unnecessary exports: `modelSpecMatches` from model-selection.ts and `VALID_THINKING_LEVELS` from prompt-loader.ts are now internal implementation details.
54
+ - `/chain-prompts` now correctly ignores ` -- ` and `->` inside quoted per-step arguments instead of misinterpreting them as separators.
55
+ - `</else>` is now explicitly rejected with a helpful error message explaining that `<else>` is a separator, not a container.
56
+ - Fast-path optimization now correctly includes `</else>` check so standalone invalid tags are caught.
57
+ - Empty prompt abort in single-command mode now notifies as "error" instead of "warning" for consistency with chain mode.
58
+
5
59
  ## [0.3.1] - 2026-02-08
6
60
 
7
61
  ### Fixed
package/README.md CHANGED
@@ -86,11 +86,15 @@ Here `skill: surf` loads `~/.pi/agent/skills/surf/SKILL.md` and injects its cont
86
86
 
87
87
  | Field | Required | Default | Description |
88
88
  |-------|----------|---------|-------------|
89
- | `model` | Yes | - | Model ID, `provider/model-id`, or comma-separated list for fallback |
89
+ | `model` | Conditional | - | Required for non-chain templates; ignored when `chain` is set |
90
+ | `chain` | Conditional | - | Chain declaration (`step -> step --loop 2`) for orchestration templates; body is ignored |
90
91
  | `skill` | No | - | Skill name to inject into system prompt |
91
92
  | `thinking` | No | - | Thinking level: `off`, `minimal`, `low`, `medium`, `high`, `xhigh` |
92
93
  | `description` | No | - | Shown in autocomplete |
93
94
  | `restore` | No | `true` | Restore previous model and thinking level after response |
95
+ | `fresh` | No | `false` | Collapse context between loop iterations (applies when looping via `--loop` or frontmatter `loop`) |
96
+ | `loop` | No | - | Default loop count for this template (`1`-`999`) |
97
+ | `converge` | No | `true` | Stop looping early when an iteration makes no file changes; set `false` to always run all iterations |
94
98
 
95
99
  ## Model Format
96
100
 
@@ -131,6 +135,72 @@ Here the extension tries Haiku on Anthropic first, then Haiku on OpenRouter, the
131
135
 
132
136
  When all candidates fail, a single error notification lists everything that was tried.
133
137
 
138
+ ## Inline Model Conditionals
139
+
140
+ Prompt bodies can embed model-specific instructions directly in the markdown:
141
+
142
+ ```markdown
143
+ ---
144
+ description: Cross-model code review
145
+ model: claude-haiku-4-5, claude-sonnet-4-20250514
146
+ ---
147
+ Summarize the change first.
148
+
149
+ <if-model is="claude-haiku-4-5">
150
+ Keep the answer brief and cost-conscious.
151
+ <else>
152
+ Do a deeper pass and call out subtle risks.
153
+ </if-model>
154
+ ```
155
+
156
+ Conditionals are evaluated against the model that actually runs the command after fallback resolution. That means the same template can render differently depending on which candidate was selected.
157
+
158
+ Supported matches inside `is="..."`:
159
+
160
+ - Exact `provider/model-id`
161
+ - Exact bare `model-id`
162
+ - Provider wildcard like `anthropic/*`
163
+ - Comma-separated lists combining any of the above
164
+
165
+ Examples:
166
+
167
+ ```markdown
168
+ <if-model is="anthropic/claude-sonnet-4-20250514">...</if-model>
169
+ <if-model is="claude-sonnet-4-20250514">...</if-model>
170
+ <if-model is="anthropic/*">...</if-model>
171
+ <if-model is="openai/gpt-5.2, anthropic/*">...</if-model>
172
+ ```
173
+
174
+ `<else>` is the fallback branch for the current `<if-model>` block. Nested blocks are supported.
175
+
176
+ Conditionals are a raw text preprocessing step, not markdown-aware syntax. If you want to show the directive literally inside a prompt, escape it in the source text, for example with `&lt;if-model is="anthropic/*"&gt;`.
177
+
178
+ ## Argument Substitution
179
+
180
+ Prompt bodies support argument placeholders that expand to command arguments:
181
+
182
+ | Placeholder | Description |
183
+ |-------------|-------------|
184
+ | `$1`, `$2`, ... | Positional argument (1-indexed) |
185
+ | `$@` | All arguments joined with spaces |
186
+ | `$ARGUMENTS` | Same as `$@` |
187
+ | `${@:N}` | All arguments from position N onward |
188
+ | `${@:N:L}` | L arguments starting from position N |
189
+
190
+ Example:
191
+
192
+ ```markdown
193
+ ---
194
+ model: claude-sonnet-4-20250514
195
+ ---
196
+ Analyze $1 focusing on $2. Additional context: ${@:3}
197
+ ```
198
+
199
+ Running `/analyze src/main.ts performance edge cases error handling` expands to:
200
+ - `$1` → `src/main.ts`
201
+ - `$2` → `performance`
202
+ - `${@:3}` → `edge cases error handling`
203
+
134
204
  ## Skill Resolution
135
205
 
136
206
  The `skill` field matches the skill's directory name:
@@ -158,7 +228,7 @@ Organize prompts in subdirectories for namespacing:
158
228
  └── hook.md → /hook (user:frontend)
159
229
  ```
160
230
 
161
- The subdirectory shows in autocomplete as the source label. Note: command names are based on filename only, so avoid duplicate filenames across subdirectories (e.g., `quick.md` and `frontend/quick.md` would collide).
231
+ The subdirectory shows in autocomplete as the source label. Command names are based on filename only. If duplicates exist within the same source layer, the first one found after lexical sorting wins and later duplicates are skipped with a warning. Reserved command names like `model`, `reload`, and `chain-prompts` are also skipped with a warning.
162
232
 
163
233
  ## Examples
164
234
 
@@ -240,7 +310,7 @@ Switched to Haiku. How can I help?
240
310
 
241
311
  ## Chaining Templates
242
312
 
243
- The `/chain-prompts` command runs multiple templates sequentially. Each step switches to its own model, injects its own skill, and the conversation context carries forward between steps.
313
+ The `/chain-prompts` command runs multiple templates sequentially. Each step switches to its own model, renders any inline model conditionals against that step’s resolved model, injects its own skill, and the conversation context carries forward between steps.
244
314
 
245
315
  ```
246
316
  /chain-prompts analyze-code -> fix-plan -> summarize -- src/main.ts
@@ -266,6 +336,149 @@ Step 1 uses its per-step args (`"error handling"`), steps 2 and 3 fall back to t
266
336
 
267
337
  The chain captures your current model and thinking level before starting, and restores them when the chain finishes (or if any step fails mid-chain). Individual template `restore` settings are ignored during chain execution.
268
338
 
339
+ ### Chain Templates
340
+
341
+ For reusable pipelines, define a chain in frontmatter instead of typing `/chain-prompts` every time:
342
+
343
+ ```markdown
344
+ ---
345
+ description: Review then clean up
346
+ chain: double-check --loop 2 -> deslop --loop 2
347
+ ---
348
+ ignored — chain templates don't use the body
349
+ ```
350
+
351
+ This registers `/review-then-clean` as a command that runs `double-check` twice, then `deslop` twice. Each step references a separate prompt template with its own `model`. The chain template itself doesn't need a `model` field — each step uses whatever model its template specifies.
352
+
353
+ Per-step `--loop N` repeats that step N times before moving to the next. Per-step convergence applies: if a step makes no file changes on an iteration, its inner loop stops early (unless the step's template has `converge: false`).
354
+
355
+ Chain templates support `loop`, `fresh`, `converge`, and `restore` in their frontmatter for overall execution control:
356
+
357
+ ```markdown
358
+ ---
359
+ chain: analyze -> fix
360
+ loop: 3
361
+ fresh: true
362
+ converge: false
363
+ ---
364
+ ```
365
+
366
+ This runs the full analyze → fix chain 3 times, with fresh context between iterations and no early stopping. CLI `--loop` overrides frontmatter `loop` when invoking the command.
367
+
368
+ Chain nesting is not supported — a chain template's steps cannot reference other chain templates.
369
+
370
+ ## Loop Execution
371
+
372
+ Looping uses the `--loop` flag:
373
+
374
+ ```
375
+ /deslop --loop 5
376
+ /deslop --loop=5
377
+ /deslop "focus on performance" --loop 3
378
+ /deslop --loop
379
+ ```
380
+
381
+ `--loop` without a number means unlimited looping until convergence, with a built-in safety cap of 50 iterations.
382
+
383
+ You can also set a default loop count in frontmatter:
384
+
385
+ ```markdown
386
+ ---
387
+ model: claude-sonnet-4-20250514
388
+ loop: 5
389
+ ---
390
+ ...
391
+ ```
392
+
393
+ With that template, `/deslop` runs 5 iterations by default. CLI `--loop` overrides frontmatter (`/deslop --loop 3` runs 3 iterations).
394
+
395
+ The agent runs the same prompt N times. Context accumulates across iterations — by iteration 3, the agent sees the full conversation from iterations 1 and 2 and builds on that work. Use `--fresh` to collapse context between iterations instead (see below).
396
+
397
+ By default, the loop stops early if an iteration makes no file changes (no `write` or `edit` tool calls), since there's nothing left to improve. Add `--no-converge` to always run all iterations for bounded loops, or set `converge: false` in frontmatter:
398
+
399
+ ```
400
+ /deslop --loop 5 --no-converge
401
+ ```
402
+
403
+ ```markdown
404
+ ---
405
+ model: claude-sonnet-4-20250514
406
+ loop: 5
407
+ converge: false
408
+ ---
409
+ ...
410
+ ```
411
+
412
+ Bare `--loop` always forces convergence on (even with `--no-converge` or `converge: false`) because its intent is "run until no changes." `--loop N` and `--loop=N` support range 1-999. Quoted `"--loop"` is treated as a regular argument.
413
+
414
+ Model, thinking level, and skill are maintained throughout the loop. If the template has `restore: true` (the default), the original model and thinking level are restored after the final iteration (or if any iteration fails). If `restore: false`, the switched model persists after the loop ends.
415
+
416
+ ### Fresh Context
417
+
418
+ Add `--fresh` to collapse context between iterations:
419
+
420
+ ```
421
+ /deslop --loop 5 --fresh
422
+ /deslop --fresh # when frontmatter sets loop: N
423
+ ```
424
+
425
+ Each iteration's conversation is collapsed to a brief summary (files read, files modified, outcome) before the next iteration starts. The agent sees accumulated summaries from all previous iterations but not the full conversation. This saves tokens on long loops and gives each iteration a clean slate for reasoning.
426
+
427
+ You can also set `fresh: true` in the template frontmatter to make it the default when looped:
428
+
429
+ ```markdown
430
+ ---
431
+ description: Remove AI slop from code
432
+ model: claude-sonnet-4-20250514
433
+ fresh: true
434
+ ---
435
+ Review the codebase and improve code quality. $@
436
+ ```
437
+
438
+ ### Loop with Chains
439
+
440
+ Chains support the same looping forms:
441
+
442
+ ```
443
+ /chain-prompts analyze -> fix --loop 3 -- src/main.ts
444
+ /chain-prompts analyze -> fix --loop=3 -- src/main.ts
445
+ /chain-prompts analyze -> fix --loop -- src/main.ts
446
+ /chain-prompts analyze -> fix --loop 3 --fresh -- src/main.ts
447
+ /chain-prompts analyze -> fix --loop 3 --no-converge -- src/main.ts
448
+ ```
449
+
450
+ This runs the full chain (analyze → fix) three times. Convergence detection applies across all steps in each iteration — if no step made file changes, the loop stops. Each iteration re-reads prompts from disk, so template edits take effect between iterations. The status bar shows `loop 2/3` during execution.
451
+
452
+ ## Agent Tool
453
+
454
+ The agent can run prompt templates on its own via the `run-prompt` tool. Disabled by default — enable it with:
455
+
456
+ ```
457
+ /prompt-tool on
458
+ ```
459
+
460
+ Once enabled, the agent sees `run-prompt` in its tool list and can call it with any template command:
461
+
462
+ ```
463
+ run-prompt({ command: "deslop --loop 5 --fresh" })
464
+ run-prompt({ command: "deslop --loop" })
465
+ run-prompt({ command: "chain-prompts analyze -> fix --loop 3" })
466
+ ```
467
+
468
+ The tool queues the command for execution when the agent's current turn ends. All loop, fresh context, and convergence features work the same as when invoked via slash commands.
469
+
470
+ Add guidance to steer when the agent uses it:
471
+
472
+ ```
473
+ /prompt-tool on Use run-prompt for iterative code improvement tasks
474
+ /prompt-tool guidance Use sparingly, only for multi-pass refinement
475
+ /prompt-tool guidance clear
476
+ /prompt-tool off
477
+ /prompt-tool
478
+ ```
479
+
480
+ Config persists across sessions in `~/.pi/agent/prompt-template-model.json`.
481
+
269
482
  ## Autocomplete Display
270
483
 
271
484
  Commands show model, thinking level, and skill in the description:
@@ -290,7 +503,7 @@ The model switches, skill injects, agent responds, and output prints to stdout.
290
503
 
291
504
  ## Limitations
292
505
 
293
- - Templates discovered at startup. Restart pi after adding/modifying.
506
+ - Prompt files are reloaded on session start and whenever an extension-owned prompt command runs. If you add a brand-new prompt file while already inside a session, run another extension-owned command such as `/chain-prompts`, start a new session, or reload pi so the new slash command is registered.
294
507
  - Model restore state is in-memory. Closing pi mid-response loses restore state.
295
- - Only templates with a `model` field can be chained. Templates without `model` are handled by pi core and invisible to this extension.
296
- - Per-step args containing a literal `->` will be misinterpreted as a step separator. Use shared `--` args or a template file instead.
508
+ - Chain steps must reference templates with a `model` field. Chain templates themselves use `chain` and do not execute their own body.
509
+ - The `run-prompt` tool must be explicitly enabled with `/prompt-tool on` before the agent can use it.
package/args.ts ADDED
@@ -0,0 +1,232 @@
1
+ export interface LoopExtraction {
2
+ args: string;
3
+ loopCount: number | null;
4
+ fresh: boolean;
5
+ converge: boolean;
6
+ }
7
+
8
+ export interface LoopFlags {
9
+ args: string;
10
+ fresh: boolean;
11
+ converge: boolean;
12
+ }
13
+
14
+ export function extractLoopCount(argsString: string): LoopExtraction | null {
15
+ let loopCount: number | null = null;
16
+ let loopFound = false;
17
+ let fresh = false;
18
+ let noConverge = false;
19
+ const tokensToRemove: Array<{ start: number; end: number }> = [];
20
+
21
+ let i = 0;
22
+ while (i < argsString.length) {
23
+ const char = argsString[i];
24
+
25
+ if (char === '"' || char === "'") {
26
+ const quote = char;
27
+ i++;
28
+ while (i < argsString.length && argsString[i] !== quote) i++;
29
+ if (i < argsString.length) i++;
30
+ continue;
31
+ }
32
+
33
+ if (/\s/.test(char)) {
34
+ i++;
35
+ continue;
36
+ }
37
+
38
+ const tokenStart = i;
39
+ while (i < argsString.length && !/\s/.test(argsString[i])) i++;
40
+ const token = argsString.slice(tokenStart, i);
41
+
42
+ if (!loopFound && (token === "--loop" || token.startsWith("--loop="))) {
43
+ if (token.startsWith("--loop=")) {
44
+ const value = token.slice("--loop=".length);
45
+ if (/^\d+$/.test(value)) {
46
+ const parsed = parseInt(value, 10);
47
+ if (parsed >= 1 && parsed <= 999) {
48
+ loopFound = true;
49
+ loopCount = parsed;
50
+ tokensToRemove.push({ start: tokenStart, end: i });
51
+ }
52
+ }
53
+ } else {
54
+ let lookahead = i;
55
+ while (lookahead < argsString.length && /\s/.test(argsString[lookahead])) lookahead++;
56
+
57
+ if (lookahead < argsString.length && argsString[lookahead] !== '"' && argsString[lookahead] !== "'") {
58
+ const nextTokenStart = lookahead;
59
+ while (lookahead < argsString.length && !/\s/.test(argsString[lookahead])) lookahead++;
60
+ const nextToken = argsString.slice(nextTokenStart, lookahead);
61
+
62
+ if (/^\d+$/.test(nextToken)) {
63
+ const parsed = parseInt(nextToken, 10);
64
+ if (parsed >= 1 && parsed <= 999) {
65
+ loopFound = true;
66
+ loopCount = parsed;
67
+ tokensToRemove.push({ start: tokenStart, end: i }, { start: nextTokenStart, end: lookahead });
68
+ i = lookahead;
69
+ }
70
+ } else {
71
+ loopFound = true;
72
+ loopCount = null;
73
+ tokensToRemove.push({ start: tokenStart, end: i });
74
+ }
75
+ } else {
76
+ loopFound = true;
77
+ loopCount = null;
78
+ tokensToRemove.push({ start: tokenStart, end: i });
79
+ }
80
+ }
81
+ }
82
+
83
+ if (token === "--fresh") {
84
+ fresh = true;
85
+ tokensToRemove.push({ start: tokenStart, end: i });
86
+ }
87
+
88
+ if (token === "--no-converge") {
89
+ noConverge = true;
90
+ tokensToRemove.push({ start: tokenStart, end: i });
91
+ }
92
+ }
93
+
94
+ if (loopCount === null && !loopFound) return null;
95
+
96
+ tokensToRemove.sort((a, b) => b.start - a.start);
97
+ let cleaned = argsString;
98
+ for (const { start, end } of tokensToRemove) {
99
+ cleaned = cleaned.slice(0, start) + cleaned.slice(end);
100
+ }
101
+
102
+ const converge = loopFound && loopCount === null ? true : !noConverge;
103
+ return { args: cleaned.trim(), loopCount, fresh, converge };
104
+ }
105
+
106
+ export function extractLoopFlags(argsString: string): LoopFlags {
107
+ let fresh = false;
108
+ let noConverge = false;
109
+ const tokensToRemove: Array<{ start: number; end: number }> = [];
110
+
111
+ let i = 0;
112
+ while (i < argsString.length) {
113
+ const char = argsString[i];
114
+
115
+ if (char === '"' || char === "'") {
116
+ const quote = char;
117
+ i++;
118
+ while (i < argsString.length && argsString[i] !== quote) i++;
119
+ if (i < argsString.length) i++;
120
+ continue;
121
+ }
122
+
123
+ if (/\s/.test(char)) {
124
+ i++;
125
+ continue;
126
+ }
127
+
128
+ const tokenStart = i;
129
+ while (i < argsString.length && !/\s/.test(argsString[i])) i++;
130
+ const token = argsString.slice(tokenStart, i);
131
+
132
+ if (token === "--fresh") {
133
+ fresh = true;
134
+ tokensToRemove.push({ start: tokenStart, end: i });
135
+ }
136
+
137
+ if (token === "--no-converge") {
138
+ noConverge = true;
139
+ tokensToRemove.push({ start: tokenStart, end: i });
140
+ }
141
+ }
142
+
143
+ tokensToRemove.sort((a, b) => b.start - a.start);
144
+ let cleaned = argsString;
145
+ for (const { start, end } of tokensToRemove) {
146
+ cleaned = cleaned.slice(0, start) + cleaned.slice(end);
147
+ }
148
+
149
+ return { args: cleaned.trim(), fresh, converge: !noConverge };
150
+ }
151
+
152
+ export function splitByUnquotedSeparator(input: string, separator: string): string[] {
153
+ const parts: string[] = [];
154
+ let start = 0;
155
+ let inQuote: string | null = null;
156
+
157
+ for (let i = 0; i < input.length; i++) {
158
+ const char = input[i];
159
+ if (inQuote) {
160
+ if (char === inQuote) inQuote = null;
161
+ } else if (char === '"' || char === "'") {
162
+ inQuote = char;
163
+ } else if (i <= input.length - separator.length && input.startsWith(separator, i)) {
164
+ parts.push(input.slice(start, i));
165
+ start = i + separator.length;
166
+ i += separator.length - 1;
167
+ }
168
+ }
169
+
170
+ parts.push(input.slice(start));
171
+ return parts;
172
+ }
173
+
174
+ export function parseCommandArgs(argsString: string): string[] {
175
+ const args: string[] = [];
176
+ let current = "";
177
+ let inQuote: string | null = null;
178
+
179
+ for (let i = 0; i < argsString.length; i++) {
180
+ const char = argsString[i];
181
+
182
+ if (inQuote) {
183
+ if (char === inQuote) {
184
+ inQuote = null;
185
+ } else {
186
+ current += char;
187
+ }
188
+ } else if (char === '"' || char === "'") {
189
+ inQuote = char;
190
+ } else if (/\s/.test(char)) {
191
+ if (current) {
192
+ args.push(current);
193
+ current = "";
194
+ }
195
+ } else {
196
+ current += char;
197
+ }
198
+ }
199
+
200
+ if (current) {
201
+ args.push(current);
202
+ }
203
+
204
+ return args;
205
+ }
206
+
207
+ export function substituteArgs(content: string, args: string[]): string {
208
+ let result = content;
209
+
210
+ result = result.replace(/\$(\d+)/g, (_, num) => {
211
+ const index = parseInt(num, 10) - 1;
212
+ return args[index] ?? "";
213
+ });
214
+
215
+ result = result.replace(/\$\{@:(\d+)(?::(\d+))?\}/g, (_, startStr, lengthStr) => {
216
+ let start = parseInt(startStr, 10) - 1;
217
+ if (start < 0) start = 0;
218
+
219
+ if (lengthStr) {
220
+ const length = parseInt(lengthStr, 10);
221
+ return args.slice(start, start + length).join(" ");
222
+ }
223
+
224
+ return args.slice(start).join(" ");
225
+ });
226
+
227
+ const allArgs = args.join(" ");
228
+ result = result.replace(/\$ARGUMENTS/g, allArgs);
229
+ result = result.replace(/\$@/g, allArgs);
230
+
231
+ return result;
232
+ }