pi-prompt-template-model 0.2.0 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +16 -0
- package/README.md +30 -0
- package/index.ts +108 -7
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,22 @@

 ## [Unreleased]

+## [0.3.0] - 2026-02-08
+
+### Added
+
+- **Chain command**: `/chain` orchestrates multiple prompt templates sequentially, each with its own model, skill, and thinking level. Conversation context flows between steps naturally.
+- Per-step args override shared args: `/chain analyze "error handling" -> fix-plan "focus on perf" -> summarize -- src/main.ts`
+- Mid-chain failure rolls back to the original model and thinking level
+- Step progress notifications show which step is running
+- State isolation: chain uses local variables, never interferes with single-command restore behavior
+
+## [0.2.1] - 2026-01-31
+
+### Fixed
+
+- Thinking level now correctly restored after commands that switch model without a `thinking` field. Previously, running a prompt template that only specified `model` would reset thinking to "off" instead of restoring the original level (e.g., "high").
+
 ## [0.2.0] - 2025-01-31

 ### Added
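The 0.2.1 fix is easiest to see as a capture-and-restore flow: the current model and thinking level are saved before a template switches anything, and both are put back when the agent finishes. Below is a minimal, self-contained sketch of that flow, assuming illustrative names (`runTemplate`, `agentEnd`, the `ThinkingLevel` values); it is not the extension's actual code, which appears in the `index.ts` diff further down.

```ts
// Simplified model of the restore behavior described in 0.2.1 (illustrative only).
type ThinkingLevel = "off" | "low" | "medium" | "high";

interface Template {
  model?: string;
  thinking?: ThinkingLevel;
  restore: boolean;
}

let currentModel = "sonnet";
let currentThinking: ThinkingLevel = "high";
let previousModel: string | undefined;
let previousThinking: ThinkingLevel | undefined;

function runTemplate(tmpl: Template): void {
  // The fix: capture the thinking level up front, next to the model,
  // so a template that only sets `model` still gets its thinking restored.
  const savedModel = currentModel;
  const savedThinking = currentThinking;

  if (tmpl.model && tmpl.model !== currentModel && tmpl.restore) {
    previousModel = savedModel;
    previousThinking = savedThinking;
  }
  if (tmpl.model) currentModel = tmpl.model;
  if (tmpl.thinking) currentThinking = tmpl.thinking;
}

function agentEnd(): void {
  // Runs when the agent finishes responding: put the session back.
  if (previousModel !== undefined) currentModel = previousModel;
  if (previousThinking !== undefined) currentThinking = previousThinking;
  previousModel = undefined;
  previousThinking = undefined;
}

runTemplate({ model: "haiku", restore: true }); // template with no `thinking` field
agentEnd();
console.log(currentModel, currentThinking); // "sonnet high" -> thinking level preserved
```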
package/README.md
CHANGED
@@ -238,6 +238,34 @@ restore: false
 Switched to Haiku. How can I help?
 ```

+## Chaining Templates
+
+The `/chain` command runs multiple templates sequentially. Each step switches to its own model, injects its own skill, and the conversation context carries forward between steps.
+
+```
+/chain analyze-code -> fix-plan -> summarize -- src/main.ts
+```
+
+This runs `analyze-code` first, then `fix-plan` (which sees the analysis in conversation context), then `summarize`. The `-- src/main.ts` provides shared args substituted into every template's `$@`.
+
+Each step can also receive its own args, overriding the shared args for that step:
+
+```
+/chain analyze-code "look at error handling" -> fix-plan "focus on perf" -> summarize
+```
+
+Here `analyze-code` gets `$@ = "look at error handling"`, `fix-plan` gets `$@ = "focus on perf"`, and `summarize` has no per-step args so it falls back to the shared args (empty in this case, but conversation context from prior steps is usually enough).
+
+You can mix both:
+
+```
+/chain analyze-code "error handling" -> fix-plan -> summarize -- src/main.ts
+```
+
+Step 1 uses its per-step args (`"error handling"`), steps 2 and 3 fall back to the shared args (`"src/main.ts"`).
+
+The chain captures your current model and thinking level before starting, and restores them when the chain finishes (or if any step fails mid-chain). Individual template `restore` settings are ignored during chain execution.
+
 ## Autocomplete Display

 Commands show model, thinking level, and skill in the description:
@@ -264,3 +292,5 @@ The model switches, skill injects, agent responds, and output prints to stdout.

 - Templates discovered at startup. Restart pi after adding/modifying.
 - Model restore state is in-memory. Closing pi mid-response loses restore state.
+- Only templates with a `model` field can be chained. Templates without `model` are handled by pi core and invisible to this extension.
+- Per-step args containing a literal `->` will be misinterpreted as a step separator. Use shared `--` args or a template file instead.
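To make the syntax above concrete, here is a small, self-contained TypeScript sketch of the documented parsing and fallback rules: shared args are split off at ` -- `, steps are split on `->`, and a step's own args take precedence over the shared ones. The `tokenize` helper is a naive stand-in for the extension's argument parser, so treat this as an illustration rather than the implementation; it also shows why a literal `->` inside per-step args is misread as a step separator, the limitation noted in the README.

```ts
// Illustrative parser for the documented /chain syntax (not the extension's code).
// tokenize() is a naive quote-aware stand-in for the extension's argument parser.
function tokenize(input: string): string[] {
  const tokens = input.match(/"[^"]*"|\S+/g) ?? [];
  return tokens.map(t => t.replace(/^"|"$/g, ""));
}

interface ChainStep {
  name: string;
  stepArgs: string[];
}

// `input` is everything after "/chain ".
function parseChain(input: string): { steps: ChainStep[]; sharedArgs: string[] } {
  // Everything after " -- " becomes the shared args.
  let templatesPart = input;
  let argsPart = "";
  const sep = input.indexOf(" -- ");
  if (sep !== -1) {
    templatesPart = input.slice(0, sep);
    argsPart = input.slice(sep + 4);
  }

  // Steps are separated by "->"; a literal "->" inside a step's args would split it here too.
  const steps = templatesPart
    .split("->")
    .map(s => s.trim())
    .filter(Boolean)
    .map(segment => {
      const tokens = tokenize(segment);
      return { name: tokens[0], stepArgs: tokens.slice(1) };
    });

  return { steps, sharedArgs: tokenize(argsPart) };
}

// Per-step args win; otherwise the step falls back to the shared args for its $@.
function effectiveArgs(step: ChainStep, sharedArgs: string[]): string[] {
  return step.stepArgs.length > 0 ? step.stepArgs : sharedArgs;
}

const { steps, sharedArgs } = parseChain('analyze-code "error handling" -> fix-plan -> summarize -- src/main.ts');
for (const step of steps) {
  console.log(step.name, effectiveArgs(step, sharedArgs));
}
// analyze-code [ 'error handling' ]
// fix-plan [ 'src/main.ts' ]
// summarize [ 'src/main.ts' ]
```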
package/index.ts
CHANGED
@@ -376,6 +376,7 @@ export default function promptModelExtension(pi: ExtensionAPI) {
   let previousModel: Model<any> | undefined;
   let previousThinking: ThinkingLevel | undefined;
   let pendingSkill: { name: string; cwd: string } | undefined;
+  let chainActive = false;

   // Register custom message renderer for skill-loaded messages
   pi.registerMessageRenderer<SkillLoadedDetails>("skill-loaded", renderSkillLoaded);
@@ -493,6 +494,7 @@ export default function promptModelExtension(pi: ExtensionAPI) {

   // Restore model and thinking level after the agent finishes responding
   pi.on("agent_end", async (_event, ctx) => {
+    if (chainActive) return;
     const restoredParts: string[] = [];

     if (previousModel) {
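The `chainActive` guard added here is the "state isolation" the changelog mentions: while a chain is running, this per-template restore path in `agent_end` is skipped, and the chain performs one restore of its own when it ends. The fragment below is a schematic of that interplay, with `restoreOriginal` and the `steps` callbacks as hypothetical stand-ins for the extension's internals, not its actual control flow.

```ts
// Schematic of the guard's role (stand-in callbacks, not the extension's code).
let chainActive = false;

async function onAgentEnd(restoreOriginal: () => Promise<void>): Promise<void> {
  if (chainActive) return;   // a running chain owns the restore, so skip the per-template path
  await restoreOriginal();   // normal single-command restore
}

async function runChain(
  steps: Array<() => Promise<void>>,
  restoreOriginal: () => Promise<void>,
): Promise<void> {
  chainActive = true;        // suppress per-step restores in onAgentEnd
  try {
    for (const step of steps) {
      await step();          // each step's agent_end fires, but is ignored above
    }
    await restoreOriginal(); // a single restore once the whole chain is done
  } finally {
    chainActive = false;     // always clear the flag, even if a step throws
  }
}
```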
@@ -549,8 +551,9 @@
          return;
        }

-        // Capture current
+        // Capture current state before any switching (needed for restore)
        const savedModel = ctx.model;
+        const savedThinking = pi.getThinkingLevel();

        // Resolve and switch to the first available model from the list
        const result = await resolveAndSwitch(currentPrompt.models, ctx);
@@ -558,17 +561,15 @@

        if (!result.alreadyActive && currentPrompt.restore) {
          previousModel = savedModel;
+          previousThinking = savedThinking;
        }

        // Set thinking level if specified
        if (currentPrompt.thinking) {
-
-
-          if (currentPrompt.restore) {
-            previousThinking = currentThinking;
-          }
-          pi.setThinkingLevel(currentPrompt.thinking);
+          if (currentPrompt.restore && previousThinking === undefined && currentPrompt.thinking !== savedThinking) {
+            previousThinking = savedThinking;
          }
+          pi.setThinkingLevel(currentPrompt.thinking);
        }

        // Set pending skill for before_agent_start handler
@@ -592,4 +593,104 @@
    },
  });
 }
+
+  pi.registerCommand("chain", {
+    description: "Chain prompt templates sequentially [template -> template -> ...]",
+    handler: async (args, ctx) => {
+      let templatesPart = args;
+      let argsPart = "";
+      const argsSeparator = args.indexOf(" -- ");
+      if (argsSeparator !== -1) {
+        templatesPart = args.slice(0, argsSeparator);
+        argsPart = args.slice(argsSeparator + 4);
+      }
+
+      const steps = templatesPart
+        .split("->")
+        .map(s => s.trim())
+        .filter(Boolean)
+        .map(segment => {
+          const tokens = parseCommandArgs(segment);
+          return { name: tokens[0], args: tokens.slice(1) };
+        });
+
+      if (steps.length === 0) {
+        ctx.ui.notify("No templates specified", "error");
+        return;
+      }
+
+      const missingTemplates = steps.filter(s => !prompts.has(s.name));
+      if (missingTemplates.length > 0) {
+        ctx.ui.notify(`Templates not found: ${missingTemplates.map(s => s.name).join(", ")}`, "error");
+        return;
+      }
+
+      const templates = steps.map(s => ({ ...prompts.get(s.name)!, stepArgs: s.args }));
+      const originalModel = ctx.model;
+      const originalThinking = pi.getThinkingLevel();
+      const parsedArgs = parseCommandArgs(argsPart);
+
+      try {
+        chainActive = true;
+
+        for (const [index, tmpl] of templates.entries()) {
+          const stepNumber = index + 1;
+          const modelLabel = tmpl.models
+            .map(m => m.split("/").pop() || m)
+            .join("|");
+          const skillLabel = tmpl.skill ? ` +${tmpl.skill}` : "";
+          const thinkingLabel = tmpl.thinking ? ` ${tmpl.thinking}` : "";
+          ctx.ui.notify(
+            `Step ${stepNumber}/${templates.length}: ${tmpl.name} [${modelLabel}${thinkingLabel}${skillLabel}]`,
+            "info"
+          );
+
+          const result = await resolveAndSwitch(tmpl.models, ctx);
+          if (!result) {
+            ctx.ui.notify(`Step ${stepNumber}/${templates.length} failed: ${tmpl.name}`, "error");
+            if (originalModel) {
+              await pi.setModel(originalModel);
+            }
+            if (originalThinking !== undefined) {
+              pi.setThinkingLevel(originalThinking);
+            }
+            return;
+          }
+
+          if (tmpl.thinking) {
+            pi.setThinkingLevel(tmpl.thinking);
+          }
+
+          pendingSkill = undefined;
+          if (tmpl.skill) {
+            pendingSkill = { name: tmpl.skill, cwd: ctx.cwd };
+          }
+
+          const effectiveArgs = tmpl.stepArgs.length > 0 ? tmpl.stepArgs : parsedArgs;
+          const expandedContent = substituteArgs(tmpl.content, effectiveArgs);
+          pi.sendUserMessage(expandedContent);
+
+          while (ctx.isIdle()) {
+            await new Promise(resolve => setTimeout(resolve, 10));
+          }
+          await ctx.waitForIdle();
+        }
+
+        const restoredParts: string[] = [];
+        if (originalModel) {
+          restoredParts.push(originalModel.id);
+          await pi.setModel(originalModel);
+        }
+        if (originalThinking !== undefined) {
+          restoredParts.push(`thinking:${originalThinking}`);
+          pi.setThinkingLevel(originalThinking);
+        }
+        if (restoredParts.length > 0) {
+          ctx.ui.notify(`Restored to ${restoredParts.join(", ")}`, "info");
+        }
+      } finally {
+        chainActive = false;
+      }
+    },
+  });
 }
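One detail of the chain loop worth calling out: after `pi.sendUserMessage(...)`, the handler first polls until the session is no longer idle (the agent has actually picked up the message) and only then awaits `ctx.waitForIdle()`; calling `waitForIdle()` alone could presumably resolve immediately if the message has not started processing yet. A generic version of that two-phase wait, with `isBusy` and `waitUntilIdle` as hypothetical stand-ins for `!ctx.isIdle()` and `ctx.waitForIdle()`:

```ts
// Generic two-phase wait between chain steps (illustrative stand-ins, not the pi API).
async function waitForStepToFinish(
  isBusy: () => boolean,
  waitUntilIdle: () => Promise<void>,
  pollMs = 10,
): Promise<void> {
  // Phase 1: wait for the agent to start working on the message we just sent.
  while (!isBusy()) {
    await new Promise(resolve => setTimeout(resolve, pollMs));
  }
  // Phase 2: wait for that response to complete before the next step runs.
  await waitUntilIdle();
}
```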