pi-agent-extensions 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +181 -0
- package/docs/README.md +32 -0
- package/docs/dev/ask-user/test-cases.md +304 -0
- package/docs/dev/handoff/eval-strategy.md +455 -0
- package/docs/dev/handoff/implementation-log.md +330 -0
- package/docs/dev/handoff/spec.md +567 -0
- package/docs/extensions/ask-user.md +644 -0
- package/docs/extensions/handoff.md +195 -0
- package/docs/extensions/sessions.md +34 -0
- package/docs/guides/manual-testing.md +98 -0
- package/docs/guides/vertex-ai-setup.md +135 -0
- package/extensions/ask-user/README.md +125 -0
- package/extensions/ask-user/index.ts +103 -0
- package/extensions/ask-user/modes/print.ts +62 -0
- package/extensions/ask-user/tool.ts +121 -0
- package/extensions/ask-user/types.ts +74 -0
- package/extensions/ask-user/ui/index.ts +262 -0
- package/extensions/handoff/config.ts +141 -0
- package/extensions/handoff/extraction.ts +153 -0
- package/extensions/handoff/index.ts +534 -0
- package/extensions/handoff/metadata.ts +155 -0
- package/extensions/handoff/parser.ts +180 -0
- package/extensions/handoff/progress.ts +131 -0
- package/extensions/handoff/prompt.ts +139 -0
- package/extensions/handoff/types.ts +115 -0
- package/extensions/sessions/index.ts +228 -0
- package/extensions/sessions/sessions.ts +74 -0
- package/package.json +51 -0
|
@@ -0,0 +1,153 @@
|
|
|
1
|
+
import type { ExtractionOutput, HandoffConfig, ParseResult } from "./types.js";
|
|
2
|
+
import { parseExtractionResponse, normalizeExtraction } from "./parser.js";
|
|
3
|
+
|
|
4
|
+
/**
 * System prompt for the extraction LLM call.
 * Instructs the model to extract structured context from the conversation.
 *
 * The five categories mirror the keys consumed downstream by
 * `parseExtractionResponse` / `normalizeExtraction` (see parser.js), so the
 * JSON schema shown to the model must stay in sync with those parsers.
 * The prompt deliberately forbids any text outside the JSON block so the
 * response can be parsed without heuristics.
 */
export const EXTRACTION_SYSTEM_PROMPT = `You are a context extraction assistant. Your task is to analyze a conversation and extract the most relevant context for continuing work in a new thread.

Given the conversation history and the user's goal for the next thread, extract:
1. **relevantFiles**: Files that were ACTUALLY MENTIONED in the conversation that are relevant to the goal. Include a brief reason for each.
2. **relevantCommands**: Commands that were run and may need to be run again
3. **relevantInformation**: Key context for accomplishing the goal
4. **decisions**: Important decisions made during the conversation
5. **openQuestions**: Unresolved questions, risks, or blockers

## Output Format

Respond with ONLY a valid JSON object in this exact format:

\`\`\`json
{
  "relevantFiles": [
    { "path": "path/to/file.ts", "reason": "Brief reason why this file matters" }
  ],
  "relevantCommands": ["npm test", "git status"],
  "relevantInformation": [
    "Key fact or context point",
    "Another important detail"
  ],
  "decisions": [
    "Decision that was made and why"
  ],
  "openQuestions": [
    "Question that remains unanswered"
  ]
}
\`\`\`

## What to Extract

**relevantInformation** - Focus on:
- Project conventions learned (e.g., "Use TypeBox, not Zod for schemas")
- Runtime behaviors discovered (e.g., "Extensions hot-reload with /reload")
- Gotchas that could trip up the next agent (e.g., "Must use .js extension in imports")
- Technical constraints or requirements
- Key findings from exploration

**relevantFiles** - Only include files that:
- Were EXPLICITLY MENTIONED in the conversation (by path or filename)
- Are directly related to accomplishing the goal
- Contain patterns to follow or will need to be modified

## What NOT to Extract

- Completed tasks or work history ("We implemented X, then Y, then Z")
- Obvious actions the agent will do anyway (running tests, building, linting)
- Generic observations that don't help the specific goal
- Files that were NOT mentioned in the conversation (do not invent paths)

## Guidelines

- Be GOAL-FOCUSED: extract what helps accomplish the user's stated goal
- Be FUTURE-ORIENTED: what does the NEXT agent need to know?
- Be CONCISE: one line per entry, no fluff
- ONLY include files that were actually discussed - never invent file paths
- If a category has no relevant items, use an empty array
- Do NOT include any text outside the JSON block
- Do NOT explain your reasoning - just output the JSON`;
|
|
70
|
+
|
|
71
|
+
/**
 * Stricter prompt for retry after parse failure.
 *
 * Sent as a follow-up user message (after echoing the model's failed
 * response back as an assistant message) when the first extraction attempt
 * did not parse as JSON. It restates the schema in a compact form and
 * forbids markdown fencing and surrounding prose.
 */
export const EXTRACTION_RETRY_PROMPT = `Your previous response was not valid JSON. Please output ONLY a valid JSON object with this structure:

{
  "relevantFiles": [{ "path": "string", "reason": "string" }],
  "relevantCommands": ["string"],
  "relevantInformation": ["string"],
  "decisions": ["string"],
  "openQuestions": ["string"]
}

No explanations, no markdown, no text before or after - ONLY the JSON object.`;
|
|
85
|
+
|
|
86
|
+
/**
|
|
87
|
+
* Builds the user message for the extraction call
|
|
88
|
+
*/
|
|
89
|
+
export function buildExtractionUserMessage(
|
|
90
|
+
conversationText: string,
|
|
91
|
+
goal: string,
|
|
92
|
+
): string {
|
|
93
|
+
return `## Conversation History
|
|
94
|
+
|
|
95
|
+
${conversationText}
|
|
96
|
+
|
|
97
|
+
## User's Goal for New Thread
|
|
98
|
+
|
|
99
|
+
${goal}
|
|
100
|
+
|
|
101
|
+
Extract the relevant context for this goal and output ONLY the JSON object.`;
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
/**
 * Result of an extraction attempt.
 */
export interface ExtractionResult {
  /** True when the LLM call succeeded and its output parsed. */
  success: boolean;
  /** The normalized extraction; present when {@link success} is true. */
  extraction?: ExtractionOutput;
  /** Human-readable failure reason; present when {@link success} is false. */
  error?: string;
  /** True when a second, stricter attempt was made after a parse failure. */
  retried?: boolean;
}
|
|
113
|
+
|
|
114
|
+
/**
|
|
115
|
+
* Processes the LLM response and handles retry logic.
|
|
116
|
+
*
|
|
117
|
+
* @param responseText - The text response from the LLM
|
|
118
|
+
* @param config - Handoff configuration for normalization
|
|
119
|
+
* @param conversationText - Optional conversation text for file validation
|
|
120
|
+
* @returns Extraction result with normalized data or error
|
|
121
|
+
*/
|
|
122
|
+
export function processExtractionResponse(
|
|
123
|
+
responseText: string,
|
|
124
|
+
config: HandoffConfig,
|
|
125
|
+
conversationText?: string,
|
|
126
|
+
): ParseResult & { normalized?: ExtractionOutput } {
|
|
127
|
+
const parseResult = parseExtractionResponse(responseText);
|
|
128
|
+
|
|
129
|
+
if (!parseResult.success || !parseResult.data) {
|
|
130
|
+
return parseResult;
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
// Normalize the extraction (dedupe, cap limits, strip @ prefixes, validate files)
|
|
134
|
+
const normalized = normalizeExtraction(parseResult.data, config, conversationText);
|
|
135
|
+
|
|
136
|
+
return {
|
|
137
|
+
success: true,
|
|
138
|
+
data: parseResult.data,
|
|
139
|
+
normalized,
|
|
140
|
+
};
|
|
141
|
+
}
|
|
142
|
+
|
|
143
|
+
/**
|
|
144
|
+
* Extracts text content from an assistant message
|
|
145
|
+
*/
|
|
146
|
+
export function extractTextFromAssistantMessage(
|
|
147
|
+
content: Array<{ type: string; text?: string }>,
|
|
148
|
+
): string {
|
|
149
|
+
return content
|
|
150
|
+
.filter((c): c is { type: "text"; text: string } => c.type === "text")
|
|
151
|
+
.map((c) => c.text)
|
|
152
|
+
.join("\n");
|
|
153
|
+
}
|
|
@@ -0,0 +1,534 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Handoff Extension
|
|
3
|
+
*
|
|
4
|
+
* Provides a `/handoff` command that generates a high-quality "new thread prompt"
|
|
5
|
+
* from the current session, then starts a new session with that prompt pre-filled
|
|
6
|
+
* for the user to review and send.
|
|
7
|
+
*
|
|
8
|
+
* Usage:
|
|
9
|
+
* /handoff implement team-level handoff with proper tests
|
|
10
|
+
* /handoff fix the authentication bug in login flow
|
|
11
|
+
* /handoff add unit tests for the parser module
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
import { complete, getModel, type Message, type Model } from "@mariozechner/pi-ai";
|
|
15
|
+
import type {
|
|
16
|
+
ExtensionAPI,
|
|
17
|
+
ExtensionCommandContext,
|
|
18
|
+
} from "@mariozechner/pi-coding-agent";
|
|
19
|
+
import {
|
|
20
|
+
BorderedLoader,
|
|
21
|
+
convertToLlm,
|
|
22
|
+
serializeConversation,
|
|
23
|
+
} from "@mariozechner/pi-coding-agent";
|
|
24
|
+
|
|
25
|
+
import { loadConfig, validateGoal } from "./config.js";
|
|
26
|
+
import { ProgressLoader, EXTRACTION_PHASES } from "./progress.js";
|
|
27
|
+
import {
|
|
28
|
+
EXTRACTION_SYSTEM_PROMPT,
|
|
29
|
+
EXTRACTION_RETRY_PROMPT,
|
|
30
|
+
buildExtractionUserMessage,
|
|
31
|
+
processExtractionResponse,
|
|
32
|
+
extractTextFromAssistantMessage,
|
|
33
|
+
} from "./extraction.js";
|
|
34
|
+
import { collectSessionMetadata } from "./metadata.js";
|
|
35
|
+
import { assembleHandoffPrompt } from "./prompt.js";
|
|
36
|
+
import {
|
|
37
|
+
SKILL_ENTRY_TYPE,
|
|
38
|
+
type HandoffConfig,
|
|
39
|
+
type SkillEntry,
|
|
40
|
+
} from "./types.js";
|
|
41
|
+
|
|
42
|
+
/**
|
|
43
|
+
* Resolves the model to use for extraction based on config
|
|
44
|
+
*/
|
|
45
|
+
function resolveExtractionModel(
|
|
46
|
+
ctx: ExtensionCommandContext,
|
|
47
|
+
config: HandoffConfig,
|
|
48
|
+
): Model<any> | undefined {
|
|
49
|
+
// Use current model if configured to do so or no override specified
|
|
50
|
+
if (config.useCurrentModel || !config.model) {
|
|
51
|
+
return ctx.model;
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
// Try to get the override model
|
|
55
|
+
const [provider, ...modelParts] = config.model.split("/");
|
|
56
|
+
const modelId = modelParts.join("/");
|
|
57
|
+
|
|
58
|
+
if (!provider || !modelId) {
|
|
59
|
+
// Invalid format, fall back to current
|
|
60
|
+
return ctx.model;
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
const overrideModel = getModel(provider, modelId);
|
|
64
|
+
if (!overrideModel) {
|
|
65
|
+
// Model not found, fall back to current
|
|
66
|
+
console.warn(`Handoff: Model ${config.model} not found, using current model`);
|
|
67
|
+
return ctx.model;
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
return overrideModel;
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
/**
|
|
74
|
+
* Main handoff command handler
|
|
75
|
+
*/
|
|
76
|
+
async function runHandoffCommand(
|
|
77
|
+
args: string | undefined,
|
|
78
|
+
ctx: ExtensionCommandContext,
|
|
79
|
+
pi: ExtensionAPI,
|
|
80
|
+
lastSkill: string | undefined,
|
|
81
|
+
): Promise<void> {
|
|
82
|
+
// Load config from .pi/settings.json
|
|
83
|
+
const cwd = ctx.sessionManager.getCwd();
|
|
84
|
+
const config = loadConfig(cwd);
|
|
85
|
+
|
|
86
|
+
// Validate goal
|
|
87
|
+
const goal = args?.trim() ?? "";
|
|
88
|
+
const goalValidation = validateGoal(goal, config.minGoalLength);
|
|
89
|
+
|
|
90
|
+
if (!goalValidation.valid) {
|
|
91
|
+
if (ctx.hasUI) {
|
|
92
|
+
ctx.ui.notify(goalValidation.error!, "error");
|
|
93
|
+
} else {
|
|
94
|
+
console.error(goalValidation.error);
|
|
95
|
+
}
|
|
96
|
+
return;
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
// Check for model
|
|
100
|
+
if (!ctx.model) {
|
|
101
|
+
const errorMsg = "No model selected. Use /model to select a model first.";
|
|
102
|
+
if (ctx.hasUI) {
|
|
103
|
+
ctx.ui.notify(errorMsg, "error");
|
|
104
|
+
} else {
|
|
105
|
+
console.error(errorMsg);
|
|
106
|
+
}
|
|
107
|
+
return;
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
// Get conversation context
|
|
111
|
+
const sessionContext = ctx.sessionManager.buildSessionContext();
|
|
112
|
+
const messages = sessionContext.messages;
|
|
113
|
+
|
|
114
|
+
if (messages.length === 0) {
|
|
115
|
+
const errorMsg = "No conversation to hand off.";
|
|
116
|
+
if (ctx.hasUI) {
|
|
117
|
+
ctx.ui.notify(errorMsg, "error");
|
|
118
|
+
} else {
|
|
119
|
+
console.error(errorMsg);
|
|
120
|
+
}
|
|
121
|
+
return;
|
|
122
|
+
}
|
|
123
|
+
|
|
124
|
+
// Convert messages to LLM format and serialize
|
|
125
|
+
const llmMessages = convertToLlm(messages);
|
|
126
|
+
const conversationText = serializeConversation(llmMessages);
|
|
127
|
+
const currentSessionFile = ctx.sessionManager.getSessionFile();
|
|
128
|
+
|
|
129
|
+
// Collect metadata
|
|
130
|
+
const activeTools = pi.getActiveTools();
|
|
131
|
+
const sessionName = ctx.sessionManager.getSessionName();
|
|
132
|
+
const thinkingLevel = pi.getThinkingLevel();
|
|
133
|
+
|
|
134
|
+
const metadata = await collectSessionMetadata({
|
|
135
|
+
model: ctx.model ? { provider: ctx.model.provider, id: ctx.model.id } : undefined,
|
|
136
|
+
thinkingLevel: thinkingLevel !== "off" ? thinkingLevel : undefined,
|
|
137
|
+
tools: activeTools,
|
|
138
|
+
sessionName: sessionName ?? undefined,
|
|
139
|
+
lastSkill,
|
|
140
|
+
exec: (cmd, args, opts) => pi.exec(cmd, args, opts),
|
|
141
|
+
});
|
|
142
|
+
|
|
143
|
+
// Resolve which model to use for extraction
|
|
144
|
+
const extractionModel = resolveExtractionModel(ctx, config);
|
|
145
|
+
if (!extractionModel) {
|
|
146
|
+
const errorMsg = "No model available for extraction.";
|
|
147
|
+
if (ctx.hasUI) {
|
|
148
|
+
ctx.ui.notify(errorMsg, "error");
|
|
149
|
+
} else {
|
|
150
|
+
console.error(errorMsg);
|
|
151
|
+
}
|
|
152
|
+
return;
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
// Generate extraction via LLM
|
|
156
|
+
const extractionResult = await generateExtraction(
|
|
157
|
+
conversationText,
|
|
158
|
+
goal,
|
|
159
|
+
config,
|
|
160
|
+
ctx,
|
|
161
|
+
extractionModel,
|
|
162
|
+
);
|
|
163
|
+
|
|
164
|
+
if (!extractionResult.success || !extractionResult.extraction) {
|
|
165
|
+
if (ctx.hasUI) {
|
|
166
|
+
ctx.ui.notify(
|
|
167
|
+
extractionResult.error ?? "Failed to generate handoff context",
|
|
168
|
+
"error",
|
|
169
|
+
);
|
|
170
|
+
} else {
|
|
171
|
+
console.error(extractionResult.error ?? "Failed to generate handoff context");
|
|
172
|
+
}
|
|
173
|
+
return;
|
|
174
|
+
}
|
|
175
|
+
|
|
176
|
+
// Assemble the handoff prompt
|
|
177
|
+
const handoffPrompt = assembleHandoffPrompt(
|
|
178
|
+
extractionResult.extraction,
|
|
179
|
+
goal,
|
|
180
|
+
metadata,
|
|
181
|
+
config,
|
|
182
|
+
);
|
|
183
|
+
|
|
184
|
+
// Non-UI mode: just print the prompt
|
|
185
|
+
if (!ctx.hasUI) {
|
|
186
|
+
console.log(handoffPrompt);
|
|
187
|
+
return;
|
|
188
|
+
}
|
|
189
|
+
|
|
190
|
+
// Interactive mode: let user edit the prompt
|
|
191
|
+
const editedPrompt = await ctx.ui.editor("Edit handoff prompt", handoffPrompt);
|
|
192
|
+
|
|
193
|
+
if (editedPrompt === undefined) {
|
|
194
|
+
ctx.ui.notify("Handoff cancelled", "info");
|
|
195
|
+
return;
|
|
196
|
+
}
|
|
197
|
+
|
|
198
|
+
// Create new session with parent tracking
|
|
199
|
+
const newSessionResult = await ctx.newSession({
|
|
200
|
+
parentSession: currentSessionFile,
|
|
201
|
+
});
|
|
202
|
+
|
|
203
|
+
if (newSessionResult.cancelled) {
|
|
204
|
+
ctx.ui.notify("New session cancelled", "info");
|
|
205
|
+
return;
|
|
206
|
+
}
|
|
207
|
+
|
|
208
|
+
// Set the edited prompt in the editor for submission
|
|
209
|
+
ctx.ui.setEditorText(editedPrompt);
|
|
210
|
+
ctx.ui.notify("Handoff ready. Press Enter to send.", "info");
|
|
211
|
+
}
|
|
212
|
+
|
|
213
|
+
/**
 * Extraction result type (local to this file; the extraction type is
 * derived from processExtractionResponse so it tracks the parser's shape).
 */
interface ExtractionResult {
  /** True when the LLM call succeeded and its output parsed. */
  success: boolean;
  /** Normalized extraction output; present when success is true. */
  extraction?: ReturnType<typeof processExtractionResponse>["normalized"];
  /** Human-readable failure reason; present when success is false. */
  error?: string;
  /** Progress-loader completion text, set only in the phased-UI path. */
  completionMessage?: string;
}
|
|
222
|
+
|
|
223
|
+
/**
 * Generates the extraction by calling the LLM, with a retry on parse failure.
 *
 * Dispatch:
 * - Non-UI mode: calls doExtraction directly, no loader.
 * - UI + showProgressPhases: phase-based ProgressLoader driven by
 *   doExtractionWithPhases' onPhase callback.
 * - UI otherwise: simple BorderedLoader around doExtraction.
 *
 * NOTE(review): in the phased path, an abort disposes the loader and calls
 * done() even though the in-flight promise chain will also call dispose()/
 * done() when it settles — presumably the custom-UI `done` and `dispose`
 * are idempotent; confirm against the pi-coding-agent API. Also note the
 * completion message is read BEFORE dispose(), which looks deliberate.
 */
async function generateExtraction(
  conversationText: string,
  goal: string,
  config: HandoffConfig,
  ctx: ExtensionCommandContext,
  model: Model<any>,
): Promise<ExtractionResult> {
  if (!ctx.hasUI) {
    // Non-UI mode: direct call without loader
    return await doExtraction(conversationText, goal, config, ctx, model);
  }

  // Interactive mode: show loader during extraction
  if (config.showProgressPhases) {
    // Use phase-based progress loader
    return await ctx.ui.custom<ExtractionResult>((tui, theme, _kb, done) => {
      const loader = new ProgressLoader(tui, theme, EXTRACTION_PHASES[0]);
      loader.onAbort = () => {
        loader.dispose();
        done({ success: false, error: "Cancelled" });
      };

      // Kick off extraction; phase updates are forwarded to the loader.
      doExtractionWithPhases(conversationText, goal, config, ctx, model, loader.signal, (phase) => {
        loader.setPhase(phase);
      })
        .then((result) => {
          // Capture the completion message before disposing the loader.
          const completionMessage = loader.getCompletionMessage();
          loader.dispose();
          done({ ...result, completionMessage });
        })
        .catch((err) => {
          loader.dispose();
          console.error("Handoff extraction failed:", err);
          done({ success: false, error: err.message ?? "Unknown error" });
        });

      return loader;
    });
  } else {
    // Use simple bordered loader
    return await ctx.ui.custom<ExtractionResult>((tui, theme, _kb, done) => {
      const loader = new BorderedLoader(tui, theme, "Generating handoff context...");
      loader.onAbort = () => done({ success: false, error: "Cancelled" });

      doExtraction(conversationText, goal, config, ctx, model, loader.signal)
        .then(done)
        .catch((err) => {
          console.error("Handoff extraction failed:", err);
          done({ success: false, error: err.message ?? "Unknown error" });
        });

      return loader;
    });
  }
}
|
|
281
|
+
|
|
282
|
+
/**
|
|
283
|
+
* Performs the actual LLM extraction call with retry
|
|
284
|
+
*/
|
|
285
|
+
async function doExtraction(
|
|
286
|
+
conversationText: string,
|
|
287
|
+
goal: string,
|
|
288
|
+
config: HandoffConfig,
|
|
289
|
+
ctx: ExtensionCommandContext,
|
|
290
|
+
model: Model<any>,
|
|
291
|
+
signal?: AbortSignal,
|
|
292
|
+
): Promise<ExtractionResult> {
|
|
293
|
+
const apiKey = await ctx.modelRegistry.getApiKey(model);
|
|
294
|
+
|
|
295
|
+
// Build user message
|
|
296
|
+
const userMessage: Message = {
|
|
297
|
+
role: "user",
|
|
298
|
+
content: [
|
|
299
|
+
{ type: "text", text: buildExtractionUserMessage(conversationText, goal) },
|
|
300
|
+
],
|
|
301
|
+
timestamp: Date.now(),
|
|
302
|
+
};
|
|
303
|
+
|
|
304
|
+
// First attempt
|
|
305
|
+
const response = await complete(
|
|
306
|
+
model,
|
|
307
|
+
{ systemPrompt: EXTRACTION_SYSTEM_PROMPT, messages: [userMessage] },
|
|
308
|
+
{ apiKey, signal },
|
|
309
|
+
);
|
|
310
|
+
|
|
311
|
+
if (response.stopReason === "aborted") {
|
|
312
|
+
return { success: false, error: "Cancelled" };
|
|
313
|
+
}
|
|
314
|
+
|
|
315
|
+
if (response.stopReason === "error") {
|
|
316
|
+
return { success: false, error: response.errorMessage ?? "LLM error" };
|
|
317
|
+
}
|
|
318
|
+
|
|
319
|
+
const responseText = extractTextFromAssistantMessage(response.content);
|
|
320
|
+
const result = processExtractionResponse(responseText, config, conversationText);
|
|
321
|
+
|
|
322
|
+
if (result.success && result.normalized) {
|
|
323
|
+
return { success: true, extraction: result.normalized };
|
|
324
|
+
}
|
|
325
|
+
|
|
326
|
+
// Retry with stricter prompt
|
|
327
|
+
const retryMessage: Message = {
|
|
328
|
+
role: "user",
|
|
329
|
+
content: [{ type: "text", text: EXTRACTION_RETRY_PROMPT }],
|
|
330
|
+
timestamp: Date.now(),
|
|
331
|
+
};
|
|
332
|
+
|
|
333
|
+
const assistantMessage: Message = {
|
|
334
|
+
role: "assistant",
|
|
335
|
+
content: response.content,
|
|
336
|
+
api: response.api,
|
|
337
|
+
provider: response.provider,
|
|
338
|
+
model: response.model,
|
|
339
|
+
usage: response.usage,
|
|
340
|
+
stopReason: response.stopReason,
|
|
341
|
+
timestamp: response.timestamp,
|
|
342
|
+
};
|
|
343
|
+
|
|
344
|
+
const retryResponse = await complete(
|
|
345
|
+
model,
|
|
346
|
+
{
|
|
347
|
+
systemPrompt: EXTRACTION_SYSTEM_PROMPT,
|
|
348
|
+
messages: [userMessage, assistantMessage, retryMessage],
|
|
349
|
+
},
|
|
350
|
+
{ apiKey, signal },
|
|
351
|
+
);
|
|
352
|
+
|
|
353
|
+
if (retryResponse.stopReason === "aborted") {
|
|
354
|
+
return { success: false, error: "Cancelled" };
|
|
355
|
+
}
|
|
356
|
+
|
|
357
|
+
if (retryResponse.stopReason === "error") {
|
|
358
|
+
return { success: false, error: retryResponse.errorMessage ?? "LLM error on retry" };
|
|
359
|
+
}
|
|
360
|
+
|
|
361
|
+
const retryText = extractTextFromAssistantMessage(retryResponse.content);
|
|
362
|
+
const retryResult = processExtractionResponse(retryText, config, conversationText);
|
|
363
|
+
|
|
364
|
+
if (retryResult.success && retryResult.normalized) {
|
|
365
|
+
return { success: true, extraction: retryResult.normalized };
|
|
366
|
+
}
|
|
367
|
+
|
|
368
|
+
return {
|
|
369
|
+
success: false,
|
|
370
|
+
error: `Failed to parse extraction after retry: ${retryResult.error}`,
|
|
371
|
+
};
|
|
372
|
+
}
|
|
373
|
+
|
|
374
|
+
/**
|
|
375
|
+
* Performs extraction with phase updates for progress UI
|
|
376
|
+
*/
|
|
377
|
+
async function doExtractionWithPhases(
|
|
378
|
+
conversationText: string,
|
|
379
|
+
goal: string,
|
|
380
|
+
config: HandoffConfig,
|
|
381
|
+
ctx: ExtensionCommandContext,
|
|
382
|
+
model: Model<any>,
|
|
383
|
+
signal: AbortSignal,
|
|
384
|
+
onPhase: (phase: string) => void,
|
|
385
|
+
): Promise<ExtractionResult> {
|
|
386
|
+
const apiKey = await ctx.modelRegistry.getApiKey(model);
|
|
387
|
+
|
|
388
|
+
// Phase 1: Analyzing conversation
|
|
389
|
+
onPhase(EXTRACTION_PHASES[0]);
|
|
390
|
+
|
|
391
|
+
// Build user message
|
|
392
|
+
const userMessage: Message = {
|
|
393
|
+
role: "user",
|
|
394
|
+
content: [
|
|
395
|
+
{ type: "text", text: buildExtractionUserMessage(conversationText, goal) },
|
|
396
|
+
],
|
|
397
|
+
timestamp: Date.now(),
|
|
398
|
+
};
|
|
399
|
+
|
|
400
|
+
// Phase 2: Extracting context (LLM call)
|
|
401
|
+
onPhase(EXTRACTION_PHASES[1]);
|
|
402
|
+
|
|
403
|
+
// First attempt
|
|
404
|
+
const response = await complete(
|
|
405
|
+
model,
|
|
406
|
+
{ systemPrompt: EXTRACTION_SYSTEM_PROMPT, messages: [userMessage] },
|
|
407
|
+
{ apiKey, signal },
|
|
408
|
+
);
|
|
409
|
+
|
|
410
|
+
if (response.stopReason === "aborted") {
|
|
411
|
+
return { success: false, error: "Cancelled" };
|
|
412
|
+
}
|
|
413
|
+
|
|
414
|
+
if (response.stopReason === "error") {
|
|
415
|
+
return { success: false, error: response.errorMessage ?? "LLM error" };
|
|
416
|
+
}
|
|
417
|
+
|
|
418
|
+
// Phase 3: Assembling prompt
|
|
419
|
+
onPhase(EXTRACTION_PHASES[2]);
|
|
420
|
+
|
|
421
|
+
const responseText = extractTextFromAssistantMessage(response.content);
|
|
422
|
+
const result = processExtractionResponse(responseText, config, conversationText);
|
|
423
|
+
|
|
424
|
+
if (result.success && result.normalized) {
|
|
425
|
+
return { success: true, extraction: result.normalized };
|
|
426
|
+
}
|
|
427
|
+
|
|
428
|
+
// Retry needed - stay on phase 2
|
|
429
|
+
onPhase("Retrying extraction...");
|
|
430
|
+
|
|
431
|
+
const retryMessage: Message = {
|
|
432
|
+
role: "user",
|
|
433
|
+
content: [{ type: "text", text: EXTRACTION_RETRY_PROMPT }],
|
|
434
|
+
timestamp: Date.now(),
|
|
435
|
+
};
|
|
436
|
+
|
|
437
|
+
const assistantMessage: Message = {
|
|
438
|
+
role: "assistant",
|
|
439
|
+
content: response.content,
|
|
440
|
+
api: response.api,
|
|
441
|
+
provider: response.provider,
|
|
442
|
+
model: response.model,
|
|
443
|
+
usage: response.usage,
|
|
444
|
+
stopReason: response.stopReason,
|
|
445
|
+
timestamp: response.timestamp,
|
|
446
|
+
};
|
|
447
|
+
|
|
448
|
+
const retryResponse = await complete(
|
|
449
|
+
model,
|
|
450
|
+
{
|
|
451
|
+
systemPrompt: EXTRACTION_SYSTEM_PROMPT,
|
|
452
|
+
messages: [userMessage, assistantMessage, retryMessage],
|
|
453
|
+
},
|
|
454
|
+
{ apiKey, signal },
|
|
455
|
+
);
|
|
456
|
+
|
|
457
|
+
if (retryResponse.stopReason === "aborted") {
|
|
458
|
+
return { success: false, error: "Cancelled" };
|
|
459
|
+
}
|
|
460
|
+
|
|
461
|
+
if (retryResponse.stopReason === "error") {
|
|
462
|
+
return { success: false, error: retryResponse.errorMessage ?? "LLM error on retry" };
|
|
463
|
+
}
|
|
464
|
+
|
|
465
|
+
// Back to phase 3
|
|
466
|
+
onPhase(EXTRACTION_PHASES[2]);
|
|
467
|
+
|
|
468
|
+
const retryText = extractTextFromAssistantMessage(retryResponse.content);
|
|
469
|
+
const retryResult = processExtractionResponse(retryText, config, conversationText);
|
|
470
|
+
|
|
471
|
+
if (retryResult.success && retryResult.normalized) {
|
|
472
|
+
return { success: true, extraction: retryResult.normalized };
|
|
473
|
+
}
|
|
474
|
+
|
|
475
|
+
return {
|
|
476
|
+
success: false,
|
|
477
|
+
error: `Failed to parse extraction after retry: ${retryResult.error}`,
|
|
478
|
+
};
|
|
479
|
+
}
|
|
480
|
+
|
|
481
|
+
/**
|
|
482
|
+
* Main extension entry point
|
|
483
|
+
*/
|
|
484
|
+
export default function handoffExtension(pi: ExtensionAPI) {
|
|
485
|
+
// Track last used skill
|
|
486
|
+
let lastSkill: string | undefined;
|
|
487
|
+
|
|
488
|
+
// Restore last skill from session on startup
|
|
489
|
+
pi.on("session_start", async (_event, ctx) => {
|
|
490
|
+
lastSkill = undefined;
|
|
491
|
+
for (const entry of ctx.sessionManager.getEntries()) {
|
|
492
|
+
if (
|
|
493
|
+
entry.type === "custom" &&
|
|
494
|
+
(entry as any).customType === SKILL_ENTRY_TYPE
|
|
495
|
+
) {
|
|
496
|
+
const data = (entry as any).data as SkillEntry | undefined;
|
|
497
|
+
if (data?.skillName) {
|
|
498
|
+
lastSkill = data.skillName;
|
|
499
|
+
}
|
|
500
|
+
}
|
|
501
|
+
}
|
|
502
|
+
});
|
|
503
|
+
|
|
504
|
+
// Track skill usage via input event
|
|
505
|
+
pi.on("input", async (event, _ctx) => {
|
|
506
|
+
const text = event.text.trim();
|
|
507
|
+
|
|
508
|
+
// Check if this is a skill command
|
|
509
|
+
if (text.startsWith("/skill:")) {
|
|
510
|
+
const skillMatch = text.match(/^\/skill:([^\s]+)/);
|
|
511
|
+
if (skillMatch) {
|
|
512
|
+
const skillName = skillMatch[1];
|
|
513
|
+
lastSkill = skillName;
|
|
514
|
+
|
|
515
|
+
// Persist to session
|
|
516
|
+
pi.appendEntry(SKILL_ENTRY_TYPE, {
|
|
517
|
+
skillName,
|
|
518
|
+
timestamp: Date.now(),
|
|
519
|
+
} as SkillEntry);
|
|
520
|
+
}
|
|
521
|
+
}
|
|
522
|
+
|
|
523
|
+
// Let the input continue processing
|
|
524
|
+
return { action: "continue" };
|
|
525
|
+
});
|
|
526
|
+
|
|
527
|
+
// Register the /handoff command
|
|
528
|
+
pi.registerCommand("handoff", {
|
|
529
|
+
description: "Transfer context to a new focused session",
|
|
530
|
+
handler: async (args, ctx) => {
|
|
531
|
+
await runHandoffCommand(args, ctx, pi, lastSkill);
|
|
532
|
+
},
|
|
533
|
+
});
|
|
534
|
+
}
|