portable-agent-layer 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +80 -0
  3. package/assets/agents/claude-researcher.md +43 -0
  4. package/assets/agents/investigative-researcher.md +44 -0
  5. package/assets/agents/multi-perspective-researcher.md +43 -0
  6. package/assets/skills/analyze-pdf.md +40 -0
  7. package/assets/skills/analyze-youtube.md +35 -0
  8. package/assets/skills/council.md +43 -0
  9. package/assets/skills/create-skill.md +31 -0
  10. package/assets/skills/extract-entities.md +63 -0
  11. package/assets/skills/extract-wisdom.md +18 -0
  12. package/assets/skills/first-principles.md +17 -0
  13. package/assets/skills/fyzz-chat-api.md +43 -0
  14. package/assets/skills/reflect.md +87 -0
  15. package/assets/skills/research.md +68 -0
  16. package/assets/skills/review.md +19 -0
  17. package/assets/skills/summarize.md +15 -0
  18. package/assets/templates/AGENTS.md.template +45 -0
  19. package/assets/templates/telos/BELIEFS.md +4 -0
  20. package/assets/templates/telos/CHALLENGES.md +4 -0
  21. package/assets/templates/telos/GOALS.md +12 -0
  22. package/assets/templates/telos/IDEAS.md +4 -0
  23. package/assets/templates/telos/IDENTITY.md +4 -0
  24. package/assets/templates/telos/LEARNED.md +4 -0
  25. package/assets/templates/telos/MISSION.md +4 -0
  26. package/assets/templates/telos/MODELS.md +4 -0
  27. package/assets/templates/telos/NARRATIVES.md +4 -0
  28. package/assets/templates/telos/PROJECTS.md +7 -0
  29. package/assets/templates/telos/STRATEGIES.md +4 -0
  30. package/bin/pal +24 -0
  31. package/bin/pal.bat +8 -0
  32. package/bin/pal.ps1 +30 -0
  33. package/package.json +82 -0
  34. package/src/cli/index.ts +344 -0
  35. package/src/cli/install.ts +86 -0
  36. package/src/cli/uninstall.ts +45 -0
  37. package/src/hooks/LoadContext.ts +41 -0
  38. package/src/hooks/SecurityValidator.ts +52 -0
  39. package/src/hooks/SkillGuard.ts +41 -0
  40. package/src/hooks/StopOrchestrator.ts +35 -0
  41. package/src/hooks/UserPromptOrchestrator.ts +35 -0
  42. package/src/hooks/handlers/backup.ts +41 -0
  43. package/src/hooks/handlers/failure.ts +136 -0
  44. package/src/hooks/handlers/rating.ts +409 -0
  45. package/src/hooks/handlers/relationship.ts +113 -0
  46. package/src/hooks/handlers/session-name.ts +121 -0
  47. package/src/hooks/handlers/synthesis.ts +109 -0
  48. package/src/hooks/handlers/tab.ts +8 -0
  49. package/src/hooks/handlers/update-counts.ts +151 -0
  50. package/src/hooks/handlers/work-learning.ts +183 -0
  51. package/src/hooks/handlers/work-session.ts +58 -0
  52. package/src/hooks/lib/claude-md.ts +121 -0
  53. package/src/hooks/lib/context.ts +433 -0
  54. package/src/hooks/lib/entities.ts +304 -0
  55. package/src/hooks/lib/export.ts +76 -0
  56. package/src/hooks/lib/inference.ts +91 -0
  57. package/src/hooks/lib/learning-category.ts +14 -0
  58. package/src/hooks/lib/log.ts +53 -0
  59. package/src/hooks/lib/models.ts +16 -0
  60. package/src/hooks/lib/paths.ts +80 -0
  61. package/src/hooks/lib/relationship.ts +135 -0
  62. package/src/hooks/lib/security.ts +122 -0
  63. package/src/hooks/lib/session-names.ts +247 -0
  64. package/src/hooks/lib/setup.ts +189 -0
  65. package/src/hooks/lib/signal-trends.ts +117 -0
  66. package/src/hooks/lib/signals.ts +37 -0
  67. package/src/hooks/lib/stdin.ts +18 -0
  68. package/src/hooks/lib/stop.ts +155 -0
  69. package/src/hooks/lib/time.ts +19 -0
  70. package/src/hooks/lib/token-usage.ts +42 -0
  71. package/src/hooks/lib/transcript.ts +76 -0
  72. package/src/hooks/lib/wisdom.ts +48 -0
  73. package/src/hooks/lib/work-tracking.ts +193 -0
  74. package/src/hooks/setup-check.ts +42 -0
  75. package/src/targets/claude/install.ts +145 -0
  76. package/src/targets/claude/uninstall.ts +101 -0
  77. package/src/targets/lib.ts +337 -0
  78. package/src/targets/opencode/install.ts +59 -0
  79. package/src/targets/opencode/plugin.ts +328 -0
  80. package/src/targets/opencode/uninstall.ts +57 -0
  81. package/src/tools/entity-save.ts +110 -0
  82. package/src/tools/export.ts +34 -0
  83. package/src/tools/fyzz-api.ts +104 -0
  84. package/src/tools/import.ts +123 -0
  85. package/src/tools/pattern-synthesis.ts +435 -0
  86. package/src/tools/pdf-download.ts +102 -0
  87. package/src/tools/relationship-reflect.ts +362 -0
  88. package/src/tools/session-summary.ts +206 -0
  89. package/src/tools/token-cost.ts +301 -0
  90. package/src/tools/youtube-analyze.ts +105 -0
@@ -0,0 +1,41 @@
1
+ /**
2
+ * Stop handler: automatic weekly backup.
3
+ * Creates a zip of all gitignored personal files if the last backup
4
+ * is older than 7 days, or if no backup exists yet.
5
+ */
6
+
7
+ import { readdirSync, statSync } from "node:fs";
8
+ import { resolve } from "node:path";
9
+ import { exportZip, timestamp } from "../lib/export";
10
+ import { logDebug } from "../lib/log";
11
+ import { paths } from "../lib/paths";
12
+
13
+ const BACKUP_INTERVAL_MS = 7 * 24 * 60 * 60 * 1000; // 7 days
14
+
15
+ export async function autoBackup(): Promise<void> {
16
+ const backupDir = paths.backups();
17
+
18
+ // Check most recent backup
19
+ const existing = readdirSync(backupDir)
20
+ .filter((f) => f.startsWith("pal-backup-") && f.endsWith(".zip"))
21
+ .sort()
22
+ .reverse();
23
+
24
+ if (existing.length > 0) {
25
+ const latestPath = resolve(backupDir, existing[0]);
26
+ const latestMtime = statSync(latestPath).mtimeMs;
27
+ if (Date.now() - latestMtime < BACKUP_INTERVAL_MS) {
28
+ logDebug("backup", "Skipping — last backup is less than 7 days old");
29
+ return;
30
+ }
31
+ }
32
+
33
+ const outputPath = resolve(backupDir, `pal-backup-${timestamp()}.zip`);
34
+ const count = exportZip(outputPath);
35
+
36
+ if (count === 0) {
37
+ logDebug("backup", "Nothing to back up");
38
+ } else {
39
+ logDebug("backup", `Backed up ${count} files → ${outputPath}`);
40
+ }
41
+ }
@@ -0,0 +1,136 @@
1
+ /**
2
+ * Deep Failure Capture — full context dump for ratings 1–3.
3
+ *
4
+ * Writes to memory/learning/failures/YYYY-MM/{timestamp}_{slug}/
5
+ * CONTEXT.md — full failure context with transcript excerpt
6
+ * sentiment.json — structured rating + metadata
7
+ */
8
+
9
+ import { writeFileSync } from "node:fs";
10
+ import { resolve } from "node:path";
11
+ import { inference } from "../lib/inference";
12
+ import { ensureDir, paths } from "../lib/paths";
13
+ import { fileTimestamp, monthPath } from "../lib/time";
14
+ import { logTokenUsage } from "../lib/token-usage";
15
+ import {
16
+ extractContent,
17
+ extractLastAssistant,
18
+ extractLastUser,
19
+ parseMessages,
20
+ } from "../lib/transcript";
21
+
22
+ function slugify(text: string): string {
23
+ return (
24
+ text
25
+ .toLowerCase()
26
+ .replace(/[^a-z0-9\s]/g, "")
27
+ .trim()
28
+ .split(/\s+/)
29
+ .slice(0, 8)
30
+ .join("-") || "failure"
31
+ );
32
+ }
33
+
34
+ export async function captureFailure(
35
+ rating: number,
36
+ context: string,
37
+ transcript: string,
38
+ detailedContext?: string,
39
+ savedResponse?: string,
40
+ savedUserMessage?: string
41
+ ): Promise<void> {
42
+ const messages = parseMessages(transcript);
43
+ // Prefer messages saved at rating time (before the AI replied to the rating)
44
+ const lastUser =
45
+ savedUserMessage?.slice(0, 400) ||
46
+ extractContent(extractLastUser(messages)).slice(0, 400);
47
+ const lastAssistant =
48
+ savedResponse?.slice(0, 600) ||
49
+ extractContent(extractLastAssistant(messages)).slice(0, 600);
50
+
51
+ const slug = slugify(context);
52
+ const dir = ensureDir(
53
+ resolve(paths.failures(), monthPath(), `${fileTimestamp()}_${slug}`)
54
+ );
55
+
56
+ // Attempt inference to fill root cause analysis
57
+ let whatWentWrong = "";
58
+ let whatToDoDifferently = "";
59
+ try {
60
+ const analysisResult = await inference({
61
+ system:
62
+ "You are analyzing a failed AI assistant interaction. Based on the context, identify what went wrong and what should be done differently. Be specific and actionable.",
63
+ user: [
64
+ `Rating: ${rating}/10`,
65
+ `Context: ${context}`,
66
+ detailedContext ? `Analysis: ${detailedContext}` : "",
67
+ `User said: ${lastUser}`,
68
+ `Assistant said: ${lastAssistant}`,
69
+ ]
70
+ .filter(Boolean)
71
+ .join("\n"),
72
+ maxTokens: 300,
73
+ timeout: 8000,
74
+ jsonSchema: {
75
+ type: "object" as const,
76
+ additionalProperties: false,
77
+ properties: {
78
+ what_went_wrong: { type: "string" as const },
79
+ what_to_do_differently: { type: "string" as const },
80
+ },
81
+ required: ["what_went_wrong", "what_to_do_differently"],
82
+ },
83
+ });
84
+ if (analysisResult.usage) logTokenUsage("failure", analysisResult.usage);
85
+ if (analysisResult.success && analysisResult.output) {
86
+ const parsed = JSON.parse(analysisResult.output) as {
87
+ what_went_wrong?: string;
88
+ what_to_do_differently?: string;
89
+ };
90
+ whatWentWrong = parsed.what_went_wrong ?? "";
91
+ whatToDoDifferently = parsed.what_to_do_differently ?? "";
92
+ }
93
+ } catch {
94
+ // Graceful fallback — empty sections are still useful with the other context
95
+ }
96
+
97
+ const contextMdPath = resolve(dir, "CONTEXT.md");
98
+ writeFileSync(
99
+ contextMdPath,
100
+ [
101
+ `# Failure Capture — Rating ${rating}/10`,
102
+ `**Date:** ${new Date().toISOString().slice(0, 10)}`,
103
+ `**Context:** ${context}`,
104
+ "",
105
+ "## Last User Message",
106
+ lastUser || "*(unavailable)*",
107
+ "",
108
+ "## Last Assistant Response",
109
+ lastAssistant || "*(unavailable)*",
110
+ "",
111
+ ...(detailedContext ? ["## AI Response Context", detailedContext, ""] : []),
112
+ "## What Went Wrong?",
113
+ whatWentWrong || "",
114
+ "",
115
+ "## What Should Be Done Differently?",
116
+ whatToDoDifferently || "",
117
+ "",
118
+ ].join("\n"),
119
+ "utf-8"
120
+ );
121
+
122
+ writeFileSync(
123
+ resolve(dir, "sentiment.json"),
124
+ JSON.stringify(
125
+ {
126
+ rating,
127
+ context,
128
+ ts: new Date().toISOString(),
129
+ slug,
130
+ },
131
+ null,
132
+ 2
133
+ ),
134
+ "utf-8"
135
+ );
136
+ }
@@ -0,0 +1,409 @@
1
+ /**
2
+ * UserPromptSubmit handler: detects explicit and implicit ratings.
3
+ * Ported from original PAI's RatingCapture.hook.ts with rich sentiment analysis.
4
+ *
5
+ * - Explicit: "7", "8 - great work", "rating: 8"
6
+ * - Implicit: Haiku-powered sentiment inference on every user message
7
+ * - Low ratings (<5) write detailed learning markdown
8
+ * - Very low ratings (<=3) write pending-failure.json for Stop handler
9
+ */
10
+
11
+ import { existsSync, readFileSync, writeFileSync } from "node:fs";
12
+ import { resolve } from "node:path";
13
+ import { inference } from "../lib/inference";
14
+ import { categorizeLearning } from "../lib/learning-category";
15
+ import { ensureDir, paths } from "../lib/paths";
16
+ import { emitRating } from "../lib/signals";
17
+ import { fileTimestamp, monthPath, now } from "../lib/time";
18
+ import { logTokenUsage } from "../lib/token-usage";
19
+
20
/**
 * Read the cached last assistant response for a session.
 *
 * The cache (`state/last-responses.json`, written by StopOrchestrator) maps
 * session id → `{ response }`. Returns "" when the cache is missing,
 * unparseable, or has no entry for this session — there is deliberately NO
 * fallback to "most recent entry", to avoid attributing another session's
 * response to this rating.
 */
function getLastResponse(sessionId?: string): string {
  try {
    const cachePath = resolve(paths.state(), "last-responses.json");
    if (!existsSync(cachePath)) return "";
    const cache = JSON.parse(readFileSync(cachePath, "utf-8")) as Record<
      string,
      { response?: string }
    >;
    // Only an exact session match is trusted.
    if (sessionId && cache[sessionId]) {
      return cache[sessionId].response ?? "";
    }
    // No session match — return empty rather than wrong context
    return "";
  } catch {
    /* non-critical — a corrupt cache must never break rating capture */
  }
  return "";
}
40
+
41
+ // ── Explicit Rating Detection ──
42
+
43
+ /**
44
+ * Parse explicit rating pattern from prompt.
45
+ * Matches: "7", "8 - good work", "6: needs work", "9 excellent", "10!"
46
+ * Rejects: "3 items", "5 things to fix", "7th thing", "10/10"
47
+ */
48
+ function parseExplicitRating(
49
+ prompt: string
50
+ ): { rating: number; comment?: string } | null {
51
+ const trimmed = prompt.trim();
52
+ const match = trimmed.match(/^(10|[1-9])(?:\s*[-:]\s*|\s+)?(.*)$/);
53
+ if (!match) return null;
54
+
55
+ const rating = parseInt(match[1], 10);
56
+ if (rating < 1 || rating > 10) return null;
57
+
58
+ // Reject if char after number is not a separator (catches "10/10", "3.5", "7th")
59
+ const afterNumber = trimmed.slice(match[1].length);
60
+ if (afterNumber.length > 0 && /^[/.\dA-Za-z]/.test(afterNumber)) return null;
61
+
62
+ const rest = match[2]?.trim() || undefined;
63
+
64
+ // Reject if rest starts with words indicating a sentence, not a rating
65
+ if (rest) {
66
+ const sentenceStarters =
67
+ /^(items?|things?|steps?|files?|lines?|bugs?|issues?|errors?|times?|minutes?|hours?|days?|seconds?|percent|%|th\b|st\b|nd\b|rd\b|of\b|in\b|at\b|to\b|the\b|a\b|an\b)/i;
68
+ if (sentenceStarters.test(rest)) return null;
69
+ }
70
+
71
+ return { rating, comment: rest };
72
+ }
73
+
74
+ // ── Praise Fast-Path ──
75
+
76
+ const POSITIVE_PRAISE_WORDS = new Set([
77
+ "excellent",
78
+ "amazing",
79
+ "brilliant",
80
+ "fantastic",
81
+ "wonderful",
82
+ "incredible",
83
+ "awesome",
84
+ "perfect",
85
+ "great",
86
+ "nice",
87
+ "superb",
88
+ "outstanding",
89
+ "stellar",
90
+ "phenomenal",
91
+ "remarkable",
92
+ "terrific",
93
+ "splendid",
94
+ ]);
95
+
96
+ const POSITIVE_PHRASES = new Set([
97
+ "great job",
98
+ "good job",
99
+ "nice work",
100
+ "well done",
101
+ "nice job",
102
+ "good work",
103
+ "love it",
104
+ "nailed it",
105
+ "looks great",
106
+ "looks good",
107
+ "rock solid",
108
+ "thats great",
109
+ "that works",
110
+ "thank you",
111
+ "thanks",
112
+ ]);
113
+
114
+ function isPraise(prompt: string): boolean {
115
+ const normalized = prompt
116
+ .trim()
117
+ .toLowerCase()
118
+ .replace(/[.!?,'"]/g, "");
119
+ const words = normalized.split(/\s+/);
120
+ if (words.length > 3) return false;
121
+ return (
122
+ POSITIVE_PRAISE_WORDS.has(normalized) ||
123
+ POSITIVE_PHRASES.has(normalized) ||
124
+ (words.length === 2 && words.every((w) => POSITIVE_PRAISE_WORDS.has(w)))
125
+ );
126
+ }
127
+
128
+ // ── System-Injected Tag Stripping ──
129
+
130
+ /**
131
+ * Strip IDE/system-injected XML tags from the prompt to recover the raw user text.
132
+ * Claude Code VSCode extension prepends tags like <ide_opened_file>...</ide_opened_file>
133
+ * and <ide_selection>...</ide_selection> to the prompt field in hooks.
134
+ */
135
+ const INJECTED_TAG_RE =
136
+ /<(?:ide_opened_file|ide_selection|system-reminder|task-notification)[^>]*>[\s\S]*?<\/(?:ide_opened_file|ide_selection|system-reminder|task-notification)>/gi;
137
+
138
+ function stripInjectedTags(prompt: string): string {
139
+ return prompt.replace(INJECTED_TAG_RE, "").trim();
140
+ }
141
+
142
+ // ── System Text Filters ──
143
+
144
+ const SYSTEM_TEXT_PATTERNS = [
145
+ /^<task-notification>/i,
146
+ /^<system-reminder>/i,
147
+ /^This session is being continued from a previous conversation/i,
148
+ /^Please continue the conversation/i,
149
+ /^Note:.*was read before/i,
150
+ ];
151
+
152
+ function isSystemText(prompt: string): boolean {
153
+ const trimmed = prompt.trim();
154
+ return SYSTEM_TEXT_PATTERNS.some((re) => re.test(trimmed));
155
+ }
156
+
157
+ // ── Sentiment Analysis ──
158
+
159
/**
 * JSON schema passed to inference() to force structured sentiment output.
 * Mirrors the SentimentResult interface below — keep the two in sync.
 */
const SENTIMENT_SCHEMA = {
  type: "object",
  properties: {
    rating: { type: ["number", "null"] },
    sentiment: { enum: ["positive", "negative", "neutral"] },
    confidence: { type: "number" },
    summary: { type: "string" },
    detailed_context: { type: "string" },
  },
  required: ["rating", "sentiment", "confidence", "summary", "detailed_context"],
  additionalProperties: false,
} as const;

/** Parsed shape of the model's JSON reply (see SENTIMENT_SCHEMA). */
interface SentimentResult {
  // 1–10 score, or null when the message carries no emotional signal
  rating: number | null;
  sentiment: "positive" | "negative" | "neutral";
  // 0.0–1.0; results below MIN_CONFIDENCE are discarded by the caller
  confidence: number;
  // brief explanation (the prompt asks for ≤10 words)
  summary: string;
  // longer root-cause analysis, stored with low-rating learning captures
  detailed_context: string;
}

// System prompt for the implicit-sentiment inference call. Runtime text —
// edit with care; handleImplicitSentiment relies on the JSON output format
// it specifies.
const SENTIMENT_SYSTEM_PROMPT = `Analyze the user's message for emotional sentiment toward the AI assistant.

OUTPUT FORMAT (JSON only):
{
  "rating": <1-10 or null>,
  "sentiment": "positive" | "negative" | "neutral",
  "confidence": <0.0-1.0>,
  "summary": "<brief explanation, 10 words max>",
  "detailed_context": "<comprehensive analysis, 50-150 words>"
}

DETAILED_CONTEXT REQUIREMENTS:
1. What the user was trying to accomplish
2. What the AI did (or failed to do)
3. Why the user reacted this way (root cause)
4. What specific behavior triggered this reaction
5. What the AI should do differently (negative) or what worked (positive)

RATING SCALE:
- 1-2: Strong frustration, anger, disappointment
- 3-4: Mild frustration, dissatisfaction
- 5: Neutral
- 6-7: Satisfaction, approval
- 8-9: Strong approval, impressed
- 10: Extraordinary enthusiasm

CRITICAL DISTINCTIONS:
- Profanity can indicate EITHER frustration OR excitement — use context
- Sarcasm: "Oh great, another error" = negative despite "great"
- Short praise ("great job", "nice") = STRONG APPROVAL (8-9), not mild

IMPLIED SENTIMENT (most feedback is implied, not explicit):

Implied NEGATIVE (rate 2-4):
- CORRECTIONS: "No, I meant..." / "That's not what I said" -> 3-4
- REPEATED REQUESTS: Having to ask the same thing twice -> 2-3
- BEHAVIORAL CORRECTIONS: "Don't do that" / "Stop doing X" -> 3
- EXASPERATED QUESTIONS: "Why is this still broken?" -> 2-3
- SHORT DISMISSALS: "whatever" / "fine" / "just do it" -> 3-4
- POINTING OUT OMISSIONS: "What about X?" (obviously required) -> 4

Implied POSITIVE (rate 6-8):
- TRUST SIGNALS: "Alright, fix all of it" / "Go ahead" -> 7
- BUILDING ON WORK: "Now also add..." / "Next, do..." -> 6-7
- ENGAGED FOLLOW-UPS: "What about X?" (exploring, not correcting) -> 6
- MOVING FORWARD: Accepting output and giving next task -> 6

WHEN TO RETURN null FOR RATING:
- Neutral technical questions ("Can you check the logs?")
- Simple commands ("Do it", "Yes", "Continue")
- No emotional indicators present`;

/** Sentiment results below this confidence are ignored. */
const MIN_CONFIDENCE = 0.5;
233
+
234
+ // ── Rating Handling ──
235
+
236
/**
 * Persist a learning markdown file for a low rating (<5).
 *
 * File path: `paths.sessionLearning()/YYYY-MM/{ts}_{source}-rating-{n}_{category}.md`
 * where category comes from categorizeLearning().
 *
 * @param rating          the 1–10 score (callers only pass values below 5)
 * @param source          "explicit" or "implicit"
 * @param context         short human-readable trigger context
 * @param detailedContext optional analysis text ("" omits the Analysis section)
 * @param responsePreview excerpt of the assistant response being rated
 */
function writeLearningMarkdown(
  rating: number,
  source: string,
  context: string,
  detailedContext: string,
  responsePreview: string
): void {
  const category = categorizeLearning(context, detailedContext);
  const dir = ensureDir(resolve(paths.sessionLearning(), monthPath()));
  const filename = `${fileTimestamp()}_${source}-rating-${rating}_${category}.md`;

  const content = [
    `# ${source === "explicit" ? "Low Rating" : "Implicit Low Rating"}: ${rating}/10`,
    `**Title:** ${context.slice(0, 100) || "(low rating)"}`,
    `**Date:** ${new Date().toISOString().slice(0, 10)}`,
    `**Rating:** ${rating}/10`,
    `**Source:** ${source}`,
    `**Category:** ${category.toUpperCase()}`,
    "",
    "## Context",
    context || "*(unavailable)*",
    "",
    // Analysis section only appears when a detailed context was provided.
    ...(detailedContext ? ["## Analysis", detailedContext, ""] : []),
    "## Last Response",
    responsePreview || "*(unavailable)*",
    "",
  ].join("\n");

  writeFileSync(resolve(dir, filename), content, "utf-8");
}
266
+
267
/**
 * Route a detected rating to the appropriate persistence paths.
 *
 * Always emits a rating signal. Additionally:
 *  - rating <= 3: writes `state/pending-failure.json` (picked up later by
 *    the Stop handler for deep failure capture) AND a learning markdown file;
 *  - rating 4: learning markdown only;
 *  - rating >= 5: signal only, nothing written.
 *
 * @param rating          1–10 score
 * @param context         short trigger description
 * @param source          "explicit" or "implicit"
 * @param detailedContext optional model-generated analysis
 * @param sessionId       used to look up the cached last assistant response
 * @param userMessage     the raw user message that produced the rating
 */
function handleRating(
  rating: number,
  context: string,
  source: string,
  detailedContext?: string,
  sessionId?: string,
  userMessage?: string
): void {
  const responsePreview = getLastResponse(sessionId).slice(0, 500);
  emitRating(rating, context, source, responsePreview);

  if (rating <= 3) {
    // Deep failure — write pending file for Stop handler with full transcript
    const userPreview = userMessage?.slice(0, 400);
    writeFileSync(
      resolve(paths.state(), "pending-failure.json"),
      JSON.stringify(
        {
          rating,
          context,
          source,
          detailedContext,
          responsePreview,
          userPreview,
          ts: now(),
        },
        null,
        2
      ),
      "utf-8"
    );
    // Also write learning markdown
    writeLearningMarkdown(
      rating,
      source,
      context,
      detailedContext ?? "",
      responsePreview
    );
  } else if (rating < 5) {
    // Low but not critical — write learning markdown
    writeLearningMarkdown(
      rating,
      source,
      context,
      detailedContext ?? "",
      responsePreview
    );
  }
}
317
+
318
+ // ── Implicit Sentiment ──
319
+
320
/**
 * Infer an implicit rating from an ordinary user message.
 *
 * Fast-path: short praise maps straight to 8/10 with no API call. Otherwise
 * the message is filtered (system text, too short/long, code-like) and run
 * through sentiment inference; any confident, non-neutral rating is routed
 * through handleRating().
 *
 * @param message   user message with injected tags already stripped
 * @param sessionId used to fetch the last assistant response for context
 */
async function handleImplicitSentiment(
  message: string,
  sessionId?: string
): Promise<void> {
  const trimmed = message.trim();

  // Fast-path: short praise -> rating 8
  if (isPraise(trimmed)) {
    handleRating(
      8,
      `Direct praise: "${trimmed}"`,
      "implicit",
      undefined,
      sessionId,
      trimmed
    );
    return;
  }

  // Skip system-injected text
  if (isSystemText(trimmed)) return;

  // Skip very short, very long, or code-like messages
  if (trimmed.length < 5 || trimmed.length > 500) return;
  // Leading /, $, `, { suggests a command or pasted code; a blank line
  // suggests a multi-paragraph message rather than a quick reaction.
  if (/^[/$`{]/.test(trimmed) || trimmed.includes("\n\n")) return;

  // Include the tail of the last AI response so the model can judge the
  // user's reaction in context.
  const lastResponse = getLastResponse(sessionId).slice(0, 300);
  const contextBlock = lastResponse
    ? `CONTEXT (last AI response excerpt):\n${lastResponse}\n\nCURRENT USER MESSAGE:\n${trimmed.slice(0, 300)}`
    : trimmed.slice(0, 300);

  const result = await inference({
    system: SENTIMENT_SYSTEM_PROMPT,
    user: contextBlock,
    maxTokens: 500,
    timeout: 8000,
    jsonSchema: SENTIMENT_SCHEMA,
  });

  if (result.usage) logTokenUsage("rating", result.usage);

  if (!result.success || !result.output) return;

  try {
    const parsed = JSON.parse(result.output) as SentimentResult;

    // Skip if no sentiment detected or low confidence
    if (parsed.rating === null) return;
    if (parsed.confidence < MIN_CONFIDENCE) return;

    const rating = parsed.rating;
    // 5 means "neutral" on the scale — not worth recording either way.
    if (typeof rating === "number" && rating >= 1 && rating <= 10 && rating !== 5) {
      handleRating(
        rating,
        `${parsed.summary}: ${trimmed.slice(0, 150)}`,
        "implicit",
        parsed.detailed_context,
        sessionId,
        trimmed
      );
    }
  } catch (err) {
    // Lazy import keeps the common path free of the logger module.
    const { logError } = await import("../lib/log");
    logError("rating:implicit", err);
  }
}
386
+
387
+ // ── Main Export ──
388
+
389
+ export async function captureRating(message: string, sessionId?: string): Promise<void> {
390
+ // Strip IDE/system-injected tags to recover raw user text
391
+ const cleaned = stripInjectedTags(message);
392
+
393
+ // Path 1: Explicit rating
394
+ const explicit = parseExplicitRating(cleaned);
395
+ if (explicit) {
396
+ handleRating(
397
+ explicit.rating,
398
+ explicit.comment || cleaned.slice(0, 200),
399
+ "explicit",
400
+ undefined,
401
+ sessionId,
402
+ cleaned
403
+ );
404
+ return;
405
+ }
406
+
407
+ // Path 2: Implicit sentiment (requires ANTHROPIC_API_KEY — inference silently no-ops without it)
408
+ await handleImplicitSentiment(cleaned, sessionId);
409
+ }
@@ -0,0 +1,113 @@
1
+ /**
2
+ * Stop handler: extracts relationship observations via Haiku inference.
3
+ * Only runs on substantial sessions (≥10 messages).
4
+ * Dedup: skips if this session already has notes in today's file.
5
+ */
6
+
7
+ import { inference } from "../lib/inference";
8
+ import { logDebug, logError } from "../lib/log";
9
+ import { appendNotes, hasSessionNotes, type RelationshipNote } from "../lib/relationship";
10
+ import { logTokenUsage } from "../lib/token-usage";
11
+ import { extractContent, parseMessages } from "../lib/transcript";
12
+
13
+ const OBSERVATION_SCHEMA = {
14
+ type: "object",
15
+ properties: {
16
+ observations: {
17
+ type: "array",
18
+ items: {
19
+ type: "object",
20
+ properties: {
21
+ type: {
22
+ type: "string",
23
+ enum: ["O", "W"],
24
+ description: "O=opinion/preference, W=factual observation",
25
+ },
26
+ text: { type: "string" },
27
+ confidence: { type: "number" },
28
+ },
29
+ required: ["type", "text", "confidence"],
30
+ additionalProperties: false,
31
+ },
32
+ },
33
+ },
34
+ required: ["observations"],
35
+ additionalProperties: false,
36
+ } as const;
37
+
38
+ export async function captureRelationship(
39
+ transcript: string,
40
+ sessionId?: string
41
+ ): Promise<void> {
42
+ if (sessionId && hasSessionNotes(sessionId)) {
43
+ logDebug("relationship", "Skipped: session already has notes");
44
+ return;
45
+ }
46
+
47
+ const messages = parseMessages(transcript);
48
+ logDebug("relationship", `Messages: ${messages.length}`);
49
+ if (messages.length < 10) {
50
+ logDebug("relationship", "Skipped: < 10 messages");
51
+ return;
52
+ }
53
+
54
+ if (!process.env.ANTHROPIC_API_KEY) {
55
+ logDebug("relationship", "Skipped: no ANTHROPIC_API_KEY");
56
+ return;
57
+ }
58
+
59
+ // Collect user messages for analysis
60
+ const userMessages = messages
61
+ .filter((m) => m.role === "user")
62
+ .map((m) => extractContent(m))
63
+ .filter((t) => t.length > 0)
64
+ .slice(-15)
65
+ .map((t) => t.slice(0, 200));
66
+
67
+ logDebug("relationship", `User messages: ${userMessages.length}`);
68
+ if (userMessages.length < 3) {
69
+ logDebug("relationship", "Skipped: < 3 user messages");
70
+ return;
71
+ }
72
+
73
+ logDebug("relationship", "Calling inference...");
74
+ const result = await inference({
75
+ system:
76
+ "You analyze user messages from an AI coding session to extract relationship observations. " +
77
+ "Focus on: preferences (how they like to work), corrections (what they pushed back on), " +
78
+ "frustrations, positive reactions, communication style patterns. " +
79
+ "Return 0-3 observations. If nothing notable, return empty observations array. Be concise.",
80
+ user: `User messages from this session:\n${userMessages.map((m, i) => `${i + 1}. ${m}`).join("\n")}`,
81
+ maxTokens: 300,
82
+ timeout: 8000,
83
+ jsonSchema: OBSERVATION_SCHEMA,
84
+ });
85
+
86
+ if (result.usage) logTokenUsage("relationship", result.usage);
87
+
88
+ logDebug("relationship", `Inference result: success=${result.success}`);
89
+ if (!result.success || !result.output) {
90
+ logDebug("relationship", "Skipped: inference failed or empty output");
91
+ return;
92
+ }
93
+
94
+ try {
95
+ const parsed = JSON.parse(result.output) as {
96
+ observations: Array<{ type: "O" | "W"; text: string; confidence: number }>;
97
+ };
98
+
99
+ logDebug("relationship", `Parsed ${parsed.observations?.length ?? 0} observations`);
100
+ if (!parsed.observations || parsed.observations.length === 0) return;
101
+
102
+ const notes: RelationshipNote[] = parsed.observations.map((o) => ({
103
+ type: o.type,
104
+ text: o.text,
105
+ confidence: o.confidence,
106
+ }));
107
+
108
+ appendNotes(notes, sessionId);
109
+ logDebug("relationship", `Captured ${notes.length} observations via inference`);
110
+ } catch (err) {
111
+ logError("relationship", err);
112
+ }
113
+ }