ei-tui 0.9.3 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94) hide show
  1. package/README.md +22 -3
  2. package/package.json +8 -1
  3. package/src/README.md +10 -26
  4. package/src/core/context-utils.ts +2 -2
  5. package/src/core/handlers/document-segmentation.ts +113 -0
  6. package/src/core/handlers/heartbeat.ts +9 -1
  7. package/src/core/handlers/human-extraction.ts +4 -1
  8. package/src/core/handlers/human-matching.ts +5 -53
  9. package/src/core/handlers/index.ts +3 -51
  10. package/src/core/handlers/persona-generation.ts +1 -28
  11. package/src/core/handlers/rewrite.ts +13 -9
  12. package/src/core/handlers/utils.ts +2 -9
  13. package/src/core/heartbeat-manager.ts +5 -5
  14. package/src/core/llm-client.ts +11 -1
  15. package/src/core/message-manager.ts +26 -23
  16. package/src/core/orchestrators/ceremony.ts +87 -49
  17. package/src/core/orchestrators/extraction-chunker.ts +3 -3
  18. package/src/core/orchestrators/human-extraction.ts +22 -18
  19. package/src/core/orchestrators/index.ts +0 -1
  20. package/src/core/orchestrators/persona-topics.ts +1 -1
  21. package/src/core/orchestrators/room-extraction.ts +5 -5
  22. package/src/core/persona-manager.ts +4 -0
  23. package/src/core/processor.ts +98 -22
  24. package/src/core/prompt-context-builder.ts +7 -6
  25. package/src/core/queue-manager.ts +35 -0
  26. package/src/core/state/personas.ts +1 -17
  27. package/src/core/state/queue.ts +9 -1
  28. package/src/core/state-manager.ts +4 -66
  29. package/src/core/types/entities.ts +17 -3
  30. package/src/core/types/enums.ts +1 -2
  31. package/src/core/types/integrations.ts +2 -0
  32. package/src/core/types/llm.ts +9 -0
  33. package/src/core/types/rooms.ts +1 -1
  34. package/src/integrations/claude-code/importer.ts +1 -1
  35. package/src/integrations/cursor/importer.ts +1 -1
  36. package/src/integrations/document/chunker.ts +88 -0
  37. package/src/integrations/document/importer.ts +82 -0
  38. package/src/integrations/document/index.ts +2 -0
  39. package/src/integrations/document/invoice.ts +63 -0
  40. package/src/integrations/document/types.ts +16 -0
  41. package/src/integrations/document/unsource.ts +164 -0
  42. package/src/integrations/opencode/importer.ts +1 -1
  43. package/src/integrations/persona-history/importer.ts +197 -0
  44. package/src/integrations/persona-history/index.ts +3 -0
  45. package/src/integrations/persona-history/types.ts +7 -0
  46. package/src/prompts/ceremony/dedup.ts +7 -3
  47. package/src/prompts/ceremony/index.ts +2 -11
  48. package/src/prompts/ceremony/people-rewrite.ts +190 -0
  49. package/src/prompts/ceremony/{rewrite.ts → topic-rewrite.ts} +103 -78
  50. package/src/prompts/ceremony/types.ts +1 -42
  51. package/src/prompts/generation/index.ts +0 -3
  52. package/src/prompts/generation/types.ts +0 -15
  53. package/src/prompts/heartbeat/check.ts +18 -6
  54. package/src/prompts/heartbeat/types.ts +2 -1
  55. package/src/prompts/human/index.ts +0 -2
  56. package/src/prompts/human/person-scan.ts +13 -4
  57. package/src/prompts/human/topic-scan.ts +16 -2
  58. package/src/prompts/human/topic-update.ts +36 -4
  59. package/src/prompts/human/types.ts +1 -16
  60. package/src/prompts/index.ts +0 -19
  61. package/src/prompts/reflection/index.ts +35 -5
  62. package/src/prompts/reflection/types.ts +1 -1
  63. package/src/prompts/response/index.ts +5 -0
  64. package/src/prompts/response/sections.ts +26 -0
  65. package/src/prompts/response/types.ts +3 -0
  66. package/src/storage/indexed.ts +4 -0
  67. package/src/storage/interface.ts +1 -0
  68. package/src/storage/local.ts +4 -0
  69. package/src/templates/emmett.ts +49 -0
  70. package/tui/README.md +22 -0
  71. package/tui/src/app.tsx +9 -6
  72. package/tui/src/commands/delete.tsx +7 -1
  73. package/tui/src/commands/import.tsx +30 -0
  74. package/tui/src/commands/registry.test.ts +10 -5
  75. package/tui/src/commands/unsource.tsx +115 -0
  76. package/tui/src/components/PromptInput.tsx +4 -0
  77. package/tui/src/components/WelcomeOverlay.tsx +58 -32
  78. package/tui/src/context/ei.tsx +80 -60
  79. package/tui/src/globals.d.ts +57 -0
  80. package/tui/src/index.tsx +14 -0
  81. package/tui/src/storage/file.ts +11 -5
  82. package/tui/src/util/e2e-flags.ts +4 -3
  83. package/tui/src/util/help-content.ts +20 -0
  84. package/tui/src/util/provider-detection.ts +251 -0
  85. package/tui/src/util/yaml-human.ts +7 -1
  86. package/tui/src/util/yaml-persona.ts +8 -4
  87. package/tui/src/util/yaml-settings.ts +3 -3
  88. package/src/core/orchestrators/person-migration.ts +0 -55
  89. package/src/prompts/ceremony/description-check.ts +0 -54
  90. package/src/prompts/ceremony/expire.ts +0 -37
  91. package/src/prompts/ceremony/explore.ts +0 -77
  92. package/src/prompts/ceremony/person-migration.ts +0 -77
  93. package/src/prompts/generation/descriptions.ts +0 -91
  94. package/src/prompts/human/fact-scan.ts +0 -150
package/README.md CHANGED
@@ -83,7 +83,7 @@ Ei can operate with three types of input, and three types of output.
83
83
  ^
84
84
  Sessions
85
85
  |
86
- [OpenCode]
86
+ [OpenCode / Claude Code / Cursor]
87
87
  ```
88
88
 
89
89
  ```
@@ -169,6 +169,22 @@ All sessions map to a single "Cursor" persona.
169
169
 
170
170
  Sessions are processed oldest-first, one per queue cycle, so Ei won't overwhelm your LLM provider on first run. See [TUI Readme](tui/README.md)
171
171
 
172
+ ## Document Import
173
+
174
+ Got notes, journals, markdown files? You can feed them directly to Ei.
175
+
176
+ **Web**: Open **☰ menu** → **My Data** → **Documents** tab. Drop a `.txt`, `.md`, or `.markdown` file and Ei gets to work.
177
+
178
+ **TUI**:
179
+ ```bash
180
+ /import ~/notes/my-journal.md
181
+ /import /path/to/report.pdf
182
+ ```
183
+
184
+ Ei splits the document into segments, runs them through the extraction pipeline, and pulls out facts, topics, people, and quotes — exactly like it does with your conversations. The extracted knowledge is attributed to a reserved persona called **Emmett** so it doesn't pollute your chat history.
185
+
186
+ Both surfaces show you which documents have been imported and let you remove their extracted knowledge (web: Delete button in the Documents tab; TUI: `/unsource <source_tag>`).
187
+
172
188
  ## Built-in Tool Integrations
173
189
 
174
190
  Personas can use tools. Not just read-from-memory tools — *actual* tools. Web search. Your music. Your filesystem. Here's what ships with Ei out of the box:
@@ -184,6 +200,7 @@ Personas can use tools. Not just read-from-memory tools — *actual* tools. Web
184
200
  | `search_files` | Find files by name pattern *(TUI only)* |
185
201
  | `grep` | Search file contents by regex *(TUI only)* |
186
202
  | `get_file_info` | File/directory metadata *(TUI only)* |
203
+ | `web_fetch` | Fetch a URL and return its text content *(TUI only — blocked by CORS in browsers)* |
187
204
 
188
205
  The filesystem tools make Ei a legitimate coding assistant in the TUI. Ask a persona to review a file, understand a project structure, or track down where something is defined — it can actually look.
189
206
 
@@ -264,13 +281,15 @@ Tag a version to publish automatically:
264
281
 
265
282
  ```bash
266
283
  # bump version in package.json
267
- git commit -am "chore: bump to v0.1.4"
268
- git tag v0.1.4
284
+ git commit -am "chore: bump to v1.0.0"
285
+ git tag v1.0.0
269
286
  git push && git push --tags
270
287
  ```
271
288
 
272
289
  GitHub Actions picks up the tag and publishes to npm with provenance via OIDC. No stored secrets.
273
290
 
291
+ > **Note**: Run the pre-flight checklist in `AGENTS.md` (or use the `release` skill in OpenCode) before tagging. The v0.1.9 incident is a cautionary tale.
292
+
274
293
  ## Project Structure
275
294
 
276
295
  See `AGENTS.md` for detailed architecture and contribution guidelines.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "ei-tui",
3
- "version": "0.9.3",
3
+ "version": "1.0.0",
4
4
  "author": "Flare576",
5
5
  "repository": {
6
6
  "type": "git",
@@ -57,13 +57,20 @@
57
57
  "test:evals:topic-scan": "vite-node tests/evals/topic-scan.eval.ts",
58
58
  "test:evals:topic-match": "vite-node tests/evals/topic-match.eval.ts",
59
59
  "test:evals:topic-update": "vite-node tests/evals/topic-update.eval.ts",
60
+ "test:evals:topic-technical": "vite-node tests/evals/topic-technical.eval.ts",
61
+ "test:evals:rewrite-scan": "vite-node tests/evals/rewrite-scan.eval.ts",
62
+ "test:evals:rewrite-rewrite": "vite-node tests/evals/rewrite-rewrite.eval.ts",
63
+ "test:evals:rewrite-real-data": "vite-node tests/evals/rewrite-real-data.eval.ts",
60
64
  "test:evals:topic-validate": "vite-node tests/evals/topic-validate.eval.ts",
61
65
  "test:evals:person-scan": "vite-node tests/evals/person-scan.eval.ts",
62
66
  "test:evals:person-update": "vite-node tests/evals/person-update.eval.ts",
63
67
  "test:evals:persona-trait": "vite-node tests/evals/persona-trait-extraction.eval.ts",
64
68
  "test:evals:dedup": "vite-node tests/evals/dedup-tool-calls.eval.ts",
65
69
  "test:evals:response-read-memory": "vite-node tests/evals/response-read-memory.eval.ts",
70
+ "test:evals:response-pending-update": "vite-node tests/evals/response-pending-update.eval.ts",
71
+ "test:evals:heartbeat-pending-update": "vite-node tests/evals/heartbeat-pending-update.eval.ts",
66
72
  "test:evals:real-data": "vite-node tests/evals/real-data-example.eval.ts",
73
+ "test:evals:persona-data-check": "vite-node tests/evals/persona-data-check.eval.ts",
67
74
  "test:all": "npm run test && npm run test:e2e && npm run test:e2e:tui",
68
75
  "typecheck": "tsc --noEmit",
69
76
  "web": "cd web && npm run dev",
package/src/README.md CHANGED
@@ -57,39 +57,23 @@ Each Topic will have an "exposure" rating similar to those on Human Data points.
57
57
 
58
58
  # Ceremony Intent
59
59
 
60
- Every 24 hours, we want to freshen up the system. We do this in 4 parts: Exposure, Decay, Expire, Explore
60
+ Every 24 hours, the system runs a ceremony to keep knowledge fresh and healthy. Phases run sequentially via `ceremony_progress`:
61
61
 
62
- ## Exposure
62
+ **Phase 1 → Dedup**: User-triggered only (not automated). Merges confirmed duplicate records.
63
63
 
64
- I also frequently refer to this as "Extract," but this is the first step where we determine what the human and that Persona talked about that day. It serves two purposes:
64
+ **Phase 2 → Expose**: Human extraction catch-up (facts, topics, people) + persona topic rating for any messages that didn't hit the per-send threshold during the day.
65
65
 
66
- ### Detail Extraction
66
+ **Phase 3 → EventSummary**: Summarizes significant events from recent conversations.
67
67
 
68
- Since we also pull out details during normal discourse (see above), this is the less-important step at this point, but still vital for catching up with the last few messages, or Personas that only received a few messages during the day and may not have hit the current limit for natural extraction.
68
+ **Decay** (synchronous after Phase 3): Applies exposure decay to persona topics + prunes old messages. Human ceremony (decay for human topics/people) runs here too.
69
69
 
70
- ### Exposure Adjustment
70
+ **Phase 4 → Person Rewrite**: Scans bloated Person records (>750 chars) and extracts non-relationship content into Topics. Gated so Topic Rewrite can snapshot the updated Topic list afterward.
71
71
 
72
- Exposure is calculated by two metrics - `desired` and `current`. If an entity REALLY likes talking about a subject, their `desired` will be very high (1.0 max), ranging down to 0.0 for subjects which that entity does NOT wish to discuss. You may have guessed already, but `current` is how much they've recently talked about a topic.
72
+ **Topic Rewrite** (fire-and-forget after Phase 4): Scans bloated Topic records (>750 chars) and splits them into focused sub-topics. Topics created by Person Rewrite are included.
73
73
 
74
- Adjusting the values is different for Human Topics/People than Persona Topics. The Human subjects are actually adjusted during the previous step, while extracting details.
74
+ **Reflection** (fire-and-forget alongside Phase 4): Persona-side critic pass on person records.
75
75
 
76
- The Persona Topic update only happens during the Ceremony, and really this step only increases exposure IF the subject was discussed, bumping the last_updated field accordingly.
77
-
78
- ## Decay
79
-
80
- After we determine if topics were discussed (increasing exposure), we adjust exposure the _other_ way. Based on some heuristics (like current level, desired level, and time-since-discussion), we decrease the current exposure levels down.
81
-
82
- ## Expire
83
-
84
- This and the following step (Explore) are exclusive to Persona Topics right now. In Expire, we analyze the Person Topics to determine if any of them have
85
- - Lost their meaning to the Persona
86
- - Been ignored or dismissed by the user
87
-
88
- This is largely tracked by exposure, but expiration is dictated by an Agent.
89
-
90
- ## Explore
91
-
92
- After we've removed irrelevant topics, this is the Agent's opportunity to add NEW topics that might be of interest to the Persona (and the user). Again, it's a prompt to an agent if the Persona doesn't have its full capacity of Topics.
76
+ > **Note**: Expire and Explore phases were removed in the Persona Ceremony Simplification (2026-04-05). Persona topics now only update `exposure_current` during ceremony. See CONTRACTS.md changelog for details.
93
77
 
94
78
  # Opencode Importer
95
79
 
@@ -148,7 +132,7 @@ This is intentional. Concurrent LLM calls sound appealing until you're watching
148
132
 
149
133
  # Context Windows
150
134
 
151
- Personas don't send their entire message history to the LLM. By default, only messages from the last 8 hours are included (`context_window_hours`, configurable per persona). Older messages are still stored — they're just not in the prompt.
135
+ Personas don't send their entire message history to the LLM. By default, only messages from the last 8 hours are included (`context_window_ms`, configurable per persona). Older messages are still stored — they're just not in the prompt.
152
136
 
153
137
  Message rolloff works differently: messages are kept until there are at least 200 of them _and_ any are older than 14 days. So a persona you chat with daily will roll off old messages gradually; one you chat with twice a year will keep everything.
154
138
 
@@ -8,12 +8,12 @@ import { ContextStatus as ContextStatusEnum } from "./types.js";
8
8
  export function filterMessagesForContext(
9
9
  messages: Message[],
10
10
  contextBoundary: string | undefined,
11
- contextWindowHours: number
11
+ contextWindowMs: number
12
12
  ): Message[] {
13
13
  if (messages.length === 0) return [];
14
14
 
15
15
  const now = Date.now();
16
- const windowStartMs = now - contextWindowHours * 60 * 60 * 1000;
16
+ const windowStartMs = now - contextWindowMs;
17
17
  const boundaryMs = contextBoundary ? new Date(contextBoundary).getTime() : 0;
18
18
 
19
19
  return messages.filter((msg) => {
@@ -0,0 +1,113 @@
1
+ import { ContextStatus } from "../types.js";
2
+ import type { LLMResponse, Message } from "../types.js";
3
+ import type { StateManager } from "../state-manager.js";
4
+ import {
5
+ queueAllScans,
6
+ type ExtractionContext,
7
+ } from "../orchestrators/human-extraction.js";
8
+
9
+ function parseSegmentArray(content: string): string[] | null {
10
+ const jsonMatch = content.match(/```json\s*([\s\S]*?)```/) ?? content.match(/```\s*([\s\S]*?)```/);
11
+ const jsonText = jsonMatch ? jsonMatch[1].trim() : content.trim();
12
+
13
+ const arrayMatch = jsonText.match(/\[[\s\S]*\]/);
14
+ if (!arrayMatch) return null;
15
+
16
+ try {
17
+ const parsed = JSON.parse(arrayMatch[0]);
18
+ if (!Array.isArray(parsed)) return null;
19
+ return parsed.filter((item): item is string => typeof item === "string" && item.trim().length > 0);
20
+ } catch {
21
+ return null;
22
+ }
23
+ }
24
+
25
+ export function handleDocumentSegmentation(response: LLMResponse, state: StateManager): void {
26
+ const { batchId, filename, originalContent } = response.request.data as {
27
+ batchId: string;
28
+ filename: string;
29
+ originalContent: string;
30
+ };
31
+
32
+ if (!batchId || !filename) {
33
+ console.error("[handleDocumentSegmentation] Missing batchId or filename in request data");
34
+ return;
35
+ }
36
+
37
+ let segments: string[];
38
+ if (response.content) {
39
+ const parsed = parseSegmentArray(response.content);
40
+ segments = (parsed && parsed.length > 0) ? parsed : [originalContent];
41
+ } else {
42
+ segments = [originalContent];
43
+ }
44
+
45
+ const emmett = state.persona_getById("emmet");
46
+ if (!emmett) {
47
+ console.warn("[handleDocumentSegmentation] Emmett persona not found — skipping segment write");
48
+ return;
49
+ }
50
+
51
+ const now = new Date().toISOString();
52
+ const sourceTag = `import:document:${filename}`;
53
+
54
+ for (const segment of segments) {
55
+ const message: Message = {
56
+ id: crypto.randomUUID(),
57
+ role: "system",
58
+ content: segment,
59
+ timestamp: now,
60
+ read: true,
61
+ context_status: ContextStatus.Always,
62
+ external: true,
63
+ source_tag: sourceTag,
64
+ };
65
+ state.messages_append("emmet", message);
66
+ }
67
+
68
+ console.log(`[handleDocumentSegmentation] Wrote ${segments.length} segment(s) for batch ${batchId} (${filename})`);
69
+ }
70
+
71
+ export function finishDocumentBatch(batchId: string, filename: string, state: StateManager): void {
72
+ const sourceTag = `import:document:${filename}`;
73
+
74
+ const emmettMessages = state.messages_get("emmet");
75
+ const docMessages = emmettMessages.filter(m => m.external === true && m.source_tag === sourceTag);
76
+
77
+ if (docMessages.length === 0) {
78
+ console.warn(`[finishDocumentBatch] No messages found for ${sourceTag} — skipping extraction`);
79
+ } else {
80
+ const extractionContext: ExtractionContext = {
81
+ personaId: "emmet",
82
+ channelDisplayName: "Document",
83
+ messages_context: [],
84
+ messages_analyze: docMessages,
85
+ sources: [sourceTag],
86
+ };
87
+
88
+ const docSettings = state.getHuman().settings?.document;
89
+ queueAllScans(extractionContext, state, {
90
+ extraction_model: docSettings?.extraction_model,
91
+ external_filter: "only",
92
+ });
93
+
94
+ console.log(`[finishDocumentBatch] Queued extraction for ${docMessages.length} message(s) from ${filename}`);
95
+ }
96
+
97
+ const updatedHuman = state.getHuman();
98
+ state.setHuman({
99
+ ...updatedHuman,
100
+ settings: {
101
+ ...updatedHuman.settings,
102
+ document: {
103
+ ...updatedHuman.settings?.document,
104
+ processed_documents: {
105
+ ...(updatedHuman.settings?.document?.processed_documents ?? {}),
106
+ [filename]: new Date().toISOString(),
107
+ },
108
+ },
109
+ },
110
+ });
111
+
112
+ console.log(`[finishDocumentBatch] Batch ${batchId} complete, ${filename} marked processed`);
113
+ }
@@ -126,13 +126,15 @@ export function handleReflectionCritic(response: LLMResponse, state: StateManage
126
126
  const personaDisplayName = response.request.data.personaDisplayName as string;
127
127
 
128
128
  const result = response.parsed as ReflectionCriticResult | undefined;
129
- if (!result?.updated_identity || !result.critique) {
129
+ if (!result?.critique) {
130
130
  console.error(`[ReflectionCritic ${personaDisplayName}] Invalid or missing parsed result`);
131
131
  return;
132
132
  }
133
133
 
134
134
  const personRecord = state.human_person_getByIdentifier("Ei Persona", personaId);
135
135
  if (personRecord) {
136
+ // TODO: Remove before v1 — debug logging to inspect person log before it's cleared
137
+ console.log(`[ReflectionCritic ${personaDisplayName}] person_log_snapshot (${personRecord.description?.length ?? 0} chars): ${personRecord.description ?? ""}`);
136
138
  state.human_person_upsert({
137
139
  ...personRecord,
138
140
  description: "",
@@ -140,6 +142,12 @@ export function handleReflectionCritic(response: LLMResponse, state: StateManage
140
142
  console.log(`[ReflectionCritic ${personaDisplayName}] Person record description cleared — ready for fresh evidence after reflection`);
141
143
  }
142
144
 
145
+ // Escape hatch: critic found no meaningful drift — log critique and skip pending_update
146
+ if (!result.updated_identity) {
147
+ console.log(`[ReflectionCritic ${personaDisplayName}] No drift detected — skipping pending_update. Critique: ${result.critique}`);
148
+ return;
149
+ }
150
+
143
151
  const persona = state.persona_getById(personaId);
144
152
  if (!persona) {
145
153
  console.error(`[ReflectionCritic ${personaDisplayName}] Persona not found after critic`);
@@ -189,7 +189,10 @@ export async function handleHumanPersonScan(response: LLMResponse, state: StateM
189
189
  return;
190
190
  }
191
191
 
192
- const context = response.request.data as unknown as ExtractionContext;
192
+ const context = {
193
+ ...(response.request.data as unknown as ExtractionContext),
194
+ channelDisplayName: (response.request.data as Record<string, unknown>).personaDisplayName as string,
195
+ };
193
196
  if (!context?.personaId) return;
194
197
 
195
198
  const { messages_context, messages_analyze } = resolveMessageWindow(response, state);
@@ -8,7 +8,7 @@ import {
8
8
  import type { PersonIdentifier } from "../types/data-items.js";
9
9
  import type { StateManager } from "../state-manager.js";
10
10
  import type { ItemMatchResult, ExposureImpact, TopicUpdateResult, PersonUpdateResult } from "../../prompts/human/types.js";
11
- import { queueTopicUpdate, queuePersonUpdate, queueTopicValidate, type ExtractionContext } from "../orchestrators/index.js";
11
+ import { queueTopicUpdate, queueTopicValidate, type ExtractionContext } from "../orchestrators/index.js";
12
12
  import { getEmbeddingService, getTopicEmbeddingText, getPersonEmbeddingText } from "../embedding-service.js";
13
13
  import { calculateExposureCurrent } from "../utils/exposure.js";
14
14
 
@@ -48,7 +48,7 @@ export function handleTopicMatch(response: LLMResponse, state: StateManager): vo
48
48
  extraction_model?: string;
49
49
  } = {
50
50
  personaId,
51
- personaDisplayName,
51
+ channelDisplayName: personaDisplayName,
52
52
  roomId,
53
53
  messages_context,
54
54
  messages_analyze,
@@ -64,54 +64,6 @@ export function handleTopicMatch(response: LLMResponse, state: StateManager): vo
64
64
  console.log(`[handleTopicMatch] topic "${context.candidateName}": ${matched}`);
65
65
  }
66
66
 
67
- export function handlePersonMatch(response: LLMResponse, state: StateManager): void {
68
- const result = response.parsed as ItemMatchResult | undefined;
69
- if (!result) {
70
- throw new Error("[handlePersonMatch] No parsed result");
71
- }
72
-
73
- const personaId = response.request.data.personaId as string;
74
- const personaDisplayName = response.request.data.personaDisplayName as string;
75
- const roomId = response.request.data.roomId as string | undefined;
76
- const { messages_context, messages_analyze } = resolveMessageWindow(response, state);
77
-
78
- let matched_guid = result.matched_guid;
79
- let resolvedPerson: import('../types/data-items.js').Person | null = null;
80
- if (matched_guid === "new") {
81
- matched_guid = null;
82
- } else if (matched_guid) {
83
- const human = state.getHuman();
84
- resolvedPerson = human.people.find(p => p.id === matched_guid) ?? null;
85
- if (!resolvedPerson) {
86
- console.warn(`[handlePersonMatch] matched_guid "${matched_guid}" not found in people — treating as new`);
87
- matched_guid = null;
88
- }
89
- }
90
- result.matched_guid = matched_guid;
91
-
92
- const context: ExtractionContext & {
93
- candidateName: string;
94
- candidateDescription: string;
95
- candidateRelationship: string;
96
- extraction_model?: string;
97
- } = {
98
- personaId,
99
- personaDisplayName,
100
- roomId,
101
- messages_context,
102
- messages_analyze,
103
- sources: response.request.data.sources as string[] | undefined,
104
- candidateName: response.request.data.candidateName as string,
105
- candidateDescription: response.request.data.candidateDescription as string,
106
- candidateRelationship: response.request.data.candidateRelationship as string,
107
- extraction_model: response.request.data.extraction_model as string | undefined,
108
- };
109
-
110
- queuePersonUpdate(result, context, state, resolvedPerson);
111
- const matched = matched_guid ? `matched GUID "${matched_guid}"` : "no match (new person)";
112
- console.log(`[handlePersonMatch] person "${context.candidateName}": ${matched}`);
113
- }
114
-
115
67
  export async function handleTopicUpdate(response: LLMResponse, state: StateManager): Promise<void> {
116
68
  const result = response.parsed as (TopicUpdateResult & { quotes?: Array<{ text: string; reason: string }> }) | undefined;
117
69
 
@@ -447,7 +399,7 @@ async function validateAndStoreQuotes(
447
399
  candidates: Array<{ text: string; reason: string }> | undefined,
448
400
  messages: Message[],
449
401
  dataItemId: string,
450
- personaName: string,
402
+ channelDisplayName: string,
451
403
  personaGroup: string | null,
452
404
  state: StateManager
453
405
  ): Promise<void> {
@@ -540,8 +492,8 @@ async function validateAndStoreQuotes(
540
492
  data_item_ids: [dataItemId],
541
493
  persona_groups: [personaGroup || "General"],
542
494
  text: matchText,
543
- speaker: message.role === "human" ? "human" : (message.speaker_name ?? personaName),
544
- channel: personaName,
495
+ speaker: message.role === "human" ? "human" : (message.speaker_name ?? channelDisplayName),
496
+ channel: channelDisplayName,
545
497
  timestamp: message.timestamp,
546
498
  start: matchStart,
547
499
  end: matchEnd,
@@ -1,14 +1,11 @@
1
1
  import { LLMNextStep } from "../types.js";
2
- import type { LLMResponse } from "../types.js";
3
- import type { StateManager } from "../state-manager.js";
4
2
  import type { ResponseHandler } from "./persona-response.js";
5
- import type { PersonIdentifier } from "../types/data-items.js";
6
3
 
7
4
  export type { ResponseHandler } from "./persona-response.js";
8
5
 
9
6
  import { handlePersonaResponse, handleToolContinuation, handleOneShot, handleOneShotJSON } from "./persona-response.js";
10
7
  import { handleHeartbeatCheck, handleEiHeartbeat, handleReflectionCritic } from "./heartbeat.js";
11
- import { handlePersonaGeneration, handlePersonaDescriptions, handlePersonaTraitExtraction } from "./persona-generation.js";
8
+ import { handlePersonaGeneration, handlePersonaTraitExtraction } from "./persona-generation.js";
12
9
  import {
13
10
  handlePersonaTopicRating,
14
11
  } from "./persona-topics.js";
@@ -18,56 +15,11 @@ import { handleRewriteScan, handleRewriteRewrite } from "./rewrite.js";
18
15
  import { handleDedupCurate } from "./dedup.js";
19
16
  import { handleRoomResponse, handleRoomJudge } from "./rooms.js";
20
17
  import { handlePersonaPreview } from "./persona-preview.js";
21
-
22
- function handlePersonIdentifierMigration(response: LLMResponse, state: StateManager): void {
23
- const personId = response.request.data.person_id as string;
24
- if (!personId) {
25
- console.error("[handlePersonIdentifierMigration] Missing person_id in request data");
26
- return;
27
- }
28
-
29
- const human = state.getHuman();
30
- const person = human.people.find(p => p.id === personId);
31
- if (!person) {
32
- console.error(`[handlePersonIdentifierMigration] Person not found: ${personId}`);
33
- return;
34
- }
35
-
36
- const result = response.parsed as { identifiers?: Array<{ type: string; value: string; is_primary?: boolean }> } | undefined;
37
- if (!result?.identifiers || !Array.isArray(result.identifiers) || result.identifiers.length === 0) {
38
- console.error(`[handlePersonIdentifierMigration] Invalid or empty identifiers for ${person.name}`);
39
- return;
40
- }
41
-
42
- const hasName = result.identifiers.some(i => i.value === person.name);
43
- if (!hasName) {
44
- result.identifiers.unshift({ type: "nickname", value: person.name });
45
- }
46
-
47
- const hasPrimary = result.identifiers.some(i => i.is_primary);
48
- if (!hasPrimary) {
49
- result.identifiers[0].is_primary = true;
50
- }
51
-
52
- const identifiers: PersonIdentifier[] = result.identifiers.map(i => ({
53
- type: i.type,
54
- value: i.value,
55
- ...(i.is_primary ? { is_primary: i.is_primary } : {}),
56
- }));
57
-
58
- state.human_person_upsert({
59
- ...person,
60
- identifiers,
61
- last_updated: new Date().toISOString(),
62
- });
63
-
64
- console.log(`[handlePersonIdentifierMigration] Migrated ${identifiers.length} identifier(s) for ${person.name}`);
65
- }
18
+ import { handleDocumentSegmentation } from "./document-segmentation.js";
66
19
 
67
20
  export const handlers: Record<LLMNextStep, ResponseHandler> = {
68
21
  handlePersonaResponse,
69
22
  handlePersonaGeneration,
70
- handlePersonaDescriptions,
71
23
  handleFactFind,
72
24
  handleHumanTopicScan,
73
25
  handleHumanPersonScan,
@@ -88,7 +40,7 @@ export const handlers: Record<LLMNextStep, ResponseHandler> = {
88
40
  handleRoomResponse,
89
41
  handleRoomJudge,
90
42
  handlePersonaPreview,
91
- [LLMNextStep.HandlePersonIdentifierMigration]: handlePersonIdentifierMigration,
92
43
  [LLMNextStep.HandleTopicValidate]: handleDedupCurate,
93
44
  [LLMNextStep.HandleReflectionCritic]: handleReflectionCritic,
45
+ [LLMNextStep.HandleDocumentSegmentation]: handleDocumentSegmentation,
94
46
  };
@@ -4,7 +4,7 @@ import {
4
4
  type PersonaTopic,
5
5
  } from "../types.js";
6
6
  import type { StateManager } from "../state-manager.js";
7
- import type { PersonaGenerationResult, PersonaDescriptionsResult } from "../../prompts/generation/types.js";
7
+ import type { PersonaGenerationResult } from "../../prompts/generation/types.js";
8
8
  import type { TraitResult } from "../../prompts/persona/types.js";
9
9
  import { orchestratePersonaGeneration, type PartialPersona } from "../orchestrators/index.js";
10
10
 
@@ -111,33 +111,6 @@ export function handlePersonaGeneration(response: LLMResponse, state: StateManag
111
111
  console.log(`[handlePersonaGeneration] Orchestrated: ${personaDisplayName}`);
112
112
  }
113
113
 
114
- export function handlePersonaDescriptions(response: LLMResponse, state: StateManager): void {
115
- const personaId = response.request.data.personaId as string;
116
- const personaDisplayName = response.request.data.personaDisplayName as string;
117
- if (!personaId) {
118
- console.error("[handlePersonaDescriptions] No personaId in request data");
119
- return;
120
- }
121
-
122
- const result = response.parsed as PersonaDescriptionsResult | undefined;
123
- if (!result) {
124
- console.error("[handlePersonaDescriptions] No parsed result");
125
- return;
126
- }
127
-
128
- if (result.no_change) {
129
- console.log(`[handlePersonaDescriptions] No change needed for ${personaDisplayName}`);
130
- return;
131
- }
132
-
133
- state.persona_update(personaId, {
134
- short_description: result.short_description,
135
- long_description: result.long_description,
136
- last_updated: new Date().toISOString(),
137
- });
138
- console.log(`[handlePersonaDescriptions] Updated descriptions for ${personaDisplayName}`);
139
- }
140
-
141
114
  export function handlePersonaTraitExtraction(response: LLMResponse, state: StateManager): void {
142
115
  const personaId = response.request.data.personaId as string;
143
116
  const personaDisplayName = response.request.data.personaDisplayName as string;
@@ -14,7 +14,8 @@ import type {
14
14
  RewriteResult,
15
15
  RewriteSubjectMatch,
16
16
  } from "../../prompts/ceremony/types.js";
17
- import { buildRewritePrompt } from "../../prompts/ceremony/rewrite.js";
17
+ import { buildPersonRewriteSplitPrompt } from "../../prompts/ceremony/people-rewrite.js";
18
+ import { buildTopicRewriteSplitPrompt } from "../../prompts/ceremony/topic-rewrite.js";
18
19
  import { getEmbeddingService, getItemEmbeddingText } from "../embedding-service.js";
19
20
 
20
21
  import { searchHumanData } from "../human-data-manager.js";
@@ -79,12 +80,10 @@ export async function handleRewriteScan(response: LLMResponse, state: StateManag
79
80
  }
80
81
  }
81
82
 
82
- // Build Phase 2 prompt and queue it
83
- const prompt = buildRewritePrompt({
84
- item: currentItem,
85
- itemType,
86
- subjects: subjectMatches,
87
- });
83
+ const splitData = { item: currentItem, itemType, subjects: subjectMatches };
84
+ const prompt = itemType === "person"
85
+ ? buildPersonRewriteSplitPrompt(splitData)
86
+ : buildTopicRewriteSplitPrompt(splitData);
88
87
 
89
88
  state.queue_enqueue({
90
89
  type: LLMRequestType.JSON,
@@ -125,6 +124,11 @@ export async function handleRewriteRewrite(response: LLMResponse, state: StateMa
125
124
  const human = state.getHuman();
126
125
  const now = new Date().toISOString();
127
126
 
127
+ const originalItem = itemType === "topic"
128
+ ? human.topics.find(t => t.id === itemId)
129
+ : human.people.find(p => p.id === itemId);
130
+ const originalCategory = itemType === "topic" ? (originalItem as Topic | undefined)?.category : undefined;
131
+
128
132
  const allItems: DataItemBase[] = [
129
133
  ...human.topics, ...human.people,
130
134
  ];
@@ -228,11 +232,11 @@ export async function handleRewriteRewrite(response: LLMResponse, state: StateMa
228
232
  switch (item.type) {
229
233
  case "topic": {
230
234
  if (!item.category) {
231
- console.warn(`[handleRewriteRewrite] New topic "${item.name}" missing category — defaulting to "Interest"`);
235
+ console.warn(`[handleRewriteRewrite] New topic "${item.name}" missing category — inheriting from original (${originalCategory ?? "Interest"})`);
232
236
  }
233
237
  const topic: Topic = {
234
238
  ...baseFields,
235
- category: item.category ?? "Interest",
239
+ category: item.category ?? originalCategory ?? "Interest",
236
240
  exposure_current: 0.5,
237
241
  exposure_desired: 0.5,
238
242
  };
@@ -1,15 +1,8 @@
1
1
  import type { Message, RoomMessage, LLMResponse } from "../types.js";
2
2
  import type { StateManager } from "../state-manager.js";
3
3
 
4
- export function getMessageContent(msg: { content?: string; verbal_response?: string; action_response?: string }): string {
5
- if (msg.content) return msg.content;
6
- // Legacy fallback for data not yet migrated on disk
7
- // TODO(v1.0.0): Remove legacy verbal_response/action_response fallback
8
- const legacy = msg as { verbal_response?: string; action_response?: string };
9
- const parts: string[] = [];
10
- if (legacy.action_response) parts.push(`_${legacy.action_response}_`);
11
- if (legacy.verbal_response) parts.push(legacy.verbal_response);
12
- return parts.join('\n\n');
4
+ export function getMessageContent(msg: { content?: string }): string {
5
+ return msg.content ?? '';
13
6
  }
14
7
 
15
8
  export function normalizeRoomMessages(messages: RoomMessage[], state: StateManager): Message[] {
@@ -143,7 +143,7 @@ export async function queueEiHeartbeat(
143
143
  }
144
144
 
145
145
  const activePersonas = personas
146
- .filter((p) => !p.is_archived && !p.is_paused && p.id !== "ei")
146
+ .filter((p) => !p.is_archived && !p.is_paused && !p.is_static && p.id !== "ei")
147
147
  .map((p) => {
148
148
  const msgs = sm.messages_get(p.id);
149
149
  const lastHuman = [...msgs].reverse().find((m) => m.role === "human");
@@ -169,7 +169,7 @@ export async function queueEiHeartbeat(
169
169
  }
170
170
 
171
171
  const personasWithPendingUpdate = personas.filter(
172
- (p) => !p.is_archived && !p.is_paused && p.id !== "ei" && p.pending_update?.critique
172
+ (p) => !p.is_archived && !p.is_paused && !p.is_static && p.id !== "ei" && p.pending_update?.critique
173
173
  );
174
174
  for (const p of personasWithPendingUpdate) {
175
175
  items.push({
@@ -217,8 +217,8 @@ export async function queueHeartbeatCheck(sm: StateManager, personaId: string, i
217
217
  console.log(`[HeartbeatCheck ${persona.display_name}] Queueing heartbeat check (model: ${model})`);
218
218
  const human = sm.getHuman();
219
219
  const history = sm.messages_get(personaId);
220
- const contextWindowHours = persona.context_window_hours ?? human.settings?.default_context_window_hours ?? 8;
221
- const contextHistory = filterMessagesForContext(history, persona.context_boundary, contextWindowHours);
220
+ const contextWindowMs = persona.context_window_ms ?? human.settings?.default_context_window_ms ?? 28800000;
221
+ const contextHistory = filterMessagesForContext(history, persona.context_boundary, contextWindowMs);
222
222
 
223
223
  if (personaId === "ei") {
224
224
  await queueEiHeartbeat(sm, human, contextHistory, isTUI);
@@ -244,7 +244,7 @@ export async function queueHeartbeatCheck(sm: StateManager, personaId: string, i
244
244
  name: persona.display_name,
245
245
  traits: persona.traits,
246
246
  topics: persona.topics,
247
- has_pending_update: !!persona.pending_update,
247
+ pending_update: persona.pending_update,
248
248
  },
249
249
  human: {
250
250
  topics: sortByEngagementGap(filteredHuman.topics).slice(0, 5),