ei-tui 0.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (133)
  1. package/LICENSE +21 -0
  2. package/README.md +170 -0
  3. package/package.json +63 -0
  4. package/src/README.md +96 -0
  5. package/src/cli/README.md +47 -0
  6. package/src/cli/commands/facts.ts +25 -0
  7. package/src/cli/commands/people.ts +25 -0
  8. package/src/cli/commands/quotes.ts +19 -0
  9. package/src/cli/commands/topics.ts +25 -0
  10. package/src/cli/commands/traits.ts +25 -0
  11. package/src/cli/retrieval.ts +269 -0
  12. package/src/cli.ts +176 -0
  13. package/src/core/AGENTS.md +104 -0
  14. package/src/core/embedding-service.ts +241 -0
  15. package/src/core/handlers/index.ts +1057 -0
  16. package/src/core/index.ts +4 -0
  17. package/src/core/llm-client.ts +265 -0
  18. package/src/core/model-context-windows.ts +49 -0
  19. package/src/core/orchestrators/ceremony.ts +500 -0
  20. package/src/core/orchestrators/extraction-chunker.ts +138 -0
  21. package/src/core/orchestrators/human-extraction.ts +457 -0
  22. package/src/core/orchestrators/index.ts +28 -0
  23. package/src/core/orchestrators/persona-generation.ts +76 -0
  24. package/src/core/orchestrators/persona-topics.ts +117 -0
  25. package/src/core/personas/index.ts +5 -0
  26. package/src/core/personas/opencode-agent.ts +81 -0
  27. package/src/core/processor.ts +1413 -0
  28. package/src/core/queue-processor.ts +197 -0
  29. package/src/core/state/checkpoints.ts +68 -0
  30. package/src/core/state/human.ts +176 -0
  31. package/src/core/state/index.ts +5 -0
  32. package/src/core/state/personas.ts +217 -0
  33. package/src/core/state/queue.ts +144 -0
  34. package/src/core/state-manager.ts +347 -0
  35. package/src/core/types.ts +421 -0
  36. package/src/core/utils/decay.ts +33 -0
  37. package/src/index.ts +1 -0
  38. package/src/integrations/opencode/importer.ts +896 -0
  39. package/src/integrations/opencode/index.ts +16 -0
  40. package/src/integrations/opencode/json-reader.ts +304 -0
  41. package/src/integrations/opencode/reader-factory.ts +35 -0
  42. package/src/integrations/opencode/sqlite-reader.ts +189 -0
  43. package/src/integrations/opencode/types.ts +244 -0
  44. package/src/prompts/AGENTS.md +62 -0
  45. package/src/prompts/ceremony/description-check.ts +47 -0
  46. package/src/prompts/ceremony/expire.ts +30 -0
  47. package/src/prompts/ceremony/explore.ts +60 -0
  48. package/src/prompts/ceremony/index.ts +11 -0
  49. package/src/prompts/ceremony/types.ts +42 -0
  50. package/src/prompts/generation/descriptions.ts +91 -0
  51. package/src/prompts/generation/index.ts +15 -0
  52. package/src/prompts/generation/persona.ts +155 -0
  53. package/src/prompts/generation/seeds.ts +31 -0
  54. package/src/prompts/generation/types.ts +47 -0
  55. package/src/prompts/heartbeat/check.ts +179 -0
  56. package/src/prompts/heartbeat/ei.ts +208 -0
  57. package/src/prompts/heartbeat/index.ts +15 -0
  58. package/src/prompts/heartbeat/types.ts +70 -0
  59. package/src/prompts/human/fact-scan.ts +152 -0
  60. package/src/prompts/human/index.ts +32 -0
  61. package/src/prompts/human/item-match.ts +74 -0
  62. package/src/prompts/human/item-update.ts +322 -0
  63. package/src/prompts/human/person-scan.ts +115 -0
  64. package/src/prompts/human/topic-scan.ts +135 -0
  65. package/src/prompts/human/trait-scan.ts +115 -0
  66. package/src/prompts/human/types.ts +127 -0
  67. package/src/prompts/index.ts +90 -0
  68. package/src/prompts/message-utils.ts +39 -0
  69. package/src/prompts/persona/index.ts +16 -0
  70. package/src/prompts/persona/topics-match.ts +69 -0
  71. package/src/prompts/persona/topics-scan.ts +98 -0
  72. package/src/prompts/persona/topics-update.ts +157 -0
  73. package/src/prompts/persona/traits.ts +117 -0
  74. package/src/prompts/persona/types.ts +74 -0
  75. package/src/prompts/response/index.ts +147 -0
  76. package/src/prompts/response/sections.ts +355 -0
  77. package/src/prompts/response/types.ts +38 -0
  78. package/src/prompts/validation/ei.ts +93 -0
  79. package/src/prompts/validation/index.ts +6 -0
  80. package/src/prompts/validation/types.ts +22 -0
  81. package/src/storage/crypto.ts +96 -0
  82. package/src/storage/index.ts +5 -0
  83. package/src/storage/interface.ts +9 -0
  84. package/src/storage/local.ts +79 -0
  85. package/src/storage/merge.ts +69 -0
  86. package/src/storage/remote.ts +145 -0
  87. package/src/templates/welcome.ts +91 -0
  88. package/tui/README.md +62 -0
  89. package/tui/bunfig.toml +4 -0
  90. package/tui/src/app.tsx +55 -0
  91. package/tui/src/commands/archive.tsx +93 -0
  92. package/tui/src/commands/context.tsx +124 -0
  93. package/tui/src/commands/delete.tsx +71 -0
  94. package/tui/src/commands/details.tsx +41 -0
  95. package/tui/src/commands/editor.tsx +46 -0
  96. package/tui/src/commands/help.tsx +12 -0
  97. package/tui/src/commands/me.tsx +145 -0
  98. package/tui/src/commands/model.ts +47 -0
  99. package/tui/src/commands/new.ts +31 -0
  100. package/tui/src/commands/pause.ts +46 -0
  101. package/tui/src/commands/persona.tsx +58 -0
  102. package/tui/src/commands/provider.tsx +124 -0
  103. package/tui/src/commands/quit.ts +22 -0
  104. package/tui/src/commands/quotes.tsx +172 -0
  105. package/tui/src/commands/registry.test.ts +137 -0
  106. package/tui/src/commands/registry.ts +130 -0
  107. package/tui/src/commands/resume.ts +39 -0
  108. package/tui/src/commands/setsync.tsx +43 -0
  109. package/tui/src/commands/settings.tsx +83 -0
  110. package/tui/src/components/ConfirmOverlay.tsx +51 -0
  111. package/tui/src/components/ConflictOverlay.tsx +78 -0
  112. package/tui/src/components/HelpOverlay.tsx +69 -0
  113. package/tui/src/components/Layout.tsx +24 -0
  114. package/tui/src/components/MessageList.tsx +174 -0
  115. package/tui/src/components/PersonaListOverlay.tsx +186 -0
  116. package/tui/src/components/PromptInput.tsx +145 -0
  117. package/tui/src/components/ProviderListOverlay.tsx +208 -0
  118. package/tui/src/components/QuotesOverlay.tsx +157 -0
  119. package/tui/src/components/Sidebar.tsx +95 -0
  120. package/tui/src/components/StatusBar.tsx +77 -0
  121. package/tui/src/components/WelcomeOverlay.tsx +73 -0
  122. package/tui/src/context/ei.tsx +623 -0
  123. package/tui/src/context/keyboard.tsx +164 -0
  124. package/tui/src/context/overlay.tsx +53 -0
  125. package/tui/src/index.tsx +8 -0
  126. package/tui/src/storage/file.ts +185 -0
  127. package/tui/src/util/duration.ts +32 -0
  128. package/tui/src/util/editor.ts +188 -0
  129. package/tui/src/util/logger.ts +109 -0
  130. package/tui/src/util/persona-editor.tsx +181 -0
  131. package/tui/src/util/provider-editor.tsx +168 -0
  132. package/tui/src/util/syntax.ts +35 -0
  133. package/tui/src/util/yaml-serializers.ts +755 -0
@@ -0,0 +1,457 @@
1
+ import { LLMRequestType, LLMPriority, LLMNextStep, type Message, type DataItemType, type Fact, type Trait, type Topic, type Person } from "../types.js";
2
+ import type { StateManager } from "../state-manager.js";
3
+ import {
4
+ buildHumanFactScanPrompt,
5
+ buildHumanTraitScanPrompt,
6
+ buildHumanTopicScanPrompt,
7
+ buildHumanPersonScanPrompt,
8
+ buildHumanItemMatchPrompt,
9
+ buildHumanItemUpdatePrompt,
10
+ type FactScanCandidate,
11
+ type TraitScanCandidate,
12
+ type TopicScanCandidate,
13
+ type PersonScanCandidate,
14
+ type ItemMatchResult,
15
+ } from "../../prompts/human/index.js";
16
+ import { chunkExtractionContext } from "./extraction-chunker.js";
17
+ import { getEmbeddingService, findTopK } from "../embedding-service.js";
18
+ import { resolveTokenLimit } from "../llm-client.js";
19
+
20
/** Union of the candidate shapes produced by the four human scans (fact/trait/topic/person). */
type ScanCandidate = FactScanCandidate | TraitScanCandidate | TopicScanCandidate | PersonScanCandidate;

/** Conversation window handed to a human-data extraction pass. */
export interface ExtractionContext {
  /** Persona whose conversation is being analyzed. */
  personaId: string;
  /** Display name injected into the prompts. */
  personaDisplayName: string;
  /** Older messages supplied for context only (not re-analyzed). */
  messages_context: Message[];
  /** Messages to actually extract facts/traits/topics/people from. */
  messages_analyze: Message[];
  // Single-letter origin flag threaded through to queued request data.
  // NOTE(review): meaning of "f"/"r"/"p"/"o" is not visible here — defined by callers/handlers; confirm before documenting further.
  extraction_flag?: "f" | "r" | "p" | "o";
}

/** Optional flags spread into each queued request's `data` payload. */
export interface ExtractionOptions {
  // Presumably makes downstream handlers report ceremony progress — verify against ceremony.ts handlers.
  ceremony_progress?: boolean;
}
33
+
34
+ function getAnalyzeFromTimestamp(context: ExtractionContext): string | null {
35
+ if (context.messages_analyze.length === 0) return null;
36
+ return context.messages_analyze[0].timestamp;
37
+ }
38
+
39
// Fraction of the model's context window allotted to one extraction chunk.
const EXTRACTION_BUDGET_RATIO = 0.75;
// Floor so extraction still proceeds in usefully sized chunks on small-context models.
const MIN_EXTRACTION_TOKENS = 10000;

/**
 * Token budget for a single extraction chunk: 75% of the configured default
 * model's context window, never below MIN_EXTRACTION_TOKENS.
 */
function getExtractionMaxTokens(state: StateManager): number {
  const human = state.getHuman();
  const tokenLimit = resolveTokenLimit(human.settings?.default_model, human.settings?.accounts);
  return Math.max(MIN_EXTRACTION_TOKENS, Math.floor(tokenLimit * EXTRACTION_BUDGET_RATIO));
}
47
+
48
+ export function queueFactScan(context: ExtractionContext, state: StateManager, options?: ExtractionOptions): number {
49
+ const { chunks } = chunkExtractionContext(context, getExtractionMaxTokens(state));
50
+
51
+ if (chunks.length === 0) return 0;
52
+
53
+ for (const chunk of chunks) {
54
+ const prompt = buildHumanFactScanPrompt({
55
+ persona_name: chunk.personaDisplayName,
56
+ messages_context: chunk.messages_context,
57
+ messages_analyze: chunk.messages_analyze,
58
+ });
59
+
60
+ state.queue_enqueue({
61
+ type: LLMRequestType.JSON,
62
+ priority: LLMPriority.Normal,
63
+ system: prompt.system,
64
+ user: prompt.user,
65
+ next_step: LLMNextStep.HandleHumanFactScan,
66
+ data: {
67
+ ...options,
68
+ personaId: chunk.personaId,
69
+ personaDisplayName: chunk.personaDisplayName,
70
+ analyze_from_timestamp: getAnalyzeFromTimestamp(chunk),
71
+ extraction_flag: context.extraction_flag,
72
+ message_ids_to_mark: chunk.messages_analyze.map(m => m.id),
73
+ },
74
+ });
75
+ }
76
+
77
+ return chunks.length;
78
+ }
79
+
80
+ export function queueTraitScan(context: ExtractionContext, state: StateManager, options?: ExtractionOptions): number {
81
+ const { chunks } = chunkExtractionContext(context, getExtractionMaxTokens(state));
82
+
83
+ if (chunks.length === 0) return 0;
84
+
85
+ for (const chunk of chunks) {
86
+ const prompt = buildHumanTraitScanPrompt({
87
+ persona_name: chunk.personaDisplayName,
88
+ messages_context: chunk.messages_context,
89
+ messages_analyze: chunk.messages_analyze,
90
+ });
91
+
92
+ state.queue_enqueue({
93
+ type: LLMRequestType.JSON,
94
+ priority: LLMPriority.Normal,
95
+ system: prompt.system,
96
+ user: prompt.user,
97
+ next_step: LLMNextStep.HandleHumanTraitScan,
98
+ data: {
99
+ ...options,
100
+ personaId: chunk.personaId,
101
+ personaDisplayName: chunk.personaDisplayName,
102
+ analyze_from_timestamp: getAnalyzeFromTimestamp(chunk),
103
+ extraction_flag: context.extraction_flag,
104
+ message_ids_to_mark: chunk.messages_analyze.map(m => m.id),
105
+ },
106
+ });
107
+ }
108
+
109
+ return chunks.length;
110
+ }
111
+
112
+ export function queueTopicScan(context: ExtractionContext, state: StateManager, options?: ExtractionOptions): number {
113
+ const { chunks } = chunkExtractionContext(context, getExtractionMaxTokens(state));
114
+
115
+ if (chunks.length === 0) return 0;
116
+
117
+ for (const chunk of chunks) {
118
+ const prompt = buildHumanTopicScanPrompt({
119
+ persona_name: chunk.personaDisplayName,
120
+ messages_context: chunk.messages_context,
121
+ messages_analyze: chunk.messages_analyze,
122
+ });
123
+
124
+ state.queue_enqueue({
125
+ type: LLMRequestType.JSON,
126
+ priority: LLMPriority.Low,
127
+ system: prompt.system,
128
+ user: prompt.user,
129
+ next_step: LLMNextStep.HandleHumanTopicScan,
130
+ data: {
131
+ ...options,
132
+ personaId: chunk.personaId,
133
+ personaDisplayName: chunk.personaDisplayName,
134
+ analyze_from_timestamp: getAnalyzeFromTimestamp(chunk),
135
+ extraction_flag: context.extraction_flag,
136
+ message_ids_to_mark: chunk.messages_analyze.map(m => m.id),
137
+ },
138
+ });
139
+ }
140
+
141
+ return chunks.length;
142
+ }
143
+
144
+ export function queuePersonScan(context: ExtractionContext, state: StateManager, options?: ExtractionOptions): number {
145
+ const { chunks } = chunkExtractionContext(context, getExtractionMaxTokens(state));
146
+
147
+ if (chunks.length === 0) return 0;
148
+
149
+ const personas = state.persona_getAll();
150
+ const knownPersonaNames = personas.flatMap(p => p.aliases ?? []);
151
+
152
+ for (const chunk of chunks) {
153
+ const prompt = buildHumanPersonScanPrompt({
154
+ persona_name: chunk.personaDisplayName,
155
+ messages_context: chunk.messages_context,
156
+ messages_analyze: chunk.messages_analyze,
157
+ known_persona_names: knownPersonaNames,
158
+ });
159
+
160
+ state.queue_enqueue({
161
+ type: LLMRequestType.JSON,
162
+ priority: LLMPriority.Normal,
163
+ system: prompt.system,
164
+ user: prompt.user,
165
+ next_step: LLMNextStep.HandleHumanPersonScan,
166
+ data: {
167
+ ...options,
168
+ personaId: chunk.personaId,
169
+ personaDisplayName: chunk.personaDisplayName,
170
+ analyze_from_timestamp: getAnalyzeFromTimestamp(chunk),
171
+ extraction_flag: context.extraction_flag,
172
+ message_ids_to_mark: chunk.messages_analyze.map(m => m.id),
173
+ },
174
+ });
175
+ }
176
+
177
+ return chunks.length;
178
+ }
179
+
180
+ export function queueAllScans(context: ExtractionContext, state: StateManager, options?: ExtractionOptions): void {
181
+ queueFactScan(context, state, options);
182
+ queueTraitScan(context, state, options);
183
+ queuePersonScan(context, state, options);
184
+ queueTopicScan(context, state, options);
185
+ }
186
+
187
+ /**
188
+ * Queue a direct Topic Update, bypassing scan/match.
189
+ *
190
+ * Use this when we KNOW the topic already exists (e.g., OpenCode sessions
191
+ * where each session IS a topic). This avoids the queue explosion from
192
+ * scan → match → update pipeline.
193
+ *
194
+ * @param topic - The known Topic to update
195
+ * @param context - Messages to analyze for this topic
196
+ * @param state - StateManager for queue operations
197
+ * @returns Number of chunks queued
198
+ */
199
+ export function queueDirectTopicUpdate(
200
+ topic: import("../types.js").Topic,
201
+ context: ExtractionContext,
202
+ state: StateManager
203
+ ): number {
204
+ const { chunks } = chunkExtractionContext(context, getExtractionMaxTokens(state));
205
+
206
+ if (chunks.length === 0) return 0;
207
+
208
+ for (const chunk of chunks) {
209
+ const prompt = buildHumanItemUpdatePrompt({
210
+ data_type: "topic",
211
+ existing_item: topic,
212
+ messages_context: chunk.messages_context,
213
+ messages_analyze: chunk.messages_analyze,
214
+ persona_name: chunk.personaDisplayName,
215
+ });
216
+
217
+ state.queue_enqueue({
218
+ type: LLMRequestType.JSON,
219
+ priority: LLMPriority.Low,
220
+ system: prompt.system,
221
+ user: prompt.user,
222
+ next_step: LLMNextStep.HandleHumanItemUpdate,
223
+ data: {
224
+ personaId: context.personaId,
225
+ personaDisplayName: context.personaDisplayName,
226
+ candidateType: "topic",
227
+ matchedType: "topic",
228
+ isNewItem: false,
229
+ existingItemId: topic.id,
230
+ analyze_from_timestamp: getAnalyzeFromTimestamp(chunk),
231
+ },
232
+ });
233
+ }
234
+
235
+ return chunks.length;
236
+ }
237
+
238
+ function truncateDescription(description: string, maxLength: number = 255): string {
239
+ if (description.length <= maxLength) return description;
240
+ return description.slice(0, maxLength) + "...";
241
+ }
242
+
243
+ const EMBEDDING_TOP_K = 20;
244
+ const EMBEDDING_MIN_SIMILARITY = 0.3;
245
+
246
+ /**
247
+ * Queue an item match request using embedding-based similarity.
248
+ *
249
+ * Instead of sending ALL items to the LLM, we:
250
+ * 1. Compute embedding for the candidate (name + value)
251
+ * 2. Find top-K most similar existing items via cosine similarity
252
+ * 3. Send only those candidates to the LLM for final matching decision
253
+ *
254
+ * This reduces prompt size from O(all_items) to O(K) where K=20.
255
+ */
256
+ export async function queueItemMatch(
257
+ dataType: DataItemType,
258
+ candidate: ScanCandidate,
259
+ context: ExtractionContext,
260
+ state: StateManager
261
+ ): Promise<void> {
262
+ const human = state.getHuman();
263
+
264
+ let itemName: string;
265
+ let itemValue: string;
266
+
267
+ switch (dataType) {
268
+ case "fact":
269
+ itemName = (candidate as FactScanCandidate).type_of_fact;
270
+ itemValue = (candidate as FactScanCandidate).value_of_fact;
271
+ break;
272
+ case "trait":
273
+ itemName = (candidate as TraitScanCandidate).type_of_trait;
274
+ itemValue = (candidate as TraitScanCandidate).value_of_trait;
275
+ break;
276
+ case "topic":
277
+ itemName = (candidate as TopicScanCandidate).value_of_topic;
278
+ itemValue = (candidate as TopicScanCandidate).type_of_topic;
279
+ break;
280
+ case "person":
281
+ itemName = (candidate as PersonScanCandidate).name_of_person;
282
+ itemValue = (candidate as PersonScanCandidate).type_of_person;
283
+ break;
284
+ }
285
+
286
+ const allItemsWithEmbeddings = [
287
+ ...human.facts.map(f => ({ ...f, data_type: "fact" as DataItemType })),
288
+ ...human.traits.map(t => ({ ...t, data_type: "trait" as DataItemType })),
289
+ ...human.topics.map(t => ({ ...t, data_type: "topic" as DataItemType })),
290
+ ...human.people.map(p => ({ ...p, data_type: "person" as DataItemType })),
291
+ ].filter(item => item.embedding && item.embedding.length > 0);
292
+
293
+ let topKItems: Array<{
294
+ data_type: DataItemType;
295
+ data_id: string;
296
+ data_name: string;
297
+ data_description: string;
298
+ }> = [];
299
+
300
+ if (allItemsWithEmbeddings.length > 0) {
301
+ try {
302
+ const embeddingService = getEmbeddingService();
303
+ const candidateText = `${itemName}: ${itemValue}`;
304
+ const candidateVector = await embeddingService.embed(candidateText);
305
+
306
+ const topK = findTopK(candidateVector, allItemsWithEmbeddings, EMBEDDING_TOP_K);
307
+
308
+ topKItems = topK
309
+ .filter(({ similarity }) => similarity >= EMBEDDING_MIN_SIMILARITY)
310
+ .map(({ item }) => ({
311
+ data_type: item.data_type,
312
+ data_id: item.id,
313
+ data_name: item.name,
314
+ data_description: item.data_type === dataType
315
+ ? item.description
316
+ : truncateDescription(item.description),
317
+ }));
318
+
319
+ console.log(`[queueItemMatch] Embedding search: ${allItemsWithEmbeddings.length} items → ${topKItems.length} candidates (top-K=${EMBEDDING_TOP_K}, min_sim=${EMBEDDING_MIN_SIMILARITY})`);
320
+ } catch (err) {
321
+ console.error(`[queueItemMatch] Embedding search failed, falling back to all items:`, err);
322
+ }
323
+ }
324
+
325
+ if (topKItems.length === 0) {
326
+ console.log(`[queueItemMatch] No embeddings available, using all ${human.facts.length + human.traits.length + human.topics.length + human.people.length} items`);
327
+
328
+ for (const fact of human.facts) {
329
+ topKItems.push({
330
+ data_type: "fact",
331
+ data_id: fact.id,
332
+ data_name: fact.name,
333
+ data_description: dataType === "fact" ? fact.description : truncateDescription(fact.description),
334
+ });
335
+ }
336
+
337
+ for (const trait of human.traits) {
338
+ topKItems.push({
339
+ data_type: "trait",
340
+ data_id: trait.id,
341
+ data_name: trait.name,
342
+ data_description: dataType === "trait" ? trait.description : truncateDescription(trait.description),
343
+ });
344
+ }
345
+
346
+ for (const topic of human.topics) {
347
+ topKItems.push({
348
+ data_type: "topic",
349
+ data_id: topic.id,
350
+ data_name: topic.name,
351
+ data_description: dataType === "topic" ? topic.description : truncateDescription(topic.description),
352
+ });
353
+ }
354
+
355
+ for (const person of human.people) {
356
+ topKItems.push({
357
+ data_type: "person",
358
+ data_id: person.id,
359
+ data_name: person.name,
360
+ data_description: dataType === "person" ? person.description : truncateDescription(person.description),
361
+ });
362
+ }
363
+ }
364
+
365
+ const prompt = buildHumanItemMatchPrompt({
366
+ candidate_type: dataType,
367
+ candidate_name: itemName,
368
+ candidate_value: itemValue,
369
+ all_items: topKItems,
370
+ });
371
+
372
+
373
+
374
+ state.queue_enqueue({
375
+ type: LLMRequestType.JSON,
376
+ priority: LLMPriority.Low,
377
+ system: prompt.system,
378
+ user: prompt.user,
379
+ next_step: LLMNextStep.HandleHumanItemMatch,
380
+ data: {
381
+ ...context,
382
+ candidateType: dataType,
383
+ itemName,
384
+ itemValue,
385
+ },
386
+ });
387
+ }
388
+
389
+ export function queueItemUpdate(
390
+ candidateType: DataItemType,
391
+ matchResult: ItemMatchResult,
392
+ context: ExtractionContext & { itemName: string; itemValue: string; itemCategory?: string },
393
+ state: StateManager
394
+ ): number {
395
+ const human = state.getHuman();
396
+ const matchedGuid = matchResult.matched_guid;
397
+ const isNewItem = matchedGuid === null;
398
+
399
+ let existingItem: Fact | Trait | Topic | Person | null = null;
400
+ let matchedType: DataItemType | null = null;
401
+
402
+ if (!isNewItem) {
403
+ existingItem = human.facts.find(f => f.id === matchedGuid) ?? null;
404
+ if (existingItem) matchedType = "fact";
405
+
406
+ if (!existingItem) {
407
+ existingItem = human.traits.find(t => t.id === matchedGuid) ?? null;
408
+ if (existingItem) matchedType = "trait";
409
+ }
410
+
411
+ if (!existingItem) {
412
+ existingItem = human.topics.find(t => t.id === matchedGuid) ?? null;
413
+ if (existingItem) matchedType = "topic";
414
+ }
415
+
416
+ if (!existingItem) {
417
+ existingItem = human.people.find(p => p.id === matchedGuid) ?? null;
418
+ if (existingItem) matchedType = "person";
419
+ }
420
+ }
421
+
422
+ const { chunks } = chunkExtractionContext(context, getExtractionMaxTokens(state));
423
+
424
+ if (chunks.length === 0) return 0;
425
+
426
+ for (const chunk of chunks) {
427
+ const prompt = buildHumanItemUpdatePrompt({
428
+ data_type: candidateType,
429
+ existing_item: existingItem,
430
+ messages_context: chunk.messages_context,
431
+ messages_analyze: chunk.messages_analyze,
432
+ persona_name: chunk.personaDisplayName,
433
+ new_item_name: isNewItem ? context.itemName : undefined,
434
+ new_item_value: isNewItem ? context.itemValue : undefined,
435
+ });
436
+
437
+ state.queue_enqueue({
438
+ type: LLMRequestType.JSON,
439
+ priority: LLMPriority.Low,
440
+ system: prompt.system,
441
+ user: prompt.user,
442
+ next_step: LLMNextStep.HandleHumanItemUpdate,
443
+ data: {
444
+ personaId: context.personaId,
445
+ personaDisplayName: context.personaDisplayName,
446
+ candidateType,
447
+ matchedType,
448
+ isNewItem,
449
+ existingItemId: existingItem?.id,
450
+ itemCategory: context.itemCategory,
451
+ analyze_from_timestamp: getAnalyzeFromTimestamp(chunk),
452
+ },
453
+ });
454
+ }
455
+
456
+ return chunks.length;
457
+ }
@@ -0,0 +1,28 @@
1
// Barrel for the orchestrator layer.

// Persona generation loop (description → traits → topics).
export { orchestratePersonaGeneration, type PartialPersona } from "./persona-generation.js";
// Human-data extraction pipeline: scan → match → update.
// NOTE(review): queueDirectTopicUpdate is not re-exported here — confirm intentional.
export {
  queueFactScan,
  queueTraitScan,
  queueTopicScan,
  queuePersonScan,
  queueAllScans,
  queueItemMatch,
  queueItemUpdate,
  type ExtractionContext,
  type ExtractionOptions,
} from "./human-extraction.js";
// Ceremony lifecycle: trigger check, phases, and progress handling.
export {
  shouldStartCeremony,
  startCeremony,
  handleCeremonyProgress,
  prunePersonaMessages,
  queueExpirePhase,
  queueExplorePhase,
  queueDescriptionCheck,
  runHumanCeremony,
} from "./ceremony.js";
// Persona-topic scan/match/update pipeline.
export {
  queuePersonaTopicScan,
  queuePersonaTopicMatch,
  queuePersonaTopicUpdate,
  type PersonaTopicContext,
} from "./persona-topics.js";
@@ -0,0 +1,76 @@
1
+ import { LLMRequestType, LLMPriority, LLMNextStep, type Trait, type PersonaTopic } from "../types.js";
2
+ import type { StateManager } from "../state-manager.js";
3
+ import { buildPersonaGenerationPrompt } from "../../prompts/index.js";
4
+
5
// Hard cap on generation round-trips per persona, guarding against an LLM
// response that never satisfies the completion criteria.
const MAX_ORCHESTRATOR_LOOPS = 4;

/**
 * A persona under construction. Fields accumulate across generation loops
 * until the orchestrator deems the persona complete and persists it.
 */
export interface PartialPersona {
  id: string;
  name: string;
  aliases?: string[];
  description?: string;
  short_description?: string;
  long_description?: string;
  traits?: Partial<Trait>[];
  topics?: Partial<PersonaTopic>[];
  model?: string;
  group_primary?: string;
  groups_visible?: string[];
  // Number of generation rounds already spent on this persona.
  loop_counter?: number;
  // Generation phase marker — presumably set by the generation handler; confirm usage.
  step?: "description" | "traits" | "topics";
}
22
+
23
+ export function orchestratePersonaGeneration(
24
+ partial: PartialPersona,
25
+ stateManager: StateManager,
26
+ onComplete?: () => void
27
+ ): void {
28
+ const loopCounter = (partial.loop_counter ?? 0) + 1;
29
+
30
+ if (loopCounter > MAX_ORCHESTRATOR_LOOPS) {
31
+ console.error(`[orchestratePersonaGeneration] Max loops (${MAX_ORCHESTRATOR_LOOPS}) exceeded for ${partial.name}`);
32
+ return;
33
+ }
34
+
35
+ const needsShortDescription = !partial.short_description;
36
+ const traitCount = partial.traits?.filter(t => t.name?.trim()).length ?? 0;
37
+ const topicCount = partial.topics?.filter(t => t.name?.trim()).length ?? 0;
38
+ const needsMoreTraits = traitCount < 3;
39
+ const needsMoreTopics = topicCount < 3;
40
+
41
+ if (needsShortDescription || needsMoreTraits || needsMoreTopics) {
42
+ const prompt = buildPersonaGenerationPrompt({
43
+ name: partial.name,
44
+ long_description: partial.long_description,
45
+ short_description: partial.short_description,
46
+ existing_traits: partial.traits,
47
+ existing_topics: partial.topics,
48
+ });
49
+
50
+ stateManager.queue_enqueue({
51
+ type: LLMRequestType.JSON,
52
+ priority: LLMPriority.High,
53
+ system: prompt.system,
54
+ user: prompt.user,
55
+ next_step: LLMNextStep.HandlePersonaGeneration,
56
+ data: {
57
+ personaId: partial.id,
58
+ personaDisplayName: partial.name,
59
+ partial: { ...partial, loop_counter: loopCounter },
60
+ },
61
+ });
62
+ return;
63
+ }
64
+
65
+ const now = new Date().toISOString();
66
+ stateManager.persona_update(partial.id, {
67
+ short_description: partial.short_description,
68
+ long_description: partial.long_description,
69
+ traits: partial.traits as Trait[],
70
+ topics: partial.topics as PersonaTopic[],
71
+ last_updated: now,
72
+ });
73
+
74
+ console.log(`[orchestratePersonaGeneration] Completed: ${partial.name}`);
75
+ onComplete?.();
76
+ }
@@ -0,0 +1,117 @@
1
+ import { LLMRequestType, LLMPriority, LLMNextStep, type Message, type PersonaTopic } from "../types.js";
2
+ import type { StateManager } from "../state-manager.js";
3
+ import {
4
+ buildPersonaTopicScanPrompt,
5
+ buildPersonaTopicMatchPrompt,
6
+ buildPersonaTopicUpdatePrompt,
7
+ type PersonaTopicScanCandidate,
8
+ type PersonaTopicMatchResult,
9
+ } from "../../prompts/persona/index.js";
10
+
11
+ export interface PersonaTopicContext {
12
+ personaId: string;
13
+ personaDisplayName: string;
14
+ messages_context: Message[];
15
+ messages_analyze: Message[];
16
+ }
17
+
18
+ function getAnalyzeFromTimestamp(context: PersonaTopicContext): string | null {
19
+ if (context.messages_analyze.length === 0) return null;
20
+ return context.messages_analyze[0].timestamp;
21
+ }
22
+
23
+ export function queuePersonaTopicScan(context: PersonaTopicContext, state: StateManager): void {
24
+ const prompt = buildPersonaTopicScanPrompt({
25
+ persona_name: context.personaDisplayName,
26
+ messages_context: context.messages_context,
27
+ messages_analyze: context.messages_analyze,
28
+ });
29
+
30
+ state.queue_enqueue({
31
+ type: LLMRequestType.JSON,
32
+ priority: LLMPriority.Low,
33
+ system: prompt.system,
34
+ user: prompt.user,
35
+ next_step: LLMNextStep.HandlePersonaTopicScan,
36
+ data: {
37
+ personaId: context.personaId,
38
+ personaDisplayName: context.personaDisplayName,
39
+ analyze_from_timestamp: getAnalyzeFromTimestamp(context),
40
+ },
41
+ });
42
+ }
43
+
44
+ export function queuePersonaTopicMatch(
45
+ candidate: PersonaTopicScanCandidate,
46
+ context: PersonaTopicContext,
47
+ state: StateManager
48
+ ): void {
49
+ const persona = state.persona_getById(context.personaId);
50
+ if (!persona) {
51
+ console.error(`[queuePersonaTopicMatch] Persona not found: ${context.personaId}`);
52
+ return;
53
+ }
54
+
55
+ const prompt = buildPersonaTopicMatchPrompt({
56
+ persona_name: context.personaDisplayName,
57
+ candidate,
58
+ existing_topics: persona.topics,
59
+ });
60
+
61
+ state.queue_enqueue({
62
+ type: LLMRequestType.JSON,
63
+ priority: LLMPriority.Low,
64
+ system: prompt.system,
65
+ user: prompt.user,
66
+ next_step: LLMNextStep.HandlePersonaTopicMatch,
67
+ data: {
68
+ personaId: context.personaId,
69
+ personaDisplayName: context.personaDisplayName,
70
+ candidate,
71
+ analyze_from_timestamp: getAnalyzeFromTimestamp(context),
72
+ },
73
+ });
74
+ }
75
+
76
+ export function queuePersonaTopicUpdate(
77
+ candidate: PersonaTopicScanCandidate,
78
+ matchResult: PersonaTopicMatchResult,
79
+ context: PersonaTopicContext,
80
+ state: StateManager
81
+ ): void {
82
+ const persona = state.persona_getById(context.personaId);
83
+ if (!persona) {
84
+ console.error(`[queuePersonaTopicUpdate] Persona not found: ${context.personaId}`);
85
+ return;
86
+ }
87
+
88
+ const existingTopic = matchResult.matched_id
89
+ ? persona.topics.find((t: PersonaTopic) => t.id === matchResult.matched_id)
90
+ : undefined;
91
+
92
+ const prompt = buildPersonaTopicUpdatePrompt({
93
+ persona_name: context.personaDisplayName,
94
+ short_description: persona.short_description,
95
+ long_description: persona.long_description,
96
+ traits: persona.traits,
97
+ existing_topic: existingTopic,
98
+ candidate,
99
+ messages_context: context.messages_context,
100
+ messages_analyze: context.messages_analyze,
101
+ });
102
+
103
+ state.queue_enqueue({
104
+ type: LLMRequestType.JSON,
105
+ priority: LLMPriority.Low,
106
+ system: prompt.system,
107
+ user: prompt.user,
108
+ next_step: LLMNextStep.HandlePersonaTopicUpdate,
109
+ data: {
110
+ personaId: context.personaId,
111
+ personaDisplayName: context.personaDisplayName,
112
+ candidate,
113
+ matched_id: matchResult.matched_id,
114
+ analyze_from_timestamp: getAnalyzeFromTimestamp(context),
115
+ },
116
+ });
117
+ }
@@ -0,0 +1,5 @@
1
// Barrel for persona helpers backing the OpenCode agent integration.
export {
  ensureAgentPersona,
  ensureAllAgentPersonas,
  type EnsureAgentPersonaOptions,
} from "./opencode-agent.js";