ei-tui 0.1.5 → 0.1.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "ei-tui",
3
- "version": "0.1.5",
3
+ "version": "0.1.7",
4
4
  "author": "Flare576",
5
5
  "repository": {
6
6
  "type": "git",
@@ -121,7 +121,7 @@ function handlePersonaResponse(response: LLMResponse, state: StateManager): void
121
121
  silence_reason: reason,
122
122
  timestamp: new Date().toISOString(),
123
123
  read: false,
124
- context_status: ContextStatus.Never,
124
+ context_status: ContextStatus.Default,
125
125
  };
126
126
  state.messages_append(personaId, silentMessage);
127
127
  } else {
@@ -59,7 +59,7 @@ export function queueFactScan(context: ExtractionContext, state: StateManager, o
59
59
 
60
60
  state.queue_enqueue({
61
61
  type: LLMRequestType.JSON,
62
- priority: LLMPriority.Normal,
62
+ priority: LLMPriority.Low,
63
63
  system: prompt.system,
64
64
  user: prompt.user,
65
65
  next_step: LLMNextStep.HandleHumanFactScan,
@@ -91,7 +91,7 @@ export function queueTraitScan(context: ExtractionContext, state: StateManager,
91
91
 
92
92
  state.queue_enqueue({
93
93
  type: LLMRequestType.JSON,
94
- priority: LLMPriority.Normal,
94
+ priority: LLMPriority.Low,
95
95
  system: prompt.system,
96
96
  user: prompt.user,
97
97
  next_step: LLMNextStep.HandleHumanTraitScan,
@@ -146,20 +146,18 @@ export function queuePersonScan(context: ExtractionContext, state: StateManager,
146
146
 
147
147
  if (chunks.length === 0) return 0;
148
148
 
149
- const personas = state.persona_getAll();
150
- const knownPersonaNames = personas.flatMap(p => p.aliases ?? []);
149
+
151
150
 
152
151
  for (const chunk of chunks) {
153
152
  const prompt = buildHumanPersonScanPrompt({
154
153
  persona_name: chunk.personaDisplayName,
155
154
  messages_context: chunk.messages_context,
156
155
  messages_analyze: chunk.messages_analyze,
157
- known_persona_names: knownPersonaNames,
158
156
  });
159
157
 
160
158
  state.queue_enqueue({
161
159
  type: LLMRequestType.JSON,
162
- priority: LLMPriority.Normal,
160
+ priority: LLMPriority.Low,
163
161
  system: prompt.system,
164
162
  user: prompt.user,
165
163
  next_step: LLMNextStep.HandleHumanPersonScan,
@@ -216,7 +214,7 @@ export function queueDirectTopicUpdate(
216
214
 
217
215
  state.queue_enqueue({
218
216
  type: LLMRequestType.JSON,
219
- priority: LLMPriority.Low,
217
+ priority: LLMPriority.Normal,
220
218
  system: prompt.system,
221
219
  user: prompt.user,
222
220
  next_step: LLMNextStep.HandleHumanItemUpdate,
@@ -373,7 +371,7 @@ export async function queueItemMatch(
373
371
 
374
372
  state.queue_enqueue({
375
373
  type: LLMRequestType.JSON,
376
- priority: LLMPriority.Low,
374
+ priority: LLMPriority.Normal,
377
375
  system: prompt.system,
378
376
  user: prompt.user,
379
377
  next_step: LLMNextStep.HandleHumanItemMatch,
@@ -436,7 +434,7 @@ export function queueItemUpdate(
436
434
 
437
435
  state.queue_enqueue({
438
436
  type: LLMRequestType.JSON,
439
- priority: LLMPriority.Low,
437
+ priority: LLMPriority.Normal,
440
438
  system: prompt.system,
441
439
  user: prompt.user,
442
440
  next_step: LLMNextStep.HandleHumanItemUpdate,
@@ -131,15 +131,24 @@ export async function importOpenCodeSessions(
131
131
  (a, b) => a.time.updated - b.time.updated
132
132
  );
133
133
  let targetSession: OpenCodeSession | null = null;
134
+ const MIN_SESSION_AGE_MS = 20 * 60 * 1000; // 20 minutes
135
+ const now = Date.now();
136
+
134
137
  for (const session of sortedSessions) {
135
138
  const lastImported = processedSessions[session.id];
136
139
  if (!lastImported) {
137
- targetSession = session;
138
- break;
140
+ const ageMs = now - session.time.updated;
141
+ if (ageMs >= MIN_SESSION_AGE_MS) {
142
+ targetSession = session;
143
+ break;
144
+ }
139
145
  }
140
146
  if (session.time.updated > new Date(lastImported).getTime()) {
141
- targetSession = session;
142
- break;
147
+ const ageMs = now - session.time.updated;
148
+ if (ageMs >= MIN_SESSION_AGE_MS) {
149
+ targetSession = session;
150
+ break;
151
+ }
143
152
  }
144
153
  }
145
154
 
@@ -7,9 +7,6 @@ export function buildHumanPersonScanPrompt(data: PersonScanPromptData): PromptOu
7
7
  }
8
8
 
9
9
  const personaName = data.persona_name;
10
- const knownPersonasList = data.known_persona_names.length > 0
11
- ? data.known_persona_names.map(name => ` + ${name}`).join('\n')
12
- : ' + (none)';
13
10
 
14
11
  const taskFragment = `# Task
15
12
 
@@ -42,17 +39,14 @@ Your job is to quickly identify:
42
39
  * Lover / Love Interest
43
40
  * Fiance / Spouse
44
41
  * Coworker
42
+ * AI Persona (use \`type_of_person: "Persona"\`)
45
43
 
46
44
  **A PERSON Is NOT**
47
45
  - Biographical data: Birthday, Location, Job, Marital Status, Gender, Eye Color, Hair Color
48
46
  - Other unchangeable Data: Wedding Day, Allergies
49
47
  - Trait: Personality patterns, communication style, behavioral tendencies
50
48
  - General Topic: Interests, Hobbies, General subjects
51
- - Personas: AI personas they discuss
52
- * Known Personas:
53
- ${knownPersonasList}
54
49
  - Characters: Fictitious entities from books, movies, stories, media, etc.`;
55
-
56
50
  const criticalFragment = `# CRITICAL INSTRUCTIONS
57
51
 
58
52
  ONLY ANALYZE the "Most Recent Messages" in the following conversation. The "Earlier Conversation" is provided for your context and has already been processed!
@@ -82,8 +82,8 @@ The JSON format is:
82
82
  "topics": [
83
83
  {
84
84
  "type_of_topic": "Interest|Goal|Dream|Conflict|Concern|etc.",
85
- "value_of_topic": "woodworking|Become Millionaire|Visit Spain|etc.",
86
- "reason": "User stated...|Assumed from..."
85
+ "value_of_topic": "<actual topic from the conversation>",
86
+ "reason": "User stated...|Assumed from..."
87
87
  }
88
88
  ]
89
89
  }
@@ -124,7 +124,7 @@ Scan the "Most Recent Messages" for TOPICS of interest to the human user.
124
124
  "topics": [
125
125
  {
126
126
  "type_of_topic": "Interest|Goal|Dream|etc.",
127
- "value_of_topic": "woodworking|Become Millionaire|etc.",
127
+ "value_of_topic": "<actual topic from the conversation>",
128
128
  "reason": "User stated..."
129
129
  }
130
130
  ]
@@ -17,9 +17,7 @@ export interface TraitScanPromptData extends BaseScanPromptData {}
17
17
 
18
18
  export interface TopicScanPromptData extends BaseScanPromptData {}
19
19
 
20
- export interface PersonScanPromptData extends BaseScanPromptData {
21
- known_persona_names: string[];
22
- }
20
+ export interface PersonScanPromptData extends BaseScanPromptData {}
23
21
 
24
22
  export interface FactScanCandidate {
25
23
  type_of_fact: string;
@@ -44,10 +44,8 @@ export function formatMessageAsPlaceholder(message: Message, personaName: string
44
44
  }
45
45
 
46
46
  export function formatMessagesAsPlaceholders(messages: Message[], personaName: string): string {
47
- // Skip silence-only messages they're not conversational context for the LLM
48
- const conversational = messages.filter(m => m.silence_reason === undefined);
49
- if (conversational.length === 0) return "(No messages)";
50
- return conversational.map(m => formatMessageAsPlaceholder(m, personaName)).join('\n\n');
47
+ if (messages.length === 0) return "(No messages)";
48
+ return messages.map(m => formatMessageAsPlaceholder(m, personaName)).join('\n\n');
51
49
  }
52
50
 
53
51
  export function hydratePromptPlaceholders(
@@ -371,7 +371,21 @@ The human can view and edit all of this by ${seeHumanDataAction}.
371
371
  // =============================================================================
372
372
 
373
373
  export function buildResponseFormatSection(): string {
374
- const jsonResponding = [
374
+ const jsonVerbalOnly = [
375
+ '{',
376
+ ' "should_respond": true,',
377
+ ' "verbal_response": "What you would say out loud"',
378
+ '}'
379
+ ].join('\n');
380
+
381
+ const jsonActionOnly = [
382
+ '{',
383
+ ' "should_respond": true,',
384
+ ' "action_response": "What you would do (rendered in italics, like stage directions)"',
385
+ '}'
386
+ ].join('\n');
387
+
388
+ const jsonBoth = [
375
389
  '{',
376
390
  ' "should_respond": true,',
377
391
  ' "verbal_response": "What you would say out loud",',
@@ -388,20 +402,31 @@ export function buildResponseFormatSection(): string {
388
402
 
389
403
  return `## Response Format
390
404
 
391
- Always respond with JSON in this exact format:
405
+ Always respond with JSON. You have four valid forms:
406
+
407
+ **Words only** (most common):
408
+ \`\`\`json
409
+ ${jsonVerbalOnly}
410
+ \`\`\`
392
411
 
412
+ **Action only** (a gesture, expression, or physical reaction with no words):
393
413
  \`\`\`json
394
- ${jsonResponding}
414
+ ${jsonActionOnly}
395
415
  \`\`\`
396
416
 
397
- Or, if staying silent:
417
+ **Words and action** (speaking while doing something):
418
+ \`\`\`json
419
+ ${jsonBoth}
420
+ \`\`\`
398
421
 
422
+ **Silent** (choosing not to respond):
399
423
  \`\`\`json
400
424
  ${jsonSilent}
401
425
  \`\`\`
402
426
 
403
427
  Rules:
404
- - \`verbal_response\` and \`action_response\` are both optional - include whichever applies
428
+ - Use whichever combination fits the moment — both fields are optional, but at least one must be present when \`should_respond\` is true
429
+ - \`action_response\` alone is valid — a smile, a shrug, or a thoughtful pause can speak volumes
405
430
  - \`reason\` is only used when \`should_respond\` is false
406
431
  - Do NOT include \`<thinking>\` blocks or analysis outside the JSON
407
432
  - The JSON must be valid - use double quotes, no trailing commas`;