ei-tui 1.0.1 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -18,6 +18,77 @@ import { webFetchExecutor } from "./builtin/web-fetch.js";
18
18
  /** Hard upper limit on total tool calls per interaction, regardless of individual limits. */
19
19
  export const HARD_TOOL_CALL_LIMIT = 10;
20
20
 
21
+ /**
22
+ * System tools — injected unconditionally into every LLM call that uses tools.
23
+ * NOT stored in state.json. NOT user-configurable. NOT counted against HARD_TOOL_CALL_LIMIT.
24
+ * Enforce their own per-tool limits via max_calls_per_interaction.
25
+ */
26
+ export const SYSTEM_TOOLS: ToolDefinition[] = [
27
+ {
28
+ id: "builtin-find-memory",
29
+ provider_id: "ei",
30
+ name: "find_memory",
31
+ display_name: "Find Memory",
32
+ description: "Semantic search of your personal memory — facts, topics, people, and quotes learned across ALL conversations over time, not just this one. Use when the human references something from the past, mentions a person, or asks about a topic you might have learned about. Supports optional filters: types (array of 'facts', 'topics', 'people', 'quotes'), limit (1-20, default 10), recent (true = sort by recency), persona (filter to what a specific persona has learned — use display name).",
33
+ input_schema: {
34
+ type: "object",
35
+ properties: {
36
+ query: { type: "string", description: "What to search for" },
37
+ types: { type: "array", items: { type: "string", enum: ["facts", "topics", "people", "quotes"] }, description: "Filter to specific types" },
38
+ limit: { type: "number", description: "Max results (1-20, default 10)" },
39
+ recent: { type: "boolean", description: "Sort by most recently mentioned instead of relevance" },
40
+ persona: { type: "string", description: "Filter to what a specific persona has learned. Use their display name." },
41
+ },
42
+ required: ["query"],
43
+ },
44
+ runtime: "any",
45
+ builtin: true,
46
+ enabled: true,
47
+ created_at: new Date(0).toISOString(),
48
+ max_calls_per_interaction: 3,
49
+ },
50
+ {
51
+ id: "builtin-fetch-memory",
52
+ provider_id: "ei",
53
+ name: "fetch_memory",
54
+ display_name: "Fetch Memory",
55
+ description: "Retrieve the full record for a specific memory by its ID. Use when find_memory returns an item and you need its complete details, or when a system prompt references a memory ID. Returns the full Fact, Topic, Person, or Quote record.",
56
+ input_schema: {
57
+ type: "object",
58
+ properties: {
59
+ id: { type: "string", description: "The ID of the memory to retrieve" },
60
+ },
61
+ required: ["id"],
62
+ },
63
+ runtime: "any",
64
+ builtin: true,
65
+ enabled: true,
66
+ created_at: new Date(0).toISOString(),
67
+ max_calls_per_interaction: 3,
68
+ },
69
+ {
70
+ id: "builtin-fetch-message",
71
+ provider_id: "ei",
72
+ name: "fetch_message",
73
+ display_name: "Fetch Message",
74
+ description: "Retrieve a specific message by its ID, with optional surrounding context. Use when find_memory returns a quote with a message_id and you want to read the original conversation, or when a temporal anchor references a message ID. The 'before' and 'after' parameters include that many additional messages for context (default 0).",
75
+ input_schema: {
76
+ type: "object",
77
+ properties: {
78
+ id: { type: "string", description: "The message ID to retrieve" },
79
+ before: { type: "number", description: "Number of preceding messages to include for context (default 0)" },
80
+ after: { type: "number", description: "Number of following messages to include for context (default 0)" },
81
+ },
82
+ required: ["id"],
83
+ },
84
+ runtime: "any",
85
+ builtin: true,
86
+ enabled: true,
87
+ created_at: new Date(0).toISOString(),
88
+ max_calls_per_interaction: 5,
89
+ },
90
+ ];
91
+
21
92
  /** Default max calls per tool if not set on the ToolDefinition. */
22
93
  const DEFAULT_MAX_CALLS = 3;
23
94
 
@@ -32,7 +103,7 @@ export function registerExecutor(executor: ToolExecutor): void {
32
103
  executorRegistry.set(executor.name, executor);
33
104
  }
34
105
 
35
- // Register builtins. read_memory is registered lazily via registerReadMemoryExecutor()
106
+ // Register builtins. find_memory is registered lazily via registerFindMemoryExecutor()
36
107
  // because it requires Processor.searchHumanData injection.
37
108
  registerExecutor(tavilyWebSearchExecutor);
38
109
  registerExecutor(tavilyNewsSearchExecutor);
@@ -42,10 +113,18 @@ registerExecutor(webFetchExecutor);
42
113
  // file_read and list_directory are registered lazily via registerFileReadExecutor() — Node/TUI only.
43
114
 
44
115
  /**
45
- * Register the read_memory executor — called by Processor after it's initialized,
116
+ * Register the find_memory executor — called by Processor after it's initialized,
46
117
  * injecting its own searchHumanData method to avoid circular imports.
47
118
  */
48
- export function registerReadMemoryExecutor(executor: ToolExecutor): void {
119
+ export function registerFindMemoryExecutor(executor: ToolExecutor): void {
120
+ executorRegistry.set(executor.name, executor);
121
+ }
122
+
123
+ export function registerFetchMemoryExecutor(executor: ToolExecutor): void {
124
+ executorRegistry.set(executor.name, executor);
125
+ }
126
+
127
+ export function registerFetchMessageExecutor(executor: ToolExecutor): void {
49
128
  executorRegistry.set(executor.name, executor);
50
129
  }
51
130
 
@@ -122,7 +201,9 @@ export async function executeToolCalls(
122
201
  const toolsByName = new Map(tools.map(t => [t.name, t]));
123
202
 
124
203
  for (const call of calls) {
125
- if (totalCalls.count >= HARD_TOOL_CALL_LIMIT) {
204
+ const isSystemTool = SYSTEM_TOOLS.some(t => t.name === call.name);
205
+
206
+ if (!isSystemTool && totalCalls.count >= HARD_TOOL_CALL_LIMIT) {
126
207
  console.log(`[Tools] Hard limit (${HARD_TOOL_CALL_LIMIT}) reached — skipping remaining tool calls`);
127
208
  break;
128
209
  }
@@ -168,7 +249,9 @@ export async function executeToolCalls(
168
249
  }
169
250
 
170
251
  callCounts.set(call.name, currentCount + 1);
171
- totalCalls.count++;
252
+ if (!isSystemTool) {
253
+ totalCalls.count++;
254
+ }
172
255
 
173
256
  const newCount = currentCount + 1;
174
257
  if (newCount >= maxCalls) {
@@ -3,6 +3,7 @@ import type { Message } from "../../core/types.js";
3
3
  import {
4
4
  queueTopicScan,
5
5
  queuePersonScan,
6
+ queueFactFind,
6
7
  type ExtractionContext,
7
8
  } from "../../core/orchestrators/human-extraction.js";
8
9
 
@@ -101,11 +102,12 @@ export async function importPersonaHistory(
101
102
  };
102
103
 
103
104
  const extractionModel = settings?.extraction_model;
105
+ queueFactFind(context, stateManager, { extraction_model: extractionModel });
104
106
  queueTopicScan(context, stateManager, { extraction_model: extractionModel });
105
107
  queuePersonScan(context, stateManager, { extraction_model: extractionModel });
106
108
 
107
109
  result.personasProcessed++;
108
- result.scansQueued += 2;
110
+ result.scansQueued += 3;
109
111
  }
110
112
 
111
113
  for (const room of Object.values((stateManager.getStorageState() as any).rooms ?? {})) {
@@ -20,7 +20,7 @@ export function buildDedupPrompt(data: DedupPromptData): { system: string; user:
20
20
  You are working with Opus 4.6 constraints. These rules prevent overthinking and ensure decisive action:
21
21
 
22
22
  ### 1. TOOL BUDGET
23
- - You have **6 \`read_memory\` calls** for this cluster
23
+ - You have **6 \`find_memory\` calls** for this cluster
24
24
  - Prioritize: verify ambiguous relationships > check parent concepts > validate new entities
25
25
  - After 6 calls, make decisions with available information
26
26
  - Do NOT waste calls re-checking pairs you already examined
@@ -55,7 +55,7 @@ You are acting as the curator for a user's internal database. You have been give
55
55
 
56
56
  Your secondary directive is to ORGANIZE IT into small, non-repetitive components. The user NEEDS the data, but the data is used by AI agents, so duplication limits usefulness—agents waste tokens re-reading the same information under different names.
57
57
 
58
- You have access to a tool called \`read_memory\` (6 calls max — see HARD RULES above). Use it strategically to verify relationships, check for related records, or gather context before making merge decisions.
58
+ You have access to a tool called \`find_memory\` (6 calls max — see HARD RULES above). Use it strategically to verify relationships, check for related records, or gather context before making merge decisions.
59
59
 
60
60
  ### Decision Process:
61
61
  1. **Identify true duplicates**: Examine each record. Are these genuinely the same thing with different wording (85%+ core meaning overlap), or are they distinct but related concepts?
@@ -94,7 +94,7 @@ ${buildRecordFormatExamples(data.itemType)}
94
94
  - Every removed record MUST have "replaced_by" pointing to the canonical record that absorbed its data.
95
95
  - The "update" array should contain AT LEAST ONE record (the canonical/merged one), even if all others are removed.
96
96
  - If records are NOT duplicates (just similar), return them ALL in "update" unchanged, with empty "remove" and "add" arrays.
97
- - Use \`read_memory\` strategically (6 calls max) to check for related records or gather context before making irreversible merge decisions.`;
97
+ - Use \`find_memory\` strategically (6 calls max) to check for related records or gather context before making irreversible merge decisions.`;
98
98
 
99
99
  const payload = JSON.stringify({
100
100
  cluster: data.cluster.map(stripEmbedding),
@@ -15,7 +15,7 @@ export interface RewriteScanPromptData {
15
15
  /** Phase 1 output: array of subject strings (parsed from LLM JSON response). */
16
16
  export type RewriteScanResult = string[];
17
17
 
18
- /** A single subject and the read_memory matches found for it. */
18
+ /** A single subject and the find_memory matches found for it. */
19
19
  export interface RewriteSubjectMatch {
20
20
  searchTerm: string;
21
21
  matches: DataItemBase[]; // Top 3 from searchHumanData, may be empty
@@ -93,6 +93,19 @@ If you are unsure of the type, use \`Nickname\` as a fallback. Do NOT invent typ
93
93
 
94
94
  Only include \`identifiers\` when explicitly mentioned in the conversation — omit it entirely if nothing qualifies.
95
95
 
96
+ ## Confidence & Relationship Type
97
+
98
+ For each person, rate how important they are to the human user's life:
99
+
100
+ - \`confidence\`: integer 1–5
101
+ - 1–2 = mentioned in passing, single event, no ongoing relevance
102
+ - 3 = unclear significance — may matter, may not
103
+ - 4–5 = clearly important, recurring presence, meaningful relationship
104
+ - \`relationship_type\`: one of \`"family"\` | \`"friend"\` | \`"colleague"\` | \`"acquaintance"\` | \`"transactional"\` | \`"unknown"\`
105
+ - Use \`"transactional"\` when the person appeared only in the context of a single transaction (purchase, sale, support ticket, delivery)
106
+
107
+ Use the full range. Most extractions should score 1–3. A confidence of 4–5 means this person genuinely matters to the user's life.
108
+
96
109
  ## Output Format
97
110
 
98
111
  \`\`\`json
@@ -105,6 +118,8 @@ Only include \`identifiers\` when explicitly mentioned in the conversation — o
105
118
  ],
106
119
  "description": "1-2 sentences: who this person is and their role in the user's life",
107
120
  "relationship": "Father|Mother|Brother|Son|Friend|Coworker|Self|etc.",
121
+ "relationship_type": "family|friend|colleague|acquaintance|transactional|unknown",
122
+ "confidence": 4,
108
123
  "reason": "Evidence from the conversation that justified flagging this person"
109
124
  }
110
125
  ]
@@ -143,6 +158,8 @@ Scan the "Most Recent Messages" for PEOPLE in the human user's life.
143
158
  "identifiers": [{ "type": "GitHub", "value": "handle" }],
144
159
  "description": "1-2 sentences: who this person is and their role in the user's life",
145
160
  "relationship": "Father|Mother|Brother|Son|Friend|Coworker|Self|etc.",
161
+ "relationship_type": "family|friend|colleague|acquaintance|transactional|unknown",
162
+ "confidence": 4,
146
163
  "reason": "Evidence from the conversation that justified flagging this person"
147
164
  }
148
165
  ]
@@ -55,11 +55,15 @@ export interface TopicScanCandidate {
55
55
  reason: string;
56
56
  }
57
57
 
58
+ export type PersonRelationshipType = "family" | "friend" | "colleague" | "acquaintance" | "transactional" | "unknown";
59
+
58
60
  export interface PersonScanCandidate {
59
61
  name: string;
60
62
  identifiers?: Array<{ type: string; value: string; is_primary?: boolean }>;
61
63
  description: string;
62
64
  relationship: string;
65
+ relationship_type?: PersonRelationshipType;
66
+ confidence?: number;
63
67
  reason: string;
64
68
  }
65
69
 
@@ -464,21 +464,28 @@ export function buildTemporalAnchorsSection(anchors: TemporalAnchor[], humanName
464
464
 
465
465
  const formatted = anchors.map(a => {
466
466
  const speaker = a.role === "human" ? humanName : "You";
467
- let text: string;
468
- if (a._synthesis && a.content) {
469
- text = `[${humanName} used your conversation to generate an image. The full prompt was: "${a.content}"]`;
467
+ let preview: string;
468
+ if (a._synthesis) {
469
+ const raw = a.content ?? "";
470
+ const firstSentenceEnd = raw.search(/\.\s/);
471
+ const snippet = firstSentenceEnd > 0 && firstSentenceEnd <= 120
472
+ ? raw.slice(0, firstSentenceEnd + 1)
473
+ : raw.slice(0, 100);
474
+ preview = `[${humanName} generated an image: "${snippet}…"]`;
470
475
  } else if (a.silence_reason) {
471
476
  const silentParty = a.role === "human" ? humanName : "You";
472
- text = `${silentParty} chose not to respond because: ${a.silence_reason}`;
477
+ const truncated = a.silence_reason.length > 80 ? `${a.silence_reason.slice(0, 80)}…` : a.silence_reason;
478
+ preview = `${silentParty} chose not to respond: "${truncated}"`;
473
479
  } else {
474
- text = a.content ?? "";
480
+ const raw = a.content ?? "";
481
+ preview = raw.length > 80 ? `${raw.slice(0, 80)}…` : raw;
475
482
  }
476
- return `[${formatTimestamp(a.timestamp)}] ${speaker}: ${text}`;
483
+ return `[${formatTimestamp(a.timestamp)}] ${speaker}: ${preview}\n → fetch_message("${a.id}") for full content`;
477
484
  }).join("\n\n");
478
485
 
479
486
  return `## Temporal Anchors
480
487
 
481
- These are pinned moments from your shared history — preserved across context windows as part of who you are:
488
+ Pinned moments from your shared history. These are snapshots — use fetch_message(id) if one feels relevant to pull the full memory:
482
489
 
483
490
  ${formatted}`;
484
491
  }
@@ -8,6 +8,7 @@ import type { ToolDefinition } from "../../core/types.js";
8
8
  import type { PersonaEntity } from "../../core/types/entities.js";
9
9
 
10
10
  export interface TemporalAnchor {
11
+ id: string;
11
12
  role: "human" | "system";
12
13
  content?: string;
13
14
  silence_reason?: string;
@@ -1,70 +0,0 @@
1
- import type { ToolExecutor } from "../types.js";
2
- import type { Fact, Topic, Person, Quote } from "../../types.js";
3
-
4
- interface PersonaSummary {
5
- id: string;
6
- display_name: string;
7
- }
8
-
9
- type SearchHumanData = (
10
- query: string,
11
- options?: { types?: Array<"fact" | "topic" | "person" | "quote">; limit?: number; recent?: boolean; persona_filter?: string }
12
- ) => Promise<{ facts: Fact[]; topics: Topic[]; people: Person[]; quotes: Quote[] }>;
13
-
14
- type GetPersonaList = () => Promise<PersonaSummary[]>;
15
-
16
- export function createReadMemoryExecutor(searchHumanData: SearchHumanData, getPersonaList?: GetPersonaList): ToolExecutor {
17
- return {
18
- name: "read_memory",
19
-
20
- async execute(args: Record<string, unknown>): Promise<string> {
21
- const query = typeof args.query === "string" ? args.query.trim() : "";
22
- const recent = args.recent === true;
23
- const personaArg = typeof args.persona === "string" ? args.persona.trim() : "";
24
- console.log(`[read_memory] called with query="${query}", types=${JSON.stringify(args.types ?? null)}, limit=${args.limit ?? 10}, recent=${recent}, persona="${personaArg}"`);
25
-
26
- if (!query && !recent) {
27
- console.warn("[read_memory] missing query argument");
28
- return JSON.stringify({ error: "Missing required argument: query (or use recent: true)" });
29
- }
30
-
31
- const types = Array.isArray(args.types)
32
- ? (args.types.filter(
33
- t => typeof t === "string" && ["fact", "topic", "person", "quote"].includes(t)
34
- ) as Array<"fact" | "topic" | "person" | "quote">)
35
- : undefined;
36
-
37
- const limit = typeof args.limit === "number" && args.limit > 0 ? Math.min(args.limit, 20) : 10;
38
-
39
- // Resolve persona display_name to ID
40
- let persona_filter: string | undefined;
41
- if (personaArg && getPersonaList) {
42
- const personas = await getPersonaList();
43
- const match = personas.find(p => p.display_name.toLowerCase() === personaArg.toLowerCase());
44
- if (match) {
45
- persona_filter = match.id;
46
- console.log(`[read_memory] resolved persona "${personaArg}" to ID "${persona_filter}"`);
47
- } else {
48
- console.warn(`[read_memory] persona "${personaArg}" not found, proceeding without filter`);
49
- }
50
- }
51
-
52
- const results = await searchHumanData(query, { types, limit, recent, persona_filter });
53
-
54
- const total = results.facts.length + results.topics.length + results.people.length + results.quotes.length;
55
- console.log(`[read_memory] query="${query}" => ${total} results (facts=${results.facts.length}, topics=${results.topics.length}, people=${results.people.length}, quotes=${results.quotes.length})`);
56
-
57
- const output: Record<string, unknown[]> = {};
58
- if (results.facts.length > 0) output.facts = results.facts.map(f => ({ name: f.name, description: f.description }));
59
- if (results.topics.length > 0) output.topics = results.topics.map(t => ({ name: t.name, description: t.description }));
60
- if (results.people.length > 0) output.people = results.people.map(p => ({ name: p.name, relationship: p.relationship, description: p.description, identifiers: p.identifiers ?? [] }));
61
- if (results.quotes.length > 0) output.quotes = results.quotes.map(q => ({ text: q.text, speaker: q.speaker }));
62
-
63
- if (Object.keys(output).length === 0) {
64
- return JSON.stringify({ result: "No relevant memories found for this query." });
65
- }
66
-
67
- return JSON.stringify(output);
68
- },
69
- };
70
- }