ei-tui 0.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +170 -0
- package/package.json +63 -0
- package/src/README.md +96 -0
- package/src/cli/README.md +47 -0
- package/src/cli/commands/facts.ts +25 -0
- package/src/cli/commands/people.ts +25 -0
- package/src/cli/commands/quotes.ts +19 -0
- package/src/cli/commands/topics.ts +25 -0
- package/src/cli/commands/traits.ts +25 -0
- package/src/cli/retrieval.ts +269 -0
- package/src/cli.ts +176 -0
- package/src/core/AGENTS.md +104 -0
- package/src/core/embedding-service.ts +241 -0
- package/src/core/handlers/index.ts +1057 -0
- package/src/core/index.ts +4 -0
- package/src/core/llm-client.ts +265 -0
- package/src/core/model-context-windows.ts +49 -0
- package/src/core/orchestrators/ceremony.ts +500 -0
- package/src/core/orchestrators/extraction-chunker.ts +138 -0
- package/src/core/orchestrators/human-extraction.ts +457 -0
- package/src/core/orchestrators/index.ts +28 -0
- package/src/core/orchestrators/persona-generation.ts +76 -0
- package/src/core/orchestrators/persona-topics.ts +117 -0
- package/src/core/personas/index.ts +5 -0
- package/src/core/personas/opencode-agent.ts +81 -0
- package/src/core/processor.ts +1413 -0
- package/src/core/queue-processor.ts +197 -0
- package/src/core/state/checkpoints.ts +68 -0
- package/src/core/state/human.ts +176 -0
- package/src/core/state/index.ts +5 -0
- package/src/core/state/personas.ts +217 -0
- package/src/core/state/queue.ts +144 -0
- package/src/core/state-manager.ts +347 -0
- package/src/core/types.ts +421 -0
- package/src/core/utils/decay.ts +33 -0
- package/src/index.ts +1 -0
- package/src/integrations/opencode/importer.ts +896 -0
- package/src/integrations/opencode/index.ts +16 -0
- package/src/integrations/opencode/json-reader.ts +304 -0
- package/src/integrations/opencode/reader-factory.ts +35 -0
- package/src/integrations/opencode/sqlite-reader.ts +189 -0
- package/src/integrations/opencode/types.ts +244 -0
- package/src/prompts/AGENTS.md +62 -0
- package/src/prompts/ceremony/description-check.ts +47 -0
- package/src/prompts/ceremony/expire.ts +30 -0
- package/src/prompts/ceremony/explore.ts +60 -0
- package/src/prompts/ceremony/index.ts +11 -0
- package/src/prompts/ceremony/types.ts +42 -0
- package/src/prompts/generation/descriptions.ts +91 -0
- package/src/prompts/generation/index.ts +15 -0
- package/src/prompts/generation/persona.ts +155 -0
- package/src/prompts/generation/seeds.ts +31 -0
- package/src/prompts/generation/types.ts +47 -0
- package/src/prompts/heartbeat/check.ts +179 -0
- package/src/prompts/heartbeat/ei.ts +208 -0
- package/src/prompts/heartbeat/index.ts +15 -0
- package/src/prompts/heartbeat/types.ts +70 -0
- package/src/prompts/human/fact-scan.ts +152 -0
- package/src/prompts/human/index.ts +32 -0
- package/src/prompts/human/item-match.ts +74 -0
- package/src/prompts/human/item-update.ts +322 -0
- package/src/prompts/human/person-scan.ts +115 -0
- package/src/prompts/human/topic-scan.ts +135 -0
- package/src/prompts/human/trait-scan.ts +115 -0
- package/src/prompts/human/types.ts +127 -0
- package/src/prompts/index.ts +90 -0
- package/src/prompts/message-utils.ts +39 -0
- package/src/prompts/persona/index.ts +16 -0
- package/src/prompts/persona/topics-match.ts +69 -0
- package/src/prompts/persona/topics-scan.ts +98 -0
- package/src/prompts/persona/topics-update.ts +157 -0
- package/src/prompts/persona/traits.ts +117 -0
- package/src/prompts/persona/types.ts +74 -0
- package/src/prompts/response/index.ts +147 -0
- package/src/prompts/response/sections.ts +355 -0
- package/src/prompts/response/types.ts +38 -0
- package/src/prompts/validation/ei.ts +93 -0
- package/src/prompts/validation/index.ts +6 -0
- package/src/prompts/validation/types.ts +22 -0
- package/src/storage/crypto.ts +96 -0
- package/src/storage/index.ts +5 -0
- package/src/storage/interface.ts +9 -0
- package/src/storage/local.ts +79 -0
- package/src/storage/merge.ts +69 -0
- package/src/storage/remote.ts +145 -0
- package/src/templates/welcome.ts +91 -0
- package/tui/README.md +62 -0
- package/tui/bunfig.toml +4 -0
- package/tui/src/app.tsx +55 -0
- package/tui/src/commands/archive.tsx +93 -0
- package/tui/src/commands/context.tsx +124 -0
- package/tui/src/commands/delete.tsx +71 -0
- package/tui/src/commands/details.tsx +41 -0
- package/tui/src/commands/editor.tsx +46 -0
- package/tui/src/commands/help.tsx +12 -0
- package/tui/src/commands/me.tsx +145 -0
- package/tui/src/commands/model.ts +47 -0
- package/tui/src/commands/new.ts +31 -0
- package/tui/src/commands/pause.ts +46 -0
- package/tui/src/commands/persona.tsx +58 -0
- package/tui/src/commands/provider.tsx +124 -0
- package/tui/src/commands/quit.ts +22 -0
- package/tui/src/commands/quotes.tsx +172 -0
- package/tui/src/commands/registry.test.ts +137 -0
- package/tui/src/commands/registry.ts +130 -0
- package/tui/src/commands/resume.ts +39 -0
- package/tui/src/commands/setsync.tsx +43 -0
- package/tui/src/commands/settings.tsx +83 -0
- package/tui/src/components/ConfirmOverlay.tsx +51 -0
- package/tui/src/components/ConflictOverlay.tsx +78 -0
- package/tui/src/components/HelpOverlay.tsx +69 -0
- package/tui/src/components/Layout.tsx +24 -0
- package/tui/src/components/MessageList.tsx +174 -0
- package/tui/src/components/PersonaListOverlay.tsx +186 -0
- package/tui/src/components/PromptInput.tsx +145 -0
- package/tui/src/components/ProviderListOverlay.tsx +208 -0
- package/tui/src/components/QuotesOverlay.tsx +157 -0
- package/tui/src/components/Sidebar.tsx +95 -0
- package/tui/src/components/StatusBar.tsx +77 -0
- package/tui/src/components/WelcomeOverlay.tsx +73 -0
- package/tui/src/context/ei.tsx +623 -0
- package/tui/src/context/keyboard.tsx +164 -0
- package/tui/src/context/overlay.tsx +53 -0
- package/tui/src/index.tsx +8 -0
- package/tui/src/storage/file.ts +185 -0
- package/tui/src/util/duration.ts +32 -0
- package/tui/src/util/editor.ts +188 -0
- package/tui/src/util/logger.ts +109 -0
- package/tui/src/util/persona-editor.tsx +181 -0
- package/tui/src/util/provider-editor.tsx +168 -0
- package/tui/src/util/syntax.ts +35 -0
- package/tui/src/util/yaml-serializers.ts +755 -0
|
@@ -0,0 +1,265 @@
|
|
|
1
|
+
import type { ChatMessage, ProviderAccount } from "./types.js";
|
|
2
|
+
import { getKnownContextWindow, DEFAULT_TOKEN_LIMIT } from "./model-context-windows.js";
|
|
3
|
+
|
|
4
|
+
/** Connection details for an OpenAI-compatible chat-completions endpoint. */
export interface ProviderConfig {
  /** Base URL of the API; trailing slashes are stripped before requests are built. */
  baseURL: string;
  /** Bearer token; when empty, no Authorization header is sent. */
  apiKey: string;
  /** Display name of the provider account. */
  name: string;
}

/** Result of resolving a "provider:model" spec against configured accounts. */
export interface ResolvedModel {
  /** Name of the matched provider account. */
  provider: string;
  /** Concrete model identifier to send to the API. */
  model: string;
  /** Endpoint configuration derived from the matched account. */
  config: ProviderConfig;
  /** Optional extra HTTP headers from the account, merged into each request. */
  extraHeaders?: Record<string, string>;
}

/** Per-call options for callLLMRaw. */
export interface LLMCallOptions {
  /** Abort signal; checked before dispatch and passed through to fetch. */
  signal?: AbortSignal;
  /** Sampling temperature; defaults to 0.7 when omitted. */
  temperature?: number;
}

/** Minimal view of the first choice in a chat-completions response. */
export interface LLMRawResponse {
  /** Message content, or null when the API returned none. */
  content: string | null;
  /** finish_reason reported by the API, or null when absent. */
  finishReason: string | null;
}
|
|
26
|
+
|
|
27
|
+
let llmCallCount = 0;
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
export function resolveModel(modelSpec?: string, accounts?: ProviderAccount[]): ResolvedModel {
|
|
32
|
+
if (!modelSpec) {
|
|
33
|
+
throw new Error("No model specified. Set a provider on this persona with /provider, or set a default_model in settings.");
|
|
34
|
+
}
|
|
35
|
+
let provider = "";
|
|
36
|
+
let model = modelSpec;
|
|
37
|
+
|
|
38
|
+
if (modelSpec.includes(":")) {
|
|
39
|
+
const [p, ...rest] = modelSpec.split(":");
|
|
40
|
+
provider = p;
|
|
41
|
+
model = rest.join(":");
|
|
42
|
+
}
|
|
43
|
+
// Try to find matching account by name (case-insensitive)
|
|
44
|
+
// Check both "provider:model" format AND bare account names
|
|
45
|
+
if (accounts) {
|
|
46
|
+
const searchName = provider || modelSpec; // If no ":", the whole spec might be an account name
|
|
47
|
+
const matchingAccount = accounts.find(
|
|
48
|
+
(acc) => acc.name.toLowerCase() === searchName.toLowerCase() && acc.enabled
|
|
49
|
+
);
|
|
50
|
+
if (matchingAccount) {
|
|
51
|
+
// If bare account name was used, get model from account's default_model
|
|
52
|
+
const resolvedModel = provider ? model : (matchingAccount.default_model || model);
|
|
53
|
+
return {
|
|
54
|
+
provider: matchingAccount.name,
|
|
55
|
+
model: resolvedModel,
|
|
56
|
+
config: {
|
|
57
|
+
name: matchingAccount.name,
|
|
58
|
+
baseURL: matchingAccount.url,
|
|
59
|
+
apiKey: matchingAccount.api_key || "",
|
|
60
|
+
},
|
|
61
|
+
extraHeaders: matchingAccount.extra_headers,
|
|
62
|
+
};
|
|
63
|
+
}
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
throw new Error(
|
|
67
|
+
`No provider "${provider || modelSpec}" found. Create one with /provider new, or check that it's enabled.`
|
|
68
|
+
);
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
const tokenLimitLoggedModels = new Set<string>();
|
|
72
|
+
|
|
73
|
+
export function resolveTokenLimit(
|
|
74
|
+
modelSpec?: string,
|
|
75
|
+
accounts?: ProviderAccount[]
|
|
76
|
+
): number {
|
|
77
|
+
const spec = modelSpec || "";
|
|
78
|
+
|
|
79
|
+
let provider = "";
|
|
80
|
+
let model = spec;
|
|
81
|
+
if (spec.includes(":")) {
|
|
82
|
+
const [p, ...rest] = spec.split(":");
|
|
83
|
+
provider = p;
|
|
84
|
+
model = rest.join(":");
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
// 1. User override on matching account
|
|
88
|
+
if (accounts) {
|
|
89
|
+
const searchName = provider || spec;
|
|
90
|
+
const matchingAccount = accounts.find(
|
|
91
|
+
(acc) => acc.name.toLowerCase() === searchName.toLowerCase() && acc.enabled
|
|
92
|
+
);
|
|
93
|
+
if (matchingAccount?.token_limit) {
|
|
94
|
+
logTokenLimit(model, "user-override", matchingAccount.token_limit);
|
|
95
|
+
return matchingAccount.token_limit;
|
|
96
|
+
}
|
|
97
|
+
if (matchingAccount && !provider) {
|
|
98
|
+
model = matchingAccount.default_model || model;
|
|
99
|
+
}
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
// 2. Lookup table
|
|
103
|
+
const known = getKnownContextWindow(model);
|
|
104
|
+
if (known) {
|
|
105
|
+
logTokenLimit(model, "lookup-table", known);
|
|
106
|
+
return known;
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
// 3. Conservative default
|
|
110
|
+
logTokenLimit(model, "default", DEFAULT_TOKEN_LIMIT);
|
|
111
|
+
return DEFAULT_TOKEN_LIMIT;
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
function logTokenLimit(model: string, source: string, tokens: number): void {
|
|
115
|
+
if (tokenLimitLoggedModels.has(model)) return;
|
|
116
|
+
tokenLimitLoggedModels.add(model);
|
|
117
|
+
|
|
118
|
+
const budget = Math.floor(tokens * 0.75);
|
|
119
|
+
if (source === "default") {
|
|
120
|
+
console.warn(`[TokenLimit] Unknown model "${model}" — using conservative default (${DEFAULT_TOKEN_LIMIT})`);
|
|
121
|
+
} else {
|
|
122
|
+
console.log(`[TokenLimit] ${model}: ${source} → ${tokens} tokens (extraction budget: ${budget})`);
|
|
123
|
+
}
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
export async function callLLMRaw(
|
|
127
|
+
systemPrompt: string,
|
|
128
|
+
userPrompt: string,
|
|
129
|
+
messages: ChatMessage[] = [],
|
|
130
|
+
modelSpec?: string,
|
|
131
|
+
options: LLMCallOptions = {},
|
|
132
|
+
accounts?: ProviderAccount[]
|
|
133
|
+
): Promise<LLMRawResponse> {
|
|
134
|
+
llmCallCount++;
|
|
135
|
+
|
|
136
|
+
const { signal, temperature = 0.7 } = options;
|
|
137
|
+
|
|
138
|
+
if (signal?.aborted) {
|
|
139
|
+
throw new Error("LLM call aborted");
|
|
140
|
+
}
|
|
141
|
+
|
|
142
|
+
const { model, config, extraHeaders } = resolveModel(modelSpec, accounts);
|
|
143
|
+
|
|
144
|
+
const chatMessages: ChatMessage[] = [
|
|
145
|
+
{ role: "system", content: systemPrompt },
|
|
146
|
+
...messages,
|
|
147
|
+
{ role: "user", content: userPrompt },
|
|
148
|
+
];
|
|
149
|
+
|
|
150
|
+
const finalMessages = ensureUserFirst(chatMessages);
|
|
151
|
+
|
|
152
|
+
if (finalMessages.length !== chatMessages.length) {
|
|
153
|
+
console.log(`[LLM] Injected user-first placeholder (${chatMessages.length} → ${finalMessages.length} messages)`);
|
|
154
|
+
}
|
|
155
|
+
|
|
156
|
+
const totalChars = finalMessages.reduce((sum, m) => sum + m.content.length, 0);
|
|
157
|
+
const estimatedTokens = Math.ceil(totalChars / 4);
|
|
158
|
+
console.log(`[LLM] Call #${llmCallCount} - ~${estimatedTokens} tokens (${totalChars} chars)`);
|
|
159
|
+
|
|
160
|
+
const normalizedBaseURL = config.baseURL.replace(/\/+$/, "");
|
|
161
|
+
|
|
162
|
+
const headers: Record<string, string> = {
|
|
163
|
+
"Content-Type": "application/json",
|
|
164
|
+
...(config.apiKey ? { Authorization: `Bearer ${config.apiKey}` } : {}),
|
|
165
|
+
...(extraHeaders || {}),
|
|
166
|
+
};
|
|
167
|
+
|
|
168
|
+
// Anthropic requires this header for browser-based CORS access
|
|
169
|
+
if (normalizedBaseURL.includes("anthropic.com")) {
|
|
170
|
+
headers["anthropic-dangerous-direct-browser-access"] = "true";
|
|
171
|
+
}
|
|
172
|
+
|
|
173
|
+
const response = await fetch(`${normalizedBaseURL}/chat/completions`, {
|
|
174
|
+
method: "POST",
|
|
175
|
+
headers,
|
|
176
|
+
body: JSON.stringify({
|
|
177
|
+
model,
|
|
178
|
+
messages: finalMessages,
|
|
179
|
+
temperature,
|
|
180
|
+
}),
|
|
181
|
+
signal,
|
|
182
|
+
});
|
|
183
|
+
|
|
184
|
+
if (!response.ok) {
|
|
185
|
+
const errorText = await response.text();
|
|
186
|
+
throw new Error(`LLM API error (${response.status}): ${errorText}`);
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
const data = await response.json();
|
|
190
|
+
const choice = data.choices?.[0];
|
|
191
|
+
|
|
192
|
+
return {
|
|
193
|
+
content: choice?.message?.content ?? null,
|
|
194
|
+
finishReason: choice?.finish_reason ?? null,
|
|
195
|
+
};
|
|
196
|
+
}
|
|
197
|
+
|
|
198
|
+
/**
|
|
199
|
+
* Ensures the message array starts with a user message after system.
|
|
200
|
+
* Some models (Gemma, Mistral) require system → user → assistant ordering.
|
|
201
|
+
*/
|
|
202
|
+
function ensureUserFirst(messages: ChatMessage[]): ChatMessage[] {
|
|
203
|
+
if (messages.length === 0) return [];
|
|
204
|
+
|
|
205
|
+
const result = [...messages];
|
|
206
|
+
|
|
207
|
+
if (result[0].role === "system" && result.length > 1 && result[1].role === "assistant") {
|
|
208
|
+
result.splice(1, 0, { role: "user", content: "(conversation start)" });
|
|
209
|
+
}
|
|
210
|
+
|
|
211
|
+
return result;
|
|
212
|
+
}
|
|
213
|
+
|
|
214
|
+
const JSON_REPAIR_PATTERNS: Array<{ pattern: RegExp; replacement: string }> = [
|
|
215
|
+
{ pattern: /\/\/[^\n]*/g, replacement: "" },
|
|
216
|
+
{ pattern: /\\'/g, replacement: "'" },
|
|
217
|
+
{ pattern: /:\s*(\d{4}-\d{2}-\d{2}T[^"}\],\n]+)/g, replacement: ': "$1"' },
|
|
218
|
+
{ pattern: /:\s*0([1-9][0-9]*)([,\s\n\r\]}])/g, replacement: ": 0.$1$2" },
|
|
219
|
+
{ pattern: /,(\s*[\]}])/g, replacement: "$1" },
|
|
220
|
+
];
|
|
221
|
+
|
|
222
|
+
export function repairJSON(jsonStr: string): string {
|
|
223
|
+
let repaired = JSON_REPAIR_PATTERNS.reduce(
|
|
224
|
+
(str, { pattern, replacement }) => str.replace(pattern, replacement),
|
|
225
|
+
jsonStr
|
|
226
|
+
);
|
|
227
|
+
|
|
228
|
+
const quoteCount = (repaired.match(/(?<!\\)"/g) || []).length;
|
|
229
|
+
if (quoteCount % 2 !== 0) {
|
|
230
|
+
repaired += '"';
|
|
231
|
+
}
|
|
232
|
+
|
|
233
|
+
const openBraces = (repaired.match(/{/g) || []).length;
|
|
234
|
+
const closeBraces = (repaired.match(/}/g) || []).length;
|
|
235
|
+
const openBrackets = (repaired.match(/\[/g) || []).length;
|
|
236
|
+
const closeBrackets = (repaired.match(/\]/g) || []).length;
|
|
237
|
+
|
|
238
|
+
for (let i = 0; i < openBrackets - closeBrackets; i++) {
|
|
239
|
+
repaired += "]";
|
|
240
|
+
}
|
|
241
|
+
for (let i = 0; i < openBraces - closeBraces; i++) {
|
|
242
|
+
repaired += "}";
|
|
243
|
+
}
|
|
244
|
+
|
|
245
|
+
return repaired;
|
|
246
|
+
}
|
|
247
|
+
|
|
248
|
+
export function parseJSONResponse(content: string): unknown {
|
|
249
|
+
const jsonMatch = content.match(/```(?:json)?\s*([\s\S]*?)```/);
|
|
250
|
+
const jsonStr = jsonMatch ? jsonMatch[1].trim() : content.trim();
|
|
251
|
+
|
|
252
|
+
try {
|
|
253
|
+
return JSON.parse(jsonStr);
|
|
254
|
+
} catch {
|
|
255
|
+
const repaired = repairJSON(jsonStr);
|
|
256
|
+
return JSON.parse(repaired);
|
|
257
|
+
}
|
|
258
|
+
}
|
|
259
|
+
|
|
260
|
+
export function cleanResponseContent(content: string): string {
|
|
261
|
+
return content
|
|
262
|
+
.replace(/<think>[\s\S]*?<\/think>/gi, "")
|
|
263
|
+
.replace(/<thinking>[\s\S]*?<\/thinking>/gi, "")
|
|
264
|
+
.trim();
|
|
265
|
+
}
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
// Last updated: 2026-02-22
|
|
2
|
+
// Prefix-based lookup: "gpt-4o" matches "gpt-4o", "gpt-4o-2024-08-06", "gpt-4o-mini", etc.
|
|
3
|
+
const KNOWN_CONTEXT_WINDOWS: [string, number][] = [
|
|
4
|
+
// OpenAI
|
|
5
|
+
["gpt-4.1", 1_048_576],
|
|
6
|
+
["gpt-4o", 128_000],
|
|
7
|
+
["gpt-3.5-turbo", 16_384],
|
|
8
|
+
|
|
9
|
+
// Anthropic
|
|
10
|
+
["claude-opus-4", 200_000],
|
|
11
|
+
["claude-sonnet-4", 200_000],
|
|
12
|
+
["claude-3.5", 200_000],
|
|
13
|
+
["claude-3", 200_000],
|
|
14
|
+
|
|
15
|
+
// Google
|
|
16
|
+
["gemini-2.5", 1_000_000],
|
|
17
|
+
["gemini-2.0", 1_000_000],
|
|
18
|
+
["gemini-1.5", 1_000_000],
|
|
19
|
+
|
|
20
|
+
// Meta Llama
|
|
21
|
+
["llama-3.3", 131_072],
|
|
22
|
+
["llama-3.2", 131_072],
|
|
23
|
+
["llama-3.1", 131_072],
|
|
24
|
+
|
|
25
|
+
// Mistral
|
|
26
|
+
["mixtral", 32_768],
|
|
27
|
+
["mistral", 32_768],
|
|
28
|
+
|
|
29
|
+
// DeepSeek
|
|
30
|
+
["deepseek-coder-v2", 163_840],
|
|
31
|
+
["deepseek-v3", 131_072],
|
|
32
|
+
["deepseek", 131_072],
|
|
33
|
+
|
|
34
|
+
// Qwen
|
|
35
|
+
["qwen-2.5", 131_072],
|
|
36
|
+
["qwen", 131_072],
|
|
37
|
+
];
|
|
38
|
+
|
|
39
|
+
const DEFAULT_TOKEN_LIMIT = 8192;
|
|
40
|
+
|
|
41
|
+
export function getKnownContextWindow(modelName: string): number | undefined {
|
|
42
|
+
const lower = modelName.toLowerCase();
|
|
43
|
+
for (const [prefix, tokens] of KNOWN_CONTEXT_WINDOWS) {
|
|
44
|
+
if (lower.startsWith(prefix.toLowerCase())) return tokens;
|
|
45
|
+
}
|
|
46
|
+
return undefined;
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
export { DEFAULT_TOKEN_LIMIT };
|