ei-tui 0.6.5 → 0.6.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "ei-tui",
3
- "version": "0.6.5",
3
+ "version": "0.6.6",
4
4
  "author": "Flare576",
5
5
  "repository": {
6
6
  "type": "git",
@@ -51,9 +51,10 @@ function isGuid(str: string): boolean {
51
51
  }
52
52
 
53
53
  function buildResolvedModel(account: ProviderAccount, model: ModelConfig): ResolvedModel {
54
+ const apiModelId = model.model_id ?? model.name;
54
55
  return {
55
56
  provider: account.name,
56
- model: model.name === "(default)" ? undefined : model.name,
57
+ model: apiModelId === "(default)" ? undefined : apiModelId,
57
58
  config: {
58
59
  name: account.name,
59
60
  baseURL: account.url,
@@ -164,10 +165,16 @@ function findModelAndAccount(
164
165
  const model = account?.models?.find((m) => m.name === modelName);
165
166
  return { model, account };
166
167
  }
168
+ // Try matching by model UUID first
167
169
  for (const account of accounts) {
168
170
  const model = account.models?.find((m) => m.id === spec);
169
171
  if (model) return { model, account };
170
172
  }
173
+ // Fall back to matching by account name (bare spec like "EG" or "RnP")
174
+ const accountByName = accounts.find(
175
+ (a) => a.name.toLowerCase() === spec.toLowerCase() && a.enabled
176
+ );
177
+ if (accountByName) return { model: undefined, account: accountByName };
171
178
  return { model: undefined, account: undefined };
172
179
  }
173
180
 
@@ -265,6 +272,10 @@ export async function callLLMRaw(
265
272
  max_tokens: modelConfig?.max_output_tokens ?? DEFAULT_MAX_OUTPUT_TOKENS,
266
273
  };
267
274
 
275
+ if (modelConfig?.thinking_budget !== undefined) {
276
+ requestBody.think = { budget_tokens: modelConfig.thinking_budget };
277
+ }
278
+
268
279
  if (options.tools && options.tools.length > 0) {
269
280
  requestBody.tools = options.tools;
270
281
  requestBody.tool_choice = "auto";
@@ -1130,6 +1130,9 @@ const toolNextSteps = new Set([
1130
1130
  });
1131
1131
  }
1132
1132
  },
1133
+ onUsageUpdate: (modelId, usage) => {
1134
+ this.stateManager.model_update_usage(modelId, usage);
1135
+ },
1133
1136
  }
1134
1137
  );
1135
1138
 
@@ -37,9 +37,10 @@ export interface QueueProcessorStartOptions {
37
37
  onEnqueue?: EnqueueCallback;
38
38
  /**
39
39
  * Called when a tool executor updates its provider config (e.g. Spotify refresh token rotation).
40
- * Injected by Processor to persist the updated config back to storage.
40
+ * Injected by Processor to persist the updated provider config back to storage.
41
41
  */
42
42
  onProviderConfigUpdate?: (providerId: string, updates: Record<string, string>) => void;
43
+ onUsageUpdate?: (modelId: string, usage: { calls: number; tokens_in: number; tokens_out: number }) => void;
43
44
  }
44
45
 
45
46
  export class QueueProcessor {
@@ -52,6 +53,7 @@ export class QueueProcessor {
52
53
  private currentTools: ToolDefinition[] | undefined;
53
54
  private currentOnEnqueue: EnqueueCallback | undefined;
54
55
  private currentOnProviderConfigUpdate: ((providerId: string, updates: Record<string, string>) => void) | undefined;
56
+ private currentOnUsageUpdate: ((modelId: string, usage: { calls: number; tokens_in: number; tokens_out: number }) => void) | undefined;
55
57
 
56
58
  getState(): QueueProcessorState {
57
59
  return this.state;
@@ -70,6 +72,7 @@ export class QueueProcessor {
70
72
  this.currentTools = options?.tools;
71
73
  this.currentOnEnqueue = options?.onEnqueue;
72
74
  this.currentOnProviderConfigUpdate = options?.onProviderConfigUpdate;
75
+ this.currentOnUsageUpdate = options?.onUsageUpdate;
73
76
  this.abortController = new AbortController();
74
77
 
75
78
  this.processRequest(request)
@@ -197,7 +200,7 @@ export class QueueProcessor {
197
200
  hydratedUser,
198
201
  messages,
199
202
  request.model,
200
- { signal: this.abortController?.signal, tools: openAITools },
203
+ { signal: this.abortController?.signal, tools: openAITools, onUsageUpdate: this.currentOnUsageUpdate },
201
204
  this.currentAccounts
202
205
  );
203
206
 
@@ -304,7 +307,7 @@ export class QueueProcessor {
304
307
  hydratedUser,
305
308
  messages,
306
309
  request.model,
307
- { signal: this.abortController?.signal, tools: openAITools },
310
+ { signal: this.abortController?.signal, tools: openAITools, onUsageUpdate: this.currentOnUsageUpdate },
308
311
  this.currentAccounts
309
312
  );
310
313
  if (thinking) {
@@ -496,7 +499,7 @@ export class QueueProcessor {
496
499
  reformatUserPrompt,
497
500
  messages, // existing tool history — gives full context without duplicating the ask
498
501
  request.model,
499
- { signal: this.abortController?.signal },
502
+ { signal: this.abortController?.signal, onUsageUpdate: this.currentOnUsageUpdate },
500
503
  this.currentAccounts
501
504
  );
502
505
 
@@ -553,7 +556,7 @@ export class QueueProcessor {
553
556
  reformatUserPrompt,
554
557
  [], // no message history needed — schema is already in the system prompt
555
558
  request.model,
556
- { signal: this.abortController?.signal },
559
+ { signal: this.abortController?.signal, onUsageUpdate: this.currentOnUsageUpdate },
557
560
  this.currentAccounts
558
561
  );
559
562
 
@@ -1064,6 +1064,24 @@ export class StateManager {
1064
1064
  return { success: true, cleared };
1065
1065
  }
1066
1066
 
1067
+ model_update_usage(modelId: string, delta: { calls: number; tokens_in: number; tokens_out: number }): void {
1068
+ const human = this.humanState.get();
1069
+ const accounts = human.settings?.accounts;
1070
+ if (!accounts) return;
1071
+
1072
+ for (const account of accounts) {
1073
+ const model = account.models?.find(m => m.id === modelId);
1074
+ if (model) {
1075
+ model.total_calls = (model.total_calls ?? 0) + delta.calls;
1076
+ model.total_tokens_in = (model.total_tokens_in ?? 0) + delta.tokens_in;
1077
+ model.total_tokens_out = (model.total_tokens_out ?? 0) + delta.tokens_out;
1078
+ model.last_used = new Date().toISOString();
1079
+ this.scheduleSave();
1080
+ return;
1081
+ }
1082
+ }
1083
+ }
1084
+
1067
1085
  async flush(): Promise<void> {
1068
1086
  await this.persistenceState.flush();
1069
1087
  }
@@ -44,9 +44,11 @@ export interface BackupConfig {
44
44
  */
45
45
  export interface ModelConfig {
46
46
  id: string; // GUID (crypto.randomUUID())
47
- name: string; // Model identifier, e.g. "claude-haiku-4-5", "(default)"
47
+ name: string; // Display name shown in UI, e.g. "Gemma4 (thinking)", "(default)"
48
+ model_id?: string; // Actual model identifier sent to API — falls back to name if absent
48
49
  token_limit?: number; // Input token limit (user sets effective limit)
49
50
  max_output_tokens?: number; // Output token limit (API-enforced)
51
+ thinking_budget?: number; // Thinking token budget: 0 = disabled, N = enable with N tokens, undefined = don't send
50
52
  total_calls?: number; // Usage counter
51
53
  total_tokens_in?: number; // Usage counter
52
54
  total_tokens_out?: number; // Usage counter
@@ -103,7 +103,8 @@ export const meCommand: Command = {
103
103
  }
104
104
 
105
105
  try {
106
- const parsed = humanFromYAML(result.content, filteredHuman);
106
+ const currentHuman = await ctx.ei.getHuman();
107
+ const parsed = humanFromYAML(result.content, filteredHuman, currentHuman);
107
108
 
108
109
  for (const id of parsed.deletedFactIds) {
109
110
  await ctx.ei.removeDataItem("fact", id);
@@ -131,14 +132,20 @@ export const meCommand: Command = {
131
132
  }
132
133
  }
133
134
 
134
- const deleteCount = parsed.deletedFactIds.length +
135
- parsed.deletedTopicIds.length +
135
+ const deleteCount = parsed.deletedFactIds.length +
136
+ parsed.deletedTopicIds.length +
136
137
  parsed.deletedPersonIds.length;
137
- const updateCount = parsed.changedFactIds.size +
138
- parsed.changedTopicIds.size +
138
+ const updateCount = parsed.changedFactIds.size +
139
+ parsed.changedTopicIds.size +
139
140
  parsed.changedPersonIds.size;
140
-
141
- ctx.showNotification(`Updated ${updateCount} items, deleted ${deleteCount}`, "info");
141
+ const skippedCount = parsed.skippedFactCount +
142
+ parsed.skippedTopicCount +
143
+ parsed.skippedPersonCount;
144
+
145
+ const msg = skippedCount > 0
146
+ ? `Updated ${updateCount}, deleted ${deleteCount}, skipped ${skippedCount} (changed by another process)`
147
+ : `Updated ${updateCount} items, deleted ${deleteCount}`;
148
+ ctx.showNotification(msg, "info");
142
149
  return;
143
150
 
144
151
  } catch (parseError) {
@@ -14,6 +14,7 @@ import { Processor } from "../../../src/core/processor.js";
14
14
  import { FileStorage } from "../storage/file.js";
15
15
  import { remoteSync } from "../../../src/storage/remote.js";
16
16
  import { logger, clearLog, interceptConsole } from "../util/logger.js";
17
+ import { E2E_SKIP_LOCAL_DETECT } from "../util/e2e-flags.js";
17
18
  import { ConflictOverlay } from "../components/ConflictOverlay.js";
18
19
  import type {
19
20
  Ei_Interface,
@@ -698,7 +699,10 @@ export const EiProvider: ParentComponent = (props) => {
698
699
  try {
699
700
  const human = await processor!.getHuman();
700
701
  const hasAccounts = human.settings?.accounts && human.settings.accounts.length > 0;
701
- if (!hasAccounts) {
702
+ if (!hasAccounts && E2E_SKIP_LOCAL_DETECT) {
703
+ logger.info("E2E_SKIP_LOCAL_DETECT active, skipping local LLM check");
704
+ setShowWelcomeOverlay(true);
705
+ } else if (!hasAccounts) {
702
706
  logger.info("No LLM accounts configured, checking for local LLM...");
703
707
  try {
704
708
  const response = await fetch("http://127.0.0.1:1234/v1/models", {
@@ -707,6 +711,7 @@ export const EiProvider: ParentComponent = (props) => {
707
711
  });
708
712
  if (response.ok) {
709
713
  logger.info("Local LLM detected, auto-configuring...");
714
+ const defaultModelId = crypto.randomUUID();
710
715
  const localAccount: ProviderAccount = {
711
716
  id: crypto.randomUUID(),
712
717
  name: "Local LLM",
@@ -714,13 +719,15 @@ export const EiProvider: ParentComponent = (props) => {
714
719
  url: "http://127.0.0.1:1234/v1",
715
720
  enabled: true,
716
721
  created_at: new Date().toISOString(),
722
+ default_model: defaultModelId,
723
+ models: [{ id: defaultModelId, name: "(default)" }],
717
724
  };
718
725
  const currentHuman = await processor!.getHuman();
719
726
  await processor!.updateHuman({
720
727
  settings: {
721
728
  ...currentHuman.settings,
722
729
  accounts: [localAccount],
723
- default_model: "Local LLM",
730
+ default_model: defaultModelId,
724
731
  },
725
732
  });
726
733
  showNotification("Local LLM detected and configured!", "info");
@@ -0,0 +1,13 @@
1
+ /**
2
+ * EI_E2E_MODE — bitfield for test seams that can't be solved via data seeding.
3
+ *
4
+ * Use power-of-two bits so combinations are unambiguous:
5
+ * 1 — skip local LLM auto-detect (fetch to :1234/:11434)
6
+ * 2 — (reserved for next scenario)
7
+ * 3 — flags 1 + 2 combined
8
+ *
9
+ * Production code should never set this. Tests pass it via env in test.use({ env: { EI_E2E_MODE: "1" } }).
10
+ */
11
+ const E2E_MODE = parseInt(process.env.EI_E2E_MODE ?? "0", 10);
12
+
13
+ export const E2E_SKIP_LOCAL_DETECT = (E2E_MODE & 1) !== 0;
@@ -225,6 +225,9 @@ export interface HumanYAMLResult {
225
225
  changedFactIds: Set<string>;
226
226
  changedTopicIds: Set<string>;
227
227
  changedPersonIds: Set<string>;
228
+ skippedFactCount: number;
229
+ skippedTopicCount: number;
230
+ skippedPersonCount: number;
228
231
  }
229
232
 
230
233
  function identifiersEqual(a: PersonIdentifier[] | undefined, b: PersonIdentifier[] | undefined): boolean {
@@ -279,7 +282,7 @@ function personChanged(parsed: Person, original: Person): boolean {
279
282
  return !identifiersEqual(parsed.identifiers, original.identifiers);
280
283
  }
281
284
 
282
- export function humanFromYAML(yamlContent: string, original?: HumanEntity): HumanYAMLResult {
285
+ export function humanFromYAML(yamlContent: string, original?: HumanEntity, current?: HumanEntity): HumanYAMLResult {
283
286
  const stripped = yamlContent
284
287
  .split('\n')
285
288
  .filter(line => !/^\s*#\s*\[read-only\]/.test(line))
@@ -292,6 +295,15 @@ export function humanFromYAML(yamlContent: string, original?: HumanEntity): Huma
292
295
  const changedFactIds = new Set<string>();
293
296
  const changedTopicIds = new Set<string>();
294
297
  const changedPersonIds = new Set<string>();
298
+ let skippedFactCount = 0;
299
+ let skippedTopicCount = 0;
300
+ let skippedPersonCount = 0;
301
+
302
+ const staleInState = (id: string | undefined, originalItem: { last_updated: string } | undefined, currentItems: { id: string; last_updated: string }[] | undefined): boolean => {
303
+ if (!id || !originalItem || !current || !currentItems) return false;
304
+ const currentItem = currentItems.find(i => i.id === id);
305
+ return !!currentItem && currentItem.last_updated !== originalItem.last_updated;
306
+ };
295
307
 
296
308
  const facts: Fact[] = [];
297
309
  for (const f of data.facts ?? []) {
@@ -306,10 +318,14 @@ export function humanFromYAML(yamlContent: string, original?: HumanEntity): Huma
306
318
  : { ...parsed, last_updated: new Date().toISOString(), persona_groups: parseGroupCheckboxMap(groupMap) };
307
319
  facts.push(fact);
308
320
  if (!originalFact || factChanged(fact, originalFact)) {
309
- if (fact.description && !originalFact?.validated_date) {
310
- fact.validated_date = new Date().toISOString();
321
+ if (staleInState(parsed.id, originalFact, current?.facts)) {
322
+ skippedFactCount++;
323
+ } else {
324
+ if (fact.description && !originalFact?.validated_date) {
325
+ fact.validated_date = new Date().toISOString();
326
+ }
327
+ changedFactIds.add(fact.id);
311
328
  }
312
- changedFactIds.add(fact.id);
313
329
  }
314
330
  }
315
331
  }
@@ -327,7 +343,11 @@ export function humanFromYAML(yamlContent: string, original?: HumanEntity): Huma
327
343
  : { ...parsed, last_updated: new Date().toISOString(), persona_groups: parseGroupCheckboxMap(groupMap) };
328
344
  topics.push(topic);
329
345
  if (!originalTopic || topicChanged(topic, originalTopic)) {
330
- changedTopicIds.add(topic.id);
346
+ if (staleInState(parsed.id, originalTopic, current?.topics)) {
347
+ skippedTopicCount++;
348
+ } else {
349
+ changedTopicIds.add(topic.id);
350
+ }
331
351
  }
332
352
  }
333
353
  }
@@ -350,7 +370,11 @@ export function humanFromYAML(yamlContent: string, original?: HumanEntity): Huma
350
370
  : { ...parsed, last_updated: new Date().toISOString(), identifiers, persona_groups: parseGroupCheckboxMap(groupMap) };
351
371
  people.push(person);
352
372
  if (!originalPerson || personChanged(person, originalPerson)) {
353
- changedPersonIds.add(person.id);
373
+ if (staleInState(parsed.id, originalPerson, current?.people)) {
374
+ skippedPersonCount++;
375
+ } else {
376
+ changedPersonIds.add(person.id);
377
+ }
354
378
  }
355
379
  }
356
380
  }
@@ -365,5 +389,8 @@ export function humanFromYAML(yamlContent: string, original?: HumanEntity): Huma
365
389
  changedFactIds,
366
390
  changedTopicIds,
367
391
  changedPersonIds,
392
+ skippedFactCount,
393
+ skippedTopicCount,
394
+ skippedPersonCount,
368
395
  };
369
396
  }
@@ -5,10 +5,15 @@ import type {
5
5
  } from "../../../src/core/types.js";
6
6
  import { modelGuidToDisplay } from "./yaml-shared.js";
7
7
 
8
+ const tokenFormatter = new Intl.NumberFormat("en-US", { notation: "compact", maximumFractionDigits: 1 });
9
+ const formatTokens = (n: number) => tokenFormatter.format(n);
10
+
8
11
  interface EditableModelData {
9
12
  name: string;
13
+ model_id?: string;
10
14
  token_limit?: number;
11
15
  max_output_tokens?: number;
16
+ thinking_budget?: number;
12
17
  _delete?: boolean;
13
18
  }
14
19
 
@@ -45,11 +50,14 @@ function parseModels(editableModels: EditableModelData[]): import('../../../src/
45
50
  const result: import('../../../src/core/types.js').ModelConfig[] = [];
46
51
  for (const m of editableModels) {
47
52
  if (m._delete) continue;
53
+ const modelId = m.model_id ?? undefined;
48
54
  result.push({
49
55
  id: crypto.randomUUID(),
50
56
  name: m.name,
51
- token_limit: m.token_limit,
52
- max_output_tokens: m.max_output_tokens,
57
+ model_id: (modelId === null || modelId === m.name) ? undefined : modelId,
58
+ token_limit: m.token_limit ?? undefined,
59
+ max_output_tokens: m.max_output_tokens ?? undefined,
60
+ thinking_budget: m.thinking_budget ?? undefined,
53
61
  });
54
62
  }
55
63
  return result;
@@ -70,6 +78,10 @@ export function newProviderToYAML(name?: string): string {
70
78
  const modelsYAML = [
71
79
  "models:",
72
80
  " - name: (default)",
81
+ " model_id: (default)",
82
+ " token_limit: null",
83
+ " max_output_tokens: null",
84
+ " thinking_budget: null",
73
85
  " # _delete: true",
74
86
  "# _delete: true # Delete this entire provider",
75
87
  ].join("\n");
@@ -141,16 +153,26 @@ export function providerToYAML(account: ProviderAccount): string {
141
153
  if (modelList.length > 0) {
142
154
  for (const m of modelList) {
143
155
  modelLines.push(` - name: ${m.name}`);
144
- if (m.token_limit !== undefined) {
145
- modelLines.push(` token_limit: ${m.token_limit}`);
146
- }
147
- if (m.max_output_tokens !== undefined) {
148
- modelLines.push(` max_output_tokens: ${m.max_output_tokens}`);
156
+ modelLines.push(` model_id: ${m.model_id ?? m.name}`);
157
+ modelLines.push(` token_limit: ${m.token_limit ?? null}`);
158
+ modelLines.push(` max_output_tokens: ${m.max_output_tokens ?? null}`);
159
+ modelLines.push(` thinking_budget: ${m.thinking_budget ?? null}`);
160
+ if (m.total_calls !== undefined || m.total_tokens_in !== undefined) {
161
+ const tokensIn = m.total_tokens_in ?? 0;
162
+ const tokensOut = m.total_tokens_out ?? 0;
163
+ modelLines.push(` # stats: ${formatTokens(m.total_calls ?? 0)} calls · ${formatTokens(tokensIn)} in / ${formatTokens(tokensOut)} out`);
164
+ if (m.last_used) {
165
+ modelLines.push(` # used: ${m.last_used}`);
166
+ }
149
167
  }
150
168
  modelLines.push(` _delete: false`);
151
169
  }
152
170
  } else {
153
171
  modelLines.push(" - name: (default)");
172
+ modelLines.push(` model_id: (default)`);
173
+ modelLines.push(` token_limit: null`);
174
+ modelLines.push(` max_output_tokens: null`);
175
+ modelLines.push(` thinking_budget: null`);
154
176
  modelLines.push(" _delete: false");
155
177
  }
156
178
  modelLines.push("_delete: false # Set to true to delete this entire provider");
@@ -185,11 +207,14 @@ export function providerFromYAML(yamlContent: string, original: ProviderAccount)
185
207
  for (const m of data.models ?? []) {
186
208
  if (m._delete) continue;
187
209
  const existing = existingModels.find(em => em.name === m.name);
210
+ const modelId = m.model_id ?? undefined;
188
211
  parsedModels.push({
189
212
  id: existing?.id ?? crypto.randomUUID(),
190
213
  name: m.name,
191
- token_limit: m.token_limit,
192
- max_output_tokens: m.max_output_tokens,
214
+ model_id: (modelId === null || modelId === m.name) ? undefined : modelId,
215
+ token_limit: m.token_limit ?? undefined,
216
+ max_output_tokens: m.max_output_tokens ?? undefined,
217
+ thinking_budget: m.thinking_budget ?? undefined,
193
218
  total_calls: existing?.total_calls,
194
219
  total_tokens_in: existing?.total_tokens_in,
195
220
  total_tokens_out: existing?.total_tokens_out,