ei-tui 1.0.0 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "ei-tui",
-  "version": "1.0.0",
+  "version": "1.0.1",
   "author": "Flare576",
   "repository": {
     "type": "git",
@@ -92,7 +92,7 @@ export async function handleFactFind(response: LLMResponse, state: StateManager)
   markMessagesExtracted(response, state, "f");
 
   if (!result?.facts || !Array.isArray(result.facts)) {
-    console.log("[handleFactFind] No facts detected or invalid result");
+    console.debug("[handleFactFind] No facts detected or invalid result");
     return;
   }
 
@@ -106,26 +106,26 @@ export async function handleFactFind(response: LLMResponse, state: StateManager)
   for (const factResult of result.facts) {
     // Only upsert facts that match a built-in name
     if (!BUILT_IN_FACT_NAMES.has(factResult.name)) {
-      console.log(`[handleFactFind] Skipping non-built-in fact: "${factResult.name}"`);
+      console.warn(`[handleFactFind] Skipping non-built-in fact: "${factResult.name}"`);
       continue;
     }
 
     // Find the existing fact in state
     const existingFact = human.facts.find(f => f.name === factResult.name);
     if (!existingFact) {
-      console.log(`[handleFactFind] Skipping unknown fact: "${factResult.name}"`);
+      console.warn(`[handleFactFind] Skipping unknown fact: "${factResult.name}"`);
       continue;
     }
 
     // Skip facts that already have descriptions (only fill empty ones)
     if (existingFact.description && existingFact.description !== "") {
-      console.log(`[handleFactFind] Skipping fact with existing description: "${factResult.name}"`);
+      console.debug(`[handleFactFind] Skipping fact with existing description: "${factResult.name}"`);
       continue;
     }
 
     // Skip if the LLM returned a null/empty/non-string value — don't store booleans or nulls
     if (!factResult.value || typeof factResult.value !== 'string') {
-      console.log(`[handleFactFind] Skipping fact with null/empty/non-string value: "${factResult.name}" (got ${typeof factResult.value})`);
+      console.warn(`[handleFactFind] Skipping fact with null/empty/non-string value: "${factResult.name}" (got ${typeof factResult.value})`);
       continue;
     }
 
@@ -165,7 +165,7 @@ export async function handleHumanTopicScan(response: LLMResponse, state: StateMa
   markMessagesExtracted(response, state, "t");
 
   if (!result?.topics || !Array.isArray(result.topics)) {
-    console.log("[handleHumanTopicScan] No topics detected or invalid result");
+    console.debug("[handleHumanTopicScan] No topics detected or invalid result");
     return;
   }
 
@@ -185,7 +185,7 @@ export async function handleHumanPersonScan(response: LLMResponse, state: StateM
   markMessagesExtracted(response, state, "p");
 
   if (!result?.people || !Array.isArray(result.people)) {
-    console.log("[handleHumanPersonScan] No people detected or invalid result");
+    console.debug("[handleHumanPersonScan] No people detected or invalid result");
     return;
   }
 
@@ -231,7 +231,7 @@ export async function handleHumanPersonScan(response: LLMResponse, state: StateM
           }
         }
         if (!matchedPerson) {
-          console.log(`[handleHumanPersonScan] Multi-match for "${candidate.name}" (${matches.length} hits) — no embedding above threshold, creating new record`);
+          console.debug(`[handleHumanPersonScan] Multi-match for "${candidate.name}" (${matches.length} hits) — no embedding above threshold, creating new record`);
         }
       } catch (err) {
         console.warn(`[handleHumanPersonScan] Multi-match embedding failed for "${candidate.name}", using first match:`, err);
@@ -253,7 +253,7 @@ export async function handleHumanPersonScan(response: LLMResponse, state: StateM
         if (isUnknownPlaceholder || isSingleton) {
           matchedPerson = existing;
           const reason = isUnknownPlaceholder ? 'unnamed placeholder' : 'singleton relationship';
-          console.log(`[handleHumanPersonScan] Relationship unique match: "${candidate.name}" → "${existing.name}" (sole ${candidate.relationship}, ${reason})`);
+          console.debug(`[handleHumanPersonScan] Relationship unique match: "${candidate.name}" → "${existing.name}" (sole ${candidate.relationship}, ${reason})`);
         }
       } else {
         // N>1 same relationship → cosine within that subset.
@@ -267,7 +267,7 @@ export async function handleHumanPersonScan(response: LLMResponse, state: StateM
           : `all ${human.people.length} people`;
 
         if (searchPool.length > 0) {
-          console.log(`[handleHumanPersonScan] "${candidate.name}": cosine against ${searchPool.length} embedded (${poolLabel})`);
+          console.debug(`[handleHumanPersonScan] "${candidate.name}": cosine against ${searchPool.length} embedded (${poolLabel})`);
           try {
             const embeddingService = getEmbeddingService();
             const candidateText = getPersonEmbeddingText({
@@ -288,15 +288,15 @@ export async function handleHumanPersonScan(response: LLMResponse, state: StateM
             }
             const top3 = scores.sort((a, b) => b.sim - a.sim).slice(0, 3).map(s => `"${s.name}"=${s.sim.toFixed(3)}`).join(', ');
             if (matchedPerson) {
-              console.log(`[handleHumanPersonScan] Cosine matched "${candidate.name}" → "${matchedPerson.name}" (${bestSimilarity.toFixed(3)}) | top3: ${top3}`);
+              console.debug(`[handleHumanPersonScan] Cosine matched "${candidate.name}" → "${matchedPerson.name}" (${bestSimilarity.toFixed(3)}) | top3: ${top3}`);
             } else {
-              console.log(`[handleHumanPersonScan] Cosine: no match above ${ZERO_MATCH_COSINE_THRESHOLD} for "${candidate.name}" | top3: ${top3}`);
+              console.debug(`[handleHumanPersonScan] Cosine: no match above ${ZERO_MATCH_COSINE_THRESHOLD} for "${candidate.name}" | top3: ${top3}`);
             }
           } catch (err) {
             console.warn(`[handleHumanPersonScan] Cosine failed for "${candidate.name}":`, err);
           }
         } else {
-          console.log(`[handleHumanPersonScan] "${candidate.name}": no embedded people in pool (${poolLabel}) — new person`);
+          console.debug(`[handleHumanPersonScan] "${candidate.name}": no embedded people in pool (${poolLabel}) — new person`);
         }
       }
     }
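
For reference, the cosine scores logged above are the standard normalized dot product between two embedding vectors. A minimal sketch, assuming plain number[] embeddings (the package's actual embedding service is not shown in this diff):

function cosineSimilarity(a: number[], b: number[]): number {
  let dot = 0, normA = 0, normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];     // accumulate dot product
    normA += a[i] * a[i];   // and squared magnitudes
    normB += b[i] * b[i];
  }
  const denom = Math.sqrt(normA) * Math.sqrt(normB);
  return denom === 0 ? 0 : dot / denom; // guard zero vectors against NaN
}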
@@ -305,7 +305,7 @@ export async function handleHumanPersonScan(response: LLMResponse, state: StateM
       const linkedPersonaId = matchedPerson.identifiers
         ?.find(i => i.type === "Ei Persona")?.value;
       if (linkedPersonaId) {
-        console.log(`[handleHumanPersonScan] Skipping update for "${candidate.name}" — scan marked as reflection drain (reflection_progress=1)`);
+        console.debug(`[handleHumanPersonScan] Skipping update for "${candidate.name}" — scan marked as reflection drain (reflection_progress=1)`);
         continue;
       }
     }
@@ -326,7 +326,7 @@ export async function handleHumanPersonScan(response: LLMResponse, state: StateM
         : matches.length > 1
           ? `multi-match ambiguous (${matches.length} hits) — new record`
           : "no match (new person)";
-    console.log(`[handleHumanPersonScan] person "${candidate.name}": ${matched}`);
+    console.debug(`[handleHumanPersonScan] person "${candidate.name}": ${matched}`);
   }
   console.log(`[handleHumanPersonScan] Processed ${result.people.length} person(s)`);
 }
@@ -337,7 +337,7 @@ export async function handleEventScan(response: LLMResponse, state: StateManager
   const result = response.parsed as { events?: Array<{ name: string; description: string; reason: string }> } | undefined;
 
   if (!result?.events || !Array.isArray(result.events) || result.events.length === 0) {
-    console.log("[handleEventScan] No epic events detected");
+    console.debug("[handleEventScan] No epic events detected");
     return;
   }
 
@@ -2,6 +2,36 @@ import type { ChatMessage, ProviderAccount, ModelConfig } from "./types.js";
 const DEFAULT_TOKEN_LIMIT = 8192;
 const DEFAULT_MAX_OUTPUT_TOKENS = 8000;
 
+// Lazy verbose network dump — only active when EI_DEBUG_NETWORK_VERBOSE=1.
+// Uses dynamic import so the web bundle never pulls in node:fs.
+async function writeNetworkDump(
+  callNumber: number,
+  nextStep: string,
+  meta: { model: string; provider: string; latency_ms: number; status_code: number; tokens_in: number; tokens_out: number },
+  request: unknown,
+  response: unknown
+): Promise<void> {
+  const dataPath = (typeof process !== "undefined" && process.env?.EI_DATA_PATH) ||
+    (typeof Bun !== "undefined" && (Bun as Record<string, unknown>).env && ((Bun as { env: Record<string, string> }).env.EI_DATA_PATH));
+  if (!dataPath) return;
+
+  try {
+    const { mkdirSync, writeFileSync } = await import("node:fs");
+    const { join } = await import("node:path");
+    const logsDir = join(dataPath as string, "logs");
+    mkdirSync(logsDir, { recursive: true });
+
+    const timestamp = new Date().toISOString().replace(/[:.]/g, "-");
+    const safeName = nextStep.replace(/[^a-zA-Z0-9_-]/g, "_");
+    const filename = join(logsDir, `${timestamp}_call${callNumber}_${safeName}.json`);
+
+    const payload = JSON.stringify({ meta, request, response }, null, 2);
+    writeFileSync(filename, payload);
+  } catch {
+    // Silent — verbose dump failures must never crash the main path
+  }
+}
+
 export interface ProviderConfig {
   baseURL: string;
   apiKey: string;
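
Note how writeNetworkDump sanitizes nextStep before it reaches the filename: any character outside [a-zA-Z0-9_-] becomes an underscore, so the composite step names introduced later in this diff (the +tool_continuation, +prose_reformat, and +json_reformat suffixes) land in filenames with underscores instead. A one-line check, runnable as-is:

console.log("HandleFactFind+json_reformat".replace(/[^a-zA-Z0-9_-]/g, "_"));
// → "HandleFactFind_json_reformat"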
@@ -22,6 +52,8 @@ export interface LLMCallOptions {
   tools?: Record<string, unknown>[];
   /** Fire-and-forget callback invoked after a successful response to increment usage counters. */
   onUsageUpdate?: (modelId: string, usage: { calls: number; tokens_in: number; tokens_out: number }) => void;
+  /** Queue step name passed through to EI_DEBUG_NETWORK_VERBOSE file dumps. */
+  nextStep?: string;
 }
 
 export interface LLMRawResponse {
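
With the new field in place, a fully populated options object would look roughly like this (a sketch: signal and temperature are inferred from the destructuring inside callLLMRaw later in this diff, and the step name is a placeholder):

const options: LLMCallOptions = {
  signal: new AbortController().signal,
  temperature: 0.7,
  onUsageUpdate: (modelId, usage) =>
    console.debug(`[usage] ${modelId}: +${usage.tokens_in} in, +${usage.tokens_out} out`),
  nextStep: "HandleFactFind", // names the dump file when EI_DEBUG_NETWORK_VERBOSE=1
};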
@@ -212,7 +244,7 @@ function logTokenLimit(model: string, source: string, tokens: number): void {
   if (source === "default") {
     console.warn(`[TokenLimit] Unknown model "${model}" — using conservative default (${DEFAULT_TOKEN_LIMIT})`);
   } else {
-    console.log(`[TokenLimit] ${model}: ${source} → ${tokens} tokens (extraction budget: ${budget})`);
+    console.debug(`[TokenLimit] ${model}: ${source} → ${tokens} tokens (extraction budget: ${budget})`);
   }
 }
 
@@ -226,7 +258,7 @@ export async function callLLMRaw(
 ): Promise<LLMRawResponse> {
   llmCallCount++;
 
-  const { signal, temperature = 0.7, onUsageUpdate } = options;
+  const { signal, temperature = 0.7, onUsageUpdate, nextStep = "unknown" } = options;
 
   if (signal?.aborted) {
     throw new Error("LLM call aborted");
@@ -251,7 +283,9 @@
 
   const totalChars = finalMessages.reduce((sum, m) => sum + (m.content?.length ?? 0), 0);
   const estimatedTokens = Math.ceil(totalChars / 4);
-  console.log(`[LLM] Call #${llmCallCount} - ~${estimatedTokens} tokens (${totalChars} chars)`);
+  const modelLabel = model ?? "default";
+  console.log(`[LLM] Call #${llmCallCount} — ${config.name}:${modelLabel}, ~${estimatedTokens} tokens est.`);
+  const _llmCallStart = Date.now();
 
   const normalizedBaseURL = config.baseURL.replace(/\/+$/, "");
 
@@ -275,14 +309,15 @@
 
   if (modelConfig?.thinking_budget !== undefined) {
     if (modelConfig.thinking_budget === 0) {
-      // Universal kill switch works on Ollama, LM Studio, and all OpenAI-compat providers.
-      requestBody.reasoning_effort = "none";
+      // Universal kill switch across all known providers. Non-conflicting: each provider
+      // reads whichever field it understands and ignores the rest.
+      requestBody.reasoning_effort = "none"; // Ollama, OpenAI-compat
+      requestBody.enable_thinking = false;   // Rapid-MLX
     } else {
-      // Pass both signals: providers that honor the token budget get it (Qwen3 via Ollama,
-      // Anthropic), providers that reduce thinking to on/off use reasoning_effort as the
-      // on-signal (Gemma4 via Ollama/LM Studio). Non-conflicting — each provider reads
-      // whichever field it understands.
+      // Pass all on-signals: providers that honor the token budget get it (Qwen3, Anthropic),
+      // providers that reduce thinking to on/off use reasoning_effort or enable_thinking.
       requestBody.reasoning_effort = "high";
+      requestBody.enable_thinking = true;
       requestBody.think = { budget_tokens: modelConfig.thinking_budget };
     }
   }
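
Condensed, the branch above yields one of two payload shapes. A sketch (thinkingBudget, model, and messages stand in for the surrounding request state; the field names are exactly the ones this diff sets):

const requestBody: Record<string, unknown> = { model, messages };
if (thinkingBudget === 0) {
  requestBody.reasoning_effort = "none"; // off-switch for Ollama and OpenAI-compat
  requestBody.enable_thinking = false;   // off-switch for Rapid-MLX
} else {
  requestBody.reasoning_effort = "high"; // on-signal for on/off providers
  requestBody.enable_thinking = true;
  requestBody.think = { budget_tokens: thinkingBudget }; // for budget-aware providers
}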
@@ -306,9 +341,24 @@
 
   const data = await response.json();
 
+  const _llmLatency = Date.now() - _llmCallStart;
+  const tokensIn = data.usage?.prompt_tokens ?? data.usage?.input_tokens ?? 0;
+  const tokensOut = data.usage?.completion_tokens ?? data.usage?.output_tokens ?? 0;
+  console.log(`[LLM] Response #${llmCallCount} — ${response.status} ${_llmLatency}ms | in: ${tokensIn} out: ${tokensOut}`);
+
+  const isVerbose = (typeof process !== "undefined" && process.env?.EI_DEBUG_NETWORK_VERBOSE === "1") ||
+    (typeof Bun !== "undefined" && (Bun as { env: Record<string, string> }).env?.EI_DEBUG_NETWORK_VERBOSE === "1");
+  if (isVerbose) {
+    void writeNetworkDump(
+      llmCallCount,
+      nextStep,
+      { model: modelLabel, provider: config.name, latency_ms: _llmLatency, status_code: response.status, tokens_in: tokensIn, tokens_out: tokensOut },
+      requestBody,
+      data
+    );
+  }
+
   if (onUsageUpdate && modelConfig) {
-    const tokensIn = data.usage?.prompt_tokens ?? data.usage?.input_tokens ?? 0;
-    const tokensOut = data.usage?.completion_tokens ?? data.usage?.output_tokens ?? 0;
     onUsageUpdate(modelConfig.id, { calls: 1, tokens_in: tokensIn, tokens_out: tokensOut });
   }
 
@@ -200,7 +200,7 @@ export class QueueProcessor {
       hydratedUser,
       messages,
       request.model,
-      { signal: this.abortController?.signal, tools: openAITools, onUsageUpdate: this.currentOnUsageUpdate },
+      { signal: this.abortController?.signal, tools: openAITools, onUsageUpdate: this.currentOnUsageUpdate, nextStep: `${request.data.originalNextStep ?? request.next_step}+tool_continuation` },
       this.currentAccounts
     );
 
@@ -219,7 +219,7 @@ export class QueueProcessor {
       if (!args.should_respond && args.content) {
         args.should_respond = true;
       }
-      console.log(`[QueueProcessor] submit tool "${submitCall.name}" called — returning arguments as parsed response`);
+      console.debug(`[QueueProcessor] submit tool "${submitCall.name}" called — returning arguments as parsed response`);
       return {
         request,
         success: true,
@@ -297,9 +297,9 @@ export class QueueProcessor {
     const isHeartbeat = request.next_step === LLMNextStep.HandleHeartbeatCheck || request.next_step === LLMNextStep.HandleEiHeartbeat;
     if (isHeartbeat) {
       const personaName = request.data.personaDisplayName as string | undefined ?? 'Ei';
-      console.log(`[${personaName} Heartbeat] LLM call - tools offered: ${openAITools.length} (${activeTools.map(t => t.name).join(', ') || 'none'})`);
+      console.debug(`[${personaName} Heartbeat] LLM call - tools offered: ${openAITools.length} (${activeTools.map(t => t.name).join(', ') || 'none'})`);
     } else {
-      console.log(`[QueueProcessor] LLM call for ${request.next_step}, tools=${openAITools.length}`);
+      console.debug(`[QueueProcessor] LLM call for ${request.next_step}, tools=${openAITools.length}`);
     }
 
     const { content, finishReason, rawToolCalls, assistantMessage, thinking } = await callLLMRaw(
@@ -307,18 +307,18 @@ export class QueueProcessor {
       hydratedUser,
       messages,
       request.model,
-      { signal: this.abortController?.signal, tools: openAITools, onUsageUpdate: this.currentOnUsageUpdate },
+      { signal: this.abortController?.signal, tools: openAITools, onUsageUpdate: this.currentOnUsageUpdate, nextStep: request.next_step },
       this.currentAccounts
     );
     if (thinking) {
-      console.log(`[QueueProcessor] Extended thinking on ${request.next_step} (${thinking.length} chars) — TODO(#13): stream to TUI`);
+      console.debug(`[QueueProcessor] Extended thinking on ${request.next_step} (${thinking.length} chars) — TODO(#13): stream to TUI`);
     }
 
     // =========================================================================
     // Tool call path: execute tools, enqueue HandleToolContinuation, done.
     // =========================================================================
     if (finishReason === "tool_calls" && rawToolCalls?.length) {
-      console.log(`[QueueProcessor] finish_reason=tool_calls — executing tools, will enqueue HandleToolContinuation`);
+      console.debug(`[QueueProcessor] finish_reason=tool_calls — executing tools, will enqueue HandleToolContinuation`);
 
       const toolCalls = parseToolCalls(rawToolCalls);
       if (toolCalls.length === 0) {
@@ -364,7 +364,7 @@ export class QueueProcessor {
       });
     }
 
-    console.log(`[QueueProcessor] Tool execution complete: ${results.length} result(s). Enqueueing HandleToolContinuation.`);
+    console.debug(`[QueueProcessor] Tool execution complete: ${results.length} result(s). Enqueueing HandleToolContinuation.`);
 
     if (this.currentOnEnqueue) {
       this.currentOnEnqueue({
@@ -412,7 +412,7 @@ export class QueueProcessor {
     // =========================================================================
     // Normal stop path
     // =========================================================================
-    console.log(`[QueueProcessor] finish_reason="${finishReason}" — normal stop`);
+    console.debug(`[QueueProcessor] finish_reason="${finishReason}" — normal stop`);
     return this.handleResponseType(request, content ?? "", finishReason);
   }
 
@@ -497,9 +497,9 @@ export class QueueProcessor {
     const { content: reformatContent, finishReason: reformatReason } = await callLLMRaw(
       request.system,
       reformatUserPrompt,
-      messages, // existing tool history — gives full context without duplicating the ask
+      messages,
       request.model,
-      { signal: this.abortController?.signal, onUsageUpdate: this.currentOnUsageUpdate },
+      { signal: this.abortController?.signal, onUsageUpdate: this.currentOnUsageUpdate, nextStep: `${request.data.originalNextStep ?? request.next_step}+prose_reformat` },
       this.currentAccounts
     );
 
@@ -554,9 +554,9 @@ export class QueueProcessor {
     const { content: reformatContent, finishReason: reformatReason } = await callLLMRaw(
       request.system,
       reformatUserPrompt,
-      [], // no message history needed — schema is already in the system prompt
+      [],
       request.model,
-      { signal: this.abortController?.signal, onUsageUpdate: this.currentOnUsageUpdate },
+      { signal: this.abortController?.signal, onUsageUpdate: this.currentOnUsageUpdate, nextStep: `${request.next_step}+json_reformat` },
       this.currentAccounts
     );
 
@@ -1054,7 +1054,7 @@ export class StateManager {
   tools_getForPersona(personaId: string, isTUI: boolean): ToolDefinition[] {
     const persona = this.personaState.getById(personaId);
     if (!persona?.tools?.length) {
-      console.log(`[Tools] tools_getForPersona(${personaId}): persona has no assigned tools`);
+      console.debug(`[Tools] tools_getForPersona(${personaId}): persona has no assigned tools`);
       return [];
     }
     const assignedIds = new Set(persona.tools);
@@ -1077,13 +1077,13 @@ export class StateManager {
     if (result.length < assignedIds.size) {
       for (const id of assignedIds) {
         const tool = this.tools.find(t => t.id === id);
-        if (!tool) { console.log(`[Tools] tools_getForPersona: assigned tool id=${id} not found in registry`); continue; }
-        if (!tool.enabled) { console.log(`[Tools] tools_getForPersona: tool "${tool.name}" is disabled`); continue; }
-        if (!enabledProviderIds.has(tool.provider_id)) { console.log(`[Tools] tools_getForPersona: tool "${tool.name}" provider is disabled`); continue; }
-        if (!(tool.runtime === "any" || (tool.runtime === "node" && isTUI))) { console.log(`[Tools] tools_getForPersona: tool "${tool.name}" runtime "${tool.runtime}" not available (isTUI=${isTUI})`); continue; }
+        if (!tool) { console.debug(`[Tools] tools_getForPersona: assigned tool id=${id} not found in registry`); continue; }
+        if (!tool.enabled) { console.debug(`[Tools] tools_getForPersona: tool "${tool.name}" is disabled`); continue; }
+        if (!enabledProviderIds.has(tool.provider_id)) { console.debug(`[Tools] tools_getForPersona: tool "${tool.name}" provider is disabled`); continue; }
+        if (!(tool.runtime === "any" || (tool.runtime === "node" && isTUI))) { console.debug(`[Tools] tools_getForPersona: tool "${tool.name}" runtime "${tool.runtime}" not available (isTUI=${isTUI})`); continue; }
       }
     }
-    console.log(`[Tools] tools_getForPersona(${personaId}): resolved ${result.length}/${assignedIds.size} tools: [${result.map(t => t.name).join(", ")}]`);
+    console.debug(`[Tools] tools_getForPersona(${personaId}): resolved ${result.length}/${assignedIds.size} tools: [${result.map(t => t.name).join(", ")}]`);
     return result;
   }
 
package/tui/README.md CHANGED
@@ -168,9 +168,10 @@ Rooms have three modes, set at creation time:
 | `XDG_DATA_HOME` | `~/.local/share` | XDG base directory. Ignored if `EI_DATA_PATH` is set. |
 | `EI_SYNC_USERNAME` | — | Username for remote sync. If set at startup, bootstraps sync credentials automatically (useful for dotfiles/scripts). |
 | `EI_SYNC_PASSPHRASE` | — | Passphrase for remote sync. Paired with `EI_SYNC_USERNAME`. |
-| `EDITOR` / `VISUAL` | `vi` | Editor opened by `/details`, `/me`, `/settings`, `/context`, `/quotes`, etc. Falls back to `VISUAL` if `EDITOR` is unset. |
+| `EI_LOG_LEVEL` | `warn` | Log verbosity written to `tui.log`: `error`, `warn`, `info`, `debug`. |
+| `EI_DEBUG_NETWORK_VERBOSE` | — | Set to `1` to dump full LLM request/response payloads as JSON files under `$EI_DATA_PATH/logs/`. One file per call, named `TIMESTAMP_callN_STEP.json`. |
 
-> **Tip**: `tail -f $EI_DATA_PATH/tui.log` to watch live debug output.
+> **Tip**: `tail -f $EI_DATA_PATH/tui.log` to watch live TUI output. Set `EI_LOG_LEVEL=info` to see LLM call summaries (model, latency, token counts). Set `EI_DEBUG_NETWORK_VERBOSE=1` to dump full request/response payloads to `$EI_DATA_PATH/logs/`.
 
 
 # Development
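
The dump files documented above are plain JSON with a { meta, request, response } shape, one file per call, so they are easy to post-process. A hedged sketch of a latency/token summary script (ours, not part of the package):

import { readdirSync, readFileSync } from "node:fs";
import { join } from "node:path";

// Walk $EI_DATA_PATH/logs and print one summary line per dump file.
const logsDir = join(process.env.EI_DATA_PATH ?? ".", "logs");
for (const file of readdirSync(logsDir).filter(f => f.endsWith(".json"))) {
  const { meta } = JSON.parse(readFileSync(join(logsDir, file), "utf8"));
  console.log(`${file}: ${meta.provider}:${meta.model} ${meta.latency_ms}ms in=${meta.tokens_in} out=${meta.tokens_out}`);
}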
@@ -22,7 +22,7 @@ const LOG_LEVELS: Record<LogLevel, number> = {
   error: 3,
 };
 
-const currentLevel: LogLevel = (Bun.env.EI_LOG_LEVEL as LogLevel) || "debug";
+const currentLevel: LogLevel = (Bun.env.EI_LOG_LEVEL as LogLevel) || "warn";
 
 function shouldLog(level: LogLevel): boolean {
   return LOG_LEVELS[level] >= LOG_LEVELS[currentLevel];
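
With the default now warn, shouldLog passes a message only when its level's rank meets or exceeds the current level's rank. The full LOG_LEVELS map is truncated in this diff (only error: 3 is visible), so the lower ranks below are assumptions consistent with it:

const LOG_LEVELS = { debug: 0, info: 1, warn: 2, error: 3 } as const;
type LogLevel = keyof typeof LOG_LEVELS;

function shouldLog(level: LogLevel, current: LogLevel): boolean {
  return LOG_LEVELS[level] >= LOG_LEVELS[current];
}

console.log(shouldLog("info", "warn"));  // false: filtered at the new default
console.log(shouldLog("error", "warn")); // true: errors always surface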