@pencil-agent/nano-pencil 1.13.4 → 1.13.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -30,7 +30,7 @@ loop/scheduler-parser.ts: Loop command parsing with flags/subcommands, parseSche
30
30
  loop/scheduler-types.ts: Scheduled loop types, LoopPayloadKind/ScheduledLoopTask/LoopStartSpec/ParsedSchedulerCommand
31
31
  loop/README.md: Loop extension documentation - recurring scheduler usage and flags
32
32
  btw/index.ts: BTW extension entry - /btw command for quick side questions without interrupting main task, uses completeSimple() for lightweight response, BTW_MESSAGE_TYPE renderer
33
- debug/index.ts: Debug extension entry - /debug command for system diagnostics with three-layer analysis (Phenomenon/Essence/Philosophy), supports /debug env|session|model subcommands, uses completeSimple() for LLM analysis, DEBUG_MESSAGE_TYPE renderer
33
+ debug/index.ts: Debug extension entry - /debug command dispatches diagnostics through full agent loop (sendUserMessage + before_agent_start hook), three-layer analysis (Phenomenon/Essence/Philosophy), supports /debug env|session|model quick subcommands, DEBUG_MESSAGE_TYPE renderer
34
34
  debug/collectors.ts: Diagnostic data collectors for /debug command, collectSystemInfo/collectModelInfo/collectSessionInfo/collectConfigInfo/collectGitInfo/collectAgentState, sanitizeForLLM, formatDiagnosticData
35
35
  plan/index.ts: Plan Mode extension entry - registers /plan command, EnterPlanMode/ExitPlanMode tools, permission gating, workflow prompt injection
36
36
  plan/types.ts: PlanModeState, PlanModeAttachment types, PlanModeConfig, PlanApprovalRequest/Response
@@ -43,7 +43,7 @@ plan/plan-agents.ts: Explore/Plan subagent definitions with read-only tools for
43
43
  plan/plan-validation.ts: validatePlan() - validates plan has required sections (Context, Approach, Files, Verification)
44
44
  plan/teammate-approval.ts: isInTeammateContext(), submitPlanToLeader(), formatPlanSubmittedMessage() - teammate plan approval integration
45
45
  sal/index.ts: SAL extension entry, enabled by default, registers --nosal/--sal-rebuild-terrain flags, /sal:coverage /sal:status /sal:setup commands, before_agent_start/tool_execution_start/agent_end hooks; /sal:setup writes ~/.memory-experiments/credentials.json with adapter inference (insforge/jsonl/noop); publishes structuralAnchor via core/runtime/turn-context (no SAL-specific globals); emits run_start/turn_anchor/memory_recalls/run_end eval events through pluggable EvalSink; reads memoryRecallSnapshot from turn-context bus in agent_end; runtime no-op when --nosal is set
46
- sal/terrain.ts: TerrainSnapshot/TerrainNode/TerrainEdge model, buildTerrainIndex(), checkDipCoverage(), isSnapshotStale(), moduleIdForPath(), parses P2 CLAUDE.md and P3 file headers
46
+ sal/terrain.ts: TerrainSnapshot/TerrainNode/TerrainEdge model, async buildTerrainIndex()/isSnapshotStale() (fs/promises + periodic yields so TUI can flush under block terminals like Warp), checkDipCoverage(), moduleIdForPath(), parses P2 CLAUDE.md and P3 file headers
47
47
  sal/anchors.ts: StructuralAnchor/AnchorResolution model, locateTask(), locateAction(), evidence-driven scoring with tunable SalWeights, CJK bigram tokenization
48
48
  sal/weights.ts: SalWeights interface, SAL_DEFAULT_WEIGHTS, loadSalWeights() reads sal-config.json from workspace or .memory-experiments/sal/
49
49
  sal/eval/index.ts: createEvalSink() factory + barrel re-exports; adapter selection via options.adapter or endpoint scheme inference (http(s)→insforge, file://|/|./|../→jsonl, missing→noop); ONLY entry point SAL imports from
@@ -82,4 +82,15 @@ export declare function collectGitInfo(cwd: string): Promise<CollectorResult<Git
82
82
  export declare function collectAgentState(ctx: ExtensionContext): Promise<CollectorResult<AgentState>>;
83
83
  export declare function sanitizeForLLM(data: DiagnosticData): DiagnosticData;
84
84
  export declare function formatDiagnosticData(data: DiagnosticData): string;
85
/**
 * Locale/language-preference diagnostics gathered for the /debug command.
 */
export interface PreferencesInfo {
    /** Effective locale code (e.g. "en", "zh"). */
    locale: string;
    /** Where the locale was resolved from: memory preferences, settings.json, or the system default. */
    localeSource: "memory" | "settings" | "system";
    /** Memory directory that was inspected for preferences.json. */
    memoryDir: string;
    /** Matched language-related preference entries (summary truncated by the collector). */
    languagePreference: {
        found: boolean;
        name?: string;
        summary?: string;
    }[];
}
/**
 * Collect locale/preference diagnostics. Returns a CollectorResult whose
 * `data` is populated on success and `error` is a string on failure.
 */
export declare function collectPreferencesInfo(ctx: ExtensionContext): Promise<CollectorResult<PreferencesInfo>>;
85
96
  export {};
@@ -246,3 +246,70 @@ export function formatDiagnosticData(data) {
246
246
  }
247
247
  return parts.join("\n\n");
248
248
  }
249
+ export async function collectPreferencesInfo(ctx) {
250
+ try {
251
+ const os = await import("node:os");
252
+ const fs = await import("node:fs");
253
+ const path = await import("node:path");
254
+ // Check memory directory for language preferences
255
+ const memoryDir = process.env.NANOMEM_MEMORY_DIR || path.join(os.homedir(), ".nanopencil", "agent", "memory");
256
+ let locale = "en";
257
+ let localeSource = "system";
258
+ const languagePreference = [];
259
+ // Try to read from preferences.json
260
+ const prefsPath = path.join(memoryDir, "preferences.json");
261
+ if (fs.existsSync(prefsPath)) {
262
+ try {
263
+ const prefs = JSON.parse(fs.readFileSync(prefsPath, "utf-8"));
264
+ // Find language-related preferences
265
+ const langPrefs = prefs.filter((p) => {
266
+ const text = (p.name || "") + (p.summary || "") + (p.detail || "");
267
+ return /中文|chinese|语言|locale|zh/i.test(text);
268
+ });
269
+ if (langPrefs.length > 0) {
270
+ locale = "zh";
271
+ localeSource = "memory";
272
+ for (const p of langPrefs.slice(0, 3)) {
273
+ languagePreference.push({
274
+ found: true,
275
+ name: p.name,
276
+ summary: (p.summary || "").slice(0, 80),
277
+ });
278
+ }
279
+ }
280
+ }
281
+ catch {
282
+ // Ignore read errors
283
+ }
284
+ }
285
+ // Check settings.json for locale
286
+ const settingsPath = path.join(os.homedir(), ".nanopencil", "agent", "settings.json");
287
+ if (localeSource === "system" && fs.existsSync(settingsPath)) {
288
+ try {
289
+ const settings = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
290
+ if (settings.locale) {
291
+ locale = settings.locale;
292
+ localeSource = "settings";
293
+ }
294
+ }
295
+ catch {
296
+ // Ignore read errors
297
+ }
298
+ }
299
+ return {
300
+ data: {
301
+ locale,
302
+ localeSource,
303
+ memoryDir,
304
+ languagePreference,
305
+ },
306
+ error: null,
307
+ };
308
+ }
309
+ catch (error) {
310
+ return {
311
+ data: null,
312
+ error: String(error),
313
+ };
314
+ }
315
+ }
@@ -1,8 +1,8 @@
1
1
  /**
2
- * [WHO]: debugExtension - registers /debug command and DEBUG_MESSAGE_TYPE renderer
2
+ * [WHO]: debugExtension - /debug command, before_agent_start hook injects diagnostic system prompt, agent_end cleanup, dispatched via sendUserMessage for streaming UX
3
3
  * [FROM]: Depends on core/extensions/types, @pencil-agent/tui, ./collectors
4
4
  * [TO]: Auto-loaded by builtin-extensions.ts as a default extension
5
- * [HERE]: extensions/defaults/debug/index.ts - system diagnostics with three-layer analysis
5
+ * [HERE]: extensions/defaults/debug/index.ts - system diagnostics with three-layer analysis through full agent loop
6
6
  */
7
7
  import type { ExtensionAPI } from "../../../core/extensions/types.js";
8
8
  export default function debugExtension(api: ExtensionAPI): Promise<void>;
@@ -1,13 +1,14 @@
1
1
  /**
2
- * [WHO]: debugExtension - registers /debug command and DEBUG_MESSAGE_TYPE renderer
2
+ * [WHO]: debugExtension - /debug command, before_agent_start hook injects diagnostic system prompt, agent_end cleanup, dispatched via sendUserMessage for streaming UX
3
3
  * [FROM]: Depends on core/extensions/types, @pencil-agent/tui, ./collectors
4
4
  * [TO]: Auto-loaded by builtin-extensions.ts as a default extension
5
- * [HERE]: extensions/defaults/debug/index.ts - system diagnostics with three-layer analysis
5
+ * [HERE]: extensions/defaults/debug/index.ts - system diagnostics with three-layer analysis through full agent loop
6
6
  */
7
7
  import { Box, Container, Spacer, Text } from "@pencil-agent/tui";
8
8
  import { collectSystemInfo, collectModelInfo, collectSessionInfo, collectConfigInfo, collectGitInfo, collectAgentState, sanitizeForLLM, formatDiagnosticData, } from "./collectors.js";
9
9
  const DEBUG_MESSAGE_TYPE = "debug";
10
- const DEBUG_TIMEOUT_MS = 45_000;
10
+ const DEBUG_PROMPT_PREFIX = "[DEBUG:";
11
+ const DEBUG_TAG = "[DEBUG]";
11
12
  const DEBUG_SYSTEM_PROMPT = `You are a diagnostic analyst for nanoPencil (a terminal-native AI coding agent).
12
13
  Analyze the provided system state and produce a structured three-layer diagnostic report.
13
14
 
@@ -37,7 +38,15 @@ Rules:
37
38
  - If the user provided an issue description, focus analysis on that issue
38
39
  - If no specific issue, perform a general health assessment
39
40
  - Use concise language; prefer tables and bullet lists over prose
40
- - If a diagnostic collection failed, treat that failure itself as a diagnostic signal`;
41
+ - If a diagnostic collection failed, treat that failure itself as a diagnostic signal
42
+ - Do NOT use any tools — this is a pure analysis task`;
43
+ // ============================================================================
44
+ // Pending diagnostic state (set by command handler, consumed by hooks)
45
+ // ============================================================================
46
+ let pendingDiagnosticPrompt;
47
/** True when `text` is a /debug-generated prompt (tagged with DEBUG_PROMPT_PREFIX). */
function isDebugPrompt(text) {
    return text.indexOf(DEBUG_PROMPT_PREFIX) === 0;
}
41
50
  function parseDebugArgs(args) {
42
51
  const trimmed = args.trim().toLowerCase();
43
52
  if (trimmed === "env")
@@ -49,14 +58,12 @@ function parseDebugArgs(args) {
49
58
  return { subcommand: "full", issueDescription: args.trim() || undefined };
50
59
  }
51
60
  // ============================================================================
52
- // Full diagnostic flow
61
+ // Full diagnostic flow — collect then dispatch through agent loop
53
62
  // ============================================================================
54
63
  async function handleFullDiagnostic(args, ctx, api) {
55
64
  const parsed = parseDebugArgs(args);
56
- // Show status indicator
57
65
  ctx.ui.setStatus("debug", "Collecting diagnostics...");
58
66
  try {
59
- // Collect all categories in parallel
60
67
  const [system, model, session, config, git, agent] = await Promise.allSettled([
61
68
  collectSystemInfo(),
62
69
  collectModelInfo(ctx),
@@ -74,35 +81,17 @@ async function handleFullDiagnostic(args, ctx, api) {
74
81
  agent: agent.status === "fulfilled" ? agent.value : { data: null, error: String(agent.reason) },
75
82
  };
76
83
  const data = sanitizeForLLM(raw);
77
- // Build user message for LLM
84
+ ctx.ui.setStatus("debug", undefined);
78
85
  const parts = [];
86
+ parts.push(`${DEBUG_TAG} Perform a three-layer diagnostic analysis.`);
79
87
  if (parsed.issueDescription) {
80
- parts.push(`## User-Reported Issue\n${parsed.issueDescription}\n`);
88
+ parts.push(`\nUser-Reported Issue: ${parsed.issueDescription}`);
81
89
  }
82
- parts.push("## Collected Diagnostics\n");
90
+ parts.push(`\nCollected Diagnostics:\n`);
83
91
  parts.push(formatDiagnosticData(data));
84
- const userMessage = parts.join("\n");
85
- // Call LLM with timeout
86
- const response = await Promise.race([
87
- ctx.completeSimple(DEBUG_SYSTEM_PROMPT, userMessage),
88
- new Promise((resolve) => setTimeout(() => resolve(undefined), DEBUG_TIMEOUT_MS)),
89
- ]);
90
- ctx.ui.setStatus("debug", undefined);
91
- if (response) {
92
- api.sendMessage({
93
- customType: DEBUG_MESSAGE_TYPE,
94
- content: response,
95
- display: true,
96
- });
97
- }
98
- else {
99
- // LLM unavailable — show raw data as fallback
100
- api.sendMessage({
101
- customType: DEBUG_MESSAGE_TYPE,
102
- content: `**LLM analysis unavailable** (timeout or no API key). Raw diagnostics:\n\n${formatDiagnosticData(data)}`,
103
- display: true,
104
- });
105
- }
92
+ const prompt = `${DEBUG_PROMPT_PREFIX}${Date.now()}]\n${parts.join("\n")}`;
93
+ pendingDiagnosticPrompt = prompt;
94
+ api.sendUserMessage(prompt, { deliverAs: "followUp" });
106
95
  }
107
96
  catch (error) {
108
97
  ctx.ui.setStatus("debug", undefined);
@@ -111,7 +100,7 @@ async function handleFullDiagnostic(args, ctx, api) {
111
100
  }
112
101
  }
113
102
  // ============================================================================
114
- // Quick subcommand — show raw data without LLM
103
+ // Quick subcommand — show raw data without agent loop
115
104
  // ============================================================================
116
105
  async function handleQuickSub(subcommand, ctx, api) {
117
106
  let result;
@@ -166,7 +155,6 @@ async function handleDebugCommand(args, ctx, api) {
166
155
  // Extension entry
167
156
  // ============================================================================
168
157
  export default async function debugExtension(api) {
169
- // Register debug message renderer (same pattern as btw)
170
158
  api.registerMessageRenderer(DEBUG_MESSAGE_TYPE, (message, _options, theme) => {
171
159
  const text = typeof message.content === "string"
172
160
  ? message.content
@@ -181,9 +169,75 @@ export default async function debugExtension(api) {
181
169
  container.addChild(box);
182
170
  return container;
183
171
  });
184
- // Register /debug command
172
+ api.on("before_agent_start", (event) => {
173
+ if (!isDebugPrompt(event.prompt))
174
+ return;
175
+ return { appendSystemPrompt: DEBUG_SYSTEM_PROMPT };
176
+ });
177
+ api.on("agent_end", () => {
178
+ if (pendingDiagnosticPrompt) {
179
+ pendingDiagnosticPrompt = undefined;
180
+ }
181
+ });
185
182
  api.registerCommand("debug", {
186
- description: "Run system diagnostics with three-layer analysis (/debug [env|session|model|<issue>])",
183
+ description: "Run system diagnostics (/debug [env|session|model|preferences|<issue>])",
187
184
  handler: (args, ctx) => handleDebugCommand(args, ctx, api),
188
185
  });
186
+ // Register /set-locale command
187
+ api.registerCommand("set-locale", {
188
+ description: "Set language preference (/set-locale zh|en)",
189
+ handler: async (args, ctx) => {
190
+ const trimmed = args.trim().toLowerCase();
191
+ if (trimmed !== "zh" && trimmed !== "en") {
192
+ ctx.ui.notify("Usage: /set-locale zh or /set-locale en", "info");
193
+ return;
194
+ }
195
+ // Get memory directory
196
+ const os = await import("node:os");
197
+ const fs = await import("node:fs");
198
+ const path = await import("node:path");
199
+ const memoryDir = process.env.NANOMEM_MEMORY_DIR || path.join(os.homedir(), ".nanopencil", "agent", "memory");
200
+ const prefsPath = path.join(memoryDir, "preferences.json");
201
+ try {
202
+ let prefs = [];
203
+ if (fs.existsSync(prefsPath)) {
204
+ prefs = JSON.parse(fs.readFileSync(prefsPath, "utf-8"));
205
+ }
206
+ // Check if language preference already exists
207
+ const existingIndex = prefs.findIndex((p) => {
208
+ const name = p.name || "";
209
+ return name.includes("用户偏好") || name.includes("language preference") || name.includes("locale");
210
+ });
211
+ const newPref = {
212
+ id: `set-locale-${Date.now()}`,
213
+ type: "preference",
214
+ name: trimmed === "zh" ? "用户偏好中文" : "Language Preference (English)",
215
+ summary: trimmed === "zh" ? "用户希望我用中文回复" : "User prefers English",
216
+ detail: trimmed === "zh" ? "用户通过 /set-locale 命令明确设置语言为中文" : "User explicitly set language to English via /set-locale command",
217
+ content: trimmed === "zh" ? "用户希望用中文回复" : "User prefers English responses",
218
+ tags: ["locale", "language", trimmed === "zh" ? "中文" : "english"],
219
+ importance: 10,
220
+ strength: 1000,
221
+ created: new Date().toISOString(),
222
+ eventTime: new Date().toISOString(),
223
+ accessCount: 0,
224
+ retention: "core",
225
+ salience: 10,
226
+ stability: "stable",
227
+ relations: [],
228
+ };
229
+ if (existingIndex >= 0) {
230
+ prefs[existingIndex] = newPref;
231
+ }
232
+ else {
233
+ prefs.push(newPref);
234
+ }
235
+ fs.writeFileSync(prefsPath, JSON.stringify(prefs, null, 2));
236
+ ctx.ui.notify(`Locale set to ${trimmed === "zh" ? "中文" : "English"}. Restart or run /debug preferences to verify.`, "info");
237
+ }
238
+ catch (error) {
239
+ ctx.ui.notify(`Failed to set locale: ${error}`, "error");
240
+ }
241
+ },
242
+ });
189
243
  }
@@ -7,13 +7,60 @@
7
7
  import type { ExtensionAPI } from "../../../core/extensions/types.js";
8
8
  declare function getFallbackOpeningLines(locale?: "en" | "zh"): string[];
9
9
  declare function getFallbackIdleLines(locale?: "en" | "zh"): string[];
10
/**
 * Mutable per-extension state for the presence feature: activity tracking,
 * opening/idle timers, an optional lazily-loaded memory-engine handle, and a
 * short history of recently emitted presence lines.
 *
 * NOTE(review): field semantics below are inferred from names in this
 * declaration file — confirm against the implementation where unclear.
 */
type PresenceState = {
    /** Epoch ms of the most recent user activity (presumably; confirm in impl). */
    lastActivityAt: number;
    /** Whether the idle reminder was already sent for the current idle span. */
    idleReminderSent: boolean;
    openingStartedAt?: number;
    openingSent: boolean;
    openingTimer?: ReturnType<typeof setTimeout>;
    idleTimer?: ReturnType<typeof setInterval>;
    /** Teardown callback for the input subscription, if one was registered. */
    unsubscribeInput?: () => void;
    /** Structurally-typed handle to the memory engine, when successfully loaded. */
    memEngine?: {
        getAllEntries(): Promise<{
            knowledge: Array<{
                type?: string;
                tags: string[];
                name?: string;
                summary?: string;
                detail?: string;
                content?: string;
            }>;
            lessons: Array<{
                type?: string;
                tags: string[];
                name?: string;
                summary?: string;
                detail?: string;
                content?: string;
                importance?: number;
            }>;
        }>;
        getAllEpisodes(): Promise<Array<{
            date?: string;
            consolidated?: boolean;
            endedAt?: string;
            startedAt?: string;
            summary?: string;
            userGoal?: string;
        }>>;
        searchEntries(query: string): Promise<Array<{
            type?: string;
            tags: string[];
        }>>;
    };
    /** Recently emitted presence lines — presumably to avoid repetition; confirm. */
    recentPresenceLines: string[];
    lastPresenceAt?: number;
    idleGenerating?: boolean;
};
10
55
  declare function resolveBundledPackageEntry(packageName: "mem-core" | "soul-core"): string | undefined;
11
56
  declare function importRuntimeModule<T>(moduleNames: string[], bundledPackageName?: "mem-core" | "soul-core"): Promise<T | undefined>;
57
+ declare function detectLanguageFromMemory(state: PresenceState): Promise<"en" | "zh" | undefined>;
12
58
  export default function presenceExtension(api: ExtensionAPI): Promise<void>;
13
59
  export declare const __testUtils: {
14
60
  getFallbackOpeningLines: typeof getFallbackOpeningLines;
15
61
  getFallbackIdleLines: typeof getFallbackIdleLines;
16
62
  resolveBundledPackageEntry: typeof resolveBundledPackageEntry;
17
63
  importRuntimeModule: typeof importRuntimeModule;
64
+ detectLanguageFromMemory: typeof detectLanguageFromMemory;
18
65
  };
19
66
  export {};
@@ -153,10 +153,6 @@ function clearTimers(state) {
153
153
  state.unsubscribeInput?.();
154
154
  state.unsubscribeInput = undefined;
155
155
  }
156
- function getMemoryDir() {
157
- // Use the same memory directory as the main app
158
- return process.env.NANOMEM_MEMORY_DIR || join(homedir(), ".nanomem", "memory");
159
- }
160
156
  async function initMemEngine(state) {
161
157
  if (state.memEngine)
162
158
  return;
@@ -176,6 +172,18 @@ async function initMemEngine(state) {
176
172
  state.memEngine = undefined;
177
173
  }
178
174
  }
175
/**
 * Resolve the memory directory shared with the main app.
 * Priority: NANOMEM_MEMORY_DIR env var, then the nanopencil default location
 * (when it exists on disk), then the legacy nanomem path.
 */
function getMemoryDir() {
    const fromEnv = process.env.NANOMEM_MEMORY_DIR;
    if (fromEnv)
        return fromEnv;
    const preferred = join(homedir(), ".nanopencil", "agent", "memory");
    return existsSync(preferred) ? preferred : join(homedir(), ".nanomem", "memory");
}
179
187
  function getProject() {
180
188
  const parts = process.cwd().split("/").filter(Boolean);
181
189
  return parts.length >= 2
@@ -203,21 +211,32 @@ async function detectLanguageFromMemory(state) {
203
211
  }
204
212
  }
205
213
  catch { /* ignore */ }
214
+ let zhScore = 0;
215
+ let enScore = 0;
216
+ const zhTerms = "(中文|chinese|zh-hans|mandarin|普通话)";
217
+ const enTerms = "(英文|english|en-us)";
218
+ const negPrefix = "(?:don't|do not|no|not|不用|不要|别|不想用)";
219
+ const useWords = "(?:\\s+use|\\s+using|\\s+说|\\s+讲|\\s+用)?";
220
+ const zhNegative = new RegExp(`${negPrefix}${useWords}\\s*${zhTerms}`);
221
+ const enNegative = new RegExp(`${negPrefix}${useWords}\\s*${enTerms}`);
222
+ const zhPositive = new RegExp(zhTerms);
223
+ const enPositive = new RegExp(enTerms);
206
224
  // Check preference content for language indicators
207
225
  for (const pref of preferences) {
208
226
  const text = `${pref.name || ""} ${pref.summary || ""} ${pref.detail || ""} ${pref.content || ""}`.toLowerCase();
209
- // Check for Chinese preference
210
- if (/中文|chinese|zh-hans|mandarin|普通话/.test(text)) {
211
- if (!text.includes("don't") && !text.includes("no chinese") && !text.includes("不用中文")) {
212
- return "zh";
213
- }
214
- }
215
- // Check for explicit English preference
216
- if (/英文|english|en-us/.test(text)) {
217
- if (!text.includes("don't") && !text.includes("no english") && !text.includes("不用英文")) {
218
- return "en";
219
- }
220
- }
227
+ const hasZh = zhPositive.test(text);
228
+ const hasEn = enPositive.test(text);
229
+ const noZh = zhNegative.test(text);
230
+ const noEn = enNegative.test(text);
231
+ if (hasZh && !noZh)
232
+ zhScore += 2;
233
+ if (hasEn && !noEn)
234
+ enScore += 2;
235
+ // Cross-language hints: "don't use Chinese" slightly supports English, and vice versa.
236
+ if (noZh)
237
+ enScore += 1;
238
+ if (noEn)
239
+ zhScore += 1;
221
240
  }
222
241
  // Check recent episodes for language patterns
223
242
  const episodes = await state.memEngine.getAllEpisodes();
@@ -233,8 +252,12 @@ async function detectLanguageFromMemory(state) {
233
252
  englishContent++;
234
253
  }
235
254
  if (chineseContent > englishContent)
236
- return "zh";
255
+ zhScore += 1;
237
256
  if (englishContent > chineseContent && englishContent > 2)
257
+ enScore += 1;
258
+ if (zhScore > enScore && zhScore > 0)
259
+ return "zh";
260
+ if (enScore > zhScore && enScore > 0)
238
261
  return "en";
239
262
  return undefined;
240
263
  }
@@ -437,9 +460,10 @@ function getLastUserMessage(ctx) {
437
460
  const entry = entries[i];
438
461
  if (entry.type !== "message")
439
462
  continue;
440
- if (entry.role !== "user")
463
+ const message = entry.message;
464
+ if (!message || message.role !== "user")
441
465
  continue;
442
- const c = entry.content;
466
+ const c = message.content;
443
467
  if (typeof c === "string")
444
468
  return c;
445
469
  if (Array.isArray(c)) {
@@ -705,4 +729,5 @@ export const __testUtils = {
705
729
  getFallbackIdleLines,
706
730
  resolveBundledPackageEntry,
707
731
  importRuntimeModule,
732
+ detectLanguageFromMemory,
708
733
  };
@@ -20,6 +20,10 @@ pencil --nosal -p "your prompt"
20
20
 
21
21
  When `--nosal` is set, all hooks return early and zero work is performed.
22
22
 
23
+ ## Terminal compatibility (Warp, block UIs)
24
+
25
+ SAL builds a **terrain snapshot** of the workspace (walk + read DIP headers). That work is **asynchronous and periodically yields to the event loop** so the TUI can flush user input and status lines to the terminal while indexing runs. If you still see UI glitches in a specific terminal, use `--nosal` to confirm whether SAL is involved, then file an issue with `TERM_PROGRAM`, Warp version, and repro steps.
26
+
23
27
  ## Slash commands
24
28
 
25
29
  | Command | Purpose |
@@ -36,8 +36,11 @@ const TIPS = [
36
36
  isRelevant: () => true,
37
37
  },
38
38
  ];
39
- // Track last shown tip per session (sessionId -> tipId)
40
- const lastShownTip = new Map();
39
+ const TIP_MIN_DISPLAY_MS = 8000;
40
+ // Track last shown session index per tip (sessionId -> tipId -> sessionNum)
41
+ const tipHistoryBySession = new Map();
42
+ // Keep the current active tip stable for a minimum duration to avoid flicker.
43
+ const activeTipBySession = new Map();
41
44
  const sessionTipCount = new Map();
42
45
  /**
43
46
  * Get the next relevant tip to show on the spinner.
@@ -46,16 +49,24 @@ const sessionTipCount = new Map();
46
49
  export function getTipToShow(sessionId) {
47
50
  const now = Date.now();
48
51
  const sessionNum = sessionTipCount.get(sessionId) ?? 0;
52
+ const active = activeTipBySession.get(sessionId);
53
+ if (active && now < active.expiresAt) {
54
+ return active.content;
55
+ }
56
+ let tipHistory = tipHistoryBySession.get(sessionId);
57
+ if (!tipHistory) {
58
+ tipHistory = new Map();
59
+ tipHistoryBySession.set(sessionId, tipHistory);
60
+ }
49
61
  // Filter to relevant tips
50
62
  const relevantTips = TIPS.filter((tip) => {
51
63
  if (tip.isRelevant && !tip.isRelevant())
52
64
  return false;
53
- const last = lastShownTip.get(sessionId);
54
- if (!last)
55
- return true;
56
65
  // Check cooldown (using sessions as cooldown unit)
57
66
  const cooldown = tip.cooldownSessions ?? 3;
58
- if (last.tipId === tip.id && sessionNum - last.sessionNum < cooldown) {
67
+ const lastSessionNum = tipHistory.get(tip.id);
68
+ if (lastSessionNum !== undefined &&
69
+ sessionNum - lastSessionNum < cooldown) {
59
70
  return false;
60
71
  }
61
72
  return true;
@@ -66,8 +77,7 @@ export function getTipToShow(sessionId) {
66
77
  let oldest = null;
67
78
  let oldestTime = Infinity;
68
79
  for (const tip of relevantTips) {
69
- const last = lastShownTip.get(sessionId);
70
- const lastTime = last?.tipId === tip.id ? last.shownAt : 0;
80
+ const lastTime = tipHistory.get(tip.id) ?? -Infinity;
71
81
  if (lastTime < oldestTime) {
72
82
  oldestTime = lastTime;
73
83
  oldest = tip;
@@ -76,8 +86,15 @@ export function getTipToShow(sessionId) {
76
86
  if (!oldest)
77
87
  return null;
78
88
  // Record this tip as shown
79
- lastShownTip.set(sessionId, { tipId: oldest.id, shownAt: now, sessionNum });
80
- return typeof oldest.content === "function" ? oldest.content() : oldest.content;
89
+ tipHistory.set(oldest.id, sessionNum);
90
+ const content = typeof oldest.content === "function" ? oldest.content() : oldest.content;
91
+ activeTipBySession.set(sessionId, {
92
+ tipId: oldest.id,
93
+ content,
94
+ shownAt: now,
95
+ expiresAt: now + TIP_MIN_DISPLAY_MS,
96
+ });
97
+ return content;
81
98
  }
82
99
  /**
83
100
  * Reset tip cooldown when session count increments.
@@ -86,11 +103,13 @@ export function getTipToShow(sessionId) {
86
103
/**
 * Advance the per-session tip counter (cooldowns are measured in sessions)
 * and drop the pinned active tip so the next spinner render may rotate.
 */
export function onSessionIncrement(sessionId) {
    const previous = sessionTipCount.get(sessionId) ?? 0;
    sessionTipCount.set(sessionId, previous + 1);
    activeTipBySession.delete(sessionId);
}
90
108
  /**
91
109
  * Clear cooldown tracking for a session (e.g., on session reset).
92
110
  */
93
111
/**
 * Forget all tip-tracking state for a session (e.g. on session reset):
 * the tip counter, the per-tip cooldown history, and any pinned active tip.
 */
export function resetTipsForSession(sessionId) {
    for (const store of [sessionTipCount, tipHistoryBySession, activeTipBySession]) {
        store.delete(sessionId);
    }
}
@@ -12831,6 +12831,23 @@ export declare const MODELS: {
12831
12831
  contextWindow: number;
12832
12832
  maxTokens: number;
12833
12833
  };
12834
+ readonly "moonshotai/kimi-k2.6": {
12835
+ id: string;
12836
+ name: string;
12837
+ api: "anthropic-messages";
12838
+ provider: string;
12839
+ baseUrl: string;
12840
+ reasoning: true;
12841
+ input: ("text" | "image")[];
12842
+ cost: {
12843
+ input: number;
12844
+ output: number;
12845
+ cacheRead: number;
12846
+ cacheWrite: number;
12847
+ };
12848
+ contextWindow: number;
12849
+ maxTokens: number;
12850
+ };
12834
12851
  readonly "nvidia/nemotron-nano-12b-v2-vl": {
12835
12852
  id: string;
12836
12853
  name: string;
@@ -7756,7 +7756,7 @@ export const MODELS = {
7756
7756
  cacheRead: 0.024999999999999998,
7757
7757
  cacheWrite: 0.08333333333333334,
7758
7758
  },
7759
- contextWindow: 1048576,
7759
+ contextWindow: 1000000,
7760
7760
  maxTokens: 8192,
7761
7761
  },
7762
7762
  "google/gemini-2.0-flash-lite-001": {
@@ -12640,6 +12640,23 @@ export const MODELS = {
12640
12640
  contextWindow: 262114,
12641
12641
  maxTokens: 262114,
12642
12642
  },
12643
+ "moonshotai/kimi-k2.6": {
12644
+ id: "moonshotai/kimi-k2.6",
12645
+ name: "Kimi K2.6",
12646
+ api: "anthropic-messages",
12647
+ provider: "vercel-ai-gateway",
12648
+ baseUrl: "https://ai-gateway.vercel.sh",
12649
+ reasoning: true,
12650
+ input: ["text", "image"],
12651
+ cost: {
12652
+ input: 0.95,
12653
+ output: 4,
12654
+ cacheRead: 0.16,
12655
+ cacheWrite: 0,
12656
+ },
12657
+ contextWindow: 262000,
12658
+ maxTokens: 262000,
12659
+ },
12643
12660
  "nvidia/nemotron-nano-12b-v2-vl": {
12644
12661
  id: "nvidia/nemotron-nano-12b-v2-vl",
12645
12662
  name: "Nvidia Nemotron Nano 12B V2 VL",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@pencil-agent/nano-pencil",
3
- "version": "1.13.4",
3
+ "version": "1.13.5",
4
4
  "description": "CLI writing agent with read, bash, edit, write tools and session management. Supports DashScope Coding Plan. Soul enabled by default for AI personality evolution.",
5
5
  "type": "module",
6
6
  "bin": {