@blockrun/franklin 3.15.3 → 3.15.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -83,7 +83,13 @@ A user approving an action once does NOT mean they approve it in all contexts. M
83
83
  }
84
84
  function getOutputEfficiencySection() {
85
85
  return `# Output Efficiency
86
- Go straight to the point. Lead with the action, not the reasoning. Do not restate what the user said. Do not narrate your actions ("Let me read the file...", "I'll now search for..."). Just call the tools.
86
+ Go straight to the point. Lead with the action, not the reasoning. Do not restate what the user said.
87
+
88
+ **No pre-tool narration.** Do NOT write things like "让我先 X...", "Let me read the file...", "I'll now search for...", "好的,让我研究一下...", "现在我来 X", "OK now I have everything I need", "完美!", "好,现在我完全明白了". These phrases are internal monologue — the user can see your tool calls directly and does not need step-by-step play-by-play. Just call the tool.
89
+
90
+ The exception: a single short sentence between tool calls is fine when it tells the user something they would otherwise miss — a finding ("Build passes — moving on to tests."), a course correction ("That approach won't work — switching to X."), or a one-line status before a long-running operation. One sentence per update is enough.
91
+
92
+ **No internal-language leakage.** Always write your visible response in the same language the user is using. If your private reasoning happens in a different language (English while the user writes Chinese, Korean while the user writes Chinese, etc.), do NOT let phrases from that language appear in the user-facing text. The user should never see a stray "좋아", "OK now", or "Alright" in the middle of a Chinese reply.
87
93
 
88
94
  Focus text output on:
89
95
  - Decisions that need the user's input
@@ -97,7 +103,7 @@ function getToneAndStyleSection() {
97
103
  - Only use emojis if the user explicitly requests it. Avoid using emojis in all communication unless asked.
98
104
  - Your responses should be short and concise.
99
105
  - When referencing specific functions or pieces of code include the pattern file_path:line_number to allow the user to easily navigate to the source code location.
100
- - Do not use a colon before tool calls. Your tool calls may not be shown directly in the output, so text like "Let me read the file:" followed by a read tool call should just be "Let me read the file." with a period.`;
106
+ - See "Output Efficiency" above for the rules on pre-tool narration and language consistency. Those override any habit you may have of writing "Let me X..." before a tool call.`;
101
107
  }
102
108
  function getGitProtocolSection() {
103
109
  return `# Git Protocol
@@ -289,13 +289,23 @@ function anySignal(signals) {
289
289
  export function renderGroundingFollowup(result) {
290
290
  if (result.verdict === 'GROUNDED' || result.verdict === 'SKIPPED')
291
291
  return '';
292
+ // Headers state the situation directly. Old phrasing told the user to "re-run
293
+ // with the suggested tools" which both put the burden on them and exposed
294
+ // FRANKLIN_NO_EVAL as a one-flag escape hatch from the quality gate. New
295
+ // phrasing names the gap and offers a concrete next action.
292
296
  const header = result.verdict === 'UNGROUNDED'
293
- ? '⚠️ **Grounding check failed** — the previous answer relied on memory where a tool call was available:'
294
- : '⚠️ **Grounding check flagged some claims** re-run with the suggested tools for a verified answer:';
297
+ ? '⚠️ **Unverified answer** — the model produced specific claims without calling any tool to back them up:'
298
+ : '⚠️ **Partial verification** — some claims in the answer aren\'t backed by tool output:';
295
299
  const body = result.issues.length > 0
296
300
  ? result.issues.map(i => `- ${i}`).join('\n')
297
- : '(evaluator returned no specific items — check the transcript manually)';
298
- return `\n\n${header}\n${body}\n\n_Ask again with an explicit instruction to call the tools, or disable these checks with \`FRANKLIN_NO_EVAL=1\`._`;
301
+ : '_(evaluator returned no specific items — check the transcript manually)_';
302
+ // Action line: tell the user exactly how to follow up, in their own voice.
303
+ // No env-var escape hatch in the user-facing text — that's a config concern,
304
+ // not a "make this warning go away" concern.
305
+ const action = result.verdict === 'UNGROUNDED'
306
+ ? '\n\n_Reply "verify" to re-run with required tool use, or accept the answer as-is._'
307
+ : '\n\n_Reply "verify" to fact-check the flagged claims, or accept the answer as-is._';
308
+ return `\n\n${header}\n${body}${action}`;
299
309
  }
300
310
  /**
301
311
  * Build a synthetic user message that instructs the agent to retry with the
@@ -651,13 +651,27 @@ export async function interactiveSession(config, getUserInput, onEvent, onAbortR
651
651
  // Circuit breaker: stop retrying after 3 consecutive failures
652
652
  if (compactFailures < 3) {
653
653
  try {
654
+ // Capture pre-compaction size so we can surface "saved X%" to the
655
+ // user. Without this, the per-turn input-token count would silently
656
+ // drop from e.g. 215K → 9K and look like a metric bug.
657
+ const beforeTokens = estimateHistoryTokens(history);
654
658
  const { history: compacted, compacted: didCompact } = await autoCompactIfNeeded(history, config.model, client, config.debug);
655
659
  if (didCompact) {
656
660
  replaceHistory(history, compacted);
657
661
  resetTokenAnchor();
658
662
  compactFailures = 0;
663
+ const afterTokens = estimateHistoryTokens(history);
664
+ const pct = beforeTokens > 0
665
+ ? Math.round((1 - afterTokens / beforeTokens) * 100)
666
+ : 0;
667
+ // Visible to the user — explains the upcoming token-count drop
668
+ // in the next turn footer and frames it as a feature, not a bug.
669
+ onEvent({
670
+ kind: 'text_delta',
671
+ text: `\n*🗜 Auto-compacted: ~${(beforeTokens / 1000).toFixed(0)}K → ~${(afterTokens / 1000).toFixed(0)}K tokens (saved ${pct}%)*\n\n`,
672
+ });
659
673
  if (config.debug) {
660
- console.error(`[franklin] History compacted: ~${estimateHistoryTokens(history)} tokens`);
674
+ console.error(`[franklin] History compacted: ~${afterTokens} tokens`);
661
675
  }
662
676
  }
663
677
  }
@@ -110,8 +110,26 @@ const REASONING_KEYWORDS = [
110
110
  'formally', 'mathematical', 'proof', 'logically', '证明', '定理', '推导',
111
111
  ];
112
112
  const SIMPLE_KEYWORDS = [
113
- 'what is', 'define', 'translate', 'hello', 'yes or no', 'capital of',
114
- 'how old', 'who is', 'when was', '什么是', '翻译', '你好',
113
+ // True simple intents: greeting, definition lookup, translation. Factual
114
+ // lookups ("who is", "when was", "capital of") were moved to RESEARCH below
115
+ // because they look easy but require external recall — sending them to
116
+ // SIMPLE-tier models reliably produces hallucinated subscriber counts,
117
+ // birth years, etc. that the post-hoc grounding check then has to flag.
118
+ 'define', 'translate', 'hello', 'yes or no', '翻译', '你好',
119
+ ];
120
+ // Research / fact-retrieval intent: questions whose correct answer depends
121
+ // on data the model can't reliably recall from weights — current statistics,
122
+ // latest news, comparisons, "best" rankings, identities of people/orgs.
123
+ // Bumping tier here pushes them to a MEDIUM/COMPLEX model that has
124
+ // WebSearch in its toolset, instead of letting a cheap text-only model
125
+ // fabricate plausible-looking numbers.
126
+ const RESEARCH_KEYWORDS = [
127
+ 'who is', 'who was', 'when was', 'when did', 'what is the capital',
128
+ 'how old', 'how many', 'how much',
129
+ 'best', 'top ', 'most popular', 'compare', 'vs ', ' vs.',
130
+ 'latest', 'current', 'recent', 'today', 'now',
131
+ 'subscribers', 'members', 'followers', 'market cap', 'price of',
132
+ '最好的', '最新', '最近', '现在', '当前', '排名', '对比',
115
133
  ];
116
134
  const TECHNICAL_KEYWORDS = [
117
135
  'algorithm', 'optimize', 'architecture', 'distributed', 'kubernetes',
@@ -130,6 +148,11 @@ const AGENTIC_URL_PATTERNS = [
130
148
  /github\.com/i, /gitlab\.com/i, /bitbucket\.org/i,
131
149
  /npmjs\.com/i, /pypi\.org/i, /crates\.io/i,
132
150
  /stackoverflow\.com/i, /docs\.\w+/i,
151
+ // Media URLs need the model to actually fetch+understand content,
152
+ // not just regurgitate from weights. Bumping these prevents the
153
+ // "user pastes 3 YouTube links → SIMPLE-tier model gives up" path.
154
+ /youtube\.com/i, /youtu\.be/i,
155
+ /twitter\.com/i, /x\.com/i,
133
156
  ];
134
157
  function countMatches(text, keywords) {
135
158
  const lower = text.toLowerCase();
@@ -180,6 +203,17 @@ function classifyRequest(prompt, tokenCount) {
180
203
  score -= 0.25;
181
204
  signals.push('simple');
182
205
  }
206
+ // Research / fact-lookup detection (weight: +0.30). Bumps tier upward so
207
+ // questions like "best subreddit", "current price of X", "how many members"
208
+ // route to a model that can actually call WebSearch instead of guessing
209
+ // from weights. Capped at one keyword's worth — research questions
210
+ // typically signal with one phrase, and stacking would push trivial
211
+ // questions into REASONING.
212
+ const researchMatches = countMatches(prompt, RESEARCH_KEYWORDS);
213
+ if (researchMatches >= 1) {
214
+ score += 0.30;
215
+ signals.push('research');
216
+ }
183
217
  // Technical complexity (weight: 0.15) - increased
184
218
  const techMatches = countMatches(prompt, TECHNICAL_KEYWORDS);
185
219
  if (techMatches >= 2) {
@@ -59,6 +59,35 @@ async function execute(input, ctx) {
59
59
  return { output: `Error: only http/https URLs are supported`, isError: true };
60
60
  }
61
61
  const maxLen = Math.min(max_length ?? DEFAULT_MAX_LENGTH, MAX_BODY_BYTES);
62
+ // ── YouTube special case ──
63
+ // Plain HTML fetch on a youtube.com URL returns the SPA bundle (a wall of
64
+ // minified JS), which is useless to the model and was the failure mode
65
+ // behind "I can't access YouTube" responses. Auto-redirect to the caption
66
+ // track so the model gets the actual spoken content. Transparent to
67
+ // callers — same WebFetch tool, the right thing happens for video URLs.
68
+ const videoId = extractYouTubeVideoId(parsed);
69
+ if (videoId) {
70
+ const ytKey = cacheKey(`youtube-transcript:${videoId}`, maxLen);
71
+ const ytCached = getCached(ytKey);
72
+ if (ytCached)
73
+ return { output: ytCached + '\n\n(cached)' };
74
+ const transcript = await fetchYouTubeTranscript(videoId, ctx.abortSignal);
75
+ if (transcript.ok) {
76
+ const truncated = transcript.text.length > maxLen
77
+ ? transcript.text.slice(0, maxLen) + '\n\n... (transcript truncated)'
78
+ : transcript.text;
79
+ const output = `URL: ${url}\nSource: YouTube auto-captions (videoId=${videoId}, lang=${transcript.lang})\n\n${truncated}`;
80
+ setCached(ytKey, output);
81
+ return { output };
82
+ }
83
+ // Fall through to raw HTML fetch only if transcript path failed entirely;
84
+ // surface why so the model can decide what to do (e.g., suggest a manual
85
+ // step) instead of silently scraping JS.
86
+ return {
87
+ output: `YouTube transcript unavailable for ${url} — ${transcript.reason}. The video may have captions disabled or be region-locked.`,
88
+ isError: true,
89
+ };
90
+ }
62
91
  const key = cacheKey(url, maxLen);
63
92
  // Check cache first
64
93
  const cached = getCached(key);
@@ -147,6 +176,143 @@ async function execute(input, ctx) {
147
176
  ctx.abortSignal.removeEventListener('abort', onAbort);
148
177
  }
149
178
  }
179
+ // ─── YouTube transcript fetcher ─────────────────────────────────────────────
180
+ // Fetches auto-generated or uploaded captions for a YouTube video by parsing
181
+ // the watch-page's `ytInitialPlayerResponse` JSON. Pure HTTP, no deps. Saves
182
+ // us from the alternative (shelling out to yt-dlp, which the user may not
183
+ // have installed) and from leaving the model to guess at JS bundles.
184
+ function extractYouTubeVideoId(parsed) {
185
+ const host = parsed.hostname.replace(/^www\./, '');
186
+ if (host === 'youtu.be') {
187
+ return parsed.pathname.slice(1).split('/')[0] || null;
188
+ }
189
+ if (host === 'youtube.com' || host === 'm.youtube.com' || host === 'music.youtube.com') {
190
+ if (parsed.pathname === '/watch') {
191
+ return parsed.searchParams.get('v');
192
+ }
193
+ // /shorts/{id}, /live/{id}, /embed/{id}
194
+ const shortsMatch = parsed.pathname.match(/^\/(?:shorts|live|embed)\/([A-Za-z0-9_-]{6,})/);
195
+ if (shortsMatch)
196
+ return shortsMatch[1];
197
+ }
198
+ return null;
199
+ }
200
+ async function fetchYouTubeTranscript(videoId, abortSignal) {
201
+ const watchUrl = `https://www.youtube.com/watch?v=${encodeURIComponent(videoId)}&hl=en`;
202
+ const ctrl = new AbortController();
203
+ const timer = setTimeout(() => ctrl.abort(), 20_000);
204
+ const onAbort = () => ctrl.abort();
205
+ abortSignal.addEventListener('abort', onAbort, { once: true });
206
+ try {
207
+ const res = await fetch(watchUrl, {
208
+ signal: ctrl.signal,
209
+ headers: {
210
+ // Pretend to be a desktop browser so YouTube serves the watch page
211
+ // with the player config inlined. The default Node fetch UA gets a
212
+ // consent-redirect HTML stub that has no caption metadata.
213
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
214
+ 'Accept-Language': 'en-US,en;q=0.9',
215
+ },
216
+ redirect: 'follow',
217
+ });
218
+ if (!res.ok) {
219
+ return { ok: false, reason: `watch page HTTP ${res.status}` };
220
+ }
221
+ const html = await res.text();
222
+ // ytInitialPlayerResponse can be assigned in two shapes; both occur in
223
+ // practice across mobile vs desktop responses.
224
+ const match = html.match(/var\s+ytInitialPlayerResponse\s*=\s*(\{.+?\})\s*;\s*var\s+meta/s) ||
225
+ html.match(/ytInitialPlayerResponse\s*=\s*(\{.+?\});/s);
226
+ if (!match) {
227
+ return { ok: false, reason: 'could not locate ytInitialPlayerResponse in watch page' };
228
+ }
229
+ let player;
230
+ try {
231
+ player = JSON.parse(match[1]);
232
+ }
233
+ catch {
234
+ return { ok: false, reason: 'ytInitialPlayerResponse JSON parse failed' };
235
+ }
236
+ const tracks = player.captions?.playerCaptionsTracklistRenderer?.captionTracks ?? [];
237
+ if (tracks.length === 0) {
238
+ return { ok: false, reason: 'no caption tracks (video has captions disabled)' };
239
+ }
240
+ // Prefer English; fall back to first available; auto-captions are fine.
241
+ const track = tracks.find(t => (t.languageCode || '').startsWith('en')) ||
242
+ tracks[0];
243
+ if (!track?.baseUrl) {
244
+ return { ok: false, reason: 'caption track has no baseUrl' };
245
+ }
246
+ // Request the JSON3 format — easier to parse than the default XML and
247
+ // YouTube serves it on the same endpoint with a query flag.
248
+ const captionUrl = track.baseUrl + (track.baseUrl.includes('fmt=') ? '' : '&fmt=json3');
249
+ const capRes = await fetch(captionUrl, {
250
+ signal: ctrl.signal,
251
+ headers: { 'User-Agent': 'Mozilla/5.0' },
252
+ });
253
+ if (!capRes.ok) {
254
+ return { ok: false, reason: `caption fetch HTTP ${capRes.status}` };
255
+ }
256
+ const capRaw = await capRes.text();
257
+ const text = parseJson3Captions(capRaw) || parseXmlCaptions(capRaw);
258
+ if (!text) {
259
+ return { ok: false, reason: 'caption response had no readable text segments' };
260
+ }
261
+ return { ok: true, text, lang: track.languageCode || 'unknown' };
262
+ }
263
+ catch (err) {
264
+ if (abortSignal.aborted) {
265
+ return { ok: false, reason: 'request aborted' };
266
+ }
267
+ return {
268
+ ok: false,
269
+ reason: `fetch error: ${err instanceof Error ? err.message : String(err)}`,
270
+ };
271
+ }
272
+ finally {
273
+ clearTimeout(timer);
274
+ abortSignal.removeEventListener('abort', onAbort);
275
+ }
276
+ }
277
+ function parseJson3Captions(raw) {
278
+ try {
279
+ const obj = JSON.parse(raw);
280
+ if (!obj.events)
281
+ return '';
282
+ const out = [];
283
+ for (const ev of obj.events) {
284
+ if (!ev.segs)
285
+ continue;
286
+ for (const seg of ev.segs) {
287
+ if (seg.utf8)
288
+ out.push(seg.utf8);
289
+ }
290
+ }
291
+ // Collapse the per-word fragments YouTube emits into readable lines.
292
+ return out.join('').replace(/\n+/g, ' ').replace(/\s{2,}/g, ' ').trim();
293
+ }
294
+ catch {
295
+ return '';
296
+ }
297
+ }
298
+ function parseXmlCaptions(raw) {
299
+ // Fallback for older XML format. Regex-only parse — captions text is
300
+ // simple enough that pulling in xml2js for this would be overkill.
301
+ const matches = [...raw.matchAll(/<text[^>]*>([\s\S]*?)<\/text>/g)];
302
+ if (matches.length === 0)
303
+ return '';
304
+ return matches
305
+ .map(m => m[1]
306
+ .replace(/&amp;/g, '&')
307
+ .replace(/&lt;/g, '<')
308
+ .replace(/&gt;/g, '>')
309
+ .replace(/&quot;/g, '"')
310
+ .replace(/&#39;/g, "'")
311
+ .replace(/\s+/g, ' ')
312
+ .trim())
313
+ .filter(Boolean)
314
+ .join(' ');
315
+ }
150
316
  function stripHtml(html) {
151
317
  return html
152
318
  // Remove non-content elements
package/dist/ui/app.js CHANGED
@@ -204,6 +204,7 @@ function RunCodeApp({ initialModel, workDir, walletAddress, walletBalance, chain
204
204
  const turnModelRef = useRef(undefined);
205
205
  const turnTierRef = useRef(undefined);
206
206
  const turnSavingsRef = useRef(undefined);
207
+ const turnCtxPctRef = useRef(undefined);
207
208
  const queuedInputsRef = useRef([]);
208
209
  // Keep refs in sync so memoized event handlers can read current values
209
210
  streamTextRef.current = streamText;
@@ -241,6 +242,7 @@ function RunCodeApp({ initialModel, workDir, walletAddress, walletBalance, chain
241
242
  model: turnModelRef.current,
242
243
  tier: turnTierRef.current,
243
244
  savings: turnSavingsRef.current,
245
+ ctxPct: turnCtxPctRef.current,
244
246
  thinkMs,
245
247
  thinkChars,
246
248
  }];
@@ -423,6 +425,7 @@ function RunCodeApp({ initialModel, workDir, walletAddress, walletBalance, chain
423
425
  turnModelRef.current = undefined;
424
426
  turnTierRef.current = undefined;
425
427
  turnSavingsRef.current = undefined;
428
+ turnCtxPctRef.current = undefined;
426
429
  setWaiting(true);
427
430
  setReady(false);
428
431
  // Pass through to agent loop to clear the actual conversation history
@@ -444,6 +447,7 @@ function RunCodeApp({ initialModel, workDir, walletAddress, walletBalance, chain
444
447
  turnModelRef.current = undefined;
445
448
  turnTierRef.current = undefined;
446
449
  turnSavingsRef.current = undefined;
450
+ turnCtxPctRef.current = undefined;
447
451
  onSubmit(lastPrompt);
448
452
  return;
449
453
  default:
@@ -494,6 +498,7 @@ function RunCodeApp({ initialModel, workDir, walletAddress, walletBalance, chain
494
498
  turnModelRef.current = undefined;
495
499
  turnTierRef.current = undefined;
496
500
  turnSavingsRef.current = undefined;
501
+ turnCtxPctRef.current = undefined;
497
502
  onSubmit(trimmed);
498
503
  }, [ready, currentModel, totalCost, onSubmit, onModelChange, onAbort, onExit, exit, lastPrompt, inputHistory, showStatus]);
499
504
  // Mouse support — OFF by default because Node stdin is shared: mouse escape
@@ -658,8 +663,10 @@ function RunCodeApp({ initialModel, workDir, walletAddress, walletBalance, chain
658
663
  turnTierRef.current = event.tier;
659
664
  if (event.savings !== undefined)
660
665
  turnSavingsRef.current = event.savings;
661
- if (event.contextPct !== undefined)
666
+ if (event.contextPct !== undefined) {
662
667
  setContextPct(event.contextPct);
668
+ turnCtxPctRef.current = event.contextPct;
669
+ }
663
670
  break;
664
671
  }
665
672
  case 'turn_done': {
@@ -757,9 +764,13 @@ function RunCodeApp({ initialModel, workDir, walletAddress, walletBalance, chain
757
764
  const isUserMsg = r.key.startsWith('user-');
758
765
  return (_jsxs(Box, { flexDirection: "column", children: [!isUserMsg && (r.tokens.input > 0 || r.tokens.output > 0) && (_jsx(Box, { marginTop: 1, children: _jsx(Text, { dimColor: true, children: '─'.repeat(60) }) })), isUserMsg && (_jsx(Box, { marginTop: 1 })), !isUserMsg && r.thinkMs !== undefined && r.thinkMs >= 500 && (_jsx(Box, { paddingLeft: 2, children: _jsxs(Text, { color: "magenta", dimColor: true, children: ["\u273B Thought for ", (r.thinkMs / 1000).toFixed(1), "s", r.thinkChars && r.thinkChars > 20
759
766
  ? ` · ~${Math.round(r.thinkChars / 4)} tokens`
760
- : ''] }) })), _jsx(Box, { paddingLeft: isUserMsg ? 0 : 2, children: _jsx(Text, { wrap: "wrap", children: renderMarkdown(r.text) }) }), (r.tokens.input > 0 || r.tokens.output > 0) && (_jsx(Box, { marginLeft: 2, marginBottom: 1, children: _jsxs(Text, { dimColor: true, children: [r.tier && _jsxs(Text, { color: "cyan", children: ["[", r.tier, "] "] }), r.model ? shortModelName(r.model) : '', r.model ? ' · ' : '', r.tokens.calls > 0 && r.tokens.input === 0
767
+ : ''] }) })), _jsx(Box, { paddingLeft: isUserMsg ? 0 : 2, children: _jsx(Text, { wrap: "wrap", children: renderMarkdown(r.text) }) }), (r.tokens.input > 0 || r.tokens.output > 0) && (_jsx(Box, { marginLeft: 2, marginBottom: 1, children: _jsxs(Text, { dimColor: true, children: [r.tier
768
+ ? _jsxs(Text, { color: "cyan", children: ["[", r.tier, "] "] })
769
+ : (r.model ? _jsx(Text, { dimColor: true, children: "[direct] " }) : null), r.model ? shortModelName(r.model) : '', r.model ? ' · ' : '', r.tokens.calls > 0 && r.tokens.input === 0
761
770
  ? `${r.tokens.calls} calls`
762
- : `${formatTokens(r.tokens.input)} in / ${formatTokens(r.tokens.output)} out`, r.cost > 0 ? ` · $${r.cost.toFixed(4)}` : '', r.savings !== undefined && r.savings > 0 ? _jsxs(Text, { color: "green", children: [" saved ", Math.round(r.savings * 100), "%"] }) : ''] }) }))] }, r.key));
771
+ : `${formatTokens(r.tokens.input)} in / ${formatTokens(r.tokens.output)} out`, r.cost > 0 ? ` · $${r.cost.toFixed(4)}` : '', r.savings !== undefined && r.savings > 0 ? _jsxs(Text, { color: "green", children: [" saved ", Math.round(r.savings * 100), "%"] }) : '', r.ctxPct !== undefined && r.ctxPct >= 5
772
+ ? _jsxs(Text, { color: r.ctxPct >= 80 ? 'red' : r.ctxPct >= 50 ? 'yellow' : undefined, dimColor: r.ctxPct < 50, children: [" \u00B7 ctx ", r.ctxPct, "%"] })
773
+ : ''] }) }))] }, r.key));
763
774
  } }), permissionRequest && (_jsxs(Box, { flexDirection: "column", marginTop: 1, marginLeft: 2, children: [_jsx(Text, { color: "yellow", children: "\u256D\u2500 Permission required \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500" }), _jsxs(Text, { color: "yellow", children: ["\u2502 ", _jsx(Text, { bold: true, children: permissionRequest.toolName })] }), permissionRequest.description.split('\n').map((line, i) => (_jsxs(Text, { dimColor: true, children: ["\u2502 ", line] }, i))), _jsx(Text, { color: "yellow", children: "\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500" }), _jsx(Box, { marginLeft: 2, children: _jsxs(Text, { children: [_jsx(Text, { bold: true, color: "green", children: "[y]" }), _jsx(Text, { dimColor: true, children: " yes " }), _jsx(Text, { bold: true, color: "cyan", children: "[a]" }), _jsx(Text, { dimColor: true, children: " always " }), _jsx(Text, { bold: true, color: "red", children: "[n]" }), _jsx(Text, { dimColor: true, children: " no" })] }) })] })), askUserRequest && (_jsxs(Box, { flexDirection: "column", marginTop: 1, marginLeft: 2, children: [_jsx(Text, { color: "cyan", children: "\u256D\u2500 Question \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500" }), _jsxs(Text, { color: "cyan", children: ["\u2502 ", _jsx(Text, { bold: true, children: askUserRequest.question })] }), askUserRequest.options && askUserRequest.options.length > 0 && (askUserRequest.options.map((opt, i) => (_jsxs(Text, { dimColor: true, children: ["\u2502 ", i + 1, ". 
", opt] }, i)))), _jsx(Text, { color: "cyan", children: "\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500" }), _jsxs(Box, { marginLeft: 2, children: [_jsx(Text, { bold: true, children: "answer> " }), _jsx(TextInput, { value: askUserInput, onChange: setAskUserInput, onSubmit: (val) => {
764
775
  const answer = val.trim() || '(no response)';
765
776
  const r = askUserRequest.resolve;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@blockrun/franklin",
3
- "version": "3.15.3",
3
+ "version": "3.15.5",
4
4
  "description": "Franklin — The AI agent with a wallet. Spends USDC autonomously to get real work done. Pay per action, no subscriptions.",
5
5
  "type": "module",
6
6
  "exports": {