@blockrun/franklin 3.15.89 → 3.15.91

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -27,7 +27,7 @@ import { setSessionPersistenceDisabled } from '../session/storage.js';
  import { estimateCost, OPUS_PRICING } from '../pricing.js';
  import { maybeMidSessionExtract } from '../learnings/extractor.js';
  import { extractMentions, buildEntityContext, loadEntities } from '../brain/store.js';
- import { routeRequestAsync, resolveTierToModel, parseRoutingProfile, getFallbackChain, pickFreeFallback } from '../router/index.js';
+ import { routeRequestAsync, resolveTierToModel, parseRoutingProfile, getFallbackChain, pickFreeFallback, isVisionModel, messageNeedsVision, pickVisionSibling } from '../router/index.js';
  import { recordOutcome } from '../router/local-elo.js';
  import { shouldPlan, getPlanningPrompt, getExecutorModel, isExecutorStuck, toolCallSignature } from './planner.js';
  import { shouldVerify, runVerification } from './verification.js';
@@ -1118,6 +1118,16 @@ export async function interactiveSession(config, getUserInput, onEvent, onAbortR
  onProgress: (id, text) => onEvent({ kind: 'capability_progress', id, text }),
  sessionId,
  });
+ // ── Vision-need detection (per turn) ──
+ // Images enter a turn one of two ways: (1) the user types an image path
+ // and the Read tool inlines bytes mid-turn, or (2) the user references
+ // an image in their last message directly. We can only see (1) at this
+ // point — but that's the case we care about: the router has to decide
+ // BEFORE the model call which model to use. If the model can't see
+ // images, Read's tool_result image blocks get tokenized as base64 text
+ // by the gateway (verified 2026-05-09) and the model hallucinates from
+ // the "Image file: <path>" stub. Detect upfront, route accordingly.
+ const turnNeedsVision = loopCount === 1 && messageNeedsVision(lastUserInput);
  // ── Router: resolve routing profiles to concrete models ──
  // Uses the tier already decided by the turn-analyzer — one LLM call
  // up-front rather than a separate classifier here. Fallback to the
@@ -1129,8 +1139,8 @@ export async function interactiveSession(config, getUserInput, onEvent, onAbortR
  let routingSavings;
  if (routingProfile) {
  const routing = turnAnalysis
- ? resolveTierToModel(turnAnalysis.tier, routingProfile)
- : await routeRequestAsync(lastUserInput || '', routingProfile);
+ ? resolveTierToModel(turnAnalysis.tier, routingProfile, turnNeedsVision)
+ : await routeRequestAsync(lastUserInput || '', routingProfile, undefined, turnNeedsVision);
  resolvedModel = routing.model;
  routingTier = routing.tier;
  routingConfidence = routing.confidence;
@@ -1138,12 +1148,31 @@ export async function interactiveSession(config, getUserInput, onEvent, onAbortR
  lastRoutedModel = routing.model;
  lastRoutedCategory = routing.category || '';
  if (loopCount === 1) {
+ const visionTag = turnNeedsVision ? ' 👁️' : '';
  onEvent({
  kind: 'text_delta',
- text: `*Auto → ${routing.model}*\n\n`,
+ text: `*Auto → ${routing.model}${visionTag}*\n\n`,
  });
  }
  }
+ else if (turnNeedsVision && !isVisionModel(resolvedModel)) {
+ // ── Manual-mode guard ──
+ // User explicitly picked a model that can't see images. Don't silently
+ // send the image — the model would only see the text stub from Read
+ // and hallucinate. Swap to the closest vision sibling JUST for this
+ // turn (next turn's model-recovery block at the top of the user-input
+ // handler resets to baseModel, so the user's intent isn't permanently
+ // overridden). Always emit a visible notice so the user knows their
+ // pick was overridden and why.
+ const original = resolvedModel;
+ const visionSwap = pickVisionSibling(original);
+ resolvedModel = visionSwap;
+ config.model = visionSwap;
+ onEvent({
+ kind: 'text_delta',
+ text: `*⚠️ ${original} can't see images — using ${visionSwap} for this turn.*\n\n`,
+ });
+ }
  // Update token estimation model for more accurate byte-per-token ratio
  setEstimationModel(resolvedModel);
  // ── Plan-then-execute: detect and activate ──
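The swap-and-restore behavior the manual-mode guard comment describes reads as the following minimal sketch. The names resolveTurnModel and baseModel are illustrative, not package exports; only the three router helpers are real, and the actual loop keeps this state on the session rather than in a standalone function.

// Sketch (assumed wiring): pick a model for this turn only, based on whether
// the user's message references an image. `baseModel` stands in for the
// session's persisted model choice and is restored on the next turn.
import { isVisionModel, messageNeedsVision, pickVisionSibling } from '../router/index.js';

export function resolveTurnModel(baseModel: string, lastUserInput: string): string {
  if (!messageNeedsVision(lastUserInput)) return baseModel;   // text-only turn
  if (isVisionModel(baseModel)) return baseModel;             // user's pick already has vision
  return pickVisionSibling(baseModel);                        // one-turn vision override
}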
@@ -44,13 +44,37 @@ export function ageToolResults(history) {
  const aged = parts.map(part => {
  if (part.type !== 'tool_result')
  return part;
+ // Vision tool_results carry [text, image] arrays. JSON.stringify-ing
+ // them and rebuilding `content` as a truncated string drops the
+ // image block entirely. Measure only the text portion; the aging code
+ // below rebuilds as a string only when no image is present, otherwise
+ // we leave the part untouched (image bytes are already context-cheap
+ // once cached, and turning them into placeholders would be the wrong fix).
+ let hasImage = false;
+ let textOnly = '';
+ if (Array.isArray(part.content)) {
+ for (const block of part.content) {
+ if (block?.type === 'text' && typeof block.text === 'string') {
+ textOnly += (textOnly ? '\n' : '') + block.text;
+ }
+ else if (block?.type === 'image') {
+ hasImage = true;
+ }
+ }
+ }
  const content = typeof part.content === 'string'
  ? part.content
- : JSON.stringify(part.content);
+ : Array.isArray(part.content)
+ ? textOnly
+ : JSON.stringify(part.content);
  const charLen = content.length;
  // Recent 3 results: keep full
  if (age <= 3)
  return part;
+ // Preserve image-bearing tool_results regardless of age — replacing
+ // them with a text stub would silently delete the model's vision context.
+ if (hasImage)
+ return part;
  // Age 4-8: keep first 500 chars
  if (age <= 8 && charLen > 500) {
  modified = true;
@@ -206,7 +230,32 @@ export function deduplicateToolResultLines(history) {
  const newParts = parts.map(part => {
  if (part.type !== 'tool_result')
  return part;
- const raw = typeof part.content === 'string' ? part.content : JSON.stringify(part.content);
+ // Vision tool_results carry [text, image] arrays. JSON.stringify-ing
+ // them and writing back as a string would destroy the image (same
+ // bug class as ageToolResults / budgetToolResults — sibling site,
+ // verified 2026-05-10 during PR #53 review). For arrays, dedupe
+ // only the text segments; image segments pass through untouched.
+ let raw;
+ const imageBlocks = [];
+ if (typeof part.content === 'string') {
+ raw = part.content;
+ }
+ else if (Array.isArray(part.content)) {
+ const blocks = part.content;
+ const texts = [];
+ for (const b of blocks) {
+ if (b?.type === 'text' && typeof b.text === 'string') {
+ texts.push(b.text);
+ }
+ else if (b?.type === 'image') {
+ imageBlocks.push(b);
+ }
+ }
+ raw = texts.join('\n');
+ }
+ else {
+ raw = JSON.stringify(part.content);
+ }
  // Strip ANSI codes
  const stripped = raw.replace(ANSI_RE_REDUCE, '');
  // Collapse repeated consecutive lines
@@ -231,6 +280,19 @@ export function deduplicateToolResultLines(history) {
  if (result === raw)
  return part;
  partModified = true;
+ // If the original content was an array with image blocks, rebuild
+ // as an array — keep all image segments, replace the joined text
+ // payload with a single deduped text segment. This way dedupe runs
+ // for free on image-bearing results without losing vision context.
+ if (Array.isArray(part.content) && imageBlocks.length > 0) {
+ return {
+ ...part,
+ content: [
+ { type: 'text', text: result },
+ ...imageBlocks,
+ ],
+ };
+ }
  return { ...part, content: result };
  });
  if (!partModified)
@@ -301,6 +363,16 @@ export function collapseRepetitiveTools(history) {
  const parts = msg.content.map(part => {
  if (part.type !== 'tool_result' || !oldIds.has(part.tool_use_id))
  return part;
+ // Image-bearing results are the third sibling site of the JSON.stringify
+ // bug class (same pattern as ageToolResults / budgetToolResults /
+ // deduplicateToolResultLines). Don't collapse; replacing them with
+ // a `[first-line...]` string would destroy the vision context.
+ // Image bytes are already cache-cheap upstream once prompt-cached;
+ // the cost-control intent of this collapser is satisfied without
+ // touching them.
+ if (Array.isArray(part.content) && part.content.some((b) => b.type === 'image')) {
+ return part;
+ }
  const content = typeof part.content === 'string' ? part.content : JSON.stringify(part.content);
  if (content.length <= 80)
  return part;
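All three compaction sites above now repeat the same text-vs-image split over tool_result content. A shared helper along these lines could factor it out — a sketch only, not something the package ships; the Block shape is simplified from the message format seen in the diff.

// Hypothetical helper (not in the package): split a tool_result's content
// into its joined text and any image blocks, instead of JSON.stringify-ing
// image payloads into plain text.
type Block = { type: string; text?: string };

function splitToolResultContent(content: unknown): { text: string; images: Block[] } {
  if (typeof content === 'string') return { text: content, images: [] };
  if (!Array.isArray(content)) return { text: JSON.stringify(content), images: [] };
  const texts: string[] = [];
  const images: Block[] = [];
  for (const b of content as Block[]) {
    if (b?.type === 'text' && typeof b.text === 'string') texts.push(b.text);
    else if (b?.type === 'image') images.push(b);
  }
  return { text: texts.join('\n'), images };
}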
@@ -4,7 +4,7 @@ import { recordUsage } from '../stats/tracker.js';
  import { appendSettlementRow } from '../stats/cost-log.js';
  import { appendAudit } from '../stats/audit.js';
  import { buildFallbackChain, DEFAULT_FALLBACK_CONFIG, ROUTING_PROFILES, } from './fallback.js';
- import { routeRequest, parseRoutingProfile, } from '../router/index.js';
+ import { routeRequest, parseRoutingProfile, isVisionModel, messagesNeedVision, pickVisionSibling, } from '../router/index.js';
  import { estimateCost } from '../pricing.js';
  import { VERSION } from '../config.js';
  // User-Agent for backend requests
@@ -342,6 +342,13 @@ export function createProxy(options) {
  parsed.model = currentModel || DEFAULT_MODEL;
  }
  requestModel = parsed.model || DEFAULT_MODEL;
+ // Vision-need detection: does this request carry an image? We
+ // check messages[] for explicit image / image_url parts AND for
+ // image paths embedded in text — Anthropic-format clients send
+ // both shapes. Used both by the Auto router (pick a vision-capable
+ // tier model) and by the manual-mode guard (swap when the user
+ // explicitly picked a text-only model).
+ const proxyNeedsVision = messagesNeedVision(parsed.messages || []);
  // Smart routing: if model is a routing profile, classify and route
  const routingProfile = parseRoutingProfile(requestModel);
  if (routingProfile) {
@@ -360,13 +367,27 @@ export function createProxy(options) {
  .join('\n');
  }
  }
- // Route the request
- const routing = routeRequest(promptText, routingProfile);
+ // Route the request — propagate vision-need so AUTO_TIERS' V4
+ // Pro default doesn't get picked for an image-bearing turn.
+ const routing = routeRequest(promptText, routingProfile, proxyNeedsVision);
  parsed.model = routing.model;
  requestModel = routing.model;
  logger.info(`[franklin] 🧠 Smart routing: ${routingProfile} → ${routing.tier} → ${routing.model} ` +
  `(${(routing.savings * 100).toFixed(0)}% savings) [${routing.signals.join(', ')}]`);
  }
+ else if (proxyNeedsVision && !isVisionModel(requestModel)) {
+ // Manual-mode guard: user (or an upstream client) passed a
+ // concrete text-only model alongside an image. Swap to the
+ // family-closest vision sibling and log loudly — silently
+ // sending the image would tokenize as base64 text and produce
+ // a hallucinated answer. Same swap policy as the agent loop's
+ // interactive path so behavior is consistent across surfaces.
+ const original = requestModel;
+ const visionSwap = pickVisionSibling(original);
+ parsed.model = visionSwap;
+ requestModel = visionSwap;
+ logger.warn(`[franklin] 👁️ Vision swap: ${original} can't see images → ${visionSwap}`);
+ }
  {
  const original = parsed.max_tokens;
  const model = (parsed.model || '').toLowerCase();
@@ -10,6 +10,7 @@
  * Local Elo adjustments personalize routing per user over time.
  */
  import { type Category } from './categories.js';
+ export { isVisionModel, messageNeedsVision, messagesNeedVision, pickVisionSibling } from './vision.js';
  export type Tier = 'SIMPLE' | 'MEDIUM' | 'COMPLEX' | 'REASONING';
  export type RoutingProfile = 'auto' | 'free';
  export interface RoutingResult {
@@ -33,7 +34,7 @@ export declare function llmClassifyRequest(prompt: string): Promise<Tier | null>
  * Profile-specific tier tables (AUTO / ECO / PREMIUM / FREE) still pick
  * the concrete model; the classifier only picks the TIER.
  */
- export declare function routeRequestAsync(prompt: string, profile?: RoutingProfile, classify?: TierClassifier): Promise<RoutingResult>;
+ export declare function routeRequestAsync(prompt: string, profile?: RoutingProfile, classify?: TierClassifier, needsVision?: boolean): Promise<RoutingResult>;
  /**
  * Map a pre-classified tier to a concrete model + savings using the profile's
  * tier table. No classifier call — assumes the caller already decided the
@@ -43,8 +44,8 @@ export declare function routeRequestAsync(prompt: string, profile?: RoutingProfi
  * Use this when you have a tier already. Use `routeRequestAsync` when you
  * need the classifier to produce the tier.
  */
- export declare function resolveTierToModel(tier: Tier, profile?: RoutingProfile): RoutingResult;
- export declare function routeRequest(prompt: string, profile?: RoutingProfile): RoutingResult;
+ export declare function resolveTierToModel(tier: Tier, profile?: RoutingProfile, needsVision?: boolean): RoutingResult;
+ export declare function routeRequest(prompt: string, profile?: RoutingProfile, needsVision?: boolean): RoutingResult;
  /**
  * Get fallback models for a tier
  */
@@ -16,6 +16,8 @@ import { BLOCKRUN_DIR } from '../config.js';
  import { detectCategory, mapCategoryToTier } from './categories.js';
  import { selectModel } from './selector.js';
  import { computeLocalElo, blendElo } from './local-elo.js';
+ import { isVisionModel } from './vision.js';
+ export { isVisionModel, messageNeedsVision, messagesNeedVision, pickVisionSibling } from './vision.js';
  // ─── Learned Weights Loading ───
  const WEIGHTS_FILE = path.join(BLOCKRUN_DIR, 'router-weights.json');
  let cachedWeights; // undefined = not loaded yet
@@ -69,6 +71,27 @@ const AUTO_TIERS = {
  ],
  },
  };
+ /**
+ * If this turn carries an image, the picked tier model must be able to see it.
+ * Walks the tier's primary+fallback chain for the first vision-capable model;
+ * if none of them have vision, escalates to COMPLEX (Opus is always vision).
+ *
+ * Note: only applied when the caller signals needsVision=true. Without that
+ * hint the classic per-tier defaults still rule — V4 Pro's $0.50/$1.00 promo
+ * is the right SIMPLE/MEDIUM pick for text-only turns and we don't want to
+ * blanket-upgrade everyone to a vision model.
+ */
+ function pickVisionTierModel(tier) {
+ const chain = [AUTO_TIERS[tier].primary, ...AUTO_TIERS[tier].fallback];
+ const visionInTier = chain.find(isVisionModel);
+ if (visionInTier)
+ return { model: visionInTier, tier, signal: 'vision-required' };
+ // Tier chain is fully text-only (unusual but possible if cheap tiers get
+ // re-tuned). Escalate to COMPLEX whose primary (Opus) is always vision.
+ const escalated = [AUTO_TIERS.COMPLEX.primary, ...AUTO_TIERS.COMPLEX.fallback]
+ .find(isVisionModel) ?? AUTO_TIERS.COMPLEX.primary;
+ return { model: escalated, tier: 'COMPLEX', signal: 'vision-escalated' };
+ }
  // ─── Keywords for Classification ───
  //
  // Keyword fast-path uses English only by policy (English-only-source rule).
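A worked example of the escalation rule above, against a hypothetical tier table — the IDs and fallback chains below are placeholders for illustration, not the package's actual AUTO_TIERS values.

// Hypothetical tier table (illustrative only):
//   SIMPLE:  primary deepseek-v4-pro (text-only), fallback [nvidia/qwen3-coder-480b (text-only)]
//   MEDIUM:  primary deepseek-v4-pro (text-only), fallback [anthropic/claude-sonnet-4.6 (vision)]
//   COMPLEX: primary anthropic/claude-opus-4.7 (vision)
//
// pickVisionTierModel('MEDIUM') → { model: 'anthropic/claude-sonnet-4.6', tier: 'MEDIUM',  signal: 'vision-required' }
// pickVisionTierModel('SIMPLE') → { model: 'anthropic/claude-opus-4.7',   tier: 'COMPLEX', signal: 'vision-escalated' }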
@@ -250,7 +273,7 @@ function classifyRequest(prompt, tokenCount) {
  return { tier, confidence, signals };
  }
  // ─── Classic Router (keyword-based fallback) ───
- function classicRouteRequest(prompt, profile) {
+ function classicRouteRequest(prompt, profile, needsVision = false) {
  // Estimate token count (use byte length / 4 for better accuracy with non-ASCII)
  const byteLen = Buffer.byteLength(prompt, 'utf-8');
  const tokenCount = Math.ceil(byteLen / 4);
@@ -260,11 +283,21 @@ function classicRouteRequest(prompt, profile) {
  // 2026-05-03 — see comment on RoutingProfile above). 'free' is handled
  // earlier by the caller path; if it ever reaches here, fall through to
  // AUTO_TIERS rather than crashing.
- const tierConfigs = AUTO_TIERS;
- const model = tierConfigs[tier].primary;
+ let model;
+ let finalTier = tier;
+ const finalSignals = [...signals];
+ if (needsVision) {
+ const v = pickVisionTierModel(tier);
+ model = v.model;
+ finalTier = v.tier;
+ finalSignals.push(v.signal);
+ }
+ else {
+ model = AUTO_TIERS[tier].primary;
+ }
  const savings = computeSavings(model);
  const category = detectCategory(prompt, loadLearnedWeights()?.category_keywords).category;
- return { model, tier, confidence, signals, savings, category };
+ return { model, tier: finalTier, confidence, signals: finalSignals, savings, category };
  }
  // ─── LLM-based classifier ───
  //
@@ -362,25 +395,35 @@ export async function llmClassifyRequest(prompt) {
  * Profile-specific tier tables (AUTO / ECO / PREMIUM / FREE) still pick
  * the concrete model; the classifier only picks the TIER.
  */
- export async function routeRequestAsync(prompt, profile = 'auto', classify = llmClassifyRequest) {
+ export async function routeRequestAsync(prompt, profile = 'auto', classify = llmClassifyRequest, needsVision = false) {
  // Free / short-circuit profiles — no classifier needed.
  if (profile === 'free')
- return routeRequest(prompt, profile);
+ return routeRequest(prompt, profile, needsVision);
  const tier = await classify(prompt).catch(() => null);
  if (!tier) {
  // Classifier miss or disabled — fall through to the sync keyword router.
- return routeRequest(prompt, profile);
+ return routeRequest(prompt, profile, needsVision);
  }
  // Build a RoutingResult from the LLM-picked tier using the same tier
  // tables the keyword path uses. Keeps downstream code path-identical.
- const tierConfigs = AUTO_TIERS;
- const model = tierConfigs[tier].primary;
+ let model;
+ let finalTier = tier;
+ const signals = ['llm-classified'];
+ if (needsVision) {
+ const v = pickVisionTierModel(tier);
+ model = v.model;
+ finalTier = v.tier;
+ signals.push(v.signal);
+ }
+ else {
+ model = AUTO_TIERS[tier].primary;
+ }
  const category = detectCategory(prompt, loadLearnedWeights()?.category_keywords).category;
  return {
  model,
- tier,
+ tier: finalTier,
  confidence: 0.85, // LLM classification — medium-high confidence
- signals: ['llm-classified'],
+ signals,
  savings: computeSavings(model),
  category,
  };
@@ -394,36 +437,51 @@ export async function routeRequestAsync(prompt, profile = 'auto', classify = llm
  * Use this when you have a tier already. Use `routeRequestAsync` when you
  * need the classifier to produce the tier.
  */
- export function resolveTierToModel(tier, profile = 'auto') {
+ export function resolveTierToModel(tier, profile = 'auto', needsVision = false) {
  // Free profile short-circuits — everything routes to a single free model.
+ // qwen3-coder-480b is text-only; on a vision turn the free profile can't
+ // help us. Caller should detect this and warn the user that Free won't
+ // handle images — for now we just return the free pick and let the model
+ // fail gracefully. (Open question: should we hard-fall to nvidia/llama-4-
+ // maverick here? Skipped until we see a real user hit this path.)
  if (profile === 'free') {
  return {
  model: 'nvidia/qwen3-coder-480b',
  tier: 'SIMPLE',
  confidence: 1.0,
- signals: ['free-profile'],
+ signals: needsVision ? ['free-profile', 'vision-unsupported'] : ['free-profile'],
  savings: 1.0,
  };
  }
- const tierConfigs = AUTO_TIERS;
- const model = tierConfigs[tier].primary;
+ let model;
+ let finalTier = tier;
+ const signals = ['pre-classified'];
+ if (needsVision) {
+ const v = pickVisionTierModel(tier);
+ model = v.model;
+ finalTier = v.tier;
+ signals.push(v.signal);
+ }
+ else {
+ model = AUTO_TIERS[tier].primary;
+ }
  return {
  model,
- tier,
+ tier: finalTier,
  confidence: 0.85,
- signals: ['pre-classified'],
+ signals,
  savings: computeSavings(model),
  };
  }
  // ─── Main Router ───
- export function routeRequest(prompt, profile = 'auto') {
+ export function routeRequest(prompt, profile = 'auto', needsVision = false) {
  // Free profile — always use free model
  if (profile === 'free') {
  return {
  model: 'nvidia/qwen3-coder-480b',
  tier: 'SIMPLE',
  confidence: 1.0,
- signals: ['free-profile'],
+ signals: needsVision ? ['free-profile', 'vision-unsupported'] : ['free-profile'],
  savings: 1.0,
  };
  }
@@ -432,7 +490,7 @@ export function routeRequest(prompt, profile = 'auto') {
  // cheap/weak models on agentic work. Classic AUTO_TIERS defaults are
  // agent-tuned (Sonnet-tier backbone) and more predictable for users.
  if (profile === 'auto') {
- return classicRouteRequest(prompt, profile);
+ return classicRouteRequest(prompt, profile, needsVision);
  }
  // ── Learned routing (if weights available) ──
  const weights = loadLearnedWeights();
@@ -457,6 +515,21 @@ export function routeRequest(prompt, profile = 'auto') {
  const selected = selectModel(category, profile, adjustedWeights);
  if (selected) {
  const tier = mapCategoryToTier(category);
+ // Vision-aware substitution: if the Elo-picked model is text-only but
+ // the turn needs vision, swap to the tier's first vision-capable model.
+ // We deliberately don't blend Elo with vision capability — vision is a
+ // hard requirement, not a quality dimension.
+ if (needsVision && !isVisionModel(selected.model)) {
+ const v = pickVisionTierModel(tier);
+ return {
+ model: v.model,
+ tier: v.tier,
+ confidence,
+ signals: [category, v.signal],
+ savings: computeSavings(v.model),
+ category,
+ };
+ }
  const savings = computeSavings(selected.model);
  return {
  model: selected.model,
@@ -470,7 +543,7 @@ export function routeRequest(prompt, profile = 'auto') {
  // Fall through to classic if selectModel returns null (no candidates for category)
  }
  // ── Classic routing (keyword-based fallback) ──
- return classicRouteRequest(prompt, profile);
+ return classicRouteRequest(prompt, profile, needsVision);
  }
  function computeSavings(model) {
  const opusCostPer1K = (OPUS_PRICING.input + OPUS_PRICING.output) / 2 / 1000;
@@ -0,0 +1,51 @@
+ /**
+ * Vision capability + image-attachment detection.
+ *
+ * Two jobs:
+ * 1. isVisionModel(id) — does this gateway model accept image input?
+ * 2. messageNeedsVision(text) — does this user message reference an image?
+ *
+ * Source of truth: a hand-curated allowlist below. The gateway exposes a
+ * 'vision' category on /v1/models, but resolving it at routing time would
+ * make routeRequest async and gate sync proxy paths on a network call. The
+ * allowlist is small (~18 entries) and changes only when models do, which
+ * already touches the router + pricing tables — updating one more file is
+ * the right tradeoff vs. async fan-out across every routing callsite.
+ *
+ * Background: prior to this module, Auto routing could pick a text-only model
+ * (e.g. deepseek-v4-pro) on an image-bearing turn. The Read tool would still
+ * inline image bytes, the gateway would tokenize the base64 as text, and the
+ * model — having no vision pathway — would hallucinate based on the
+ * `Image file: <path>` description string. Expensive AND wrong.
+ */
+ /** Does this concrete gateway model accept image input? */
+ export declare function isVisionModel(modelId: string | undefined | null): boolean;
+ /**
+ * Pick a vision-capable replacement closest to the user's chosen model.
+ * Prefers same provider family (so the user's intent — "I want Claude" vs
+ * "I want Gemini" — survives the swap), then falls back to a sensible
+ * vision default (Sonnet 4.6 — agent-tuned, mid-tier price).
+ */
+ export declare function pickVisionSibling(modelId: string): string;
+ /**
+ * Does this user-typed message reference an image file? Used by the router
+ * to bump Auto mode to a vision-capable tier, and by the manual-mode guard
+ * to swap a text-only model for one turn.
+ *
+ * Detection is intentionally a regex over file extensions rather than a
+ * filesystem stat — the user may type a path that doesn't yet exist
+ * (about to wget it) or a glob; what we care about is "does the model need
+ * eyes for this turn?" The false-positive risk is benign (we route to a
+ * slightly stronger model than strictly needed).
+ */
+ export declare function messageNeedsVision(text: string | undefined | null): boolean;
+ /**
+ * Messages-array variant: scans OpenAI- and Anthropic-format content blocks
+ * for explicit image parts (image / image_url / input_image) and for image
+ * paths embedded in text parts. Used by the proxy router which receives a
+ * fully-formed messages[] payload, not a single string.
+ */
+ export declare function messagesNeedVision(messages: Array<{
+ role?: string;
+ content?: unknown;
+ }> | undefined | null): boolean;
@@ -0,0 +1,127 @@
+ /**
+ * Vision capability + image-attachment detection.
+ *
+ * Two jobs:
+ * 1. isVisionModel(id) — does this gateway model accept image input?
+ * 2. messageNeedsVision(text) — does this user message reference an image?
+ *
+ * Source of truth: a hand-curated allowlist below. The gateway exposes a
+ * 'vision' category on /v1/models, but resolving it at routing time would
+ * make routeRequest async and gate sync proxy paths on a network call. The
+ * allowlist is small (~18 entries) and changes only when models do, which
+ * already touches the router + pricing tables — updating one more file is
+ * the right tradeoff vs. async fan-out across every routing callsite.
+ *
+ * Background: prior to this module, Auto routing could pick a text-only model
+ * (e.g. deepseek-v4-pro) on an image-bearing turn. The Read tool would still
+ * inline image bytes, the gateway would tokenize the base64 as text, and the
+ * model — having no vision pathway — would hallucinate based on the
+ * `Image file: <path>` description string. Expensive AND wrong.
+ */
+ const VISION_MODELS = new Set([
+ // Anthropic — native vision across the line
+ 'anthropic/claude-opus-4.7',
+ 'anthropic/claude-opus-4.6',
+ 'anthropic/claude-sonnet-4.6',
+ 'anthropic/claude-haiku-4.5-20251001',
+ // OpenAI — multimodal flagships + o3 (Codex 5.3 is text-only, excluded)
+ 'openai/gpt-5.5',
+ 'openai/gpt-5.4',
+ 'openai/gpt-5.4-pro',
+ 'openai/gpt-5.2',
+ 'openai/gpt-5.2-pro',
+ 'openai/gpt-5-mini',
+ 'openai/gpt-4.1',
+ 'openai/o3',
+ // Google — vision baked into every Gemini SKU we surface
+ 'google/gemini-3.1-pro',
+ 'google/gemini-2.5-pro',
+ 'google/gemini-2.5-flash',
+ // xAI — only Grok 4 base supports vision; grok-4-1-fast-reasoning is text-only
+ 'xai/grok-4-0709',
+ 'xai/grok-3',
+ // Moonshot — K2.6 added vision + reasoning when it replaced K2.5
+ 'moonshot/kimi-k2.6',
+ // NVIDIA inference — Llama 4 Maverick is multimodal; deepseek/qwen-coder are not
+ 'nvidia/llama-4-maverick',
+ ]);
+ /** Does this concrete gateway model accept image input? */
+ export function isVisionModel(modelId) {
+ if (!modelId)
+ return false;
+ return VISION_MODELS.has(modelId);
+ }
+ /** Array copy of the allowlist, used for provider-prefix matching in pickVisionSibling. */
+ const VISION_MODELS_LIST = Array.from(VISION_MODELS);
+ /**
+ * Pick a vision-capable replacement closest to the user's chosen model.
+ * Prefers same provider family (so the user's intent — "I want Claude" vs
+ * "I want Gemini" — survives the swap), then falls back to a sensible
+ * vision default (Sonnet 4.6 — agent-tuned, mid-tier price).
+ */
+ export function pickVisionSibling(modelId) {
+ const family = modelId.split('/')[0]?.toLowerCase();
+ if (family) {
+ const sibling = VISION_MODELS_LIST.find(m => m.startsWith(`${family}/`));
+ if (sibling)
+ return sibling;
+ }
+ return 'anthropic/claude-sonnet-4.6';
+ }
+ // Image file extensions Franklin's Read tool inlines as vision content. Keep
+ // this in sync with IMAGE_MEDIA_TYPES in src/tools/read.ts — if Read learns a
+ // new format (e.g. .avif), this regex needs to learn it too or routing will
+ // silently miss it.
+ //
+ // We match the basename only ("foo.png"), preceded by any path separator or
+ // punctuation. Trying to match full path prefixes ("./", "/", "~/", "C:\")
+ // in one regex produced false negatives on Windows-style paths because of
+ // the `:` and `\` separators. The basename anchor is enough — a bare
+ // `foo.png` reference is what the Read tool actually needs to inline bytes.
+ const IMAGE_PATH_RE = /(?:^|[\s"'`(<\[\\/])[\w@%+-]+\.(?:png|jpe?g|gif|webp)(?=$|[\s"'`)>\],.?!:;])/i;
+ /**
+ * Does this user-typed message reference an image file? Used by the router
+ * to bump Auto mode to a vision-capable tier, and by the manual-mode guard
+ * to swap a text-only model for one turn.
+ *
+ * Detection is intentionally a regex over file extensions rather than a
+ * filesystem stat — the user may type a path that doesn't yet exist
+ * (about to wget it) or a glob; what we care about is "does the model need
+ * eyes for this turn?" The false-positive risk is benign (we route to a
+ * slightly stronger model than strictly needed).
+ */
+ export function messageNeedsVision(text) {
+ if (!text)
+ return false;
+ return IMAGE_PATH_RE.test(text);
+ }
+ /**
+ * Messages-array variant: scans OpenAI- and Anthropic-format content blocks
+ * for explicit image parts (image / image_url / input_image) and for image
+ * paths embedded in text parts. Used by the proxy router which receives a
+ * fully-formed messages[] payload, not a single string.
+ */
+ export function messagesNeedVision(messages) {
+ if (!messages || messages.length === 0)
+ return false;
+ for (const msg of messages) {
+ if (msg.role && msg.role !== 'user')
+ continue;
+ const content = msg.content;
+ if (typeof content === 'string') {
+ if (messageNeedsVision(content))
+ return true;
+ continue;
+ }
+ if (!Array.isArray(content))
+ continue;
+ for (const part of content) {
+ const t = part?.type;
+ if (t === 'image' || t === 'image_url' || t === 'input_image')
+ return true;
+ if (t === 'text' && messageNeedsVision(part.text))
+ return true;
+ }
+ }
+ return false;
+ }
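Usage of the new helpers, with expected results derived from the allowlist and regex above (not from package tests):

// Expected values follow from VISION_MODELS insertion order and IMAGE_PATH_RE.
import { isVisionModel, messageNeedsVision, messagesNeedVision, pickVisionSibling } from './vision.js';

isVisionModel('anthropic/claude-sonnet-4.6');            // true — in the allowlist
isVisionModel('nvidia/qwen3-coder-480b');                 // false — text-only

messageNeedsVision('what does screenshot.png show?');     // true — .png basename
messageNeedsVision('summarize notes.txt');                 // false — no image extension

// Explicit image blocks and image paths inside text both count:
messagesNeedVision([{ role: 'user', content: [{ type: 'image_url' }] }]);             // true
messagesNeedVision([{ role: 'user', content: 'check ./assets/logo.webp please' }]);   // true

// Same-provider sibling first, Sonnet as the default fallback:
pickVisionSibling('xai/grok-4-1-fast-reasoning');          // 'xai/grok-4-0709'
pickVisionSibling('nvidia/qwen3-coder-480b');               // 'nvidia/llama-4-maverick'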
@@ -103,12 +103,64 @@ async function execute(input, ctx) {
  output: `Image file: ${resolved} (${ext}, ${sizeStr}). Too large to inline for vision (>${Math.round(IMAGE_MAX_BYTES / 1_000_000)}MB). Resize or crop first.`,
  };
  }
- const bytes = fs.readFileSync(resolved);
- const base64 = bytes.toString('base64');
+ // Client-side normalization to bound vision-token cost. The BlockRun
+ // gateway (verified 2026-05-09) tokenizes image base64 as text on the
+ // /v1/messages forward path, so a 1.9MB PNG → ~2.5M base64 chars →
+ // ~1.36M billed tokens (~$0.50 per call) instead of Anthropic's
+ // native vision tokenization (~1.6k tokens). Resizing the long edge
+ // to 1280px and re-encoding as JPEG q85 cuts payload to ~80KB while
+ // keeping vision usable. Skip work if the file is already small;
+ // preserve PNG when transparency matters (alpha sample).
+ const SKIP_BELOW_BYTES = 150 * 1024;
+ const MAX_LONG_EDGE = 1280;
+ const JPEG_QUALITY = 85;
+ const rawBytes = fs.readFileSync(resolved);
+ let outBytes = rawBytes;
+ let outMedia = IMAGE_MEDIA_TYPES[ext];
+ let normalizeNote = '';
+ if (stat.size > SKIP_BELOW_BYTES) {
+ try {
+ const sharpMod = await import('sharp');
+ const sharp = sharpMod.default;
+ const img = sharp(rawBytes, { failOn: 'none' });
+ const meta = await img.metadata();
+ const longEdge = Math.max(meta.width ?? 0, meta.height ?? 0);
+ // Detect transparency: GIF/WebP/PNG with non-opaque alpha → keep PNG.
+ let hasAlpha = false;
+ if (meta.hasAlpha) {
+ const stats = await sharp(rawBytes).stats();
+ const alpha = stats.channels[stats.channels.length - 1];
+ hasAlpha = alpha?.min !== undefined && alpha.min < 255;
+ }
+ let pipeline = sharp(rawBytes, { failOn: 'none' });
+ if (longEdge > MAX_LONG_EDGE) {
+ pipeline = pipeline.resize({
+ width: meta.width && meta.width >= (meta.height ?? 0) ? MAX_LONG_EDGE : undefined,
+ height: meta.height && meta.height > (meta.width ?? 0) ? MAX_LONG_EDGE : undefined,
+ fit: 'inside',
+ withoutEnlargement: true,
+ });
+ }
+ if (hasAlpha) {
+ outBytes = await pipeline.png({ compressionLevel: 9 }).toBuffer();
+ outMedia = 'image/png';
+ }
+ else {
+ outBytes = await pipeline.jpeg({ quality: JPEG_QUALITY, mozjpeg: true }).toBuffer();
+ outMedia = 'image/jpeg';
+ }
+ const outKb = (outBytes.length / 1024).toFixed(1);
+ normalizeNote = ` Normalized: ${sizeStr} → ${outKb}KB (${meta.width}×${meta.height}${longEdge > MAX_LONG_EDGE ? ` → long edge ${MAX_LONG_EDGE}` : ''}, ${hasAlpha ? 'PNG/alpha' : `JPEG q${JPEG_QUALITY}`}).`;
+ }
+ catch {
+ // Best-effort — if sharp fails, fall through with raw bytes.
+ }
+ }
+ const base64 = outBytes.toString('base64');
  fileReadTracker.set(resolved, { mtimeMs: stat.mtimeMs, readAt: Date.now() });
  return {
- output: `Image file: ${resolved} (${ext}, ${sizeStr}). Rendered below for vision-capable models.`,
- images: [{ mediaType: IMAGE_MEDIA_TYPES[ext], base64 }],
+ output: `Image file: ${resolved} (${ext}, ${sizeStr}).${normalizeNote} Rendered below for vision-capable models.`,
+ images: [{ mediaType: outMedia, base64 }],
  };
  }
  const binaryExts = new Set(['.ico', '.bmp', '.pdf', '.zip', '.tar', '.gz', '.woff', '.woff2', '.ttf', '.eot', '.mp3', '.mp4', '.wav', '.avi', '.mov', '.exe', '.dll', '.so', '.dylib']);
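Back-of-envelope check of the figures in the normalization comment. The chars-per-token ratio is inferred from the comment's own numbers, not measured here, so treat the derived values as rough.

// 1.9 MB raw bytes → 1_900_000 × 4/3 ≈ 2_533_000 base64 chars
// 2_533_000 chars ÷ ~1.86 chars/token (implied by "~2.5M chars → ~1.36M tokens") ≈ 1_360_000 tokens
// After normalization: ~80 KB → ~107_000 base64 chars → ~58_000 tokens,
// still well above native vision tokenization (~1.6k), but roughly 23× cheaper than the raw upload.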
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@blockrun/franklin",
- "version": "3.15.89",
+ "version": "3.15.91",
  "description": "Franklin — The AI agent with a wallet. Spends USDC autonomously to get real work done. Pay per action, no subscriptions.",
  "type": "module",
  "exports": {
@@ -80,6 +80,7 @@
  "playwright-core": "^1.49.1",
  "qrcode": "^1.5.4",
  "react": "^19.2.4",
+ "sharp": "^0.34.5",
  "viem": "^2.48.1"
  },
  "devDependencies": {