@webmcp-auto-ui/agent 2.5.26 → 2.5.27
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/autoui-server.ts +27 -0
- package/src/index.ts +20 -6
- package/src/loop.ts +4 -12
- package/src/notebook-widgets/compact.ts +312 -0
- package/src/notebook-widgets/document.ts +372 -0
- package/src/notebook-widgets/editorial.ts +348 -0
- package/src/notebook-widgets/recipes/compact.md +104 -0
- package/src/notebook-widgets/recipes/document.md +100 -0
- package/src/notebook-widgets/recipes/editorial.md +104 -0
- package/src/notebook-widgets/recipes/workspace.md +94 -0
- package/src/notebook-widgets/shared.ts +1064 -0
- package/src/notebook-widgets/workspace.ts +328 -0
- package/src/prompts/claude-prompt-builder.ts +81 -0
- package/src/prompts/gemma4-prompt-builder.ts +205 -0
- package/src/prompts/index.ts +55 -0
- package/src/prompts/mistral-prompt-builder.ts +90 -0
- package/src/prompts/qwen-prompt-builder.ts +90 -0
- package/src/prompts/tool-call-parsers.ts +322 -0
- package/src/prompts/tool-refs.ts +196 -0
- package/src/providers/factory.ts +20 -3
- package/src/providers/transformers-models.ts +143 -0
- package/src/providers/transformers-serialize.ts +81 -0
- package/src/providers/transformers.ts +329 -0
- package/src/providers/transformers.worker.ts +667 -0
- package/src/providers/wasm.ts +132 -332
- package/src/recipes/_generated.ts +242 -0
- package/src/recipes/hackathon-assemblee-nationale.md +111 -0
- package/src/recipes/notebook-playbook.md +129 -0
- package/src/tool-layers.ts +7 -403
- package/src/trace-observer.ts +669 -0
- package/src/types.ts +17 -7
- package/src/util/opfs-cache.ts +265 -0
- package/tests/gemma-prompt.test.ts +472 -0
- package/tests/loop.test.ts +3 -3
- package/tests/transformers-serialize.test.ts +103 -0
|
@@ -0,0 +1,667 @@
|
|
|
1
|
+
/// <reference lib="webworker" />
|
|
2
|
+
/**
|
|
3
|
+
* transformers.worker.ts — Web Worker that runs transformers.js v4 (ONNX + WebGPU).
|
|
4
|
+
*
|
|
5
|
+
* Protocol (see transformers.ts for the main-thread side):
|
|
6
|
+
* main → worker: { type: 'load', modelId, entry, contextSize }
|
|
7
|
+
* main → worker: { type: 'generate', requestId, options,
|
|
8
|
+
* prompt?, chatMessages?, image? }
|
|
9
|
+
* - `prompt` is a pre-built string (Gemma wire format). Used as-is.
|
|
10
|
+
* - `chatMessages` is a [{role, content}] array that the worker feeds to
|
|
11
|
+
* tokenizer.apply_chat_template (Qwen / Mistral native chat_template).
|
|
12
|
+
* - For vision turns, `image` is attached; for Qwen/Mistral VLMs the
|
|
13
|
+
* worker applies the chat_template first, then passes the string to
|
|
14
|
+
* processor(prompt, raw).
|
|
15
|
+
* main → worker: { type: 'abort', requestId }
|
|
16
|
+
* main → worker: { type: 'dispose' }
|
|
17
|
+
*
|
|
18
|
+
* worker → main: { type: 'progress', fileProgress, totalProgress, status, loaded, total }
|
|
19
|
+
* worker → main: { type: 'ready' }
|
|
20
|
+
* worker → main: { type: 'warning', message }
|
|
21
|
+
* worker → main: { type: 'error', message, requestId? }
|
|
22
|
+
* worker → main: { type: 'token', requestId, token }
|
|
23
|
+
* worker → main: { type: 'done', requestId, content, stats, usage }
|
|
24
|
+
*
|
|
25
|
+
* The worker keeps one model loaded at a time and reuses `past_key_values`
|
|
26
|
+
* across turns when no image is attached. Vision turns reset the KV cache.
|
|
27
|
+
*/
|
|
28
|
+
|
|
29
|
+
import type { ContentBlock } from '../types.js';
|
|
30
|
+
import type { TransformersModelEntry } from './transformers-models.js';
|
|
31
|
+
|
|
32
|
+
// --------------------------------------------------------------------------
|
|
33
|
+
// Gemma 4 chat_template override.
|
|
34
|
+
//
|
|
35
|
+
// The chat_template baked into onnx-community/gemma-4-E{2,4}B-it-ONNX is the
|
|
36
|
+
// VLM variant shipped by HF, which iterates `message.content` as a list
|
|
37
|
+
// ({% for part in content %}). Our worker feeds plain strings, which trips
|
|
38
|
+
// the minified Jinja `for` loop inside transformers.js with the opaque error
|
|
39
|
+
// "C is not iterable". We replace the template with a string-safe variant
|
|
40
|
+
// that accepts either a string or a list of {type:'text', text} parts.
|
|
41
|
+
// Mirrors the approach in Chong's TurboQuant-WASM demo
|
|
42
|
+
// (demo/src/draw/prompts/preamble.ts on github.com/teamchong/turboquant-wasm).
|
|
43
|
+
// --------------------------------------------------------------------------
|
|
44
|
+
|
|
45
|
+
const GEMMA4_CHAT_TEMPLATE = `{{- bos_token -}}
|
|
46
|
+
{%- for message in messages -%}
|
|
47
|
+
{%- set role = message['role'] -%}
|
|
48
|
+
{%- if role == 'assistant' -%}{%- set role = 'model' -%}{%- endif -%}
|
|
49
|
+
<|turn>{{ role }}
|
|
50
|
+
{%- if message['content'] is string %}
|
|
51
|
+
{{ message['content'] | trim }}
|
|
52
|
+
{%- else -%}
|
|
53
|
+
{%- for part in message['content'] -%}
|
|
54
|
+
{%- if part['type'] == 'text' -%}{{ part['text'] | trim }}{%- endif -%}
|
|
55
|
+
{%- endfor -%}
|
|
56
|
+
{%- endif %}
|
|
57
|
+
<turn|>
|
|
58
|
+
{%- endfor -%}
|
|
59
|
+
{%- if add_generation_prompt -%}
|
|
60
|
+
<|turn>model
|
|
61
|
+
{%- endif -%}`;
|
|
62
|
+
|
|
63
|
+
// --------------------------------------------------------------------------
|
|
64
|
+
// Lazy imports — resolved on first 'load'. Kept as `any` because the v4 API
|
|
65
|
+
// surface isn't fully typed yet.
|
|
66
|
+
// --------------------------------------------------------------------------
|
|
67
|
+
|
|
68
|
+
let transformersMod: any = null;
|
|
69
|
+
let processor: any = null;
|
|
70
|
+
let model: any = null;
|
|
71
|
+
let tokenizer: any = null;
|
|
72
|
+
let entry: TransformersModelEntry | null = null;
|
|
73
|
+
let stoppingCriteria: any = null;
|
|
74
|
+
|
|
75
|
+
/**
|
|
76
|
+
* past_key_values slot. Populated transiently during a generate() call and
|
|
77
|
+
* ALWAYS disposed before and after (see disposePastKeyValues()). The cache is
|
|
78
|
+
* intra-generate only — `use_cache: true` in generateArgs keeps attention at
|
|
79
|
+
* O(n) per token inside a single generate() — but it is NEVER retained across
|
|
80
|
+
* generate() calls. Cross-turn reuse was removed in commit 9bb7d04 (perf:
|
|
81
|
+
* re-enable intra-generate KV cache) after the earlier fix 98d7d57 that
|
|
82
|
+
* disabled reuse entirely because of a SWA mask/score shape desync.
|
|
83
|
+
*/
|
|
84
|
+
let pastKeyValues: any = null;
|
|
85
|
+
|
|
86
|
+
/** Active generation request id — set on 'generate', cleared on 'done'/'error'. */
|
|
87
|
+
let activeRequestId: string | null = null;
|
|
88
|
+
|
|
89
|
+
// --------------------------------------------------------------------------
|
|
90
|
+
// Tool-call parser — loaded lazily with a best-effort fallback stub.
|
|
91
|
+
// --------------------------------------------------------------------------
|
|
92
|
+
|
|
93
|
+
type ParsedToolCallBlock =
|
|
94
|
+
| { type: 'text'; text: string }
|
|
95
|
+
| { type: 'tool_use'; id: string; name: string; input: Record<string, unknown> };
|
|
96
|
+
|
|
97
|
+
async function parseToolCalls(
|
|
98
|
+
fullText: string,
|
|
99
|
+
toolFormat: string,
|
|
100
|
+
): Promise<ParsedToolCallBlock[]> {
|
|
101
|
+
try {
|
|
102
|
+
// Optional fallback import — module is shipped (../prompts/tool-call-parsers.ts);
|
|
103
|
+
// the try/catch is defensive only, guarding against bundler quirks that
|
|
104
|
+
// could drop the worker-side import.
|
|
105
|
+
const mod: any = await import('../prompts/tool-call-parsers.js');
|
|
106
|
+
const fn = mod.parseToolCalls ?? mod.default;
|
|
107
|
+
if (typeof fn === 'function') return await fn(fullText, toolFormat);
|
|
108
|
+
} catch {
|
|
109
|
+
// Import resolution failed — fall through to stub.
|
|
110
|
+
}
|
|
111
|
+
// Stub: ship the raw text as a single text block. Parsing will arrive in a
|
|
112
|
+
// later agent iteration.
|
|
113
|
+
return [{ type: 'text', text: fullText }];
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
// --------------------------------------------------------------------------
|
|
117
|
+
// OPFS cache — loaded lazily with a best-effort fallback that defers entirely
|
|
118
|
+
// to transformers.js's built-in HF cache (no OPFS intervention on our side).
|
|
119
|
+
// --------------------------------------------------------------------------
|
|
120
|
+
|
|
121
|
+
async function loadOrDownloadModel(
|
|
122
|
+
_repo: string,
|
|
123
|
+
_onProgress: (fileProgress: number, totalProgress: number, status: string, loaded?: number, total?: number) => void,
|
|
124
|
+
): Promise<void> {
|
|
125
|
+
try {
|
|
126
|
+
// Optional fallback import — module is shipped (../util/opfs-cache.ts);
|
|
127
|
+
// the try/catch is defensive only, guarding against bundler quirks or
|
|
128
|
+
// OPFS being unavailable in the worker (older browsers).
|
|
129
|
+
const mod: any = await import('../util/opfs-cache.js');
|
|
130
|
+
const fn = mod.loadOrDownloadModel ?? mod.default;
|
|
131
|
+
if (typeof fn === 'function') return await fn(_repo, _onProgress);
|
|
132
|
+
} catch {
|
|
133
|
+
// Import/OPFS unavailable — transformers.js falls back to its internal
|
|
134
|
+
// HTTP fetch + `caches` API. Progress arrives via from_pretrained's
|
|
135
|
+
// progress_callback below.
|
|
136
|
+
}
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
// --------------------------------------------------------------------------
|
|
140
|
+
// Helpers
|
|
141
|
+
// --------------------------------------------------------------------------
|
|
142
|
+
|
|
143
|
+
function post(msg: any, transfer?: Transferable[]): void {
|
|
144
|
+
(self as unknown as Worker).postMessage(msg, transfer ?? []);
|
|
145
|
+
}
|
|
146
|
+
|
|
147
|
+
function disposePastKeyValues(): void {
|
|
148
|
+
if (!pastKeyValues) return;
|
|
149
|
+
try {
|
|
150
|
+
if (typeof pastKeyValues === 'object') {
|
|
151
|
+
for (const v of Object.values(pastKeyValues) as any[]) {
|
|
152
|
+
try { v?.dispose?.(); } catch {}
|
|
153
|
+
}
|
|
154
|
+
}
|
|
155
|
+
} catch {}
|
|
156
|
+
pastKeyValues = null;
|
|
157
|
+
}
|
|
158
|
+
|
|
159
|
+
function resetAll(): void {
|
|
160
|
+
disposePastKeyValues();
|
|
161
|
+
try { stoppingCriteria?.reset?.(); } catch {}
|
|
162
|
+
}
|
|
163
|
+
|
|
164
|
+
// --------------------------------------------------------------------------
|
|
165
|
+
// Model loading
|
|
166
|
+
// --------------------------------------------------------------------------
|
|
167
|
+
|
|
168
|
+
/**
 * Load the transformers.js module, pre-download weights, and instantiate the
 * tokenizer/processor/model for `modelEntry`. Emits 'progress' messages
 * throughout, a 'warning' on recoverable issues, and 'ready' when done.
 * Throws (caught by the message dispatcher) when the CDN module is missing
 * core exports or no usable model class can be resolved.
 */
async function loadModel(modelEntry: TransformersModelEntry): Promise<void> {
  entry = modelEntry;

  post({
    type: 'progress',
    fileProgress: 0,
    totalProgress: 0,
    status: 'importing transformers.js',
    loaded: 0,
    total: modelEntry.size,
  });

  // Dynamic import — Web Workers don't inherit the document's import-map, so
  // the bare specifier can't resolve when externalized from the worker bundle.
  // Hardcode the CDN URL (mirrors the pin in app.html). Keep /* @vite-ignore */
  // to stop Vite from pre-resolving the runtime string.
  // Version pinning per family — Mistral3 was only fully wired (name → class
  // registry) in transformers.js 3.8.1; 4.1.0 regresses that path but adds
  // Gemma4/Qwen3.5. So route each family to the version that actually works.
  // Gemma 4 is additionally pinned to 4.0.1 because 4.1.0 shipped Jinja
  // regressions that combine badly with the VLM chat_template on
  // onnx-community/gemma-4-*. 4.0.1 is the version Chong validated in
  // TurboQuant-WASM. Qwen3.5 still needs 4.1.0 (that's where its class
  // registry landed).
  const TRANSFORMERS_URL = modelEntry.family === 'mistral'
    ? 'https://esm.sh/@huggingface/transformers@3.8.1'
    : modelEntry.family === 'gemma4'
      ? 'https://esm.sh/@huggingface/transformers@4.0.1'
      : 'https://esm.sh/@huggingface/transformers@4.1.0';
  const imported: any = await import(/* @vite-ignore */ TRANSFORMERS_URL);
  // Some CDN bundles park named exports under `.default`; flatten so the
  // destructure below finds them either way.
  transformersMod = imported?.AutoTokenizer ? imported : (imported?.default ?? imported);
  const topKeys = Object.keys(transformersMod ?? {});
  post({ type: 'warning', message: `[transformers] module loaded. ${topKeys.length} top-level keys. AutoTokenizer=${typeof transformersMod?.AutoTokenizer}, AutoModelForImageTextToText=${typeof transformersMod?.AutoModelForImageTextToText}, AutoModelForCausalLM=${typeof transformersMod?.AutoModelForCausalLM}` });
  const {
    AutoProcessor,
    AutoTokenizer,
    AutoModelForCausalLM,
    InterruptableStoppingCriteria,
    env,
  } = transformersMod;
  if (!AutoTokenizer || !AutoModelForCausalLM) {
    throw new Error(`[transformers] CDN module missing core exports. Keys seen: ${topKeys.slice(0, 40).join(',')}`);
  }

  // Point ONNX Runtime WASM binaries to the jsdelivr CDN so they're not bundled.
  // esm.sh hosts the JS modules; the native .wasm binaries are served by jsdelivr.
  try {
    if (env?.backends?.onnx?.wasm && modelEntry.family !== 'mistral') {
      // Only override for 4.1.0 / ORT 1.26 (we host the matching .wasm binaries
      // on jsdelivr). For the 3.8.1 path, ORT 1.22.0-dev is a transformers.js-
      // internal build not mirrored on jsdelivr — let transformers.js use its
      // default wasmPaths (which resolve against esm.sh, matching the JS bundle).
      env.backends.onnx.wasm.wasmPaths = 'https://cdn.jsdelivr.net/npm/onnxruntime-web@1.26.0-dev.20260410-5e55544225/dist/';
    }
    if (env) {
      env.allowLocalModels = false;
      env.useBrowserCache = true;
    }
  } catch {}

  stoppingCriteria = new InterruptableStoppingCriteria();

  // Pre-download (OPFS-aware when the cache module is available).
  await loadOrDownloadModel(modelEntry.repo, (fp, tp, status, loaded, total) => {
    post({
      type: 'progress',
      fileProgress: fp,
      totalProgress: tp,
      status,
      loaded: loaded ?? 0,
      total: total ?? modelEntry.size,
    });
  });

  // Aggregated progress callback — sums loaded/total across every file we see,
  // emitting a monotonic aggregate ratio. Two guards eliminate flicker:
  // 1. Files with total < 1_000_000 bytes are ignored (configs, tokenizers,
  //    chat_templates are all <100KB and would jump instantly to 100%,
  //    momentarily overwriting the big weight-shard progress).
  // 2. We emit sum(loaded) / sum(total) — so small-file completions cannot
  //    make the overall ratio regress.
  const fileStats = new Map<string, { loaded: number; total: number }>();
  const progressCallback = (p: any) => {
    if (p?.status !== 'progress' || typeof p?.file !== 'string') return;
    const loaded = typeof p.loaded === 'number' ? p.loaded : 0;
    const total = typeof p.total === 'number' && p.total > 0 ? p.total : 0;
    if (total < 1_000_000) return; // skip tiny files
    fileStats.set(p.file, { loaded, total });
    let sumLoaded = 0;
    let sumTotal = 0;
    for (const v of fileStats.values()) { sumLoaded += v.loaded; sumTotal += v.total; }
    const fp = sumTotal > 0 ? sumLoaded / sumTotal : 0;
    post({
      type: 'progress',
      fileProgress: fp,
      totalProgress: fp,
      status: 'downloading',
      loaded: sumLoaded,
      total: sumTotal,
    });
  };

  // from_pretrained options. NOTE(review): the aggregated progressCallback IS
  // wired here (a previous comment claimed it was omitted). When OPFS already
  // pre-downloaded every weight, from_pretrained reads from browser cache and
  // the callback stays quiet; on a cache miss, the small-file guard and the
  // monotonic sum in progressCallback prevent the flicker that a naive
  // per-file callback used to cause.
  const fromPretrainedOpts = {
    dtype: modelEntry.dtype,
    device: 'webgpu' as const,
    progress_callback: progressCallback,
  };
  post({
    type: 'progress',
    fileProgress: 1,
    totalProgress: 1,
    status: 'initializing model weights',
    loaded: modelEntry.size,
    total: modelEntry.size,
  });

  // Tokenizer + processor — processor is required for VLMs and harmless otherwise.
  // NOTE(review): both from_pretrained calls below pass the same aggregated
  // progressCallback (its <1MB guard suppresses tokenizer/config noise), so the
  // big weight-shard progress bar is not overwritten by tiny-file events.
  post({
    type: 'progress',
    fileProgress: 1,
    totalProgress: 1,
    status: 'initializing tokenizer',
    loaded: modelEntry.size,
    total: modelEntry.size,
  });
  try {
    tokenizer = await AutoTokenizer.from_pretrained(modelEntry.repo, { progress_callback: progressCallback });
  } catch (err) {
    // Tokenizer failures are survivable for VLM paths that tokenize through
    // the processor; warn and continue with tokenizer = null.
    post({ type: 'warning', message: `tokenizer load: ${String(err)}` });
    tokenizer = null;
  }

  if (modelEntry.family === 'gemma4' && tokenizer) {
    // Override the VLM chat_template baked into onnx-community/gemma-4-* with a
    // string-safe variant. The shipped template iterates message.content as a
    // list ({% for part in content %}); our serializer emits strings, which
    // triggers "C is not iterable" inside transformers.js's minified Jinja
    // runtime. Mirrors the approach Chong takes in TurboQuant-WASM
    // (demo/src/draw/prompts/preamble.ts).
    try {
      (tokenizer as any).chat_template = GEMMA4_CHAT_TEMPLATE;
    } catch { /* best-effort */ }
  }

  try {
    processor = await AutoProcessor.from_pretrained(modelEntry.repo, { progress_callback: progressCallback });
  } catch {
    // Some text-only checkpoints ship without an AutoProcessor — that's fine.
    processor = null;
  }

  // Model class — pick a specialized VLM class when the catalog hints at one,
  // otherwise fall back to AutoModelForCausalLM.
  let ModelClass: any = AutoModelForCausalLM;
  if (modelEntry.modelClass) {
    const resolved = transformersMod[modelEntry.modelClass];
    if (resolved && typeof resolved.from_pretrained === 'function') {
      ModelClass = resolved;
      post({ type: 'warning', message: `[transformers] using ModelClass=${modelEntry.modelClass}` });
    } else {
      const autoKeys = Object.keys(transformersMod).filter((k) => k.startsWith('AutoModel') || k.includes('ForConditional') || k.includes('ForImageText')).join(',');
      post({ type: 'warning', message: `[transformers] modelClass '${modelEntry.modelClass}' not found. Available Auto* keys: ${autoKeys}. Falling back to AutoModelForCausalLM.` });
    }
  }
  if (!ModelClass || typeof ModelClass.from_pretrained !== 'function') {
    throw new Error(`[transformers] No usable model class. AutoModelForCausalLM=${typeof AutoModelForCausalLM}, mod keys sample: ${Object.keys(transformersMod).slice(0, 20).join(',')}`);
  }

  try {
    model = await ModelClass.from_pretrained(modelEntry.repo, fromPretrainedOpts);
  } catch (err) {
    // WebGPU can fail on older drivers — fall back to WASM and warn the UI.
    post({
      type: 'warning',
      message: `WebGPU unavailable, falling back to WASM: ${String(err)}`,
    });
    model = await ModelClass.from_pretrained(modelEntry.repo, {
      ...fromPretrainedOpts,
      device: 'wasm',
    });
  }

  post({ type: 'ready' });
}
|
|
361
|
+
|
|
362
|
+
// --------------------------------------------------------------------------
|
|
363
|
+
// Generation
|
|
364
|
+
// --------------------------------------------------------------------------
|
|
365
|
+
|
|
366
|
+
/** Per-request sampling options forwarded from the main thread's 'generate' message. */
interface GenerateOptions {
  maxTokens?: number;   // max_new_tokens cap; defaults to 2048 in handleGenerate
  temperature?: number; // sampling temperature; defaults to 0.7 (ignored for Mistral: greedy)
  topK?: number;        // top-k cutoff; defaults to 50 (ignored for Mistral: greedy)
}
|
|
371
|
+
|
|
372
|
+
/**
 * Run one generation request end-to-end: build the prompt (chat_template or
 * raw string), prepare inputs (tokenizer or VLM processor), stream tokens to
 * the main thread, then parse thinking/tool-call structure and post 'done'.
 * Every failure path posts 'error' with the requestId and clears
 * activeRequestId; this function never rejects for protocol-level errors.
 *
 * @param requestId    Correlates 'token'/'done'/'error' messages on the main side.
 * @param prompt       Pre-built prompt string (Gemma wire format), used as-is.
 * @param chatMessages Chat messages run through the model's chat_template.
 * @param options      Sampling options (see GenerateOptions).
 * @param image        Optional encoded image bytes for vision turns.
 */
async function handleGenerate(
  requestId: string,
  prompt: string | undefined,
  chatMessages: Array<{ role: string; content: string }> | undefined,
  options: GenerateOptions,
  image?: Uint8Array,
): Promise<void> {
  if (!model || !entry) {
    post({ type: 'error', requestId, message: 'model not loaded' });
    return;
  }

  activeRequestId = requestId;

  const { TextStreamer, RawImage } = transformersMod;

  // If the main thread sent chatMessages, apply the tokenizer's native
  // chat_template (Jinja) now. This is how Qwen3 and Mistral produce correctly
  // tagged prompts (<|im_start|>user … / [INST] …). Falling back to the raw
  // string lets the Gemma path (custom wire format) keep working unchanged.
  let effectivePrompt: string | undefined;
  if (chatMessages && entry.family === 'mistral' && processor && typeof processor.apply_chat_template === 'function') {
    try {
      effectivePrompt = processor.apply_chat_template(chatMessages);
    } catch (err) {
      post({ type: 'warning', message: `processor.apply_chat_template failed, falling back to tokenizer: ${String(err)}` });
      // fall through to tokenizer branch below
    }
  }
  if (!effectivePrompt && chatMessages && tokenizer && typeof tokenizer.apply_chat_template === 'function') {
    try {
      effectivePrompt = tokenizer.apply_chat_template(chatMessages, {
        tokenize: false,
        add_generation_prompt: true,
      });
    } catch (err) {
      post({ type: 'warning', message: `apply_chat_template failed on string content, retrying with structured parts: ${String(err)}` });
      // Retry with content wrapped as [{type:'text', text}] parts — some VLM
      // templates only accept the structured form.
      try {
        const structured = chatMessages.map(m => ({
          role: m.role,
          content: [{ type: 'text', text: m.content }],
        }));
        effectivePrompt = tokenizer.apply_chat_template(structured, {
          tokenize: false,
          add_generation_prompt: true,
        });
      } catch (err2) {
        post({ type: 'warning', message: `apply_chat_template failed twice, falling back to raw concat: ${String(err2)}` });
        // Last resort: untemplated "role: content" concatenation.
        effectivePrompt = chatMessages.map(m => `${m.role}: ${m.content}`).join('\n\n');
      }
    }
  }
  if (!effectivePrompt && typeof prompt === 'string') {
    effectivePrompt = prompt;
  }
  if (!effectivePrompt) {
    post({ type: 'error', requestId, message: 'generate requires either prompt or chatMessages' });
    activeRequestId = null;
    return;
  }

  const t0 = performance.now();
  let tokenCount = 0;
  let fullText = '';

  // Mistral tokenizes through the processor's inner tokenizer when present.
  const streamerTokenizer = entry.family === 'mistral' ? (processor?.tokenizer ?? tokenizer) : tokenizer;
  const streamer = new TextStreamer(streamerTokenizer, {
    skip_prompt: true,
    skip_special_tokens: entry.family === 'mistral',
    token_callback_function: () => {
      tokenCount += 1;
    },
    callback_function: (token: string) => {
      fullText += token;
      post({ type: 'token', requestId, token });
    },
  });

  // Build model inputs — VLM path goes through processor(prompt, image),
  // text path goes through tokenizer(prompt).
  // KV reuse is disabled: the agent loop rebuilds the full prompt each turn,
  // so reusing past_key_values double-prefixes and triggers mask/score shape
  // mismatches (Where node broadcast error on dim 3).
  disposePastKeyValues();
  let inputs: any;
  // NOTE(review): isVisionTurn is set below but never read afterwards —
  // candidate for removal.
  let isVisionTurn = false;
  try {
    if (image && processor && entry.vision) {
      isVisionTurn = true;
      const blob = new Blob([image]);
      const raw: any = await RawImage.read(blob);
      // Mistral/Pixtral: let image_processor drive sizing (longest_edge), do NOT force 448×448.
      if (entry.family === 'mistral' && processor.image_processor) {
        try { processor.image_processor.size = { longest_edge: 480 }; } catch {}
      } else {
        try { raw.resize?.(448, 448); } catch {}
      }
      // processor(images, text, opts) — ARG ORDER MATTERS for Pixtral processor.
      inputs = await processor(raw, effectivePrompt, { add_special_tokens: false });
    } else if (tokenizer) {
      // Text-only turn — always go through the tokenizer, even on a VLM.
      // VLM processors (Qwen3.5, Mistral3, Gemma4) expect messages-with-content-
      // blocks rather than a plain prompt string, so calling processor(prompt)
      // throws "X is not iterable" on the template path.
      inputs = await tokenizer(effectivePrompt, { return_tensors: 'pt' });
    } else {
      post({ type: 'error', requestId, message: 'no tokenizer/processor available' });
      activeRequestId = null;
      return;
    }
  } catch (err) {
    post({ type: 'error', requestId, message: `input preparation failed: ${String(err)}` });
    activeRequestId = null;
    return;
  }

  const generateArgs: any = {
    ...inputs,
    max_new_tokens: options.maxTokens ?? 2048,
    do_sample: true,
    return_dict_in_generate: true,
    // Keep the intra-generate KV cache (O(n) per token vs O(n²)). The SWA
    // desync bug only manifested when past_key_values were reused ACROSS
    // generate() calls, which we now prevent via disposePastKeyValues()
    // before each call.
    use_cache: true,
    // Sampling defaults — without these transformers.js degenerates into
    // single-token loops ("Salut! Salut! Salut!...") on Qwen3 especially.
    temperature: typeof options.temperature === 'number' ? options.temperature : 0.7,
    top_p: 0.9,
    top_k: typeof options.topK === 'number' ? options.topK : 50,
    repetition_penalty: 1.1,
    streamer,
    stopping_criteria: stoppingCriteria,
  };
  // past_key_values deliberately never reused (see comment above).

  // Mistral path: greedy decoding, stronger repetition penalty, and strip the
  // options its 3.8.1 generate() does not accept.
  if (entry.family === 'mistral') {
    generateArgs.do_sample = false;
    generateArgs.repetition_penalty = 1.2;
    delete generateArgs.temperature;
    delete generateArgs.top_p;
    delete generateArgs.top_k;
    delete generateArgs.return_dict_in_generate;
    delete generateArgs.stopping_criteria;
  }

  let result: any;
  try {
    result = await model.generate(generateArgs);
  } catch (err) {
    const msg = String(err);
    // Abort / stopping-criteria interrupt → deliver what we have so far.
    if (!msg.includes('interrupt') && !msg.includes('stopping')) {
      post({ type: 'error', requestId, message: msg });
      activeRequestId = null;
      return;
    }
  }

  // KV cache intentionally not retained — the agent loop re-sends the full
  // prompt each turn, so a stale cache would double-prefix and break shapes.
  try { if (result?.past_key_values) {
    if (typeof result.past_key_values === 'object') {
      for (const v of Object.values(result.past_key_values) as any[]) {
        try { v?.dispose?.(); } catch {}
      }
    }
  } } catch {}

  // Parse thinking: everything before </think> is split off into `thinking`;
  // the remainder becomes the visible content.
  let thinking: string | undefined;
  let visible = fullText;
  const thinkEnd = fullText.indexOf('</think>');
  if (thinkEnd !== -1) {
    thinking = fullText.slice(0, thinkEnd).replace(/^<think>/, '').trim();
    visible = fullText.slice(thinkEnd + '</think>'.length).trim();
  }

  // Tool-call parsing (format-aware).
  const parsed = await parseToolCalls(visible, entry.toolFormat);
  const content: ContentBlock[] = [];
  let attachedThinking = false;
  for (const block of parsed) {
    if (block.type === 'text') {
      const text = block.text;
      // NOTE(review): both branches push an identical block — the thinking
      // text is NOT embedded into the block here (despite the flag name); it
      // is delivered separately via the top-level `thinking` field on the
      // 'done' message below. attachedThinking only records that a thinking
      // span coincided with the first text block.
      if (!attachedThinking && thinking) {
        content.push({ type: 'text', text });
        attachedThinking = true;
      } else {
        content.push({ type: 'text', text });
      }
    } else {
      content.push(block);
    }
  }
  // Guarantee at least one content block so 'done' is never empty.
  if (content.length === 0) {
    content.push({ type: 'text', text: visible });
  }

  const latencyMs = performance.now() - t0;
  const tokensPerSec = tokenCount > 0 ? tokenCount / (latencyMs / 1000) : 0;

  // Best-effort input-token count from the tensor dims (last dim = seq length).
  let inputTokens = 0;
  try {
    const ids = (inputs?.input_ids?.dims ?? inputs?.input_ids?.size ?? 0);
    inputTokens = Array.isArray(ids) ? ids[ids.length - 1] : Number(ids) || 0;
  } catch {}

  post({
    type: 'done',
    requestId,
    content,
    stats: {
      tokensPerSec,
      totalTokens: tokenCount,
      latencyMs,
    },
    usage: {
      input_tokens: inputTokens,
      output_tokens: tokenCount,
    },
    // Thinking is exposed for observers that postMessage-proxy the worker.
    thinking,
  });

  activeRequestId = null;
}
|
|
604
|
+
|
|
605
|
+
// --------------------------------------------------------------------------
|
|
606
|
+
// Message dispatch
|
|
607
|
+
// --------------------------------------------------------------------------
|
|
608
|
+
|
|
609
|
+
// Single dispatcher for all main→worker messages. Any throw from a handler is
// converted into an 'error' message (with the requestId when present) instead
// of killing the worker.
self.addEventListener('message', async (ev: MessageEvent) => {
  const msg = ev.data;
  if (!msg || typeof msg !== 'object') return;

  try {
    switch (msg.type) {
      case 'load': {
        if (!msg.entry) {
          post({ type: 'error', message: 'missing entry in load message' });
          return;
        }
        await loadModel(msg.entry as TransformersModelEntry);
        return;
      }
      case 'generate': {
        // Validate/narrow each field defensively; handleGenerate reports its
        // own protocol errors (e.g. neither prompt nor chatMessages).
        const requestId: string = msg.requestId;
        const prompt: string | undefined = typeof msg.prompt === 'string' ? msg.prompt : undefined;
        const chatMessages: Array<{ role: string; content: string }> | undefined =
          Array.isArray(msg.chatMessages) ? msg.chatMessages : undefined;
        const options: GenerateOptions = msg.options ?? {};
        const image: Uint8Array | undefined = msg.image instanceof Uint8Array ? msg.image : undefined;
        await handleGenerate(requestId, prompt, chatMessages, options, image);
        return;
      }
      case 'abort': {
        // Shared stopping criteria across requests — any abort interrupts the
        // current generation. The pending main-thread promise resolves via the
        // 'done' or 'error' path depending on how generate() unwinds.
        try { stoppingCriteria?.interrupt?.(); } catch {}
        // NOTE(review): empty by design — a mismatched requestId still
        // interrupts; the main side filters responses by id.
        if (activeRequestId && activeRequestId !== msg.requestId) {
          // Different requestId — still interrupt; the main side filters by id.
        }
        return;
      }
      case 'reset': {
        // Drop KV cache and reset stopping criteria; model stays loaded.
        resetAll();
        return;
      }
      case 'dispose': {
        // Full teardown: release KV cache, dispose the model, and null out all
        // worker-global handles so a later 'load' starts clean.
        resetAll();
        try { model?.dispose?.(); } catch {}
        model = null;
        tokenizer = null;
        processor = null;
        entry = null;
        transformersMod = null;
        return;
      }
      default:
        // Unknown message types are ignored silently.
        return;
    }
  } catch (err) {
    const requestId = msg?.requestId;
    post({ type: 'error', requestId, message: String(err) });
    activeRequestId = null;
  }
});
|
|
666
|
+
|
|
667
|
+
export {}; // Mark this file as a module for TS.
|