@webmcp-auto-ui/agent 2.5.26 → 2.5.28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/types.ts CHANGED
@@ -6,11 +6,22 @@ export type { Recipe, McpRecipe } from './recipes/types.js';
6
6
  // Short model IDs for remote (Anthropic-compatible) providers
7
7
  export type RemoteModelId = 'haiku' | 'sonnet' | 'opus' | string;
8
8
 
9
- // Model IDs for in-browser WASM providers
9
+ // Model IDs for in-browser WASM providers (MediaPipe/LiteRT)
10
10
  export type WasmModelId = 'gemma-e2b' | 'gemma-e4b' | string;
11
11
 
12
+ // Model IDs for in-browser transformers.js providers (ONNX + WebGPU)
13
+ // Canonical list in ./providers/transformers-models.ts.
14
+ export type TransformersModelId =
15
+ | 'transformers-gemma-4-e2b'
16
+ | 'transformers-gemma-4-e4b'
17
+ | 'transformers-qwen-3-4b'
18
+ | 'transformers-qwen-3.5-2b'
19
+ | 'transformers-qwen-3.5-4b'
20
+ | 'transformers-ministral-3-3b'
21
+ | string;
22
+
12
23
  // Union of all LLM IDs used by canvas.llm and LLMSelector
13
- export type LLMId = RemoteModelId | WasmModelId;
24
+ export type LLMId = RemoteModelId | WasmModelId | TransformersModelId;
14
25
 
15
26
  // Backward compat alias
16
27
  export type ModelId = LLMId;
@@ -22,6 +33,7 @@ export interface ChatMessage {
22
33
 
23
34
  export type ContentBlock =
24
35
  | { type: 'text'; text: string }
36
+ | { type: 'image'; data: string; mediaType: string }
25
37
  | { type: 'tool_use'; id: string; name: string; input: Record<string, unknown> }
26
38
  | { type: 'tool_result'; tool_use_id: string; content: string };
27
39
 
@@ -50,9 +62,9 @@ export interface LLMProvider {
50
62
  readonly model: string;
51
63
  /** Hint for system prompt builders: which syntax this provider expects for tool
52
64
  * references. `undefined` → treated as `'generic'`. Providers using a non-standard
53
- * native call syntax (e.g. Gemma) should set this so the agent loop can build
54
- * the prompt with the correct formatting. */
55
- readonly promptKind?: 'generic' | 'gemma';
65
+ * native call syntax (e.g. Gemma, Qwen ChatML, Mistral [INST]) should set this so
66
+ * the agent loop can build the prompt with the correct formatting. */
67
+ readonly promptKind?: 'generic' | 'gemma' | 'qwen' | 'mistral';
56
68
  chat(
57
69
  messages: ChatMessage[],
58
70
  tools: ProviderTool[],
@@ -67,8 +79,6 @@ export interface ToolCall {
67
79
  result?: string;
68
80
  error?: string;
69
81
  elapsed?: number;
70
- /** true if this call was preceded by a discovery tool (search_recipes, get_recipe, etc.) */
71
- guided?: boolean;
72
82
  }
73
83
 
74
84
  export interface AgentMetrics {
@@ -0,0 +1,364 @@
1
+ /**
2
+ * OPFS model cache — download & cache N files per model repo in the
3
+ * Origin Private File System, returning a streamable Uint8Array for each file.
4
+ *
5
+ * Designed for large-model scenarios (MediaPipe .task, transformers.js ONNX
6
+ * multi-file bundles, tokenizers, configs). Each file is streamed from network
7
+ * with `tee()` so the consumer and the OPFS writer share a single download.
8
+ *
9
+ * Cache validation strategy: for every file we write a sibling `<file>.complete`
10
+ * marker containing the total size in bytes. A cache is considered valid only
11
+ * when the file exists AND the marker exists AND their sizes match (and if the
12
+ * caller provided `expectedSize`, that too). This avoids serving half-written
13
+ * files when a tab is closed mid-download.
14
+ */
15
+
16
+ export interface ModelFileSpec {
17
+ /** Path relative to the repo root (e.g. "onnx/model.onnx", "tokenizer.json") */
18
+ path: string;
19
+ /** Expected byte size if known — enables exact match cache validation */
20
+ expectedSize?: number;
21
+ }
22
+
23
+ export interface CacheProgress {
24
+ /** 0-1 progress of the file currently being processed */
25
+ fileProgress: number;
26
+ /** 0-1 aggregate progress across all files */
27
+ totalProgress: number;
28
+ status: 'cached' | 'downloading' | 'initializing' | 'error';
29
+ currentFile?: string;
30
+ /** Bytes loaded for current file */
31
+ loaded: number;
32
+ /** Total bytes expected for current file */
33
+ total: number;
34
+ }
35
+
36
+ /** Sanitize a repo id (e.g. "owner/name") into a valid OPFS folder name. */
37
+ function sanitizeRepoKey(repo: string): string {
38
+ return repo.replace(/\//g, '__').replace(/[^a-zA-Z0-9_.-]/g, '_');
39
+ }
40
+
41
+ /**
42
+ * Walk nested directory segments, creating subdirectories as needed.
43
+ * Returns `{ parent, filename }` where `parent` is the deepest directory
44
+ * handle and `filename` is the leaf name.
45
+ */
46
+ async function resolveFileParent(
47
+ repoDir: FileSystemDirectoryHandle,
48
+ relPath: string,
49
+ ): Promise<{ parent: FileSystemDirectoryHandle; filename: string }> {
50
+ const segments = relPath.split('/').filter((s) => s.length > 0);
51
+ if (segments.length === 0) throw new Error(`Invalid file path: ${relPath}`);
52
+ let parent = repoDir;
53
+ for (let i = 0; i < segments.length - 1; i++) {
54
+ parent = await parent.getDirectoryHandle(segments[i], { create: true });
55
+ }
56
+ return { parent, filename: segments[segments.length - 1] };
57
+ }
58
+
59
+ /**
60
+ * Removes legacy cache entries: pre-refactor the helper stored model files
61
+ * directly under `webmcp-models/<filename>`. The new layout nests them under
62
+ * `webmcp-models/<repo-key>/<filename>`, so the old top-level files are
63
+ * orphaned and can each weigh several GB.
64
+ * Runs once per process.
65
+ */
66
+ let legacyCleanupDone = false;
67
+ async function cleanupLegacyModelFiles(modelsDir: FileSystemDirectoryHandle): Promise<void> {
68
+ if (legacyCleanupDone) return;
69
+ legacyCleanupDone = true;
70
+ try {
71
+ const dir = modelsDir as unknown as {
72
+ entries: () => AsyncIterable<[string, FileSystemHandle]>;
73
+ };
74
+ for await (const [name, handle] of dir.entries()) {
75
+ if (handle.kind === 'file') {
76
+ try { await modelsDir.removeEntry(name); } catch { /* best-effort */ }
77
+ }
78
+ }
79
+ } catch { /* iteration unsupported or blocked — skip silently */ }
80
+ }
81
+
82
+ /**
83
+ * Load every requested file from the OPFS cache if valid, otherwise stream it
84
+ * from the HuggingFace repo and cache it in the background.
85
+ *
86
+ * The returned Map is keyed by the original `file.path` (including any
87
+ * subdirectory prefix) and contains a `ReadableStream<Uint8Array>` per file.
88
+ * Consumers are responsible for consuming the streams.
89
+ */
90
+ export async function loadOrDownloadModel(
91
+ repo: string,
92
+ files: ModelFileSpec[],
93
+ onProgress?: (progress: CacheProgress) => void,
94
+ ): Promise<Map<string, ReadableStream<Uint8Array>>> {
95
+ const root = await navigator.storage.getDirectory();
96
+ const modelsDir = await root.getDirectoryHandle('webmcp-models', { create: true });
97
+ await cleanupLegacyModelFiles(modelsDir);
98
+ const repoKey = sanitizeRepoKey(repo);
99
+ const repoDir = await modelsDir.getDirectoryHandle(repoKey, { create: true });
100
+
101
+ const totalExpected = files.reduce((s, f) => s + (f.expectedSize ?? 0), 0);
102
+ let totalLoaded = 0;
103
+ const result = new Map<string, ReadableStream<Uint8Array>>();
104
+
105
+ for (const file of files) {
106
+ const { parent, filename } = await resolveFileParent(repoDir, file.path);
107
+ const markerName = `${filename}.complete`;
108
+
109
+ // Clean orphan .crswap files (Chrome WritableStream leftovers).
110
+ try { await parent.removeEntry(`${filename}.crswap`); } catch { /* no swap — OK */ }
111
+
112
+ // ── Cache hit attempt ───────────────────────────────────────────
113
+ let cacheHitSize: number | null = null;
114
+ try {
115
+ const fileHandle = await parent.getFileHandle(filename);
116
+ const fileObj = await fileHandle.getFile();
117
+ let expectedFromMarker: number | null = null;
118
+ try {
119
+ const markerHandle = await parent.getFileHandle(markerName);
120
+ const markerText = await (await markerHandle.getFile()).text();
121
+ expectedFromMarker = Number(markerText.trim());
122
+ } catch {
123
+ // Marker missing — try backfill via HEAD request
124
+ try {
125
+ const head = await fetch(`https://huggingface.co/${repo}/resolve/main/${file.path}`, { method: 'HEAD' });
126
+ if (head.ok) {
127
+ const headerSize = Number(head.headers.get('content-length'));
128
+ if (Number.isFinite(headerSize) && headerSize > 0 && fileObj.size === headerSize) {
129
+ // Backfill marker
130
+ const markerHandle = await parent.getFileHandle(markerName, { create: true });
131
+ const markerWritable = await markerHandle.createWritable();
132
+ await markerWritable.write(String(headerSize));
133
+ await markerWritable.close();
134
+ expectedFromMarker = headerSize;
135
+ } else {
136
+ // Size mismatch — drop cached file
137
+ try { await parent.removeEntry(filename); } catch {}
138
+ }
139
+ }
140
+ } catch { /* network/HEAD failed — treat as cache miss */ }
141
+ }
142
+
143
+ if (
144
+ expectedFromMarker !== null
145
+ && fileObj.size === expectedFromMarker
146
+ && (file.expectedSize === undefined || file.expectedSize === fileObj.size)
147
+ ) {
148
+ cacheHitSize = fileObj.size;
149
+ onProgress?.({
150
+ fileProgress: 1,
151
+ totalProgress: totalExpected > 0 ? (totalLoaded + fileObj.size) / totalExpected : 1,
152
+ status: 'cached',
153
+ currentFile: file.path,
154
+ loaded: fileObj.size,
155
+ total: fileObj.size,
156
+ });
157
+ totalLoaded += fileObj.size;
158
+ result.set(file.path, fileObj.stream() as ReadableStream<Uint8Array>);
159
+ }
160
+ } catch {
161
+ // Cache miss — fall through to download
162
+ }
163
+
164
+ if (cacheHitSize !== null) continue;
165
+
166
+ // ── Network download (retry on 503) ─────────────────────────────
167
+ const url = `https://huggingface.co/${repo}/resolve/main/${file.path}`;
168
+ let response: Response | null = null;
169
+ for (let attempt = 0; attempt < 3; attempt++) {
170
+ response = await fetch(url);
171
+ if (response.ok) break;
172
+ if (response.status === 503 && attempt < 2) {
173
+ const wait = (attempt + 1) * 5000;
174
+ onProgress?.({
175
+ fileProgress: 0,
176
+ totalProgress: totalExpected > 0 ? totalLoaded / totalExpected : 0,
177
+ status: 'downloading',
178
+ currentFile: file.path,
179
+ loaded: 0,
180
+ total: file.expectedSize ?? 0,
181
+ });
182
+ await new Promise((r) => setTimeout(r, wait));
183
+ continue;
184
+ }
185
+ throw new Error(`Download failed for ${file.path}: ${response.status} ${response.statusText}`);
186
+ }
187
+ if (!response || !response.ok) throw new Error(`Download failed for ${file.path} after retries`);
188
+ if (!response.body) throw new Error(`Response body is null for ${file.path}`);
189
+
190
+ const headerTotal = Number(response.headers.get('content-length'));
191
+ const total = Number.isFinite(headerTotal) && headerTotal > 0
192
+ ? headerTotal
193
+ : (file.expectedSize ?? 0);
194
+
195
+ const [streamForConsumer, streamForCache] = response.body.tee();
196
+
197
+ // Background OPFS cache (fire-and-forget). Marker is written AFTER close
198
+ // succeeds, so a crashed tab will leave a file without a marker, which is
199
+ // detected as "invalid cache" on the next load.
200
+ (async () => {
201
+ try {
202
+ const handle = await parent.getFileHandle(filename, { create: true });
203
+ const writable = await handle.createWritable();
204
+ await streamForCache.pipeTo(writable);
205
+ const markerHandle = await parent.getFileHandle(markerName, { create: true });
206
+ const markerWritable = await markerHandle.createWritable();
207
+ await markerWritable.write(String(total));
208
+ await markerWritable.close();
209
+ } catch {
210
+ try { await parent.removeEntry(filename); } catch {}
211
+ try { await parent.removeEntry(markerName); } catch {}
212
+ }
213
+ })();
214
+
215
+ // Capture outer variables for the transform stream closure
216
+ const filePath = file.path;
217
+ const baselineTotalLoaded = totalLoaded;
218
+ const totalExpectedLocal = totalExpected;
219
+ let loaded = 0;
220
+ const progressTransform = new TransformStream<Uint8Array, Uint8Array>({
221
+ transform(chunk, controller) {
222
+ loaded += chunk.length;
223
+ const denom = totalExpectedLocal || total || 1;
224
+ onProgress?.({
225
+ fileProgress: total > 0 ? loaded / total : 0,
226
+ totalProgress: (baselineTotalLoaded + loaded) / denom,
227
+ status: 'downloading',
228
+ currentFile: filePath,
229
+ loaded,
230
+ total,
231
+ });
232
+ controller.enqueue(chunk);
233
+ },
234
+ flush() {
235
+ // No-op: totalLoaded is advanced eagerly below so that subsequent
236
+ // files reflect the contribution of the current file even if the
237
+ // consumer is still draining the stream.
238
+ },
239
+ });
240
+
241
+ // Eagerly advance the baseline for subsequent files: by the time we
242
+ // process the next file we assume this one will complete (or fail — in
243
+ // which case progress is moot anyway).
244
+ totalLoaded += total;
245
+
246
+ result.set(file.path, streamForConsumer.pipeThrough(progressTransform));
247
+ }
248
+
249
+ return result;
250
+ }
251
+
252
+ /**
253
+ * Remove every cached file for a given repo. Silently no-ops if the repo
254
+ * directory does not exist.
255
+ *
256
+ * Accepts either the original `owner/name` form OR the sanitized key form
257
+ * (`owner__name`). Both are tried so UIs that list cached repos via
258
+ * `listCachedModels()` (which only knows the sanitized key) can delete.
259
+ */
260
+ export async function clearModelCache(repo: string): Promise<void> {
261
+ try {
262
+ const root = await navigator.storage.getDirectory();
263
+ const modelsDir = await root.getDirectoryHandle('webmcp-models', { create: false });
264
+ const candidates = new Set<string>([repo, sanitizeRepoKey(repo)]);
265
+ for (const key of candidates) {
266
+ try { await modelsDir.removeEntry(key, { recursive: true }); } catch { /* not present */ }
267
+ }
268
+ } catch {
269
+ // Nothing to clear
270
+ }
271
+ }
272
+
273
+ /**
274
+ * Info about a single cached model repo in OPFS.
275
+ *
276
+ * Note: `repo` is the sanitized folder name as it appears on disk
277
+ * (e.g. `google__gemma-3n-E2B-it-litert-preview`). The original `owner/name`
278
+ * is not recoverable after sanitization.
279
+ */
280
+ export interface CachedModelInfo {
281
+ repo: string;
282
+ size: number;
283
+ fileCount: number;
284
+ lastModified: number;
285
+ }
286
+
287
+ /**
288
+ * Recursively sum file sizes under a directory handle, tracking count and
289
+ * max lastModified. Ignores entries that fail to enumerate.
290
+ */
291
+ export async function walkDirectoryStats(
292
+ dir: FileSystemDirectoryHandle,
293
+ ): Promise<{ size: number; fileCount: number; lastModified: number }> {
294
+ let size = 0;
295
+ let fileCount = 0;
296
+ let lastModified = 0;
297
+ try {
298
+ const iter = dir as unknown as { entries: () => AsyncIterable<[string, FileSystemHandle]> };
299
+ for await (const [, handle] of iter.entries()) {
300
+ if (handle.kind === 'file') {
301
+ try {
302
+ const f = await (handle as FileSystemFileHandle).getFile();
303
+ size += f.size;
304
+ fileCount += 1;
305
+ if (f.lastModified > lastModified) lastModified = f.lastModified;
306
+ } catch { /* skip */ }
307
+ } else if (handle.kind === 'directory') {
308
+ const sub = await walkDirectoryStats(handle as FileSystemDirectoryHandle);
309
+ size += sub.size;
310
+ fileCount += sub.fileCount;
311
+ if (sub.lastModified > lastModified) lastModified = sub.lastModified;
312
+ }
313
+ }
314
+ } catch { /* iteration unsupported */ }
315
+ return { size, fileCount, lastModified };
316
+ }
317
+
318
+ /**
319
+ * List every cached model repo in OPFS with cumulative size, file count and
320
+ * last-modified timestamp. Returns `[]` if `webmcp-models` does not exist
321
+ * or if OPFS itself is unavailable.
322
+ */
323
+ export async function listCachedModels(): Promise<CachedModelInfo[]> {
324
+ try {
325
+ if (!navigator.storage?.getDirectory) return [];
326
+ const root = await navigator.storage.getDirectory();
327
+ let modelsDir: FileSystemDirectoryHandle;
328
+ try {
329
+ modelsDir = await root.getDirectoryHandle('webmcp-models', { create: false });
330
+ } catch {
331
+ return [];
332
+ }
333
+ const out: CachedModelInfo[] = [];
334
+ const iter = modelsDir as unknown as { entries: () => AsyncIterable<[string, FileSystemHandle]> };
335
+ try {
336
+ for await (const [name, handle] of iter.entries()) {
337
+ if (handle.kind !== 'directory') continue;
338
+ const stats = await walkDirectoryStats(handle as FileSystemDirectoryHandle);
339
+ if (stats.size === 0 || stats.fileCount === 0) {
340
+ // Orphan directory (e.g. worker bug that created an empty repo key).
341
+ try { await modelsDir.removeEntry(name, { recursive: true }); } catch { /* best-effort */ }
342
+ continue;
343
+ }
344
+ out.push({ repo: name, size: stats.size, fileCount: stats.fileCount, lastModified: stats.lastModified });
345
+ }
346
+ } catch { /* iteration unsupported */ }
347
+ out.sort((a, b) => b.size - a.size);
348
+ return out;
349
+ } catch {
350
+ return [];
351
+ }
352
+ }
353
+
354
+ /**
355
+ * Nuke the whole `webmcp-models` directory. No-op if it does not exist.
356
+ */
357
+ export async function clearAllModelCaches(): Promise<void> {
358
+ try {
359
+ const root = await navigator.storage.getDirectory();
360
+ await root.removeEntry('webmcp-models', { recursive: true });
361
+ } catch {
362
+ // Nothing to clear
363
+ }
364
+ }
@@ -0,0 +1,195 @@
1
+ /**
2
+ * storage-inventory — enumerate OPFS (outside webmcp-models), Cache Storage
3
+ * API and IndexedDB entries. Best-effort: any per-entry failure is swallowed.
4
+ */
5
+ import { walkDirectoryStats } from './opfs-cache.js';
6
+
7
+ export type StorageSource = 'opfs' | 'cache-storage' | 'indexeddb';
8
+
9
+ export interface StorageEntry {
10
+ source: StorageSource;
11
+ key: string;
12
+ size: number;
13
+ sizeKnown: boolean;
14
+ itemCount: number;
15
+ lastModified: number;
16
+ modelLike: boolean;
17
+ }
18
+
19
+ const MODEL_HINTS = [
20
+ 'huggingface', 'hf-', 'hf_', 'gemma', 'litert', 'onnx',
21
+ 'qwen', 'mistral', 'llama', 'transformers', 'tokenizer', 'mediapipe',
22
+ ];
23
+
24
+ const BLOB_SIZE_LIMIT = 50 * 1024 * 1024; // 50 MB — skip blob() fallback above this
25
+
26
+ function isModelLike(key: string): boolean {
27
+ const k = key.toLowerCase();
28
+ return MODEL_HINTS.some((h) => k.includes(h));
29
+ }
30
+
31
+ /** OPFS entries outside the `webmcp-models` directory (which has its own UI). */
32
+ async function listOpfsEntries(): Promise<StorageEntry[]> {
33
+ const out: StorageEntry[] = [];
34
+ try {
35
+ if (!navigator.storage?.getDirectory) return out;
36
+ const root = await navigator.storage.getDirectory();
37
+ const iter = root as unknown as { entries: () => AsyncIterable<[string, FileSystemHandle]> };
38
+ for await (const [name, handle] of iter.entries()) {
39
+ if (name === 'webmcp-models') continue;
40
+ if (handle.kind !== 'directory') continue;
41
+ try {
42
+ const stats = await walkDirectoryStats(handle as FileSystemDirectoryHandle);
43
+ out.push({
44
+ source: 'opfs',
45
+ key: name,
46
+ size: stats.size,
47
+ sizeKnown: true,
48
+ itemCount: stats.fileCount,
49
+ lastModified: stats.lastModified,
50
+ modelLike: isModelLike(name),
51
+ });
52
+ } catch { /* skip entry */ }
53
+ }
54
+ } catch { /* OPFS unavailable */ }
55
+ return out;
56
+ }
57
+
58
+ async function measureResponse(response: Response): Promise<{ size: number; sizeKnown: boolean; lastModified: number }> {
59
+ let size = 0;
60
+ let sizeKnown = false;
61
+ let lastModified = 0;
62
+ try {
63
+ const cl = response.headers.get('content-length');
64
+ const parsed = cl !== null ? parseInt(cl, 10) : NaN;
65
+ if (Number.isFinite(parsed) && parsed >= 0) {
66
+ size = parsed;
67
+ sizeKnown = true;
68
+ } else {
69
+ // Fallback: blob() — but only for small responses to avoid GB memory hits.
70
+ try {
71
+ const blob = await response.clone().blob();
72
+ if (blob.size < BLOB_SIZE_LIMIT) {
73
+ size = blob.size;
74
+ sizeKnown = true;
75
+ }
76
+ } catch { /* blob failed */ }
77
+ }
78
+ const dateHdr = response.headers.get('date');
79
+ if (dateHdr) {
80
+ const t = new Date(dateHdr).getTime();
81
+ if (Number.isFinite(t)) lastModified = t;
82
+ }
83
+ } catch { /* header access failed */ }
84
+ return { size, sizeKnown, lastModified };
85
+ }
86
+
87
+ async function listCacheStorageEntries(): Promise<StorageEntry[]> {
88
+ const out: StorageEntry[] = [];
89
+ try {
90
+ if (typeof caches === 'undefined' || !caches.keys) return out;
91
+ const names = await caches.keys();
92
+ for (const name of names) {
93
+ try {
94
+ const cache = await caches.open(name);
95
+ const requests = await cache.keys();
96
+ let totalSize = 0;
97
+ let anyUnknown = false;
98
+ let lastModified = 0;
99
+ for (const req of requests) {
100
+ try {
101
+ const resp = await cache.match(req);
102
+ if (!resp) continue;
103
+ const m = await measureResponse(resp);
104
+ if (m.sizeKnown) totalSize += m.size;
105
+ else anyUnknown = true;
106
+ if (m.lastModified > lastModified) lastModified = m.lastModified;
107
+ } catch { /* per-request skip */ }
108
+ }
109
+ out.push({
110
+ source: 'cache-storage',
111
+ key: name,
112
+ size: totalSize,
113
+ sizeKnown: !anyUnknown,
114
+ itemCount: requests.length,
115
+ lastModified,
116
+ modelLike: isModelLike(name),
117
+ });
118
+ } catch { /* skip cache */ }
119
+ }
120
+ } catch { /* Cache API unavailable */ }
121
+ return out;
122
+ }
123
+
124
+ async function listIndexedDbEntries(): Promise<StorageEntry[]> {
125
+ const out: StorageEntry[] = [];
126
+ try {
127
+ const idb = indexedDB as IDBFactory & { databases?: () => Promise<Array<{ name?: string; version?: number }>> };
128
+ if (typeof idb.databases !== 'function') return out;
129
+ const dbs = await idb.databases();
130
+ for (const db of dbs) {
131
+ if (!db.name) continue;
132
+ out.push({
133
+ source: 'indexeddb',
134
+ key: db.name,
135
+ size: 0,
136
+ sizeKnown: false,
137
+ itemCount: db.version ?? 0,
138
+ lastModified: 0,
139
+ modelLike: isModelLike(db.name),
140
+ });
141
+ }
142
+ } catch { /* IDB listing unsupported (e.g. older Safari) */ }
143
+ return out;
144
+ }
145
+
146
+ /** Enumerate all Chrome-visible caches — OPFS (minus webmcp-models), Cache Storage, IndexedDB. */
147
+ export async function listAllStorage(): Promise<StorageEntry[]> {
148
+ const [opfs, cacheStorage, idb] = await Promise.all([
149
+ listOpfsEntries(),
150
+ listCacheStorageEntries(),
151
+ listIndexedDbEntries(),
152
+ ]);
153
+ return [...opfs, ...cacheStorage, ...idb];
154
+ }
155
+
156
+ function deleteIdb(name: string): Promise<void> {
157
+ return new Promise((resolve) => {
158
+ try {
159
+ const req = indexedDB.deleteDatabase(name);
160
+ req.onsuccess = () => resolve();
161
+ req.onerror = () => resolve();
162
+ req.onblocked = () => resolve();
163
+ } catch { resolve(); }
164
+ });
165
+ }
166
+
167
+ /** Delete a single entry regardless of source. */
168
+ export async function deleteStorageEntry(entry: StorageEntry): Promise<void> {
169
+ try {
170
+ if (entry.source === 'opfs') {
171
+ const root = await navigator.storage.getDirectory();
172
+ try { await root.removeEntry(entry.key, { recursive: true }); } catch { /* best-effort */ }
173
+ } else if (entry.source === 'cache-storage') {
174
+ try { await caches.delete(entry.key); } catch { /* best-effort */ }
175
+ } else if (entry.source === 'indexeddb') {
176
+ await deleteIdb(entry.key);
177
+ }
178
+ } catch { /* best-effort */ }
179
+ }
180
+
181
+ /** Delete every entry of a given source. */
182
+ export async function clearAllStorage(source: StorageSource): Promise<void> {
183
+ try {
184
+ if (source === 'opfs') {
185
+ const entries = await listOpfsEntries();
186
+ for (const e of entries) await deleteStorageEntry(e);
187
+ } else if (source === 'cache-storage') {
188
+ const names = await caches.keys();
189
+ for (const n of names) { try { await caches.delete(n); } catch { /* skip */ } }
190
+ } else if (source === 'indexeddb') {
191
+ const entries = await listIndexedDbEntries();
192
+ for (const e of entries) await deleteIdb(e.key);
193
+ }
194
+ } catch { /* best-effort */ }
195
+ }