@blank-utils/llm 0.2.2 → 0.2.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +355 -87
- package/dist/backends/transformers.d.ts +3 -20
- package/dist/backends/transformers.d.ts.map +1 -1
- package/dist/backends/webllm.d.ts +3 -38
- package/dist/backends/webllm.d.ts.map +1 -1
- package/dist/core.d.ts +1 -1
- package/dist/core.d.ts.map +1 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +782 -64
- package/dist/models.d.ts +74 -0
- package/dist/models.d.ts.map +1 -0
- package/dist/react/chat-input.d.ts +43 -0
- package/dist/react/chat-input.d.ts.map +1 -0
- package/dist/react/components.d.ts +52 -0
- package/dist/react/components.d.ts.map +1 -0
- package/dist/react/index.d.ts +3 -1
- package/dist/react/index.d.ts.map +1 -1
- package/dist/react/index.js +780 -64
- package/dist/types.d.ts +3 -1
- package/dist/types.d.ts.map +1 -1
- package/package.json +1 -1
package/README.md
CHANGED
@@ -1,25 +1,42 @@
-
+<p align="center">
+  <strong>@blank-utils/llm</strong>
+</p>
 
->
+<p align="center">
+  Run LLMs directly in your browser — zero server, zero API keys.
+</p>
 
-
+<p align="center">
+  <a href="https://www.npmjs.com/package/@blank-utils/llm"><img src="https://img.shields.io/npm/v/@blank-utils/llm?style=flat-square&color=0ea5e9" alt="npm"></a>
+  <a href="https://github.com/kiritocode1/local-llm/blob/main/LICENSE"><img src="https://img.shields.io/npm/l/@blank-utils/llm?style=flat-square" alt="license"></a>
+  <img src="https://img.shields.io/badge/react-%E2%89%A518-61dafb?style=flat-square" alt="react">
+  <img src="https://img.shields.io/badge/webgpu-supported-brightgreen?style=flat-square" alt="webgpu">
+</p>
 
-
-
-
-
-
+---
+
+## Features
+
+- 🚀 **WebGPU acceleration** via [WebLLM](https://github.com/mlc-ai/web-llm) — falls back to WASM through [Transformers.js](https://github.com/huggingface/transformers.js)
+- ⚛️ **React hooks** — `useChat`, `useStream`, `useCompletion` with eager background loading
+- 🔤 **Type-safe model selection** — full autocomplete for 30+ supported models across both backends
+- 📝 **Streaming support** — real-time token output with abort control
+- 🔄 **Message queueing** — users can type while models download; messages are processed once ready
+- 🧩 **Vanilla JS friendly** — works outside React with DOM helpers and a simple `createLLM()` API
+- 📦 **Zero config** — auto-detects WebGPU/WASM and picks the best backend
 
 ## Installation
 
 ```bash
 pnpm add @blank-utils/llm
 # or
-npm install @blank-utils/llm
-# or
 bun add @blank-utils/llm
 ```
 
+> React is an **optional** peer dependency. The core API works without it.
+
+---
+
 ## Quick Start
 
 ### React (Recommended)
@@ -45,7 +62,10 @@ function Chat() {
     isGenerating,
     isPending,
     streamingText,
-  } = useChat(
+  } = useChat({
+    systemPrompt: "You are a helpful assistant.",
+    queueWhileLoading: true,
+  });
 
   return (
     <div>
@@ -53,7 +73,7 @@ function Chat() {
 
       {messages.map((m, i) => (
         <div key={i}>
-          {m.role}
+          <strong>{m.role}:</strong> {m.content}
         </div>
       ))}
 
@@ -83,148 +103,396 @@ const llm = await createLLM({
 });
 
 // Streaming
-await llm.stream("Tell me a joke", (token) => {
-
+await llm.stream("Tell me a joke", (token, fullText) => {
+  document.getElementById("output")!.textContent = fullText;
 });
 
-//
+// Non-streaming
 const response = await llm.chat("Hello!");
+console.log(response);
+```
+
+### Attach to DOM Elements
+
+```typescript
+import { createLLM } from "@blank-utils/llm";
+
+const llm = await createLLM({ model: "smollm2-360m" });
+
+// Wire up an input + output with one call
+const cleanup = llm.attachToInput("#prompt-input", "#response-output", {
+  triggerOnEnter: true,
+  clearOnSend: true,
+});
+```
+
+---
+
+## Architecture
+
+```
+@blank-utils/llm
+├── src/
+│   ├── index.ts            # Main entry — re-exports everything
+│   ├── core.ts             # createLLM() factory, LocalLLM interface
+│   ├── models.ts           # Centralized model registry (single source of truth)
+│   ├── types.ts            # All TypeScript interfaces & types
+│   ├── detect.ts           # WebGPU / WASM capability detection
+│   ├── helpers.ts          # DOM utilities (attachToElements, createChatUI, etc.)
+│   ├── backends/
+│   │   ├── webllm.ts       # WebLLM backend (WebGPU)
+│   │   └── transformers.ts # Transformers.js backend (WASM / WebGPU)
+│   └── react/
+│       └── index.tsx       # React context, provider, hooks, components
+└── dist/                   # Built output (ESM)
+```
+
+### Dual Backend System
+
+|                   | **WebLLM**                | **Transformers.js** |
+| ----------------- | ------------------------- | ------------------- |
+| **Engine**        | MLC / TVM compiled models | ONNX Runtime        |
+| **Device**        | WebGPU only               | WebGPU or WASM      |
+| **Performance**   | Best (GPU-native)         | Good (CPU fallback) |
+| **Model source**  | MLC prebuilt cache        | HuggingFace Hub     |
+| **Auto-detected** | ✅ when WebGPU present     | ✅ fallback          |
+
+The library auto-selects the best backend via `detectCapabilities()`. You can also force a backend:
+
+```tsx
+<LLMProvider model="llama-3.2-3b" backend="webllm" />
 ```
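The same forcing works without React. A minimal sketch using only calls shown elsewhere in this README (`detectCapabilities()` and `createLLM()`; the specific model choices are illustrative):

```typescript
import { createLLM, detectCapabilities } from "@blank-utils/llm";

// Inspect what the browser supports before committing to a download.
const caps = await detectCapabilities();

// Prefer the GPU-native WebLLM backend when WebGPU is present; otherwise
// fall back to a smaller model on the Transformers.js / WASM path.
const llm = await createLLM({
  model: caps.webgpu ? "llama-3.2-1b" : "smollm2-360m",
  backend: caps.recommendedBackend,
});
```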
 
-
+---
 
-
+## React API
 
-
+### `<LLMProvider>`
+
+Wrap your app to enable LLM functionality. All hooks must be used inside this provider.
 
 ```tsx
 <LLMProvider
-  model="qwen-2.5-0.5b"    // Model
+  model="qwen-2.5-0.5b"    // Model alias or full ID
   backend="auto"           // 'webllm' | 'transformers' | 'auto'
-  autoLoad={true}          // Start loading
-
-
-
+  autoLoad={true}          // Start loading on mount
+  device="auto"            // 'webgpu' | 'wasm' | 'auto'
+  quantization="q4"        // 'q4' | 'q8' | 'fp16' | 'fp32'
+  systemPrompt="..."       // Default system prompt
+  onProgress={(p) => {}}   // Loading progress
+  onLoad={(llm) => {}}     // Called when model is ready
+  onError={(err) => {}}    // Error handler
 >
   {children}
 </LLMProvider>
 ```
 
+> **Tip:** Use `key={modelId}` on `<LLMProvider>` to force a full re-mount when switching models dynamically.
+
 ### `useLLM()`
 
-Access the LLM instance and loading state:
+Access the raw LLM instance and loading state:
 
 ```tsx
 const {
-  llm,          //
-  isLoading,    //
-  isReady,      //
-  loadProgress, // { progress: number, status: string }
-  error,        // Error
-  modelId,      //
-  backend,      // 'webllm' | 'transformers'
-  reload,       //
-  unload,       //
+  llm,          // LocalLLM | null
+  isLoading,    // boolean — model is downloading
+  isReady,      // boolean — model ready for inference
+  loadProgress, // { progress: number, status: string } | null
+  error,        // Error | null
+  modelId,      // string | null — current model ID
+  backend,      // 'webllm' | 'transformers' | null
+  reload,       // () => Promise<void>
+  unload,       // () => Promise<void>
 } = useLLM();
 ```
 
-### `useChat(options)`
+### `useChat(options?)`
 
-Full chat conversation management with **eager loading**
+Full chat conversation management with **eager loading** — users can send messages while the model downloads. Messages are queued and processed automatically once the model is ready.
 
 ```tsx
 const {
   messages,      // ChatMessage[]
-  input,         //
-  setInput,      //
-  send,          //
-  isGenerating,  //
-  isPending,     //
-  streamingText, //
-  stop,          //
-  clear,         //
-  append,        //
-  reload,        //
+  input,         // string — controlled input value
+  setInput,      // (value: string) => void
+  send,          // (content?: string) => Promise<string>
+  isGenerating,  // boolean
+  isPending,     // boolean — message queued, waiting for model
+  streamingText, // string — current partial response
+  stop,          // () => void
+  clear,         // () => void
+  append,        // (message: ChatMessage) => void
+  reload,        // () => Promise<string> — regenerate last response
 } = useChat({
   systemPrompt: "You are a helpful assistant.",
-  queueWhileLoading: true, //
+  queueWhileLoading: true, // default: true
+  initialMessages: [],
+  generateOptions: { temperature: 0.7, maxTokens: 512 },
+  onStart: () => {},
   onToken: (token, fullText) => {},
   onFinish: (response) => {},
+  onError: (error) => {},
 });
 ```
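Because `queueWhileLoading` defaults to `true`, an input box can stay live during the initial download. A small sketch combining `useChat` with the `useLLM` state documented above (assuming `send()` with no argument submits the controlled `input` value):

```tsx
import { useChat, useLLM } from "@blank-utils/llm/react";

function QueueAwareInput() {
  const { isLoading, loadProgress } = useLLM();
  const { input, setInput, send, isPending } = useChat();

  return (
    <form onSubmit={(e) => { e.preventDefault(); void send(); }}>
      <input value={input} onChange={(e) => setInput(e.target.value)} />
      {/* The message sits in the queue until the model finishes loading */}
      {isPending && isLoading && (
        <span>Queued: {loadProgress?.status} ({loadProgress?.progress}%)</span>
      )}
    </form>
  );
}
```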
 
-### `useStream(options)`
+### `useStream(options?)`
 
-Simple streaming generation:
+Simple streaming generation without chat history management:
 
 ```tsx
-const { text, isStreaming, stream, stop, clear } = useStream(
+const { text, isStreaming, stream, stop, clear } = useStream({
+  onToken: (token, fullText) => {},
+  onFinish: (response) => {},
+  onError: (error) => {},
+  generateOptions: { temperature: 0.7 },
+});
 
 await stream("Tell me a story");
+// or with message array:
+await stream([{ role: "user", content: "Tell me a story" }]);
 ```
 
-### `useCompletion(options)`
+### `useCompletion(options?)`
 
-Non-streaming completion:
+Non-streaming, single-shot completion:
 
 ```tsx
-const { completion, isLoading, complete, clear } = useCompletion(
+const { completion, isLoading, complete, clear } = useCompletion({
+  generateOptions: { maxTokens: 256 },
+});
 
-await complete("Summarize this text");
+const result = await complete("Summarize this text");
 ```
 
-
+### `<LLMLoading>` / `<LLMReady>`
 
-
-
-Shows content only while loading:
+Conditional rendering components:
 
 ```tsx
-<LLMLoading>
-  <p>
+<LLMLoading className="loading-state">
+  <p>Downloading model...</p>
 </LLMLoading>
+
+<LLMReady fallback={<Spinner />}>
+  <ChatInterface />
+</LLMReady>
 ```
 
-
+---
 
-
+## Vanilla JS API
 
-
-
-
-
+### `createLLM(config?)`
+
+Factory function that auto-detects capabilities, picks a backend, loads a model, and returns a ready-to-use `LocalLLM` instance:
+
+```typescript
+import { createLLM } from "@blank-utils/llm";
+
+const llm = await createLLM({
+  model: "phi-3.5-mini",
+  backend: "auto",
+  systemPrompt: "You are a helpful assistant.",
+  onLoadProgress: (p) => console.log(`${p.status}: ${p.progress}%`),
+});
+
+// Chat (non-streaming)
+const answer = await llm.chat("What is 2+2?");
+
+// Stream
+await llm.stream("Write a poem", (token, fullText) => {
+  process.stdout.write(token);
+});
+
+// Attach to DOM
+const cleanup = llm.attachToInput("#input", "#output");
+
+// Free resources
+await llm.unload();
 ```
 
-
+### DOM Helpers
+
+```typescript
+import {
+  createOutputStreamer,
+  attachToElements,
+  createChatUI,
+  createLoadingIndicator,
+} from "@blank-utils/llm";
+
+// Auto-scroll streaming output into an element
+const streamer = createOutputStreamer("#output", { scrollToBottom: true });
+
+// Create a full chat UI in one call
+const { input, output, sendButton, cleanup } = createChatUI("#container");
+
+// Progress indicator
+const loading = createLoadingIndicator("#loading-container");
+loading.show();
+loading.setProgress(50, "Downloading weights...");
+loading.hide();
+```
+
+### Capability Detection
+
+```typescript
+import {
+  detectCapabilities,
+  logCapabilities,
+  isWebGPUSupported,
+} from "@blank-utils/llm";
+
+const caps = await detectCapabilities();
+// { webgpu: true, wasm: true, recommendedBackend: 'webllm', recommendedDevice: 'webgpu' }
+
+await logCapabilities(); // Pretty-prints to console
 
-
+if (await isWebGPUSupported()) {
+  console.log("WebGPU is available!");
+}
+```
+
+---
+
+## Available Models
 
-
-
-| `qwen-2.5-0.5b` | Qwen 2.5 0.5B Instruct | ~350MB |
-| `qwen-2.5-1.5b` | Qwen 2.5 1.5B Instruct | ~900MB |
-| `qwen-2.5-coder-0.5b` | Qwen 2.5 Coder 0.5B | ~350MB |
-| `smollm2-135m` | SmolLM2 135M | ~100MB |
-| `smollm2-360m` | SmolLM2 360M | ~250MB |
-| `tinyllama` | TinyLlama 1.1B | ~700MB |
-| `phi-3-mini` | Phi-3 Mini 4K | ~2.3GB |
+All models are defined in `src/models.ts` and exported as `WEBLLM_MODELS` and `TRANSFORMERS_MODELS`.
+You can use either the **alias** (short name) or the **full model ID** when specifying a model.
 
 ### WebLLM Backend (WebGPU)
 
-| Alias
-|
-| `
-| `llama-3.2-
-| `llama-3.
-| `
-| `
-| `
+| Alias                   | Model                        | Notes                       |
+| ----------------------- | ---------------------------- | --------------------------- |
+| `llama-3.2-1b`          | Llama 3.2 1B Instruct        | Compact, great quality      |
+| `llama-3.2-3b`          | Llama 3.2 3B Instruct        | Balanced                    |
+| `llama-3.1-8b`          | Llama 3.1 8B Instruct        | High quality                |
+| `llama-3.1-8b-1k`       | Llama 3.1 8B (1K ctx)        | Lower memory                |
+| `phi-3.5-mini`          | Phi 3.5 Mini Instruct        | **Default** — great balance |
+| `phi-3.5-mini-1k`       | Phi 3.5 Mini (1K ctx)        | Lower memory                |
+| `phi-3.5-vision`        | Phi 3.5 Vision               | Vision model                |
+| `qwen-2.5-0.5b`         | Qwen 2.5 0.5B                | Tiny, fast                  |
+| `qwen-2.5-1.5b`         | Qwen 2.5 1.5B                | Small                       |
+| `qwen-2.5-3b`           | Qwen 2.5 3B                  | Medium                      |
+| `qwen-2.5-7b`           | Qwen 2.5 7B                  | Large                       |
+| `qwen-2.5-coder-0.5b`   | Qwen 2.5 Coder 0.5B          | Code-focused                |
+| `qwen-2.5-coder-1.5b`   | Qwen 2.5 Coder 1.5B          | Code-focused                |
+| `qwen-3-0.6b`           | Qwen 3 0.6B                  | Latest gen                  |
+| `qwen-3-1.7b`           | Qwen 3 1.7B                  | Latest gen                  |
+| `qwen-3-4b`             | Qwen 3 4B                    | Latest gen                  |
+| `qwen-3-8b`             | Qwen 3 8B                    | Latest gen                  |
+| `gemma-2-2b`            | Gemma 2 2B                   | Google, efficient           |
+| `gemma-2-2b-1k`         | Gemma 2 2B (1K ctx)          | Lower memory                |
+| `gemma-2-9b`            | Gemma 2 9B                   | Large                       |
+| `smollm2-135m`          | SmolLM2 135M                 | Ultra lightweight           |
+| `smollm2-360m`          | SmolLM2 360M                 | Lightweight                 |
+| `smollm2-1.7b`          | SmolLM2 1.7B                 | Small                       |
+| `mistral-7b`            | Mistral 7B v0.3              | General purpose             |
+| `deepseek-r1-qwen-7b`   | DeepSeek R1 Distill Qwen 7B  | Reasoning                   |
+| `deepseek-r1-llama-8b`  | DeepSeek R1 Distill Llama 8B | Reasoning                   |
+| `hermes-3-llama-3.2-3b` | Hermes 3 Llama 3.2 3B        | Function calling            |
+| `hermes-3-llama-3.1-8b` | Hermes 3 Llama 3.1 8B        | Function calling            |
+
+### Transformers.js Backend (CPU / WASM)
+
+| Alias                 | HuggingFace Model ID                         | Notes        |
+| --------------------- | -------------------------------------------- | ------------ |
+| `qwen-2.5-0.5b`       | `onnx-community/Qwen2.5-0.5B-Instruct`       | **Default**  |
+| `qwen-2.5-1.5b`       | `onnx-community/Qwen2.5-1.5B-Instruct`       | Good quality |
+| `qwen-2.5-coder-0.5b` | `onnx-community/Qwen2.5-Coder-0.5B-Instruct` | Code         |
+| `qwen-2.5-coder-1.5b` | `onnx-community/Qwen2.5-Coder-1.5B-Instruct` | Code         |
+| `qwen-3-0.6b`         | `onnx-community/Qwen3-0.6B-ONNX`             | Latest gen   |
+| `smollm2-135m`        | `HuggingFaceTB/SmolLM2-135M-Instruct`        | Ultra fast   |
+| `smollm2-360m`        | `HuggingFaceTB/SmolLM2-360M-Instruct`        | Fast         |
+| `smollm2-1.7b`        | `HuggingFaceTB/SmolLM2-1.7B-Instruct`        | Good         |
+| `phi-3-mini`          | `Xenova/Phi-3-mini-4k-instruct`              | Strong       |
+| `tinyllama`           | `Xenova/TinyLlama-1.1B-Chat-v1.0`            | Very fast    |
+
+### Type-Safe Model Selection
+
+The `model` prop accepts any key from `WEBLLM_MODELS` or `TRANSFORMERS_MODELS` with full autocomplete, while still allowing arbitrary strings for custom models:
+
+```typescript
+import type {
+  SupportedModel,
+  WebLLMModelID,
+  TransformersModelID,
+} from "@blank-utils/llm";
+
+// Full autocomplete for known models
+const model: SupportedModel = "qwen-2.5-0.5b"; // ✅ autocomplete
+
+// Custom model IDs still work
+const custom: SupportedModel = "my-org/custom-model-onnx"; // ✅ no error
+
+// Import the model maps for programmatic use
+import { WEBLLM_MODELS, TRANSFORMERS_MODELS } from "@blank-utils/llm";
+
+Object.keys(WEBLLM_MODELS);       // all WebLLM aliases
+Object.keys(TRANSFORMERS_MODELS); // all Transformers.js aliases
+```
+
+---
+
+## Build & Development
+
+```bash
+# Install dependencies
+bun install
+
+# Build (clean → bundle → assets → types)
+bun run build
+
+# Type-check only
+bun run typecheck
+
+# Run demo page
+bun run demo
+
+# Run tests
+bun test
+```
+
+### Build Pipeline
+
+| Script        | What it does                                                                                                        |
+| ------------- | ------------------------------------------------------------------------------------------------------------------ |
+| `clean`       | Removes `dist/`                                                                                                     |
+| `build:js`    | Bundles `src/index.ts` → `dist/index.js` and `src/react/index.tsx` → `dist/react/index.js` (ESM, externals: react)  |
+| `postbuild`   | Copies WASM + ONNX runtime assets into `dist/` and `dist/react/`                                                    |
+| `build:types` | Generates `.d.ts` declaration files via `tsc`                                                                       |
+| `build`       | Runs all of the above in sequence                                                                                   |
+
+### Package Exports
+
+```jsonc
+{
+  ".": {
+    "types": "./dist/index.d.ts",
+    "import": "./dist/index.js",
+  },
+  "./react": {
+    "types": "./dist/react/index.d.ts",
+    "import": "./dist/react/index.js",
+  },
+}
+```
+
+---
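The two subpaths mirror the two usage styles; a minimal consumer (assuming a bundler or runtime that resolves the `exports` map) imports them like this:

```typescript
// Core API: no React required
import { createLLM, detectCapabilities } from "@blank-utils/llm";

// React provider and hooks live behind the "./react" subpath
import { LLMProvider, useChat } from "@blank-utils/llm/react";
```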
 
 ## Browser Requirements
 
-
-
+| Feature               | Minimum                    | Notes                          |
+| --------------------- | -------------------------- | ------------------------------ |
+| **WebGPU**            | Chrome 113+, Edge 113+     | Required for WebLLM backend    |
+| **WebAssembly**       | All modern browsers        | Fallback for Transformers.js   |
+| **SharedArrayBuffer** | Requires COOP/COEP headers | Needed for multi-threaded WASM |
+
+The library automatically detects capabilities and picks the best backend. No manual configuration needed.
+
+---
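Of these, only the COOP/COEP pair usually needs action, since it must be set by the server. The header values below are the standard ones browsers require before exposing `SharedArrayBuffer`; the Vite config is just one illustrative place to set them (the package itself does not assume Vite):

```typescript
// vite.config.ts (sketch: serving with cross-origin isolation enabled)
import { defineConfig } from "vite";

export default defineConfig({
  server: {
    headers: {
      // Both headers are needed for crossOriginIsolated === true,
      // which unlocks SharedArrayBuffer and multi-threaded WASM.
      "Cross-Origin-Opener-Policy": "same-origin",
      "Cross-Origin-Embedder-Policy": "require-corp",
    },
  },
});
```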
 
 ## License
 
-MIT
+MIT © [blank](https://github.com/kiritocode1)

package/dist/backends/transformers.d.ts
CHANGED

@@ -7,26 +7,9 @@ import type { LLMProvider, ChatMessage, GenerateOptions, StreamCallback, LoadPro
  * Default model for Transformers.js backend
  * Using Qwen2.5 0.5B as it's well-tested with ONNX
  */
-
-
-
- * These are specifically converted for browser use via transformers.js
- *
- * @see https://huggingface.co/onnx-community for more models
- */
-export declare const TRANSFORMERS_MODELS: {
-    readonly 'qwen-2.5-0.5b': "onnx-community/Qwen2.5-0.5B-Instruct";
-    readonly 'qwen-2.5-1.5b': "onnx-community/Qwen2.5-1.5B-Instruct";
-    readonly 'qwen-2.5-coder-0.5b': "onnx-community/Qwen2.5-Coder-0.5B-Instruct";
-    readonly 'qwen-2.5-coder-1.5b': "onnx-community/Qwen2.5-Coder-1.5B-Instruct";
-    readonly 'qwen-3-0.6b': "onnx-community/Qwen3-0.6B-ONNX";
-    readonly 'smollm2-135m': "HuggingFaceTB/SmolLM2-135M-Instruct";
-    readonly 'smollm2-360m': "HuggingFaceTB/SmolLM2-360M-Instruct";
-    readonly 'smollm2-1.7b': "HuggingFaceTB/SmolLM2-1.7B-Instruct";
-    readonly 'phi-3-mini': "Xenova/Phi-3-mini-4k-instruct";
-    readonly tinyllama: "Xenova/TinyLlama-1.1B-Chat-v1.0";
-};
-export type TransformersModelAlias = keyof typeof TRANSFORMERS_MODELS;
+import { DEFAULT_TRANSFORMERS_MODEL, TRANSFORMERS_MODELS, type TransformersModelID } from '../models';
+export { DEFAULT_TRANSFORMERS_MODEL, TRANSFORMERS_MODELS };
+export type TransformersModelAlias = TransformersModelID;
 /**
  * Model size estimates for UI display
  */

package/dist/backends/transformers.d.ts.map
CHANGED

@@ -1 +1 @@
-
{"version":3,"file":"transformers.d.ts","sourceRoot":"","sources":["../../src/backends/transformers.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,KAAK,EACV,WAAW,EACX,WAAW,EACX,eAAe,EACf,cAAc,EACd,oBAAoB,EAEpB,OAAO,EACP,MAAM,EACN,YAAY,EACb,MAAM,UAAU,CAAC;AAKlB;;;GAGG;AACH,
+
{"version":3,"file":"transformers.d.ts","sourceRoot":"","sources":["../../src/backends/transformers.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,KAAK,EACV,WAAW,EACX,WAAW,EACX,eAAe,EACf,cAAc,EACd,oBAAoB,EAEpB,OAAO,EACP,MAAM,EACN,YAAY,EACb,MAAM,UAAU,CAAC;AAKlB;;;GAGG;AACH,OAAO,EAAE,0BAA0B,EAAE,mBAAmB,EAAE,KAAK,mBAAmB,EAAE,MAAM,WAAW,CAAC;AACtG,OAAO,EAAE,0BAA0B,EAAE,mBAAmB,EAAE,CAAC;AAE3D,MAAM,MAAM,sBAAsB,GAAG,mBAAmB,CAAC;AAEzD;;GAEG;AACH,eAAO,MAAM,wBAAwB,EAAE,MAAM,CAAC,sBAAsB,EAAE,MAAM,CAW3E,CAAC;AAkGF;;GAEG;AACH,MAAM,WAAW,0BAA0B;IACzC,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,YAAY,CAAC,EAAE,YAAY,CAAC;CAC7B;AAED;;GAEG;AACH,qBAAa,oBAAqB,YAAW,WAAW;IACtD,QAAQ,CAAC,OAAO,EAAE,OAAO,CAAkB;IAE3C,OAAO,CAAC,QAAQ,CAAuC;IACvD,OAAO,CAAC,YAAY,CAAuB;IAC3C,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,YAAY,CAAe;gBAEvB,MAAM,GAAE,0BAA+B;IAKnD,IAAI,OAAO,IAAI,OAAO,CAErB;IAED,IAAI,OAAO,IAAI,MAAM,GAAG,IAAI,CAE3B;IAEK,IAAI,CAAC,OAAO,EAAE,MAAM,EAAE,UAAU,CAAC,EAAE,oBAAoB,GAAG,OAAO,CAAC,IAAI,CAAC;IAkDvE,IAAI,CAAC,QAAQ,EAAE,WAAW,EAAE,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,MAAM,CAAC;IAoBzE,MAAM,CACV,QAAQ,EAAE,WAAW,EAAE,EACvB,OAAO,EAAE,cAAc,EACvB,OAAO,CAAC,EAAE,eAAe,GACxB,OAAO,CAAC,MAAM,CAAC;IAgCZ,MAAM,IAAI,OAAO,CAAC,IAAI,CAAC;CAI9B;AAED;;GAEG;AACH,wBAAgB,0BAA0B,CAAC,MAAM,CAAC,EAAE,0BAA0B,GAAG,oBAAoB,CAEpG"}

package/dist/backends/webllm.d.ts
CHANGED

@@ -7,44 +7,9 @@ import type { LLMProvider, ChatMessage, GenerateOptions, StreamCallback, LoadPro
  * Default model for WebLLM backend
  * Using Phi 3.5 Mini as it's well-tested and reasonably sized
  */
-
-
-
- * These IDs must match exactly what's in web-llm's prebuiltAppConfig
- *
- * @see https://github.com/mlc-ai/web-llm/blob/main/src/config.ts
- */
-export declare const WEBLLM_MODELS: {
-    readonly 'llama-3.2-1b': "Llama-3.2-1B-Instruct-q4f16_1-MLC";
-    readonly 'llama-3.2-3b': "Llama-3.2-3B-Instruct-q4f16_1-MLC";
-    readonly 'llama-3.1-8b': "Llama-3.1-8B-Instruct-q4f16_1-MLC";
-    readonly 'llama-3.1-8b-1k': "Llama-3.1-8B-Instruct-q4f16_1-MLC-1k";
-    readonly 'phi-3.5-mini': "Phi-3.5-mini-instruct-q4f16_1-MLC";
-    readonly 'phi-3.5-mini-1k': "Phi-3.5-mini-instruct-q4f16_1-MLC-1k";
-    readonly 'phi-3.5-vision': "Phi-3.5-vision-instruct-q4f16_1-MLC";
-    readonly 'qwen-2.5-0.5b': "Qwen2.5-0.5B-Instruct-q4f16_1-MLC";
-    readonly 'qwen-2.5-1.5b': "Qwen2.5-1.5B-Instruct-q4f16_1-MLC";
-    readonly 'qwen-2.5-3b': "Qwen2.5-3B-Instruct-q4f16_1-MLC";
-    readonly 'qwen-2.5-7b': "Qwen2.5-7B-Instruct-q4f16_1-MLC";
-    readonly 'qwen-2.5-coder-0.5b': "Qwen2.5-Coder-0.5B-Instruct-q4f16_1-MLC";
-    readonly 'qwen-2.5-coder-1.5b': "Qwen2.5-Coder-1.5B-Instruct-q4f16_1-MLC";
-    readonly 'qwen-3-0.6b': "Qwen3-0.6B-q4f16_1-MLC";
-    readonly 'qwen-3-1.7b': "Qwen3-1.7B-q4f16_1-MLC";
-    readonly 'qwen-3-4b': "Qwen3-4B-q4f16_1-MLC";
-    readonly 'qwen-3-8b': "Qwen3-8B-q4f16_1-MLC";
-    readonly 'gemma-2-2b': "gemma-2-2b-it-q4f16_1-MLC";
-    readonly 'gemma-2-2b-1k': "gemma-2-2b-it-q4f16_1-MLC-1k";
-    readonly 'gemma-2-9b': "gemma-2-9b-it-q4f16_1-MLC";
-    readonly 'smollm2-135m': "SmolLM2-135M-Instruct-q0f16-MLC";
-    readonly 'smollm2-360m': "SmolLM2-360M-Instruct-q4f16_1-MLC";
-    readonly 'smollm2-1.7b': "SmolLM2-1.7B-Instruct-q4f16_1-MLC";
-    readonly 'mistral-7b': "Mistral-7B-Instruct-v0.3-q4f16_1-MLC";
-    readonly 'deepseek-r1-qwen-7b': "DeepSeek-R1-Distill-Qwen-7B-q4f16_1-MLC";
-    readonly 'deepseek-r1-llama-8b': "DeepSeek-R1-Distill-Llama-8B-q4f16_1-MLC";
-    readonly 'hermes-3-llama-3.2-3b': "Hermes-3-Llama-3.2-3B-q4f16_1-MLC";
-    readonly 'hermes-3-llama-3.1-8b': "Hermes-3-Llama-3.1-8B-q4f16_1-MLC";
-};
-export type WebLLMModelAlias = keyof typeof WEBLLM_MODELS;
+import { DEFAULT_WEBLLM_MODEL, WEBLLM_MODELS, type WebLLMModelID } from '../models';
+export { DEFAULT_WEBLLM_MODEL, WEBLLM_MODELS };
+export type WebLLMModelAlias = WebLLMModelID;
 /**
  * Model size estimates for UI display
  */

package/dist/backends/webllm.d.ts.map
CHANGED

@@ -1 +1 @@
-
{"version":3,"file":"webllm.d.ts","sourceRoot":"","sources":["../../src/backends/webllm.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,KAAK,EACV,WAAW,EACX,WAAW,EACX,eAAe,EACf,cAAc,EACd,oBAAoB,EAEpB,OAAO,EACR,MAAM,UAAU,CAAC;AAMlB;;;GAGG;AACH,
+
{"version":3,"file":"webllm.d.ts","sourceRoot":"","sources":["../../src/backends/webllm.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,KAAK,EACV,WAAW,EACX,WAAW,EACX,eAAe,EACf,cAAc,EACd,oBAAoB,EAEpB,OAAO,EACR,MAAM,UAAU,CAAC;AAMlB;;;GAGG;AACH,OAAO,EAAE,oBAAoB,EAAE,aAAa,EAAE,KAAK,aAAa,EAAE,MAAM,WAAW,CAAC;AACpF,OAAO,EAAE,oBAAoB,EAAE,aAAa,EAAE,CAAC;AAE/C,MAAM,MAAM,gBAAgB,GAAG,aAAa,CAAC;AAE7C;;GAEG;AACH,eAAO,MAAM,kBAAkB,EAAE,MAAM,CAAC,gBAAgB,EAAE,MAAM,CA6B/D,CAAC;AAYF;;GAEG;AACH,qBAAa,cAAe,YAAW,WAAW;IAChD,QAAQ,CAAC,OAAO,EAAE,OAAO,CAAY;IAErC,OAAO,CAAC,MAAM,CAA0B;IACxC,OAAO,CAAC,YAAY,CAAuB;IAE3C,IAAI,OAAO,IAAI,OAAO,CAErB;IAED,IAAI,OAAO,IAAI,MAAM,GAAG,IAAI,CAE3B;IAEK,IAAI,CAAC,OAAO,EAAE,MAAM,EAAE,UAAU,CAAC,EAAE,oBAAoB,GAAG,OAAO,CAAC,IAAI,CAAC;IAwBvE,IAAI,CAAC,QAAQ,EAAE,WAAW,EAAE,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,MAAM,CAAC;IAmBzE,MAAM,CACV,QAAQ,EAAE,WAAW,EAAE,EACvB,OAAO,EAAE,cAAc,EACvB,OAAO,CAAC,EAAE,eAAe,GACxB,OAAO,CAAC,MAAM,CAAC;IA8BZ,MAAM,IAAI,OAAO,CAAC,IAAI,CAAC;CAO9B;AAED;;GAEG;AACH,wBAAgB,oBAAoB,IAAI,cAAc,CAErD"}
package/dist/core.d.ts
CHANGED
@@ -4,7 +4,7 @@
  *
  * @module local-llm/core
  */
-export type { Backend, Device, Quantization, LLMConfig, MessageRole, ChatMessage, StreamCallback, LoadProgress, LoadProgressCallback, GenerateOptions, LLMProvider as LLMProviderInterface, AttachOptions, BrowserCapabilities, } from './types';
+export type { Backend, Device, Quantization, LLMConfig, MessageRole, ChatMessage, StreamCallback, LoadProgress, LoadProgressCallback, GenerateOptions, LLMProvider as LLMProviderInterface, AttachOptions, BrowserCapabilities, SupportedModel, } from './types';
 export { checkWebGPU, checkWasm, detectCapabilities, logCapabilities } from './detect';
 export { WebLLMProvider, createWebLLMProvider, DEFAULT_WEBLLM_MODEL, WEBLLM_MODELS, } from './backends/webllm';
 export { TransformersProvider, createTransformersProvider, DEFAULT_TRANSFORMERS_MODEL, TRANSFORMERS_MODELS, } from './backends/transformers';
package/dist/core.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"core.d.ts","sourceRoot":"","sources":["../src/core.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AAGH,YAAY,EACV,OAAO,EACP,MAAM,EACN,YAAY,EACZ,SAAS,EACT,WAAW,EACX,WAAW,EACX,cAAc,EACd,YAAY,EACZ,oBAAoB,EACpB,eAAe,EACf,WAAW,IAAI,oBAAoB,EACnC,aAAa,EACb,mBAAmB,
+
{"version":3,"file":"core.d.ts","sourceRoot":"","sources":["../src/core.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AAGH,YAAY,EACV,OAAO,EACP,MAAM,EACN,YAAY,EACZ,SAAS,EACT,WAAW,EACX,WAAW,EACX,cAAc,EACd,YAAY,EACZ,oBAAoB,EACpB,eAAe,EACf,WAAW,IAAI,oBAAoB,EACnC,aAAa,EACb,mBAAmB,EACnB,cAAc,GACf,MAAM,SAAS,CAAC;AAGjB,OAAO,EAAE,WAAW,EAAE,SAAS,EAAE,kBAAkB,EAAE,eAAe,EAAE,MAAM,UAAU,CAAC;AAGvF,OAAO,EACL,cAAc,EACd,oBAAoB,EACpB,oBAAoB,EACpB,aAAa,GACd,MAAM,mBAAmB,CAAC;AAE3B,OAAO,EACL,oBAAoB,EACpB,0BAA0B,EAC1B,0BAA0B,EAC1B,mBAAmB,GACpB,MAAM,yBAAyB,CAAC;AAGjC,OAAO,EACL,oBAAoB,EACpB,gBAAgB,EAChB,YAAY,EACZ,sBAAsB,GACvB,MAAM,WAAW,CAAC;AAGnB,OAAO,KAAK,EACV,SAAS,EACT,WAAW,EACX,eAAe,EACf,cAAc,EACd,aAAa,EACd,MAAM,SAAS,CAAC;AAOjB;;GAEG;AACH,MAAM,WAAW,QAAQ;IACvB;;OAEG;IACH,QAAQ,CAAC,OAAO,EAAE,OAAO,CAAC;IAE1B;;OAEG;IACH,QAAQ,CAAC,OAAO,EAAE,MAAM,GAAG,IAAI,CAAC;IAEhC;;OAEG;IACH,QAAQ,CAAC,OAAO,EAAE,QAAQ,GAAG,cAAc,CAAC;IAE5C;;OAEG;IACH,IAAI,CACF,QAAQ,EAAE,WAAW,EAAE,GAAG,MAAM,EAChC,OAAO,CAAC,EAAE,eAAe,GACxB,OAAO,CAAC,MAAM,CAAC,CAAC;IAEnB;;OAEG;IACH,MAAM,CACJ,QAAQ,EAAE,WAAW,EAAE,GAAG,MAAM,EAChC,OAAO,EAAE,cAAc,EACvB,OAAO,CAAC,EAAE,eAAe,GACxB,OAAO,CAAC,MAAM,CAAC,CAAC;IAEnB;;OAEG;IACH,aAAa,CACX,aAAa,EAAE,MAAM,GAAG,gBAAgB,GAAG,mBAAmB,EAC9D,cAAc,EAAE,MAAM,GAAG,WAAW,EACpC,OAAO,CAAC,EAAE,aAAa,GACtB,MAAM,IAAI,CAAC;IAEd;;OAEG;IACH,MAAM,IAAI,OAAO,CAAC,IAAI,CAAC,CAAC;CACzB;AAwBD;;;;;;;;;;;;;;;;;;;GAmBG;AACH,wBAAsB,SAAS,CAAC,MAAM,GAAE,SAAc,GAAG,OAAO,CAAC,QAAQ,CAAC,CAyFzE;AAED;;GAEG;AACH,wBAAsB,iBAAiB,IAAI,OAAO,CAAC,OAAO,CAAC,CAG1D"}