localm-web 0.4.0 → 0.5.0

package/CHANGELOG.md CHANGED
@@ -7,6 +7,55 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased]
 
+## [0.5.0] - 2026-05-10
+
+### Added
+
+- **ORT-Web fallback path (v0.5)** — `TransformersTextEngine` in
+  `src/core/transformers-engine.ts` implements the runtime-agnostic
+  `Engine` contract on top of
+  [`@huggingface/transformers`](https://github.com/huggingface/transformers.js).
+  Lazy-imports the optional peer dep so the WebLLM hot path stays slim.
+  Runs ONNX models on WebGPU when available and on WASM-SIMD otherwise,
+  with a `TextStreamer` → async-iterable bridge for `stream()` /
+  `streamCompletion()` parity with `WebLLMEngine`.
+- **Backend selector + auto-routing** — new `BackendChoice` type
+  (`"auto" | "webllm" | "transformers"`) on `LMTaskCreateOptions.backend`.
+  `"auto"` (the default) picks WebLLM when WebGPU is available and falls
+  back to the transformers.js engine otherwise. `resolveBackend(choice,
+  preset, webGPUAvailable)` is exported from the package root for unit
+  tests and custom routing logic. `BackendNotAvailableError` is raised
+  when no backend can satisfy the request (e.g. `"transformers"` forced
+  on a preset without a `transformersId`).
+- `ModelPreset.transformersId?: string` — HuggingFace Hub repo id used
+  by the transformers.js fallback. Replaces the unused `ortUrl` field.
+- 4 presets now carry `transformersId` mappings: `phi-3.5-mini-int4`,
+  `llama-3.2-1b-int4`, `qwen2.5-1.5b-int4`, and the new
+  `smollm2-360m-int8` (the smallest viable chat model, intended as the
+  default for low-end devices on the fallback path).
+- Public exports: `TransformersTextEngine`, `WebLLMEngine`,
+  `resolveBackend`, `BackendChoice`.
+- 6 unit tests in `test/resolve-backend.test.ts` covering the
+  `BackendChoice` × WebGPU availability × preset-capability matrix,
+  including both `BackendNotAvailableError` paths.
+
+### Changed
+
+- **CI / dev runtime moved to Node 22 + 24.**
+  - `engines.node` bumped from `>=20.19.0` to `>=22.0.0`. Node 20
+    reached end-of-life on 2026-04-30 per the Node release schedule,
+    and the `Release to npm` workflow had started warning about
+    `actions/checkout@v4` / `actions/setup-node@v4` running on Node 20.
+  - CI matrix in `.github/workflows/ci.yml` flipped from `["20", "22"]`
+    to `["22", "24"]`.
+  - Release workflow (`.github/workflows/release-npm.yml`) now sets up
+    Node 22 (was 20).
+  - `actions/checkout@v4` → `@v5` and `actions/setup-node@v4` → `@v5`
+    in both workflows. Eliminates the Node 20 deprecation notice that
+    appeared on the v0.4.0 publish run.
+- `docs/getting-started.md` prerequisite row updated to reflect the
+  new Node 22+ requirement.
+
 ## [0.4.0] - 2026-05-10
 
 ### Added
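
Taken together, the Added entries describe the following consumer-facing flow. A minimal usage sketch, assuming only the API named in this changelog (the chosen model id comes from the preset list above):

```ts
import { Chat, resolveBackend, resolveModelPreset } from "localm-web";

// "auto" (the default) picks WebLLM on WebGPU browsers and the
// transformers.js fallback elsewhere.
const chat = await Chat.create("smollm2-360m-int8", { backend: "auto" });
console.log((await chat.send("Hello!")).text);

// The pure resolver is exported for tests and custom routing logic:
const preset = resolveModelPreset("smollm2-360m-int8");
resolveBackend("auto", preset, /* webGPUAvailable */ false); // → "transformers"
resolveBackend("auto", preset, /* webGPUAvailable */ true);  // → "webllm"
```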
package/dist/index.d.ts CHANGED
@@ -30,6 +30,19 @@
  */
 export declare function assertJsonSchema(schema: unknown): asserts schema is object;
 
+/**
+ * Inference backend selector.
+ *
+ * - `"auto"` (default): pick WebLLM when WebGPU is available, fall back to
+ *   the transformers.js engine otherwise.
+ * - `"webllm"`: force WebLLM. Throws `WebGPUUnavailableError` on browsers
+ *   without WebGPU.
+ * - `"transformers"`: force the transformers.js engine. Loads from the
+ *   preset's `transformersId`; throws `BackendNotAvailableError` when the
+ *   preset has no `transformersId`.
+ */
+export declare type BackendChoice = "auto" | "webllm" | "transformers";
+
 /** Thrown when no usable backend is available on the current platform. */
 export declare class BackendNotAvailableError extends LocalmWebError {
 }
@@ -530,7 +543,7 @@ export declare abstract class LMTask {
      * @param options - Task creation options.
      */
     protected static createEngine(modelId: string, options?: LMTaskCreateOptions): Promise<ResolvedEngine>;
-    private static defaultEngine;
+    private static instantiateEngine;
     /** Release engine resources. Safe to call multiple times. */
     unload(): Promise<void>;
     /** Whether the underlying engine has a loaded model. */
@@ -554,8 +567,19 @@ export declare interface LMTaskCreateOptions {
      * `Worker` support or when debugging the runtime directly).
      *
      * Ignored when {@link engine} is provided.
+     *
+     * **Note (v0.5):** the bundled worker entry only supports the WebLLM
+     * backend. When `backend` resolves to `"transformers"` the worker option
+     * is forced to `false` and inference runs on the main thread. A worker
+     * variant for the transformers.js path is on the v0.6 roadmap.
      */
     inWorker?: boolean;
+    /**
+     * Inference backend selector (v0.5+). Defaults to `"auto"` which picks
+     * WebLLM when WebGPU is available and the transformers.js fallback when
+     * it is not. See {@link BackendChoice}.
+     */
+    backend?: BackendChoice;
 }
 
 /**
@@ -731,8 +755,13 @@ export declare interface ModelPreset {
     quantization: string;
     /** Identifier expected by the WebLLM runtime. */
     webllmId: string;
-    /** Optional ONNX URL used by the future ORT-Web fallback (v0.5+). */
-    ortUrl?: string;
+    /**
+     * Optional HuggingFace Hub repo id used by the transformers.js fallback
+     * (v0.5+). Models without a `transformersId` cannot run on the fallback
+     * path — loading them in a browser without WebGPU raises
+     * `BackendNotAvailableError`.
+     */
+    transformersId?: string;
     /** Maximum context window in tokens. */
     contextWindow: number;
     /** Short human description. */
@@ -883,6 +912,19 @@ export declare interface RerankPipeline {
     unload?(): Promise<void>;
 }
 
+/**
+ * Pure backend resolver, exported for unit tests.
+ *
+ * @param choice - Caller's preference (`"auto"`, `"webllm"`, `"transformers"`).
+ * @param preset - Resolved model preset.
+ * @param webGPUAvailable - Whether WebGPU is available in the host environment.
+ * @returns The concrete backend to instantiate.
+ * @throws BackendNotAvailableError when the choice cannot be satisfied (e.g.
+ *   `"transformers"` requested but the preset has no `transformersId`, or
+ *   `"auto"` with no WebGPU and no `transformersId`).
+ */
+export declare function resolveBackend(choice: BackendChoice, preset: ModelPreset, webGPUAvailable: boolean): "webllm" | "transformers";
+
 /** Internal payload returned by {@link LMTask.createEngine}. */
 declare interface ResolvedEngine {
     engine: Engine;
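
To make the routing contract above concrete, here is a test-style sketch of `resolveBackend`. The test framework and the minimal preset stubs are assumptions; the changelog only names `test/resolve-backend.test.ts`:

```ts
import { describe, expect, it } from "vitest"; // framework assumed, not confirmed by this diff
import { BackendNotAvailableError, resolveBackend } from "localm-web";
import type { ModelPreset } from "localm-web";

// Hypothetical minimal stubs: only the fields resolveBackend reads.
const withFallback = { id: "demo", transformersId: "org/demo-onnx" } as ModelPreset;
const webllmOnly = { id: "gpu-only" } as ModelPreset;

describe("resolveBackend", () => {
  it('routes "auto" by WebGPU availability', () => {
    expect(resolveBackend("auto", withFallback, true)).toBe("webllm");
    expect(resolveBackend("auto", withFallback, false)).toBe("transformers");
  });
  it("honors explicit choices", () => {
    // "webllm" is returned as-is; engine.load() throws later without WebGPU.
    expect(resolveBackend("webllm", webllmOnly, false)).toBe("webllm");
    expect(resolveBackend("transformers", withFallback, true)).toBe("transformers");
  });
  it("throws when the choice cannot be satisfied", () => {
    expect(() => resolveBackend("transformers", webllmOnly, true)).toThrow(BackendNotAvailableError);
    expect(() => resolveBackend("auto", webllmOnly, false)).toThrow(BackendNotAvailableError);
  });
});
```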
@@ -975,6 +1017,33 @@ export declare interface TokenChunk {
     done: boolean;
 }
 
+/**
+ * Inference engine backed by
+ * [`@huggingface/transformers`](https://github.com/huggingface/transformers.js)
+ * (transformers.js).
+ *
+ * Used by the SDK as the **fallback path** for browsers without WebGPU and as
+ * an explicit alternative backend selectable via `LMTaskCreateOptions.backend`.
+ * It runs ONNX models on WebGPU when available and on WASM-SIMD otherwise, so
+ * a wider range of browsers can run language models, degrading gracefully
+ * (if more slowly).
+ *
+ * The package is an optional peer dependency; install it in the consuming
+ * app so the SDK's lazy `import()` can resolve it before tasks route to
+ * this backend.
+ */
+export declare class TransformersTextEngine implements Engine {
+    private generator;
+    private currentAbortController;
+    isLoaded(): boolean;
+    load(modelId: string, onProgress?: ProgressCallback): Promise<void>;
+    generate(messages: Message[], options?: GenerationOptions): Promise<string>;
+    stream(messages: Message[], options?: GenerationOptions): AsyncIterable<TokenChunk>;
+    complete(prompt: string, options?: GenerationOptions): Promise<string>;
+    streamCompletion(prompt: string, options?: GenerationOptions): AsyncIterable<TokenChunk>;
+    unload(): Promise<void>;
+    private requireGenerator;
+}
+
 /** Thrown when a model id is not present in the curated registry. */
 export declare class UnknownModelError extends LocalmWebError {
 }
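
A minimal direct-usage sketch of this engine, bypassing task auto-routing. The model id is the `transformersId` of the new `smollm2-360m-int8` preset; the install step assumes npm:

```ts
// npm install @huggingface/transformers   (optional peer dependency)
import { TransformersTextEngine } from "localm-web";

const engine = new TransformersTextEngine();
await engine.load("HuggingFaceTB/SmolLM2-360M-Instruct", (p) => {
  console.log(`${p.phase}: ${Math.round(p.progress * 100)}%`);
});

// Stream a chat-style generation token by token.
for await (const chunk of engine.stream([{ role: "user", content: "Hi!" }])) {
  if (!chunk.done) process.stdout.write(chunk.text);
}
await engine.unload();
```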
@@ -986,6 +1055,24 @@ export declare const VERSION: string;
 export declare class WebGPUUnavailableError extends LocalmWebError {
 }
 
+/**
+ * Inference engine backed by [WebLLM (MLC)](https://github.com/mlc-ai/web-llm).
+ *
+ * Requires WebGPU. When WebGPU is missing, auto-routing falls back to
+ * `TransformersTextEngine` instead (see `BackendChoice`).
+ */
+export declare class WebLLMEngine implements Engine {
+    private engine;
+    isLoaded(): boolean;
+    load(modelId: string, onProgress?: ProgressCallback): Promise<void>;
+    generate(messages: Message[], options?: GenerationOptions): Promise<string>;
+    stream(messages: Message[], options?: GenerationOptions): AsyncIterable<TokenChunk>;
+    complete(prompt: string, options?: GenerationOptions): Promise<string>;
+    streamCompletion(prompt: string, options?: GenerationOptions): AsyncIterable<TokenChunk>;
+    unload(): Promise<void>;
+    private requireEngine;
+}
+
 /**
  * Engine implementation that proxies all calls to a Web Worker.
  *
package/dist/index.js CHANGED
@@ -1,10 +1,3 @@
-const DOWNLOAD_PATTERN = /\b(fetch|download|loading from cache|cache hit|param)/i;
-const COMPILE_PATTERN = /\b(compil|shader|kernel|tensor|init|allocat|warm)/i;
-function classifyLoadPhase(text) {
-  if (DOWNLOAD_PATTERN.test(text)) return "downloading";
-  if (COMPILE_PATTERN.test(text)) return "compiling";
-  return "loading";
-}
 class LocalmWebError extends Error {
   /**
    * @param message - Human-readable description of the error.
@@ -32,6 +25,264 @@ class BackendNotAvailableError extends LocalmWebError {
 }
 class StructuredOutputError extends LocalmWebError {
 }
+const DOWNLOAD_PATTERN = /\b(fetch|download|loading from cache|cache hit|param)/i;
+const COMPILE_PATTERN = /\b(compil|shader|kernel|tensor|init|allocat|warm)/i;
+function classifyLoadPhase(text) {
+  if (DOWNLOAD_PATTERN.test(text)) return "downloading";
+  if (COMPILE_PATTERN.test(text)) return "compiling";
+  return "loading";
+}
+let transformersModulePromise$2 = null;
+async function loadTransformers$2() {
+  if (!transformersModulePromise$2) {
+    transformersModulePromise$2 = import("@huggingface/transformers");
+  }
+  return transformersModulePromise$2;
+}
+function buildSamplingKwargs(options) {
+  const kwargs = {};
+  if (options.maxTokens !== void 0) kwargs.max_new_tokens = options.maxTokens;
+  if (options.temperature !== void 0) kwargs.temperature = options.temperature;
+  if (options.topP !== void 0) kwargs.top_p = options.topP;
+  if (options.topK !== void 0) kwargs.top_k = options.topK;
+  if (options.temperature !== void 0 && options.temperature > 0) {
+    kwargs.do_sample = true;
+  }
+  return kwargs;
+}
+function toChatMessages$1(messages) {
+  return messages.map((m) => ({ role: m.role, content: m.content }));
+}
+function lastAssistantContent(output, promptText) {
+  const item = Array.isArray(output) ? output[0] : output;
+  if (!item) return "";
+  const generated = item.generated_text;
+  if (typeof generated === "string") {
+    return generated.startsWith(promptText) ? generated.slice(promptText.length) : generated;
+  }
+  if (Array.isArray(generated)) {
+    for (let i = generated.length - 1; i >= 0; i -= 1) {
+      const turn = generated[i];
+      if (turn && turn.role === "assistant") return turn.content;
+    }
+  }
+  return "";
+}
+function createAsyncQueue() {
+  const buffer = [];
+  let waiters = [];
+  let finished = false;
+  let pendingError = null;
+  const drain = () => {
+    while (buffer.length > 0 && waiters.length > 0) {
+      const waiter = waiters.shift();
+      const value = buffer.shift();
+      waiter?.resolve({ value, done: false });
+    }
+    if ((finished || pendingError) && waiters.length > 0) {
+      const all = waiters;
+      waiters = [];
+      for (const w of all) {
+        // Propagate a pending error to blocked consumers instead of
+        // silently ending the stream; otherwise resolve as done.
+        if (pendingError) {
+          w.reject(pendingError);
+        } else {
+          w.resolve({ value: void 0, done: true });
+        }
+      }
+    }
+  };
+  return {
+    push(item) {
+      buffer.push(item);
+      drain();
+    },
+    end(error) {
+      finished = true;
+      if (error) pendingError = error;
+      drain();
+    },
+    iterator: {
+      [Symbol.asyncIterator]() {
+        return {
+          next() {
+            if (buffer.length > 0) {
+              return Promise.resolve({ value: buffer.shift(), done: false });
+            }
+            if (pendingError) {
+              const err = pendingError;
+              pendingError = null;
+              return Promise.reject(err);
+            }
+            if (finished) {
+              return Promise.resolve({ value: void 0, done: true });
+            }
+            return new Promise((resolve, reject) => waiters.push({ resolve, reject }));
+          }
+        };
+      }
+    }
+  };
+}
+ class TransformersTextEngine {
127
+ generator = null;
128
+ currentAbortController = null;
129
+ isLoaded() {
130
+ return this.generator !== null;
131
+ }
132
+ async load(modelId, onProgress) {
133
+ const transformers = await loadTransformers$2();
134
+ try {
135
+ const generator = await transformers.pipeline("text-generation", modelId, {
136
+ progress_callback: (report) => {
137
+ const progress = typeof report.progress === "number" ? report.progress / 100 : 0;
138
+ const text = report.status ?? "loading";
139
+ onProgress?.({
140
+ progress,
141
+ text,
142
+ loaded: 0,
143
+ total: 0,
144
+ phase: classifyLoadPhase(text)
145
+ });
146
+ }
147
+ });
148
+ this.generator = generator;
149
+ onProgress?.({
150
+ progress: 1,
151
+ text: "Model ready.",
152
+ loaded: 0,
153
+ total: 0,
154
+ phase: "ready"
155
+ });
156
+ } catch (err) {
157
+ throw new ModelLoadError(`Failed to load transformers model "${modelId}".`, err);
158
+ }
159
+ }
160
+ async generate(messages, options = {}) {
161
+ const generator = this.requireGenerator();
162
+ if (options.signal?.aborted) {
163
+ throw new GenerationAbortedError("Generation aborted before start.");
164
+ }
165
+ const chat = toChatMessages$1(messages);
166
+ try {
167
+ const output = await generator(chat, buildSamplingKwargs(options));
168
+ return lastAssistantContent(output, "");
169
+ } catch (err) {
170
+ if (err instanceof GenerationAbortedError) throw err;
171
+ throw new ModelLoadError("Transformers generation failed.", err);
172
+ }
173
+ }
174
+ async *stream(messages, options = {}) {
175
+ const generator = this.requireGenerator();
176
+ if (options.signal?.aborted) {
177
+ throw new GenerationAbortedError("Generation aborted before start.");
178
+ }
179
+ const transformers = await loadTransformers$2();
180
+ const queue = createAsyncQueue();
181
+ let index = 0;
182
+ const tokenizer = generator.tokenizer;
183
+ const streamer = new transformers.TextStreamer(tokenizer, {
184
+ skip_prompt: true,
185
+ skip_special_tokens: true,
186
+ callback_function: (text) => {
187
+ if (text) {
188
+ queue.push({ text, index, done: false });
189
+ index += 1;
190
+ }
191
+ }
192
+ });
193
+ const abortPromise = new Promise((_, reject) => {
194
+ if (options.signal) {
195
+ const onAbort = () => {
196
+ reject(new GenerationAbortedError("Generation aborted by signal."));
197
+ };
198
+ options.signal.addEventListener("abort", onAbort, { once: true });
199
+ }
200
+ });
201
+ const chat = toChatMessages$1(messages);
202
+ const generation = generator(chat, { ...buildSamplingKwargs(options), streamer }).then(() => {
203
+ queue.push({ text: "", index, done: true });
204
+ queue.end();
205
+ }).catch((err) => {
206
+ queue.end(err instanceof Error ? err : new Error(String(err)));
207
+ });
208
+ void Promise.race([generation, abortPromise]).catch((err) => {
209
+ if (err instanceof GenerationAbortedError) queue.end(err);
210
+ });
211
+ for await (const chunk of queue.iterator) {
212
+ yield chunk;
213
+ }
214
+ }
215
+ async complete(prompt, options = {}) {
216
+ const generator = this.requireGenerator();
217
+ if (options.signal?.aborted) {
218
+ throw new GenerationAbortedError("Generation aborted before start.");
219
+ }
220
+ try {
221
+ const output = await generator(prompt, buildSamplingKwargs(options));
222
+ return lastAssistantContent(output, prompt);
223
+ } catch (err) {
224
+ if (err instanceof GenerationAbortedError) throw err;
225
+ throw new ModelLoadError("Transformers completion failed.", err);
226
+ }
227
+ }
228
+ async *streamCompletion(prompt, options = {}) {
229
+ const generator = this.requireGenerator();
230
+ if (options.signal?.aborted) {
231
+ throw new GenerationAbortedError("Generation aborted before start.");
232
+ }
233
+ const transformers = await loadTransformers$2();
234
+ const queue = createAsyncQueue();
235
+ let index = 0;
236
+ const tokenizer = generator.tokenizer;
237
+ const streamer = new transformers.TextStreamer(tokenizer, {
238
+ skip_prompt: true,
239
+ skip_special_tokens: true,
240
+ callback_function: (text) => {
241
+ if (text) {
242
+ queue.push({ text, index, done: false });
243
+ index += 1;
244
+ }
245
+ }
246
+ });
247
+ generator(prompt, { ...buildSamplingKwargs(options), streamer }).then(() => {
248
+ queue.push({ text: "", index, done: true });
249
+ queue.end();
250
+ }).catch((err) => {
251
+ queue.end(err instanceof Error ? err : new Error(String(err)));
252
+ });
253
+ if (options.signal) {
254
+ options.signal.addEventListener(
255
+ "abort",
256
+ () => {
257
+ queue.end(new GenerationAbortedError("Generation aborted by signal."));
258
+ },
259
+ { once: true }
260
+ );
261
+ }
262
+ for await (const chunk of queue.iterator) {
263
+ yield chunk;
264
+ }
265
+ }
266
+ async unload() {
267
+ if (this.generator) {
268
+ const disposable = this.generator;
269
+ if (typeof disposable.dispose === "function") {
270
+ await disposable.dispose();
271
+ }
272
+ this.generator = null;
273
+ }
274
+ this.currentAbortController?.abort();
275
+ this.currentAbortController = null;
276
+ }
277
+ requireGenerator() {
278
+ if (!this.generator) {
279
+ throw new ModelNotLoadedError(
280
+ "TransformersTextEngine not loaded. Call load() before generation."
281
+ );
282
+ }
283
+ return this.generator;
284
+ }
285
+ }
35
286
  function assertJsonSchema(schema) {
36
287
  if (schema === null || typeof schema !== "object" || Array.isArray(schema)) {
37
288
  throw new StructuredOutputError("jsonSchema must be a plain object describing a JSON Schema.");
@@ -545,6 +796,7 @@ const MODEL_PRESETS = Object.freeze({
     parameters: "3.8B",
     quantization: "q4f16_1",
     webllmId: "Phi-3.5-mini-instruct-q4f16_1-MLC",
+    transformersId: "onnx-community/Phi-3.5-mini-instruct-onnx-web",
     contextWindow: 4096,
     description: "Microsoft Phi-3.5 mini, INT4 quantized for browser inference."
   },
@@ -554,6 +806,7 @@ const MODEL_PRESETS = Object.freeze({
     parameters: "1B",
     quantization: "q4f16_1",
     webllmId: "Llama-3.2-1B-Instruct-q4f16_1-MLC",
+    transformersId: "onnx-community/Llama-3.2-1B-Instruct",
     contextWindow: 4096,
     description: "Meta Llama 3.2 1B Instruct, INT4 quantized."
   },
@@ -563,8 +816,19 @@ const MODEL_PRESETS = Object.freeze({
     parameters: "1.5B",
     quantization: "q4f16_1",
     webllmId: "Qwen2.5-1.5B-Instruct-q4f16_1-MLC",
+    transformersId: "onnx-community/Qwen2.5-1.5B-Instruct",
     contextWindow: 4096,
     description: "Alibaba Qwen 2.5 1.5B Instruct, INT4 quantized."
+  },
+  "smollm2-360m-int8": {
+    id: "smollm2-360m-int8",
+    family: "SmolLM2",
+    parameters: "360M",
+    quantization: "q8",
+    webllmId: "SmolLM2-360M-Instruct-q4f16_1-MLC",
+    transformersId: "HuggingFaceTB/SmolLM2-360M-Instruct",
+    contextWindow: 2048,
+    description: "HuggingFace SmolLM2 360M Instruct — smallest viable chat model, ideal for the fallback path on low-end devices."
   }
 });
 function resolveModelPreset(modelId) {
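
A quick way to see which presets can take the fallback path, using only public exports (the resulting list reflects the v0.5 registry above):

```ts
import { listSupportedModels, resolveModelPreset } from "localm-web";

// Presets carrying a transformersId can run without WebGPU.
const fallbackReady = listSupportedModels()
  .map((id) => resolveModelPreset(id))
  .filter((preset) => preset.transformersId !== undefined)
  .map((preset) => preset.id);
// → ["phi-3.5-mini-int4", "llama-3.2-1b-int4", "qwen2.5-1.5b-int4", "smollm2-360m-int8"]
```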
@@ -643,6 +907,27 @@ function createInferenceWorker() {
     type: "module"
   });
 }
+function defaultWebGPUDetector() {
+  return typeof navigator !== "undefined" && "gpu" in navigator;
+}
+function resolveBackend(choice, preset, webGPUAvailable) {
+  if (choice === "webllm") return "webllm";
+  if (choice === "transformers") {
+    if (!preset.transformersId) {
+      throw new BackendNotAvailableError(
+        `Model "${preset.id}" has no transformersId — cannot run on the transformers.js backend.`
+      );
+    }
+    return "transformers";
+  }
+  if (webGPUAvailable) return "webllm";
+  if (!preset.transformersId) {
+    throw new BackendNotAvailableError(
+      `WebGPU is unavailable and model "${preset.id}" has no transformersId for the fallback path.`
+    );
+  }
+  return "transformers";
+}
 class LMTask {
   constructor(engine, preset) {
     this.engine = engine;
@@ -658,13 +943,29 @@ class LMTask {
    */
   static async createEngine(modelId, options = {}) {
     const preset = resolveModelPreset(modelId);
-    const engine = options.engine ?? LMTask.defaultEngine(options);
+    if (options.engine) {
+      if (!options.engine.isLoaded()) {
+        await options.engine.load(preset.webllmId, options.onProgress);
+      }
+      return { engine: options.engine, preset };
+    }
+    const choice = options.backend ?? "auto";
+    const resolved = resolveBackend(
+      choice,
+      preset,
+      defaultWebGPUDetector()
+    );
+    const engine = LMTask.instantiateEngine(resolved, options);
+    const loadId = resolved === "transformers" ? preset.transformersId ?? "" : preset.webllmId;
     if (!engine.isLoaded()) {
-      await engine.load(preset.webllmId, options.onProgress);
+      await engine.load(loadId, options.onProgress);
     }
     return { engine, preset };
   }
-  static defaultEngine(options) {
+  static instantiateEngine(resolved, options) {
+    if (resolved === "transformers") {
+      return new TransformersTextEngine();
+    }
     const useWorker = options.inWorker ?? true;
     if (useWorker) {
       return new WorkerEngine(createInferenceWorker());
@@ -1185,7 +1486,7 @@ async function* tap(stream, onChunk) {
     yield chunk;
   }
 }
-const VERSION = "0.4.0";
+const VERSION = "0.5.0";
 export {
   BackendNotAvailableError,
   Chat,
@@ -1205,9 +1506,11 @@ export {
   RERANKER_PRESETS,
   Reranker,
   StructuredOutputError,
+  TransformersTextEngine,
   UnknownModelError,
   VERSION,
   WebGPUUnavailableError,
+  WebLLMEngine,
   WorkerEngine,
   assertJsonSchema,
   collectStream,
@@ -1216,6 +1519,7 @@ export {
   listSupportedModels,
   listSupportedRerankerModels,
   parseStructuredOutput,
+  resolveBackend,
   resolveEmbeddingPreset,
   resolveModelPreset,
   resolveRerankerPreset,
package/dist/index.js.map CHANGED
@@ -1 +1 @@
(single-line minified source map regenerated for the new sources; contents omitted)
`\"qwen2.5-1.5b-int4\"`).\n * @param options - Optional creation options (progress callback, engine override).\n */\n static async create(modelId: string, options: LMTaskCreateOptions = {}): Promise<Completion> {\n const { engine, preset } = await LMTask.createEngine(modelId, options);\n return new Completion(engine, preset);\n }\n\n /**\n * Generate a continuation for the given prompt.\n *\n * @param prompt - Raw text fed to the model.\n * @param options - Generation options.\n * @returns A {@link CompletionResult} with the generated continuation.\n */\n async predict(prompt: string, options: GenerationOptions = {}): Promise<CompletionResult> {\n const text = await this.engine.complete(prompt, options);\n return new CompletionResult(text, prompt, 0, \"stop\");\n }\n\n /**\n * Stream a continuation for the given prompt as an async iterable of token\n * chunks.\n *\n * @param prompt - Raw text fed to the model.\n * @param options - Generation options including an optional `signal`.\n */\n async *stream(prompt: string, options: GenerationOptions = {}): AsyncIterable<TokenChunk> {\n for await (const chunk of this.engine.streamCompletion(prompt, options)) {\n yield chunk;\n }\n }\n}\n","import { ModelLoadError, ModelNotLoadedError } from \"../core/exceptions\";\nimport { resolveEmbeddingPreset, type EmbeddingPreset } from \"../presets/models\";\nimport type { ProgressCallback } from \"../types\";\n\n/** Options accepted by {@link Embeddings.create}. */\nexport interface EmbeddingsCreateOptions {\n /** Optional callback for model load progress updates. */\n onProgress?: ProgressCallback;\n /** Override the embedding pipeline. Intended for testing. */\n pipeline?: EmbedPipeline;\n}\n\n/** Options accepted by {@link Embeddings.embed}. */\nexport interface EmbedOptions {\n /** L2-normalize each vector. Recommended for cosine similarity downstream. Default `true`. */\n normalize?: boolean;\n /** Pooling strategy. BGE-style models use `\"cls\"`. Most sentence-transformers use `\"mean\"`. Default `\"mean\"`. */\n pooling?: \"mean\" | \"cls\";\n}\n\n/**\n * Minimal pipeline contract that {@link Embeddings} depends on.\n *\n * The default implementation wraps `@huggingface/transformers`. Tests inject\n * a fake satisfying the same shape — they never load the real runtime.\n */\nexport interface EmbedPipeline {\n /**\n * Run the encoder on a batch of inputs and return raw vectors.\n *\n * @param texts - Input strings.\n * @param options - Pooling + normalization passed to the underlying pipeline.\n */\n embed(texts: string[], options: Required<EmbedOptions>): Promise<number[][]>;\n /** Release pipeline resources. */\n unload?(): Promise<void>;\n}\n\ntype TransformersModule = typeof import(\"@huggingface/transformers\");\n\nlet transformersModulePromise: Promise<TransformersModule> | null = null;\n\nasync function loadTransformers(): Promise<TransformersModule> {\n if (!transformersModulePromise) {\n transformersModulePromise = import(\"@huggingface/transformers\");\n }\n return transformersModulePromise;\n}\n\nasync function buildDefaultPipeline(\n preset: EmbeddingPreset,\n onProgress?: ProgressCallback\n): Promise<EmbedPipeline> {\n const transformers = await loadTransformers();\n try {\n const pipe = await transformers.pipeline(\"feature-extraction\", preset.transformersId, {\n progress_callback: (report: unknown): void => {\n if (!onProgress) return;\n const r = report as { progress?: number; status?: string };\n onProgress({\n progress: typeof r.progress === \"number\" ? 
r.progress / 100 : 0,\n text: r.status ?? \"\",\n loaded: 0,\n total: 0,\n phase: \"downloading\",\n });\n },\n });\n return {\n async embed(texts, options): Promise<number[][]> {\n const output = await pipe(texts, {\n pooling: options.pooling,\n normalize: options.normalize,\n });\n return output.tolist();\n },\n async unload(): Promise<void> {\n if (typeof (pipe as { dispose?: () => Promise<void> }).dispose === \"function\") {\n await (pipe as unknown as { dispose: () => Promise<void> }).dispose();\n }\n },\n };\n } catch (err) {\n throw new ModelLoadError(`Failed to load embedding model \"${preset.id}\".`, err);\n }\n}\n\n/**\n * Sentence embedding task backed by `@huggingface/transformers`.\n *\n * Use {@link Embeddings.create} to construct an instance — the constructor is\n * private. The default backend lazy-loads the transformers.js runtime; tests\n * inject a {@link EmbedPipeline} mock instead.\n *\n * @example\n * ```ts\n * const emb = await Embeddings.create(\"bge-small-en-v1.5\");\n * const vectors = await emb.embed([\"hello world\", \"another sentence\"]);\n * console.log(vectors[0].length); // 384\n * ```\n */\nexport class Embeddings {\n private constructor(\n private readonly pipeline: EmbedPipeline,\n /** Resolved metadata for the loaded model. */\n public readonly preset: EmbeddingPreset\n ) {}\n\n /**\n * Create and load an `Embeddings` task for the given model.\n *\n * @param modelId - Friendly id from the embedding registry.\n * @param options - Optional creation options.\n * @throws UnknownModelError if `modelId` is not in the registry.\n * @throws ModelLoadError if the underlying pipeline fails to load.\n */\n static async create(modelId: string, options: EmbeddingsCreateOptions = {}): Promise<Embeddings> {\n const preset = resolveEmbeddingPreset(modelId);\n const pipeline = options.pipeline ?? (await buildDefaultPipeline(preset, options.onProgress));\n return new Embeddings(pipeline, preset);\n }\n\n /**\n * Encode an array of strings into dense vectors.\n *\n * Returns one vector per input, in the same order. Empty input array\n * returns an empty array (no error).\n *\n * @param texts - Input strings.\n * @param options - Pooling + normalization. Defaults: `pooling: \"mean\"`, `normalize: true`.\n */\n async embed(texts: string[], options: EmbedOptions = {}): Promise<number[][]> {\n if (texts.length === 0) return [];\n if (!this.pipeline) {\n throw new ModelNotLoadedError(\"Embeddings pipeline not initialized.\");\n }\n const merged: Required<EmbedOptions> = {\n normalize: options.normalize ?? true,\n pooling: options.pooling ?? \"mean\",\n };\n return this.pipeline.embed(texts, merged);\n }\n\n /**\n * Convenience: encode a single string and return its vector.\n *\n * @param text - Input string.\n * @param options - Forwarded to {@link Embeddings.embed}.\n */\n async embedSingle(text: string, options: EmbedOptions = {}): Promise<number[]> {\n const [vec] = await this.embed([text], options);\n if (!vec) {\n throw new ModelLoadError(\"Embedding pipeline returned no result.\");\n }\n return vec;\n }\n\n /** Embedding dimension exposed by the loaded model. */\n get dimension(): number {\n return this.preset.dimension;\n }\n\n /** Release pipeline resources. Safe to call multiple times. 
*/\n async unload(): Promise<void> {\n await this.pipeline.unload?.();\n }\n}\n","import { ModelLoadError, ModelNotLoadedError } from \"../core/exceptions\";\nimport { resolveRerankerPreset, type RerankerPreset } from \"../presets/models\";\nimport type { ProgressCallback } from \"../types\";\n\n/** Options accepted by {@link Reranker.create}. */\nexport interface RerankerCreateOptions {\n /** Optional callback for model load progress updates. */\n onProgress?: ProgressCallback;\n /** Override the rerank pipeline. Intended for testing. */\n pipeline?: RerankPipeline;\n}\n\n/** Options accepted by {@link Reranker.score}. */\nexport interface RerankOptions {\n /**\n * Apply sigmoid to logits to map scores into `[0, 1]`. Recommended when the\n * downstream code uses scores as probabilities. Default `false` (raw logits).\n */\n sigmoid?: boolean;\n}\n\n/** A document paired with its score, for {@link Reranker.rank}. */\nexport interface RankedDocument {\n /** The document text. */\n text: string;\n /** Score from the cross-encoder. */\n score: number;\n /** Original index of the document in the input array. */\n index: number;\n}\n\n/**\n * Minimal pipeline contract that {@link Reranker} depends on.\n *\n * The default implementation wraps `@huggingface/transformers`. Tests inject\n * a fake satisfying the same shape — they never load the real runtime.\n */\nexport interface RerankPipeline {\n /**\n * Score `(query, doc)` pairs. One score per doc, in the same order.\n *\n * @param query - Single query string.\n * @param docs - Documents to score against the query.\n */\n score(query: string, docs: string[]): Promise<number[]>;\n /** Release pipeline resources. */\n unload?(): Promise<void>;\n}\n\ntype TransformersModule = typeof import(\"@huggingface/transformers\");\n\nlet transformersModulePromise: Promise<TransformersModule> | null = null;\n\nasync function loadTransformers(): Promise<TransformersModule> {\n if (!transformersModulePromise) {\n transformersModulePromise = import(\"@huggingface/transformers\");\n }\n return transformersModulePromise;\n}\n\nfunction sigmoidValue(x: number): number {\n return 1 / (1 + Math.exp(-x));\n}\n\nasync function buildDefaultPipeline(\n preset: RerankerPreset,\n onProgress?: ProgressCallback\n): Promise<RerankPipeline> {\n const transformers = await loadTransformers();\n try {\n const tokenizer = await transformers.AutoTokenizer.from_pretrained(preset.transformersId, {\n progress_callback: (report: unknown): void => {\n if (!onProgress) return;\n const r = report as { progress?: number; status?: string };\n onProgress({\n progress: typeof r.progress === \"number\" ? r.progress / 100 : 0,\n text: r.status ?? \"\",\n loaded: 0,\n total: 0,\n phase: \"downloading\",\n });\n },\n });\n const model = await transformers.AutoModelForSequenceClassification.from_pretrained(\n preset.transformersId,\n {\n progress_callback: (report: unknown): void => {\n if (!onProgress) return;\n const r = report as { progress?: number; status?: string };\n onProgress({\n progress: typeof r.progress === \"number\" ? r.progress / 100 : 0,\n text: r.status ?? 
\"\",\n loaded: 0,\n total: 0,\n phase: \"downloading\",\n });\n },\n }\n );\n return {\n async score(query, docs): Promise<number[]> {\n if (docs.length === 0) return [];\n const queries: string[] = docs.map(() => query);\n // `transformers.js` AutoTokenizer accepts `(text, options)` where\n // `options.text_pair` carries the second sequence; pair-input typing\n // isn't exported, so we cast through `unknown`.\n const tokenize = tokenizer as unknown as (\n text: string[],\n options: Record<string, unknown>\n ) => Record<string, unknown>;\n const inputs = tokenize(queries, {\n text_pair: docs,\n padding: true,\n truncation: true,\n max_length: preset.maxTokens,\n });\n const callModel = model as unknown as (\n inputs: Record<string, unknown>\n ) => Promise<{ logits: { tolist: () => number[][] } }>;\n const outputs = await callModel(inputs);\n const logits: number[][] = outputs.logits.tolist();\n return logits.map((row) => row[0] ?? 0);\n },\n async unload(): Promise<void> {\n const m = model as unknown as { dispose?: () => Promise<unknown> };\n if (typeof m.dispose === \"function\") await m.dispose();\n },\n };\n } catch (err) {\n throw new ModelLoadError(`Failed to load reranker model \"${preset.id}\".`, err);\n }\n}\n\n/**\n * Cross-encoder reranking task backed by `@huggingface/transformers`.\n *\n * Use {@link Reranker.create} to construct an instance — the constructor is\n * private. Useful as a second-stage step in a retrieve-then-rerank pipeline:\n * pull top-K candidates with a fast embedding similarity, then rerank with\n * a cross-encoder for higher precision.\n *\n * @example\n * ```ts\n * const rerank = await Reranker.create(\"bge-reranker-base\");\n * const scores = await rerank.score(\"what is webgpu?\", [\n * \"WebGPU is a modern graphics API\",\n * \"Bananas grow on trees\",\n * ]);\n * // scores[0] >> scores[1]\n * ```\n *\n * @example Ranked output sorted by score\n * ```ts\n * const ranked = await rerank.rank(\"what is webgpu?\", docs);\n * for (const r of ranked) console.log(r.score, r.text);\n * ```\n */\nexport class Reranker {\n private constructor(\n private readonly pipeline: RerankPipeline,\n /** Resolved metadata for the loaded model. */\n public readonly preset: RerankerPreset\n ) {}\n\n /**\n * Create and load a `Reranker` task for the given model.\n *\n * @param modelId - Friendly id from the reranker registry.\n * @param options - Optional creation options.\n * @throws UnknownModelError if `modelId` is not in the registry.\n * @throws ModelLoadError if the underlying pipeline fails to load.\n */\n static async create(modelId: string, options: RerankerCreateOptions = {}): Promise<Reranker> {\n const preset = resolveRerankerPreset(modelId);\n const pipeline = options.pipeline ?? (await buildDefaultPipeline(preset, options.onProgress));\n return new Reranker(pipeline, preset);\n }\n\n /**\n * Score each document against the query. Returns one score per doc, in\n * the same order. Empty `docs` returns `[]` (no error).\n *\n * @param query - Query string.\n * @param docs - Documents to score.\n * @param options - `sigmoid: true` maps logits into `[0, 1]`.\n */\n async score(query: string, docs: string[], options: RerankOptions = {}): Promise<number[]> {\n if (docs.length === 0) return [];\n if (!this.pipeline) {\n throw new ModelNotLoadedError(\"Reranker pipeline not initialized.\");\n }\n const raw = await this.pipeline.score(query, docs);\n return options.sigmoid ? 
raw.map(sigmoidValue) : raw;\n }\n\n /**\n * Score and sort documents by score in descending order. Returns a list of\n * {@link RankedDocument}s carrying the original index.\n *\n * @param query - Query string.\n * @param docs - Documents to rank.\n * @param options - Forwarded to {@link Reranker.score}.\n */\n async rank(\n query: string,\n docs: string[],\n options: RerankOptions = {}\n ): Promise<RankedDocument[]> {\n const scores = await this.score(query, docs, options);\n const ranked: RankedDocument[] = scores.map((score, index) => {\n const text: string = docs[index] ?? \"\";\n return { text, score, index };\n });\n ranked.sort((a, b) => b.score - a.score);\n return ranked;\n }\n\n /** Release pipeline resources. Safe to call multiple times. */\n async unload(): Promise<void> {\n await this.pipeline.unload?.();\n }\n}\n","import { MODEL_PRESETS, resolveModelPreset } from \"../presets/models\";\nimport { UnknownModelError } from \"../core/exceptions\";\n\n/** Snapshot of a single cached model's metadata. */\nexport interface CachedModelEntry {\n /** Friendly id from the registry (e.g. `\"llama-3.2-1b-int4\"`). */\n id: string;\n /** Backend-specific id (e.g. WebLLM `webllmId`). */\n backendId: string;\n /** Human-readable family name. */\n family: string;\n /** Approx parameter count, e.g. `\"1B\"`. */\n parameters: string;\n}\n\n/** Aggregate storage usage reported by the browser. */\nexport interface CacheUsage {\n /** Bytes used by the entire origin's storage (not just our cache). */\n usage: number;\n /** Bytes the browser is willing to give the origin. */\n quota: number;\n}\n\n/**\n * Hooks the {@link ModelCache} uses to talk to the underlying runtime and\n * the browser. Tests inject mocks; production code leaves them undefined,\n * letting `ModelCache` resolve the real `@mlc-ai/web-llm` helpers and\n * `navigator.storage.estimate()` lazily.\n */\nexport interface ModelCacheOptions {\n /** Override `hasModelInCache` from the runtime. */\n hasModel?: (backendId: string) => Promise<boolean>;\n /** Override `deleteModelInCache` from the runtime. */\n deleteModel?: (backendId: string) => Promise<void>;\n /** Override `navigator.storage.estimate()`. */\n estimate?: () => Promise<CacheUsage>;\n}\n\ntype WebLLMCacheModule = {\n hasModelInCache: (id: string) => Promise<boolean>;\n deleteModelInCache: (id: string) => Promise<void>;\n};\n\nlet webllmCachePromise: Promise<WebLLMCacheModule> | null = null;\n\nasync function loadWebLLMCacheHelpers(): Promise<WebLLMCacheModule> {\n if (!webllmCachePromise) {\n webllmCachePromise = import(\"@mlc-ai/web-llm\").then((m) => ({\n hasModelInCache: m.hasModelInCache,\n deleteModelInCache: m.deleteModelInCache,\n }));\n }\n return webllmCachePromise;\n}\n\nasync function defaultEstimate(): Promise<CacheUsage> {\n if (typeof navigator === \"undefined\" || !navigator.storage?.estimate) {\n return { usage: 0, quota: 0 };\n }\n const estimate = await navigator.storage.estimate();\n return {\n usage: estimate.usage ?? 0,\n quota: estimate.quota ?? 0,\n };\n}\n\n/**\n * Inspect and manage cached model weights.\n *\n * `localm-web` does not download or cache weights itself — that work is owned\n * by `@mlc-ai/web-llm`, which writes to the browser Cache API. 
`ModelCache`\n * is a thin wrapper that lets a consuming app surface cache state in its UI:\n * \"this model is downloaded\", \"you have 1.4 GB cached, free up space?\",\n * \"clear all models on logout\".\n *\n * @example\n * ```ts\n * const cache = new ModelCache();\n * if (await cache.has(\"llama-3.2-1b-int4\")) {\n * console.log(\"ready offline\");\n * }\n * const cached = await cache.list();\n * await cache.delete(\"phi-3.5-mini-int4\");\n * const usage = await cache.estimateUsage();\n * console.log(`${usage.usage} / ${usage.quota} bytes`);\n * ```\n */\nexport class ModelCache {\n private readonly hasModelHook: ((id: string) => Promise<boolean>) | undefined;\n private readonly deleteModelHook: ((id: string) => Promise<void>) | undefined;\n private readonly estimateHook: () => Promise<CacheUsage>;\n\n constructor(options: ModelCacheOptions = {}) {\n this.hasModelHook = options.hasModel;\n this.deleteModelHook = options.deleteModel;\n this.estimateHook = options.estimate ?? defaultEstimate;\n }\n\n /**\n * Whether the model's weights are present in the browser cache.\n *\n * @param modelId - Friendly id from the registry.\n * @throws UnknownModelError if `modelId` is not in the registry.\n */\n async has(modelId: string): Promise<boolean> {\n const backendId: string = resolveModelPreset(modelId).webllmId;\n const fn = this.hasModelHook ?? (await loadWebLLMCacheHelpers()).hasModelInCache;\n return fn(backendId);\n }\n\n /**\n * Delete a single model's weights from the browser cache. No-op when the\n * model is not cached.\n *\n * @param modelId - Friendly id from the registry.\n * @throws UnknownModelError if `modelId` is not in the registry.\n */\n async delete(modelId: string): Promise<void> {\n const backendId: string = resolveModelPreset(modelId).webllmId;\n const fn = this.deleteModelHook ?? (await loadWebLLMCacheHelpers()).deleteModelInCache;\n await fn(backendId);\n }\n\n /**\n * List the registry models that are currently cached.\n *\n * Iterates `MODEL_PRESETS` and probes each one. Only returns models known\n * to the SDK — models cached by external WebLLM calls outside our registry\n * are not included.\n *\n * @returns Empty list when nothing is cached.\n */\n async list(): Promise<CachedModelEntry[]> {\n const fn = this.hasModelHook ?? (await loadWebLLMCacheHelpers()).hasModelInCache;\n const probes = await Promise.all(\n Object.values(MODEL_PRESETS).map(async (preset) => {\n const cached: boolean = await fn(preset.webllmId);\n if (!cached) return null;\n const entry: CachedModelEntry = {\n id: preset.id,\n backendId: preset.webllmId,\n family: preset.family,\n parameters: preset.parameters,\n };\n return entry;\n })\n );\n return probes.filter((p): p is CachedModelEntry => p !== null);\n }\n\n /**\n * Delete every registry model from the cache. Useful for logout flows or\n * \"reset\" buttons. Models cached outside the registry are not touched.\n */\n async clear(): Promise<void> {\n const fn = this.deleteModelHook ?? (await loadWebLLMCacheHelpers()).deleteModelInCache;\n await Promise.all(Object.values(MODEL_PRESETS).map((p) => fn(p.webllmId)));\n }\n\n /**\n * Aggregate storage stats from the browser. 
Returned numbers cover the\n * entire origin (Cache API + IndexedDB + Service Workers + OPFS), not\n * just our model cache — use it for \"you have X of Y available\" hints.\n */\n async estimateUsage(): Promise<CacheUsage> {\n return this.estimateHook();\n }\n\n /**\n * Throw a descriptive error if the given id is not in the registry.\n * Exposed for code paths that want to validate before calling other\n * methods (those already throw on their own).\n *\n * @throws UnknownModelError\n */\n static assertKnown(modelId: string): void {\n if (!(modelId in MODEL_PRESETS)) {\n const available = Object.keys(MODEL_PRESETS).join(\", \");\n throw new UnknownModelError(`Unknown model \"${modelId}\". Available models: ${available}.`);\n }\n }\n}\n","import type { TokenChunk } from \"../types\";\n\n/**\n * Drain an async iterable of token chunks into a single string.\n *\n * Useful in tests, for non-streaming consumers, and as a one-line way to\n * reconstruct the final text from a `Chat.stream(...)` call.\n *\n * @param stream - The token-chunk async iterable to consume.\n * @returns The concatenation of every chunk's `text` field.\n */\nexport async function collectStream(stream: AsyncIterable<TokenChunk>): Promise<string> {\n let acc: string = \"\";\n for await (const chunk of stream) {\n acc += chunk.text;\n }\n return acc;\n}\n\n/**\n * Wrap an async iterable so that each `TokenChunk` is also passed to a\n * caller-supplied side-effect callback before being yielded downstream.\n *\n * This is intentionally a passthrough — it does not buffer.\n *\n * @param stream - The upstream token-chunk async iterable.\n * @param onChunk - Side-effect invoked for every chunk.\n * @returns A new async iterable yielding the same chunks.\n */\nexport async function* tap(\n stream: AsyncIterable<TokenChunk>,\n onChunk: (chunk: TokenChunk) => void\n): AsyncIterable<TokenChunk> {\n for await (const chunk of stream) {\n onChunk(chunk);\n yield chunk;\n }\n}\n","/**\n * localm-web — browser-only TypeScript SDK for running LLMs and SLMs locally.\n *\n * Public API surface for v0.1.\n *\n * @packageDocumentation\n */\n\nexport { Chat } from \"./tasks/chat\";\nexport { Completion } from \"./tasks/completion\";\nexport { Embeddings } from \"./tasks/embeddings\";\nexport type { EmbeddingsCreateOptions, EmbedOptions, EmbedPipeline } from \"./tasks/embeddings\";\nexport { Reranker } from \"./tasks/reranker\";\nexport type {\n RerankerCreateOptions,\n RerankOptions,\n RerankPipeline,\n RankedDocument,\n} from \"./tasks/reranker\";\nexport { LMTask } from \"./tasks/lm-task\";\nexport type { LMTaskCreateOptions } from \"./tasks/lm-task\";\n\nexport { ChatReply, CompletionResult } from \"./results\";\n\nexport {\n MODEL_PRESETS,\n resolveModelPreset,\n listSupportedModels,\n EMBEDDING_PRESETS,\n resolveEmbeddingPreset,\n listSupportedEmbeddingModels,\n RERANKER_PRESETS,\n resolveRerankerPreset,\n listSupportedRerankerModels,\n} from \"./presets/models\";\nexport type { EmbeddingPreset, RerankerPreset } from \"./presets/models\";\n\nexport {\n LocalmWebError,\n WebGPUUnavailableError,\n ModelLoadError,\n ModelNotLoadedError,\n UnknownModelError,\n GenerationAbortedError,\n QuotaExceededError,\n BackendNotAvailableError,\n StructuredOutputError,\n} from \"./core/exceptions\";\n\nexport {\n assertJsonSchema,\n serializeJsonSchema,\n parseStructuredOutput,\n} from \"./structured/json-schema\";\n\nexport type { Engine } from \"./core/engine\";\nexport { WorkerEngine } from \"./core/worker-engine\";\nexport { createInferenceWorker } 
from \"./worker/create-worker\";\nexport type { WorkerLike } from \"./worker/protocol\";\n\nexport { ModelCache } from \"./cache\";\nexport type { CachedModelEntry, CacheUsage, ModelCacheOptions } from \"./cache\";\n\nexport { collectStream, tap } from \"./streaming/token-stream\";\n\nexport type {\n Role,\n FinishReason,\n Message,\n GenerationOptions,\n ModelLoadProgress,\n ModelLoadPhase,\n ProgressCallback,\n TokenChunk,\n ModelPreset,\n} from \"./types\";\n\n/** Current package version. Updated at release time. */\nexport const VERSION: string = \"0.4.0\";\n"],"names":["transformersModulePromise","loadTransformers","buildDefaultPipeline"],"mappings":"AAEA,MAAM,mBAA2B;AACjC,MAAM,kBAA0B;AAazB,SAAS,kBAAkB,MAA8B;AAC9D,MAAI,iBAAiB,KAAK,IAAI,EAAG,QAAO;AACxC,MAAI,gBAAgB,KAAK,IAAI,EAAG,QAAO;AACvC,SAAO;AACT;ACXO,MAAM,uBAAuB,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA,EAKxC,YACE,SACgB,OAChB;AACA,UAAM,OAAO;AAFG,SAAA,QAAA;AAGhB,SAAK,OAAO,WAAW;AAAA,EACzB;AACF;AAGO,MAAM,+BAA+B,eAAe;AAAC;AAGrD,MAAM,uBAAuB,eAAe;AAAC;AAG7C,MAAM,4BAA4B,eAAe;AAAC;AAGlD,MAAM,0BAA0B,eAAe;AAAC;AAGhD,MAAM,+BAA+B,eAAe;AAAC;AAGrD,MAAM,2BAA2B,eAAe;AAAC;AAGjD,MAAM,iCAAiC,eAAe;AAAC;AASvD,MAAM,8BAA8B,eAAe;AAAC;AC1BpD,SAAS,iBAAiB,QAA2C;AAC1E,MAAI,WAAW,QAAQ,OAAO,WAAW,YAAY,MAAM,QAAQ,MAAM,GAAG;AAC1E,UAAM,IAAI,sBAAsB,6DAA6D;AAAA,EAC/F;AACA,QAAM,OAAiB,OAAO,KAAK,MAAM;AACzC,QAAM,aAAgC;AAAA,IACpC;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAEF,MAAI,CAAC,KAAK,KAAK,CAAC,QAAQ,WAAW,SAAS,GAAG,CAAC,GAAG;AACjD,UAAM,IAAI;AAAA,MACR;AAAA,IAAA;AAAA,EAEJ;AACF;AAcO,SAAS,oBAAoB,QAAyB;AAC3D,mBAAiB,MAAM;AACvB,SAAO,KAAK,UAAU,MAAM;AAC9B;AAWO,SAAS,sBAAmC,MAAiB;AAClE,MAAI;AACF,WAAO,KAAK,MAAM,IAAI;AAAA,EACxB,SAAS,KAAK;AACZ,UAAM,IAAI;AAAA,MACR;AAAA,MACA;AAAA,IAAA;AAAA,EAEJ;AACF;AClEA,IAAI,sBAAoD;AAExD,eAAe,aAAoC;AACjD,MAAI,CAAC,qBAAqB;AACxB,0BAAsB,OAAO,iBAAiB;AAAA,EAChD;AACA,SAAO;AACT;AAEA,SAAS,oBAA6B;AACpC,SAAO,OAAO,cAAc,eAAe,SAAS;AACtD;AAQA,SAAS,oBAAoB,SAA4C;AACvE,QAAM,SAAyB,CAAA;AAC/B,MAAI,QAAQ,cAAc,OAAW,QAAO,aAAa,QAAQ;AACjE,MAAI,QAAQ,gBAAgB,OAAW,QAAO,cAAc,QAAQ;AACpE,MAAI,QAAQ,SAAS,OAAW,QAAO,QAAQ,QAAQ;AACvD,SAAO;AACT;AAWA,SAAS,oBAAoB,SAAwD;AACnF,MAAI,QAAQ,eAAe,QAAW;AACpC,WAAO,EAAE,MAAM,eAAe,QAAQ,oBAAoB,QAAQ,UAAU,EAAA;AAAA,EAC9E;AACA,MAAI,QAAQ,MAAM;AAChB,WAAO,EAAE,MAAM,cAAA;AAAA,EACjB;AACA,SAAO;AACT;AAEA,SAAS,eAAe,UAAmD;AACzE,SAAO,SAAS,IAAI,CAAC,MAAkC;AACrD,YAAQ,EAAE,MAAA;AAAA,MACR,KAAK;AACH,eAAO,EAAE,MAAM,UAAU,SAAS,EAAE,QAAA;AAAA,MACtC,KAAK;AACH,eAAO,EAAE,MAAM,QAAQ,SAAS,EAAE,QAAA;AAAA,MACpC,KAAK;AACH,eAAO,EAAE,MAAM,aAAa,SAAS,EAAE,QAAA;AAAA,MACzC,KAAK;AACH,eAAO,EAAE,MAAM,QAAQ,SAAS,EAAE,SAAS,cAAc,EAAE,QAAQ,GAAA;AAAA,IAAG;AAAA,EAE5E,CAAC;AACH;AAQO,MAAM,aAA+B;AAAA,EAClC,SAA2B;AAAA,EAEnC,WAAoB;AAClB,WAAO,KAAK,WAAW;AAAA,EACzB;AAAA,EAEA,MAAM,KAAK,SAAiB,YAA8C;AACxE,QAAI,CAAC,qBAAqB;AACxB,YAAM,IAAI;AAAA,QACR;AAAA,MAAA;AAAA,IAEJ;AACA,UAAM,SAAS,MAAM,WAAA;AACrB,QAAI;AACF,WAAK,SAAS,MAAM,OAAO,gBAAgB,SAAS;AAAA,QAClD,sBAAsB,CAAC,WAAiB;AACtC,uBAAa;AAAA,YACX,UAAU,OAAO;AAAA,YACjB,MAAM,OAAO;AAAA,YACb,QAAQ;AAAA,YACR,OAAO;AAAA,YACP,OAAO,kBAAkB,OAAO,IAAI;AAAA,UAAA,CACrC;AAAA,QACH;AAAA,MAAA,CACD;AACD,mBAAa;AAAA,QACX,UAAU;AAAA,QACV,MAAM;AAAA,QACN,QAAQ;AAAA,QACR,OAAO;AAAA,QACP,OAAO;AAAA,MAAA,CACR;AAAA,IACH,SAAS,KAAK;AACZ,YAAM,IAAI,eAAe,yBAAyB,OAAO,MAAM,GAAG;AAAA,IACpE;AAAA,EACF;AAAA,EAEA,MAAM,SAAS,UAAqB,UAA6B,IAAqB;AACpF,UAAM,SAAS,KAAK,cAAA;AACpB,QAAI,QAAQ,QAAQ,SAAS;AAC3B,YAAM,IAAI,uBAAuB,kCAAkC;AAAA,IACrE;AACA,UAAM,iBAAiB,oBAAoB,OAAO;AAClD,UAAM,aAAa,MAAM,OAAO,KAAK,YAAY,OAAO;AAAA,MACtD,GAAG,oBAAoB,OAAO;AAAA,MAC9B,UAAU,eAAe,QAAQ;AAAA,MACjC,QAAQ;AAAA,MACR,GAAI,iB
AAiB,EAAE,iBAAiB,mBAAmB,CAAA;AAAA,IAAC,CAC7D;AACD,WAAO,WAAW,QAAQ,CAAC,GAAG,SAAS,WAAW;AAAA,EACpD;AAAA,EAEA,OAAO,OAAO,UAAqB,UAA6B,IAA+B;AAC7F,UAAM,SAAS,KAAK,cAAA;AACpB,QAAI,QAAQ,QAAQ,SAAS;AAC3B,YAAM,IAAI,uBAAuB,kCAAkC;AAAA,IACrE;AACA,UAAM,iBAAiB,oBAAoB,OAAO;AAClD,UAAM,aAAa,MAAM,OAAO,KAAK,YAAY,OAAO;AAAA,MACtD,GAAG,oBAAoB,OAAO;AAAA,MAC9B,UAAU,eAAe,QAAQ;AAAA,MACjC,QAAQ;AAAA,MACR,GAAI,iBAAiB,EAAE,iBAAiB,mBAAmB,CAAA;AAAA,IAAC,CAC7D;AACD,QAAI,QAAgB;AACpB,QAAI,WAAoB;AACxB,QAAI;AACF,uBAAiB,SAAS,YAAY;AACpC,YAAI,QAAQ,QAAQ,SAAS;AAC3B,gBAAM,IAAI,uBAAuB,+BAA+B;AAAA,QAClE;AACA,cAAM,SAAS,MAAM,QAAQ,CAAC;AAC9B,cAAM,QAAQ,QAAQ,OAAO,WAAW;AACxC,YAAI,OAAO;AACT,gBAAM,EAAE,MAAM,OAAO,OAAO,MAAM,MAAA;AAClC,mBAAS;AAAA,QACX;AACA,YAAI,QAAQ,eAAe;AACzB,qBAAW;AACX,gBAAM,EAAE,MAAM,IAAI,OAAO,MAAM,KAAA;AAC/B,mBAAS;AAAA,QACX;AAAA,MACF;AACA,UAAI,CAAC,UAAU;AACb,cAAM,EAAE,MAAM,IAAI,OAAO,MAAM,KAAA;AAAA,MACjC;AAAA,IACF,SAAS,KAAK;AACZ,UAAI,eAAe,uBAAwB,OAAM;AACjD,YAAM,IAAI,eAAe,gCAAgC,GAAG;AAAA,IAC9D;AAAA,EACF;AAAA,EAEA,MAAM,SAAS,QAAgB,UAA6B,IAAqB;AAC/E,UAAM,SAAS,KAAK,cAAA;AACpB,QAAI,QAAQ,QAAQ,SAAS;AAC3B,YAAM,IAAI,uBAAuB,kCAAkC;AAAA,IACrE;AACA,UAAM,iBAAiB,oBAAoB,OAAO;AAClD,UAAM,aAAa,MAAM,OAAO,YAAY,OAAO;AAAA,MACjD,GAAG,oBAAoB,OAAO;AAAA,MAC9B;AAAA,MACA,QAAQ;AAAA,MACR,GAAI,iBAAiB,EAAE,iBAAiB,mBAAmB,CAAA;AAAA,IAAC,CAC7D;AACD,WAAO,WAAW,QAAQ,CAAC,GAAG,QAAQ;AAAA,EACxC;AAAA,EAEA,OAAO,iBACL,QACA,UAA6B,IACF;AAC3B,UAAM,SAAS,KAAK,cAAA;AACpB,QAAI,QAAQ,QAAQ,SAAS;AAC3B,YAAM,IAAI,uBAAuB,kCAAkC;AAAA,IACrE;AACA,UAAM,iBAAiB,oBAAoB,OAAO;AAClD,UAAM,aAAa,MAAM,OAAO,YAAY,OAAO;AAAA,MACjD,GAAG,oBAAoB,OAAO;AAAA,MAC9B;AAAA,MACA,QAAQ;AAAA,MACR,GAAI,iBAAiB,EAAE,iBAAiB,mBAAmB,CAAA;AAAA,IAAC,CAC7D;AACD,QAAI,QAAgB;AACpB,QAAI,WAAoB;AACxB,QAAI;AACF,uBAAiB,SAAS,YAAY;AACpC,YAAI,QAAQ,QAAQ,SAAS;AAC3B,gBAAM,IAAI,uBAAuB,+BAA+B;AAAA,QAClE;AACA,cAAM,SAAS,MAAM,QAAQ,CAAC;AAC9B,cAAM,QAAQ,QAAQ,QAAQ;AAC9B,YAAI,OAAO;AACT,gBAAM,EAAE,MAAM,OAAO,OAAO,MAAM,MAAA;AAClC,mBAAS;AAAA,QACX;AACA,YAAI,QAAQ,eAAe;AACzB,qBAAW;AACX,gBAAM,EAAE,MAAM,IAAI,OAAO,MAAM,KAAA;AAC/B,mBAAS;AAAA,QACX;AAAA,MACF;AACA,UAAI,CAAC,UAAU;AACb,cAAM,EAAE,MAAM,IAAI,OAAO,MAAM,KAAA;AAAA,MACjC;AAAA,IACF,SAAS,KAAK;AACZ,UAAI,eAAe,uBAAwB,OAAM;AACjD,YAAM,IAAI,eAAe,gCAAgC,GAAG;AAAA,IAC9D;AAAA,EACF;AAAA,EAEA,MAAM,SAAwB;AAC5B,QAAI,KAAK,QAAQ;AACf,YAAM,KAAK,OAAO,OAAA;AAClB,WAAK,SAAS;AAAA,IAChB;AAAA,EACF;AAAA,EAEQ,gBAA2B;AACjC,QAAI,CAAC,KAAK,QAAQ;AAChB,YAAM,IAAI,oBAAoB,mDAAmD;AAAA,IACnF;AACA,WAAO,KAAK;AAAA,EACd;AACF;AC5OO,SAAS,sBACd,UAA6B,IACE;AAC/B,QAAM,EAAE,QAAQ,SAAS,GAAG,SAAS;AAErC,SAAO;AACT;ACeO,MAAM,aAA+B;AAAA,EAa1C,YAA6B,QAAoB;AAApB,SAAA,SAAA;AAC3B,SAAK,WAAW,CAAC,UAAgB,KAAK,cAAc,MAAM,IAAI;AAC9D,SAAK,OAAO,iBAAiB,WAAW,KAAK,QAAQ;AAAA,EACvD;AAAA,EAfQ,SAAiB;AAAA,EACjB,SAAkB;AAAA,EAClB,cAA0E;AAAA,EAC1E,gBAAwB;AAAA,EACxB,sBAAoD;AAAA,EACpD,gBAA4E;AAAA,EAC5E,kBAA0B;AAAA,EAC1B,uCAAqD,IAAA;AAAA,EACrD,qCAAiD,IAAA;AAAA,EAExC;AAAA,EAOjB,WAAoB;AAClB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,MAAM,KAAK,SAAiB,YAA8C;AACxE,QAAI,KAAK,aAAa;AACpB,YAAM,IAAI,eAAe,sCAAsC;AAAA,IACjE;AACA,UAAM,KAAa,KAAK,WAAA;AACxB,SAAK,gBAAgB;AACrB,SAAK,sBAAsB;AAC3B,WAAO,IAAI,QAAc,CAAC,SAAS,WAAW;AAC5C,WAAK,cAAc,EAAE,SAAS,OAAA;AAC9B,WAAK,KAAK,EAAE,IAAI,QAAQ,IAAI,SAAS;AAAA,IACvC,CAAC;AAAA,EACH;AAAA,EAEA,MAAM,SAAS,UAAqB,UAA6B,IAAqB;AACpF,UAAM,KAAa,KAAK,WAAA;AACxB,WAAO,IAAI,QAAgB,CAAC,SAAS,WAAW;AAC9C,WAAK,iBAAiB,IAAI,IAAI,EAAE,SAAS,QAAQ;AACjD,WAAK,KAAK;AAAA,QACR,IAAI;AAAA,QACJ;AAAA,QACA;AAAA,QACA,SAAS,sBAAsB,OAAO;AAAA,MAAA,CACvC;AACD,cAAQ,QAAQ,iBAAiB,SAAS,MAAM,KAAK,KAAK,EAAE,IAAI,SAAS,GAAA,CAAI,CAAC;AAAA,IAChF,CAAC;AAAA,EACH;AAAA,EAEA,OAAO,OAAO,UAAqB,UAA6B,IAA+B;AAC7F,UAAM,KAAa,KAAK,WAAA;AA
CxB,UAAM,QAAsB,CAAA;AAC5B,QAAI,OAAgB;AACpB,QAAI,QAAsB;AAC1B,QAAI,SAA8B;AAElC,UAAM,SAAS,MAAY;AACzB,UAAI,QAAQ;AACV,cAAM,KAAK;AACX,iBAAS;AACT,WAAA;AAAA,MACF;AAAA,IACF;AAEA,SAAK,eAAe,IAAI,IAAI;AAAA,MAC1B,MAAM,CAAC,UAAgB;AACrB,cAAM,KAAK,KAAK;AAChB,eAAA;AAAA,MACF;AAAA,MACA,KAAK,MAAY;AACf,eAAO;AACP,eAAA;AAAA,MACF;AAAA,MACA,MAAM,CAAC,QAAc;AACnB,gBAAQ;AACR,eAAO;AACP,eAAA;AAAA,MACF;AAAA,IAAA,CACD;AAED,SAAK,KAAK;AAAA,MACR,IAAI;AAAA,MACJ;AAAA,MACA;AAAA,MACA,SAAS,sBAAsB,OAAO;AAAA,IAAA,CACvC;AACD,YAAQ,QAAQ,iBAAiB,SAAS,MAAM,KAAK,KAAK,EAAE,IAAI,SAAS,GAAA,CAAI,CAAC;AAE9E,QAAI;AACF,aAAO,MAAM;AACX,YAAI,MAAM,SAAS,GAAG;AACpB,gBAAM,QAAQ,MAAM,MAAA;AACpB,cAAI,MAAO,OAAM;AACjB;AAAA,QACF;AACA,YAAI,MAAO,OAAM;AACjB,YAAI,KAAM;AACV,cAAM,IAAI,QAAc,CAAC,MAAM;AAC7B,mBAAS;AAAA,QACX,CAAC;AAAA,MACH;AAAA,IACF,UAAA;AACE,WAAK,eAAe,OAAO,EAAE;AAAA,IAC/B;AAAA,EACF;AAAA,EAEA,MAAM,SAAS,QAAgB,UAA6B,IAAqB;AAC/E,UAAM,KAAa,KAAK,WAAA;AACxB,WAAO,IAAI,QAAgB,CAAC,SAAS,WAAW;AAC9C,WAAK,iBAAiB,IAAI,IAAI,EAAE,SAAS,QAAQ;AACjD,WAAK,KAAK;AAAA,QACR,IAAI;AAAA,QACJ;AAAA,QACA;AAAA,QACA,SAAS,sBAAsB,OAAO;AAAA,MAAA,CACvC;AACD,cAAQ,QAAQ,iBAAiB,SAAS,MAAM,KAAK,KAAK,EAAE,IAAI,SAAS,GAAA,CAAI,CAAC;AAAA,IAChF,CAAC;AAAA,EACH;AAAA,EAEA,OAAO,iBACL,QACA,UAA6B,IACF;AAC3B,UAAM,KAAa,KAAK,WAAA;AACxB,UAAM,QAAsB,CAAA;AAC5B,QAAI,OAAgB;AACpB,QAAI,QAAsB;AAC1B,QAAI,SAA8B;AAElC,UAAM,SAAS,MAAY;AACzB,UAAI,QAAQ;AACV,cAAM,KAAK;AACX,iBAAS;AACT,WAAA;AAAA,MACF;AAAA,IACF;AAEA,SAAK,eAAe,IAAI,IAAI;AAAA,MAC1B,MAAM,CAAC,UAAgB;AACrB,cAAM,KAAK,KAAK;AAChB,eAAA;AAAA,MACF;AAAA,MACA,KAAK,MAAY;AACf,eAAO;AACP,eAAA;AAAA,MACF;AAAA,MACA,MAAM,CAAC,QAAc;AACnB,gBAAQ;AACR,eAAO;AACP,eAAA;AAAA,MACF;AAAA,IAAA,CACD;AAED,SAAK,KAAK;AAAA,MACR,IAAI;AAAA,MACJ;AAAA,MACA;AAAA,MACA,SAAS,sBAAsB,OAAO;AAAA,IAAA,CACvC;AACD,YAAQ,QAAQ,iBAAiB,SAAS,MAAM,KAAK,KAAK,EAAE,IAAI,SAAS,GAAA,CAAI,CAAC;AAE9E,QAAI;AACF,aAAO,MAAM;AACX,YAAI,MAAM,SAAS,GAAG;AACpB,gBAAM,QAAQ,MAAM,MAAA;AACpB,cAAI,MAAO,OAAM;AACjB;AAAA,QACF;AACA,YAAI,MAAO,OAAM;AACjB,YAAI,KAAM;AACV,cAAM,IAAI,QAAc,CAAC,MAAM;AAC7B,mBAAS;AAAA,QACX,CAAC;AAAA,MACH;AAAA,IACF,UAAA;AACE,WAAK,eAAe,OAAO,EAAE;AAAA,IAC/B;AAAA,EACF;AAAA,EAEA,MAAM,SAAwB;AAC5B,QAAI,CAAC,KAAK,OAAQ;AAClB,QAAI,KAAK,eAAe;AACtB,YAAM,IAAI,eAAe,wCAAwC;AAAA,IACnE;AACA,UAAM,KAAa,KAAK,WAAA;AACxB,SAAK,kBAAkB;AACvB,WAAO,IAAI,QAAc,CAAC,SAAS,WAAW;AAC5C,WAAK,gBAAgB,EAAE,SAAS,OAAA;AAChC,WAAK,KAAK,EAAE,IAAI,UAAU,IAAI;AAAA,IAChC,CAAC;AAAA,EACH;AAAA;AAAA,EAGA,YAAkB;AAChB,SAAK,OAAO,oBAAoB,WAAW,KAAK,QAAQ;AACxD,SAAK,OAAO,UAAA;AACZ,SAAK,SAAS;AAAA,EAChB;AAAA,EAEQ,aAAqB;AAC3B,UAAM,KAAK,KAAK;AAChB,SAAK,UAAU;AACf,WAAO;AAAA,EACT;AAAA,EAEQ,KAAK,KAA0B;AACrC,SAAK,OAAO,YAAY,GAAG;AAAA,EAC7B;AAAA,EAEQ,cAAc,KAA2B;AAC/C,YAAQ,IAAI,IAAA;AAAA,MACV,KAAK;AACH,YAAI,KAAK,eAAe,IAAI,OAAO,KAAK,eAAe;AACrD,eAAK,SAAS;AACd,eAAK,YAAY,QAAA;AACjB,eAAK,cAAc;AACnB,eAAK,sBAAsB;AAAA,QAC7B;AACA;AAAA,MACF,KAAK;AACH,YAAI,IAAI,OAAO,KAAK,eAAe;AACjC,eAAK,sBAAsB,IAAI,OAAO;AAAA,QACxC;AACA;AAAA,MACF,KAAK,aAAa;AAChB,cAAM,UAAU,KAAK,iBAAiB,IAAI,IAAI,EAAE;AAChD,YAAI,SAAS;AACX,kBAAQ,QAAQ,IAAI,IAAI;AACxB,eAAK,iBAAiB,OAAO,IAAI,EAAE;AAAA,QACrC;AACA;AAAA,MACF;AAAA,MACA,KAAK,SAAS;AACZ,cAAM,SAAS,KAAK,eAAe,IAAI,IAAI,EAAE;AAC7C,gBAAQ,KAAK,IAAI,KAAK;AACtB;AAAA,MACF;AAAA,MACA,KAAK,cAAc;AACjB,cAAM,SAAS,KAAK,eAAe,IAAI,IAAI,EAAE;AAC7C,gBAAQ,IAAA;AACR;AAAA,MACF;AAAA,MACA,KAAK;AACH,YAAI,KAAK,iBAAiB,IAAI,OAAO,KAAK,iBAAiB;AACzD,eAAK,SAAS;AACd,eAAK,cAAc,QAAA;AACnB,eAAK,gBAAgB;AAAA,QACvB;AACA;AAAA,MACF,KAAK;AACH;AAAA,MACF,KAAK,SAAS;AACZ,cAAM,MAAM,SAAS,IAAI,MAAM,IAAI,OAAO;AAC1C,YAAI,KAAK,eAAe,IAAI,OAAO,KAAK,eAAe;AACrD,eAAK,YAAY,OAAO,GAAG;AAC3B,eAAK,cAAc;AACnB,eAAK,sBAAsB;AAC3B;AAAA,QACF;AACA,YAAI,
KAAK,iBAAiB,IAAI,OAAO,KAAK,iBAAiB;AACzD,eAAK,cAAc,OAAO,GAAG;AAC7B,eAAK,gBAAgB;AACrB;AAAA,QACF;AACA,cAAM,WAAW,KAAK,iBAAiB,IAAI,IAAI,EAAE;AACjD,YAAI,UAAU;AACZ,mBAAS,OAAO,GAAG;AACnB,eAAK,iBAAiB,OAAO,IAAI,EAAE;AACnC;AAAA,QACF;AACA,cAAM,SAAS,KAAK,eAAe,IAAI,IAAI,EAAE;AAC7C,YAAI,QAAQ;AACV,iBAAO,KAAK,GAAG;AACf;AAAA,QACF;AACA;AAAA,MACF;AAAA,IAAA;AAAA,EAEJ;AACF;AAEA,SAAS,SAAS,MAAc,SAAwB;AACtD,UAAQ,MAAA;AAAA,IACN,KAAK;AACH,aAAO,IAAI,eAAe,OAAO;AAAA,IACnC,KAAK;AACH,aAAO,IAAI,oBAAoB,OAAO;AAAA,IACxC,KAAK;AACH,aAAO,IAAI,uBAAuB,OAAO;AAAA,IAC3C,SAAS;AACP,YAAM,MAAM,IAAI,MAAM,OAAO;AAC7B,UAAI,OAAO;AACX,aAAO;AAAA,IACT;AAAA,EAAA;AAEJ;AC3TO,MAAM,gBAAuD,OAAO,OAAO;AAAA,EAChF,qBAAqB;AAAA,IACnB,IAAI;AAAA,IACJ,QAAQ;AAAA,IACR,YAAY;AAAA,IACZ,cAAc;AAAA,IACd,UAAU;AAAA,IACV,eAAe;AAAA,IACf,aAAa;AAAA,EAAA;AAAA,EAEf,qBAAqB;AAAA,IACnB,IAAI;AAAA,IACJ,QAAQ;AAAA,IACR,YAAY;AAAA,IACZ,cAAc;AAAA,IACd,UAAU;AAAA,IACV,eAAe;AAAA,IACf,aAAa;AAAA,EAAA;AAAA,EAEf,qBAAqB;AAAA,IACnB,IAAI;AAAA,IACJ,QAAQ;AAAA,IACR,YAAY;AAAA,IACZ,cAAc;AAAA,IACd,UAAU;AAAA,IACV,eAAe;AAAA,IACf,aAAa;AAAA,EAAA;AAEjB,CAAC;AASM,SAAS,mBAAmB,SAA8B;AAC/D,QAAM,SAAS,cAAc,OAAO;AACpC,MAAI,CAAC,QAAQ;AACX,UAAM,YAAY,OAAO,KAAK,aAAa,EAAE,KAAK,IAAI;AACtD,UAAM,IAAI,kBAAkB,kBAAkB,OAAO,wBAAwB,SAAS,GAAG;AAAA,EAC3F;AACA,SAAO;AACT;AAGO,SAAS,sBAAgC;AAC9C,SAAO,OAAO,KAAK,aAAa;AAClC;AAyBO,MAAM,oBAA+D,OAAO,OAAO;AAAA,EACxF,qBAAqB;AAAA,IACnB,IAAI;AAAA,IACJ,QAAQ;AAAA,IACR,WAAW;AAAA,IACX,WAAW;AAAA,IACX,gBAAgB;AAAA,IAChB,cAAc;AAAA,IACd,aAAa;AAAA,EAAA;AAAA,EAEf,oBAAoB;AAAA,IAClB,IAAI;AAAA,IACJ,QAAQ;AAAA,IACR,WAAW;AAAA,IACX,WAAW;AAAA,IACX,gBAAgB;AAAA,IAChB,cAAc;AAAA,IACd,aAAa;AAAA,EAAA;AAEjB,CAAC;AASM,SAAS,uBAAuB,SAAkC;AACvE,QAAM,SAAS,kBAAkB,OAAO;AACxC,MAAI,CAAC,QAAQ;AACX,UAAM,YAAY,OAAO,KAAK,iBAAiB,EAAE,KAAK,IAAI;AAC1D,UAAM,IAAI;AAAA,MACR,4BAA4B,OAAO,wBAAwB,SAAS;AAAA,IAAA;AAAA,EAExE;AACA,SAAO;AACT;AAGO,SAAS,+BAAyC;AACvD,SAAO,OAAO,KAAK,iBAAiB;AACtC;AAqBO,MAAM,mBAA6D,OAAO,OAAO;AAAA,EACtF,qBAAqB;AAAA,IACnB,IAAI;AAAA,IACJ,QAAQ;AAAA,IACR,WAAW;AAAA,IACX,gBAAgB;AAAA,IAChB,cAAc;AAAA,IACd,aAAa;AAAA,EAAA;AAEjB,CAAC;AAQM,SAAS,sBAAsB,SAAiC;AACrE,QAAM,SAAS,iBAAiB,OAAO;AACvC,MAAI,CAAC,QAAQ;AACX,UAAM,YAAY,OAAO,KAAK,gBAAgB,EAAE,KAAK,IAAI;AACzD,UAAM,IAAI;AAAA,MACR,2BAA2B,OAAO,wBAAwB,SAAS;AAAA,IAAA;AAAA,EAEvE;AACA,SAAO;AACT;AAGO,SAAS,8BAAwC;AACtD,SAAO,OAAO,KAAK,gBAAgB;AACrC;ACvKO,SAAS,wBAAoC;AAClD,SAAO,IAAI,OAAO,IAAA;AAAA;AAAA,IAAA;AAAA,IAAA,YAAA;AAAA,EAAA,GAAmD;AAAA,IACnE,MAAM;AAAA,EAAA,CACP;AACH;AC2BO,MAAe,OAAO;AAAA,EACjB,YAEW,QAEH,QAChB;AAHmB,SAAA,SAAA;AAEH,SAAA,SAAA;AAAA,EACf;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUH,aAAuB,aACrB,SACA,UAA+B,IACN;AACzB,UAAM,SAAS,mBAAmB,OAAO;AACzC,UAAM,SAAS,QAAQ,UAAU,OAAO,cAAc,OAAO;AAC7D,QAAI,CAAC,OAAO,YAAY;AACtB,YAAM,OAAO,KAAK,OAAO,UAAU,QAAQ,UAAU;AAAA,IACvD;AACA,WAAO,EAAE,QAAQ,OAAA;AAAA,EACnB;AAAA,EAEA,OAAe,cAAc,SAAsC;AACjE,UAAM,YAAqB,QAAQ,YAAY;AAC/C,QAAI,WAAW;AACb,aAAO,IAAI,aAAa,uBAAuB;AAAA,IACjD;AACA,WAAO,IAAI,aAAA;AAAA,EACb;AAAA;AAAA,EAGA,MAAM,SAAwB;AAC5B,UAAM,KAAK,OAAO,OAAA;AAAA,EACpB;AAAA;AAAA,EAGA,WAAoB;AAClB,WAAO,KAAK,OAAO,SAAA;AAAA,EACrB;AACF;ACjFO,MAAM,UAAU;AAAA,EACrB,YAEkB,MAEA,SAEA,iBAEA,cAChB;AAPgB,SAAA,OAAA;AAEA,SAAA,UAAA;AAEA,SAAA,kBAAA;AAEA,SAAA,eAAA;AAAA,EACf;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaH,OAAuB;AACrB,WAAO,sBAAyB,KAAK,IAAI;AAAA,EAC3C;AACF;AAQO,MAAM,iBAAiB;AAAA,EAC5B,YAEkB,MAEA,QAEA,iBAEA,cAChB;AAPgB,SAAA,OAAA;AAEA,SAAA,SAAA;AAEA,SAAA,kBAAA;AAEA,SAAA,eAAA;AAAA,EACf;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYH,OAAuB;AACrB,WAAO,sBAAyB,KAAK,IAAI;AAAA,EAC3C;AACF;ACzCO,MAAM,aAAa,OAAO;AAAA,EACd,UAAqB,CAAA;AAAA,EAC9B,eAA
8B;AAAA,EAE9B,YAAY,QAAgB,QAAqB;AACvD,UAAM,QAAQ,MAAM;AAAA,EACtB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,aAAa,OAAO,SAAiB,UAA+B,IAAmB;AACrF,UAAM,EAAE,QAAQ,OAAA,IAAW,MAAM,OAAO,aAAa,SAAS,OAAO;AACrE,WAAO,IAAI,KAAK,QAAQ,MAAM;AAAA,EAChC;AAAA;AAAA,EAGA,gBAAgB,QAAsB;AACpC,SAAK,eAAe;AAAA,EACtB;AAAA;AAAA,EAGA,oBAA0B;AACxB,SAAK,eAAe;AAAA,EACtB;AAAA;AAAA,EAGA,eAAqB;AACnB,SAAK,QAAQ,SAAS;AAAA,EACxB;AAAA;AAAA,EAGA,aAAiC;AAC/B,WAAO,KAAK,QAAQ,MAAA;AAAA,EACtB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,KAAK,SAAiB,UAA6B,IAAwB;AAC/E,UAAM,WAAW,KAAK,cAAc,OAAO;AAC3C,UAAM,OAAO,MAAM,KAAK,OAAO,SAAS,UAAU,OAAO;AACzD,UAAM,UAAmB,EAAE,MAAM,QAAQ,SAAS,QAAA;AAClD,UAAM,eAAwB,EAAE,MAAM,aAAa,SAAS,KAAA;AAC5D,SAAK,QAAQ,KAAK,SAAS,YAAY;AACvC,WAAO,IAAI,UAAU,MAAM,cAAc,GAAG,MAAM;AAAA,EACpD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,OAAO,OAAO,SAAiB,UAA6B,IAA+B;AACzF,UAAM,WAAW,KAAK,cAAc,OAAO;AAC3C,UAAM,UAAmB,EAAE,MAAM,QAAQ,SAAS,QAAA;AAClD,QAAI,MAAc;AAClB,qBAAiB,SAAS,KAAK,OAAO,OAAO,UAAU,OAAO,GAAG;AAC/D,aAAO,MAAM;AACb,YAAM;AAAA,IACR;AACA,UAAM,eAAwB,EAAE,MAAM,aAAa,SAAS,IAAA;AAC5D,SAAK,QAAQ,KAAK,SAAS,YAAY;AAAA,EACzC;AAAA,EAEQ,cAAc,aAAgC;AACpD,UAAM,WAAsB,CAAA;AAC5B,QAAI,KAAK,cAAc;AACrB,eAAS,KAAK,EAAE,MAAM,UAAU,SAAS,KAAK,cAAc;AAAA,IAC9D;AACA,aAAS,KAAK,GAAG,KAAK,OAAO;AAC7B,aAAS,KAAK,EAAE,MAAM,QAAQ,SAAS,aAAa;AACpD,WAAO;AAAA,EACT;AACF;ACnFO,MAAM,mBAAmB,OAAO;AAAA,EAC7B,YAAY,QAAgB,QAAqB;AACvD,UAAM,QAAQ,MAAM;AAAA,EACtB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,aAAa,OAAO,SAAiB,UAA+B,IAAyB;AAC3F,UAAM,EAAE,QAAQ,OAAA,IAAW,MAAM,OAAO,aAAa,SAAS,OAAO;AACrE,WAAO,IAAI,WAAW,QAAQ,MAAM;AAAA,EACtC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,MAAM,QAAQ,QAAgB,UAA6B,IAA+B;AACxF,UAAM,OAAO,MAAM,KAAK,OAAO,SAAS,QAAQ,OAAO;AACvD,WAAO,IAAI,iBAAiB,MAAM,QAAQ,GAAG,MAAM;AAAA,EACrD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,OAAO,QAAgB,UAA6B,IAA+B;AACxF,qBAAiB,SAAS,KAAK,OAAO,iBAAiB,QAAQ,OAAO,GAAG;AACvE,YAAM;AAAA,IACR;AAAA,EACF;AACF;AC/BA,IAAIA,8BAAgE;AAEpE,eAAeC,qBAAgD;AAC7D,MAAI,CAACD,6BAA2B;AAC9BA,kCAA4B,OAAO,2BAA2B;AAAA,EAChE;AACA,SAAOA;AACT;AAEA,eAAeE,uBACb,QACA,YACwB;AACxB,QAAM,eAAe,MAAMD,mBAAA;AAC3B,MAAI;AACF,UAAM,OAAO,MAAM,aAAa,SAAS,sBAAsB,OAAO,gBAAgB;AAAA,MACpF,mBAAmB,CAAC,WAA0B;AAC5C,YAAI,CAAC,WAAY;AACjB,cAAM,IAAI;AACV,mBAAW;AAAA,UACT,UAAU,OAAO,EAAE,aAAa,WAAW,EAAE,WAAW,MAAM;AAAA,UAC9D,MAAM,EAAE,UAAU;AAAA,UAClB,QAAQ;AAAA,UACR,OAAO;AAAA,UACP,OAAO;AAAA,QAAA,CACR;AAAA,MACH;AAAA,IAAA,CACD;AACD,WAAO;AAAA,MACL,MAAM,MAAM,OAAO,SAA8B;AAC/C,cAAM,SAAS,MAAM,KAAK,OAAO;AAAA,UAC/B,SAAS,QAAQ;AAAA,UACjB,WAAW,QAAQ;AAAA,QAAA,CACpB;AACD,eAAO,OAAO,OAAA;AAAA,MAChB;AAAA,MACA,MAAM,SAAwB;AAC5B,YAAI,OAAQ,KAA2C,YAAY,YAAY;AAC7E,gBAAO,KAAqD,QAAA;AAAA,QAC9D;AAAA,MACF;AAAA,IAAA;AAAA,EAEJ,SAAS,KAAK;AACZ,UAAM,IAAI,eAAe,mCAAmC,OAAO,EAAE,MAAM,GAAG;AAAA,EAChF;AACF;AAgBO,MAAM,WAAW;AAAA,EACd,YACW,UAED,QAChB;AAHiB,SAAA,WAAA;AAED,SAAA,SAAA;AAAA,EACf;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUH,aAAa,OAAO,SAAiB,UAAmC,IAAyB;AAC/F,UAAM,SAAS,uBAAuB,OAAO;AAC7C,UAAM,WAAW,QAAQ,YAAa,MAAMC,uBAAqB,QAAQ,QAAQ,UAAU;AAC3F,WAAO,IAAI,WAAW,UAAU,MAAM;AAAA,EACxC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,MAAM,OAAiB,UAAwB,IAAyB;AAC5E,QAAI,MAAM,WAAW,EAAG,QAAO,CAAA;AAC/B,QAAI,CAAC,KAAK,UAAU;AAClB,YAAM,IAAI,oBAAoB,sCAAsC;AAAA,IACtE;AACA,UAAM,SAAiC;AAAA,MACrC,WAAW,QAAQ,aAAa;AAAA,MAChC,SAAS,QAAQ,WAAW;AAAA,IAAA;AAE9B,WAAO,KAAK,SAAS,MAAM,OAAO,MAAM;AAAA,EAC1C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,YAAY,MAAc,UAAwB,IAAuB;AAC7E,UAAM,CAAC,GAAG,IAAI,MAAM,KAAK,MAAM,CAAC,IAAI,GAAG,OAAO;AAC9C,QAAI,CAAC,KAAK;AACR,YAAM,IAAI,eAAe,wCAAwC;AAAA,IACnE;AACA,WAAO;AAAA,EACT;AAAA;AAAA,EAGA,IAAI,YA
AoB;AACtB,WAAO,KAAK,OAAO;AAAA,EACrB;AAAA;AAAA,EAGA,MAAM,SAAwB;AAC5B,UAAM,KAAK,SAAS,SAAA;AAAA,EACtB;AACF;ACnHA,IAAI,4BAAgE;AAEpE,eAAe,mBAAgD;AAC7D,MAAI,CAAC,2BAA2B;AAC9B,gCAA4B,OAAO,2BAA2B;AAAA,EAChE;AACA,SAAO;AACT;AAEA,SAAS,aAAa,GAAmB;AACvC,SAAO,KAAK,IAAI,KAAK,IAAI,CAAC,CAAC;AAC7B;AAEA,eAAe,qBACb,QACA,YACyB;AACzB,QAAM,eAAe,MAAM,iBAAA;AAC3B,MAAI;AACF,UAAM,YAAY,MAAM,aAAa,cAAc,gBAAgB,OAAO,gBAAgB;AAAA,MACxF,mBAAmB,CAAC,WAA0B;AAC5C,YAAI,CAAC,WAAY;AACjB,cAAM,IAAI;AACV,mBAAW;AAAA,UACT,UAAU,OAAO,EAAE,aAAa,WAAW,EAAE,WAAW,MAAM;AAAA,UAC9D,MAAM,EAAE,UAAU;AAAA,UAClB,QAAQ;AAAA,UACR,OAAO;AAAA,UACP,OAAO;AAAA,QAAA,CACR;AAAA,MACH;AAAA,IAAA,CACD;AACD,UAAM,QAAQ,MAAM,aAAa,mCAAmC;AAAA,MAClE,OAAO;AAAA,MACP;AAAA,QACE,mBAAmB,CAAC,WAA0B;AAC5C,cAAI,CAAC,WAAY;AACjB,gBAAM,IAAI;AACV,qBAAW;AAAA,YACT,UAAU,OAAO,EAAE,aAAa,WAAW,EAAE,WAAW,MAAM;AAAA,YAC9D,MAAM,EAAE,UAAU;AAAA,YAClB,QAAQ;AAAA,YACR,OAAO;AAAA,YACP,OAAO;AAAA,UAAA,CACR;AAAA,QACH;AAAA,MAAA;AAAA,IACF;AAEF,WAAO;AAAA,MACL,MAAM,MAAM,OAAO,MAAyB;AAC1C,YAAI,KAAK,WAAW,EAAG,QAAO,CAAA;AAC9B,cAAM,UAAoB,KAAK,IAAI,MAAM,KAAK;AAI9C,cAAM,WAAW;AAIjB,cAAM,SAAS,SAAS,SAAS;AAAA,UAC/B,WAAW;AAAA,UACX,SAAS;AAAA,UACT,YAAY;AAAA,UACZ,YAAY,OAAO;AAAA,QAAA,CACpB;AACD,cAAM,YAAY;AAGlB,cAAM,UAAU,MAAM,UAAU,MAAM;AACtC,cAAM,SAAqB,QAAQ,OAAO,OAAA;AAC1C,eAAO,OAAO,IAAI,CAAC,QAAQ,IAAI,CAAC,KAAK,CAAC;AAAA,MACxC;AAAA,MACA,MAAM,SAAwB;AAC5B,cAAM,IAAI;AACV,YAAI,OAAO,EAAE,YAAY,WAAY,OAAM,EAAE,QAAA;AAAA,MAC/C;AAAA,IAAA;AAAA,EAEJ,SAAS,KAAK;AACZ,UAAM,IAAI,eAAe,kCAAkC,OAAO,EAAE,MAAM,GAAG;AAAA,EAC/E;AACF;AA0BO,MAAM,SAAS;AAAA,EACZ,YACW,UAED,QAChB;AAHiB,SAAA,WAAA;AAED,SAAA,SAAA;AAAA,EACf;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUH,aAAa,OAAO,SAAiB,UAAiC,IAAuB;AAC3F,UAAM,SAAS,sBAAsB,OAAO;AAC5C,UAAM,WAAW,QAAQ,YAAa,MAAM,qBAAqB,QAAQ,QAAQ,UAAU;AAC3F,WAAO,IAAI,SAAS,UAAU,MAAM;AAAA,EACtC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAM,MAAM,OAAe,MAAgB,UAAyB,CAAA,GAAuB;AACzF,QAAI,KAAK,WAAW,EAAG,QAAO,CAAA;AAC9B,QAAI,CAAC,KAAK,UAAU;AAClB,YAAM,IAAI,oBAAoB,oCAAoC;AAAA,IACpE;AACA,UAAM,MAAM,MAAM,KAAK,SAAS,MAAM,OAAO,IAAI;AACjD,WAAO,QAAQ,UAAU,IAAI,IAAI,YAAY,IAAI;AAAA,EACnD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAM,KACJ,OACA,MACA,UAAyB,CAAA,GACE;AAC3B,UAAM,SAAS,MAAM,KAAK,MAAM,OAAO,MAAM,OAAO;AACpD,UAAM,SAA2B,OAAO,IAAI,CAAC,OAAO,UAAU;AAC5D,YAAM,OAAe,KAAK,KAAK,KAAK;AACpC,aAAO,EAAE,MAAM,OAAO,MAAA;AAAA,IACxB,CAAC;AACD,WAAO,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AACvC,WAAO;AAAA,EACT;AAAA;AAAA,EAGA,MAAM,SAAwB;AAC5B,UAAM,KAAK,SAAS,SAAA;AAAA,EACtB;AACF;AClLA,IAAI,qBAAwD;AAE5D,eAAe,yBAAqD;AAClE,MAAI,CAAC,oBAAoB;AACvB,yBAAqB,OAAO,iBAAiB,EAAE,KAAK,CAAC,OAAO;AAAA,MAC1D,iBAAiB,EAAE;AAAA,MACnB,oBAAoB,EAAE;AAAA,IAAA,EACtB;AAAA,EACJ;AACA,SAAO;AACT;AAEA,eAAe,kBAAuC;AACpD,MAAI,OAAO,cAAc,eAAe,CAAC,UAAU,SAAS,UAAU;AACpE,WAAO,EAAE,OAAO,GAAG,OAAO,EAAA;AAAA,EAC5B;AACA,QAAM,WAAW,MAAM,UAAU,QAAQ,SAAA;AACzC,SAAO;AAAA,IACL,OAAO,SAAS,SAAS;AAAA,IACzB,OAAO,SAAS,SAAS;AAAA,EAAA;AAE7B;AAuBO,MAAM,WAAW;AAAA,EACL;AAAA,EACA;AAAA,EACA;AAAA,EAEjB,YAAY,UAA6B,IAAI;AAC3C,SAAK,eAAe,QAAQ;AAC5B,SAAK,kBAAkB,QAAQ;AAC/B,SAAK,eAAe,QAAQ,YAAY;AAAA,EAC1C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,IAAI,SAAmC;AAC3C,UAAM,YAAoB,mBAAmB,OAAO,EAAE;AACtD,UAAM,KAAK,KAAK,iBAAiB,MAAM,0BAA0B;AACjE,WAAO,GAAG,SAAS;AAAA,EACrB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,MAAM,OAAO,SAAgC;AAC3C,UAAM,YAAoB,mBAAmB,OAAO,EAAE;AACtD,UAAM,KAAK,KAAK,oBAAoB,MAAM,0BAA0B;AACpE,UAAM,GAAG,SAAS;AAAA,EACpB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,OAAoC;AACxC,UAAM,KAAK,KAAK,iBAAiB,MAAM,0BAA0B;AACjE,UAAM,SAAS,MAAM,QAAQ;AAAA,MAC3B,OAAO,OAAO,aAAa,EAAE,IAAI,OAAO,WAAW;AACjD,cAAM,SAAkB
,MAAM,GAAG,OAAO,QAAQ;AAChD,YAAI,CAAC,OAAQ,QAAO;AACpB,cAAM,QAA0B;AAAA,UAC9B,IAAI,OAAO;AAAA,UACX,WAAW,OAAO;AAAA,UAClB,QAAQ,OAAO;AAAA,UACf,YAAY,OAAO;AAAA,QAAA;AAErB,eAAO;AAAA,MACT,CAAC;AAAA,IAAA;AAEH,WAAO,OAAO,OAAO,CAAC,MAA6B,MAAM,IAAI;AAAA,EAC/D;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,QAAuB;AAC3B,UAAM,KAAK,KAAK,oBAAoB,MAAM,0BAA0B;AACpE,UAAM,QAAQ,IAAI,OAAO,OAAO,aAAa,EAAE,IAAI,CAAC,MAAM,GAAG,EAAE,QAAQ,CAAC,CAAC;AAAA,EAC3E;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,gBAAqC;AACzC,WAAO,KAAK,aAAA;AAAA,EACd;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,YAAY,SAAuB;AACxC,QAAI,EAAE,WAAW,gBAAgB;AAC/B,YAAM,YAAY,OAAO,KAAK,aAAa,EAAE,KAAK,IAAI;AACtD,YAAM,IAAI,kBAAkB,kBAAkB,OAAO,wBAAwB,SAAS,GAAG;AAAA,IAC3F;AAAA,EACF;AACF;AC1KA,eAAsB,cAAc,QAAoD;AACtF,MAAI,MAAc;AAClB,mBAAiB,SAAS,QAAQ;AAChC,WAAO,MAAM;AAAA,EACf;AACA,SAAO;AACT;AAYA,gBAAuB,IACrB,QACA,SAC2B;AAC3B,mBAAiB,SAAS,QAAQ;AAChC,YAAQ,KAAK;AACb,UAAM;AAAA,EACR;AACF;ACyCO,MAAM,UAAkB;"}
1
+ {"version":3,"file":"index.js","sources":["../src/core/exceptions.ts","../src/core/load-phase.ts","../src/core/transformers-engine.ts","../src/structured/json-schema.ts","../src/core/webllm-engine.ts","../src/worker/protocol.ts","../src/core/worker-engine.ts","../src/presets/models.ts","../src/worker/create-worker.ts","../src/tasks/lm-task.ts","../src/results.ts","../src/tasks/chat.ts","../src/tasks/completion.ts","../src/tasks/embeddings.ts","../src/tasks/reranker.ts","../src/cache/model-cache.ts","../src/streaming/token-stream.ts","../src/index.ts"],"sourcesContent":["/**\n * Error hierarchy for localm-web.\n *\n * All errors thrown by the SDK extend `LocalmWebError` so consumers can\n * distinguish SDK errors from unrelated runtime errors with a single\n * `instanceof` check.\n */\n\n/** Base class for every error raised by localm-web. */\nexport class LocalmWebError extends Error {\n /**\n * @param message - Human-readable description of the error.\n * @param cause - Underlying error, if any.\n */\n constructor(\n message: string,\n public readonly cause?: unknown\n ) {\n super(message);\n this.name = new.target.name;\n }\n}\n\n/** Thrown when WebGPU is required but not available in the host browser. */\nexport class WebGPUUnavailableError extends LocalmWebError {}\n\n/** Thrown when a model fails to load (network, parsing, runtime init). */\nexport class ModelLoadError extends LocalmWebError {}\n\n/** Thrown when an inference call is made before a model has loaded. */\nexport class ModelNotLoadedError extends LocalmWebError {}\n\n/** Thrown when a model id is not present in the curated registry. */\nexport class UnknownModelError extends LocalmWebError {}\n\n/** Thrown when generation is aborted via an `AbortSignal`. */\nexport class GenerationAbortedError extends LocalmWebError {}\n\n/** Thrown when the browser denies storage quota for the model cache. */\nexport class QuotaExceededError extends LocalmWebError {}\n\n/** Thrown when no usable backend is available on the current platform. */\nexport class BackendNotAvailableError extends LocalmWebError {}\n\n/**\n * Thrown when structured output (JSON mode or JSON Schema constrained\n * decoding) fails to parse as valid JSON.\n *\n * Wraps the underlying `SyntaxError` from `JSON.parse` so consumers can\n * distinguish SDK-issued failures from unrelated runtime exceptions.\n */\nexport class StructuredOutputError extends LocalmWebError {}\n","import type { ModelLoadPhase } from \"../types\";\n\nconst DOWNLOAD_PATTERN: RegExp = /\\b(fetch|download|loading from cache|cache hit|param)/i;\nconst COMPILE_PATTERN: RegExp = /\\b(compil|shader|kernel|tensor|init|allocat|warm)/i;\n\n/**\n * Classify a runtime status text into a {@link ModelLoadPhase}.\n *\n * Heuristic: match download-related verbs first (network or cache hits are\n * treated as `downloading`), then compile-related verbs. Anything else falls\n * back to the generic `loading` bucket. 
The `ready` phase is never returned\n * here — callers emit it explicitly when the load resolves.\n *\n * @param text - The raw status string from the runtime.\n * @returns The classified phase.\n */\nexport function classifyLoadPhase(text: string): ModelLoadPhase {\n if (DOWNLOAD_PATTERN.test(text)) return \"downloading\";\n if (COMPILE_PATTERN.test(text)) return \"compiling\";\n return \"loading\";\n}\n","import type { Engine } from \"./engine\";\nimport type { GenerationOptions, Message, ProgressCallback, Role, TokenChunk } from \"../types\";\nimport { GenerationAbortedError, ModelLoadError, ModelNotLoadedError } from \"./exceptions\";\nimport { classifyLoadPhase } from \"./load-phase\";\n\ntype TransformersModule = typeof import(\"@huggingface/transformers\");\ntype Pipeline = Awaited<ReturnType<TransformersModule[\"pipeline\"]>>;\n\nlet transformersModulePromise: Promise<TransformersModule> | null = null;\n\n/**\n * Lazy import of `@huggingface/transformers`.\n *\n * The package is an **optional** peer dependency. Loading it on demand keeps\n * the WebLLM hot path free of the ~MB-sized transformers.js graph for users\n * who never trigger the fallback.\n */\nasync function loadTransformers(): Promise<TransformersModule> {\n if (!transformersModulePromise) {\n transformersModulePromise = import(\"@huggingface/transformers\");\n }\n return transformersModulePromise;\n}\n\ninterface SamplingKwargs {\n max_new_tokens?: number;\n temperature?: number;\n top_p?: number;\n top_k?: number;\n do_sample?: boolean;\n}\n\nfunction buildSamplingKwargs(options: GenerationOptions): SamplingKwargs {\n const kwargs: SamplingKwargs = {};\n if (options.maxTokens !== undefined) kwargs.max_new_tokens = options.maxTokens;\n if (options.temperature !== undefined) kwargs.temperature = options.temperature;\n if (options.topP !== undefined) kwargs.top_p = options.topP;\n if (options.topK !== undefined) kwargs.top_k = options.topK;\n if (options.temperature !== undefined && options.temperature > 0) {\n kwargs.do_sample = true;\n }\n return kwargs;\n}\n\ninterface TransformersChatMessage {\n role: Role;\n content: string;\n}\n\nfunction toChatMessages(messages: Message[]): TransformersChatMessage[] {\n return messages.map((m) => ({ role: m.role, content: m.content }));\n}\n\ninterface TextGenerationOutputItem {\n generated_text: string | TransformersChatMessage[];\n}\n\nfunction lastAssistantContent(\n output: TextGenerationOutputItem | TextGenerationOutputItem[] | undefined,\n promptText: string\n): string {\n const item = Array.isArray(output) ? output[0] : output;\n if (!item) return \"\";\n const generated = item.generated_text;\n if (typeof generated === \"string\") {\n return generated.startsWith(promptText) ? 
generated.slice(promptText.length) : generated;\n }\n if (Array.isArray(generated)) {\n for (let i = generated.length - 1; i >= 0; i -= 1) {\n const turn = generated[i];\n if (turn && turn.role === \"assistant\") return turn.content;\n }\n }\n return \"\";\n}\n\ninterface AsyncQueue<T> {\n push(item: T): void;\n end(error?: Error): void;\n iterator: AsyncIterable<T>;\n}\n\n/**\n * Minimal async queue used to bridge `TextStreamer`'s push-based callback into\n * an `AsyncIterable` consumable by the SDK's streaming API. Errors passed to\n * `end()` reject queued and pending `next()` calls alike.\n */\nfunction createAsyncQueue<T>(): AsyncQueue<T> {\n const buffer: T[] = [];\n let waiters: { resolve: (value: IteratorResult<T>) => void; reject: (err: Error) => void }[] = [];\n let finished: boolean = false;\n let pendingError: Error | null = null;\n\n const drain = (): void => {\n while (buffer.length > 0 && waiters.length > 0) {\n const waiter = waiters.shift();\n const value = buffer.shift();\n waiter?.resolve({ value: value as T, done: false });\n }\n if ((finished || pendingError) && waiters.length > 0) {\n const all = waiters;\n waiters = [];\n for (const w of all) {\n if (pendingError) {\n w.reject(pendingError);\n } else {\n w.resolve({ value: undefined as unknown as T, done: true });\n }\n }\n }\n };\n\n return {\n push(item: T): void {\n buffer.push(item);\n drain();\n },\n end(error?: Error): void {\n finished = true;\n if (error) pendingError = error;\n drain();\n },\n iterator: {\n [Symbol.asyncIterator](): AsyncIterator<T> {\n return {\n next(): Promise<IteratorResult<T>> {\n if (buffer.length > 0) {\n return Promise.resolve({ value: buffer.shift() as T, done: false });\n }\n if (pendingError) {\n const err = pendingError;\n pendingError = null;\n return Promise.reject(err);\n }\n if (finished) {\n return Promise.resolve({ value: undefined as unknown as T, done: true });\n }\n return new Promise<IteratorResult<T>>((resolve, reject) => waiters.push({ resolve, reject }));\n },\n };\n },\n },\n };\n}\n\n/**\n * Inference engine backed by\n * [`@huggingface/transformers`](https://github.com/huggingface/transformers.js)\n * (transformers.js).\n *\n * Used by the SDK as the **fallback path** for browsers without WebGPU and as\n * an explicit alternative backend selectable via `LMTaskCreateOptions.backend`.\n * It runs ONNX models on WebGPU when available and on WASM-SIMD otherwise, so\n * a wider range of browsers can run language models with a graceful — if\n * slower — degradation.\n *\n * The package is an optional peer dependency; import it on the consumer side\n * before instantiating tasks that resolve to this backend.\n */\nexport class TransformersTextEngine implements Engine {\n private generator: Pipeline | null = null;\n private currentAbortController: AbortController | null = null;\n\n isLoaded(): boolean {\n return this.generator !== null;\n }\n\n async load(modelId: string, onProgress?: ProgressCallback): Promise<void> {\n const transformers = await loadTransformers();\n try {\n const generator = await transformers.pipeline(\"text-generation\", modelId, {\n progress_callback: (report: { progress?: number; status?: string }): void => {\n const progress: number = typeof report.progress === \"number\" ? report.progress / 100 : 0;\n const text: string = report.status ?? 
\"loading\";\n onProgress?.({\n progress,\n text,\n loaded: 0,\n total: 0,\n phase: classifyLoadPhase(text),\n });\n },\n } as Parameters<TransformersModule[\"pipeline\"]>[2]);\n this.generator = generator;\n onProgress?.({\n progress: 1,\n text: \"Model ready.\",\n loaded: 0,\n total: 0,\n phase: \"ready\",\n });\n } catch (err) {\n throw new ModelLoadError(`Failed to load transformers model \"${modelId}\".`, err);\n }\n }\n\n async generate(messages: Message[], options: GenerationOptions = {}): Promise<string> {\n const generator = this.requireGenerator();\n if (options.signal?.aborted) {\n throw new GenerationAbortedError(\"Generation aborted before start.\");\n }\n const chat = toChatMessages(messages);\n try {\n const output = (await (\n generator as unknown as (\n input: TransformersChatMessage[],\n kw?: SamplingKwargs\n ) => Promise<TextGenerationOutputItem | TextGenerationOutputItem[]>\n )(chat, buildSamplingKwargs(options))) as\n | TextGenerationOutputItem\n | TextGenerationOutputItem[];\n return lastAssistantContent(output, \"\");\n } catch (err) {\n if (err instanceof GenerationAbortedError) throw err;\n throw new ModelLoadError(\"Transformers generation failed.\", err);\n }\n }\n\n async *stream(messages: Message[], options: GenerationOptions = {}): AsyncIterable<TokenChunk> {\n const generator = this.requireGenerator();\n if (options.signal?.aborted) {\n throw new GenerationAbortedError(\"Generation aborted before start.\");\n }\n const transformers = await loadTransformers();\n const queue = createAsyncQueue<TokenChunk>();\n let index: number = 0;\n const tokenizer = (\n generator as unknown as {\n tokenizer: ConstructorParameters<TransformersModule[\"TextStreamer\"]>[0];\n }\n ).tokenizer;\n const streamer = new transformers.TextStreamer(tokenizer, {\n skip_prompt: true,\n skip_special_tokens: true,\n callback_function: (text: string): void => {\n if (text) {\n queue.push({ text, index, done: false });\n index += 1;\n }\n },\n });\n\n const abortPromise: Promise<never> = new Promise<never>((_, reject) => {\n if (options.signal) {\n const onAbort = (): void => {\n reject(new GenerationAbortedError(\"Generation aborted by signal.\"));\n };\n options.signal.addEventListener(\"abort\", onAbort, { once: true });\n }\n });\n\n const chat = toChatMessages(messages);\n const generation = (\n generator as unknown as (\n input: TransformersChatMessage[],\n kw?: SamplingKwargs & { streamer?: unknown }\n ) => Promise<TextGenerationOutputItem | TextGenerationOutputItem[]>\n )(chat, { ...buildSamplingKwargs(options), streamer })\n .then((): void => {\n queue.push({ text: \"\", index, done: true });\n queue.end();\n })\n .catch((err: unknown): void => {\n queue.end(err instanceof Error ? 
err : new Error(String(err)));\n });\n\n void Promise.race([generation, abortPromise]).catch((err: unknown): void => {\n if (err instanceof GenerationAbortedError) queue.end(err);\n });\n\n for await (const chunk of queue.iterator) {\n yield chunk;\n }\n }\n\n async complete(prompt: string, options: GenerationOptions = {}): Promise<string> {\n const generator = this.requireGenerator();\n if (options.signal?.aborted) {\n throw new GenerationAbortedError(\"Generation aborted before start.\");\n }\n try {\n const output = (await (\n generator as unknown as (\n input: string,\n kw?: SamplingKwargs\n ) => Promise<TextGenerationOutputItem | TextGenerationOutputItem[]>\n )(prompt, buildSamplingKwargs(options))) as\n | TextGenerationOutputItem\n | TextGenerationOutputItem[];\n return lastAssistantContent(output, prompt);\n } catch (err) {\n if (err instanceof GenerationAbortedError) throw err;\n throw new ModelLoadError(\"Transformers completion failed.\", err);\n }\n }\n\n async *streamCompletion(\n prompt: string,\n options: GenerationOptions = {}\n ): AsyncIterable<TokenChunk> {\n const generator = this.requireGenerator();\n if (options.signal?.aborted) {\n throw new GenerationAbortedError(\"Generation aborted before start.\");\n }\n const transformers = await loadTransformers();\n const queue = createAsyncQueue<TokenChunk>();\n let index: number = 0;\n const tokenizer = (\n generator as unknown as {\n tokenizer: ConstructorParameters<TransformersModule[\"TextStreamer\"]>[0];\n }\n ).tokenizer;\n const streamer = new transformers.TextStreamer(tokenizer, {\n skip_prompt: true,\n skip_special_tokens: true,\n callback_function: (text: string): void => {\n if (text) {\n queue.push({ text, index, done: false });\n index += 1;\n }\n },\n });\n\n (\n generator as unknown as (\n input: string,\n kw?: SamplingKwargs & { streamer?: unknown }\n ) => Promise<TextGenerationOutputItem | TextGenerationOutputItem[]>\n )(prompt, { ...buildSamplingKwargs(options), streamer })\n .then((): void => {\n queue.push({ text: \"\", index, done: true });\n queue.end();\n })\n .catch((err: unknown): void => {\n queue.end(err instanceof Error ? err : new Error(String(err)));\n });\n\n if (options.signal) {\n options.signal.addEventListener(\n \"abort\",\n (): void => {\n queue.end(new GenerationAbortedError(\"Generation aborted by signal.\"));\n },\n { once: true }\n );\n }\n\n for await (const chunk of queue.iterator) {\n yield chunk;\n }\n }\n\n async unload(): Promise<void> {\n if (this.generator) {\n const disposable = this.generator as unknown as { dispose?: () => Promise<void> };\n if (typeof disposable.dispose === \"function\") {\n await disposable.dispose();\n }\n this.generator = null;\n }\n this.currentAbortController?.abort();\n this.currentAbortController = null;\n }\n\n private requireGenerator(): Pipeline {\n if (!this.generator) {\n throw new ModelNotLoadedError(\n \"TransformersTextEngine not loaded. Call load() before generation.\"\n );\n }\n return this.generator;\n }\n}\n","/**\n * JSON Schema helpers for structured output.\n *\n * The SDK delegates the actual constrained decoding to the underlying\n * runtime (xgrammar inside WebLLM today, ORT-Web equivalent later). 
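For\n * example, a `{ type: \"object\" }` schema value is serialized to the string\n * `'{\"type\":\"object\"}'` before it reaches the runtime. 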
These\n * helpers normalize user input — turning a JS object schema into the\n * JSON-string shape that WebLLM's `response_format.schema` expects — and\n * parse the runtime's textual output back into typed JSON.\n */\n\nimport { StructuredOutputError } from \"../core/exceptions\";\n\n/**\n * Minimal structural sanity check for a JSON Schema.\n *\n * Does not validate the schema against the JSON Schema meta-schema. The goal\n * is to fail fast on obvious mistakes (passing a string, an array, `null`)\n * before handing the value off to the runtime, where errors surface much\n * later and with much worse messages.\n *\n * @param schema - Candidate JSON Schema object.\n * @throws StructuredOutputError when `schema` is not a plain object or has\n * no recognizable schema shape (`type`, `$ref`, `oneOf`, `anyOf`, `allOf`,\n * `enum`).\n */\nexport function assertJsonSchema(schema: unknown): asserts schema is object {\n if (schema === null || typeof schema !== \"object\" || Array.isArray(schema)) {\n throw new StructuredOutputError(\"jsonSchema must be a plain object describing a JSON Schema.\");\n }\n const keys: string[] = Object.keys(schema);\n const recognized: readonly string[] = [\n \"type\",\n \"$ref\",\n \"oneOf\",\n \"anyOf\",\n \"allOf\",\n \"enum\",\n \"const\",\n \"properties\",\n ];\n if (!keys.some((key) => recognized.includes(key))) {\n throw new StructuredOutputError(\n \"jsonSchema does not look like a JSON Schema (missing type/$ref/oneOf/anyOf/allOf/enum/const/properties).\"\n );\n }\n}\n\n/**\n * Serialize a JSON Schema object for the WebLLM `response_format.schema`\n * field.\n *\n * WebLLM expects the schema as a JSON-encoded string (xgrammar parses it\n * server-side). Validates the shape via {@link assertJsonSchema} first.\n *\n * @param schema - JSON Schema object.\n * @returns The schema serialized as a JSON string.\n * @throws StructuredOutputError when `schema` is not a recognizable JSON\n * Schema shape.\n */\nexport function serializeJsonSchema(schema: unknown): string {\n assertJsonSchema(schema);\n return JSON.stringify(schema);\n}\n\n/**\n * Parse the textual output of a structured-decoding generation as JSON.\n *\n * @typeParam T - The expected parsed shape. The function does not validate\n * the parsed value against `T`; that is the caller's responsibility.\n * @param text - Raw text returned by the engine.\n * @returns The parsed JSON value cast to `T`.\n * @throws StructuredOutputError when the text is not valid JSON.\n */\nexport function parseStructuredOutput<T = unknown>(text: string): T {\n try {\n return JSON.parse(text) as T;\n } catch (err) {\n throw new StructuredOutputError(\n \"Engine output is not valid JSON. 
The model may have ignored the constrained decoding directive.\",\n err\n );\n }\n}\n","import type { Engine } from \"./engine\";\nimport { classifyLoadPhase } from \"./load-phase\";\nimport type { GenerationOptions, Message, ProgressCallback, TokenChunk } from \"../types\";\nimport {\n GenerationAbortedError,\n ModelLoadError,\n ModelNotLoadedError,\n WebGPUUnavailableError,\n} from \"./exceptions\";\nimport { serializeJsonSchema } from \"../structured/json-schema\";\n\ntype WebLLMModule = typeof import(\"@mlc-ai/web-llm\");\ntype MLCEngine = import(\"@mlc-ai/web-llm\").MLCEngineInterface;\ntype ChatCompletionMessageParam = import(\"@mlc-ai/web-llm\").ChatCompletionMessageParam;\ntype ResponseFormat = import(\"@mlc-ai/web-llm\").ResponseFormat;\n\nlet webllmModulePromise: Promise<WebLLMModule> | null = null;\n\nasync function loadWebLLM(): Promise<WebLLMModule> {\n if (!webllmModulePromise) {\n webllmModulePromise = import(\"@mlc-ai/web-llm\");\n }\n return webllmModulePromise;\n}\n\nfunction isWebGPUAvailable(): boolean {\n return typeof navigator !== \"undefined\" && \"gpu\" in navigator;\n}\n\ninterface SamplingParams {\n max_tokens?: number;\n temperature?: number;\n top_p?: number;\n}\n\nfunction buildSamplingParams(options: GenerationOptions): SamplingParams {\n const params: SamplingParams = {};\n if (options.maxTokens !== undefined) params.max_tokens = options.maxTokens;\n if (options.temperature !== undefined) params.temperature = options.temperature;\n if (options.topP !== undefined) params.top_p = options.topP;\n return params;\n}\n\n/**\n * Build the WebLLM `response_format` payload from generation options.\n *\n * Returns `undefined` when the caller has not requested structured output —\n * letting WebLLM use its default free-text decoding path. When `jsonSchema`\n * is set it takes priority and is serialized into the `schema` field\n * (xgrammar parses it server-side). When only `json` is set the payload\n * carries `{ type: \"json_object\" }` for unconstrained-but-valid JSON.\n */\nfunction buildResponseFormat(options: GenerationOptions): ResponseFormat | undefined {\n if (options.jsonSchema !== undefined) {\n return { type: \"json_object\", schema: serializeJsonSchema(options.jsonSchema) };\n }\n if (options.json) {\n return { type: \"json_object\" };\n }\n return undefined;\n}\n\nfunction toChatMessages(messages: Message[]): ChatCompletionMessageParam[] {\n return messages.map((m): ChatCompletionMessageParam => {\n switch (m.role) {\n case \"system\":\n return { role: \"system\", content: m.content };\n case \"user\":\n return { role: \"user\", content: m.content };\n case \"assistant\":\n return { role: \"assistant\", content: m.content };\n case \"tool\":\n return { role: \"tool\", content: m.content, tool_call_id: m.name ?? \"\" };\n }\n });\n}\n\n/**\n * Inference engine backed by [WebLLM (MLC)](https://github.com/mlc-ai/web-llm).\n *\n * Requires WebGPU. Since v0.5, browsers without WebGPU are routed to the\n * transformers.js fallback ({@link TransformersTextEngine}) when the backend\n * is left on `\"auto\"`.\n */\nexport class WebLLMEngine implements Engine {\n private engine: MLCEngine | null = null;\n\n isLoaded(): boolean {\n return this.engine !== null;\n }\n\n async load(modelId: string, onProgress?: ProgressCallback): Promise<void> {\n if (!isWebGPUAvailable()) {\n throw new WebGPUUnavailableError(\n \"WebGPU is not available in this browser. 
Select the transformers backend (or leave backend set to auto) to use the transformers.js fallback instead.\"\n );\n }\n const webllm = await loadWebLLM();\n try {\n this.engine = await webllm.CreateMLCEngine(modelId, {\n initProgressCallback: (report): void => {\n onProgress?.({\n progress: report.progress,\n text: report.text,\n loaded: 0,\n total: 0,\n phase: classifyLoadPhase(report.text),\n });\n },\n });\n onProgress?.({\n progress: 1,\n text: \"Model ready.\",\n loaded: 0,\n total: 0,\n phase: \"ready\",\n });\n } catch (err) {\n throw new ModelLoadError(`Failed to load model \"${modelId}\".`, err);\n }\n }\n\n async generate(messages: Message[], options: GenerationOptions = {}): Promise<string> {\n const engine = this.requireEngine();\n if (options.signal?.aborted) {\n throw new GenerationAbortedError(\"Generation aborted before start.\");\n }\n const responseFormat = buildResponseFormat(options);\n const completion = await engine.chat.completions.create({\n ...buildSamplingParams(options),\n messages: toChatMessages(messages),\n stream: false,\n ...(responseFormat ? { response_format: responseFormat } : {}),\n });\n return completion.choices[0]?.message?.content ?? \"\";\n }\n\n async *stream(messages: Message[], options: GenerationOptions = {}): AsyncIterable<TokenChunk> {\n const engine = this.requireEngine();\n if (options.signal?.aborted) {\n throw new GenerationAbortedError(\"Generation aborted before start.\");\n }\n const responseFormat = buildResponseFormat(options);\n const completion = await engine.chat.completions.create({\n ...buildSamplingParams(options),\n messages: toChatMessages(messages),\n stream: true,\n ...(responseFormat ? { response_format: responseFormat } : {}),\n });\n let index: number = 0;\n let finished: boolean = false;\n try {\n for await (const chunk of completion) {\n if (options.signal?.aborted) {\n throw new GenerationAbortedError(\"Generation aborted by signal.\");\n }\n const choice = chunk.choices[0];\n const delta = choice?.delta?.content ?? \"\";\n if (delta) {\n yield { text: delta, index, done: false };\n index += 1;\n }\n if (choice?.finish_reason) {\n finished = true;\n yield { text: \"\", index, done: true };\n index += 1;\n }\n }\n if (!finished) {\n yield { text: \"\", index, done: true };\n }\n } catch (err) {\n if (err instanceof GenerationAbortedError) throw err;\n throw new ModelLoadError(\"Streaming generation failed.\", err);\n }\n }\n\n async complete(prompt: string, options: GenerationOptions = {}): Promise<string> {\n const engine = this.requireEngine();\n if (options.signal?.aborted) {\n throw new GenerationAbortedError(\"Generation aborted before start.\");\n }\n const responseFormat = buildResponseFormat(options);\n const completion = await engine.completions.create({\n ...buildSamplingParams(options),\n prompt,\n stream: false,\n ...(responseFormat ? { response_format: responseFormat } : {}),\n });\n return completion.choices[0]?.text ?? \"\";\n }\n\n async *streamCompletion(\n prompt: string,\n options: GenerationOptions = {}\n ): AsyncIterable<TokenChunk> {\n const engine = this.requireEngine();\n if (options.signal?.aborted) {\n throw new GenerationAbortedError(\"Generation aborted before start.\");\n }\n const responseFormat = buildResponseFormat(options);\n const completion = await engine.completions.create({\n ...buildSamplingParams(options),\n prompt,\n stream: true,\n ...(responseFormat ? 
{ response_format: responseFormat } : {}),\n });\n let index: number = 0;\n let finished: boolean = false;\n try {\n for await (const chunk of completion) {\n if (options.signal?.aborted) {\n throw new GenerationAbortedError(\"Generation aborted by signal.\");\n }\n const choice = chunk.choices[0];\n const delta = choice?.text ?? \"\";\n if (delta) {\n yield { text: delta, index, done: false };\n index += 1;\n }\n if (choice?.finish_reason) {\n finished = true;\n yield { text: \"\", index, done: true };\n index += 1;\n }\n }\n if (!finished) {\n yield { text: \"\", index, done: true };\n }\n } catch (err) {\n if (err instanceof GenerationAbortedError) throw err;\n throw new ModelLoadError(\"Streaming completion failed.\", err);\n }\n }\n\n async unload(): Promise<void> {\n if (this.engine) {\n await this.engine.unload();\n this.engine = null;\n }\n }\n\n private requireEngine(): MLCEngine {\n if (!this.engine) {\n throw new ModelNotLoadedError(\"Engine not loaded. Call load() before generation.\");\n }\n return this.engine;\n }\n}\n","import type { GenerationOptions, Message, ModelLoadProgress, TokenChunk } from \"../types\";\n\n/**\n * Subset of {@link GenerationOptions} that survives `postMessage`.\n *\n * `AbortSignal` cannot be cloned across the worker boundary, so it is replaced\n * by a separate {@link AbortRequest} message keyed on the same operation id.\n */\nexport type SerializableGenerationOptions = Omit<GenerationOptions, \"signal\">;\n\n/** Strip `signal` from a {@link GenerationOptions} before posting it. */\nexport function toSerializableOptions(\n options: GenerationOptions = {}\n): SerializableGenerationOptions {\n const { signal: _signal, ...rest } = options;\n void _signal;\n return rest;\n}\n\n/** Operation request sent from the main thread to the worker. */\nexport type WorkerRequest =\n | { op: \"load\"; id: number; modelId: string }\n | {\n op: \"generate\";\n id: number;\n messages: Message[];\n options: SerializableGenerationOptions;\n }\n | {\n op: \"stream\";\n id: number;\n messages: Message[];\n options: SerializableGenerationOptions;\n }\n | {\n op: \"complete\";\n id: number;\n prompt: string;\n options: SerializableGenerationOptions;\n }\n | {\n op: \"stream-completion\";\n id: number;\n prompt: string;\n options: SerializableGenerationOptions;\n }\n | { op: \"abort\"; id: number }\n | { op: \"unload\"; id: number }\n | { op: \"isLoaded\"; id: number };\n\n/** Operation response sent from the worker back to the main thread. */\nexport type WorkerResponse =\n | { op: \"loaded\"; id: number }\n | { op: \"generated\"; id: number; text: string }\n | { op: \"progress\"; id: number; payload: ModelLoadProgress }\n | { op: \"token\"; id: number; chunk: TokenChunk }\n | { op: \"stream-end\"; id: number }\n | { op: \"error\"; id: number; name: string; message: string }\n | { op: \"unloaded\"; id: number }\n | { op: \"is-loaded\"; id: number; value: boolean };\n\n/** Subset of `Worker` we depend on. Lets tests inject a mock. */\nexport interface WorkerLike {\n postMessage(message: WorkerRequest): void;\n addEventListener(type: \"message\", listener: (event: MessageEvent<WorkerResponse>) => void): void;\n removeEventListener(\n type: \"message\",\n listener: (event: MessageEvent<WorkerResponse>) => void\n ): void;\n terminate(): void;\n}\n\n/** Internal alias used when the message direction is irrelevant (logging, debug). 
*/\nexport type AbortRequest = Extract<WorkerRequest, { op: \"abort\" }>;\n","import { GenerationAbortedError, ModelLoadError, ModelNotLoadedError } from \"./exceptions\";\nimport type { Engine } from \"./engine\";\nimport type { GenerationOptions, Message, ProgressCallback, TokenChunk } from \"../types\";\nimport {\n toSerializableOptions,\n type WorkerLike,\n type WorkerRequest,\n type WorkerResponse,\n} from \"../worker/protocol\";\n\ninterface PendingGenerate {\n resolve: (text: string) => void;\n reject: (err: Error) => void;\n}\n\ninterface PendingStream {\n push: (chunk: TokenChunk) => void;\n end: () => void;\n fail: (err: Error) => void;\n}\n\n/**\n * Engine implementation that proxies all calls to a Web Worker.\n *\n * The worker holds the actual {@link WebLLMEngine}; this class is a thin RPC\n * shell that serializes requests, tracks pending operations by a numeric id,\n * and turns worker responses back into Promises and async iterables.\n *\n * Use {@link createInferenceWorker} to obtain a real worker. Tests can pass a\n * {@link WorkerLike} mock implementing the same `postMessage` /\n * `addEventListener` surface.\n */\nexport class WorkerEngine implements Engine {\n private nextId: number = 1;\n private loaded: boolean = false;\n private currentLoad: { resolve: () => void; reject: (e: Error) => void } | null = null;\n private currentLoadId: number = 0;\n private currentLoadProgress: ProgressCallback | undefined = undefined;\n private currentUnload: { resolve: () => void; reject: (e: Error) => void } | null = null;\n private currentUnloadId: number = 0;\n private pendingGenerates: Map<number, PendingGenerate> = new Map();\n private pendingStreams: Map<number, PendingStream> = new Map();\n\n private readonly listener: (event: MessageEvent<WorkerResponse>) => void;\n\n constructor(private readonly worker: WorkerLike) {\n this.listener = (event): void => this.handleMessage(event.data);\n this.worker.addEventListener(\"message\", this.listener);\n }\n\n isLoaded(): boolean {\n return this.loaded;\n }\n\n async load(modelId: string, onProgress?: ProgressCallback): Promise<void> {\n if (this.currentLoad) {\n throw new ModelLoadError(\"Another load is already in progress.\");\n }\n const id: number = this.allocateId();\n this.currentLoadId = id;\n this.currentLoadProgress = onProgress;\n return new Promise<void>((resolve, reject) => {\n this.currentLoad = { resolve, reject };\n this.send({ op: \"load\", id, modelId });\n });\n }\n\n async generate(messages: Message[], options: GenerationOptions = {}): Promise<string> {\n const id: number = this.allocateId();\n return new Promise<string>((resolve, reject) => {\n this.pendingGenerates.set(id, { resolve, reject });\n this.send({\n op: \"generate\",\n id,\n messages,\n options: toSerializableOptions(options),\n });\n options.signal?.addEventListener(\"abort\", () => this.send({ op: \"abort\", id }));\n });\n }\n\n async *stream(messages: Message[], options: GenerationOptions = {}): AsyncIterable<TokenChunk> {\n const id: number = this.allocateId();\n const queue: TokenChunk[] = [];\n let done: boolean = false;\n let error: Error | null = null;\n let notify: (() => void) | null = null;\n\n const wakeup = (): void => {\n if (notify) {\n const fn = notify;\n notify = null;\n fn();\n }\n };\n\n this.pendingStreams.set(id, {\n push: (chunk): void => {\n queue.push(chunk);\n wakeup();\n },\n end: (): void => {\n done = true;\n wakeup();\n },\n fail: (err): void => {\n error = err;\n done = true;\n wakeup();\n },\n });\n\n this.send({\n op: 
\"stream\",\n id,\n messages,\n options: toSerializableOptions(options),\n });\n options.signal?.addEventListener(\"abort\", () => this.send({ op: \"abort\", id }));\n\n try {\n while (true) {\n if (queue.length > 0) {\n const chunk = queue.shift();\n if (chunk) yield chunk;\n continue;\n }\n if (error) throw error;\n if (done) return;\n await new Promise<void>((r) => {\n notify = r;\n });\n }\n } finally {\n this.pendingStreams.delete(id);\n }\n }\n\n async complete(prompt: string, options: GenerationOptions = {}): Promise<string> {\n const id: number = this.allocateId();\n return new Promise<string>((resolve, reject) => {\n this.pendingGenerates.set(id, { resolve, reject });\n this.send({\n op: \"complete\",\n id,\n prompt,\n options: toSerializableOptions(options),\n });\n options.signal?.addEventListener(\"abort\", () => this.send({ op: \"abort\", id }));\n });\n }\n\n async *streamCompletion(\n prompt: string,\n options: GenerationOptions = {}\n ): AsyncIterable<TokenChunk> {\n const id: number = this.allocateId();\n const queue: TokenChunk[] = [];\n let done: boolean = false;\n let error: Error | null = null;\n let notify: (() => void) | null = null;\n\n const wakeup = (): void => {\n if (notify) {\n const fn = notify;\n notify = null;\n fn();\n }\n };\n\n this.pendingStreams.set(id, {\n push: (chunk): void => {\n queue.push(chunk);\n wakeup();\n },\n end: (): void => {\n done = true;\n wakeup();\n },\n fail: (err): void => {\n error = err;\n done = true;\n wakeup();\n },\n });\n\n this.send({\n op: \"stream-completion\",\n id,\n prompt,\n options: toSerializableOptions(options),\n });\n options.signal?.addEventListener(\"abort\", () => this.send({ op: \"abort\", id }));\n\n try {\n while (true) {\n if (queue.length > 0) {\n const chunk = queue.shift();\n if (chunk) yield chunk;\n continue;\n }\n if (error) throw error;\n if (done) return;\n await new Promise<void>((r) => {\n notify = r;\n });\n }\n } finally {\n this.pendingStreams.delete(id);\n }\n }\n\n async unload(): Promise<void> {\n if (!this.loaded) return;\n if (this.currentUnload) {\n throw new ModelLoadError(\"Another unload is already in progress.\");\n }\n const id: number = this.allocateId();\n this.currentUnloadId = id;\n return new Promise<void>((resolve, reject) => {\n this.currentUnload = { resolve, reject };\n this.send({ op: \"unload\", id });\n });\n }\n\n /** Tear down the underlying worker. The engine is unusable after this. 
*/\n terminate(): void {\n this.worker.removeEventListener(\"message\", this.listener);\n this.worker.terminate();\n this.loaded = false;\n }\n\n private allocateId(): number {\n const id = this.nextId;\n this.nextId += 1;\n return id;\n }\n\n private send(req: WorkerRequest): void {\n this.worker.postMessage(req);\n }\n\n private handleMessage(msg: WorkerResponse): void {\n switch (msg.op) {\n case \"loaded\":\n if (this.currentLoad && msg.id === this.currentLoadId) {\n this.loaded = true;\n this.currentLoad.resolve();\n this.currentLoad = null;\n this.currentLoadProgress = undefined;\n }\n return;\n case \"progress\":\n if (msg.id === this.currentLoadId) {\n this.currentLoadProgress?.(msg.payload);\n }\n return;\n case \"generated\": {\n const pending = this.pendingGenerates.get(msg.id);\n if (pending) {\n pending.resolve(msg.text);\n this.pendingGenerates.delete(msg.id);\n }\n return;\n }\n case \"token\": {\n const stream = this.pendingStreams.get(msg.id);\n stream?.push(msg.chunk);\n return;\n }\n case \"stream-end\": {\n const stream = this.pendingStreams.get(msg.id);\n stream?.end();\n return;\n }\n case \"unloaded\":\n if (this.currentUnload && msg.id === this.currentUnloadId) {\n this.loaded = false;\n this.currentUnload.resolve();\n this.currentUnload = null;\n }\n return;\n case \"is-loaded\":\n return;\n case \"error\": {\n const err = mapError(msg.name, msg.message);\n if (this.currentLoad && msg.id === this.currentLoadId) {\n this.currentLoad.reject(err);\n this.currentLoad = null;\n this.currentLoadProgress = undefined;\n return;\n }\n if (this.currentUnload && msg.id === this.currentUnloadId) {\n this.currentUnload.reject(err);\n this.currentUnload = null;\n return;\n }\n const generate = this.pendingGenerates.get(msg.id);\n if (generate) {\n generate.reject(err);\n this.pendingGenerates.delete(msg.id);\n return;\n }\n const stream = this.pendingStreams.get(msg.id);\n if (stream) {\n stream.fail(err);\n return;\n }\n return;\n }\n }\n }\n}\n\nfunction mapError(name: string, message: string): Error {\n switch (name) {\n case \"ModelLoadError\":\n return new ModelLoadError(message);\n case \"ModelNotLoadedError\":\n return new ModelNotLoadedError(message);\n case \"GenerationAbortedError\":\n return new GenerationAbortedError(message);\n default: {\n const err = new Error(message);\n err.name = name;\n return err;\n }\n }\n}\n","import type { ModelPreset } from \"../types\";\nimport { UnknownModelError } from \"../core/exceptions\";\n\n/**\n * Curated registry of supported models for v0.1.\n *\n * Each entry maps a friendly id (e.g. `\"phi-3.5-mini-int4\"`) to the underlying\n * runtime identifier and metadata. 
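For\n * example, `\"phi-3.5-mini-int4\"` maps to the WebLLM id\n * `Phi-3.5-mini-instruct-q4f16_1-MLC` and the Hub repo\n * `onnx-community/Phi-3.5-mini-instruct-onnx-web`. 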
Friendly ids are stable; backend ids may\n * change as upstream MLC packages evolve.\n *\n * Only models that have been validated to load in browsers with WebGPU and\n * that fit the SLM target (≤ 4B parameters at INT4) are included.\n */\nexport const MODEL_PRESETS: Readonly<Record<string, ModelPreset>> = Object.freeze({\n \"phi-3.5-mini-int4\": {\n id: \"phi-3.5-mini-int4\",\n family: \"Phi-3.5\",\n parameters: \"3.8B\",\n quantization: \"q4f16_1\",\n webllmId: \"Phi-3.5-mini-instruct-q4f16_1-MLC\",\n transformersId: \"onnx-community/Phi-3.5-mini-instruct-onnx-web\",\n contextWindow: 4096,\n description: \"Microsoft Phi-3.5 mini, INT4 quantized for browser inference.\",\n },\n \"llama-3.2-1b-int4\": {\n id: \"llama-3.2-1b-int4\",\n family: \"Llama-3.2\",\n parameters: \"1B\",\n quantization: \"q4f16_1\",\n webllmId: \"Llama-3.2-1B-Instruct-q4f16_1-MLC\",\n transformersId: \"onnx-community/Llama-3.2-1B-Instruct\",\n contextWindow: 4096,\n description: \"Meta Llama 3.2 1B Instruct, INT4 quantized.\",\n },\n \"qwen2.5-1.5b-int4\": {\n id: \"qwen2.5-1.5b-int4\",\n family: \"Qwen2.5\",\n parameters: \"1.5B\",\n quantization: \"q4f16_1\",\n webllmId: \"Qwen2.5-1.5B-Instruct-q4f16_1-MLC\",\n transformersId: \"onnx-community/Qwen2.5-1.5B-Instruct\",\n contextWindow: 4096,\n description: \"Alibaba Qwen 2.5 1.5B Instruct, INT4 quantized.\",\n },\n \"smollm2-360m-int8\": {\n id: \"smollm2-360m-int8\",\n family: \"SmolLM2\",\n parameters: \"360M\",\n quantization: \"q8\",\n webllmId: \"SmolLM2-360M-Instruct-q4f16_1-MLC\",\n transformersId: \"HuggingFaceTB/SmolLM2-360M-Instruct\",\n contextWindow: 2048,\n description:\n \"HuggingFace SmolLM2 360M Instruct — smallest viable chat model, ideal for the fallback path on low-end devices.\",\n },\n});\n\n/**\n * Resolve a friendly model id to its full preset metadata.\n *\n * @param modelId - Friendly id (e.g. `\"phi-3.5-mini-int4\"`).\n * @returns The matching preset.\n * @throws UnknownModelError if no preset matches.\n */\nexport function resolveModelPreset(modelId: string): ModelPreset {\n const preset = MODEL_PRESETS[modelId];\n if (!preset) {\n const available = Object.keys(MODEL_PRESETS).join(\", \");\n throw new UnknownModelError(`Unknown model \"${modelId}\". Available models: ${available}.`);\n }\n return preset;\n}\n\n/** Return the list of supported friendly model ids. */\nexport function listSupportedModels(): string[] {\n return Object.keys(MODEL_PRESETS);\n}\n\n/** Curated metadata for a supported embedding model. */\nexport interface EmbeddingPreset {\n /** Friendly identifier (e.g. `\"bge-small-en-v1.5\"`). */\n id: string;\n /** Family name (e.g. `\"BGE\"`). */\n family: string;\n /** Embedding dimension. */\n dimension: number;\n /** Maximum input length in tokens. */\n maxTokens: number;\n /** Identifier passed to `@huggingface/transformers`. */\n transformersId: string;\n /** Approximate quantization scheme (e.g. `\"fp32\"`, `\"int8\"`). */\n quantization: string;\n /** Short human description. 
*/\n description: string;\n}\n\n/**\n * Curated registry of supported embedding models for v0.3.\n *\n * Each entry maps a friendly id to the underlying transformers.js model id.\n */\nexport const EMBEDDING_PRESETS: Readonly<Record<string, EmbeddingPreset>> = Object.freeze({\n \"bge-small-en-v1.5\": {\n id: \"bge-small-en-v1.5\",\n family: \"BGE\",\n dimension: 384,\n maxTokens: 512,\n transformersId: \"Xenova/bge-small-en-v1.5\",\n quantization: \"fp32\",\n description: \"BAAI BGE small English v1.5, 384-dim sentence embeddings.\",\n },\n \"bge-base-en-v1.5\": {\n id: \"bge-base-en-v1.5\",\n family: \"BGE\",\n dimension: 768,\n maxTokens: 512,\n transformersId: \"Xenova/bge-base-en-v1.5\",\n quantization: \"fp32\",\n description: \"BAAI BGE base English v1.5, 768-dim sentence embeddings.\",\n },\n});\n\n/**\n * Resolve a friendly embedding model id to its full preset metadata.\n *\n * @param modelId - Friendly id (e.g. `\"bge-small-en-v1.5\"`).\n * @returns The matching preset.\n * @throws UnknownModelError if no preset matches.\n */\nexport function resolveEmbeddingPreset(modelId: string): EmbeddingPreset {\n const preset = EMBEDDING_PRESETS[modelId];\n if (!preset) {\n const available = Object.keys(EMBEDDING_PRESETS).join(\", \");\n throw new UnknownModelError(\n `Unknown embedding model \"${modelId}\". Available models: ${available}.`\n );\n }\n return preset;\n}\n\n/** Return the list of supported embedding model ids. */\nexport function listSupportedEmbeddingModels(): string[] {\n return Object.keys(EMBEDDING_PRESETS);\n}\n\n/** Curated metadata for a supported reranker (cross-encoder) model. */\nexport interface RerankerPreset {\n /** Friendly identifier (e.g. `\"bge-reranker-base\"`). */\n id: string;\n /** Family name (e.g. `\"BGE Reranker\"`). */\n family: string;\n /** Maximum input length in tokens (combined query + document). */\n maxTokens: number;\n /** Identifier passed to `@huggingface/transformers`. */\n transformersId: string;\n /** Approximate quantization (e.g. `\"fp32\"`). */\n quantization: string;\n /** Short human description. */\n description: string;\n}\n\n/**\n * Curated registry of supported reranker models for v0.3.\n */\nexport const RERANKER_PRESETS: Readonly<Record<string, RerankerPreset>> = Object.freeze({\n \"bge-reranker-base\": {\n id: \"bge-reranker-base\",\n family: \"BGE Reranker\",\n maxTokens: 512,\n transformersId: \"Xenova/bge-reranker-base\",\n quantization: \"fp32\",\n description: \"BAAI BGE reranker base — multilingual cross-encoder.\",\n },\n});\n\n/**\n * Resolve a friendly reranker model id to its full preset metadata.\n *\n * @param modelId - Friendly id (e.g. `\"bge-reranker-base\"`).\n * @throws UnknownModelError if no preset matches.\n */\nexport function resolveRerankerPreset(modelId: string): RerankerPreset {\n const preset = RERANKER_PRESETS[modelId];\n if (!preset) {\n const available = Object.keys(RERANKER_PRESETS).join(\", \");\n throw new UnknownModelError(\n `Unknown reranker model \"${modelId}\". Available models: ${available}.`\n );\n }\n return preset;\n}\n\n/** Return the list of supported reranker model ids. */\nexport function listSupportedRerankerModels(): string[] {\n return Object.keys(RERANKER_PRESETS);\n}\n","import type { WorkerLike } from \"./protocol\";\n\n/**\n * Spawn a new inference Web Worker.\n *\n * Uses Vite/webpack-friendly `new Worker(new URL(...), { type: \"module\" })`\n * syntax. 
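(These bundlers detect worker entries by statically analyzing exactly this\n * pattern, so a dynamic string URL would generally not be split out.) 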
The bundler emits the worker as a separate ES module chunk.\n *\n * Consumers normally do not call this directly — `LMTask.create()` invokes it\n * when `inWorker: true` is set. It is exported for advanced scenarios (custom\n * worker management, pooling, lifecycle integration with a host app).\n *\n * @returns A {@link WorkerLike}-compatible Worker instance.\n */\nexport function createInferenceWorker(): WorkerLike {\n return new Worker(new URL(\"./inference.worker.ts\", import.meta.url), {\n type: \"module\",\n }) as unknown as WorkerLike;\n}\n","import type { Engine } from \"../core/engine\";\nimport { BackendNotAvailableError } from \"../core/exceptions\";\nimport { TransformersTextEngine } from \"../core/transformers-engine\";\nimport { WebLLMEngine } from \"../core/webllm-engine\";\nimport { WorkerEngine } from \"../core/worker-engine\";\nimport { resolveModelPreset } from \"../presets/models\";\nimport { createInferenceWorker } from \"../worker/create-worker\";\nimport type { ModelPreset, ProgressCallback } from \"../types\";\n\n/**\n * Inference backend selector.\n *\n * - `\"auto\"` (default): pick WebLLM when WebGPU is available, fall back to\n * the transformers.js engine otherwise.\n * - `\"webllm\"`: force WebLLM. Throws `WebGPUUnavailableError` on browsers\n * without WebGPU.\n * - `\"transformers\"`: force the transformers.js engine. Loads from the\n * preset's `transformersId`; throws `BackendNotAvailableError` when the\n * preset has no `transformersId`.\n */\nexport type BackendChoice = \"auto\" | \"webllm\" | \"transformers\";\n\nfunction defaultWebGPUDetector(): boolean {\n return typeof navigator !== \"undefined\" && \"gpu\" in navigator;\n}\n\n/**\n * Pure backend resolver, exported for unit tests.\n *\n * @param choice - Caller's preference (`\"auto\"`, `\"webllm\"`, `\"transformers\"`).\n * @param preset - Resolved model preset.\n * @param webGPUAvailable - Whether WebGPU is available in the host environment.\n * @returns The concrete backend to instantiate.\n * @throws BackendNotAvailableError when the choice cannot be satisfied (e.g.\n * `\"transformers\"` requested but the preset has no `transformersId`, or\n * `\"auto\"` with no WebGPU and no `transformersId`).\n */\nexport function resolveBackend(\n choice: BackendChoice,\n preset: ModelPreset,\n webGPUAvailable: boolean\n): \"webllm\" | \"transformers\" {\n if (choice === \"webllm\") return \"webllm\";\n if (choice === \"transformers\") {\n if (!preset.transformersId) {\n throw new BackendNotAvailableError(\n `Model \"${preset.id}\" has no transformersId — cannot run on the transformers.js backend.`\n );\n }\n return \"transformers\";\n }\n if (webGPUAvailable) return \"webllm\";\n if (!preset.transformersId) {\n throw new BackendNotAvailableError(\n `WebGPU is unavailable and model \"${preset.id}\" has no transformersId for the fallback path.`\n );\n }\n return \"transformers\";\n}\n\n/** Common options accepted by every task's `create()` factory. */\nexport interface LMTaskCreateOptions {\n /** Optional callback for model load progress updates. */\n onProgress?: ProgressCallback;\n /**\n * Override the engine used for inference. Intended for testing.\n * Production callers should let the SDK pick a backend automatically.\n */\n engine?: Engine;\n /**\n * Run inference inside a Web Worker, isolating the UI thread from\n * tokenization and generation. **Default `true` from v0.3** — the\n * `WorkerEngine` is the recommended path. 
Pass `false` to keep\n * inference on the main thread (useful for environments without\n * `Worker` support or when debugging the runtime directly).\n *\n * Ignored when {@link engine} is provided.\n *\n * **Note (v0.5):** the bundled worker entry only supports the WebLLM\n * backend. When `backend` resolves to `\"transformers\"` the worker option\n * is forced to `false` and inference runs on the main thread. A worker\n * variant for the transformers.js path is on the v0.6 roadmap.\n */\n inWorker?: boolean;\n /**\n * Inference backend selector (v0.5+). Defaults to `\"auto\"` which picks\n * WebLLM when WebGPU is available and the transformers.js fallback when\n * it is not. See {@link BackendChoice}.\n */\n backend?: BackendChoice;\n}\n\n/** Internal payload returned by {@link LMTask.createEngine}. */\nexport interface ResolvedEngine {\n engine: Engine;\n preset: ModelPreset;\n}\n\n/**\n * Base class shared by the language-model tasks `Chat` and `Completion`.\n * (`Embeddings` and `Reranker` are standalone tasks with their own preset\n * registries and do not extend this class.)\n *\n * The base owns:\n * - resolving a friendly model id to a {@link ModelPreset};\n * - selecting and loading an {@link Engine} (defaulting to WebLLM);\n * - exposing `unload()` for cleanup.\n *\n * Subclasses add task-specific public methods (`send`, `stream`, etc.).\n */\nexport abstract class LMTask {\n protected constructor(\n /** Engine used for inference. */\n protected readonly engine: Engine,\n /** Resolved metadata for the loaded model. */\n public readonly preset: ModelPreset\n ) {}\n\n /**\n * Load a model into a backend and return the wired-up engine + preset.\n *\n * Subclasses call this from their static `create()` factories.\n *\n * @param modelId - Friendly model id from the registry.\n * @param options - Task creation options.\n */\n protected static async createEngine(\n modelId: string,\n options: LMTaskCreateOptions = {}\n ): Promise<ResolvedEngine> {\n const preset = resolveModelPreset(modelId);\n if (options.engine) {\n if (!options.engine.isLoaded()) {\n await options.engine.load(preset.webllmId, options.onProgress);\n }\n return { engine: options.engine, preset };\n }\n const choice: BackendChoice = options.backend ?? \"auto\";\n const resolved: \"webllm\" | \"transformers\" = resolveBackend(\n choice,\n preset,\n defaultWebGPUDetector()\n );\n const engine: Engine = LMTask.instantiateEngine(resolved, options);\n const loadId: string =\n resolved === \"transformers\" ? (preset.transformersId ?? \"\") : preset.webllmId;\n if (!engine.isLoaded()) {\n await engine.load(loadId, options.onProgress);\n }\n return { engine, preset };\n }\n\n private static instantiateEngine(\n resolved: \"webllm\" | \"transformers\",\n options: LMTaskCreateOptions\n ): Engine {\n if (resolved === \"transformers\") {\n // The bundled inference worker only supports WebLLM today, so force\n // main-thread execution when the transformers.js backend is selected.\n return new TransformersTextEngine();\n }\n const useWorker: boolean = options.inWorker ?? true;\n if (useWorker) {\n return new WorkerEngine(createInferenceWorker());\n }\n return new WebLLMEngine();\n }\n\n /** Release engine resources. Safe to call multiple times. */\n async unload(): Promise<void> {\n await this.engine.unload();\n }\n\n /** Whether the underlying engine has a loaded model. 
*/\n isLoaded(): boolean {\n return this.engine.isLoaded();\n }\n}\n","import { parseStructuredOutput } from \"./structured/json-schema\";\nimport type { FinishReason, Message } from \"./types\";\n\n/**\n * Result returned by `Chat.send()`.\n *\n * Holds the assistant's textual reply, the structured assistant message\n * (already appended to the chat history), and metadata about the generation.\n */\nexport class ChatReply {\n constructor(\n /** The assistant's reply text. */\n public readonly text: string,\n /** The structured assistant message (already appended to chat history). */\n public readonly message: Message,\n /** Number of tokens generated. 0 when the engine does not report it. */\n public readonly tokensGenerated: number,\n /** Why the generation loop stopped. */\n public readonly finishReason: FinishReason\n ) {}\n\n /**\n * Parse {@link ChatReply.text} as JSON.\n *\n * Intended for replies generated with `json: true` or `jsonSchema`.\n * The result is cast to `T` without runtime validation; pair with Zod /\n * Ajv on the call site if you need to verify the schema.\n *\n * @typeParam T - Expected parsed shape.\n * @returns The parsed JSON value.\n * @throws StructuredOutputError if the text is not valid JSON.\n */\n json<T = unknown>(): T {\n return parseStructuredOutput<T>(this.text);\n }\n}\n\n/**\n * Result returned by `Completion.predict()`.\n *\n * Holds the generated continuation text (the prompt itself is not included)\n * plus metadata about the generation loop.\n */\nexport class CompletionResult {\n constructor(\n /** The generated text (continuation only, prompt excluded). */\n public readonly text: string,\n /** The original prompt that was fed to the model. */\n public readonly prompt: string,\n /** Number of tokens generated. 0 when the engine does not report it. */\n public readonly tokensGenerated: number,\n /** Why the generation loop stopped. */\n public readonly finishReason: FinishReason\n ) {}\n\n /**\n * Parse {@link CompletionResult.text} as JSON.\n *\n * Intended for completions generated with `json: true` or `jsonSchema`.\n * The result is cast to `T` without runtime validation.\n *\n * @typeParam T - Expected parsed shape.\n * @returns The parsed JSON value.\n * @throws StructuredOutputError if the text is not valid JSON.\n */\n json<T = unknown>(): T {\n return parseStructuredOutput<T>(this.text);\n }\n}\n","import { LMTask, type LMTaskCreateOptions } from \"./lm-task\";\nimport type { Engine } from \"../core/engine\";\nimport { ChatReply } from \"../results\";\nimport type { GenerationOptions, Message, ModelPreset, TokenChunk } from \"../types\";\n\n/**\n * Multi-turn chat task.\n *\n * Maintains an in-memory conversation history and applies the chat template\n * configured for the loaded model. 
Use {@link Chat.create} to construct an\n * instance — the constructor is private.\n *\n * @example\n * ```ts\n * const chat = await Chat.create(\"phi-3.5-mini-int4\");\n * const reply = await chat.send(\"Explain ONNX in one sentence.\");\n * console.log(reply.text);\n * ```\n *\n * @example Streaming\n * ```ts\n * const controller = new AbortController();\n * for await (const token of chat.stream(\"Explain ONNX.\", { signal: controller.signal })) {\n * process.stdout.write(token.text);\n * }\n * ```\n */\nexport class Chat extends LMTask {\n private readonly history: Message[] = [];\n private systemPrompt: string | null = null;\n\n private constructor(engine: Engine, preset: ModelPreset) {\n super(engine, preset);\n }\n\n /**\n * Create and load a `Chat` task for the given model.\n *\n * @param modelId - Friendly model id from the registry (e.g. `\"phi-3.5-mini-int4\"`).\n * @param options - Optional creation options (progress callback, engine override).\n */\n static async create(modelId: string, options: LMTaskCreateOptions = {}): Promise<Chat> {\n const { engine, preset } = await LMTask.createEngine(modelId, options);\n return new Chat(engine, preset);\n }\n\n /** Set or replace the system prompt prepended to every conversation. */\n setSystemPrompt(prompt: string): void {\n this.systemPrompt = prompt;\n }\n\n /** Clear the system prompt. */\n clearSystemPrompt(): void {\n this.systemPrompt = null;\n }\n\n /** Reset the conversation history. The system prompt is preserved. */\n resetHistory(): void {\n this.history.length = 0;\n }\n\n /** A read-only snapshot of the conversation history. */\n getHistory(): readonly Message[] {\n return this.history.slice();\n }\n\n /**\n * Send a user message and await the full assistant reply.\n *\n * The user message and the assistant reply are appended to the history.\n *\n * @param message - The user-facing message text.\n * @param options - Generation options.\n * @returns A {@link ChatReply} with the assistant's reply.\n */\n async send(message: string, options: GenerationOptions = {}): Promise<ChatReply> {\n const messages = this.buildMessages(message);\n const text = await this.engine.generate(messages, options);\n const userMsg: Message = { role: \"user\", content: message };\n const assistantMsg: Message = { role: \"assistant\", content: text };\n this.history.push(userMsg, assistantMsg);\n return new ChatReply(text, assistantMsg, 0, \"stop\");\n }\n\n /**\n * Stream the assistant reply token-by-token as an async iterable.\n *\n * The full reply is appended to the history when the stream completes\n * normally. 
If the stream is aborted, neither message is appended.\n *\n * @param message - The user-facing message text.\n * @param options - Generation options including an optional `signal`.\n */\n async *stream(message: string, options: GenerationOptions = {}): AsyncIterable<TokenChunk> {\n const messages = this.buildMessages(message);\n const userMsg: Message = { role: \"user\", content: message };\n let acc: string = \"\";\n for await (const chunk of this.engine.stream(messages, options)) {\n acc += chunk.text;\n yield chunk;\n }\n const assistantMsg: Message = { role: \"assistant\", content: acc };\n this.history.push(userMsg, assistantMsg);\n }\n\n private buildMessages(userMessage: string): Message[] {\n const messages: Message[] = [];\n if (this.systemPrompt) {\n messages.push({ role: \"system\", content: this.systemPrompt });\n }\n messages.push(...this.history);\n messages.push({ role: \"user\", content: userMessage });\n return messages;\n }\n}\n","import { LMTask, type LMTaskCreateOptions } from \"./lm-task\";\nimport type { Engine } from \"../core/engine\";\nimport { CompletionResult } from \"../results\";\nimport type { GenerationOptions, ModelPreset, TokenChunk } from \"../types\";\n\n/**\n * Raw text-completion task.\n *\n * Unlike {@link Chat}, `Completion` does not maintain a conversation history\n * and does not apply a chat template. The prompt is fed to the model verbatim\n * and the model continues it. Useful for \"Once upon a time…\" style generation,\n * code completion, or any scenario where chat formatting would interfere.\n *\n * Use {@link Completion.create} to construct an instance — the constructor is\n * private.\n *\n * @example\n * ```ts\n * const comp = await Completion.create(\"qwen2.5-1.5b-int4\");\n * const result = await comp.predict(\"Once upon a time\", { maxTokens: 50 });\n * console.log(result.text);\n * ```\n *\n * @example Streaming\n * ```ts\n * const controller = new AbortController();\n * for await (const token of comp.stream(\"def fibonacci(n):\", { signal: controller.signal })) {\n * process.stdout.write(token.text);\n * }\n * ```\n */\nexport class Completion extends LMTask {\n private constructor(engine: Engine, preset: ModelPreset) {\n super(engine, preset);\n }\n\n /**\n * Create and load a `Completion` task for the given model.\n *\n * @param modelId - Friendly model id from the registry (e.g. 
`\"qwen2.5-1.5b-int4\"`).\n * @param options - Optional creation options (progress callback, engine override).\n */\n static async create(modelId: string, options: LMTaskCreateOptions = {}): Promise<Completion> {\n const { engine, preset } = await LMTask.createEngine(modelId, options);\n return new Completion(engine, preset);\n }\n\n /**\n * Generate a continuation for the given prompt.\n *\n * @param prompt - Raw text fed to the model.\n * @param options - Generation options.\n * @returns A {@link CompletionResult} with the generated continuation.\n */\n async predict(prompt: string, options: GenerationOptions = {}): Promise<CompletionResult> {\n const text = await this.engine.complete(prompt, options);\n return new CompletionResult(text, prompt, 0, \"stop\");\n }\n\n /**\n * Stream a continuation for the given prompt as an async iterable of token\n * chunks.\n *\n * @param prompt - Raw text fed to the model.\n * @param options - Generation options including an optional `signal`.\n */\n async *stream(prompt: string, options: GenerationOptions = {}): AsyncIterable<TokenChunk> {\n for await (const chunk of this.engine.streamCompletion(prompt, options)) {\n yield chunk;\n }\n }\n}\n","import { ModelLoadError, ModelNotLoadedError } from \"../core/exceptions\";\nimport { resolveEmbeddingPreset, type EmbeddingPreset } from \"../presets/models\";\nimport type { ProgressCallback } from \"../types\";\n\n/** Options accepted by {@link Embeddings.create}. */\nexport interface EmbeddingsCreateOptions {\n /** Optional callback for model load progress updates. */\n onProgress?: ProgressCallback;\n /** Override the embedding pipeline. Intended for testing. */\n pipeline?: EmbedPipeline;\n}\n\n/** Options accepted by {@link Embeddings.embed}. */\nexport interface EmbedOptions {\n /** L2-normalize each vector. Recommended for cosine similarity downstream. Default `true`. */\n normalize?: boolean;\n /** Pooling strategy. BGE-style models use `\"cls\"`. Most sentence-transformers use `\"mean\"`. Default `\"mean\"`. */\n pooling?: \"mean\" | \"cls\";\n}\n\n/**\n * Minimal pipeline contract that {@link Embeddings} depends on.\n *\n * The default implementation wraps `@huggingface/transformers`. Tests inject\n * a fake satisfying the same shape — they never load the real runtime.\n */\nexport interface EmbedPipeline {\n /**\n * Run the encoder on a batch of inputs and return raw vectors.\n *\n * @param texts - Input strings.\n * @param options - Pooling + normalization passed to the underlying pipeline.\n */\n embed(texts: string[], options: Required<EmbedOptions>): Promise<number[][]>;\n /** Release pipeline resources. */\n unload?(): Promise<void>;\n}\n\ntype TransformersModule = typeof import(\"@huggingface/transformers\");\n\nlet transformersModulePromise: Promise<TransformersModule> | null = null;\n\nasync function loadTransformers(): Promise<TransformersModule> {\n if (!transformersModulePromise) {\n transformersModulePromise = import(\"@huggingface/transformers\");\n }\n return transformersModulePromise;\n}\n\nasync function buildDefaultPipeline(\n preset: EmbeddingPreset,\n onProgress?: ProgressCallback\n): Promise<EmbedPipeline> {\n const transformers = await loadTransformers();\n try {\n const pipe = await transformers.pipeline(\"feature-extraction\", preset.transformersId, {\n progress_callback: (report: unknown): void => {\n if (!onProgress) return;\n const r = report as { progress?: number; status?: string };\n onProgress({\n progress: typeof r.progress === \"number\" ? 
r.progress / 100 : 0,\n text: r.status ?? \"\",\n loaded: 0,\n total: 0,\n phase: \"downloading\",\n });\n },\n });\n return {\n async embed(texts, options): Promise<number[][]> {\n const output = await pipe(texts, {\n pooling: options.pooling,\n normalize: options.normalize,\n });\n return output.tolist();\n },\n async unload(): Promise<void> {\n if (typeof (pipe as { dispose?: () => Promise<void> }).dispose === \"function\") {\n await (pipe as unknown as { dispose: () => Promise<void> }).dispose();\n }\n },\n };\n } catch (err) {\n throw new ModelLoadError(`Failed to load embedding model \"${preset.id}\".`, err);\n }\n}\n\n/**\n * Sentence embedding task backed by `@huggingface/transformers`.\n *\n * Use {@link Embeddings.create} to construct an instance — the constructor is\n * private. The default backend lazy-loads the transformers.js runtime; tests\n * inject a {@link EmbedPipeline} mock instead.\n *\n * @example\n * ```ts\n * const emb = await Embeddings.create(\"bge-small-en-v1.5\");\n * const vectors = await emb.embed([\"hello world\", \"another sentence\"]);\n * console.log(vectors[0].length); // 384\n * ```\n */\nexport class Embeddings {\n private constructor(\n private readonly pipeline: EmbedPipeline,\n /** Resolved metadata for the loaded model. */\n public readonly preset: EmbeddingPreset\n ) {}\n\n /**\n * Create and load an `Embeddings` task for the given model.\n *\n * @param modelId - Friendly id from the embedding registry.\n * @param options - Optional creation options.\n * @throws UnknownModelError if `modelId` is not in the registry.\n * @throws ModelLoadError if the underlying pipeline fails to load.\n */\n static async create(modelId: string, options: EmbeddingsCreateOptions = {}): Promise<Embeddings> {\n const preset = resolveEmbeddingPreset(modelId);\n const pipeline = options.pipeline ?? (await buildDefaultPipeline(preset, options.onProgress));\n return new Embeddings(pipeline, preset);\n }\n\n /**\n * Encode an array of strings into dense vectors.\n *\n * Returns one vector per input, in the same order. Empty input array\n * returns an empty array (no error).\n *\n * @param texts - Input strings.\n * @param options - Pooling + normalization. Defaults: `pooling: \"mean\"`, `normalize: true`.\n */\n async embed(texts: string[], options: EmbedOptions = {}): Promise<number[][]> {\n if (texts.length === 0) return [];\n if (!this.pipeline) {\n throw new ModelNotLoadedError(\"Embeddings pipeline not initialized.\");\n }\n const merged: Required<EmbedOptions> = {\n normalize: options.normalize ?? true,\n pooling: options.pooling ?? \"mean\",\n };\n return this.pipeline.embed(texts, merged);\n }\n\n /**\n * Convenience: encode a single string and return its vector.\n *\n * @param text - Input string.\n * @param options - Forwarded to {@link Embeddings.embed}.\n */\n async embedSingle(text: string, options: EmbedOptions = {}): Promise<number[]> {\n const [vec] = await this.embed([text], options);\n if (!vec) {\n throw new ModelLoadError(\"Embedding pipeline returned no result.\");\n }\n return vec;\n }\n\n /** Embedding dimension exposed by the loaded model. */\n get dimension(): number {\n return this.preset.dimension;\n }\n\n /** Release pipeline resources. Safe to call multiple times. 
*/\n async unload(): Promise<void> {\n await this.pipeline.unload?.();\n }\n}\n","import { ModelLoadError, ModelNotLoadedError } from \"../core/exceptions\";\nimport { resolveRerankerPreset, type RerankerPreset } from \"../presets/models\";\nimport type { ProgressCallback } from \"../types\";\n\n/** Options accepted by {@link Reranker.create}. */\nexport interface RerankerCreateOptions {\n /** Optional callback for model load progress updates. */\n onProgress?: ProgressCallback;\n /** Override the rerank pipeline. Intended for testing. */\n pipeline?: RerankPipeline;\n}\n\n/** Options accepted by {@link Reranker.score}. */\nexport interface RerankOptions {\n /**\n * Apply sigmoid to logits to map scores into `[0, 1]`. Recommended when the\n * downstream code uses scores as probabilities. Default `false` (raw logits).\n */\n sigmoid?: boolean;\n}\n\n/** A document paired with its score, for {@link Reranker.rank}. */\nexport interface RankedDocument {\n /** The document text. */\n text: string;\n /** Score from the cross-encoder. */\n score: number;\n /** Original index of the document in the input array. */\n index: number;\n}\n\n/**\n * Minimal pipeline contract that {@link Reranker} depends on.\n *\n * The default implementation wraps `@huggingface/transformers`. Tests inject\n * a fake satisfying the same shape — they never load the real runtime.\n */\nexport interface RerankPipeline {\n /**\n * Score `(query, doc)` pairs. One score per doc, in the same order.\n *\n * @param query - Single query string.\n * @param docs - Documents to score against the query.\n */\n score(query: string, docs: string[]): Promise<number[]>;\n /** Release pipeline resources. */\n unload?(): Promise<void>;\n}\n\ntype TransformersModule = typeof import(\"@huggingface/transformers\");\n\nlet transformersModulePromise: Promise<TransformersModule> | null = null;\n\nasync function loadTransformers(): Promise<TransformersModule> {\n if (!transformersModulePromise) {\n transformersModulePromise = import(\"@huggingface/transformers\");\n }\n return transformersModulePromise;\n}\n\nfunction sigmoidValue(x: number): number {\n return 1 / (1 + Math.exp(-x));\n}\n\nasync function buildDefaultPipeline(\n preset: RerankerPreset,\n onProgress?: ProgressCallback\n): Promise<RerankPipeline> {\n const transformers = await loadTransformers();\n try {\n const tokenizer = await transformers.AutoTokenizer.from_pretrained(preset.transformersId, {\n progress_callback: (report: unknown): void => {\n if (!onProgress) return;\n const r = report as { progress?: number; status?: string };\n onProgress({\n progress: typeof r.progress === \"number\" ? r.progress / 100 : 0,\n text: r.status ?? \"\",\n loaded: 0,\n total: 0,\n phase: \"downloading\",\n });\n },\n });\n const model = await transformers.AutoModelForSequenceClassification.from_pretrained(\n preset.transformersId,\n {\n progress_callback: (report: unknown): void => {\n if (!onProgress) return;\n const r = report as { progress?: number; status?: string };\n onProgress({\n progress: typeof r.progress === \"number\" ? r.progress / 100 : 0,\n text: r.status ?? 
\"\",\n loaded: 0,\n total: 0,\n phase: \"downloading\",\n });\n },\n }\n );\n return {\n async score(query, docs): Promise<number[]> {\n if (docs.length === 0) return [];\n const queries: string[] = docs.map(() => query);\n // `transformers.js` AutoTokenizer accepts `(text, options)` where\n // `options.text_pair` carries the second sequence; pair-input typing\n // isn't exported, so we cast through `unknown`.\n const tokenize = tokenizer as unknown as (\n text: string[],\n options: Record<string, unknown>\n ) => Record<string, unknown>;\n const inputs = tokenize(queries, {\n text_pair: docs,\n padding: true,\n truncation: true,\n max_length: preset.maxTokens,\n });\n const callModel = model as unknown as (\n inputs: Record<string, unknown>\n ) => Promise<{ logits: { tolist: () => number[][] } }>;\n const outputs = await callModel(inputs);\n const logits: number[][] = outputs.logits.tolist();\n return logits.map((row) => row[0] ?? 0);\n },\n async unload(): Promise<void> {\n const m = model as unknown as { dispose?: () => Promise<unknown> };\n if (typeof m.dispose === \"function\") await m.dispose();\n },\n };\n } catch (err) {\n throw new ModelLoadError(`Failed to load reranker model \"${preset.id}\".`, err);\n }\n}\n\n/**\n * Cross-encoder reranking task backed by `@huggingface/transformers`.\n *\n * Use {@link Reranker.create} to construct an instance — the constructor is\n * private. Useful as a second-stage step in a retrieve-then-rerank pipeline:\n * pull top-K candidates with a fast embedding similarity, then rerank with\n * a cross-encoder for higher precision.\n *\n * @example\n * ```ts\n * const rerank = await Reranker.create(\"bge-reranker-base\");\n * const scores = await rerank.score(\"what is webgpu?\", [\n * \"WebGPU is a modern graphics API\",\n * \"Bananas grow on trees\",\n * ]);\n * // scores[0] >> scores[1]\n * ```\n *\n * @example Ranked output sorted by score\n * ```ts\n * const ranked = await rerank.rank(\"what is webgpu?\", docs);\n * for (const r of ranked) console.log(r.score, r.text);\n * ```\n */\nexport class Reranker {\n private constructor(\n private readonly pipeline: RerankPipeline,\n /** Resolved metadata for the loaded model. */\n public readonly preset: RerankerPreset\n ) {}\n\n /**\n * Create and load a `Reranker` task for the given model.\n *\n * @param modelId - Friendly id from the reranker registry.\n * @param options - Optional creation options.\n * @throws UnknownModelError if `modelId` is not in the registry.\n * @throws ModelLoadError if the underlying pipeline fails to load.\n */\n static async create(modelId: string, options: RerankerCreateOptions = {}): Promise<Reranker> {\n const preset = resolveRerankerPreset(modelId);\n const pipeline = options.pipeline ?? (await buildDefaultPipeline(preset, options.onProgress));\n return new Reranker(pipeline, preset);\n }\n\n /**\n * Score each document against the query. Returns one score per doc, in\n * the same order. Empty `docs` returns `[]` (no error).\n *\n * @param query - Query string.\n * @param docs - Documents to score.\n * @param options - `sigmoid: true` maps logits into `[0, 1]`.\n */\n async score(query: string, docs: string[], options: RerankOptions = {}): Promise<number[]> {\n if (docs.length === 0) return [];\n if (!this.pipeline) {\n throw new ModelNotLoadedError(\"Reranker pipeline not initialized.\");\n }\n const raw = await this.pipeline.score(query, docs);\n return options.sigmoid ? 
raw.map(sigmoidValue) : raw;\n }\n\n /**\n * Score and sort documents by score in descending order. Returns a list of\n * {@link RankedDocument}s carrying the original index.\n *\n * @param query - Query string.\n * @param docs - Documents to rank.\n * @param options - Forwarded to {@link Reranker.score}.\n */\n async rank(\n query: string,\n docs: string[],\n options: RerankOptions = {}\n ): Promise<RankedDocument[]> {\n const scores = await this.score(query, docs, options);\n const ranked: RankedDocument[] = scores.map((score, index) => {\n const text: string = docs[index] ?? \"\";\n return { text, score, index };\n });\n ranked.sort((a, b) => b.score - a.score);\n return ranked;\n }\n\n /** Release pipeline resources. Safe to call multiple times. */\n async unload(): Promise<void> {\n await this.pipeline.unload?.();\n }\n}\n","import { MODEL_PRESETS, resolveModelPreset } from \"../presets/models\";\nimport { UnknownModelError } from \"../core/exceptions\";\n\n/** Snapshot of a single cached model's metadata. */\nexport interface CachedModelEntry {\n /** Friendly id from the registry (e.g. `\"llama-3.2-1b-int4\"`). */\n id: string;\n /** Backend-specific id (e.g. WebLLM `webllmId`). */\n backendId: string;\n /** Human-readable family name. */\n family: string;\n /** Approx parameter count, e.g. `\"1B\"`. */\n parameters: string;\n}\n\n/** Aggregate storage usage reported by the browser. */\nexport interface CacheUsage {\n /** Bytes used by the entire origin's storage (not just our cache). */\n usage: number;\n /** Bytes the browser is willing to give the origin. */\n quota: number;\n}\n\n/**\n * Hooks the {@link ModelCache} uses to talk to the underlying runtime and\n * the browser. Tests inject mocks; production code leaves them undefined,\n * letting `ModelCache` resolve the real `@mlc-ai/web-llm` helpers and\n * `navigator.storage.estimate()` lazily.\n */\nexport interface ModelCacheOptions {\n /** Override `hasModelInCache` from the runtime. */\n hasModel?: (backendId: string) => Promise<boolean>;\n /** Override `deleteModelInCache` from the runtime. */\n deleteModel?: (backendId: string) => Promise<void>;\n /** Override `navigator.storage.estimate()`. */\n estimate?: () => Promise<CacheUsage>;\n}\n\ntype WebLLMCacheModule = {\n hasModelInCache: (id: string) => Promise<boolean>;\n deleteModelInCache: (id: string) => Promise<void>;\n};\n\nlet webllmCachePromise: Promise<WebLLMCacheModule> | null = null;\n\nasync function loadWebLLMCacheHelpers(): Promise<WebLLMCacheModule> {\n if (!webllmCachePromise) {\n webllmCachePromise = import(\"@mlc-ai/web-llm\").then((m) => ({\n hasModelInCache: m.hasModelInCache,\n deleteModelInCache: m.deleteModelInCache,\n }));\n }\n return webllmCachePromise;\n}\n\nasync function defaultEstimate(): Promise<CacheUsage> {\n if (typeof navigator === \"undefined\" || !navigator.storage?.estimate) {\n return { usage: 0, quota: 0 };\n }\n const estimate = await navigator.storage.estimate();\n return {\n usage: estimate.usage ?? 0,\n quota: estimate.quota ?? 0,\n };\n}\n\n/**\n * Inspect and manage cached model weights.\n *\n * `localm-web` does not download or cache weights itself — that work is owned\n * by `@mlc-ai/web-llm`, which writes to the browser Cache API. 
`ModelCache`\n * is a thin wrapper that lets a consuming app surface cache state in its UI:\n * \"this model is downloaded\", \"you have 1.4 GB cached, free up space?\",\n * \"clear all models on logout\".\n *\n * @example\n * ```ts\n * const cache = new ModelCache();\n * if (await cache.has(\"llama-3.2-1b-int4\")) {\n * console.log(\"ready offline\");\n * }\n * const cached = await cache.list();\n * await cache.delete(\"phi-3.5-mini-int4\");\n * const usage = await cache.estimateUsage();\n * console.log(`${usage.usage} / ${usage.quota} bytes`);\n * ```\n */\nexport class ModelCache {\n private readonly hasModelHook: ((id: string) => Promise<boolean>) | undefined;\n private readonly deleteModelHook: ((id: string) => Promise<void>) | undefined;\n private readonly estimateHook: () => Promise<CacheUsage>;\n\n constructor(options: ModelCacheOptions = {}) {\n this.hasModelHook = options.hasModel;\n this.deleteModelHook = options.deleteModel;\n this.estimateHook = options.estimate ?? defaultEstimate;\n }\n\n /**\n * Whether the model's weights are present in the browser cache.\n *\n * @param modelId - Friendly id from the registry.\n * @throws UnknownModelError if `modelId` is not in the registry.\n */\n async has(modelId: string): Promise<boolean> {\n const backendId: string = resolveModelPreset(modelId).webllmId;\n const fn = this.hasModelHook ?? (await loadWebLLMCacheHelpers()).hasModelInCache;\n return fn(backendId);\n }\n\n /**\n * Delete a single model's weights from the browser cache. No-op when the\n * model is not cached.\n *\n * @param modelId - Friendly id from the registry.\n * @throws UnknownModelError if `modelId` is not in the registry.\n */\n async delete(modelId: string): Promise<void> {\n const backendId: string = resolveModelPreset(modelId).webllmId;\n const fn = this.deleteModelHook ?? (await loadWebLLMCacheHelpers()).deleteModelInCache;\n await fn(backendId);\n }\n\n /**\n * List the registry models that are currently cached.\n *\n * Iterates `MODEL_PRESETS` and probes each one. Only returns models known\n * to the SDK — models cached by external WebLLM calls outside our registry\n * are not included.\n *\n * @returns Empty list when nothing is cached.\n */\n async list(): Promise<CachedModelEntry[]> {\n const fn = this.hasModelHook ?? (await loadWebLLMCacheHelpers()).hasModelInCache;\n const probes = await Promise.all(\n Object.values(MODEL_PRESETS).map(async (preset) => {\n const cached: boolean = await fn(preset.webllmId);\n if (!cached) return null;\n const entry: CachedModelEntry = {\n id: preset.id,\n backendId: preset.webllmId,\n family: preset.family,\n parameters: preset.parameters,\n };\n return entry;\n })\n );\n return probes.filter((p): p is CachedModelEntry => p !== null);\n }\n\n /**\n * Delete every registry model from the cache. Useful for logout flows or\n * \"reset\" buttons. Models cached outside the registry are not touched.\n */\n async clear(): Promise<void> {\n const fn = this.deleteModelHook ?? (await loadWebLLMCacheHelpers()).deleteModelInCache;\n await Promise.all(Object.values(MODEL_PRESETS).map((p) => fn(p.webllmId)));\n }\n\n /**\n * Aggregate storage stats from the browser. 
Returned numbers cover the\n * entire origin (Cache API + IndexedDB + Service Workers + OPFS), not\n * just our model cache — use it for \"you have X of Y available\" hints.\n */\n async estimateUsage(): Promise<CacheUsage> {\n return this.estimateHook();\n }\n\n /**\n * Throw a descriptive error if the given id is not in the registry.\n * Exposed for code paths that want to validate before calling other\n * methods (those already throw on their own).\n *\n * @throws UnknownModelError\n */\n static assertKnown(modelId: string): void {\n if (!(modelId in MODEL_PRESETS)) {\n const available = Object.keys(MODEL_PRESETS).join(\", \");\n throw new UnknownModelError(`Unknown model \"${modelId}\". Available models: ${available}.`);\n }\n }\n}\n","import type { TokenChunk } from \"../types\";\n\n/**\n * Drain an async iterable of token chunks into a single string.\n *\n * Useful in tests, for non-streaming consumers, and as a one-line way to\n * reconstruct the final text from a `Chat.stream(...)` call.\n *\n * @param stream - The token-chunk async iterable to consume.\n * @returns The concatenation of every chunk's `text` field.\n */\nexport async function collectStream(stream: AsyncIterable<TokenChunk>): Promise<string> {\n let acc: string = \"\";\n for await (const chunk of stream) {\n acc += chunk.text;\n }\n return acc;\n}\n\n/**\n * Wrap an async iterable so that each `TokenChunk` is also passed to a\n * caller-supplied side-effect callback before being yielded downstream.\n *\n * This is intentionally a passthrough — it does not buffer.\n *\n * @param stream - The upstream token-chunk async iterable.\n * @param onChunk - Side-effect invoked for every chunk.\n * @returns A new async iterable yielding the same chunks.\n */\nexport async function* tap(\n stream: AsyncIterable<TokenChunk>,\n onChunk: (chunk: TokenChunk) => void\n): AsyncIterable<TokenChunk> {\n for await (const chunk of stream) {\n onChunk(chunk);\n yield chunk;\n }\n}\n","/**\n * localm-web — browser-only TypeScript SDK for running LLMs and SLMs locally.\n *\n * Public API surface for v0.1.\n *\n * @packageDocumentation\n */\n\nexport { Chat } from \"./tasks/chat\";\nexport { Completion } from \"./tasks/completion\";\nexport { Embeddings } from \"./tasks/embeddings\";\nexport type { EmbeddingsCreateOptions, EmbedOptions, EmbedPipeline } from \"./tasks/embeddings\";\nexport { Reranker } from \"./tasks/reranker\";\nexport type {\n RerankerCreateOptions,\n RerankOptions,\n RerankPipeline,\n RankedDocument,\n} from \"./tasks/reranker\";\nexport { LMTask, resolveBackend } from \"./tasks/lm-task\";\nexport type { LMTaskCreateOptions, BackendChoice } from \"./tasks/lm-task\";\n\nexport { ChatReply, CompletionResult } from \"./results\";\n\nexport {\n MODEL_PRESETS,\n resolveModelPreset,\n listSupportedModels,\n EMBEDDING_PRESETS,\n resolveEmbeddingPreset,\n listSupportedEmbeddingModels,\n RERANKER_PRESETS,\n resolveRerankerPreset,\n listSupportedRerankerModels,\n} from \"./presets/models\";\nexport type { EmbeddingPreset, RerankerPreset } from \"./presets/models\";\n\nexport {\n LocalmWebError,\n WebGPUUnavailableError,\n ModelLoadError,\n ModelNotLoadedError,\n UnknownModelError,\n GenerationAbortedError,\n QuotaExceededError,\n BackendNotAvailableError,\n StructuredOutputError,\n} from \"./core/exceptions\";\n\nexport {\n assertJsonSchema,\n serializeJsonSchema,\n parseStructuredOutput,\n} from \"./structured/json-schema\";\n\nexport type { Engine } from \"./core/engine\";\nexport { WorkerEngine } from 
\"./core/worker-engine\";\nexport { WebLLMEngine } from \"./core/webllm-engine\";\nexport { TransformersTextEngine } from \"./core/transformers-engine\";\nexport { createInferenceWorker } from \"./worker/create-worker\";\nexport type { WorkerLike } from \"./worker/protocol\";\n\nexport { ModelCache } from \"./cache\";\nexport type { CachedModelEntry, CacheUsage, ModelCacheOptions } from \"./cache\";\n\nexport { collectStream, tap } from \"./streaming/token-stream\";\n\nexport type {\n Role,\n FinishReason,\n Message,\n GenerationOptions,\n ModelLoadProgress,\n ModelLoadPhase,\n ProgressCallback,\n TokenChunk,\n ModelPreset,\n} from \"./types\";\n\n/** Current package version. Updated at release time. */\nexport const VERSION: string = \"0.5.0\";\n"],"names":["transformersModulePromise","loadTransformers","toChatMessages","buildDefaultPipeline"],"mappings":"AASO,MAAM,uBAAuB,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA,EAKxC,YACE,SACgB,OAChB;AACA,UAAM,OAAO;AAFG,SAAA,QAAA;AAGhB,SAAK,OAAO,WAAW;AAAA,EACzB;AACF;AAGO,MAAM,+BAA+B,eAAe;AAAC;AAGrD,MAAM,uBAAuB,eAAe;AAAC;AAG7C,MAAM,4BAA4B,eAAe;AAAC;AAGlD,MAAM,0BAA0B,eAAe;AAAC;AAGhD,MAAM,+BAA+B,eAAe;AAAC;AAGrD,MAAM,2BAA2B,eAAe;AAAC;AAGjD,MAAM,iCAAiC,eAAe;AAAC;AASvD,MAAM,8BAA8B,eAAe;AAAC;ACjD3D,MAAM,mBAA2B;AACjC,MAAM,kBAA0B;AAazB,SAAS,kBAAkB,MAA8B;AAC9D,MAAI,iBAAiB,KAAK,IAAI,EAAG,QAAO;AACxC,MAAI,gBAAgB,KAAK,IAAI,EAAG,QAAO;AACvC,SAAO;AACT;ACZA,IAAIA,8BAAgE;AASpE,eAAeC,qBAAgD;AAC7D,MAAI,CAACD,6BAA2B;AAC9BA,kCAA4B,OAAO,2BAA2B;AAAA,EAChE;AACA,SAAOA;AACT;AAUA,SAAS,oBAAoB,SAA4C;AACvE,QAAM,SAAyB,CAAA;AAC/B,MAAI,QAAQ,cAAc,OAAW,QAAO,iBAAiB,QAAQ;AACrE,MAAI,QAAQ,gBAAgB,OAAW,QAAO,cAAc,QAAQ;AACpE,MAAI,QAAQ,SAAS,OAAW,QAAO,QAAQ,QAAQ;AACvD,MAAI,QAAQ,SAAS,OAAW,QAAO,QAAQ,QAAQ;AACvD,MAAI,QAAQ,gBAAgB,UAAa,QAAQ,cAAc,GAAG;AAChE,WAAO,YAAY;AAAA,EACrB;AACA,SAAO;AACT;AAOA,SAASE,iBAAe,UAAgD;AACtE,SAAO,SAAS,IAAI,CAAC,OAAO,EAAE,MAAM,EAAE,MAAM,SAAS,EAAE,QAAA,EAAU;AACnE;AAMA,SAAS,qBACP,QACA,YACQ;AACR,QAAM,OAAO,MAAM,QAAQ,MAAM,IAAI,OAAO,CAAC,IAAI;AACjD,MAAI,CAAC,KAAM,QAAO;AAClB,QAAM,YAAY,KAAK;AACvB,MAAI,OAAO,cAAc,UAAU;AACjC,WAAO,UAAU,WAAW,UAAU,IAAI,UAAU,MAAM,WAAW,MAAM,IAAI;AAAA,EACjF;AACA,MAAI,MAAM,QAAQ,SAAS,GAAG;AAC5B,aAAS,IAAI,UAAU,SAAS,GAAG,KAAK,GAAG,KAAK,GAAG;AACjD,YAAM,OAAO,UAAU,CAAC;AACxB,UAAI,QAAQ,KAAK,SAAS,oBAAoB,KAAK;AAAA,IACrD;AAAA,EACF;AACA,SAAO;AACT;AAYA,SAAS,mBAAqC;AAC5C,QAAM,SAAc,CAAA;AACpB,MAAI,UAAkD,CAAA;AACtD,MAAI,WAAoB;AACxB,MAAI,eAA6B;AAEjC,QAAM,QAAQ,MAAY;AACxB,WAAO,OAAO,SAAS,KAAK,QAAQ,SAAS,GAAG;AAC9C,YAAM,WAAW,QAAQ,MAAA;AACzB,YAAM,QAAQ,OAAO,MAAA;AACrB,iBAAW,EAAE,OAAmB,MAAM,MAAA,CAAO;AAAA,IAC/C;AACA,SAAK,YAAY,iBAAiB,QAAQ,SAAS,GAAG;AACpD,YAAM,MAAM;AACZ,gBAAU,CAAA;AACV,iBAAW,KAAK,KAAK;AACnB,YAAI,cAAc;AAChB,YAAE,EAAE,OAAO,QAA2B,MAAM,MAAM;AAAA,QACpD,OAAO;AACL,YAAE,EAAE,OAAO,QAA2B,MAAM,MAAM;AAAA,QACpD;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL,KAAK,MAAe;AAClB,aAAO,KAAK,IAAI;AAChB,YAAA;AAAA,IACF;AAAA,IACA,IAAI,OAAqB;AACvB,iBAAW;AACX,UAAI,MAAO,gBAAe;AAC1B,YAAA;AAAA,IACF;AAAA,IACA,UAAU;AAAA,MACR,CAAC,OAAO,aAAa,IAAsB;AACzC,eAAO;AAAA,UACL,OAAmC;AACjC,gBAAI,OAAO,SAAS,GAAG;AACrB,qBAAO,QAAQ,QAAQ,EAAE,OAAO,OAAO,MAAA,GAAc,MAAM,OAAO;AAAA,YACpE;AACA,gBAAI,cAAc;AAChB,oBAAM,MAAM;AACZ,6BAAe;AACf,qBAAO,QAAQ,OAAO,GAAG;AAAA,YAC3B;AACA,gBAAI,UAAU;AACZ,qBAAO,QAAQ,QAAQ,EAAE,OAAO,QAA2B,MAAM,MAAM;AAAA,YACzE;AACA,mBAAO,IAAI,QAA2B,CAAC,YAAY,QAAQ,KAAK,OAAO,CAAC;AAAA,UAC1E;AAAA,QAAA;AAAA,MAEJ;AAAA,IAAA;AAAA,EACF;AAEJ;AAgBO,MAAM,uBAAyC;AAAA,EAC5C,YAA6B;AAAA,EAC7B,yBAAiD;AAAA,EAEzD,WAAoB;AAClB,WAAO,KAAK,cAAc;AAAA,EAC5B;AAAA,EAEA,MAAM,KAAK,SAAiB,YAA8C;AACxE,UAAM,eAAe,MAAMD,mBAAA;AAC3B,QAAI;AACF,YAAM,YAAY,MAAM,
aAAa,SAAS,mBAAmB,SAAS;AAAA,QACxE,mBAAmB,CAAC,WAAyD;AAC3E,gBAAM,WAAmB,OAAO,OAAO,aAAa,WAAW,OAAO,WAAW,MAAM;AACvF,gBAAM,OAAe,OAAO,UAAU;AACtC,uBAAa;AAAA,YACX;AAAA,YACA;AAAA,YACA,QAAQ;AAAA,YACR,OAAO;AAAA,YACP,OAAO,kBAAkB,IAAI;AAAA,UAAA,CAC9B;AAAA,QACH;AAAA,MAAA,CACgD;AAClD,WAAK,YAAY;AACjB,mBAAa;AAAA,QACX,UAAU;AAAA,QACV,MAAM;AAAA,QACN,QAAQ;AAAA,QACR,OAAO;AAAA,QACP,OAAO;AAAA,MAAA,CACR;AAAA,IACH,SAAS,KAAK;AACZ,YAAM,IAAI,eAAe,sCAAsC,OAAO,MAAM,GAAG;AAAA,IACjF;AAAA,EACF;AAAA,EAEA,MAAM,SAAS,UAAqB,UAA6B,IAAqB;AACpF,UAAM,YAAY,KAAK,iBAAA;AACvB,QAAI,QAAQ,QAAQ,SAAS;AAC3B,YAAM,IAAI,uBAAuB,kCAAkC;AAAA,IACrE;AACA,UAAM,OAAOC,iBAAe,QAAQ;AACpC,QAAI;AACF,YAAM,SAAU,MACd,UAIA,MAAM,oBAAoB,OAAO,CAAC;AAGpC,aAAO,qBAAqB,QAAQ,EAAE;AAAA,IACxC,SAAS,KAAK;AACZ,UAAI,eAAe,uBAAwB,OAAM;AACjD,YAAM,IAAI,eAAe,mCAAmC,GAAG;AAAA,IACjE;AAAA,EACF;AAAA,EAEA,OAAO,OAAO,UAAqB,UAA6B,IAA+B;AAC7F,UAAM,YAAY,KAAK,iBAAA;AACvB,QAAI,QAAQ,QAAQ,SAAS;AAC3B,YAAM,IAAI,uBAAuB,kCAAkC;AAAA,IACrE;AACA,UAAM,eAAe,MAAMD,mBAAA;AAC3B,UAAM,QAAQ,iBAAA;AACd,QAAI,QAAgB;AACpB,UAAM,YACJ,UAGA;AACF,UAAM,WAAW,IAAI,aAAa,aAAa,WAAW;AAAA,MACxD,aAAa;AAAA,MACb,qBAAqB;AAAA,MACrB,mBAAmB,CAAC,SAAuB;AACzC,YAAI,MAAM;AACR,gBAAM,KAAK,EAAE,MAAM,OAAO,MAAM,OAAO;AACvC,mBAAS;AAAA,QACX;AAAA,MACF;AAAA,IAAA,CACD;AAED,UAAM,eAA+B,IAAI,QAAe,CAAC,GAAG,WAAW;AACrE,UAAI,QAAQ,QAAQ;AAClB,cAAM,UAAU,MAAY;AAC1B,iBAAO,IAAI,uBAAuB,+BAA+B,CAAC;AAAA,QACpE;AACA,gBAAQ,OAAO,iBAAiB,SAAS,SAAS,EAAE,MAAM,MAAM;AAAA,MAClE;AAAA,IACF,CAAC;AAED,UAAM,OAAOC,iBAAe,QAAQ;AACpC,UAAM,aACJ,UAIA,MAAM,EAAE,GAAG,oBAAoB,OAAO,GAAG,SAAA,CAAU,EAClD,KAAK,MAAY;AAChB,YAAM,KAAK,EAAE,MAAM,IAAI,OAAO,MAAM,MAAM;AAC1C,YAAM,IAAA;AAAA,IACR,CAAC,EACA,MAAM,CAAC,QAAuB;AAC7B,YAAM,IAAI,eAAe,QAAQ,MAAM,IAAI,MAAM,OAAO,GAAG,CAAC,CAAC;AAAA,IAC/D,CAAC;AAEH,SAAK,QAAQ,KAAK,CAAC,YAAY,YAAY,CAAC,EAAE,MAAM,CAAC,QAAuB;AAC1E,UAAI,eAAe,uBAAwB,OAAM,IAAI,GAAG;AAAA,IAC1D,CAAC;AAED,qBAAiB,SAAS,MAAM,UAAU;AACxC,YAAM;AAAA,IACR;AAAA,EACF;AAAA,EAEA,MAAM,SAAS,QAAgB,UAA6B,IAAqB;AAC/E,UAAM,YAAY,KAAK,iBAAA;AACvB,QAAI,QAAQ,QAAQ,SAAS;AAC3B,YAAM,IAAI,uBAAuB,kCAAkC;AAAA,IACrE;AACA,QAAI;AACF,YAAM,SAAU,MACd,UAIA,QAAQ,oBAAoB,OAAO,CAAC;AAGtC,aAAO,qBAAqB,QAAQ,MAAM;AAAA,IAC5C,SAAS,KAAK;AACZ,UAAI,eAAe,uBAAwB,OAAM;AACjD,YAAM,IAAI,eAAe,mCAAmC,GAAG;AAAA,IACjE;AAAA,EACF;AAAA,EAEA,OAAO,iBACL,QACA,UAA6B,IACF;AAC3B,UAAM,YAAY,KAAK,iBAAA;AACvB,QAAI,QAAQ,QAAQ,SAAS;AAC3B,YAAM,IAAI,uBAAuB,kCAAkC;AAAA,IACrE;AACA,UAAM,eAAe,MAAMD,mBAAA;AAC3B,UAAM,QAAQ,iBAAA;AACd,QAAI,QAAgB;AACpB,UAAM,YACJ,UAGA;AACF,UAAM,WAAW,IAAI,aAAa,aAAa,WAAW;AAAA,MACxD,aAAa;AAAA,MACb,qBAAqB;AAAA,MACrB,mBAAmB,CAAC,SAAuB;AACzC,YAAI,MAAM;AACR,gBAAM,KAAK,EAAE,MAAM,OAAO,MAAM,OAAO;AACvC,mBAAS;AAAA,QACX;AAAA,MACF;AAAA,IAAA,CACD;AAGC,cAIA,QAAQ,EAAE,GAAG,oBAAoB,OAAO,GAAG,SAAA,CAAU,EACpD,KAAK,MAAY;AAChB,YAAM,KAAK,EAAE,MAAM,IAAI,OAAO,MAAM,MAAM;AAC1C,YAAM,IAAA;AAAA,IACR,CAAC,EACA,MAAM,CAAC,QAAuB;AAC7B,YAAM,IAAI,eAAe,QAAQ,MAAM,IAAI,MAAM,OAAO,GAAG,CAAC,CAAC;AAAA,IAC/D,CAAC;AAEH,QAAI,QAAQ,QAAQ;AAClB,cAAQ,OAAO;AAAA,QACb;AAAA,QACA,MAAY;AACV,gBAAM,IAAI,IAAI,uBAAuB,+BAA+B,CAAC;AAAA,QACvE;AAAA,QACA,EAAE,MAAM,KAAA;AAAA,MAAK;AAAA,IAEjB;AAEA,qBAAiB,SAAS,MAAM,UAAU;AACxC,YAAM;AAAA,IACR;AAAA,EACF;AAAA,EAEA,MAAM,SAAwB;AAC5B,QAAI,KAAK,WAAW;AAClB,YAAM,aAAa,KAAK;AACxB,UAAI,OAAO,WAAW,YAAY,YAAY;AAC5C,cAAM,WAAW,QAAA;AAAA,MACnB;AACA,WAAK,YAAY;AAAA,IACnB;AACA,SAAK,wBAAwB,MAAA;AAC7B,SAAK,yBAAyB;AAAA,EAChC;AAAA,EAEQ,mBAA6B;AACnC,QAAI,CAAC,KAAK,WAAW;AACnB,YAAM,IAAI;AAAA,QACR;AAAA,MAAA;AAAA,IAEJ;AACA,WAAO,KAAK;AAAA,EACd;AACF;AC1VO,SAAS,iBAAiB,QAA2C;AAC1E,MAAI,WAAW,QAAQ,OAAO,WAAW,YAAY,MAAM,QAAQ,MAAM,GAAG;AAC1E,UAAM,IAAI,sBAAsB,6DAA6D;AAAA,EAC/F;AACA,QAAM,OAAiB,OAAO,KAAK,MAA
M;AACzC,QAAM,aAAgC;AAAA,IACpC;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAEF,MAAI,CAAC,KAAK,KAAK,CAAC,QAAQ,WAAW,SAAS,GAAG,CAAC,GAAG;AACjD,UAAM,IAAI;AAAA,MACR;AAAA,IAAA;AAAA,EAEJ;AACF;AAcO,SAAS,oBAAoB,QAAyB;AAC3D,mBAAiB,MAAM;AACvB,SAAO,KAAK,UAAU,MAAM;AAC9B;AAWO,SAAS,sBAAmC,MAAiB;AAClE,MAAI;AACF,WAAO,KAAK,MAAM,IAAI;AAAA,EACxB,SAAS,KAAK;AACZ,UAAM,IAAI;AAAA,MACR;AAAA,MACA;AAAA,IAAA;AAAA,EAEJ;AACF;AClEA,IAAI,sBAAoD;AAExD,eAAe,aAAoC;AACjD,MAAI,CAAC,qBAAqB;AACxB,0BAAsB,OAAO,iBAAiB;AAAA,EAChD;AACA,SAAO;AACT;AAEA,SAAS,oBAA6B;AACpC,SAAO,OAAO,cAAc,eAAe,SAAS;AACtD;AAQA,SAAS,oBAAoB,SAA4C;AACvE,QAAM,SAAyB,CAAA;AAC/B,MAAI,QAAQ,cAAc,OAAW,QAAO,aAAa,QAAQ;AACjE,MAAI,QAAQ,gBAAgB,OAAW,QAAO,cAAc,QAAQ;AACpE,MAAI,QAAQ,SAAS,OAAW,QAAO,QAAQ,QAAQ;AACvD,SAAO;AACT;AAWA,SAAS,oBAAoB,SAAwD;AACnF,MAAI,QAAQ,eAAe,QAAW;AACpC,WAAO,EAAE,MAAM,eAAe,QAAQ,oBAAoB,QAAQ,UAAU,EAAA;AAAA,EAC9E;AACA,MAAI,QAAQ,MAAM;AAChB,WAAO,EAAE,MAAM,cAAA;AAAA,EACjB;AACA,SAAO;AACT;AAEA,SAAS,eAAe,UAAmD;AACzE,SAAO,SAAS,IAAI,CAAC,MAAkC;AACrD,YAAQ,EAAE,MAAA;AAAA,MACR,KAAK;AACH,eAAO,EAAE,MAAM,UAAU,SAAS,EAAE,QAAA;AAAA,MACtC,KAAK;AACH,eAAO,EAAE,MAAM,QAAQ,SAAS,EAAE,QAAA;AAAA,MACpC,KAAK;AACH,eAAO,EAAE,MAAM,aAAa,SAAS,EAAE,QAAA;AAAA,MACzC,KAAK;AACH,eAAO,EAAE,MAAM,QAAQ,SAAS,EAAE,SAAS,cAAc,EAAE,QAAQ,GAAA;AAAA,IAAG;AAAA,EAE5E,CAAC;AACH;AAQO,MAAM,aAA+B;AAAA,EAClC,SAA2B;AAAA,EAEnC,WAAoB;AAClB,WAAO,KAAK,WAAW;AAAA,EACzB;AAAA,EAEA,MAAM,KAAK,SAAiB,YAA8C;AACxE,QAAI,CAAC,qBAAqB;AACxB,YAAM,IAAI;AAAA,QACR;AAAA,MAAA;AAAA,IAEJ;AACA,UAAM,SAAS,MAAM,WAAA;AACrB,QAAI;AACF,WAAK,SAAS,MAAM,OAAO,gBAAgB,SAAS;AAAA,QAClD,sBAAsB,CAAC,WAAiB;AACtC,uBAAa;AAAA,YACX,UAAU,OAAO;AAAA,YACjB,MAAM,OAAO;AAAA,YACb,QAAQ;AAAA,YACR,OAAO;AAAA,YACP,OAAO,kBAAkB,OAAO,IAAI;AAAA,UAAA,CACrC;AAAA,QACH;AAAA,MAAA,CACD;AACD,mBAAa;AAAA,QACX,UAAU;AAAA,QACV,MAAM;AAAA,QACN,QAAQ;AAAA,QACR,OAAO;AAAA,QACP,OAAO;AAAA,MAAA,CACR;AAAA,IACH,SAAS,KAAK;AACZ,YAAM,IAAI,eAAe,yBAAyB,OAAO,MAAM,GAAG;AAAA,IACpE;AAAA,EACF;AAAA,EAEA,MAAM,SAAS,UAAqB,UAA6B,IAAqB;AACpF,UAAM,SAAS,KAAK,cAAA;AACpB,QAAI,QAAQ,QAAQ,SAAS;AAC3B,YAAM,IAAI,uBAAuB,kCAAkC;AAAA,IACrE;AACA,UAAM,iBAAiB,oBAAoB,OAAO;AAClD,UAAM,aAAa,MAAM,OAAO,KAAK,YAAY,OAAO;AAAA,MACtD,GAAG,oBAAoB,OAAO;AAAA,MAC9B,UAAU,eAAe,QAAQ;AAAA,MACjC,QAAQ;AAAA,MACR,GAAI,iBAAiB,EAAE,iBAAiB,mBAAmB,CAAA;AAAA,IAAC,CAC7D;AACD,WAAO,WAAW,QAAQ,CAAC,GAAG,SAAS,WAAW;AAAA,EACpD;AAAA,EAEA,OAAO,OAAO,UAAqB,UAA6B,IAA+B;AAC7F,UAAM,SAAS,KAAK,cAAA;AACpB,QAAI,QAAQ,QAAQ,SAAS;AAC3B,YAAM,IAAI,uBAAuB,kCAAkC;AAAA,IACrE;AACA,UAAM,iBAAiB,oBAAoB,OAAO;AAClD,UAAM,aAAa,MAAM,OAAO,KAAK,YAAY,OAAO;AAAA,MACtD,GAAG,oBAAoB,OAAO;AAAA,MAC9B,UAAU,eAAe,QAAQ;AAAA,MACjC,QAAQ;AAAA,MACR,GAAI,iBAAiB,EAAE,iBAAiB,mBAAmB,CAAA;AAAA,IAAC,CAC7D;AACD,QAAI,QAAgB;AACpB,QAAI,WAAoB;AACxB,QAAI;AACF,uBAAiB,SAAS,YAAY;AACpC,YAAI,QAAQ,QAAQ,SAAS;AAC3B,gBAAM,IAAI,uBAAuB,+BAA+B;AAAA,QAClE;AACA,cAAM,SAAS,MAAM,QAAQ,CAAC;AAC9B,cAAM,QAAQ,QAAQ,OAAO,WAAW;AACxC,YAAI,OAAO;AACT,gBAAM,EAAE,MAAM,OAAO,OAAO,MAAM,MAAA;AAClC,mBAAS;AAAA,QACX;AACA,YAAI,QAAQ,eAAe;AACzB,qBAAW;AACX,gBAAM,EAAE,MAAM,IAAI,OAAO,MAAM,KAAA;AAC/B,mBAAS;AAAA,QACX;AAAA,MACF;AACA,UAAI,CAAC,UAAU;AACb,cAAM,EAAE,MAAM,IAAI,OAAO,MAAM,KAAA;AAAA,MACjC;AAAA,IACF,SAAS,KAAK;AACZ,UAAI,eAAe,uBAAwB,OAAM;AACjD,YAAM,IAAI,eAAe,gCAAgC,GAAG;AAAA,IAC9D;AAAA,EACF;AAAA,EAEA,MAAM,SAAS,QAAgB,UAA6B,IAAqB;AAC/E,UAAM,SAAS,KAAK,cAAA;AACpB,QAAI,QAAQ,QAAQ,SAAS;AAC3B,YAAM,IAAI,uBAAuB,kCAAkC;AAAA,IACrE;AACA,UAAM,iBAAiB,oBAAoB,OAAO;AAClD,UAAM,aAAa,MAAM,OAAO,YAAY,OAAO;AAAA,MACjD,GAAG,oBAAoB,OAAO;AAAA,MAC9B;AAAA,MACA,QAAQ;AAAA,MACR,GAAI,iBAAiB,EAAE,iBAAiB,mBAAmB,CAAA;AAAA,IAAC,CAC7D;AACD,WAAO,WAAW,QAAQ,CAAC,GAAG,QAAQ
;AAAA,EACxC;AAAA,EAEA,OAAO,iBACL,QACA,UAA6B,IACF;AAC3B,UAAM,SAAS,KAAK,cAAA;AACpB,QAAI,QAAQ,QAAQ,SAAS;AAC3B,YAAM,IAAI,uBAAuB,kCAAkC;AAAA,IACrE;AACA,UAAM,iBAAiB,oBAAoB,OAAO;AAClD,UAAM,aAAa,MAAM,OAAO,YAAY,OAAO;AAAA,MACjD,GAAG,oBAAoB,OAAO;AAAA,MAC9B;AAAA,MACA,QAAQ;AAAA,MACR,GAAI,iBAAiB,EAAE,iBAAiB,mBAAmB,CAAA;AAAA,IAAC,CAC7D;AACD,QAAI,QAAgB;AACpB,QAAI,WAAoB;AACxB,QAAI;AACF,uBAAiB,SAAS,YAAY;AACpC,YAAI,QAAQ,QAAQ,SAAS;AAC3B,gBAAM,IAAI,uBAAuB,+BAA+B;AAAA,QAClE;AACA,cAAM,SAAS,MAAM,QAAQ,CAAC;AAC9B,cAAM,QAAQ,QAAQ,QAAQ;AAC9B,YAAI,OAAO;AACT,gBAAM,EAAE,MAAM,OAAO,OAAO,MAAM,MAAA;AAClC,mBAAS;AAAA,QACX;AACA,YAAI,QAAQ,eAAe;AACzB,qBAAW;AACX,gBAAM,EAAE,MAAM,IAAI,OAAO,MAAM,KAAA;AAC/B,mBAAS;AAAA,QACX;AAAA,MACF;AACA,UAAI,CAAC,UAAU;AACb,cAAM,EAAE,MAAM,IAAI,OAAO,MAAM,KAAA;AAAA,MACjC;AAAA,IACF,SAAS,KAAK;AACZ,UAAI,eAAe,uBAAwB,OAAM;AACjD,YAAM,IAAI,eAAe,gCAAgC,GAAG;AAAA,IAC9D;AAAA,EACF;AAAA,EAEA,MAAM,SAAwB;AAC5B,QAAI,KAAK,QAAQ;AACf,YAAM,KAAK,OAAO,OAAA;AAClB,WAAK,SAAS;AAAA,IAChB;AAAA,EACF;AAAA,EAEQ,gBAA2B;AACjC,QAAI,CAAC,KAAK,QAAQ;AAChB,YAAM,IAAI,oBAAoB,mDAAmD;AAAA,IACnF;AACA,WAAO,KAAK;AAAA,EACd;AACF;AC5OO,SAAS,sBACd,UAA6B,IACE;AAC/B,QAAM,EAAE,QAAQ,SAAS,GAAG,SAAS;AAErC,SAAO;AACT;ACeO,MAAM,aAA+B;AAAA,EAa1C,YAA6B,QAAoB;AAApB,SAAA,SAAA;AAC3B,SAAK,WAAW,CAAC,UAAgB,KAAK,cAAc,MAAM,IAAI;AAC9D,SAAK,OAAO,iBAAiB,WAAW,KAAK,QAAQ;AAAA,EACvD;AAAA,EAfQ,SAAiB;AAAA,EACjB,SAAkB;AAAA,EAClB,cAA0E;AAAA,EAC1E,gBAAwB;AAAA,EACxB,sBAAoD;AAAA,EACpD,gBAA4E;AAAA,EAC5E,kBAA0B;AAAA,EAC1B,uCAAqD,IAAA;AAAA,EACrD,qCAAiD,IAAA;AAAA,EAExC;AAAA,EAOjB,WAAoB;AAClB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,MAAM,KAAK,SAAiB,YAA8C;AACxE,QAAI,KAAK,aAAa;AACpB,YAAM,IAAI,eAAe,sCAAsC;AAAA,IACjE;AACA,UAAM,KAAa,KAAK,WAAA;AACxB,SAAK,gBAAgB;AACrB,SAAK,sBAAsB;AAC3B,WAAO,IAAI,QAAc,CAAC,SAAS,WAAW;AAC5C,WAAK,cAAc,EAAE,SAAS,OAAA;AAC9B,WAAK,KAAK,EAAE,IAAI,QAAQ,IAAI,SAAS;AAAA,IACvC,CAAC;AAAA,EACH;AAAA,EAEA,MAAM,SAAS,UAAqB,UAA6B,IAAqB;AACpF,UAAM,KAAa,KAAK,WAAA;AACxB,WAAO,IAAI,QAAgB,CAAC,SAAS,WAAW;AAC9C,WAAK,iBAAiB,IAAI,IAAI,EAAE,SAAS,QAAQ;AACjD,WAAK,KAAK;AAAA,QACR,IAAI;AAAA,QACJ;AAAA,QACA;AAAA,QACA,SAAS,sBAAsB,OAAO;AAAA,MAAA,CACvC;AACD,cAAQ,QAAQ,iBAAiB,SAAS,MAAM,KAAK,KAAK,EAAE,IAAI,SAAS,GAAA,CAAI,CAAC;AAAA,IAChF,CAAC;AAAA,EACH;AAAA,EAEA,OAAO,OAAO,UAAqB,UAA6B,IAA+B;AAC7F,UAAM,KAAa,KAAK,WAAA;AACxB,UAAM,QAAsB,CAAA;AAC5B,QAAI,OAAgB;AACpB,QAAI,QAAsB;AAC1B,QAAI,SAA8B;AAElC,UAAM,SAAS,MAAY;AACzB,UAAI,QAAQ;AACV,cAAM,KAAK;AACX,iBAAS;AACT,WAAA;AAAA,MACF;AAAA,IACF;AAEA,SAAK,eAAe,IAAI,IAAI;AAAA,MAC1B,MAAM,CAAC,UAAgB;AACrB,cAAM,KAAK,KAAK;AAChB,eAAA;AAAA,MACF;AAAA,MACA,KAAK,MAAY;AACf,eAAO;AACP,eAAA;AAAA,MACF;AAAA,MACA,MAAM,CAAC,QAAc;AACnB,gBAAQ;AACR,eAAO;AACP,eAAA;AAAA,MACF;AAAA,IAAA,CACD;AAED,SAAK,KAAK;AAAA,MACR,IAAI;AAAA,MACJ;AAAA,MACA;AAAA,MACA,SAAS,sBAAsB,OAAO;AAAA,IAAA,CACvC;AACD,YAAQ,QAAQ,iBAAiB,SAAS,MAAM,KAAK,KAAK,EAAE,IAAI,SAAS,GAAA,CAAI,CAAC;AAE9E,QAAI;AACF,aAAO,MAAM;AACX,YAAI,MAAM,SAAS,GAAG;AACpB,gBAAM,QAAQ,MAAM,MAAA;AACpB,cAAI,MAAO,OAAM;AACjB;AAAA,QACF;AACA,YAAI,MAAO,OAAM;AACjB,YAAI,KAAM;AACV,cAAM,IAAI,QAAc,CAAC,MAAM;AAC7B,mBAAS;AAAA,QACX,CAAC;AAAA,MACH;AAAA,IACF,UAAA;AACE,WAAK,eAAe,OAAO,EAAE;AAAA,IAC/B;AAAA,EACF;AAAA,EAEA,MAAM,SAAS,QAAgB,UAA6B,IAAqB;AAC/E,UAAM,KAAa,KAAK,WAAA;AACxB,WAAO,IAAI,QAAgB,CAAC,SAAS,WAAW;AAC9C,WAAK,iBAAiB,IAAI,IAAI,EAAE,SAAS,QAAQ;AACjD,WAAK,KAAK;AAAA,QACR,IAAI;AAAA,QACJ;AAAA,QACA;AAAA,QACA,SAAS,sBAAsB,OAAO;AAAA,MAAA,CACvC;AACD,cAAQ,QAAQ,iBAAiB,SAAS,MAAM,KAAK,KAAK,EAAE,IAAI,SAAS,GAAA,CAAI,CAAC;AAAA,IAChF,CAAC;AAAA,EACH;AAAA,EAEA,OAAO,iBACL,QACA,UAA6B,IACF;AAC3B,UAAM,KAAa,KAAK,WAAA;AACxB,UAAM,QAAsB,CAAA;AAC5B,QAAI,OAAgB;AACpB,QAAI,QAAsB;AAC1B,QAAI,SAA8B;AAElC,UAAM,SAAS,MAAY
;AACzB,UAAI,QAAQ;AACV,cAAM,KAAK;AACX,iBAAS;AACT,WAAA;AAAA,MACF;AAAA,IACF;AAEA,SAAK,eAAe,IAAI,IAAI;AAAA,MAC1B,MAAM,CAAC,UAAgB;AACrB,cAAM,KAAK,KAAK;AAChB,eAAA;AAAA,MACF;AAAA,MACA,KAAK,MAAY;AACf,eAAO;AACP,eAAA;AAAA,MACF;AAAA,MACA,MAAM,CAAC,QAAc;AACnB,gBAAQ;AACR,eAAO;AACP,eAAA;AAAA,MACF;AAAA,IAAA,CACD;AAED,SAAK,KAAK;AAAA,MACR,IAAI;AAAA,MACJ;AAAA,MACA;AAAA,MACA,SAAS,sBAAsB,OAAO;AAAA,IAAA,CACvC;AACD,YAAQ,QAAQ,iBAAiB,SAAS,MAAM,KAAK,KAAK,EAAE,IAAI,SAAS,GAAA,CAAI,CAAC;AAE9E,QAAI;AACF,aAAO,MAAM;AACX,YAAI,MAAM,SAAS,GAAG;AACpB,gBAAM,QAAQ,MAAM,MAAA;AACpB,cAAI,MAAO,OAAM;AACjB;AAAA,QACF;AACA,YAAI,MAAO,OAAM;AACjB,YAAI,KAAM;AACV,cAAM,IAAI,QAAc,CAAC,MAAM;AAC7B,mBAAS;AAAA,QACX,CAAC;AAAA,MACH;AAAA,IACF,UAAA;AACE,WAAK,eAAe,OAAO,EAAE;AAAA,IAC/B;AAAA,EACF;AAAA,EAEA,MAAM,SAAwB;AAC5B,QAAI,CAAC,KAAK,OAAQ;AAClB,QAAI,KAAK,eAAe;AACtB,YAAM,IAAI,eAAe,wCAAwC;AAAA,IACnE;AACA,UAAM,KAAa,KAAK,WAAA;AACxB,SAAK,kBAAkB;AACvB,WAAO,IAAI,QAAc,CAAC,SAAS,WAAW;AAC5C,WAAK,gBAAgB,EAAE,SAAS,OAAA;AAChC,WAAK,KAAK,EAAE,IAAI,UAAU,IAAI;AAAA,IAChC,CAAC;AAAA,EACH;AAAA;AAAA,EAGA,YAAkB;AAChB,SAAK,OAAO,oBAAoB,WAAW,KAAK,QAAQ;AACxD,SAAK,OAAO,UAAA;AACZ,SAAK,SAAS;AAAA,EAChB;AAAA,EAEQ,aAAqB;AAC3B,UAAM,KAAK,KAAK;AAChB,SAAK,UAAU;AACf,WAAO;AAAA,EACT;AAAA,EAEQ,KAAK,KAA0B;AACrC,SAAK,OAAO,YAAY,GAAG;AAAA,EAC7B;AAAA,EAEQ,cAAc,KAA2B;AAC/C,YAAQ,IAAI,IAAA;AAAA,MACV,KAAK;AACH,YAAI,KAAK,eAAe,IAAI,OAAO,KAAK,eAAe;AACrD,eAAK,SAAS;AACd,eAAK,YAAY,QAAA;AACjB,eAAK,cAAc;AACnB,eAAK,sBAAsB;AAAA,QAC7B;AACA;AAAA,MACF,KAAK;AACH,YAAI,IAAI,OAAO,KAAK,eAAe;AACjC,eAAK,sBAAsB,IAAI,OAAO;AAAA,QACxC;AACA;AAAA,MACF,KAAK,aAAa;AAChB,cAAM,UAAU,KAAK,iBAAiB,IAAI,IAAI,EAAE;AAChD,YAAI,SAAS;AACX,kBAAQ,QAAQ,IAAI,IAAI;AACxB,eAAK,iBAAiB,OAAO,IAAI,EAAE;AAAA,QACrC;AACA;AAAA,MACF;AAAA,MACA,KAAK,SAAS;AACZ,cAAM,SAAS,KAAK,eAAe,IAAI,IAAI,EAAE;AAC7C,gBAAQ,KAAK,IAAI,KAAK;AACtB;AAAA,MACF;AAAA,MACA,KAAK,cAAc;AACjB,cAAM,SAAS,KAAK,eAAe,IAAI,IAAI,EAAE;AAC7C,gBAAQ,IAAA;AACR;AAAA,MACF;AAAA,MACA,KAAK;AACH,YAAI,KAAK,iBAAiB,IAAI,OAAO,KAAK,iBAAiB;AACzD,eAAK,SAAS;AACd,eAAK,cAAc,QAAA;AACnB,eAAK,gBAAgB;AAAA,QACvB;AACA;AAAA,MACF,KAAK;AACH;AAAA,MACF,KAAK,SAAS;AACZ,cAAM,MAAM,SAAS,IAAI,MAAM,IAAI,OAAO;AAC1C,YAAI,KAAK,eAAe,IAAI,OAAO,KAAK,eAAe;AACrD,eAAK,YAAY,OAAO,GAAG;AAC3B,eAAK,cAAc;AACnB,eAAK,sBAAsB;AAC3B;AAAA,QACF;AACA,YAAI,KAAK,iBAAiB,IAAI,OAAO,KAAK,iBAAiB;AACzD,eAAK,cAAc,OAAO,GAAG;AAC7B,eAAK,gBAAgB;AACrB;AAAA,QACF;AACA,cAAM,WAAW,KAAK,iBAAiB,IAAI,IAAI,EAAE;AACjD,YAAI,UAAU;AACZ,mBAAS,OAAO,GAAG;AACnB,eAAK,iBAAiB,OAAO,IAAI,EAAE;AACnC;AAAA,QACF;AACA,cAAM,SAAS,KAAK,eAAe,IAAI,IAAI,EAAE;AAC7C,YAAI,QAAQ;AACV,iBAAO,KAAK,GAAG;AACf;AAAA,QACF;AACA;AAAA,MACF;AAAA,IAAA;AAAA,EAEJ;AACF;AAEA,SAAS,SAAS,MAAc,SAAwB;AACtD,UAAQ,MAAA;AAAA,IACN,KAAK;AACH,aAAO,IAAI,eAAe,OAAO;AAAA,IACnC,KAAK;AACH,aAAO,IAAI,oBAAoB,OAAO;AAAA,IACxC,KAAK;AACH,aAAO,IAAI,uBAAuB,OAAO;AAAA,IAC3C,SAAS;AACP,YAAM,MAAM,IAAI,MAAM,OAAO;AAC7B,UAAI,OAAO;AACX,aAAO;AAAA,IACT;AAAA,EAAA;AAEJ;AC3TO,MAAM,gBAAuD,OAAO,OAAO;AAAA,EAChF,qBAAqB;AAAA,IACnB,IAAI;AAAA,IACJ,QAAQ;AAAA,IACR,YAAY;AAAA,IACZ,cAAc;AAAA,IACd,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,eAAe;AAAA,IACf,aAAa;AAAA,EAAA;AAAA,EAEf,qBAAqB;AAAA,IACnB,IAAI;AAAA,IACJ,QAAQ;AAAA,IACR,YAAY;AAAA,IACZ,cAAc;AAAA,IACd,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,eAAe;AAAA,IACf,aAAa;AAAA,EAAA;AAAA,EAEf,qBAAqB;AAAA,IACnB,IAAI;AAAA,IACJ,QAAQ;AAAA,IACR,YAAY;AAAA,IACZ,cAAc;AAAA,IACd,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,eAAe;AAAA,IACf,aAAa;AAAA,EAAA;AAAA,EAEf,qBAAqB;AAAA,IACnB,IAAI;AAAA,IACJ,QAAQ;AAAA,IACR,YAAY;AAAA,IACZ,cAAc;AAAA,IACd,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,eAAe;AAAA,IACf,aACE;AAAA,EAAA;AAEN,CAAC;AASM,SAAS,mBAAmB,SAA8B;AAC/D,QAAM,SAAS,cAAc,OAAO;AACpC,MAAI,CAAC
,QAAQ;AACX,UAAM,YAAY,OAAO,KAAK,aAAa,EAAE,KAAK,IAAI;AACtD,UAAM,IAAI,kBAAkB,kBAAkB,OAAO,wBAAwB,SAAS,GAAG;AAAA,EAC3F;AACA,SAAO;AACT;AAGO,SAAS,sBAAgC;AAC9C,SAAO,OAAO,KAAK,aAAa;AAClC;AAyBO,MAAM,oBAA+D,OAAO,OAAO;AAAA,EACxF,qBAAqB;AAAA,IACnB,IAAI;AAAA,IACJ,QAAQ;AAAA,IACR,WAAW;AAAA,IACX,WAAW;AAAA,IACX,gBAAgB;AAAA,IAChB,cAAc;AAAA,IACd,aAAa;AAAA,EAAA;AAAA,EAEf,oBAAoB;AAAA,IAClB,IAAI;AAAA,IACJ,QAAQ;AAAA,IACR,WAAW;AAAA,IACX,WAAW;AAAA,IACX,gBAAgB;AAAA,IAChB,cAAc;AAAA,IACd,aAAa;AAAA,EAAA;AAEjB,CAAC;AASM,SAAS,uBAAuB,SAAkC;AACvE,QAAM,SAAS,kBAAkB,OAAO;AACxC,MAAI,CAAC,QAAQ;AACX,UAAM,YAAY,OAAO,KAAK,iBAAiB,EAAE,KAAK,IAAI;AAC1D,UAAM,IAAI;AAAA,MACR,4BAA4B,OAAO,wBAAwB,SAAS;AAAA,IAAA;AAAA,EAExE;AACA,SAAO;AACT;AAGO,SAAS,+BAAyC;AACvD,SAAO,OAAO,KAAK,iBAAiB;AACtC;AAqBO,MAAM,mBAA6D,OAAO,OAAO;AAAA,EACtF,qBAAqB;AAAA,IACnB,IAAI;AAAA,IACJ,QAAQ;AAAA,IACR,WAAW;AAAA,IACX,gBAAgB;AAAA,IAChB,cAAc;AAAA,IACd,aAAa;AAAA,EAAA;AAEjB,CAAC;AAQM,SAAS,sBAAsB,SAAiC;AACrE,QAAM,SAAS,iBAAiB,OAAO;AACvC,MAAI,CAAC,QAAQ;AACX,UAAM,YAAY,OAAO,KAAK,gBAAgB,EAAE,KAAK,IAAI;AACzD,UAAM,IAAI;AAAA,MACR,2BAA2B,OAAO,wBAAwB,SAAS;AAAA,IAAA;AAAA,EAEvE;AACA,SAAO;AACT;AAGO,SAAS,8BAAwC;AACtD,SAAO,OAAO,KAAK,gBAAgB;AACrC;ACrLO,SAAS,wBAAoC;AAClD,SAAO,IAAI,OAAO,IAAA;AAAA;AAAA,IAAA;AAAA,IAAA,YAAA;AAAA,EAAA,GAAmD;AAAA,IACnE,MAAM;AAAA,EAAA,CACP;AACH;ACIA,SAAS,wBAAiC;AACxC,SAAO,OAAO,cAAc,eAAe,SAAS;AACtD;AAaO,SAAS,eACd,QACA,QACA,iBAC2B;AAC3B,MAAI,WAAW,SAAU,QAAO;AAChC,MAAI,WAAW,gBAAgB;AAC7B,QAAI,CAAC,OAAO,gBAAgB;AAC1B,YAAM,IAAI;AAAA,QACR,UAAU,OAAO,EAAE;AAAA,MAAA;AAAA,IAEvB;AACA,WAAO;AAAA,EACT;AACA,MAAI,gBAAiB,QAAO;AAC5B,MAAI,CAAC,OAAO,gBAAgB;AAC1B,UAAM,IAAI;AAAA,MACR,oCAAoC,OAAO,EAAE;AAAA,IAAA;AAAA,EAEjD;AACA,SAAO;AACT;AAmDO,MAAe,OAAO;AAAA,EACjB,YAEW,QAEH,QAChB;AAHmB,SAAA,SAAA;AAEH,SAAA,SAAA;AAAA,EACf;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUH,aAAuB,aACrB,SACA,UAA+B,IACN;AACzB,UAAM,SAAS,mBAAmB,OAAO;AACzC,QAAI,QAAQ,QAAQ;AAClB,UAAI,CAAC,QAAQ,OAAO,YAAY;AAC9B,cAAM,QAAQ,OAAO,KAAK,OAAO,UAAU,QAAQ,UAAU;AAAA,MAC/D;AACA,aAAO,EAAE,QAAQ,QAAQ,QAAQ,OAAA;AAAA,IACnC;AACA,UAAM,SAAwB,QAAQ,WAAW;AACjD,UAAM,WAAsC;AAAA,MAC1C;AAAA,MACA;AAAA,MACA,sBAAA;AAAA,IAAsB;AAExB,UAAM,SAAiB,OAAO,kBAAkB,UAAU,OAAO;AACjE,UAAM,SACJ,aAAa,iBAAkB,OAAO,kBAAkB,KAAM,OAAO;AACvE,QAAI,CAAC,OAAO,YAAY;AACtB,YAAM,OAAO,KAAK,QAAQ,QAAQ,UAAU;AAAA,IAC9C;AACA,WAAO,EAAE,QAAQ,OAAA;AAAA,EACnB;AAAA,EAEA,OAAe,kBACb,UACA,SACQ;AACR,QAAI,aAAa,gBAAgB;AAG/B,aAAO,IAAI,uBAAA;AAAA,IACb;AACA,UAAM,YAAqB,QAAQ,YAAY;AAC/C,QAAI,WAAW;AACb,aAAO,IAAI,aAAa,uBAAuB;AAAA,IACjD;AACA,WAAO,IAAI,aAAA;AAAA,EACb;AAAA;AAAA,EAGA,MAAM,SAAwB;AAC5B,UAAM,KAAK,OAAO,OAAA;AAAA,EACpB;AAAA;AAAA,EAGA,WAAoB;AAClB,WAAO,KAAK,OAAO,SAAA;AAAA,EACrB;AACF;ACvKO,MAAM,UAAU;AAAA,EACrB,YAEkB,MAEA,SAEA,iBAEA,cAChB;AAPgB,SAAA,OAAA;AAEA,SAAA,UAAA;AAEA,SAAA,kBAAA;AAEA,SAAA,eAAA;AAAA,EACf;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaH,OAAuB;AACrB,WAAO,sBAAyB,KAAK,IAAI;AAAA,EAC3C;AACF;AAQO,MAAM,iBAAiB;AAAA,EAC5B,YAEkB,MAEA,QAEA,iBAEA,cAChB;AAPgB,SAAA,OAAA;AAEA,SAAA,SAAA;AAEA,SAAA,kBAAA;AAEA,SAAA,eAAA;AAAA,EACf;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYH,OAAuB;AACrB,WAAO,sBAAyB,KAAK,IAAI;AAAA,EAC3C;AACF;ACzCO,MAAM,aAAa,OAAO;AAAA,EACd,UAAqB,CAAA;AAAA,EAC9B,eAA8B;AAAA,EAE9B,YAAY,QAAgB,QAAqB;AACvD,UAAM,QAAQ,MAAM;AAAA,EACtB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,aAAa,OAAO,SAAiB,UAA+B,IAAmB;AACrF,UAAM,EAAE,QAAQ,OAAA,IAAW,MAAM,OAAO,aAAa,SAAS,OAAO;AACrE,WAAO,IAAI,KAAK,QAAQ,MAAM;AAAA,EAChC;AAAA;AAAA,EAGA,gBAAgB,QAAsB;AACpC,SAAK,eAAe;AAAA,EACtB;AAAA;AAAA,EAGA,oBAA0B;AACxB,SAAK,eAAe;AAAA,EACtB;AAAA;AAAA,EAGA,eAAqB;AACnB,SAAK,QAAQ,SAAS;AAAA,EACxB;AAAA;AAAA,EAGA,
aAAiC;AAC/B,WAAO,KAAK,QAAQ,MAAA;AAAA,EACtB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,KAAK,SAAiB,UAA6B,IAAwB;AAC/E,UAAM,WAAW,KAAK,cAAc,OAAO;AAC3C,UAAM,OAAO,MAAM,KAAK,OAAO,SAAS,UAAU,OAAO;AACzD,UAAM,UAAmB,EAAE,MAAM,QAAQ,SAAS,QAAA;AAClD,UAAM,eAAwB,EAAE,MAAM,aAAa,SAAS,KAAA;AAC5D,SAAK,QAAQ,KAAK,SAAS,YAAY;AACvC,WAAO,IAAI,UAAU,MAAM,cAAc,GAAG,MAAM;AAAA,EACpD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,OAAO,OAAO,SAAiB,UAA6B,IAA+B;AACzF,UAAM,WAAW,KAAK,cAAc,OAAO;AAC3C,UAAM,UAAmB,EAAE,MAAM,QAAQ,SAAS,QAAA;AAClD,QAAI,MAAc;AAClB,qBAAiB,SAAS,KAAK,OAAO,OAAO,UAAU,OAAO,GAAG;AAC/D,aAAO,MAAM;AACb,YAAM;AAAA,IACR;AACA,UAAM,eAAwB,EAAE,MAAM,aAAa,SAAS,IAAA;AAC5D,SAAK,QAAQ,KAAK,SAAS,YAAY;AAAA,EACzC;AAAA,EAEQ,cAAc,aAAgC;AACpD,UAAM,WAAsB,CAAA;AAC5B,QAAI,KAAK,cAAc;AACrB,eAAS,KAAK,EAAE,MAAM,UAAU,SAAS,KAAK,cAAc;AAAA,IAC9D;AACA,aAAS,KAAK,GAAG,KAAK,OAAO;AAC7B,aAAS,KAAK,EAAE,MAAM,QAAQ,SAAS,aAAa;AACpD,WAAO;AAAA,EACT;AACF;ACnFO,MAAM,mBAAmB,OAAO;AAAA,EAC7B,YAAY,QAAgB,QAAqB;AACvD,UAAM,QAAQ,MAAM;AAAA,EACtB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,aAAa,OAAO,SAAiB,UAA+B,IAAyB;AAC3F,UAAM,EAAE,QAAQ,OAAA,IAAW,MAAM,OAAO,aAAa,SAAS,OAAO;AACrE,WAAO,IAAI,WAAW,QAAQ,MAAM;AAAA,EACtC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,MAAM,QAAQ,QAAgB,UAA6B,IAA+B;AACxF,UAAM,OAAO,MAAM,KAAK,OAAO,SAAS,QAAQ,OAAO;AACvD,WAAO,IAAI,iBAAiB,MAAM,QAAQ,GAAG,MAAM;AAAA,EACrD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,OAAO,QAAgB,UAA6B,IAA+B;AACxF,qBAAiB,SAAS,KAAK,OAAO,iBAAiB,QAAQ,OAAO,GAAG;AACvE,YAAM;AAAA,IACR;AAAA,EACF;AACF;AC/BA,IAAID,8BAAgE;AAEpE,eAAeC,qBAAgD;AAC7D,MAAI,CAACD,6BAA2B;AAC9BA,kCAA4B,OAAO,2BAA2B;AAAA,EAChE;AACA,SAAOA;AACT;AAEA,eAAeG,uBACb,QACA,YACwB;AACxB,QAAM,eAAe,MAAMF,mBAAA;AAC3B,MAAI;AACF,UAAM,OAAO,MAAM,aAAa,SAAS,sBAAsB,OAAO,gBAAgB;AAAA,MACpF,mBAAmB,CAAC,WAA0B;AAC5C,YAAI,CAAC,WAAY;AACjB,cAAM,IAAI;AACV,mBAAW;AAAA,UACT,UAAU,OAAO,EAAE,aAAa,WAAW,EAAE,WAAW,MAAM;AAAA,UAC9D,MAAM,EAAE,UAAU;AAAA,UAClB,QAAQ;AAAA,UACR,OAAO;AAAA,UACP,OAAO;AAAA,QAAA,CACR;AAAA,MACH;AAAA,IAAA,CACD;AACD,WAAO;AAAA,MACL,MAAM,MAAM,OAAO,SAA8B;AAC/C,cAAM,SAAS,MAAM,KAAK,OAAO;AAAA,UAC/B,SAAS,QAAQ;AAAA,UACjB,WAAW,QAAQ;AAAA,QAAA,CACpB;AACD,eAAO,OAAO,OAAA;AAAA,MAChB;AAAA,MACA,MAAM,SAAwB;AAC5B,YAAI,OAAQ,KAA2C,YAAY,YAAY;AAC7E,gBAAO,KAAqD,QAAA;AAAA,QAC9D;AAAA,MACF;AAAA,IAAA;AAAA,EAEJ,SAAS,KAAK;AACZ,UAAM,IAAI,eAAe,mCAAmC,OAAO,EAAE,MAAM,GAAG;AAAA,EAChF;AACF;AAgBO,MAAM,WAAW;AAAA,EACd,YACW,UAED,QAChB;AAHiB,SAAA,WAAA;AAED,SAAA,SAAA;AAAA,EACf;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUH,aAAa,OAAO,SAAiB,UAAmC,IAAyB;AAC/F,UAAM,SAAS,uBAAuB,OAAO;AAC7C,UAAM,WAAW,QAAQ,YAAa,MAAME,uBAAqB,QAAQ,QAAQ,UAAU;AAC3F,WAAO,IAAI,WAAW,UAAU,MAAM;AAAA,EACxC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,MAAM,OAAiB,UAAwB,IAAyB;AAC5E,QAAI,MAAM,WAAW,EAAG,QAAO,CAAA;AAC/B,QAAI,CAAC,KAAK,UAAU;AAClB,YAAM,IAAI,oBAAoB,sCAAsC;AAAA,IACtE;AACA,UAAM,SAAiC;AAAA,MACrC,WAAW,QAAQ,aAAa;AAAA,MAChC,SAAS,QAAQ,WAAW;AAAA,IAAA;AAE9B,WAAO,KAAK,SAAS,MAAM,OAAO,MAAM;AAAA,EAC1C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,YAAY,MAAc,UAAwB,IAAuB;AAC7E,UAAM,CAAC,GAAG,IAAI,MAAM,KAAK,MAAM,CAAC,IAAI,GAAG,OAAO;AAC9C,QAAI,CAAC,KAAK;AACR,YAAM,IAAI,eAAe,wCAAwC;AAAA,IACnE;AACA,WAAO;AAAA,EACT;AAAA;AAAA,EAGA,IAAI,YAAoB;AACtB,WAAO,KAAK,OAAO;AAAA,EACrB;AAAA;AAAA,EAGA,MAAM,SAAwB;AAC5B,UAAM,KAAK,SAAS,SAAA;AAAA,EACtB;AACF;ACnHA,IAAI,4BAAgE;AAEpE,eAAe,mBAAgD;AAC7D,MAAI,CAAC,2BAA2B;AAC9B,gCAA4B,OAAO,2BAA2B;AAAA,EAChE;AACA,SAAO;AACT;AAEA,SAAS,aAAa,GAAmB;AACvC,SAAO,KAAK,IAAI,KAAK,IAAI,CAAC,CAAC;AAC7B;AAEA,eAAe,qBACb,QACA,YACyB;AACzB,QAAM,eAAe,MAAM,iBAAA;AAC3B,MAAI;AACF,UAAM,YAAY,MAAM,aAAa,cAAc,gBAAgB,OAAO,gBAAgB;AAAA,
MACxF,mBAAmB,CAAC,WAA0B;AAC5C,YAAI,CAAC,WAAY;AACjB,cAAM,IAAI;AACV,mBAAW;AAAA,UACT,UAAU,OAAO,EAAE,aAAa,WAAW,EAAE,WAAW,MAAM;AAAA,UAC9D,MAAM,EAAE,UAAU;AAAA,UAClB,QAAQ;AAAA,UACR,OAAO;AAAA,UACP,OAAO;AAAA,QAAA,CACR;AAAA,MACH;AAAA,IAAA,CACD;AACD,UAAM,QAAQ,MAAM,aAAa,mCAAmC;AAAA,MAClE,OAAO;AAAA,MACP;AAAA,QACE,mBAAmB,CAAC,WAA0B;AAC5C,cAAI,CAAC,WAAY;AACjB,gBAAM,IAAI;AACV,qBAAW;AAAA,YACT,UAAU,OAAO,EAAE,aAAa,WAAW,EAAE,WAAW,MAAM;AAAA,YAC9D,MAAM,EAAE,UAAU;AAAA,YAClB,QAAQ;AAAA,YACR,OAAO;AAAA,YACP,OAAO;AAAA,UAAA,CACR;AAAA,QACH;AAAA,MAAA;AAAA,IACF;AAEF,WAAO;AAAA,MACL,MAAM,MAAM,OAAO,MAAyB;AAC1C,YAAI,KAAK,WAAW,EAAG,QAAO,CAAA;AAC9B,cAAM,UAAoB,KAAK,IAAI,MAAM,KAAK;AAI9C,cAAM,WAAW;AAIjB,cAAM,SAAS,SAAS,SAAS;AAAA,UAC/B,WAAW;AAAA,UACX,SAAS;AAAA,UACT,YAAY;AAAA,UACZ,YAAY,OAAO;AAAA,QAAA,CACpB;AACD,cAAM,YAAY;AAGlB,cAAM,UAAU,MAAM,UAAU,MAAM;AACtC,cAAM,SAAqB,QAAQ,OAAO,OAAA;AAC1C,eAAO,OAAO,IAAI,CAAC,QAAQ,IAAI,CAAC,KAAK,CAAC;AAAA,MACxC;AAAA,MACA,MAAM,SAAwB;AAC5B,cAAM,IAAI;AACV,YAAI,OAAO,EAAE,YAAY,WAAY,OAAM,EAAE,QAAA;AAAA,MAC/C;AAAA,IAAA;AAAA,EAEJ,SAAS,KAAK;AACZ,UAAM,IAAI,eAAe,kCAAkC,OAAO,EAAE,MAAM,GAAG;AAAA,EAC/E;AACF;AA0BO,MAAM,SAAS;AAAA,EACZ,YACW,UAED,QAChB;AAHiB,SAAA,WAAA;AAED,SAAA,SAAA;AAAA,EACf;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUH,aAAa,OAAO,SAAiB,UAAiC,IAAuB;AAC3F,UAAM,SAAS,sBAAsB,OAAO;AAC5C,UAAM,WAAW,QAAQ,YAAa,MAAM,qBAAqB,QAAQ,QAAQ,UAAU;AAC3F,WAAO,IAAI,SAAS,UAAU,MAAM;AAAA,EACtC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAM,MAAM,OAAe,MAAgB,UAAyB,CAAA,GAAuB;AACzF,QAAI,KAAK,WAAW,EAAG,QAAO,CAAA;AAC9B,QAAI,CAAC,KAAK,UAAU;AAClB,YAAM,IAAI,oBAAoB,oCAAoC;AAAA,IACpE;AACA,UAAM,MAAM,MAAM,KAAK,SAAS,MAAM,OAAO,IAAI;AACjD,WAAO,QAAQ,UAAU,IAAI,IAAI,YAAY,IAAI;AAAA,EACnD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAM,KACJ,OACA,MACA,UAAyB,CAAA,GACE;AAC3B,UAAM,SAAS,MAAM,KAAK,MAAM,OAAO,MAAM,OAAO;AACpD,UAAM,SAA2B,OAAO,IAAI,CAAC,OAAO,UAAU;AAC5D,YAAM,OAAe,KAAK,KAAK,KAAK;AACpC,aAAO,EAAE,MAAM,OAAO,MAAA;AAAA,IACxB,CAAC;AACD,WAAO,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AACvC,WAAO;AAAA,EACT;AAAA;AAAA,EAGA,MAAM,SAAwB;AAC5B,UAAM,KAAK,SAAS,SAAA;AAAA,EACtB;AACF;AClLA,IAAI,qBAAwD;AAE5D,eAAe,yBAAqD;AAClE,MAAI,CAAC,oBAAoB;AACvB,yBAAqB,OAAO,iBAAiB,EAAE,KAAK,CAAC,OAAO;AAAA,MAC1D,iBAAiB,EAAE;AAAA,MACnB,oBAAoB,EAAE;AAAA,IAAA,EACtB;AAAA,EACJ;AACA,SAAO;AACT;AAEA,eAAe,kBAAuC;AACpD,MAAI,OAAO,cAAc,eAAe,CAAC,UAAU,SAAS,UAAU;AACpE,WAAO,EAAE,OAAO,GAAG,OAAO,EAAA;AAAA,EAC5B;AACA,QAAM,WAAW,MAAM,UAAU,QAAQ,SAAA;AACzC,SAAO;AAAA,IACL,OAAO,SAAS,SAAS;AAAA,IACzB,OAAO,SAAS,SAAS;AAAA,EAAA;AAE7B;AAuBO,MAAM,WAAW;AAAA,EACL;AAAA,EACA;AAAA,EACA;AAAA,EAEjB,YAAY,UAA6B,IAAI;AAC3C,SAAK,eAAe,QAAQ;AAC5B,SAAK,kBAAkB,QAAQ;AAC/B,SAAK,eAAe,QAAQ,YAAY;AAAA,EAC1C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,IAAI,SAAmC;AAC3C,UAAM,YAAoB,mBAAmB,OAAO,EAAE;AACtD,UAAM,KAAK,KAAK,iBAAiB,MAAM,0BAA0B;AACjE,WAAO,GAAG,SAAS;AAAA,EACrB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,MAAM,OAAO,SAAgC;AAC3C,UAAM,YAAoB,mBAAmB,OAAO,EAAE;AACtD,UAAM,KAAK,KAAK,oBAAoB,MAAM,0BAA0B;AACpE,UAAM,GAAG,SAAS;AAAA,EACpB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,OAAoC;AACxC,UAAM,KAAK,KAAK,iBAAiB,MAAM,0BAA0B;AACjE,UAAM,SAAS,MAAM,QAAQ;AAAA,MAC3B,OAAO,OAAO,aAAa,EAAE,IAAI,OAAO,WAAW;AACjD,cAAM,SAAkB,MAAM,GAAG,OAAO,QAAQ;AAChD,YAAI,CAAC,OAAQ,QAAO;AACpB,cAAM,QAA0B;AAAA,UAC9B,IAAI,OAAO;AAAA,UACX,WAAW,OAAO;AAAA,UAClB,QAAQ,OAAO;AAAA,UACf,YAAY,OAAO;AAAA,QAAA;AAErB,eAAO;AAAA,MACT,CAAC;AAAA,IAAA;AAEH,WAAO,OAAO,OAAO,CAAC,MAA6B,MAAM,IAAI;AAAA,EAC/D;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,QAAuB;AAC3B,UAAM,KAAK,KAAK,oBAAoB,MAAM,0BAA0B;AACpE,UAAM,QAAQ,IAAI,OAAO,OAAO,aAAa,EAAE,IAAI,CAAC,MAAM,GAAG,EAAE,QAAQ,CAAC,
CAAC;AAAA,EAC3E;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,gBAAqC;AACzC,WAAO,KAAK,aAAA;AAAA,EACd;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,YAAY,SAAuB;AACxC,QAAI,EAAE,WAAW,gBAAgB;AAC/B,YAAM,YAAY,OAAO,KAAK,aAAa,EAAE,KAAK,IAAI;AACtD,YAAM,IAAI,kBAAkB,kBAAkB,OAAO,wBAAwB,SAAS,GAAG;AAAA,IAC3F;AAAA,EACF;AACF;AC1KA,eAAsB,cAAc,QAAoD;AACtF,MAAI,MAAc;AAClB,mBAAiB,SAAS,QAAQ;AAChC,WAAO,MAAM;AAAA,EACf;AACA,SAAO;AACT;AAYA,gBAAuB,IACrB,QACA,SAC2B;AAC3B,mBAAiB,SAAS,QAAQ;AAChC,YAAQ,KAAK;AACb,UAAM;AAAA,EACR;AACF;AC2CO,MAAM,UAAkB;"}
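
The bundled sources above document a retrieve-then-rerank flow across the new `Embeddings` and `Reranker` tasks: pull top-K candidates with fast bi-encoder similarity, then rescore with the cross-encoder. A minimal sketch of that flow, using only APIs visible in the embedded `sourcesContent` — the model ids `bge-small-en-v1.5` and `bge-reranker-base` are the ones named in the JSDoc examples, and the corpus is purely illustrative:

```ts
import { Embeddings, Reranker } from "localm-web";

// Stage 1 — embed corpus and query with the bi-encoder.
// `normalize: true` is the documented default, so a dot product
// of two vectors equals their cosine similarity.
const emb = await Embeddings.create("bge-small-en-v1.5");
const corpus = [
  "WebGPU is a modern graphics and compute API for the web.",
  "Bananas grow on large herbaceous plants, not trees.",
  "WASM SIMD accelerates vector math in the browser.",
];
const docVecs = await emb.embed(corpus);
const queryVec = await emb.embedSingle("what is webgpu?");

const dot = (a: number[], b: number[]): number =>
  a.reduce((sum, x, i) => sum + x * (b[i] ?? 0), 0);

// Keep the top-K candidates by cosine similarity.
const topK = corpus
  .map((text, i) => ({ text, score: dot(queryVec, docVecs[i] ?? []) }))
  .sort((a, b) => b.score - a.score)
  .slice(0, 2);

// Stage 2 — rerank the survivors with the cross-encoder.
// `sigmoid: true` maps raw logits into [0, 1], per the RerankOptions doc.
const rerank = await Reranker.create("bge-reranker-base");
const ranked = await rerank.rank(
  "what is webgpu?",
  topK.map((c) => c.text),
  { sigmoid: true }
);
for (const r of ranked) console.log(r.score.toFixed(3), r.text);

await emb.unload();
await rerank.unload();
```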
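Likewise for the streaming utilities in the bundle: `tap` wraps any task's `stream()` as a side-effecting passthrough, and `collectStream` drains it into the final string. A sketch under the same assumptions (the `llama-3.2-1b-int4` preset id comes from the registry shown above; the prompt is illustrative):

```ts
import { Completion, collectStream, tap } from "localm-web";

const task = await Completion.create("llama-3.2-1b-int4");

// `tap` never buffers: each TokenChunk hits the callback, then flows
// on unchanged — so incremental UI paint and final-text collection
// can share one underlying stream.
let painted = "";
const stream = tap(task.stream("The web is"), (chunk) => {
  painted += chunk.text; // e.g. update a UI element here
});

// `collectStream` concatenates every chunk's `text` field.
const full = await collectStream(stream);
console.assert(full === painted); // passthrough: same chunks, same order
```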
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "localm-web",
3
- "version": "0.4.0",
3
+ "version": "0.5.0",
4
4
  "description": "Browser-only TypeScript SDK for running LLMs and SLMs locally with WebGPU. Ultralytics-style DX, Vite-first.",
5
5
  "type": "module",
6
6
  "main": "./dist/index.js",
@@ -53,7 +53,7 @@
53
53
  "vitest": "^3.2.4"
54
54
  },
55
55
  "engines": {
56
- "node": ">=20.19.0"
56
+ "node": ">=22.0.0"
57
57
  },
58
58
  "overrides": {
59
59
  "esbuild": "^0.25.0"