tensorgrad 0.0.9 → 0.0.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,119 +1,119 @@
 # tensorgrad
 
 A tiny TypeScript-native tensor library with autograd that compiles directly
 to WebGPU. Designed for training small models in the browser — without
 hand-writing WGSL kernels and without dragging in a 5 MB ML framework.
 
 ```sh
 npm i tensorgrad
 ```
 
 Roughly **3000 lines of zero-dependency TypeScript**, ~10 KB gzipped after
 build. Targets WebGPU only. Static shapes only. Forward + reverse-mode
 autograd; Adam optimizer; the whole training pipeline runs as compiled WGSL.
 
 ## Quick example
 
 A 2-layer MLP fitting `y = sin(x)`:
 
 ```ts
 import {
   Module, compileModule,
   add, mul, sub, sumLast, reshape, matmul, relu,
   type Tensor,
 } from 'tensorgrad'
 
 class Linear extends Module {
   W: Tensor; b: Tensor
   constructor(public inDim: number, public outDim: number) {
     super()
     this.W = this.param([inDim, outDim]) // randn, scale 0.02
     this.b = this.param([outDim], { init: 'zeros' })
   }
 }
 
 class MLP extends Module {
   l1 = new Linear(1, 64)
   l2 = new Linear(64, 64)
   l3 = new Linear(64, 1)
 }
 
 const linear = (p: Linear, x: Tensor) => add(matmul(x, p.W), p.b)
 
 function forward(m: MLP, x: Tensor): Tensor {
   return linear(m.l3, relu(linear(m.l2, relu(linear(m.l1, x)))))
 }
 
-function loss(m: MLP, x: Tensor, y: Tensor): Tensor {
+function loss(m: MLP, { x, y }: { x: Tensor; y: Tensor }): Tensor {
   const diff = sub(forward(m, x), y)
   return mul(sumLast(reshape(mul(diff, diff), [B])), 1 / B)
 }
 
 const B = 256
 const compiled = await compileModule(() => new MLP(), loss, {
   adam: { lr: 0.005 },
-  inputs: [
-    { name: 'x', shape: [B, 1], dtype: 'f32' },
-    { name: 'y', shape: [B, 1], dtype: 'f32' },
-  ],
+  inputs: {
+    x: { shape: [B, 1], dtype: 'f32' },
+    y: { shape: [B, 1], dtype: 'f32' },
+  },
 })
 
-compiled.uploadInitialParams() // applies the per-param init declared above
+// Initial params are uploaded automatically — no manual step needed.
 
 for (let step = 0; step < 1000; step++) {
   const { x, y } = generateBatch()
   const lossVal = await compiled.step({ x, y })
   if (step % 100 === 0) console.log('step', step, 'loss', lossVal)
 }
 ```
 
 That's the whole user-facing surface for this model: `Module` for parameter
 storage, plain functions for the forward pass, `compileModule` to JIT-compile
 to WGSL with autograd + Adam wired in. No decorators, no `tf.GradientTape`,
 no `register_pytree_node`.
 
 For a more involved example — a 3-layer transformer trained from scratch on
 2-digit addition — see the [`samples/`](./samples) workspace
 (`pnpm --filter samples dev`).
 
 ## What this library is for
 
 Small browser-side ML where you want to *train* the model, not just run
 inference of a pretrained model. Educational artifacts, interactive
 demos, on-device personalization, "transformer from scratch in your browser"
 blog posts. Roughly the niche where the model is small enough to fit
 comfortably in a browser tab but where you still want autograd and a real
 optimizer.
 
 If you want to ship inference of a pretrained model, use
 [ONNX Runtime Web](https://github.com/microsoft/onnxruntime) or
 [transformers.js](https://github.com/xenova/transformers.js).
 If you need full JAX (vmap / pmap / dynamic shapes / multi-backend), use
 [jax-js](https://github.com/jax-js/jax).
 
 ## Scope (deliberately small)
 
 The library only does what it does because of what it doesn't do. The
 load-bearing "out of scope" decisions are:
 
 - **WebGPU only** — no Wasm or WebGL fallback.
 - **Static shapes only** — every shape is fixed at compile time. This is
   what lets us bake constants into the WGSL instead of carrying shape
   uniforms.
 - **`grad` is the only transformation** — no `vmap`, `pmap`, `jvp`,
   `custom_vjp`. Batch your data explicitly.
 - **`f32` only** — no dtype promotion, no mixed precision.
 - **Closed op set** — about 25 ops, listed in `SPEC.md`. Compositions of
   those handle most needs (GELU, RMS norm, etc. are a few lines on top).
 - **Adam lives in the IR** — bias correction included; no CPU↔GPU
   round-trip per step.
 
 ## Status
 
 Alpha. Two real working models (a transformer training to <0.1 loss on
 addition, an MLP fitting `sin`). API may change before 1.0. Filing issues
 welcome.
 
 ## License
 
 MIT
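
The scope list above claims GELU and RMS norm are "a few lines on top" of the closed op set. A minimal sketch of the tanh-approximation GELU as such a composition, using only the `add`/`mul` signatures shown in the README; `tanh` itself is an assumption here (the exact op names live in `SPEC.md`, which this diff doesn't show):

```ts
import { add, mul, type Tensor } from 'tensorgrad'

// ASSUMPTION: `tanh` is one of the ~25 primitive ops listed in SPEC.md.
declare function tanh(x: Tensor): Tensor

// GELU, tanh approximation: 0.5·x·(1 + tanh(√(2/π)·(x + 0.044715·x³))),
// written with only tensor+tensor add and tensor×scalar / tensor×tensor mul.
const gelu = (x: Tensor): Tensor => {
  const c = Math.sqrt(2 / Math.PI)                  // ≈ 0.7978845608
  const x3 = mul(mul(x, x), x)                      // x³
  const t = tanh(mul(add(x, mul(x3, 0.044715)), c)) // tanh(√(2/π)·(x + 0.044715·x³))
  const half = mul(x, 0.5)
  return add(half, mul(half, t))                    // 0.5·x + 0.5·x·t
}
```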
package/dist/compile.d.ts CHANGED
@@ -5,14 +5,29 @@ import { type BufferPlan } from './buffers.js';
 import { type KernelSpec } from './codegen.js';
 import { type CompiledRuntime, type CompiledForward, type RuntimeOpts } from './runtime.js';
 import { Module } from './module.js';
-/** Declares one input tensor of the model's forward function. Order matches
- * the function's parameter list (after `model`). The `name` is used at
- * runtime to upload data via `step({ [name]: data })`. */
+/** Declares one input tensor of the model's forward function. The name is the
+ * key in the `inputs:` Record at compile time and the key on the `step()`/
+ * `run()` data object at runtime. */
 export interface InputDecl {
-    name: string;
     shape: Shape;
     dtype?: Dtype;
 }
+/** Inputs declaration: a Record from input name to its shape/dtype. The name
+ * doubles as the key the forward fn destructures and the key the runtime
+ * expects in `step({...})` / `run({...})`. */
+export type InputDecls = Record<string, InputDecl>;
+/** Maps an `InputDecls` Record to its forward-time tensor counterpart —
+ * same keys, each value is a Tensor. Used to type the forward function's
+ * `inputs` argument from the declared shape Record. */
+export type InputsTensors<I extends InputDecls> = {
+    [K in keyof I]: Tensor;
+};
+/** Forward function shape: takes the materialized model and a Record of
+ * named input tensors (matching the declared `inputs:` keys), returns the
+ * output tensor (loss for compileModule; logits/etc. for compileForward).
+ * The second generic flows from the inputs declaration so destructuring
+ * the input record stays typed. */
+export type ForwardFn<M extends Module, I extends InputDecls = InputDecls> = (m: M, inputs: InputsTensors<I>) => Tensor;
 export interface CompiledIR {
     graph: GradResult['graph'];
     paramGrads: GradResult['paramGrads'];
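
The new types above are the core of this release's API change: the keys of the `inputs` declaration flow through `InputsTensors` into the forward function's second argument. A sketch of how that typing is meant to read at a call site; it assumes these types are re-exported from the package root and that a plain `number[]` satisfies `Shape`, and `Net` is a made-up module for illustration:

```ts
import { Module, type InputDecls, type ForwardFn, type Tensor } from 'tensorgrad'

// Made-up two-input model, for illustration only.
class Net extends Module {
  W: Tensor = this.param([16, 16])
}

// The declaration's keys double as the forward fn's input names
// and as the runtime keys for step()/run().
const decls = {
  tokens:  { shape: [8, 16], dtype: 'i32' },
  targets: { shape: [8, 16], dtype: 'i32' },
} satisfies InputDecls

// InputsTensors<typeof decls> is { tokens: Tensor; targets: Tensor },
// so the destructuring below is checked; a typo like `target` is a type error.
const fwd: ForwardFn<Net, typeof decls> = (m, { tokens, targets }) => {
  // ...build and return the loss tensor from m, tokens, targets...
  return tokens // placeholder so the sketch type-checks
}
```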
@@ -26,18 +41,45 @@ export declare function compileToIR(traceFn: () => Tensor): CompiledIR;
 export declare function compile(traceFn: () => Tensor, opts?: RuntimeOpts): Promise<CompiledRuntime & {
     ir: CompiledIR;
 }>;
-export interface CompileModuleOptions extends RuntimeOpts {
-    /** Per-step data inputs to the forward function. Order matches the forward
-     * function's parameters (after the model). e.g. for
-     * `(model, tokens, targets, mask) => loss`, inputs is
-     * `[{name:'tokens',...}, {name:'targets',...}, {name:'mask',...}]`. */
-    inputs?: InputDecl[];
+export interface CompileModuleOptions<I extends InputDecls = InputDecls> extends RuntimeOpts {
+    /** Per-step data inputs to the forward function, keyed by name. The forward
+     * fn destructures these out of its second argument; runtime calls to
+     * `step()` / `run()` pass typed arrays under the same keys. */
+    inputs?: I;
     /** Adam hyperparameters. If omitted, no optimizer is appended (forward-only). */
     adam?: AdamConfig;
 }
-export interface CompileForwardOptions extends RuntimeOpts {
-    /** Per-step data inputs to the forward function. */
-    inputs?: InputDecl[];
+export interface CompileForwardOptions<I extends InputDecls = InputDecls> extends RuntimeOpts {
+    /** Per-step data inputs to the forward function, keyed by name. */
+    inputs?: I;
+}
+/** Forward-only compile options as taken by the `compileForward` *method* on
+ * a training runtime — no `device` (inherited) and no `sharedParams`
+ * (auto-supplied from the train graph's params). */
+export interface CompileForwardMethodOptions<I extends InputDecls = InputDecls> {
+    inputs?: I;
+}
+/** Returned by `compileModule`. Adds training-graph extras (auto-init, reset,
+ * sibling-graph compile) on top of the base runtime. */
+export interface CompiledModule<M extends Module> extends CompiledRuntime {
+    ir: CompiledIR;
+    /** Number of dispatchable kernels (excludes leaf no-ops). */
+    kernelCount: number;
+    /** Re-initialize all params from their declared init specs and zero the
+     * optimizer state. Use to start training over without recompiling. */
+    reset(): void;
+    /** Compile a sibling forward-only graph (e.g., a B=1 inference graph or a
+     * B=N held-out eval graph) that shares this runtime's device and param
+     * buffers. Pass the forward fn (typically distinct from your loss fn —
+     * it returns logits, not a scalar) and any shape changes via `inputs`.
+     * Auto-initialization is a no-op since params are shared. */
+    compileForward<I extends InputDecls>(forward: ForwardFn<M, I>, opts?: CompileForwardMethodOptions<I>): Promise<CompiledForwardModule>;
+}
+/** Returned by `compileForward` (and by the `compileForward` method). */
+export interface CompiledForwardModule extends CompiledForward {
+    ir: CompiledIR;
+    /** Number of dispatchable kernels (excludes leaf no-ops). */
+    kernelCount: number;
 }
 /**
  * Compile a Module-based model. Pass a *factory* `() => new Model()`, not the
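
Together, `reset()` and the `compileForward` method describe a train/eval workflow with a single compile of the training graph. A usage sketch against the declarations above, reusing the `MLP` and `forward` from the README example; shapes and hyperparameters are illustrative:

```ts
import { compileModule, sub, mul, sumLast, reshape, type Tensor } from 'tensorgrad'
// MLP and forward are the README example's module and forward pass.

const B = 256
const lossFn = (m: MLP, { x, y }: { x: Tensor; y: Tensor }) => {
  const d = sub(forward(m, x), y)
  return mul(sumLast(reshape(mul(d, d), [B])), 1 / B) // mean squared error
}

// Training graph: loss forward with Adam appended.
const train = await compileModule(() => new MLP(), lossFn, {
  adam: { lr: 0.005 },
  inputs: {
    x: { shape: [B, 1], dtype: 'f32' },
    y: { shape: [B, 1], dtype: 'f32' },
  },
})

// Sibling B=1 inference graph: inherits the device and binds to the same
// param buffers, so every train step is immediately visible to infer.run().
const infer = await train.compileForward((m, { x }: { x: Tensor }) => forward(m, x), {
  inputs: { x: { shape: [1, 1], dtype: 'f32' } },
})

console.log(train.kernelCount, infer.kernelCount) // dispatchable kernels only

// Start training over without recompiling: re-init params, zero Adam state.
train.reset()
```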
@@ -45,37 +87,44 @@ export interface CompileForwardOptions extends RuntimeOpts {
  * field becomes a real `Tensor`), so the instance is consumed and shouldn't be
  * referenced afterwards. Re-call the factory if you need a fresh tree.
  *
- * The forward function takes the materialized model and returns the loss
- * tensor.
+ * The forward function takes the materialized model and a Record of named
+ * input tensors, returns the loss tensor. Inputs are matched by name with the
+ * `inputs:` declaration:
+ *
+ *     inputs: {
+ *       tokens: { shape: [B, T], dtype: 'i32' },
+ *       targets: { shape: [B, T], dtype: 'i32' },
+ *     }
+ *     forward: (m, { tokens, targets }) => …
  *
  * Walks the module tree to materialize params with auto-derived names, then
- * runs trace → grad → adam → buffer plan → codegen → runtime.
+ * runs trace → grad → adam → buffer plan → codegen → runtime. Initial
+ * parameter values are uploaded automatically before this function returns;
+ * call `reset()` later to re-randomize.
  *
  * If `opts.adam` is set, the runtime's `step()` automatically tracks an
  * internal step count and injects the bias-corrected `lrt` scalar each call;
  * users don't need to provide it themselves.
  */
-export declare function compileModule<M extends Module>(modelFactory: () => M, forward: (m: M, ...inputs: Tensor[]) => Tensor, opts?: CompileModuleOptions): Promise<CompiledRuntime & {
-    ir: CompiledIR;
-    uploadInitialParams: () => void;
-}>;
+export declare function compileModule<M extends Module, I extends InputDecls = InputDecls>(modelFactory: () => M, forward: ForwardFn<M, I>, opts?: CompileModuleOptions<I>): Promise<CompiledModule<M>>;
 /**
  * Compile a Module-based model in forward-only mode (no autograd, no Adam).
  * The forward function returns the output tensor (e.g., logits) instead of a
  * scalar loss; runtime exposes `run(inputs)` returning the full output as a
  * `Float32Array`.
  *
+ * **Prefer the `compileForward` method on a training runtime** when both
+ * graphs use the same Module class — it auto-supplies `device` and
+ * `sharedParams`. This standalone form is for forward-only models with no
+ * training graph at all, or for sharing params across a different model.
+ *
  * **Sharing params with a training compile.** Pass `opts.sharedParams =
  * trainCompiled.params` to bind this graph's param buffers to an existing
  * training runtime's GPU buffers — every train step is then immediately
- * visible to `run()` calls here, no copies. The forward graph's
- * `uploadInitialParams()` skips any param covered by `sharedParams`.
+ * visible to `run()` calls here, no copies.
  *
- * Typical use: a B=1 inference graph alongside a B=512 training graph,
- * built from the same `Module` factory.
+ * Initial param values are uploaded automatically for params *not* covered
+ * by `sharedParams` (those are owned by the sibling compile).
  */
-export declare function compileForward<M extends Module>(modelFactory: () => M, forward: (m: M, ...inputs: Tensor[]) => Tensor, opts?: CompileForwardOptions): Promise<CompiledForward & {
-    ir: CompiledIR;
-    uploadInitialParams: () => void;
-}>;
+export declare function compileForward<M extends Module, I extends InputDecls = InputDecls>(modelFactory: () => M, forward: ForwardFn<M, I>, opts?: CompileForwardOptions<I>): Promise<CompiledForwardModule>;
 //# sourceMappingURL=compile.d.ts.map
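
And the standalone form, which the comment above reserves for models with no training graph at all. A minimal sketch, again reusing the README's `MLP` and `forward`; that `run()` must be awaited is an assumption here (GPU readback is asynchronous), as is the exact typed-array input format:

```ts
import { compileForward } from 'tensorgrad'

// Standalone forward-only compile: no autograd, no Adam. Initial params are
// uploaded automatically, since nothing here is covered by sharedParams.
const net = await compileForward(() => new MLP(), (m, { x }) => forward(m, x), {
  inputs: { x: { shape: [1, 1], dtype: 'f32' } },
})

// run() takes data under the declared input keys and yields the full output
// as a Float32Array (the await is an assumption).
const out = await net.run({ x: new Float32Array([0.5]) })
console.log(out[0]) // model output for x = 0.5 (untrained here)
```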
package/dist/compile.d.ts.map CHANGED
@@ -1 +1 @@
(sourcemap mappings regenerated; machine-generated VLQ content omitted)