@genai-fi/nanogpt 0.2.9 → 0.2.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/dist/Generator.d.ts +2 -0
  2. package/dist/Generator.js +37 -32
  3. package/dist/NanoGPTModel.d.ts +4 -1
  4. package/dist/NanoGPTModel.js +33 -25
  5. package/dist/TeachableLLM.d.ts +4 -0
  6. package/dist/TeachableLLM.js +32 -15
  7. package/dist/{complex-Cd8sqiBC.js → complex-CJ-qCcLB.js} +6 -6
  8. package/dist/{index-Dsg28SG6.js → index-YPKosni4.js} +59 -51
  9. package/dist/layers/BaseLayer.d.ts +8 -0
  10. package/dist/layers/BaseLayer.js +18 -0
  11. package/dist/layers/CausalSelfAttention.d.ts +4 -1
  12. package/dist/layers/CausalSelfAttention.js +47 -55
  13. package/dist/layers/MLP.d.ts +2 -1
  14. package/dist/layers/MLP.js +16 -14
  15. package/dist/layers/RMSNorm.d.ts +2 -1
  16. package/dist/layers/RMSNorm.js +13 -11
  17. package/dist/layers/RoPECache.d.ts +4 -2
  18. package/dist/layers/RoPECache.js +13 -7
  19. package/dist/layers/TiedEmbedding.js +16 -15
  20. package/dist/layers/TransformerBlock.d.ts +4 -1
  21. package/dist/layers/TransformerBlock.js +9 -5
  22. package/dist/main.js +18 -16
  23. package/dist/{mat_mul-BAYDrXvE.js → mat_mul-Bu7bhLms.js} +5 -5
  24. package/dist/ops/attentionMask.js +31 -25
  25. package/dist/ops/gatherSub.js +2 -2
  26. package/dist/ops/node/sparseCrossEntropy.js +1 -1
  27. package/dist/ops/qkv.d.ts +7 -0
  28. package/dist/ops/qkv.js +127 -0
  29. package/dist/ops/rope.d.ts +8 -0
  30. package/dist/ops/rope.js +153 -0
  31. package/dist/ops/scatterSub.js +14 -14
  32. package/dist/reshape-DmnmKT6r.js +25 -0
  33. package/dist/{stack-1o648CP_.js → stack-BtKpB0Ry.js} +5 -5
  34. package/dist/sum-D7fu15XL.js +27 -0
  35. package/dist/training/AdamExt.js +1 -1
  36. package/dist/training/Trainer.js +30 -29
  37. package/dist/training/sparseCrossEntropy.js +34 -33
  38. package/dist/utilities/profile.d.ts +10 -0
  39. package/dist/utilities/profile.js +29 -0
  40. package/package.json +1 -1
  41. package/dist/sum-NWazHI7f.js +0 -49
@@ -1,6 +1,6 @@
  import { engine as l } from "@tensorflow/tfjs";
- import { o as g, c as i, E as b, G as d, r as c, a as h } from "../index-Dsg28SG6.js";
- import { r as p, s as f } from "../stack-1o648CP_.js";
+ import { o as g, d as i, E as b, G as d, r as c, b as h } from "../index-YPKosni4.js";
+ import { r as p, s as f } from "../stack-BtKpB0Ry.js";
  /**
  * @license
  * Copyright 2018 Google LLC. All Rights Reserved.
@@ -1,4 +1,4 @@
- import { r as o } from "../../index-Dsg28SG6.js";
+ import { r as o } from "../../index-YPKosni4.js";
  function r(e) {
  const { logits: t, labels: n } = e.inputs;
  return e.backend.executeMultipleOutputs("SparseSoftmaxCrossEntropyWithLogits", [], [t, n], 2);
@@ -0,0 +1,7 @@
+ import { Tensor } from '@tensorflow/tfjs';
+ import { TensorInfo, NamedTensorInfoMap, NamedAttrMap } from '@tensorflow/tfjs-core';
+ export declare function qkvCPU(args: {
+ inputs: NamedTensorInfoMap;
+ attrs?: NamedAttrMap;
+ }): TensorInfo[];
+ export declare function qkv(x: Tensor, kernel: Tensor, heads: number): Tensor[];
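The fused QKV op added here folds the three attention projections into a single kernel call. Reading the declaration together with the kernels in ops/qkv.js below: x is [batch, seq, channels], kernel is the column-concatenated weight [channels, 3 * channels], and the result is [q, k, v], each [batch, heads, seq, channels / heads]. A minimal usage sketch (the deep-import path is an assumption; the package may re-export the op elsewhere):

    import * as tf from "@tensorflow/tfjs";
    import { qkv } from "@genai-fi/nanogpt/dist/ops/qkv"; // hypothetical import path

    const batch = 2, seq = 8, channels = 64, heads = 4;
    const x = tf.randomNormal([batch, seq, channels]);        // token activations
    const kernel = tf.randomNormal([channels, 3 * channels]); // Wq | Wk | Wv, concatenated
    const [q, k, v] = qkv(x, kernel, heads);                  // each [2, 4, 8, 16]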
@@ -0,0 +1,127 @@
+ import { engine as C } from "@tensorflow/tfjs";
+ import { o as N, d as V, E as Q, i as T, r as b, c as G } from "../index-YPKosni4.js";
+ import { r as x } from "../reshape-DmnmKT6r.js";
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ function M(r, e, o = 0) {
+ const t = { x: V(r, "x", "split") }, s = { numOrSizeSplits: e, axis: o };
+ return Q.runKernel(T, t, s);
+ }
+ const P = /* @__PURE__ */ N({ split_: M });
+ class S {
+ variableNames = ["x", "kernel"];
+ outputShape;
+ userCode;
+ // enableShapeUniforms = true;
+ customUniforms = [{ name: "mode", type: "int" }];
+ constructor(e, o, a, t) {
+ const s = t / o;
+ this.outputShape = [e, o, a, s], this.userCode = `
+ void main() {
+ ivec4 coords = getOutputCoords(); // [b, h, t, d]
+ int b = coords.x;
+ int h = coords.y;
+ int t = coords.z;
+ int d = coords.w;
+
+ // Compute output channel index in fused kernel
+ int out_offset = mode * ${o} * ${s} + h * ${s} + d;
+
+ float sum = 0.0;
+ for (int c = 0; c < ${t}; ++c) {
+ float xval = getX(b, t, c); // fetch from x
+ float kval = getKernel(c, out_offset); // fetch from kernel
+ sum += xval * kval;
+ }
+
+ setOutput(sum);
+ }
+ `;
+ }
+ }
+ function $(r) {
+ const { x: e, kernel: o } = r.inputs, { heads: a } = r.attrs, t = r.backend, s = e.shape[0], l = e.shape[1], d = e.shape[2], c = new S(s, a, l, d);
+ return [
+ t.runWebGLProgram(c, [e, o], "float32", [[0]]),
+ t.runWebGLProgram(c, [e, o], "float32", [[1]]),
+ t.runWebGLProgram(c, [e, o], "float32", [[2]])
+ ];
+ }
+ const w = {
+ kernelName: "QKV",
+ backendName: "webgl",
+ kernelFunc: $
+ };
+ b(w);
+ function q(r) {
+ const { x: e, kernel: o } = r.inputs, { heads: a } = r.attrs, [t, s, l] = e.shape, d = x(e, [t * s, l]), c = d.dot(o);
+ d.dispose();
+ const n = x(c, [t, s, 3 * l]);
+ c.dispose();
+ const [p, m, k] = P(n, 3, -1);
+ n.dispose();
+ const f = l / a, v = x(p, [t, s, a, f]);
+ p.dispose();
+ const g = v.transpose([0, 2, 1, 3]);
+ v.dispose();
+ const u = x(m, [t, s, a, f]);
+ m.dispose();
+ const h = u.transpose([0, 2, 1, 3]);
+ u.dispose();
+ const i = x(k, [t, s, a, f]);
+ k.dispose();
+ const K = i.transpose([0, 2, 1, 3]);
+ return i.dispose(), [g, h, K];
+ }
+ const F = {
+ kernelName: "QKV",
+ backendName: "cpu",
+ kernelFunc: q
+ };
+ b(F);
+ const L = {
+ kernelName: "QKV",
+ backendName: "tensorflow",
+ kernelFunc: q
+ };
+ b(L);
+ function y(r, e, o) {
+ return C().runKernel("QKV", { x: r, kernel: e }, { heads: o });
+ }
+ const _ = {
+ kernelName: "QKV",
+ inputsToSave: ["x", "kernel"],
+ outputsToSave: [],
+ gradFunc: (r, e) => {
+ const [o, a, t] = r, [s, l] = e, [d, c, n] = s.shape, p = o.transpose([0, 2, 1, 3]).reshape([d * c, n]), m = a.transpose([0, 2, 1, 3]).reshape([d * c, n]), k = t.transpose([0, 2, 1, 3]).reshape([d * c, n]), f = l.slice([0, 0], [n, n]), v = l.slice([0, n], [n, n]), g = l.slice([0, 2 * n], [n, n]);
+ return {
+ x: () => {
+ const u = p.matMul(f, !1, !0), h = m.matMul(v, !1, !0), i = k.matMul(g, !1, !0);
+ return u.add(h).add(i).reshape([d, c, n]);
+ },
+ kernel: () => {
+ const u = s.reshape([d * c, n]), h = u.matMul(p, !0, !1), i = u.matMul(m, !0, !1), K = u.matMul(k, !0, !1);
+ return h.concat(i, 1).concat(K, 1);
+ }
+ };
+ }
+ };
+ G(_);
+ export {
+ y as qkv,
+ q as qkvCPU
+ };
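For orientation, the CPU kernelFunc q above is the unfused equivalent of what the WebGL program computes per mode 0/1/2: one matmul against the fused kernel, a three-way split, then a per-head reshape and transpose to [batch, heads, seq, headDim]. The registered gradient slices the fused kernel back into its Wq/Wk/Wv column blocks ([0,n), [n,2n), [2n,3n)) and accumulates dX = dQ·Wqᵀ + dK·Wkᵀ + dV·Wvᵀ. A readable sketch of the forward path (a reference for shape checks, not the package API):

    import * as tf from "@tensorflow/tfjs";

    // Unfused reference for the "QKV" kernel above.
    function qkvReference(x: tf.Tensor3D, kernel: tf.Tensor2D, heads: number): tf.Tensor4D[] {
      const [b, t, c] = x.shape;                                  // kernel: [c, 3c]
      const headDim = c / heads;
      const flat = x.reshape([b * t, c]).matMul(kernel);          // [b*t, 3c]
      const parts = tf.split(flat.reshape([b, t, 3 * c]), 3, -1); // q, k, v: [b, t, c]
      return parts.map(
        (p) => p.reshape([b, t, heads, headDim]).transpose([0, 2, 1, 3]) as tf.Tensor4D
      );
    }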
@@ -0,0 +1,8 @@
+ import { default as RoPECache } from '../layers/RoPECache';
+ import { Tensor } from '@tensorflow/tfjs';
+ import { TensorInfo, NamedTensorInfoMap, NamedAttrMap } from '@tensorflow/tfjs-core';
+ export declare function ropeCPU(args: {
+ inputs: NamedTensorInfoMap;
+ attrs?: NamedAttrMap;
+ }): TensorInfo;
+ export declare function rope(x: Tensor, cache: RoPECache, pastLength: number): Tensor;
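From rope() in ops/rope.js below, the cache argument only needs three members: ensureRopeCache(len), getSin(), and getCos(), with sin/cos laid out as [position, pairIdx, 1] so the shader can read getCos(t + pastLen, pairIdx, 0). A hypothetical stand-in satisfying that contract, assuming the standard RoPE frequency schedule with base 10000 (the real layers/RoPECache may differ):

    import * as tf from "@tensorflow/tfjs";

    // Illustrative only; not the package's RoPECache.
    class MiniRopeCache {
      private sin!: tf.Tensor;
      private cos!: tf.Tensor;
      constructor(private rotaryDim: number, private base = 10000) {}
      ensureRopeCache(maxPos: number): void {
        const pairs = this.rotaryDim / 2;
        const inv = tf.pow(this.base, tf.range(0, pairs).div(pairs).neg()); // [pairs]
        const angles = tf.range(0, maxPos).reshape([maxPos, 1])
          .mul(inv.reshape([1, pairs]));                                    // [maxPos, pairs]
        this.sin = angles.sin().reshape([maxPos, pairs, 1]);
        this.cos = angles.cos().reshape([maxPos, pairs, 1]);
      }
      getSin(): tf.Tensor { return this.sin; }
      getCos(): tf.Tensor { return this.cos; }
    }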
@@ -0,0 +1,153 @@
+ import { engine as D } from "@tensorflow/tfjs";
+ import { o as G, l as F, k as _, n as z, E as K, p as O, d as T, q as U, r as g, c as A } from "../index-YPKosni4.js";
+ import { r as $, s as B } from "../stack-BtKpB0Ry.js";
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ function W(e, t = 0) {
+ F(e.length >= 1, () => "Pass at least one tensor to concat");
+ const o = _(e, "tensors", "concat", "string_or_numeric");
+ if (o[0].dtype === "complex64" && o.forEach((s) => {
+ if (s.dtype !== "complex64")
+ throw new Error(`Cannot concatenate complex64 tensors with a tensor
+ with dtype ${s.dtype}. `);
+ }), o.length === 1)
+ return z(o[0]);
+ const n = o, r = { axis: t };
+ return K.runKernel(O, n, r);
+ }
+ const j = /* @__PURE__ */ G({ concat_: W });
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ function H(e, t, o = 0, n = 0) {
+ const r = T(e, "x", "gather"), s = T(t, "indices", "gather", "int32"), c = { x: r, indices: s }, a = { axis: o, batchDims: n };
+ return K.runKernel(U, c, a);
+ }
+ const E = /* @__PURE__ */ G({ gather_: H });
+ class J {
+ variableNames = ["x", "sin", "cos"];
+ outputShape;
+ userCode;
+ // enableShapeUniforms = true;
+ constructor(t, o, n, r, s) {
+ this.outputShape = [t, o, n, r], this.userCode = `
+ void main() {
+ ivec4 coords = getOutputCoords(); // [b, h, t, d]
+ int b = coords.x;
+ int h = coords.y;
+ int t = coords.z;
+ int d = coords.w;
+
+ int rotaryDim = ${r};
+
+ float outVal = 0.0;
+
+ if (d < rotaryDim) {
+ int pairIdx = d / 2;
+ float cos = getCos(t + ${s}, pairIdx, 0);
+ float sin = getSin(t + ${s}, pairIdx, 0);
+
+ if (d % 2 == 0) {
+ // even index
+ float even = getX(b, h, t, d);
+ float odd = getX(b, h, t, d + 1);
+ outVal = even * cos - odd * sin;
+ } else {
+ // odd index
+ float even = getX(b, h, t, d - 1);
+ float odd = getX(b, h, t, d);
+ outVal = even * sin + odd * cos;
+ }
+ } else {
+ // pass through for non-rotary dims
+ outVal = getX(b, h, t, d);
+ }
+
+ setOutput(outVal);
+ }
+ `;
+ }
+ }
+ function M(e) {
+ const { x: t, sin: o, cos: n } = e.inputs, { pastLen: r } = e.attrs, s = e.backend, c = t.shape[0], a = t.shape[1], i = t.shape[2], d = t.shape[3], p = new J(c, a, i, d, r);
+ return s.runWebGLProgram(p, [t, o, n], "float32");
+ }
+ const Q = {
+ kernelName: "Rope",
+ backendName: "webgl",
+ kernelFunc: M
+ };
+ g(Q);
+ function V(e, t, o, n, r) {
+ const s = n.shape[3], c = o;
+ if (c > s) return n;
+ const a = n.shape[2], i = c / 2, d = t.slice([r, 0, 0], [a, i, 1]).reshape([1, 1, a, i]), p = e.slice([r, 0, 0], [a, i, 1]).reshape([1, 1, a, i]), u = n.shape[0], l = n.shape[1], m = $(0, c, 2, "int32"), x = $(1, c, 2, "int32"), X = ((b) => {
+ const v = b.slice([0, 0, 0, 0], [u, l, a, c]), k = c < s ? b.slice([0, 0, 0, c], [u, l, a, s - c]) : null, h = E(v, m, 3), f = E(v, x, 3), C = h.mul(d), y = f.mul(p), R = C.sub(y), N = f.mul(d), S = h.mul(p), w = N.add(S);
+ h.dispose(), f.dispose(), d.dispose(), p.dispose(), C.dispose(), y.dispose(), N.dispose(), S.dispose();
+ const P = B([R, w], -1);
+ R.dispose(), w.dispose();
+ const I = P.reshape([u, l, a, c]);
+ return P.dispose(), k ? j([I, k], 3) : I;
+ })(n);
+ return m.dispose(), x.dispose(), X;
+ }
+ function L(e) {
+ const { x: t, sin: o, cos: n } = e.inputs, { pastLen: r } = e.attrs, s = t.shape[3];
+ return V(o, n, s, t, r);
+ }
+ const Y = {
+ kernelName: "Rope",
+ backendName: "cpu",
+ kernelFunc: L
+ };
+ g(Y);
+ const Z = {
+ kernelName: "Rope",
+ backendName: "tensorflow",
+ kernelFunc: L
+ };
+ g(Z);
+ function st(e, t, o) {
+ return t.ensureRopeCache(e.shape[1]), D().runKernel("Rope", { x: e, sin: t.getSin(), cos: t.getCos() }, { pastLen: o });
+ }
+ const q = {
+ kernelName: "Rope",
+ inputsToSave: ["x", "sin", "cos"],
+ outputsToSave: [],
+ gradFunc: (e, t) => {
+ const [o, n, r] = t, s = n.neg(), c = o.shape[3], i = V(s, r, c, e, 0);
+ return s.dispose(), { x: () => i };
+ }
+ };
+ A(q);
+ export {
+ st as rope,
+ L as ropeCPU
+ };
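The even/odd branches of the WebGL shader and the gather-based CPU path V implement the usual rotary pairing (V gathers the even and odd lanes, rotates them, re-interleaves via stack+reshape, and concatenates the pass-through tail when rotaryDim < headDim). The registered gradient reuses V with sin.neg(): each pair rotation is orthogonal, so the backward pass is just the rotation by the negated angle applied to the upstream gradient. In LaTeX, for a rotary pair at absolute position p = t + pastLen with angle \theta_{p,i}:

    y_{2i}   = x_{2i} \cos\theta_{p,i} - x_{2i+1} \sin\theta_{p,i}
    y_{2i+1} = x_{2i} \sin\theta_{p,i} + x_{2i+1} \cos\theta_{p,i}
    \frac{\partial L}{\partial x} = R(\theta)^{\top} \frac{\partial L}{\partial y} = R(-\theta)\, \frac{\partial L}{\partial y}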
@@ -1,7 +1,7 @@
  import { engine as $ } from "@tensorflow/tfjs";
- import { k as u, l as S, n as p, E as f, p as E, o as N, c as l, q as y, r as h, a as D, m as x } from "../index-Dsg28SG6.js";
- import { c as m } from "../complex-Cd8sqiBC.js";
- import { r as v, s as T } from "../stack-1o648CP_.js";
+ import { t as u, u as S, v as l, E as f, w as E, o as N, d as h, x as y, r as p, b as x, a as D } from "../index-YPKosni4.js";
+ import { c as d } from "../complex-CJ-qCcLB.js";
+ import { r as v, s as T } from "../stack-BtKpB0Ry.js";
  /**
  * @license
  * Copyright 2018 Google LLC. All Rights Reserved.
@@ -21,9 +21,9 @@ import { r as v, s as T } from "../stack-1o648CP_.js";
  function i(e, t = "float32") {
  if (u(e), t === "complex64") {
  const a = i(e, "float32"), o = i(e, "float32");
- return m(a, o);
+ return d(a, o);
  }
- const r = S(p(e), t);
+ const r = S(l(e), t);
  return f.makeTensor(r, e, t);
  }
  /**
@@ -42,12 +42,12 @@ function i(e, t = "float32") {
  * limitations under the License.
  * =============================================================================
  */
- function d(e, t = "float32") {
+ function m(e, t = "float32") {
  if (u(e), t === "complex64") {
- const a = d(e, "float32"), o = i(e, "float32");
- return m(a, o);
+ const a = m(e, "float32"), o = i(e, "float32");
+ return d(a, o);
  }
- const r = E(p(e), t);
+ const r = E(l(e), t);
  return f.makeTensor(r, e, t);
  }
  function C(e, t, r) {
@@ -100,7 +100,7 @@ function O(e, t, r) {
  */
  function z(e, t, r) {
  u(r);
- const a = l(e, "indices", "scatterND", "int32"), o = l(t, "updates", "scatterND");
+ const a = h(e, "indices", "scatterND", "int32"), o = h(t, "updates", "scatterND");
  O(o, a, r);
  const s = { indices: a, updates: o }, n = { shape: r };
  return f.runKernel(y, s, n);
@@ -131,17 +131,17 @@ const K = {
  backendName: "webgl",
  kernelFunc: P
  };
- h(K);
+ p(K);
  function A(e) {
- const { logits: t, labels: r, dy: a } = e.inputs, o = r.shape[0], s = t.shape[1], n = v(0, o, 1, "int32"), c = T([n, r], 1), b = d([o]), g = I(c, b, [o, s]), k = D(t, g), w = a.reshape([o, 1]);
- return x(k, w);
+ const { logits: t, labels: r, dy: a } = e.inputs, o = r.shape[0], s = t.shape[1], n = v(0, o, 1, "int32"), c = T([n, r], 1), b = m([o]), g = I(c, b, [o, s]), k = x(t, g), w = a.reshape([o, 1]);
+ return D(k, w);
  }
  const F = {
  kernelName: "EfficientScatterSub",
  backendName: "cpu",
  kernelFunc: A
  };
- h(F);
+ p(F);
  function R(e, t, r) {
  return $().runKernel("EfficientScatterSub", { logits: e, labels: t, dy: r }, {});
  }
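Modulo the re-minified chunk aliases (in the new build x appears to resolve to sub and D to mul, mirroring the a→b renames in the other hunks), the CPU EfficientScatterSub kernel computes the same thing as before: scatter ones into a one-hot matrix at (i, labels[i]), subtract it from the first input, and scale each row by dy. As a formula:

    \text{out}_{i,j} = \left(\text{logits}_{i,j} - \mathbb{1}[\, j = \text{labels}_i \,]\right) \cdot dy_i

which is the familiar dense gradient form of sparse softmax cross-entropy when the tensor passed as logits holds softmax probabilities.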
@@ -0,0 +1,25 @@
+ import { o, d as t, E as a, R as p } from "./index-YPKosni4.js";
+ /**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ function c(r, s) {
+ const e = { x: t(r, "x", "reshape", "string_or_numeric") }, n = { shape: s };
+ return a.runKernel(p, e, n);
+ }
+ const x = /* @__PURE__ */ o({ reshape_: c });
+ export {
+ x as r
+ };
@@ -1,4 +1,4 @@
- import { E as e, R as c, o as f, g as u, h as a, P as i } from "./index-Dsg28SG6.js";
+ import { E as e, j as c, o as f, k as u, l as a, P as i } from "./index-YPKosni4.js";
  /**
  * @license
  * Copyright 2018 Google LLC. All Rights Reserved.
@@ -15,7 +15,7 @@ import { E as e, R as c, o as f, g as u, h as a, P as i } from "./index-Dsg28SG6
  * limitations under the License.
  * =============================================================================
  */
- function h(n, s, t = 1, r = "float32") {
+ function g(n, s, t = 1, r = "float32") {
  if (t === 0)
  throw new Error("Cannot have a step of zero");
  const o = { start: n, stop: s, step: t, dtype: r };
@@ -43,8 +43,8 @@ function k(n, s = 0) {
  const r = t, o = { axis: s };
  return e.runKernel(i, r, o);
  }
- const l = /* @__PURE__ */ f({ stack_: k });
+ const h = /* @__PURE__ */ f({ stack_: k });
  export {
- h as r,
- l as s
+ g as r,
+ h as s
  };
@@ -0,0 +1,27 @@
+ import { o as e, d as u, h as c, E as l, S as m } from "./index-YPKosni4.js";
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ function i(t, o = null, n = !1) {
+ let s = u(t, "x", "sum");
+ s.dtype === "bool" && (s = c(s, "int32"));
+ const r = { x: s }, a = { axis: o, keepDims: n };
+ return l.runKernel(m, r, a);
+ }
+ const f = /* @__PURE__ */ e({ sum_: i });
+ export {
+ f as s
+ };
@@ -1,4 +1,4 @@
- import { A as r, m as c, s as h, a as g, e as o } from "../index-Dsg28SG6.js";
+ import { A as r, a as c, s as h, b as g, e as o } from "../index-YPKosni4.js";
  class u extends r {
  constructor(t, e, s, a, i) {
  super(t, e, s, a), this.config = i, this.startLearningRate = t;
@@ -1,8 +1,8 @@
  import { DatasetBuilder as d } from "./DatasetBuilder.js";
- import p from "./AdamExt.js";
- class u {
- constructor(t, e, s, i = 1e-3) {
- this.tokenizer = s, this.tf = t, this.model = e, this.learningRate = i, this.resetOptimizer(), this.datasetBuilder = new d(this.tf, s, e.config.blockSize);
+ import h from "./AdamExt.js";
+ class g {
+ constructor(t, s, e, i = 1e-3) {
+ this.tokenizer = e, this.tf = t, this.model = s, this.learningRate = i, this.resetOptimizer(), this.datasetBuilder = new d(this.tf, e, s.config.blockSize);
  }
  model;
  optimizer;
@@ -25,7 +25,7 @@ class u {
  }
  resetOptimizer(t = { learningRateFactor: 1, beta1: 0.9, beta2: 0.99, epsilon: 1e-8 }) {
  this.optimizer && this.optimizer.dispose();
- const e = new p(
+ const s = new h(
  t.learningRateFactor * this.learningRate,
  t.beta1,
  t.beta2,
@@ -37,58 +37,59 @@ class u {
  weightDecay: 0
  }
  );
- this.optimizer = e;
+ this.optimizer = s;
  }
  printGradients(t) {
- Object.keys(t).forEach((e) => {
- const s = t[e];
- console.log(`${e}:`), console.log(` Shape: ${s.shape}`), console.log(` Mean: ${this.tf.mean(s).dataSync()[0]}`), console.log(` Std: ${this.tf.moments(s).variance.sqrt().dataSync()[0]}`), console.log(` Min: ${this.tf.min(s).dataSync()[0]}`), console.log(` Max: ${this.tf.max(s).dataSync()[0]}`), console.log(` Norm: ${this.tf.norm(s).dataSync()[0]}`);
+ Object.keys(t).forEach((s) => {
+ const e = t[s];
+ console.log(`${s}:`), console.log(` Shape: ${e.shape}`), console.log(` Mean: ${this.tf.mean(e).dataSync()[0]}`), console.log(` Std: ${this.tf.moments(e).variance.sqrt().dataSync()[0]}`), console.log(` Min: ${this.tf.min(e).dataSync()[0]}`), console.log(` Max: ${this.tf.max(e).dataSync()[0]}`), console.log(` Norm: ${this.tf.norm(e).dataSync()[0]}`);
  });
  }
- trainStep(t, e = !1, s = !1) {
+ trainStep(t, s = !1, e = !1) {
  return this.tf.tidy(() => {
+ this.model.getProfiler()?.startMemory();
  const { xs: i, ys: a } = t, o = () => {
  const { loss: l, logits: c } = this.model.forward(i, a, !0);
  return c.dispose(), l;
  }, { value: n, grads: r } = this.tf.variableGrads(o);
- return e || (s && (console.log("-------"), this.printGradients(r), console.log("-------")), this.optimizer.applyGradients(r), this.tf.dispose(r)), n;
+ return s ? this.model.getProfiler()?.endMemory("Training") : (e && (console.log("-------"), this.printGradients(r), console.log("-------")), this.optimizer.applyGradients(r), this.model.getProfiler()?.endMemory("Training"), this.tf.dispose(r)), n;
  });
  }
  dummyPass() {
- const t = this.tf.zeros([1, this.model.config.blockSize], "int32"), e = this.tf.zeros([1, this.model.config.blockSize], "int32");
+ const t = this.tf.zeros([1, this.model.config.blockSize], "int32"), s = this.tf.zeros([1, this.model.config.blockSize], "int32");
  try {
- const s = this.trainStep({ xs: t, ys: e }, !0);
- s.dataSync(), s.dispose();
- } catch (s) {
- console.error("Error during dummy pass:", s);
+ const e = this.trainStep({ xs: t, ys: s }, !0);
+ e.dataSync(), e.dispose();
+ } catch (e) {
+ console.error("Error during dummy pass:", e);
  } finally {
- t.dispose(), e.dispose();
+ t.dispose(), s.dispose();
  }
  }
- async trainBatch(t, e) {
+ async trainBatch(t, s) {
  try {
- const s = this.trainStep(e, !1, !1);
- return e.xs.dispose(), e.ys.dispose(), t.step++, t.totalSteps++, s.array().then((i) => (t.lastLoss = i, t.losses.push(t.lastLoss), s.dispose(), t.lastLoss));
- } catch (s) {
- throw console.error(`Error processing batch at step ${t.step}:`, s), this.tf.dispose(), s;
+ const e = this.trainStep(s, !1, !1);
+ return s.xs.dispose(), s.ys.dispose(), t.step++, t.totalSteps++, e.array().then((i) => (t.lastLoss = i, t.losses.push(t.lastLoss), e.dispose(), t.lastLoss));
+ } catch (e) {
+ throw console.error(`Error processing batch at step ${t.step}:`, e), this.tf.dispose(), e;
  }
  }
- async createTrainValidationSplit(t, e = 32, s = 0.1) {
- const i = await this.datasetBuilder.createTextDataset(t, e, 0, 1 - s), a = await this.datasetBuilder.createTextDataset(
+ async createTrainValidationSplit(t, s = 32, e = 0.1) {
+ const i = await this.datasetBuilder.createTextDataset(t, s, 0, 1 - e), a = await this.datasetBuilder.createTextDataset(
  t,
- e,
- 1 - s,
+ s,
+ 1 - e,
  1
  );
  return { trainDataset: i, validationDataset: a };
  }
- async createDataset(t, e = 32) {
- return await this.datasetBuilder.createTextDataset(t, e);
+ async createDataset(t, s = 32) {
+ return await this.datasetBuilder.createTextDataset(t, s);
  }
  dispose() {
  this.optimizer && this.optimizer.dispose();
  }
  }
  export {
- u as default
+ g as default
  };
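The substantive change in trainStep (the rest is re-minified identifiers) is the profiler bracketing: this.model.getProfiler()?.startMemory() before the forward/backward pass, and endMemory("Training") once gradients are applied, or immediately when the dummy-pass flag is set. The profiler interface itself lives in the new utilities/profile.d.ts, which this diff view does not expand, so the following stand-in is an assumption that only mirrors the two calls visible above:

    import * as tf from "@tensorflow/tfjs";

    // Hypothetical profiler sketch; the real interface is in utilities/profile.d.ts.
    class MemoryProfiler {
      private startBytes = 0;
      startMemory(): void {
        this.startBytes = tf.memory().numBytes;
      }
      endMemory(label: string): void {
        const delta = tf.memory().numBytes - this.startBytes;
        console.log(`[profile] ${label}: ${(delta / 1048576).toFixed(2)} MB delta, ` +
          `${tf.memory().numTensors} live tensors`);
      }
    }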