@genai-fi/nanogpt 0.8.1 → 0.8.2

This diff shows the changes between publicly released versions of the package as published to a supported public registry. It is provided for informational purposes only.
@@ -45,7 +45,8 @@ export default class Generator extends EE<'start' | 'stop' | 'tokens'> {
  generate(prompt?: string, options?: IGenerateOptions): Promise<string>;
  stop(): void;
  getText(): string;
- getAttentionData(): number[][][][];
+ getAttentionData(): number[][][][][];
  getProbabilitiesData(): number[][][];
+ getEmbeddingsData(): number[][][][];
  getTokens(): number[];
  }
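For consumers, these declaration changes are the visible surface of the release: `getAttentionData()` gains one extra leading dimension and `getEmbeddingsData()` is new. A minimal usage sketch, assuming the class is importable from the package root (the import path is a guess; only the method signatures come from the hunk above, and the per-step meaning of the new leading axis is an assumption based on the `attentionData.push(...)` calls in dist/Generator.js below):

import Generator from '@genai-fi/nanogpt';

// Hypothetical: how the Generator is constructed is not part of this diff.
declare const generator: Generator;

async function inspect(): Promise<void> {
  await generator.generate('Hello');

  // 0.8.2 adds one leading dimension; assumed layout: [step][layer][head][query][key].
  const attention: number[][][][][] = generator.getAttentionData();

  // New in 0.8.2; dimension meanings are likewise assumed.
  const embeddings: number[][][][] = generator.getEmbeddingsData();

  console.log(attention.length, embeddings.length, generator.getTokens());
}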
package/dist/Generator.js CHANGED
@@ -73,7 +73,7 @@ function N(m, t, e, i = !1) {
  const a = { logits: n === 1 ? x(o, [1, -1]) : o }, p = { numSamples: t, seed: e, normalized: i }, l = C.runKernel(I, a, p);
  return n === 1 ? x(l, [l.size]) : l;
  }
- const S = /* @__PURE__ */ A({ multinomial_: N }), H = [
+ const D = /* @__PURE__ */ A({ multinomial_: N }), H = [
  ...Array.from({ length: 95 }, (m, t) => String.fromCharCode(t + 32)),
  // ASCII
  // Spanish accented letters and punctuation
@@ -112,7 +112,9 @@ class qt extends z {
  return null;
  const n = await t.decode([s]);
  if (i) {
- const d = await Promise.all(i.map((a) => a.array().then((p) => p)));
+ const d = await Promise.all(
+ i.map((a) => a.array().then((p) => p))
+ );
  i.forEach((a) => a.dispose()), this.attentionData.push(d);
  }
  if (o) {
@@ -134,10 +136,10 @@ class qt extends z {
  const r = t, h = r.shape[1], u = h <= this.model.config.blockSize ? r : r.slice(
  [0, h - this.model.config.blockSize],
  [r.shape[0], this.model.config.blockSize]
- ), k = d ? this.model.config.blockSize - u.shape[1] : 0, b = k > 0 ? _(u, [
+ ), g = d ? this.model.config.blockSize - u.shape[1] : 0, b = g > 0 ? _(u, [
  [0, 0],
- [0, k]
- ]) : u, [f] = this.model.forward(a, b), y = f.shape[1] - 1 - k, c = f.slice([0, y, 0], [f.shape[0], 1, f.shape[2]]);
+ [0, g]
+ ]) : u, [f] = this.model.forward(a, b), y = f.shape[1] - 1 - g, c = f.slice([0, y, 0], [f.shape[0], 1, f.shape[2]]);
  return a.attentionScores?.attentionOut && a.attentionScores.attentionOut.forEach((T, E) => {
  T.shape[1] !== 1 && (a.attentionScores.attentionOut[E] = R(
  T.slice([0, y, 0], [T.shape[0], 1, T.shape[2]])
@@ -148,19 +150,19 @@ class qt extends z {
  if (n) {
  const r = v(p), h = await r.array();
  r.dispose();
- const u = h[0].map((c, g) => ({ prob: c, index: g })).sort((c, g) => g.prob - c.prob);
- let k = 0;
+ const u = h[0].map((c, k) => ({ prob: c, index: k })).sort((c, k) => k.prob - c.prob);
+ let g = 0;
  const b = new Array(u.length).fill(0);
  for (const c of u)
- if (k += c.prob, b[c.index] = c.prob, k >= n)
+ if (g += c.prob, b[c.index] = c.prob, g >= n)
  break;
- const f = b.reduce((c, g) => c + g, 0), y = b.map((c) => c / f);
+ const f = b.reduce((c, k) => c + k, 0), y = b.map((c) => c / f);
  l = $(y);
  } else if (s) {
- const { values: r, indices: h } = K(p, s), u = S(r, 1);
+ const { values: r, indices: h } = K(p, s), u = D(r, 1);
  l = q(h, u, 1), r.dispose(), h.dispose(), u.dispose();
  } else
- l = S(p, 1);
+ l = D(p, 1);
  let w;
  i?.includeProbabilities && (w = v(p)), a.embeddings && this.embeddingsData.push(
  await Promise.all(
@@ -170,8 +172,8 @@ class qt extends z {
  })
  )
  );
- const D = l.reshape([1, 1]);
- return l.dispose(), l = D, p.dispose(), { output: l, probabilities: w, attention: a.attentionScores?.attentionOut };
+ const S = l.reshape([1, 1]);
+ return l.dispose(), l = S, p.dispose(), { output: l, probabilities: w, attention: a.attentionScores?.attentionOut };
  }
  /** Generate multiple tokens in a loop and produce text */
  async _generate(t) {
@@ -239,6 +241,9 @@ class qt extends z {
  getProbabilitiesData() {
  return this.probabilitiesData;
  }
+ getEmbeddingsData() {
+ return this.embeddingsData;
+ }
  getTokens() {
  return this.tokens;
  }
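The `@@ -148` hunk above is minified identifier churn (`k` and `g` swap roles, `S` becomes `D`), but the logic it touches is recognizably top-p (nucleus) sampling: rank probabilities, keep the smallest prefix whose cumulative mass reaches the threshold, renormalize, then sample. A de-minified sketch of that branch; every name here is hypothetical (the published code uses the single letters shown):

// De-minified sketch of the top-p branch in dist/Generator.js.
function topPFilter(probs: number[], topP: number): number[] {
  const ranked = probs
    .map((prob, index) => ({ prob, index }))
    .sort((a, b) => b.prob - a.prob); // descending by probability

  const kept: number[] = new Array(probs.length).fill(0);
  let cumulative = 0;
  for (const { prob, index } of ranked) {
    cumulative += prob;
    kept[index] = prob;
    if (cumulative >= topP) break; // nucleus mass reached
  }

  // Renormalize the retained mass so it sums to 1 before sampling.
  const total = kept.reduce((sum, p) => sum + p, 0);
  return kept.map((p) => p / total);
}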
@@ -6,6 +6,7 @@ import { execute as normRMSGrad } from './normRMSGrad';
  import { execute as appendCache } from './appendCache';
  import { execute as attentionMask } from './attentionMask';
  import { default as runCheck } from './check';
+ import { createWeightStatistics, createTensorStatistics } from './weights';
  declare const checks: {
  rope: typeof rope;
  qkv: typeof qkv;
@@ -15,5 +16,7 @@ declare const checks: {
  appendCache: typeof appendCache;
  attentionMask: typeof attentionMask;
  runCheck: typeof runCheck;
+ createLayerWeightStatistics: typeof createWeightStatistics;
+ createWeightStatistics: typeof createTensorStatistics;
  };
  export default checks;
@@ -1,20 +1,23 @@
  import { execute as e } from "./rope.js";
  import { execute as t } from "./normRMS.js";
- import { execute as o } from "./qkv.js";
- import { execute as r } from "./gelu.js";
- import { execute as c } from "./normRMSGrad.js";
- import { execute as m } from "./appendCache.js";
- import { execute as u } from "./attentionMask.js";
- import x from "./check.js";
+ import { execute as r } from "./qkv.js";
+ import { execute as c } from "./gelu.js";
+ import { execute as o } from "./normRMSGrad.js";
+ import { execute as a } from "./appendCache.js";
+ import { execute as i } from "./attentionMask.js";
+ import m from "./check.js";
+ import { createTensorStatistics as s, createWeightStatistics as u } from "./weights.js";
  const d = {
  rope: e,
- qkv: o,
- gelu: r,
+ qkv: r,
+ gelu: c,
  normRMS: t,
- normRMSGrad: c,
- appendCache: m,
- attentionMask: u,
- runCheck: x
+ normRMSGrad: o,
+ appendCache: a,
+ attentionMask: i,
+ runCheck: m,
+ createLayerWeightStatistics: u,
+ createWeightStatistics: s
  };
  export {
  d as default
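Both the declaration and the minified barrel now expose the weight-statistics helpers. Note the crossed naming: `createLayerWeightStatistics` maps to the per-layer function from ./weights, while `createWeightStatistics` on the barrel maps to the per-tensor `createTensorStatistics`. A usage sketch, assuming `checks` is imported from the package root as main.d.ts re-exports it:

import { checks } from '@genai-fi/nanogpt';

async function audit(): Promise<void> {
  // Per-tensor entry point: the .d.ts accepts a Tensor or a plain number[].
  const stats = await checks.createWeightStatistics([0.1, -0.2, 0, 0.3]);
  console.log(stats.mean, stats.std, stats.sparsity);

  // Per-layer entry point walks layer.trainableVariables and keys results
  // by variable name, which this release now sets (e.g.
  // "block_0_attn_cAttn_kernel"). `someLayer` is a hypothetical BaseLayer.
  // const byVariable = await checks.createLayerWeightStatistics(someLayer);
}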
@@ -1,9 +1,34 @@
- import { s as c, e as m } from "../index-DdmHGZjq.js";
- import { t as i } from "../tensor3d-BOukqWwr.js";
- import { t as u } from "../tensor2d-CObBWBkW.js";
- async function w(a) {
- await c(a);
- const o = i(
+ import { z as i, A as u, B as c, s as l, e as h } from "../index-DdmHGZjq.js";
+ import { t as f } from "../tensor2d-CObBWBkW.js";
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ function m(t, e, n) {
+ if (i(t), e != null && e.length !== 3)
+ throw new Error("tensor3d() requires shape to have three numbers");
+ const r = u(t, n);
+ if (r.length !== 3 && r.length !== 1)
+ throw new Error("tensor3d() requires values to be number[][][] or flat/TypedArray");
+ if (r.length === 1 && e == null)
+ throw new Error("tensor3d() requires shape to be provided when `values` are a flat array");
+ return c(t, e, r, n);
+ }
+ async function y(t) {
+ await l(t);
+ const e = m(
  [
  [
  [0.1, 0.2],
@@ -11,15 +36,15 @@ async function w(a) {
  ]
  ],
  [1, 2, 2]
- ), r = u(
+ ), n = f(
  [
  [0.5, 0.6, 0.9, 1, 1.3, 1.4],
  [0.7, 0.8, 1.1, 1.2, 1.5, 1.6]
  ],
  [2, 6]
- ), t = m().runKernel("QKV", { x: o, kernel: r }, { heads: 1 }), s = await t[0].array(), n = await t[1].array(), e = await t[2].array();
- return [s, n, e];
+ ), r = h().runKernel("QKV", { x: e, kernel: n }, { heads: 1 }), o = await r[0].array(), a = await r[1].array(), s = await r[2].array();
+ return [o, a, s];
  }
  export {
- w as execute
+ y as execute
  };
@@ -1,9 +1,9 @@
+ import t from "../layers/RoPECache.js";
  import { s as c, e as i } from "../index-DdmHGZjq.js";
- import { t as m } from "../tensor4d-DLtk7Nxh.js";
- import { t } from "../tensor3d-BOukqWwr.js";
- async function y(n) {
- await c(n);
- const s = m(
+ import { t as p } from "../tensor4d-DLtk7Nxh.js";
+ async function y(a) {
+ await c(a);
+ const o = p(
  [
  [
  [
@@ -13,8 +13,25 @@ async function y(n) {
  ]
  ],
  [1, 1, 2, 2]
- ), e = t([0.5, 0.6], [2, 1, 1]), o = t([0.9, 1], [2, 1, 1]), r = i().runKernel("Rope", { x: s, sin: e, cos: o }, { pastLen: 0 });
- return Array.isArray(r) ? r.map((a) => a.array()) : r.array();
+ ), n = {
+ biasInLayerNorm: !1,
+ vocabSize: 20,
+ nEmbed: 16,
+ nHead: 2,
+ nLayer: 1,
+ biasInLinear: !1,
+ dropout: 0,
+ blockSize: 128,
+ mlpFactor: 4,
+ useRope: !0
+ }, e = new t(n);
+ e.ensureRopeCache(120);
+ const r = i().runKernel(
+ "Rope",
+ { x: o, sin: e.getSin(), cos: e.getCos() },
+ { pastLen: 20 }
+ );
+ return Array.isArray(r) ? r.map((s) => s.array()) : r.array();
  }
  export {
  y as execute
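The reworked check drives the `Rope` kernel through a real `RoPECache` built from a small model config and warmed with `ensureRopeCache(120)`, instead of hand-made sin/cos tensors, and exercises a non-zero `pastLen`. For orientation, a sketch of the rotation RoPE conventionally applies to each two-channel pair; this is the textbook formulation, and the kernel's actual axis layout is not visible in this diff:

// Conventional RoPE: rotate each (x0, x1) channel pair by a position-dependent
// angle, with sin/cos read from a cache indexed by absolute position
// (pastLen + offset in the check above).
function ropeRotatePair(
  x0: number,
  x1: number,
  sin: number,
  cos: number
): [number, number] {
  return [x0 * cos - x1 * sin, x0 * sin + x1 * cos];
}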
@@ -0,0 +1,16 @@
+ import { default as BaseLayer } from '../layers/BaseLayer';
+ import { Tensor } from '@tensorflow/tfjs-core';
+ export interface TensorStatistics {
+ mean: number;
+ std: number;
+ min: number;
+ max: number;
+ sparsity: number;
+ isFinite: boolean;
+ hasNaN: boolean;
+ closeToZeroCount: number;
+ }
+ export declare function createTensorStatistics(weight: Tensor | number[]): Promise<TensorStatistics>;
+ export declare function createWeightStatistics(layer: BaseLayer): Promise<{
+ [key: string]: TensorStatistics;
+ }>;
@@ -0,0 +1,29 @@
+ async function d(s) {
+ const e = Array.isArray(s) ? s : await s.data(), a = e.length;
+ let n = 0, l = 0, i = e[0], r = e[0], u = 0, h = !0, f = !1, m = 0;
+ for (let c = 0; c < a; c++) {
+ const t = e[c];
+ n += t, l += t * t, t < i && (i = t), t > r && (r = t), t === 0 && u++, Math.abs(t) < 1e-8 && m++, Number.isNaN(t) && (f = !0);
+ }
+ const o = n / a, y = l / a - o * o, N = Math.sqrt(y), b = u / a;
+ return {
+ mean: o,
+ std: N,
+ min: i,
+ max: r,
+ sparsity: b,
+ isFinite: h,
+ hasNaN: f,
+ closeToZeroCount: m
+ };
+ }
+ async function S(s) {
+ const e = s.trainableVariables, a = {};
+ for (const n of e)
+ a[n.name] = await d(n);
+ return a;
+ }
+ export {
+ d as createTensorStatistics,
+ S as createWeightStatistics
+ };
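The statistics are computed in a single pass, with the standard deviation taken from the identity Var(x) = E[x^2] - E[x]^2. A small worked check against the implementation above (the array is illustrative; accepting a plain number[] comes from the .d.ts):

import { checks } from '@genai-fi/nanogpt';

// For [1, 2, 3, 4]:
//   mean     = 10 / 4               = 2.5
//   E[x^2]   = (1 + 4 + 9 + 16) / 4 = 7.5
//   variance = 7.5 - 2.5 * 2.5      = 1.25
//   std      = sqrt(1.25)           ~ 1.1180
//   sparsity = 0, closeToZeroCount = 0, hasNaN = false
const stats = await checks.createWeightStatistics([1, 2, 3, 4]);
console.log(stats.std.toFixed(4)); // "1.1180"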
@@ -1,16 +1,16 @@
  import { attentionMask as g } from "../ops/attentionMask.js";
  import O from "./BaseLayer.js";
- import { qkv as P } from "../ops/qkv.js";
+ import { qkv as _ } from "../ops/qkv.js";
  import { rope as v } from "../ops/rope.js";
  import { appendCache as V } from "../ops/appendCache.js";
- import { k as c, t as C } from "../index-DdmHGZjq.js";
- import { fusedSoftmax as T } from "../ops/fusedSoftmax.js";
- import { d as L } from "../random_width-DKGeiFuR.js";
- import { v as b } from "../variable-DPFOJyRG.js";
- import { r as k, d as y } from "../dropout-CcKSfOYE.js";
- import { r as N } from "../reshape-WeJkT3ja.js";
- import { m as R } from "../mat_mul-Dpy2mMRu.js";
- class $ extends O {
+ import { k as c, t as P } from "../index-DdmHGZjq.js";
+ import { fusedSoftmax as b } from "../ops/fusedSoftmax.js";
+ import { d as C } from "../random_width-DKGeiFuR.js";
+ import { v as k } from "../variable-DPFOJyRG.js";
+ import { r as T, d as L } from "../dropout-CcKSfOYE.js";
+ import { r as j } from "../reshape-WeJkT3ja.js";
+ import { m as x } from "../mat_mul-Dpy2mMRu.js";
+ class W extends O {
  divisor;
  index;
  units;
@@ -23,34 +23,34 @@ class $ extends O {
  build() {
  this.hasVariable(this.ATTN) === !1 && this.setVariable(
  this.ATTN,
- b(
- k([this.config.nEmbed, this.units], 0, 0.02),
- !0
- //`block_${this.index}_attn_cAttn_kernel`
+ k(
+ T([this.config.nEmbed, this.units], 0, 0.02),
+ !0,
+ `block_${this.index}_attn_cAttn_kernel`
  )
  ), this.hasVariable(this.PROJ) === !1 && this.setVariable(
  this.PROJ,
- b(
- k([this.projUnits, this.config.nEmbed], 0, 0.02),
- !0
- //`block_${this.index}_attn_cProj_kernel`
+ k(
+ T([this.projUnits, this.config.nEmbed], 0, 0.02),
+ !0,
+ `block_${this.index}_attn_cProj_kernel`
  )
  );
  }
  getAttentionScores(t, i, s, o) {
- const e = g(t, i, this.divisor), n = T(e, s ? this.config.dropout : 0, o);
+ const e = g(t, i, this.divisor), n = b(e, s ? this.config.dropout : 0, o);
  return e.dispose(), n;
  }
  // Attention with optional past. If pastLen > 0 and T_cur == 1, no mask needed.
  getAttentionScoresWithPast(t, i, s) {
- const o = g(t, i, this.divisor, s), e = T(o, 0, 0);
+ const o = g(t, i, this.divisor, s), e = b(o, 0, 0);
  return o.dispose(), e;
  }
  getQKV(t) {
- return P(t, this.getVariable(this.ATTN), this.config.nHead);
+ return _(t, this.getVariable(this.ATTN), this.config.nHead);
  }
  getOutputProjection(t) {
- const i = t.shape[0], s = t.shape[2], o = this.config.nEmbed, e = t.transpose([0, 2, 1, 3]), n = N(e, [i, s, o]), r = L(n, this.getVariable(this.PROJ));
+ const i = t.shape[0], s = t.shape[2], o = this.config.nEmbed, e = t.transpose([0, 2, 1, 3]), n = j(e, [i, s, o]), r = C(n, this.getVariable(this.PROJ));
  return n.dispose(), e.dispose(), r;
  }
  updateCache(t, i, s) {
@@ -62,19 +62,19 @@ class qt extends z {
  s.length = d, s.cumulativeLength = h, s.k = c(r), s.v = c(p);
  }
  forward(t, i) {
- return C(() => {
+ return P(() => {
  this.startMemory();
  const [s, o, e] = this.getQKV(i), n = t.pastKV ? t.pastKV.cumulativeLength : 0, r = t.ropeCache, p = r ? v(s, r, n) : s, d = r ? v(o, r, n) : o;
  r && (s.dispose(), o.dispose());
  const h = t.pastKV ? t.pastKV.length : 0;
  t.pastKV && !t.training && this.updateCache(d, e, t.pastKV);
- const u = t.pastKV?.k ? t.pastKV.k : d, m = t.pastKV?.v ? t.pastKV.v : e;
+ const u = t.pastKV?.k ? t.pastKV.k : d, l = t.pastKV?.v ? t.pastKV.v : e;
  let a;
  h > 0 ? a = this.getAttentionScoresWithPast(p, u, h) : a = this.getAttentionScores(p, u, t.training, t.seed || 0), p.dispose(), t.pastKV || u.dispose();
- const l = R(a, m), f = t.attentionScores !== void 0 && t.attentionScores.attentionOut !== void 0;
- f || a.dispose(), t.pastKV || m.dispose();
- const A = this.getOutputProjection(l);
- if (l.dispose(), f && t.attentionScores && t.attentionScores.attentionOut !== void 0) {
+ const m = x(a, l), f = t.attentionScores !== void 0 && t.attentionScores.attentionOut !== void 0;
+ f || a.dispose(), t.pastKV || l.dispose();
+ const A = this.getOutputProjection(m);
+ if (m.dispose(), f && t.attentionScores && t.attentionScores.attentionOut !== void 0) {
  const K = a.shape[1], S = a.shape[2];
  t.attentionScores.attentionOut?.push(
  c(a.slice([0, 0, 0, 0], [1, -1, -1, -1]).reshape([K, S, -1]))
@@ -85,12 +85,12 @@
  }
  dropout(t) {
  if (this.config.dropout > 0) {
- const i = y(t, this.config.dropout);
+ const i = L(t, this.config.dropout);
  return t.dispose(), i;
  } else
  return t;
  }
  }
  export {
- $ as default
+ W as default
  };
@@ -1,10 +1,10 @@
  import { t as p } from "../index-DdmHGZjq.js";
  import u from "./BaseLayer.js";
  import { matMulGelu as M } from "../ops/matMulGelu.js";
- import { v as o } from "../variable-DPFOJyRG.js";
- import { r as h, d as f } from "../dropout-CcKSfOYE.js";
- import { r as d } from "../reshape-WeJkT3ja.js";
- import { m as c } from "../mat_mul-Dpy2mMRu.js";
+ import { v as a } from "../variable-DPFOJyRG.js";
+ import { r as d, d as c } from "../dropout-CcKSfOYE.js";
+ import { r as h } from "../reshape-WeJkT3ja.js";
+ import { m as b } from "../mat_mul-Dpy2mMRu.js";
  class H extends u {
  index;
  hiddenUnits;
@@ -16,32 +16,32 @@ class H extends u {
  build() {
  this.hasVariable(this.MLPHIDDEN) === !1 && this.setVariable(
  this.MLPHIDDEN,
- o(
- h([this.config.nEmbed, this.hiddenUnits], 0, 0.02),
- !0
- //`block_${this.index}_attn_cAttn_kernel`
+ a(
+ d([this.config.nEmbed, this.hiddenUnits], 0, 0.02),
+ !0,
+ `block_${this.index}_mlpHidden_kernel`
  )
  ), this.hasVariable(this.MLPOUT) === !1 && this.setVariable(
  this.MLPOUT,
- o(
- h([this.hiddenUnits, this.config.nEmbed], 0, 0.02 / Math.sqrt(2 * this.config.nLayer)),
- !0
- //`block_${this.index}_attn_cProj_kernel`
+ a(
+ d([this.hiddenUnits, this.config.nEmbed], 0, 0.02 / Math.sqrt(2 * this.config.nLayer)),
+ !0,
+ `block_${this.index}_mlpOut_kernel`
  )
  );
  }
  forward(i, t) {
  return p(() => {
  this.startMemory();
- const [s, r, e] = t.shape, n = d(t, [s * r, e]), a = M(n, this.getVariable(this.MLPHIDDEN)), m = c(a, this.getVariable(this.MLPOUT));
- a.dispose();
- const l = d(m, [s, r, e]);
- return this.endMemory("MLP"), l;
+ const [s, e, r] = t.shape, n = h(t, [s * e, r]), o = M(n, this.getVariable(this.MLPHIDDEN)), l = b(o, this.getVariable(this.MLPOUT));
+ o.dispose();
+ const m = h(l, [s, e, r]);
+ return this.endMemory("MLP"), m;
  });
  }
  dropout(i) {
  if (this.config.dropout > 0) {
- const t = f(i, this.config.dropout);
+ const t = c(i, this.config.dropout);
  return i.dispose(), t;
  }
  return i;
@@ -2,28 +2,28 @@ import { d as r } from "../random_width-DKGeiFuR.js";
  import "../index-DdmHGZjq.js";
  import { r as a } from "../exports_initializers-DKk7-bsx.js";
  import s from "./BaseLayer.js";
- import { v as m } from "../variable-DPFOJyRG.js";
- import { g as o } from "../gather-CPg6ZlQA.js";
+ import { v as o } from "../variable-DPFOJyRG.js";
+ import { g as m } from "../gather-CPg6ZlQA.js";
  class S extends s {
  vocabSize;
  embedDim;
  initializer;
  WEIGHTS;
- constructor(i, e, t) {
- super(i, t), this.WEIGHTS = e, this.vocabSize = i.vocabSize, this.embedDim = i.nEmbed, this.initializer = a({
+ constructor(i, t, e) {
+ super(i, e), this.WEIGHTS = t, this.vocabSize = i.vocabSize, this.embedDim = i.nEmbed, this.initializer = a({
  mean: 0,
  stddev: 0.02
- }), this.addVariable(this.WEIGHTS, m(this.initializer.apply([this.vocabSize, this.embedDim]), !0));
+ }), this.addVariable(this.WEIGHTS, o(this.initializer.apply([this.vocabSize, this.embedDim]), !0, t));
  }
  embed(i) {
- return o(this.getVariable(this.WEIGHTS), i, 0);
+ return m(this.getVariable(this.WEIGHTS), i, 0);
  }
  project(i) {
  return r(i, this.getVariable(this.WEIGHTS).transpose());
  }
  // Dummy, should not be used.
- forward(i, e) {
- return this.project(e);
+ forward(i, t) {
+ return this.project(t);
  }
  }
  export {
package/dist/main.d.ts CHANGED
@@ -26,3 +26,4 @@ export declare const layers: {
  };
  export { default as AdamExt } from './training/AdamExt';
  export { default as checks } from './checks';
+ export type { TensorStatistics } from './checks/weights';
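This re-export makes the statistics shape consumable from the package root without a deep import into dist. A brief sketch; the guard function is hypothetical:

import type { TensorStatistics } from '@genai-fi/nanogpt';

// Hypothetical health check over the exported statistics shape.
function looksHealthy(s: TensorStatistics): boolean {
  return !s.hasNaN && s.std > 0 && s.sparsity < 0.99;
}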
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@genai-fi/nanogpt",
- "version": "0.8.1",
+ "version": "0.8.2",
  "type": "module",
  "main": "dist/main.js",
  "types": "dist/main.d.ts",
@@ -1,30 +0,0 @@
- import { z as o, A as a, B as s } from "./index-DdmHGZjq.js";
- /**
- * @license
- * Copyright 2018 Google LLC. All Rights Reserved.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * =============================================================================
- */
- function h(n, r, t) {
- if (o(n), r != null && r.length !== 3)
- throw new Error("tensor3d() requires shape to have three numbers");
- const e = a(n, t);
- if (e.length !== 3 && e.length !== 1)
- throw new Error("tensor3d() requires values to be number[][][] or flat/TypedArray");
- if (e.length === 1 && r == null)
- throw new Error("tensor3d() requires shape to be provided when `values` are a flat array");
- return s(n, r, e, t);
- }
- export {
- h as t
- };