@genai-fi/nanogpt 0.8.1 → 0.8.3

This diff shows the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
@@ -45,7 +45,11 @@ export default class Generator extends EE<'start' | 'stop' | 'tokens'> {
  generate(prompt?: string, options?: IGenerateOptions): Promise<string>;
  stop(): void;
  getText(): string;
- getAttentionData(): number[][][][];
+ getAttentionData(): number[][][][][];
  getProbabilitiesData(): number[][][];
+ getEmbeddingsData(): {
+ name: string;
+ tensor: number[][];
+ }[][];
  getTokens(): number[];
  }
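
Two public API changes are visible in this declaration hunk: getAttentionData() gains a leading dimension (now 5-D), and a new getEmbeddingsData() accessor returns per-step lists of named embedding matrices. A minimal usage sketch in TypeScript; the generator instance `gen` and the `embeddings` option flag are assumptions inferred from the option plumbing later in this diff:

// Sketch only: `gen` and the option name are assumptions based on this diff.
await gen.generate("hello", { embeddings: true });

// 0.8.1 returned number[][][][]; 0.8.3 adds a leading dimension.
const attention: number[][][][][] = gen.getAttentionData();

// New in 0.8.3: one entry per generated token, each a list of named layer outputs.
for (const step of gen.getEmbeddingsData()) {
  for (const { name, tensor } of step) {
    console.log(name, tensor.length, tensor[0]?.length);
  }
}
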
package/dist/Generator.js CHANGED
@@ -63,18 +63,18 @@ import { c as G } from "./concat-pHiVqR3L.js";
  * limitations under the License.
  * =============================================================================
  */
- function N(m, t, e, i = !1) {
- const o = L(m, "logits", "multinomial"), s = o.size, n = o.rank;
+ function N(h, t, e, i = !1) {
+ const o = L(h, "logits", "multinomial"), s = o.size, n = o.rank;
  if (s < 2)
  throw new Error(`Error in multinomial: you need at least 2 outcomes, but got ${s}.`);
  if (n > 2)
  throw new Error(`Rank of probabilities must be 1 or 2, but is ${n}`);
  e = e || Math.random();
- const a = { logits: n === 1 ? x(o, [1, -1]) : o }, p = { numSamples: t, seed: e, normalized: i }, l = C.runKernel(I, a, p);
- return n === 1 ? x(l, [l.size]) : l;
+ const a = { logits: n === 1 ? x(o, [1, -1]) : o }, l = { numSamples: t, seed: e, normalized: i }, m = C.runKernel(I, a, l);
+ return n === 1 ? x(m, [m.size]) : m;
  }
- const S = /* @__PURE__ */ A({ multinomial_: N }), H = [
- ...Array.from({ length: 95 }, (m, t) => String.fromCharCode(t + 32)),
+ const D = /* @__PURE__ */ A({ multinomial_: N }), H = [
+ ...Array.from({ length: 95 }, (h, t) => String.fromCharCode(t + 32)),
  // ASCII
  // Spanish accented letters and punctuation
  ..."áéíóúüñ¿¡",
@@ -85,8 +85,8 @@ const S = /* @__PURE__ */ A({ multinomial_: N }), H = [
  // Cyrillic letters
  ..."абвгдеёжзийклмнопрстуфхцчшщъыьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ"
  ];
- function U(m, t) {
- return m.length === t ? m : m.length > t ? m.slice(0, t) : m.concat(Array(t - m.length).fill(""));
+ function U(h, t) {
+ return h.length === t ? h : h.length > t ? h.slice(0, t) : h.concat(Array(t - h.length).fill(""));
  }
  class qt extends z {
  constructor(t, e) {
@@ -112,7 +112,9 @@ class qt extends z {
  return null;
  const n = await t.decode([s]);
  if (i) {
- const d = await Promise.all(i.map((a) => a.array().then((p) => p)));
+ const d = await Promise.all(
+ i.map((a) => a.array().then((l) => l))
+ );
  i.forEach((a) => a.dispose()), this.attentionData.push(d);
  }
  if (o) {
@@ -130,48 +132,47 @@ class qt extends z {
  } : void 0,
  cache: e,
  outputEmbeddings: i?.embeddings ?? !1
- }, p = O(() => {
- const r = t, h = r.shape[1], u = h <= this.model.config.blockSize ? r : r.slice(
- [0, h - this.model.config.blockSize],
- [r.shape[0], this.model.config.blockSize]
- ), k = d ? this.model.config.blockSize - u.shape[1] : 0, b = k > 0 ? _(u, [
+ }, l = O(() => {
+ const p = t, u = p.shape[1], r = u <= this.model.config.blockSize ? p : p.slice(
+ [0, u - this.model.config.blockSize],
+ [p.shape[0], this.model.config.blockSize]
+ ), f = d ? this.model.config.blockSize - r.shape[1] : 0, b = f > 0 ? _(r, [
  [0, 0],
- [0, k]
- ]) : u, [f] = this.model.forward(a, b), y = f.shape[1] - 1 - k, c = f.slice([0, y, 0], [f.shape[0], 1, f.shape[2]]);
+ [0, f]
+ ]) : r, [g] = this.model.forward(a, b), y = g.shape[1] - 1 - f, c = g.slice([0, y, 0], [g.shape[0], 1, g.shape[2]]);
  return a.attentionScores?.attentionOut && a.attentionScores.attentionOut.forEach((T, E) => {
  T.shape[1] !== 1 && (a.attentionScores.attentionOut[E] = R(
  T.slice([0, y, 0], [T.shape[0], 1, T.shape[2]])
  ), T.dispose());
- }), f.dispose(), c.div(o).squeeze([1]);
+ }), g.dispose(), c.div(o).squeeze([1]);
  });
- let l;
+ let m;
  if (n) {
- const r = v(p), h = await r.array();
- r.dispose();
- const u = h[0].map((c, g) => ({ prob: c, index: g })).sort((c, g) => g.prob - c.prob);
- let k = 0;
- const b = new Array(u.length).fill(0);
- for (const c of u)
- if (k += c.prob, b[c.index] = c.prob, k >= n)
+ const p = v(l), u = await p.array();
+ p.dispose();
+ const r = u[0].map((c, k) => ({ prob: c, index: k })).sort((c, k) => k.prob - c.prob);
+ let f = 0;
+ const b = new Array(r.length).fill(0);
+ for (const c of r)
+ if (f += c.prob, b[c.index] = c.prob, f >= n)
  break;
- const f = b.reduce((c, g) => c + g, 0), y = b.map((c) => c / f);
- l = $(y);
+ const g = b.reduce((c, k) => c + k, 0), y = b.map((c) => c / g);
+ m = $(y);
  } else if (s) {
- const { values: r, indices: h } = K(p, s), u = S(r, 1);
- l = q(h, u, 1), r.dispose(), h.dispose(), u.dispose();
+ const { values: p, indices: u } = K(l, s), r = D(p, 1);
+ m = q(u, r, 1), p.dispose(), u.dispose(), r.dispose();
  } else
- l = S(p, 1);
+ m = D(l, 1);
  let w;
- i?.includeProbabilities && (w = v(p)), a.embeddings && this.embeddingsData.push(
- await Promise.all(
- a.embeddings.map(async (r) => {
- const h = await r.array();
- return r.dispose(), h;
- })
- )
- );
- const D = l.reshape([1, 1]);
- return l.dispose(), l = D, p.dispose(), { output: l, probabilities: w, attention: a.attentionScores?.attentionOut };
+ if (i?.includeProbabilities && (w = v(l)), a.embeddings) {
+ const p = a.embeddings.map(async (r) => {
+ const f = await r.tensor.array();
+ return r.tensor.dispose(), { name: r.name, tensor: f };
+ }), u = await Promise.all(p);
+ this.embeddingsData.push(u);
+ }
+ const S = m.reshape([1, 1]);
+ return m.dispose(), m = S, l.dispose(), { output: m, probabilities: w, attention: a.attentionScores?.attentionOut };
  }
  /** Generate multiple tokens in a loop and produce text */
  async _generate(t) {
@@ -189,8 +190,8 @@ class qt extends z {
  if (this.cache)
  e.dispose(), e = s;
  else {
- const p = e;
- e = G([e, s], 1), p.dispose();
+ const l = e;
+ e = G([e, s], 1), l.dispose();
  }
  const a = await this.processResponse(this.actualTokeniser, s, d, n);
  if (this.cache || s.dispose(), a === null)
@@ -239,6 +240,9 @@ class qt extends z {
  getProbabilitiesData() {
  return this.probabilitiesData;
  }
+ getEmbeddingsData() {
+ return this.embeddingsData;
+ }
  getTokens() {
  return this.tokens;
  }
@@ -6,6 +6,7 @@ import { execute as normRMSGrad } from './normRMSGrad';
  import { execute as appendCache } from './appendCache';
  import { execute as attentionMask } from './attentionMask';
  import { default as runCheck } from './check';
+ import { createWeightStatistics, createTensorStatistics } from './weights';
  declare const checks: {
  rope: typeof rope;
  qkv: typeof qkv;
@@ -15,5 +16,7 @@ declare const checks: {
  appendCache: typeof appendCache;
  attentionMask: typeof attentionMask;
  runCheck: typeof runCheck;
+ createLayerWeightStatistics: typeof createWeightStatistics;
+ createWeightStatistics: typeof createTensorStatistics;
  };
  export default checks;
@@ -1,20 +1,23 @@
  import { execute as e } from "./rope.js";
  import { execute as t } from "./normRMS.js";
- import { execute as o } from "./qkv.js";
- import { execute as r } from "./gelu.js";
- import { execute as c } from "./normRMSGrad.js";
- import { execute as m } from "./appendCache.js";
- import { execute as u } from "./attentionMask.js";
- import x from "./check.js";
+ import { execute as r } from "./qkv.js";
+ import { execute as c } from "./gelu.js";
+ import { execute as o } from "./normRMSGrad.js";
+ import { execute as a } from "./appendCache.js";
+ import { execute as i } from "./attentionMask.js";
+ import m from "./check.js";
+ import { createTensorStatistics as s, createWeightStatistics as u } from "./weights.js";
  const d = {
  rope: e,
- qkv: o,
- gelu: r,
+ qkv: r,
+ gelu: c,
  normRMS: t,
- normRMSGrad: c,
- appendCache: m,
- attentionMask: u,
- runCheck: x
+ normRMSGrad: o,
+ appendCache: a,
+ attentionMask: i,
+ runCheck: m,
+ createLayerWeightStatistics: u,
+ createWeightStatistics: s
  };
  export {
  d as default
@@ -1,9 +1,34 @@
- import { s as c, e as m } from "../index-DdmHGZjq.js";
- import { t as i } from "../tensor3d-BOukqWwr.js";
- import { t as u } from "../tensor2d-CObBWBkW.js";
- async function w(a) {
- await c(a);
- const o = i(
+ import { z as i, A as u, B as c, s as l, e as h } from "../index-DdmHGZjq.js";
+ import { t as f } from "../tensor2d-CObBWBkW.js";
+ /**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+ function m(t, e, n) {
+ if (i(t), e != null && e.length !== 3)
+ throw new Error("tensor3d() requires shape to have three numbers");
+ const r = u(t, n);
+ if (r.length !== 3 && r.length !== 1)
+ throw new Error("tensor3d() requires values to be number[][][] or flat/TypedArray");
+ if (r.length === 1 && e == null)
+ throw new Error("tensor3d() requires shape to be provided when `values` are a flat array");
+ return c(t, e, r, n);
+ }
+ async function y(t) {
+ await l(t);
+ const e = m(
  [
  [
  [0.1, 0.2],
@@ -11,15 +36,15 @@ async function w(a) {
  ]
  ],
  [1, 2, 2]
- ), r = u(
+ ), n = f(
  [
  [0.5, 0.6, 0.9, 1, 1.3, 1.4],
  [0.7, 0.8, 1.1, 1.2, 1.5, 1.6]
  ],
  [2, 6]
- ), t = m().runKernel("QKV", { x: o, kernel: r }, { heads: 1 }), s = await t[0].array(), n = await t[1].array(), e = await t[2].array();
- return [s, n, e];
+ ), r = h().runKernel("QKV", { x: e, kernel: n }, { heads: 1 }), o = await r[0].array(), a = await r[1].array(), s = await r[2].array();
+ return [o, a, s];
  }
  export {
- w as execute
+ y as execute
  };
@@ -1,9 +1,9 @@
+ import t from "../layers/RoPECache.js";
  import { s as c, e as i } from "../index-DdmHGZjq.js";
- import { t as m } from "../tensor4d-DLtk7Nxh.js";
- import { t } from "../tensor3d-BOukqWwr.js";
- async function y(n) {
- await c(n);
- const s = m(
+ import { t as p } from "../tensor4d-DLtk7Nxh.js";
+ async function y(a) {
+ await c(a);
+ const o = p(
  [
  [
  [
@@ -13,8 +13,25 @@ async function y(n) {
  ]
  ],
  [1, 1, 2, 2]
- ), e = t([0.5, 0.6], [2, 1, 1]), o = t([0.9, 1], [2, 1, 1]), r = i().runKernel("Rope", { x: s, sin: e, cos: o }, { pastLen: 0 });
- return Array.isArray(r) ? r.map((a) => a.array()) : r.array();
+ ), n = {
+ biasInLayerNorm: !1,
+ vocabSize: 20,
+ nEmbed: 16,
+ nHead: 2,
+ nLayer: 1,
+ biasInLinear: !1,
+ dropout: 0,
+ blockSize: 128,
+ mlpFactor: 4,
+ useRope: !0
+ }, e = new t(n);
+ e.ensureRopeCache(120);
+ const r = i().runKernel(
+ "Rope",
+ { x: o, sin: e.getSin(), cos: e.getCos() },
+ { pastLen: 20 }
+ );
+ return Array.isArray(r) ? r.map((s) => s.array()) : r.array();
  }
  export {
  y as execute
@@ -0,0 +1,16 @@
+ import { default as BaseLayer } from '../layers/BaseLayer';
+ import { Tensor } from '@tensorflow/tfjs-core';
+ export interface TensorStatistics {
+ mean: number;
+ std: number;
+ min: number;
+ max: number;
+ sparsity: number;
+ isFinite: boolean;
+ hasNaN: boolean;
+ closeToZeroCount: number;
+ }
+ export declare function createTensorStatistics(weight: Tensor | number[]): Promise<TensorStatistics>;
+ export declare function createWeightStatistics(layer: BaseLayer): Promise<{
+ [key: string]: TensorStatistics;
+ }>;
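
These declarations back the two new entries in the checks index above; note the cross-naming there: checks.createWeightStatistics is the per-tensor createTensorStatistics, while checks.createLayerWeightStatistics is the per-layer createWeightStatistics. A hedged usage sketch, assuming the checks export and the TensorStatistics type re-export declared in main.d.ts later in this diff:

import { checks } from "@genai-fi/nanogpt";
import type { TensorStatistics } from "@genai-fi/nanogpt";

// Per-tensor statistics from a plain array (a tf.Tensor is also accepted).
const stats: TensorStatistics = await checks.createWeightStatistics([0, 0.5, -0.5, 1]);
console.log(stats.mean, stats.std, stats.sparsity);

// Per-layer statistics keyed by variable name; `layer` is an assumed BaseLayer instance.
const perLayer: { [key: string]: TensorStatistics } = await checks.createLayerWeightStatistics(layer);
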
@@ -0,0 +1,29 @@
+ async function d(s) {
+ const e = Array.isArray(s) ? s : await s.data(), a = e.length;
+ let n = 0, l = 0, i = e[0], r = e[0], u = 0, h = !0, f = !1, m = 0;
+ for (let c = 0; c < a; c++) {
+ const t = e[c];
+ n += t, l += t * t, t < i && (i = t), t > r && (r = t), t === 0 && u++, Math.abs(t) < 1e-8 && m++, Number.isNaN(t) && (f = !0);
+ }
+ const o = n / a, y = l / a - o * o, N = Math.sqrt(y), b = u / a;
+ return {
+ mean: o,
+ std: N,
+ min: i,
+ max: r,
+ sparsity: b,
+ isFinite: h,
+ hasNaN: f,
+ closeToZeroCount: m
+ };
+ }
+ async function S(s) {
+ const e = s.trainableVariables, a = {};
+ for (const n of e)
+ a[n.name] = await d(n);
+ return a;
+ }
+ export {
+ d as createTensorStatistics,
+ S as createWeightStatistics
+ };
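
De-minified, the new weights check computes everything in a single pass: variance as E[x^2] - E[x]^2, sparsity as the fraction of exact zeros, plus near-zero and NaN counts. A readable sketch of the same logic (identifier names are mine, not the package's):

import { Tensor } from "@tensorflow/tfjs-core";
import type { TensorStatistics } from "@genai-fi/nanogpt";

async function tensorStatistics(weight: Tensor | number[]): Promise<TensorStatistics> {
  const values = Array.isArray(weight) ? weight : await weight.data();
  const n = values.length;
  let sum = 0, sumSq = 0, min = values[0], max = values[0];
  let zeros = 0, nearZero = 0, hasNaN = false;
  for (let i = 0; i < n; i++) {
    const v = values[i];
    sum += v;
    sumSq += v * v;
    if (v < min) min = v;
    if (v > max) max = v;
    if (v === 0) zeros++;
    if (Math.abs(v) < 1e-8) nearZero++;
    if (Number.isNaN(v)) hasNaN = true;
  }
  const mean = sum / n;
  return {
    mean,
    std: Math.sqrt(sumSq / n - mean * mean), // one-pass variance: E[x^2] - E[x]^2
    min,
    max,
    sparsity: zeros / n, // fraction of exact zeros
    isFinite: true, // note: the shipped code initialises this flag and never updates it
    hasNaN,
    closeToZeroCount: nearZero,
  };
}
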
@@ -6,6 +6,11 @@ export interface ForwardAttributes {
  training: boolean;
  checkpointing?: boolean;
  ropeCache?: RoPECache;
+ outputEmbeddings?: boolean;
+ embeddings?: {
+ name: string;
+ tensor: Tensor;
+ }[];
  }
  export default abstract class BaseLayer<ATTR extends ForwardAttributes = ForwardAttributes> {
  readonly parent?: BaseLayer;
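
With these optional attributes on ForwardAttributes, a caller opts in to embedding capture and supplies the array that each layer pushes named tensors into. A hedged sketch of threading this through a forward pass; `model` and the input tensor are assumptions, while the attribute and entry names come from the hunks below:

import { Tensor } from "@tensorflow/tfjs-core";

const embeddings: { name: string; tensor: Tensor }[] = [];
const [logits] = model.forward(
  { training: false, outputEmbeddings: true, embeddings },
  inputTokens
);
// Blocks push named intermediates such as `block_ln1_0`, `block_attn_out_0`,
// `block_ln2_0`, `block_mlp_out_0`, `block_output_0`, then `final_norm_output`.
embeddings.forEach(({ name, tensor }) => console.log(name, tensor.shape));
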
@@ -1,16 +1,16 @@
  import { attentionMask as g } from "../ops/attentionMask.js";
  import O from "./BaseLayer.js";
- import { qkv as P } from "../ops/qkv.js";
+ import { qkv as _ } from "../ops/qkv.js";
  import { rope as v } from "../ops/rope.js";
  import { appendCache as V } from "../ops/appendCache.js";
- import { k as c, t as C } from "../index-DdmHGZjq.js";
- import { fusedSoftmax as T } from "../ops/fusedSoftmax.js";
- import { d as L } from "../random_width-DKGeiFuR.js";
- import { v as b } from "../variable-DPFOJyRG.js";
- import { r as k, d as y } from "../dropout-CcKSfOYE.js";
- import { r as N } from "../reshape-WeJkT3ja.js";
- import { m as R } from "../mat_mul-Dpy2mMRu.js";
- class $ extends O {
+ import { k as c, t as P } from "../index-DdmHGZjq.js";
+ import { fusedSoftmax as b } from "../ops/fusedSoftmax.js";
+ import { d as C } from "../random_width-DKGeiFuR.js";
+ import { v as k } from "../variable-DPFOJyRG.js";
+ import { r as T, d as L } from "../dropout-CcKSfOYE.js";
+ import { r as j } from "../reshape-WeJkT3ja.js";
+ import { m as x } from "../mat_mul-Dpy2mMRu.js";
+ class W extends O {
  divisor;
  index;
  units;
@@ -23,34 +23,34 @@ class $ extends O {
  build() {
  this.hasVariable(this.ATTN) === !1 && this.setVariable(
  this.ATTN,
- b(
- k([this.config.nEmbed, this.units], 0, 0.02),
- !0
- //`block_${this.index}_attn_cAttn_kernel`
+ k(
+ T([this.config.nEmbed, this.units], 0, 0.02),
+ !0,
+ `block_${this.index}_attn_cAttn_kernel`
  )
  ), this.hasVariable(this.PROJ) === !1 && this.setVariable(
  this.PROJ,
- b(
- k([this.projUnits, this.config.nEmbed], 0, 0.02),
- !0
- //`block_${this.index}_attn_cProj_kernel`
+ k(
+ T([this.projUnits, this.config.nEmbed], 0, 0.02),
+ !0,
+ `block_${this.index}_attn_cProj_kernel`
  )
  );
  }
  getAttentionScores(t, i, s, o) {
- const e = g(t, i, this.divisor), n = T(e, s ? this.config.dropout : 0, o);
+ const e = g(t, i, this.divisor), n = b(e, s ? this.config.dropout : 0, o);
  return e.dispose(), n;
  }
  // Attention with optional past. If pastLen > 0 and T_cur == 1, no mask needed.
  getAttentionScoresWithPast(t, i, s) {
- const o = g(t, i, this.divisor, s), e = T(o, 0, 0);
+ const o = g(t, i, this.divisor, s), e = b(o, 0, 0);
  return o.dispose(), e;
  }
  getQKV(t) {
- return P(t, this.getVariable(this.ATTN), this.config.nHead);
+ return _(t, this.getVariable(this.ATTN), this.config.nHead);
  }
  getOutputProjection(t) {
- const i = t.shape[0], s = t.shape[2], o = this.config.nEmbed, e = t.transpose([0, 2, 1, 3]), n = N(e, [i, s, o]), r = L(n, this.getVariable(this.PROJ));
+ const i = t.shape[0], s = t.shape[2], o = this.config.nEmbed, e = t.transpose([0, 2, 1, 3]), n = j(e, [i, s, o]), r = C(n, this.getVariable(this.PROJ));
  return n.dispose(), e.dispose(), r;
  }
  updateCache(t, i, s) {
@@ -62,19 +62,19 @@ class $ extends O {
  s.length = d, s.cumulativeLength = h, s.k = c(r), s.v = c(p);
  }
  forward(t, i) {
- return C(() => {
+ return P(() => {
  this.startMemory();
  const [s, o, e] = this.getQKV(i), n = t.pastKV ? t.pastKV.cumulativeLength : 0, r = t.ropeCache, p = r ? v(s, r, n) : s, d = r ? v(o, r, n) : o;
  r && (s.dispose(), o.dispose());
  const h = t.pastKV ? t.pastKV.length : 0;
  t.pastKV && !t.training && this.updateCache(d, e, t.pastKV);
- const u = t.pastKV?.k ? t.pastKV.k : d, m = t.pastKV?.v ? t.pastKV.v : e;
+ const u = t.pastKV?.k ? t.pastKV.k : d, l = t.pastKV?.v ? t.pastKV.v : e;
  let a;
  h > 0 ? a = this.getAttentionScoresWithPast(p, u, h) : a = this.getAttentionScores(p, u, t.training, t.seed || 0), p.dispose(), t.pastKV || u.dispose();
- const l = R(a, m), f = t.attentionScores !== void 0 && t.attentionScores.attentionOut !== void 0;
- f || a.dispose(), t.pastKV || m.dispose();
- const A = this.getOutputProjection(l);
- if (l.dispose(), f && t.attentionScores && t.attentionScores.attentionOut !== void 0) {
+ const m = x(a, l), f = t.attentionScores !== void 0 && t.attentionScores.attentionOut !== void 0;
+ f || a.dispose(), t.pastKV || l.dispose();
+ const A = this.getOutputProjection(m);
+ if (m.dispose(), f && t.attentionScores && t.attentionScores.attentionOut !== void 0) {
  const K = a.shape[1], S = a.shape[2];
  t.attentionScores.attentionOut?.push(
  c(a.slice([0, 0, 0, 0], [1, -1, -1, -1]).reshape([K, S, -1]))
@@ -85,12 +85,12 @@
  }
  dropout(t) {
  if (this.config.dropout > 0) {
- const i = y(t, this.config.dropout);
+ const i = L(t, this.config.dropout);
  return t.dispose(), i;
  } else
  return t;
  }
  }
  export {
- $ as default
+ W as default
  };
@@ -1,10 +1,10 @@
  import { t as p } from "../index-DdmHGZjq.js";
  import u from "./BaseLayer.js";
  import { matMulGelu as M } from "../ops/matMulGelu.js";
- import { v as o } from "../variable-DPFOJyRG.js";
- import { r as h, d as f } from "../dropout-CcKSfOYE.js";
- import { r as d } from "../reshape-WeJkT3ja.js";
- import { m as c } from "../mat_mul-Dpy2mMRu.js";
+ import { v as a } from "../variable-DPFOJyRG.js";
+ import { r as d, d as c } from "../dropout-CcKSfOYE.js";
+ import { r as h } from "../reshape-WeJkT3ja.js";
+ import { m as b } from "../mat_mul-Dpy2mMRu.js";
  class H extends u {
  index;
  hiddenUnits;
@@ -16,32 +16,32 @@ class H extends u {
  build() {
  this.hasVariable(this.MLPHIDDEN) === !1 && this.setVariable(
  this.MLPHIDDEN,
- o(
- h([this.config.nEmbed, this.hiddenUnits], 0, 0.02),
- !0
- //`block_${this.index}_attn_cAttn_kernel`
+ a(
+ d([this.config.nEmbed, this.hiddenUnits], 0, 0.02),
+ !0,
+ `block_${this.index}_mlpHidden_kernel`
  )
  ), this.hasVariable(this.MLPOUT) === !1 && this.setVariable(
  this.MLPOUT,
- o(
- h([this.hiddenUnits, this.config.nEmbed], 0, 0.02 / Math.sqrt(2 * this.config.nLayer)),
- !0
- //`block_${this.index}_attn_cProj_kernel`
+ a(
+ d([this.hiddenUnits, this.config.nEmbed], 0, 0.02 / Math.sqrt(2 * this.config.nLayer)),
+ !0,
+ `block_${this.index}_mlpOut_kernel`
  )
  );
  }
  forward(i, t) {
  return p(() => {
  this.startMemory();
- const [s, r, e] = t.shape, n = d(t, [s * r, e]), a = M(n, this.getVariable(this.MLPHIDDEN)), m = c(a, this.getVariable(this.MLPOUT));
- a.dispose();
- const l = d(m, [s, r, e]);
- return this.endMemory("MLP"), l;
+ const [s, e, r] = t.shape, n = h(t, [s * e, r]), o = M(n, this.getVariable(this.MLPHIDDEN)), l = b(o, this.getVariable(this.MLPOUT));
+ o.dispose();
+ const m = h(l, [s, e, r]);
+ return this.endMemory("MLP"), m;
  });
  }
  dropout(i) {
  if (this.config.dropout > 0) {
- const t = f(i, this.config.dropout);
+ const t = c(i, this.config.dropout);
  return i.dispose(), t;
  }
  return i;
@@ -2,28 +2,28 @@ import { d as r } from "../random_width-DKGeiFuR.js";
  import "../index-DdmHGZjq.js";
  import { r as a } from "../exports_initializers-DKk7-bsx.js";
  import s from "./BaseLayer.js";
- import { v as m } from "../variable-DPFOJyRG.js";
- import { g as o } from "../gather-CPg6ZlQA.js";
+ import { v as o } from "../variable-DPFOJyRG.js";
+ import { g as m } from "../gather-CPg6ZlQA.js";
  class S extends s {
  vocabSize;
  embedDim;
  initializer;
  WEIGHTS;
- constructor(i, e, t) {
- super(i, t), this.WEIGHTS = e, this.vocabSize = i.vocabSize, this.embedDim = i.nEmbed, this.initializer = a({
+ constructor(i, t, e) {
+ super(i, e), this.WEIGHTS = t, this.vocabSize = i.vocabSize, this.embedDim = i.nEmbed, this.initializer = a({
  mean: 0,
  stddev: 0.02
- }), this.addVariable(this.WEIGHTS, m(this.initializer.apply([this.vocabSize, this.embedDim]), !0));
+ }), this.addVariable(this.WEIGHTS, o(this.initializer.apply([this.vocabSize, this.embedDim]), !0, t));
  }
  embed(i) {
- return o(this.getVariable(this.WEIGHTS), i, 0);
+ return m(this.getVariable(this.WEIGHTS), i, 0);
  }
  project(i) {
  return r(i, this.getVariable(this.WEIGHTS).transpose());
  }
  // Dummy, should not be used.
- forward(i, e) {
- return this.project(e);
+ forward(i, t) {
+ return this.project(t);
  }
  }
  export {
@@ -1,32 +1,32 @@
  import l from "./CausalSelfAttention.js";
- import r from "./MLP.js";
+ import p from "./MLP.js";
  import o from "./RMSNorm.js";
- import d from "./BaseLayer.js";
- import { t as p } from "../index-DdmHGZjq.js";
- class k extends d {
+ import m from "./BaseLayer.js";
+ import { k as n, t as h } from "../index-DdmHGZjq.js";
+ class k extends m {
  ln1;
  attn;
  ln2;
  mlp;
  index;
  skipped = !1;
- constructor(t, s, i) {
- super(s, i), this.index = t, this.ln1 = new o(s, `block_${this.index}_rms1`, this), this.attn = new l(this.index, s, this), this.ln2 = new o(s, `block_${this.index}_rms2`, this), this.mlp = new r(this.index, s, this);
+ constructor(i, s, e) {
+ super(s, e), this.index = i, this.ln1 = new o(s, `block_${this.index}_rms1`, this), this.attn = new l(this.index, s, this), this.ln2 = new o(s, `block_${this.index}_rms2`, this), this.mlp = new p(this.index, s, this);
  }
- getMLPOutput(t, s) {
- const i = this.ln2.call({ training: s }, t), e = this.mlp.call({ training: s }, i);
- i.dispose();
- const n = t.add(e);
- return t.dispose(), e.dispose(), n;
+ getMLPOutput(i, s) {
+ const e = this.ln2.call({ training: s.training }, i), t = this.mlp.call({ training: s.training }, e);
+ s.outputEmbeddings ? (n(e), s.embeddings.push({ name: `block_ln2_${this.index}`, tensor: e })) : e.dispose();
+ const d = i.add(t);
+ return i.dispose(), s.outputEmbeddings ? (n(t), s.embeddings.push({ name: `block_mlp_out_${this.index}`, tensor: t })) : t.dispose(), d;
  }
- forward(t, s) {
- return p(() => {
+ forward(i, s) {
+ return h(() => {
  if (this.skipped)
  return s;
- const i = this.ln1.call(t, s), e = this.attn.call(t, i);
- i.dispose();
- const n = s.add(e);
- return e.dispose(), this.getMLPOutput(n, t.training);
+ const e = this.ln1.call(i, s), t = this.attn.call(i, e);
+ i.outputEmbeddings ? (n(e), i.embeddings.push({ name: `block_ln1_${this.index}`, tensor: e })) : e.dispose();
+ const d = s.add(t);
+ return i.outputEmbeddings ? (n(t), i.embeddings.push({ name: `block_attn_out_${this.index}`, tensor: t })) : t.dispose(), this.getMLPOutput(d, i);
  });
  }
  dispose() {
package/dist/main.d.ts CHANGED
@@ -26,3 +26,4 @@ export declare const layers: {
  };
  export { default as AdamExt } from './training/AdamExt';
  export { default as checks } from './checks';
+ export type { TensorStatistics } from './checks/weights';
@@ -1,6 +1,6 @@
- import { defaultConfig as m } from "./config.js";
- import f from "../layers/TransformerBlock.js";
- import u from "../layers/TiedEmbedding.js";
+ import { defaultConfig as a } from "./config.js";
+ import u from "../layers/TransformerBlock.js";
+ import f from "../layers/TiedEmbedding.js";
  import g from "../layers/RoPECache.js";
  import b from "../layers/RMSNorm.js";
  import { t as l, k as p } from "../index-DdmHGZjq.js";
@@ -17,9 +17,9 @@ class R extends w {
  // Final layer norm
  ropeCache;
  constructor(e = {}) {
- super({ ...m, ...e }), this.wte = new u(this.config, "token_embedding", this), this.config.useRope === !1 ? this.wpe = new k(this.config, "positional_embedding", this) : this.ropeCache = new g(this.config), this.blocks = [];
+ super({ ...a, ...e }), this.wte = new f(this.config, "token_embedding", this), this.config.useRope === !1 ? this.wpe = new k(this.config, "positional_embedding", this) : this.ropeCache = new g(this.config), this.blocks = [];
  for (let i = 0; i < this.config.nLayer; i++)
- this.blocks.push(new f(i, this.config, this));
+ this.blocks.push(new u(i, this.config, this));
  this.lnF = new b(this.config, "final_rms_norm", this);
  }
  getClassName() {
@@ -47,17 +47,15 @@ class R extends w {
  );
  for (let t = 0; t < this.blocks.length; t++) {
  const c = this.blocks[t], d = Math.random() * 1e9, r = {
- ropeCache: e.ropeCache,
- training: e.training,
+ ...e,
  seed: d,
- attentionScores: e.attentionScores,
  pastKV: e.cache ? e.cache[t] : void 0
- }, a = e.checkpointing && e.training ? c.callCheckpoint(r, o) : c.call(r, o);
- e.outputEmbeddings ? (p(o), e.embeddings.push(o)) : o.dispose(), o = a;
+ }, m = e.checkpointing && e.training ? c.callCheckpoint(r, o) : c.call(r, o);
+ e.outputEmbeddings ? (p(o), e.embeddings.push({ name: `block_output_${t}`, tensor: o })) : o.dispose(), o = m;
  }
  o = this.lnF.call(e, o);
  const n = this.wte.project(o);
- e.outputEmbeddings ? (p(o), e.embeddings.push(o)) : o.dispose();
+ e.outputEmbeddings ? (p(o), e.embeddings.push({ name: "final_norm_output", tensor: o })) : o.dispose();
  let h;
  return s && (h = this.calculateLoss(n, s)), this.endMemory("Forward"), h ? [n, h] : [n];
  });
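
Because the embeddings attribute now carries named entries rather than bare tensors (the ModelForwardAttributes hunk below drops the old Tensor[] field in favour of the one inherited from ForwardAttributes), downstream readers change from e.array() to e.tensor.array(). A hedged before/after sketch, with `attrs` standing in for the forward-attributes object:

// 0.8.1 (embeddings?: Tensor[]):
for (const e of attrs.embeddings ?? []) console.log(await e.array());
// 0.8.3 (embeddings?: { name: string; tensor: Tensor }[]):
for (const e of attrs.embeddings ?? []) console.log(e.name, await e.tensor.array());
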
@@ -5,8 +5,6 @@ export interface ModelForwardAttributes extends ForwardAttributes {
  cache?: KVCache[];
  attentionScores?: AttentionScores;
  seed?: number;
- outputEmbeddings?: boolean;
- embeddings?: Tensor[];
  }
  interface TrainingState {
  steps: number;
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@genai-fi/nanogpt",
- "version": "0.8.1",
+ "version": "0.8.3",
  "type": "module",
  "main": "dist/main.js",
  "types": "dist/main.d.ts",
@@ -1,30 +0,0 @@
- import { z as o, A as a, B as s } from "./index-DdmHGZjq.js";
- /**
- * @license
- * Copyright 2018 Google LLC. All Rights Reserved.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * =============================================================================
- */
- function h(n, r, t) {
- if (o(n), r != null && r.length !== 3)
- throw new Error("tensor3d() requires shape to have three numbers");
- const e = a(n, t);
- if (e.length !== 3 && e.length !== 1)
- throw new Error("tensor3d() requires values to be number[][][] or flat/TypedArray");
- if (e.length === 1 && r == null)
- throw new Error("tensor3d() requires shape to be provided when `values` are a flat array");
- return s(n, r, e, t);
- }
- export {
- h as t
- };