@genai-fi/nanogpt 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +7 -0
- package/README.md +20 -0
- package/dist/Generator.d.ts +14 -0
- package/dist/Generator.js +39 -0
- package/dist/NanoGPTModel.d.ts +35 -0
- package/dist/NanoGPTModel.js +129 -0
- package/dist/TeachableLLM.d.ts +21 -0
- package/dist/TeachableLLM.js +47 -0
- package/dist/Trainer.d.ts +19 -0
- package/dist/Trainer.js +34 -0
- package/dist/_commonjsHelpers-DaMA6jEr.js +8 -0
- package/dist/assets/worker-BYeSPNkq.js +1 -0
- package/dist/config.d.ts +11 -0
- package/dist/config.js +19 -0
- package/dist/index-B8nyc6IR.js +3899 -0
- package/dist/index-SOhdqzHq.js +113 -0
- package/dist/jszip.min-BLbRbbKt.js +2324 -0
- package/dist/layers/CausalSelfAttention.d.ts +22 -0
- package/dist/layers/CausalSelfAttention.js +75 -0
- package/dist/layers/LayerNorm.d.ts +12 -0
- package/dist/layers/LayerNorm.js +30 -0
- package/dist/layers/MLP.d.ts +17 -0
- package/dist/layers/MLP.js +57 -0
- package/dist/layers/TiedEmbedding.d.ts +22 -0
- package/dist/layers/TiedEmbedding.js +532 -0
- package/dist/layers/TransformerBlock.d.ts +19 -0
- package/dist/layers/TransformerBlock.js +47 -0
- package/dist/main.d.ts +6 -0
- package/dist/main.js +8 -0
- package/dist/tokeniser/CharTokeniser.d.ts +20 -0
- package/dist/tokeniser/CharTokeniser.js +52 -0
- package/dist/tokeniser/NodeTokeniser.d.ts +19 -0
- package/dist/tokeniser/NodeTokeniser.js +46 -0
- package/dist/tokeniser/WebTokeniser.d.ts +18 -0
- package/dist/tokeniser/WebTokeniser.js +96 -0
- package/dist/tokeniser/bpe.d.ts +14 -0
- package/dist/tokeniser/bpe.js +102 -0
- package/dist/tokeniser/messages.d.ts +61 -0
- package/dist/tokeniser/messages.js +1 -0
- package/dist/tokeniser/type.d.ts +14 -0
- package/dist/tokeniser/type.js +1 -0
- package/dist/tokeniser/worker.d.ts +1 -0
- package/dist/tokeniser/worker.js +53 -0
- package/dist/training/AdamExt.d.ts +23 -0
- package/dist/training/AdamExt.js +43 -0
- package/dist/training/DatasetBuilder.d.ts +12 -0
- package/dist/training/DatasetBuilder.js +27 -0
- package/dist/training/FullTrainer.d.ts +17 -0
- package/dist/training/FullTrainer.js +75 -0
- package/dist/training/LayerTrainer.d.ts +28 -0
- package/dist/training/LayerTrainer.js +108 -0
- package/dist/training/Trainer.d.ts +73 -0
- package/dist/training/Trainer.js +87 -0
- package/dist/training/lwSchedule.d.ts +7 -0
- package/dist/training/lwSchedule.js +162 -0
- package/dist/utilities/generate.d.ts +3 -0
- package/dist/utilities/generate.js +22 -0
- package/dist/utilities/load.d.ts +7 -0
- package/dist/utilities/load.js +47 -0
- package/dist/utilities/save.d.ts +3 -0
- package/dist/utilities/save.js +21 -0
- package/dist/utilities/textLoader.d.ts +1 -0
- package/dist/utilities/textLoader.js +438 -0
- package/dist/utilities/tokenParse.d.ts +1 -0
- package/dist/utilities/tokenParse.js +66 -0
- package/dist/utilities/weights.d.ts +12 -0
- package/dist/utilities/weights.js +43 -0
- package/package.json +59 -0
package/LICENSE
ADDED
@@ -0,0 +1,7 @@
+Copyright 2024 Nicolas Pope
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package/README.md
ADDED
@@ -0,0 +1,20 @@
+# Introduction
+TODO: Give a short introduction of your project. Let this section explain the objectives or the motivation behind this project.
+
+# Getting Started
+TODO: Guide users through getting your code up and running on their own system. In this section you can talk about:
+1. Installation process
+2. Software dependencies
+3. Latest releases
+4. API references
+
+# Build and Test
+TODO: Describe and show how to build your code and run the tests.
+
+# Contribute
+TODO: Explain how other users and developers can contribute to make your code better.
+
+If you want to learn more about creating good readme files then refer the following [guidelines](https://docs.microsoft.com/en-us/azure/devops/repos/git/create-a-readme?view=azure-devops). You can also seek inspiration from the below readme files:
+- [ASP.NET Core](https://github.com/aspnet/Home)
+- [Visual Studio Code](https://github.com/Microsoft/vscode)
+- [Chakra Core](https://github.com/Microsoft/ChakraCore)
package/dist/Generator.d.ts
ADDED
@@ -0,0 +1,14 @@
+import { default as NanoGPT } from './NanoGPTModel';
+import { ITokeniser } from './tokeniser/type';
+import { default as EE } from 'eventemitter3';
+export interface IGenerateOptions {
+    maxLength?: number;
+    temperature?: number;
+}
+export default class Generator extends EE<'start' | 'stop' | 'tokens'> {
+    private readonly model;
+    private readonly tokeniser;
+    constructor(model: NanoGPT, tokeniser: ITokeniser);
+    private generateBlockOfTokens;
+    generate(prompt?: string, options?: IGenerateOptions): Promise<string>;
+}
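A minimal usage sketch of the `Generator` API declared above. It assumes a `NanoGPT` model and an `ITokeniser` instance have already been constructed, and omits the import path; the `'tokens'` payload (token ids plus decoded text) is taken from the Generator.js implementation further down.

```ts
// Sketch only: `model` (NanoGPT) and `tokeniser` (ITokeniser) are assumed to exist.
const generator = new Generator(model, tokeniser);

generator.on('start', () => console.log('generation started'));
generator.on('tokens', (ids: number[], text: string) => process.stdout.write(text));
generator.on('stop', () => console.log('\ngeneration finished'));

const output = await generator.generate('Once upon a time', {
  maxLength: 200,
  temperature: 0.8,
});
```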
package/dist/Generator.js
ADDED
@@ -0,0 +1,39 @@
+import { E as f } from "./index-SOhdqzHq.js";
+const u = 4;
+class p extends f {
+  constructor(s, t) {
+    super(), this.model = s, this.tokeniser = t;
+  }
+  generateBlockOfTokens(s, t) {
+    const r = t?.temperature ?? 1;
+    let e = s;
+    for (let n = 0; n < u; n++) {
+      const i = this.model.generate(e, r), a = e;
+      e = this.model.tf.concat([e, i], 1), a.dispose(), i.dispose();
+    }
+    return e;
+  }
+  async generate(s, t) {
+    const r = s ? await this.tokeniser.tokenise([s], !0) : [[this.tokeniser.eosToken]];
+    let e = this.model.tf.tensor2d(r, [1, r[0].length], "int32");
+    this.emit("start");
+    let n = s || "";
+    for (; ; ) {
+      const i = this.generateBlockOfTokens(e, t), a = e;
+      e = i;
+      const l = i.slice([0, a.shape[1]], [1, u]), o = (await l.array())[0];
+      let h = !1, c = !1;
+      const d = o.indexOf(this.tokeniser.eosToken);
+      d !== -1 && (h = !0, o.splice(d)), o.length + n.length >= (t?.maxLength ?? 1e3) && (c = !0, o.splice(
+        t?.maxLength ? t.maxLength - n.length : o.length
+      ));
+      const k = await this.tokeniser.decode(o);
+      if (n += k, this.emit("tokens", o, k), a.dispose(), l.dispose(), h || c)
+        break;
+    }
+    return e.dispose(), this.emit("stop"), n;
+  }
+}
+export {
+  p as default
+};
package/dist/NanoGPTModel.d.ts
ADDED
@@ -0,0 +1,35 @@
+import { default as TF } from '@tensorflow/tfjs';
+import { GPTConfig } from './config';
+export interface TrainingLogEntry {
+    epoch: number;
+    loss: number;
+    valLoss?: number;
+    step: number;
+    time: number;
+    example?: string;
+    batchSize: number;
+}
+export default class NanoGPT {
+    readonly config: GPTConfig;
+    private wte;
+    private wpe;
+    private drop;
+    private blocks;
+    private lnF;
+    readonly tf: typeof TF;
+    log: TrainingLogEntry[];
+    constructor(tf: typeof TF, config?: Partial<GPTConfig>);
+    get variables(): TF.Variable[];
+    saveWeights(): Map<string, TF.Tensor[]>;
+    loadWeights(weights: Map<string, TF.Tensor[]>): void;
+    private inputPhase;
+    setSkipMask(mask: boolean[]): void;
+    setTrainableMask(mask: boolean[]): void;
+    set trainable(value: boolean);
+    forward(idx: TF.Tensor, targets?: TF.Tensor, training?: boolean): {
+        logits: TF.Tensor;
+        loss?: TF.Tensor;
+    };
+    generate(idx: TF.Tensor, temperature?: number, topK?: number): TF.Tensor;
+    getNumParams(): number;
+}
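A short sketch of calling `forward()` and `generate()` on an existing `NanoGPT` instance, based on the declarations above and the input checks in NanoGPTModel.js (2-D int32 input, sequence length no longer than `blockSize`). The `model` variable and its construction are assumed.

```ts
import * as tf from '@tensorflow/tfjs';

// Sketch: `model` is an assumed, already-constructed NanoGPT instance.
// forward() expects an int32 tensor of shape [batchSize, sequenceLength],
// with sequenceLength <= model.config.blockSize.
const idx = tf.tensor2d([[1, 5, 9, 2]], [1, 4], 'int32');
const { logits } = model.forward(idx);      // logits over every position: [1, 4, vocabSize]
const next = model.generate(idx, 0.8, 40);  // [1, 1] tensor with the sampled next-token id
```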
package/dist/NanoGPTModel.js
ADDED
@@ -0,0 +1,129 @@
+import { defaultConfig as b } from "./config.js";
+import p from "./layers/TransformerBlock.js";
+import m from "./layers/TiedEmbedding.js";
+import d from "./layers/LayerNorm.js";
+class S {
+  config;
+  wte;
+  // Token embeddings
+  wpe;
+  // Position embeddings
+  drop;
+  // Dropout
+  blocks;
+  lnF;
+  // Final layer norm
+  tf;
+  log = [];
+  // Training log
+  constructor(t, e = {}) {
+    this.tf = t, this.config = { ...b, ...e }, this.wte = new m(t, {
+      vocabSize: this.config.vocabSize,
+      embedDim: this.config.nEmbed,
+      name: "token_embedding"
+    }), this.wpe = this.tf.layers.embedding({
+      inputDim: this.config.blockSize,
+      outputDim: this.config.nEmbed,
+      name: "positional_embedding",
+      embeddingsInitializer: this.tf.initializers.randomNormal({ mean: 0, stddev: 0.02 })
+    }), this.drop = this.tf.layers.dropout({ rate: this.config.dropout }), this.blocks = [];
+    for (let s = 0; s < this.config.nLayer; s++)
+      this.blocks.push(new p(this.tf, s, this.config));
+    this.lnF = new d(t, [this.config.nEmbed], 1e-5, "final_layer_norm");
+  }
+  get variables() {
+    return [
+      ...this.wpe.trainableWeights.map((t) => t.read()),
+      ...this.blocks.flatMap((t) => t.variables),
+      ...this.lnF.trainableWeights.map((t) => t),
+      ...this.wte.variables
+    ];
+  }
+  saveWeights() {
+    const t = /* @__PURE__ */ new Map();
+    t.set("token_embedding", this.wte.getWeights()), t.set("positional_embedding", this.wpe.getWeights());
+    for (let e = 0; e < this.blocks.length; e++)
+      this.blocks[e].saveWeights(t);
+    return t.set("final_layer_norm", this.lnF.getWeights()), t;
+  }
+  loadWeights(t) {
+    this.wte.setWeights(t.get("token_embedding") || []), this.wpe.setWeights(t.get("positional_embedding") || []);
+    for (let e = 0; e < this.blocks.length; e++)
+      this.blocks[e].loadWeights(t);
+    this.lnF.setWeights(t.get("final_layer_norm") || []);
+  }
+  inputPhase(t, e = !1) {
+    return this.tf.tidy(() => {
+      const [, s] = t.shape, i = this.wte.embed(t), o = this.tf.range(0, s, 1, "int32"), r = this.wpe.apply(o), n = i.add(r);
+      return this.drop.apply(n, { training: e });
+    });
+  }
+  setSkipMask(t) {
+    if (t.length !== this.blocks.length)
+      throw new Error(`Mask length ${t.length} does not match number of blocks ${this.blocks.length}`);
+    for (let e = 0; e < this.blocks.length; e++)
+      this.blocks[e].skipped = t[e];
+  }
+  setTrainableMask(t) {
+    if (t.length !== this.blocks.length)
+      throw new Error(`Mask length ${t.length} does not match number of blocks ${this.blocks.length}`);
+    for (let e = 0; e < this.blocks.length; e++)
+      this.blocks[e].trainable = t[e];
+  }
+  set trainable(t) {
+    for (const e of this.blocks)
+      e.trainable = t;
+    this.wpe.trainable = t, this.lnF.trainable = t;
+  }
+  forward(t, e, s = !1) {
+    if (t.shape.length !== 2)
+      throw new Error(`Invalid input shape: expected [batch_size, sequence_length], got ${t.shape}`);
+    if (t.shape[1] > this.config.blockSize)
+      throw new Error(`Input sequence length ${t.shape[1]} isn't block size ${this.config.blockSize}`);
+    if (t.dtype !== "int32")
+      throw new Error(`Input tensor must be of type int32, got ${t.dtype}`);
+    return this.tf.tidy(() => {
+      const [, i] = t.shape;
+      if (i > this.config.blockSize)
+        throw new Error(`Cannot forward sequence of length ${i}, block size is only ${this.config.blockSize}`);
+      let o = this.inputPhase(t, s);
+      for (const h of this.blocks)
+        o = h.call(o);
+      o = this.lnF.apply(o);
+      const r = this.wte.project(o);
+      let n;
+      if (e)
+        try {
+          n = this.tf.losses.softmaxCrossEntropy(e, r, this.tf.Reduction.MEAN);
+        } catch (h) {
+          throw console.error("Error computing loss:", h), new Error(`Loss computation failed: ${h}`);
+        }
+      return { logits: r, loss: n };
+    });
+  }
+  generate(t, e = 1, s) {
+    return this.tf.tidy(() => {
+      const i = t, o = i.shape[1], r = o <= this.config.blockSize ? i : i.slice(
+        [0, o - this.config.blockSize],
+        [i.shape[0], this.config.blockSize]
+      ), { logits: n } = this.forward(r, void 0, !1), h = n.shape[1] - 1, a = n.slice([0, h, 0], [n.shape[0], 1, n.shape[2]]).div(e);
+      let l;
+      if (s) {
+        const { values: c, indices: g } = this.tf.topk(a, s), f = this.tf.multinomial(c.squeeze([1]), 1);
+        l = this.tf.gather(g.squeeze([1]), f, 1);
+      } else
+        l = this.tf.multinomial(a.squeeze([1]), 1);
+      return l = l.reshape([1, 1]), l;
+    });
+  }
+  // Get number of parameters
+  getNumParams() {
+    const t = this.config.vocabSize * this.config.nEmbed + this.config.blockSize * this.config.nEmbed, e = this.config.nLayer * (4 * this.config.nEmbed * this.config.nEmbed + // qkv + proj
+    2 * this.config.nEmbed), s = this.config.nLayer * (4 * this.config.nEmbed * this.config.nEmbed + // fc
+    this.config.nEmbed * 4 * this.config.nEmbed), i = this.config.nEmbed + this.config.vocabSize * this.config.nEmbed;
+    return t + e + s + i;
+  }
+}
+export {
+  S as default
+};
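As a worked example of the estimate in `getNumParams()` above: plugging in the default config (vocabSize 50304, blockSize 1024, nLayer 12, nEmbed 768) gives roughly 163M parameters. This simply evaluates the formula in the code; it is not an independent weight count.

```ts
// Evaluating getNumParams() by hand for the default config.
const embeddings = 50304 * 768 + 1024 * 768;              // 39,419,904
const attention  = 12 * (4 * 768 * 768 + 2 * 768);        // 28,329,984
const mlp        = 12 * (4 * 768 * 768 + 768 * 4 * 768);  // 56,623,104
const rest       = 768 + 50304 * 768;                     // 38,634,240
console.log(embeddings + attention + mlp + rest);         // 163,007,232 (~163M)
```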
package/dist/TeachableLLM.d.ts
ADDED
@@ -0,0 +1,21 @@
+import { default as TF } from '@tensorflow/tfjs';
+import { GPTConfig } from './config';
+import { ITokeniser } from './tokeniser/type';
+import { default as NanoGPT } from './NanoGPTModel';
+import { default as Generator, IGenerateOptions } from './Generator';
+import { default as Trainer, ITrainerOptions } from './Trainer';
+export default class TeachableLLM {
+    readonly config: GPTConfig;
+    readonly model: NanoGPT;
+    readonly tf: typeof TF;
+    readonly tokeniser: ITokeniser;
+    constructor(tf: typeof TF, tokeniser: ITokeniser, model: NanoGPT);
+    saveModel(): Promise<Blob>;
+    static loadModel(tf: typeof TF, data: Blob | Buffer | string): Promise<TeachableLLM>;
+    static create(tf: typeof TF, tokeniser: ITokeniser, config?: Partial<GPTConfig>): TeachableLLM;
+    getNumParams(): number;
+    trainer(): Trainer;
+    train(text: string[], options?: ITrainerOptions): Promise<void>;
+    generator(): Generator;
+    generateText(prompt?: string, options?: IGenerateOptions): Promise<void | string>;
+}
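`TeachableLLM` is the package's high-level entry point, so a usage sketch of the full cycle follows, built only from the declarations above. The tokeniser construction is assumed (any `ITokeniser` implementation, such as the package's `CharTokeniser`, would do) and import paths are omitted.

```ts
import * as tf from '@tensorflow/tfjs';

// Sketch: `tokeniser` is an assumed ITokeniser instance; its construction is not shown.
const llm = TeachableLLM.create(tf, tokeniser, { nLayer: 4, nEmbed: 128, blockSize: 128 });

await llm.train(['some training text', 'more training text'], { epochs: 2, batchSize: 16 });
const sample = await llm.generateText('Hello', { maxLength: 100, temperature: 0.9 });

const blob = await llm.saveModel();                  // serialise model + tokeniser to a Blob
const restored = await TeachableLLM.loadModel(tf, blob);
```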
package/dist/TeachableLLM.js
ADDED
@@ -0,0 +1,47 @@
+import s from "./NanoGPTModel.js";
+import { defaultConfig as a } from "./config.js";
+import { saveModel as m } from "./utilities/save.js";
+import { loadModel as l } from "./utilities/load.js";
+import d from "./Generator.js";
+import c from "./Trainer.js";
+import "./tokeniser/CharTokeniser.js";
+class i {
+  config;
+  model;
+  tf;
+  tokeniser;
+  constructor(e, t, r) {
+    this.tf = e, this.config = r.config, this.tokeniser = t, this.model = r;
+  }
+  saveModel() {
+    return m(this.model, this.tokeniser);
+  }
+  static async loadModel(e, t) {
+    const { model: r, tokeniser: o } = await l(e, t);
+    return new i(e, o, r);
+  }
+  static create(e, t, r = {}) {
+    const o = { ...a, ...r };
+    o.vocabSize = t.vocabSize;
+    const n = new s(e, o);
+    return new i(e, t, n);
+  }
+  getNumParams() {
+    return this.model.getNumParams();
+  }
+  trainer() {
+    return new c(this.model, this.tokeniser);
+  }
+  train(e, t) {
+    return this.trainer().train(e, t);
+  }
+  generator() {
+    return new d(this.model, this.tokeniser);
+  }
+  generateText(e, t) {
+    return this.generator().generate(e, t);
+  }
+}
+export {
+  i as default
+};
package/dist/Trainer.d.ts
ADDED
@@ -0,0 +1,19 @@
+import { default as NanoGPT } from './NanoGPTModel';
+import { ITokeniser } from './tokeniser/type';
+import { default as EE } from 'eventemitter3';
+export interface ITrainerOptions {
+    epochs?: number;
+    batchSize?: number;
+    learningRate?: number;
+    maxSteps?: number;
+    desiredLoss?: number;
+    logInterval?: number;
+    prompt?: string;
+    validationSplit?: number;
+}
+export default class Trainer extends EE<'start' | 'stop' | 'log'> {
+    private trainer;
+    constructor(model: NanoGPT, tokeniser: ITokeniser);
+    stop(): void;
+    train(text: string[], options?: ITrainerOptions): Promise<void>;
+}
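A hedged sketch of driving the `Trainer` declared above. The `model`, `tokeniser` and `corpus` values are assumed, and the shape of the `'log'` event payload is an assumption: Trainer.js below forwards whatever the underlying trainer's `onStep` callback receives, which appears to match `TrainingLogEntry` from NanoGPTModel.d.ts.

```ts
// Sketch: `model` (NanoGPT), `tokeniser` (ITokeniser) and `corpus` (string[]) are assumed.
const trainer = new Trainer(model, tokeniser);

// Assumed payload shape: a TrainingLogEntry-like object.
trainer.on('log', (entry: { step: number; loss: number }) => {
  console.log(`step ${entry.step}: loss=${entry.loss.toFixed(4)}`);
});

await trainer.train(corpus, {
  epochs: 2,
  batchSize: 32,
  maxSteps: 100,
  logInterval: 10,
  desiredLoss: 0.01,
  validationSplit: 0.1,
});
```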
package/dist/Trainer.js
ADDED
@@ -0,0 +1,34 @@
+import { E as s } from "./index-SOhdqzHq.js";
+import n from "./training/FullTrainer.js";
+class o extends s {
+  trainer;
+  constructor(a, t) {
+    super(), this.trainer = new n(a.tf, a, t, 1e-3);
+  }
+  stop() {
+  }
+  async train(a, t) {
+    const { trainDataset: e, validationDataset: r } = await this.trainer.createTrainValidationSplit(
+      a,
+      t?.batchSize || 32,
+      t?.validationSplit || 0.1
+    );
+    this.emit("start"), await this.trainer.trainOnDataset(
+      e,
+      {
+        epochs: t?.epochs || 2,
+        prompt: t?.prompt,
+        stepsPerEpoch: t?.maxSteps || 100,
+        logInterval: t?.logInterval || 10,
+        desiredLoss: t?.desiredLoss || 0.01,
+        onStep: async (i) => {
+          this.emit("log", i);
+        }
+      },
+      r
+    ), this.emit("stop");
+  }
+}
+export {
+  o as default
+};
package/dist/_commonjsHelpers-DaMA6jEr.js
ADDED
@@ -0,0 +1,8 @@
+var o = typeof globalThis < "u" ? globalThis : typeof window < "u" ? window : typeof global < "u" ? global : typeof self < "u" ? self : {};
+function l(e) {
+  return e && e.__esModule && Object.prototype.hasOwnProperty.call(e, "default") ? e.default : e;
+}
+export {
+  o as c,
+  l as g
+};
package/dist/assets/worker-BYeSPNkq.js
ADDED
@@ -0,0 +1 @@
+(function(){"use strict";function u(n,t){const e=n,s=[];let a="";for(let o=0;o<e.length;o++){const c=e[o];switch(c){case"0":case"1":case"2":case"3":case"4":case"5":case"6":case"7":case"8":case"9":case":":case";":case",":case".":case"?":case"!":case'"':case"'":case"`":case"(":case")":case"[":case"]":case"{":case"}":case"-":case"_":case"/":case"\\":case"%":case"<":case">":case"=":case"+":case"*":case"&":case"^":case"|":case"~":case"@":case"#":case"$":s.push(a),s.push(c),a="";break;case" ":s.push(a),a=c;break;default:a+=c;break}}return a.length>0&&s.push(a),s}function k(n){const t=new Map;for(let e=0;e<n.length;e++){const s=n[e];for(let a=0;a<s.length-1;a++){const o=`${s[a]}${s[a+1]}`,c=t.get(o)||{a:s[a],b:s[a+1],count:0,instances:new Set};c.count+=1,c.instances.add(e),t.set(o,c)}}return{pairs:t,tokens:n}}function p(n,t,e,s,a){const o=`${t}${e}`;if(n.pairs.has(o)){const c=n.pairs.get(o);c.count+=a,c.instances.add(s)}else n.pairs.set(o,{a:t,b:e,count:a,instances:new Set([s])})}function b(n){let t=null,e=0;for(const s of n.pairs.values())s.count>e&&(e=s.count,t=s);return t}function m(n,t){return n.map(e=>{const s=[];for(let a=0;a<e.length;a++)a<e.length-1&&e[a]===t[0]&&e[a+1]===t[1]?(s.push(t[0]+t[1]),a++):s.push(e[a]);return s})}function y(n,t){t.instances.forEach(e=>{const s=n.tokens[e],a=[];for(let o=0;o<s.length;o++)if(o<s.length-1&&s[o]===t.a&&s[o+1]===t.b){const c=t.a+t.b;a.push(c),o>0&&(p(n,s[o-1],t.a,e,-1),p(n,s[o-1],c,e,1)),o++,o<s.length-1&&(p(n,t.b,s[o+1],e,-1),p(n,c,s[o+1],e,1))}else a.push(s[o]);n.tokens[e]=a}),n.pairs.delete(`${t.a}${t.b}`)}class l{vocab=new Set;vocabIndex=new Map;merges=[];pretokenMap=new Map;constructor(t,e){t&&t.forEach((s,a)=>{this.vocab.add(s),this.vocabIndex.set(s,a)}),e&&(this.merges=e)}train(t,e,s){const a=t.map(i=>u(i)).flat(1),o=new Set(a);this.vocab=new Set,this.pretokenMap.clear(),this.merges=[],this.vocab.add("<eos>");const c=Array.from(o),d=c.map(i=>i.split("").map(h=>(this.vocab.add(h),h))),f=k(d);for(;this.vocab.size<e&&this.merges.length<e;){const i=b(f);if(!i)break;this.merges.push([i.a,i.b]),this.vocab.add(i.a+i.b),y(f,i),s&&this.vocab.size%100===0&&s(this.vocab.size/e,this.vocab.size)}c.forEach((i,g)=>{const h=d[g];this.pretokenMap.set(i,h)}),this.vocabIndex.clear();let M=0;for(const i of this.vocab.keys())this.vocabIndex.set(i,M++)}getVocab(){return Array.from(this.vocab)}getMerges(){return this.merges}tokeniseWord(t){let e=t.split("");return this.merges.forEach(s=>{e=m([e],s)[0]}),this.pretokenMap.set(t,e),e}tokeniseStrings(t){return t.map(e=>u(e).map(o=>this.pretokenMap.has(o)?this.pretokenMap.get(o):this.tokeniseWord(o)).flat(1))}tokenise(t,e){const s=this.tokeniseStrings(t);return e?s.map(a=>a.map(o=>this.vocabIndex.get(o)??-1)):s}}let r=new l;onmessage=async n=>{if(n.data.type==="tokenise")if(n.data.numeric){const t=r.tokenise(n.data.text,!0),e={type:"tokeniseResponse",id:n.data.id,tokens:t,numeric:!0};postMessage(e)}else{const t=r.tokenise(n.data.text),e={type:"tokeniseResponse",id:n.data.id,tokens:t,numeric:!1};postMessage(e)}else if(n.data.type==="detokenise"){const t=r.getVocab(),e=n.data.tokens.map(a=>a.map(o=>t[o]).join("")),s={type:"detokeniseResponse",id:n.data.id,text:e};postMessage(s)}else if(n.data.type==="train"){r=new l,r.train(n.data.text,n.data.vocabSize??100,(e,s)=>{const a={type:"trainStatus",id:n.data.id,progress:e,vocabSize:s};postMessage(a)});const t={type:"trainResponse",id:n.data.id,vocabSize:r.getVocab().length};postMessage(t)}else if(n.data.type==="tokens"){const t=r.getVocab(),e={type:"tokensResponse",id:n.data.id,tokens:t};postMessage(e)}}})();
package/dist/config.d.ts
ADDED
package/dist/config.js
ADDED
@@ -0,0 +1,19 @@
+const e = {
+  vocabSize: 50304,
+  // GPT-2 vocab size
+  blockSize: 1024,
+  // Maximum sequence length
+  nLayer: 12,
+  // Number of transformer layers
+  nHead: 12,
+  // Number of attention heads
+  nEmbed: 768,
+  // Embedding dimension
+  dropout: 0,
+  // Dropout probability
+  biasInLinear: !1,
+  biasInLayerNorm: !1
+};
+export {
+  e as defaultConfig
+};
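For reference, a sketch of overriding these defaults through `TeachableLLM.create` (any field left out falls back to the values above). Note that `create()` in TeachableLLM.js overwrites `vocabSize` with the tokeniser's vocabulary size, so overriding it has no effect; the `tokeniser` instance here is an assumption.

```ts
import * as tf from '@tensorflow/tfjs';

// Sketch: `tokeniser` is an assumed ITokeniser instance.
const small = TeachableLLM.create(tf, tokeniser, {
  blockSize: 256, // shorter context window than the 1024 default
  nLayer: 6,
  nHead: 6,
  nEmbed: 384,
  dropout: 0.1,
});
```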