@genai-fi/nanogpt 0.11.0 → 0.12.0
This diff reflects the contents of package versions publicly released to one of the supported registries, and is provided for informational purposes only.
- package/dist/Generator.js +29 -29
- package/dist/{RealDiv-Ds-jvL09.js → RealDiv-C8neBwFi.js} +17 -17
- package/dist/{Reshape-Cd6e-Otn.js → Reshape-Bd4V_4X7.js} +1 -1
- package/dist/{Reshape-Ct266DEk.js → Reshape-Ck29jQSY.js} +7 -7
- package/dist/TeachableLLM.d.ts +2 -1
- package/dist/TeachableLLM.js +9 -9
- package/dist/Trainer.d.ts +4 -2
- package/dist/Trainer.js +11 -8
- package/dist/{axis_util-DofAuy0p.js → axis_util-DGqbT-FX.js} +1 -1
- package/dist/backend.js +2 -2
- package/dist/{backend_util-C7NWHpv7.js → backend_util-DC3rBo_H.js} +18 -18
- package/dist/{backend_webgpu-B0Vls736.js → backend_webgpu-mbhNnlx9.js} +10 -10
- package/dist/{broadcast_to-DDaNMbX7.js → broadcast_to-D1Dmg2Oz.js} +2 -2
- package/dist/checks/appendCache.js +2 -2
- package/dist/checks/attentionMask.js +3 -3
- package/dist/checks/gelu.js +2 -2
- package/dist/checks/matMulGelu.js +2 -2
- package/dist/checks/normRMS.js +4 -4
- package/dist/checks/normRMSGrad.js +3 -3
- package/dist/checks/packUnpack.js +2 -2
- package/dist/checks/qkv.js +2 -2
- package/dist/checks/rope.js +2 -2
- package/dist/clip_by_value-fg2aKzUy.js +12 -0
- package/dist/{complex-DClmWqJt.js → complex-Cyg-eQeZ.js} +1 -1
- package/dist/concat-CSm2rMwe.js +17 -0
- package/dist/{concat_util-CHsJFZJJ.js → concat_util-D0je5Ppu.js} +1 -1
- package/dist/{dataset-DcjWqUVQ.js → dataset-CVIJu7Xa.js} +3 -3
- package/dist/{dropout-OxuaJz6z.js → dropout-DLhSMNTZ.js} +14 -14
- package/dist/expand_dims-ChkuOp6I.js +11 -0
- package/dist/{exports_initializers-eS9QJ6ut.js → exports_initializers-1KWPiStI.js} +1 -1
- package/dist/{floor-DIb-lN_u.js → floor-BRMPgeIs.js} +1 -1
- package/dist/gather-BSULDalH.js +9 -0
- package/dist/{gelu-DqTbCx5x.js → gelu-BK1k-n1i.js} +1 -1
- package/dist/{gpgpu_math-CJcbnKPC.js → gpgpu_math-BJSTk_mW.js} +25 -25
- package/dist/{index-Dj5TkmPY.js → index-BBVLAXZD.js} +14 -14
- package/dist/{index-D0RBWjq8.js → index-Duu1Lvvv.js} +45 -45
- package/dist/{kernel_funcs_utils-CSaumNDs.js → kernel_funcs_utils-BtYrPoJu.js} +8 -8
- package/dist/layers/BaseLayer.js +2 -2
- package/dist/layers/CausalSelfAttention.js +6 -6
- package/dist/layers/MLP.js +4 -4
- package/dist/layers/PositionEmbedding.js +5 -5
- package/dist/layers/RMSNorm.js +3 -3
- package/dist/layers/RoPECache.js +4 -4
- package/dist/layers/TiedEmbedding.js +6 -6
- package/dist/layers/TransformerBlock.js +1 -1
- package/dist/loader/loadTransformers.js +1 -1
- package/dist/loader/oldZipLoad.js +17 -17
- package/dist/{log_sum_exp-VLZgbFAH.js → log_sum_exp-CVqLsVLl.js} +4 -4
- package/dist/main.d.ts +9 -0
- package/dist/main.js +68 -58
- package/dist/{matMul16-cDxwemKj.js → matMul16-xswmhSuF.js} +7 -7
- package/dist/{matMulGelu-B2s_80-H.js → matMulGelu-BpvgnYG8.js} +26 -26
- package/dist/mat_mul-Bn2BDpT4.js +11 -0
- package/dist/{mod-PrOKlFxH.js → mod-B4AUd1Np.js} +1 -1
- package/dist/models/NanoGPTV1.js +2 -2
- package/dist/models/model.js +9 -9
- package/dist/{ones-BX_wEgzB.js → ones-CBI1AQjb.js} +3 -3
- package/dist/ops/adamAdjust.js +1 -1
- package/dist/ops/adamMoments.js +1 -1
- package/dist/ops/add16.js +1 -1
- package/dist/ops/appendCache.js +3 -3
- package/dist/ops/attentionMask.js +1 -1
- package/dist/ops/concat16.js +2 -2
- package/dist/ops/cpu/adamAdjust.js +7 -7
- package/dist/ops/cpu/adamMoments.js +5 -5
- package/dist/ops/cpu/appendCache.js +6 -6
- package/dist/ops/cpu/attentionMask.js +6 -6
- package/dist/ops/cpu/fusedSoftmax.js +5 -5
- package/dist/ops/cpu/gatherSub.js +7 -7
- package/dist/ops/cpu/gelu.js +5 -5
- package/dist/ops/cpu/matMul16.js +2 -2
- package/dist/ops/cpu/matMulGelu.js +3 -3
- package/dist/ops/cpu/matMulMul.js +5 -5
- package/dist/ops/cpu/mulDropout.js +1 -1
- package/dist/ops/cpu/normRMS.js +5 -5
- package/dist/ops/cpu/qkv.js +3 -3
- package/dist/ops/cpu/rope.js +9 -9
- package/dist/ops/cpu/scatterSub.js +5 -5
- package/dist/ops/dot16.js +2 -2
- package/dist/ops/gatherSub.js +1 -1
- package/dist/ops/gelu.js +2 -2
- package/dist/ops/grads/add16.js +1 -1
- package/dist/ops/grads/attentionMask.js +2 -2
- package/dist/ops/grads/gelu.js +2 -2
- package/dist/ops/grads/matMul16.js +3 -3
- package/dist/ops/grads/matMulGelu.js +5 -5
- package/dist/ops/grads/normRMS.js +6 -6
- package/dist/ops/grads/pack16.js +3 -3
- package/dist/ops/grads/qkv.js +9 -9
- package/dist/ops/grads/rope.js +2 -2
- package/dist/ops/grads/softmax16.js +1 -1
- package/dist/ops/grads/unpack16.js +2 -2
- package/dist/ops/matMul16.js +3 -3
- package/dist/ops/matMulGelu.js +2 -2
- package/dist/ops/matMulMul.js +1 -1
- package/dist/ops/mul16.js +1 -1
- package/dist/ops/mulDrop.js +1 -1
- package/dist/ops/normRMS.js +1 -1
- package/dist/ops/pack16.js +2 -2
- package/dist/ops/qkv.js +1 -1
- package/dist/ops/reshape16.js +6 -6
- package/dist/ops/rope.js +2 -2
- package/dist/ops/scatterSub.js +1 -1
- package/dist/ops/slice16.js +2 -2
- package/dist/ops/softmax16.js +1 -1
- package/dist/ops/sub16.js +1 -1
- package/dist/ops/sum16.js +2 -2
- package/dist/ops/transpose16.js +6 -6
- package/dist/ops/unpack16.js +2 -2
- package/dist/ops/webgl/adamAdjust.js +2 -2
- package/dist/ops/webgl/adamMoments.js +1 -1
- package/dist/ops/webgl/appendCache.js +1 -1
- package/dist/ops/webgl/attentionMask.js +4 -4
- package/dist/ops/webgl/fusedSoftmax.js +6 -6
- package/dist/ops/webgl/gatherSub.js +1 -1
- package/dist/ops/webgl/gelu.js +2 -2
- package/dist/ops/webgl/log.js +3 -3
- package/dist/ops/webgl/matMul16.js +10 -10
- package/dist/ops/webgl/matMulGelu.js +4 -4
- package/dist/ops/webgl/matMulMul.js +2 -2
- package/dist/ops/webgl/mulDropout.js +1 -1
- package/dist/ops/webgl/normRMS.js +2 -2
- package/dist/ops/webgl/qkv.js +1 -1
- package/dist/ops/webgl/rope.js +4 -4
- package/dist/ops/webgl/scatterSub.js +1 -1
- package/dist/ops/webgpu/adamAdjust.js +3 -3
- package/dist/ops/webgpu/adamMoments.js +5 -5
- package/dist/ops/webgpu/add16.js +1 -1
- package/dist/ops/webgpu/appendCache.js +3 -3
- package/dist/ops/webgpu/attentionMask.js +5 -5
- package/dist/ops/webgpu/attentionMask32_program.js +2 -2
- package/dist/ops/webgpu/concat16.js +5 -5
- package/dist/ops/webgpu/gatherSub.js +3 -3
- package/dist/ops/webgpu/gelu.js +3 -3
- package/dist/ops/webgpu/matMul16.js +19 -19
- package/dist/ops/webgpu/matMul16_program.js +2 -2
- package/dist/ops/webgpu/mul16.js +1 -1
- package/dist/ops/webgpu/normRMS.js +2 -2
- package/dist/ops/webgpu/normRMSGrad.js +4 -4
- package/dist/ops/webgpu/pack16.js +3 -3
- package/dist/ops/webgpu/pack16_program.js +2 -2
- package/dist/ops/webgpu/qkv.js +4 -4
- package/dist/ops/webgpu/rope.js +3 -3
- package/dist/ops/webgpu/scatterSub.js +3 -3
- package/dist/ops/webgpu/slice16.js +4 -4
- package/dist/ops/webgpu/softmax16.js +4 -4
- package/dist/ops/webgpu/softmax16_program.js +2 -2
- package/dist/ops/webgpu/softmax16_subgroup_program.js +2 -2
- package/dist/ops/webgpu/softmax16grad.js +1 -1
- package/dist/ops/webgpu/sub16.js +1 -1
- package/dist/ops/webgpu/sum16.js +5 -5
- package/dist/ops/webgpu/transpose16.js +2 -2
- package/dist/ops/webgpu/transpose16_program.js +2 -2
- package/dist/ops/webgpu/transpose16_shared_program.js +3 -3
- package/dist/ops/webgpu/unpack16.js +5 -5
- package/dist/ops/webgpu/utils/binary_op.js +3 -3
- package/dist/ops/webgpu/utils/reductions.js +4 -4
- package/dist/{ops-FJapAPfm.js → ops-C2_OXuZ4.js} +35 -35
- package/dist/{pack16-k4jq6aMX.js → pack16-atD0eYRm.js} +6 -6
- package/dist/patches/webgpu_backend.js +8 -8
- package/dist/patches/webgpu_base.js +1 -1
- package/dist/patches/webgpu_program.js +2 -2
- package/dist/{random_width-UGQn4OWb.js → random_width-BN4wGJaW.js} +33 -33
- package/dist/{range-CuGvVN2c.js → range-DKmP1-OQ.js} +1 -1
- package/dist/relu-BsXmGzzu.js +9 -0
- package/dist/{reshape-CkjKPPqB.js → reshape-BI0yzp1T.js} +1 -1
- package/dist/{resize_nearest_neighbor-DB8k9KN_.js → resize_nearest_neighbor-BA_BX-ub.js} +25 -25
- package/dist/{rope-BmZmp9uP.js → rope-DJ7Y7c-u.js} +1 -1
- package/dist/{scatter_nd_util-BY22Cc-C.js → scatter_nd_util-k9MUVUkn.js} +1 -1
- package/dist/{selu_util-BuLbmbrl.js → selu_util-DyW0X1WG.js} +5 -5
- package/dist/{shared-B7USJZgw.js → shared-Q3BS6T03.js} +1 -1
- package/dist/{shared-BQboIImQ.js → shared-nnSWpC3u.js} +6 -6
- package/dist/{slice-Aqy7KbJh.js → slice-wBNvzVyz.js} +3 -3
- package/dist/{slice_util-D8CQRenR.js → slice_util-zN8KFC5I.js} +7 -7
- package/dist/{softmax-faLoUZVT.js → softmax-DfuYyjMh.js} +1 -1
- package/dist/split-BYrLboMq.js +9 -0
- package/dist/squeeze-Bk8Brcct.js +10 -0
- package/dist/{stack-WJK22CFn.js → stack-CDWShFHF.js} +1 -1
- package/dist/{step-dXR33iOg.js → step-BS5JXRR6.js} +14 -14
- package/dist/sum-BPUfDB2X.js +11 -0
- package/dist/{tensor-BQqrDvpx.js → tensor-CEt9Nm2s.js} +1 -1
- package/dist/{tensor1d-LxP9asMm.js → tensor1d-Cc_KCIDg.js} +1 -1
- package/dist/{tensor2d-BN1sSfQO.js → tensor2d-BN97fF71.js} +1 -1
- package/dist/{tensor4d-DVwr7pLF.js → tensor4d-vuDDgdUI.js} +1 -1
- package/dist/{tfjs_backend-Vi4JfLzT.js → tfjs_backend-806hyYve.js} +36 -36
- package/dist/tile-OWUvpIVt.js +11 -0
- package/dist/tokeniser/BaseTokeniser.d.ts +6 -8
- package/dist/tokeniser/BaseTokeniser.js +6 -6
- package/dist/tokeniser/CharTokeniser.d.ts +6 -6
- package/dist/tokeniser/CharTokeniser.js +26 -26
- package/dist/tokeniser/bpe.d.ts +6 -6
- package/dist/tokeniser/bpe.js +9 -9
- package/dist/tokeniser/type.d.ts +6 -8
- package/dist/training/Adam.js +2 -2
- package/dist/training/AdamExt.js +1 -1
- package/dist/training/DatasetBuilder.d.ts +1 -1
- package/dist/training/DatasetBuilder.js +29 -29
- package/dist/training/FullTrainer.js +1 -1
- package/dist/training/Trainer.d.ts +5 -4
- package/dist/training/Trainer.js +22 -25
- package/dist/training/sparseCrossEntropy.js +3 -3
- package/dist/training/tasks/ConversationTask.d.ts +11 -0
- package/dist/training/tasks/ConversationTask.js +26 -0
- package/dist/training/tasks/PretrainingTask.d.ts +11 -0
- package/dist/training/tasks/PretrainingTask.js +34 -0
- package/dist/training/tasks/StartSentenceTask.d.ts +12 -0
- package/dist/training/tasks/StartSentenceTask.js +42 -0
- package/dist/training/tasks/Task.d.ts +8 -0
- package/dist/training/tasks/Task.js +41 -0
- package/dist/{transpose-JawVKyZy.js → transpose-BUkQCJp9.js} +7 -7
- package/dist/{unsorted_segment_sum-LAbmE9G4.js → unsorted_segment_sum-BljxHhCY.js} +78 -78
- package/dist/utilities/dummy.js +3 -3
- package/dist/utilities/multinomialCPU.js +2 -2
- package/dist/utilities/packed.js +1 -1
- package/dist/utilities/performance.js +1 -1
- package/dist/utilities/profile.js +1 -1
- package/dist/utilities/safetensors.js +2 -2
- package/dist/utilities/sentences.d.ts +1 -1
- package/dist/utilities/sentences.js +11 -11
- package/dist/utilities/weights.js +2 -2
- package/dist/{variable-DQ9yYgEU.js → variable-DPt_Iuog.js} +1 -1
- package/dist/{webgpu_program-CAE4RICo.js → webgpu_program-BpWRlghH.js} +1 -1
- package/dist/{webgpu_util-BdovYhXr.js → webgpu_util-DMiKzzQM.js} +7 -7
- package/dist/{zeros-DeiE2zTa.js → zeros-5YROwwUH.js} +2 -2
- package/dist/{zeros_like-BAz3iKru.js → zeros_like-De4n1C3m.js} +57 -57
- package/package.json +1 -1
- package/dist/clip_by_value-Dn5tzexi.js +0 -12
- package/dist/concat-C6X3AAlQ.js +0 -17
- package/dist/expand_dims-BzfJK2uc.js +0 -11
- package/dist/gather-BcO5UQNJ.js +0 -9
- package/dist/mat_mul-DxpNTCRz.js +0 -11
- package/dist/relu-Cf80uA2p.js +0 -9
- package/dist/split-BNz5jcGc.js +0 -9
- package/dist/squeeze--YMgaAAf.js +0 -10
- package/dist/sum-BdplSvq_.js +0 -11
- package/dist/tile-CvN_LyVr.js +0 -11
package/dist/tokeniser/BaseTokeniser.js
CHANGED

@@ -24,11 +24,11 @@ class k extends r {
   addSpecialToken(e, t) {
     this.specialTokens.set(e, t), this.specialTokenSet.add(t);
   }
-
-  const t =
+  encodeSequence(e) {
+    const t = this.encode(e);
     return [this.bosToken, ...t, this.eosToken];
   }
-
+  encodeConversation(e, t) {
     const s = [[this.bosToken]], a = [
       this.getSpecialTokenIndex("<|user_start|>"),
       this.getSpecialTokenIndex("<|assistant_start|>"),
@@ -39,7 +39,7 @@ class k extends r {
       this.getSpecialTokenIndex("<|system_end|>")
     ];
     for (const i of e) {
-      const c =
+      const c = this.encode(i.content);
       switch (i.role) {
         case "user":
           s.push([a[0]]);
@@ -66,7 +66,7 @@ class k extends r {
     const o = s.flat();
     return t ? o.push(a[1]) : o.push(this.eosToken), o;
   }
-
+  decodeConversation(e) {
     const t = [];
     let s = 0;
     for (; s < e.length; ) {
@@ -77,7 +77,7 @@ class k extends r {
       const o = [];
       for (; s < e.length && e[s] !== this.getSpecialTokenIndex(`<|${n}_end|>`); )
         o.push(e[s]), s++;
-      const i =
+      const i = this.decode(o);
       t.push({ role: n, content: i });
     }
     s++;
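The minified identifiers make the new methods hard to read, so here is a plain-TypeScript rendering of the `encodeSequence` contract visible in the hunk above. The standalone-function form and the `TokeniserLike` shape are mine for illustration, not the package's API:

```ts
// Minimal shape needed from the tokeniser for this sketch.
interface TokeniserLike {
  bosToken: number;
  eosToken: number;
  encode(text: string): number[];
}

// Readable equivalent of the minified encodeSequence above:
// encode the text, then wrap it in the BOS/EOS sentinel tokens.
function encodeSequence(tok: TokeniserLike, text: string): number[] {
  return [tok.bosToken, ...tok.encode(text), tok.eosToken];
}
```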
package/dist/tokeniser/CharTokeniser.d.ts
CHANGED

@@ -13,12 +13,12 @@ export default class CharTokeniser extends BaseTokeniser {
     get trained(): boolean;
     destroy(): void;
     train(text: string[]): Promise<number>;
-    tokenise(text: string[], numeric: true):
-    tokenise(text: string[]):
-    detokenise(tokens: number[][]):
-    encode(text: string):
-    decode(tokens: number[]):
+    tokenise(text: string[], numeric: true): number[][];
+    tokenise(text: string[]): string[][];
+    detokenise(tokens: (number[] | Uint16Array)[]): string[];
+    encode(text: string): number[];
+    decode(tokens: number[] | Uint16Array): string;
     getVocab(): string[];
-    getMerges():
+    getMerges(): [string, string][];
     createTrainingData(text: string[], windowSize?: number): Promise<[number[], number[]]>;
 }
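With the return types now spelled out, the `numeric` flag cleanly selects between the two `tokenise` overloads. A small illustration; the structural `tok` declaration is hypothetical, but any trained CharTokeniser satisfies it:

```ts
// Hypothetical trained instance; only the overload surface matters here.
declare const tok: {
  tokenise(text: string[], numeric: true): number[][];
  tokenise(text: string[]): string[][];
  decode(tokens: number[] | Uint16Array): string;
};

const ids = tok.tokenise(["hi"], true);           // number[][] — numeric token ids
const strs = tok.tokenise(["hi"]);                // string[][] — string tokens
const text = tok.decode(new Uint16Array(ids[0])); // Uint16Array now accepted by decode
```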
package/dist/tokeniser/CharTokeniser.js
CHANGED

@@ -8,9 +8,9 @@ class b extends k {
   vocab = [];
   cache = /* @__PURE__ */ new Map();
   _trained = !1;
-  constructor(
-    if (super(), Array.isArray(
-      if (this.vocab =
+  constructor(i) {
+    if (super(), Array.isArray(i)) {
+      if (this.vocab = i, this.vocab.length > 0)
         this.vocabSize = this.vocab.length, d.forEach((t) => {
           const e = this.vocab.indexOf(t);
           e !== -1 && this.addSpecialToken(t, e);
@@ -21,37 +21,37 @@ class b extends k {
         throw new Error("Vocab cannot be empty");
       this._trained = !0;
     } else
-      this.vocabSize =
+      this.vocabSize = i, this.vocab = new Array(this.vocabSize).fill(""), this.addSpecialTokens(), this.eosToken = this.getSpecialTokenIndex("<eos>"), this.bosToken = this.getSpecialTokenIndex("<bos>") ?? this.eosToken, this.unkToken = this.getSpecialTokenIndex(""), this.vocab.forEach((t, e) => {
       this.cache.set(t, e);
     }), this.cache.set("", this.unkToken);
   }
-  addToken(
-    if (this.cache.has(
-      return this.cache.get(
+  addToken(i, t) {
+    if (this.cache.has(i))
+      return this.cache.get(i);
     let e;
     if (t !== void 0 ? e = t : (e = this.vocab.indexOf("", this.unkToken + 1), e === -1 && (e = this.vocabSize)), e >= this.vocabSize)
       throw new Error("Vocab size exceeded");
-    return this.vocab[e] =
+    return this.vocab[e] = i, this.cache.set(i, e), e;
   }
   get trained() {
     return this.vocab.length === this.vocabSize && this._trained;
   }
   destroy() {
   }
-  async train(
-    const t =
+  async train(i) {
+    const t = i.map((n) => n.split("")).flat(), e = new Set(t), s = Array.from(e), h = this.vocab.indexOf("", this.unkToken + 1), o = this.vocabSize - u.length;
     if (h === -1)
       return this.vocabSize;
-    if (this._trained = !0,
+    if (this._trained = !0, s.length > o) {
       const n = /* @__PURE__ */ new Map();
       t.forEach((a) => {
         n.set(a, (n.get(a) || 0) + 1);
-      }),
+      }), s.sort((a, r) => (n.get(a) || 0) - (n.get(r) || 0)), s.splice(0, s.length - o);
     }
     let c = h;
     if (c !== -1) {
       const n = new Set(this.vocab);
-      for (const a of
+      for (const a of s)
         if (!n.has(a) && (this.vocab[c] = a, n.add(a), c = this.vocab.indexOf("", c + 1), c === -1))
           break;
     }
@@ -59,34 +59,34 @@ class b extends k {
       this.cache.set(n, a);
     }), this.emit("trainStatus", "trained"), this.vocabSize;
   }
-
+  tokenise(i, t) {
     if (!this.trained)
       throw new Error("Tokeniser not trained");
-    return
+    return i.map((s) => t ? s.split("").map((h) => this.cache.get(h) ?? this.unkToken) : s.split("").map((h) => {
       const o = this.cache.get(h);
       return o !== void 0 ? this.vocab[o] : "";
     }));
   }
-
-    return
+  detokenise(i) {
+    return i.map((e) => Array.from(e).map((s) => this.vocab[s] || "").join(""));
   }
-
-    return
+  encode(i) {
+    return this.tokenise([i], !0)[0];
  }
-
-    return
+  decode(i) {
+    return this.detokenise([i])[0];
   }
   getVocab() {
     return this.vocab;
   }
-
+  getMerges() {
     return [];
   }
-  async createTrainingData(
-    const e = await this.tokenise(
+  async createTrainingData(i, t = 5) {
+    const e = await this.tokenise(i, !0), s = [], h = [];
     for (let o = 0; o < e.length - t; o++)
-
-    return [
+      s.push(...e[o].slice(0, t)), h.push(e[o + 1][0]);
+    return [s, h];
   }
 }
 export {
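Deminified, `createTrainingData(i, t = 5)` pairs the first `windowSize` token ids of each text with the first id of the following text. A readable sketch of the same loop; the standalone-function form is mine:

```ts
// Readable form of createTrainingData(i, t = 5) from the minified body above.
// `tokenise` is the numeric overload: (texts, true) => number[][].
function createTrainingData(
  tokenise: (texts: string[], numeric: true) => number[][],
  texts: string[],
  windowSize = 5
): [number[], number[]] {
  const encoded = tokenise(texts, true);
  const xs: number[] = [];
  const ys: number[] = [];
  for (let o = 0; o < encoded.length - windowSize; o++) {
    xs.push(...encoded[o].slice(0, windowSize)); // first windowSize ids of text o
    ys.push(encoded[o + 1][0]);                  // first id of the following text
  }
  return [xs, ys];
}
```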
package/dist/tokeniser/bpe.d.ts
CHANGED

@@ -16,12 +16,12 @@ export default class BPETokeniser extends BaseTokeniser {
     get unkToken(): number;
     train(text: string[]): Promise<number>;
     getVocab(): string[];
-    getMerges():
+    getMerges(): [string, string][];
     private tokeniseWord;
     private tokeniseStrings;
-    tokenise(text: string[], numeric: true):
-    tokenise(text: string[]):
-    detokenise(tokens: number[][]):
-    encode(text: string):
-    decode(tokens: number[]):
+    tokenise(text: string[], numeric: true): number[][];
+    tokenise(text: string[]): string[][];
+    detokenise(tokens: number[][]): string[];
+    encode(text: string): number[];
+    decode(tokens: number[]): string;
 }
package/dist/tokeniser/bpe.js
CHANGED

@@ -53,7 +53,7 @@ function v(o, e) {
     o.tokens[s] = n;
   }), o.pairs.delete(u(e.a, e.b));
 }
-class
+class x extends d {
   targetSize;
   vocab = /* @__PURE__ */ new Set();
   vocabIndex = /* @__PURE__ */ new Map();
@@ -116,7 +116,7 @@ class T extends d {
   getVocab() {
     return Array.from(this.vocab);
   }
-
+  getMerges() {
     return this.merges;
   }
   tokeniseWord(e) {
@@ -128,21 +128,21 @@ class T extends d {
   tokeniseStrings(e) {
     return e.map((s) => l(s).map((r) => this.pretokenMap.has(r) ? this.pretokenMap.get(r) : this.tokeniseWord(r)).flat(1));
   }
-
+  tokenise(e, s) {
     const t = this.tokeniseStrings(e);
     return s ? t.map((n) => n.map((r) => this.vocabIndex.get(r) ?? this.unkToken)) : t.map((n) => n.map((r) => this.vocab.has(r) ? r : ""));
   }
-
+  detokenise(e) {
     const s = this.getVocab();
     return e.map((n) => n.map((r) => s[r]).join(""));
   }
-
-    return
+  encode(e) {
+    return this.tokenise([e], !0)[0];
   }
-
-    return
+  decode(e) {
+    return this.detokenise([e])[0];
   }
 }
 export {
-
+  x as default
 };
package/dist/tokeniser/type.d.ts
CHANGED

@@ -6,16 +6,14 @@ export interface Conversation {
 }
 export interface ITokeniser extends EE<'trainStatus'> {
     train(text: string[]): Promise<number>;
-    tokenise(text: string[], numeric?: boolean): Promise<string[][] | number[][]>;
-    detokenise(tokens: string[][] | number[][]): Promise<string[]>;
     getVocab(): string[];
-    getMerges():
+    getMerges(): [string, string][];
     destroy(): void;
-    encode(text: string):
-    encodeConversation(conversation: Conversation[], completion?: boolean):
-    encodeSequence(text: string):
-    decode(tokens: number[]):
-    decodeConversation(tokens: number[]):
+    encode(text: string): number[];
+    encodeConversation(conversation: Conversation[], completion?: boolean): number[];
+    encodeSequence(text: string): number[];
+    decode(tokens: number[] | Uint16Array): string;
+    decodeConversation(tokens: number[] | Uint16Array): Conversation[];
     vocabSize: number;
     eosToken: number;
     bosToken: number;
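The substantive change here is that the tokeniser surface is now synchronous and fully typed: the Promise-returning `tokenise`/`detokenise` members are gone from the interface, and the decode-side methods accept `Uint16Array`. A minimal round trip, assuming `ITokeniser` and `Conversation` are re-exported from the package root as the `'../../main'` imports in the new task files suggest:

```ts
import type { Conversation, ITokeniser } from "@genai-fi/nanogpt";

// encode → decode round trip with the 0.12.0 synchronous surface.
function roundTrip(tok: ITokeniser, text: string): string {
  const ids = tok.encodeSequence(text);    // number[]: [bos, ...tokens, eos]
  return tok.decode(new Uint16Array(ids)); // Uint16Array input now allowed
}

// Conversations round-trip through the same typed methods.
function convoRoundTrip(tok: ITokeniser, convo: Conversation[]): Conversation[] {
  return tok.decodeConversation(tok.encodeConversation(convo));
}
```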
package/dist/training/Adam.js
CHANGED

@@ -1,7 +1,7 @@
 import { adamAdjust as b } from "../ops/adamAdjust.js";
 import { adamMoments as d } from "../ops/adamMoments.js";
-import { O as g, e as h, t as o, d as B } from "../index-
-import { z as M } from "../zeros-
+import { O as g, e as h, t as o, d as B } from "../index-Duu1Lvvv.js";
+import { z as M } from "../zeros-5YROwwUH.js";
 class R extends g {
   constructor(t, a, e, s, i = null) {
     super(), this.learningRate = t, this.beta1 = a, this.beta2 = e, this.lossScaling = s, this.epsilon = i, this.accBeta1 = a, this.accBeta2 = e, i === null && (this.epsilon = h().backend.epsilon());
package/dist/training/AdamExt.js
CHANGED

package/dist/training/DatasetBuilder.d.ts
CHANGED

@@ -8,7 +8,7 @@ export declare class DatasetBuilder {
     blockSize: number;
     private pageSize;
     constructor(tokenizer: ITokeniser, blockSize?: number);
-    createTextDataset(flatTokens:
+    createTextDataset(flatTokens: Uint16Array, batchSize?: number, masked?: Set<number>, invertMask?: boolean): Promise<Dataset<{
         xs: Tensor;
         ys: Tensor;
     }>>;
package/dist/training/DatasetBuilder.js
CHANGED

@@ -1,63 +1,63 @@
-import { t as
-import { d as
+import { t as y } from "../index-Duu1Lvvv.js";
+import { d as g, i as z } from "../dataset-CVIJu7Xa.js";
 import "../index-Cp39cXWe.js";
-function
-  return
+function b(a) {
+  return g(async () => {
     const t = await a();
-    return
+    return z(() => t.next());
   });
 }
-const
-async function
-  return (await Promise.all(a.map((
+const f = 8;
+async function w(a, t) {
+  return (await Promise.all(a.map((s) => t.encodeConversation(s)))).flat();
 }
-class
+class m {
   tokenizer;
   blockSize;
   pageSize;
-  constructor(t,
-    this.tokenizer = t, this.blockSize =
+  constructor(t, r = 128) {
+    this.tokenizer = t, this.blockSize = r, this.pageSize = r * f;
   }
   // Create dataset from text files
-  async createTextDataset(t,
+  async createTextDataset(t, r = 32, i, s) {
     if (t.length < this.blockSize + 1)
       throw new Error(`Not enough tokens (${t.length}) for block size ${this.blockSize}`);
     if (i && i.size > t.length / this.pageSize / 2)
       throw new Error("Too many masked pages - would leave insufficient training data");
     const l = (function* () {
-      if (i &&
+      if (i && s) {
         const e = Array.from(i);
         for (; ; ) {
-          const
-          if (
+          const o = Math.floor(Math.random() * e.length), h = Math.floor(Math.random() * this.pageSize), n = e[o] * this.pageSize + h;
+          if (n + this.blockSize + 1 > t.length)
            continue;
-          const c = t.
-          yield { xs: c, ys:
+          const c = new Int32Array(t.subarray(n, n + this.blockSize)), u = new Int32Array(t.subarray(n + 1, n + this.blockSize + 1));
+          yield { xs: c, ys: u };
         }
       } else
         for (; ; ) {
           const e = Math.floor(Math.random() * (t.length - this.blockSize - 1));
           if (i) {
-            const
-            if (c && !
+            const n = Math.floor(e / this.pageSize), c = i.has(n);
+            if (c && !s || !c && s)
              continue;
          }
-          const
-          yield { xs:
+          const o = new Int32Array(t.subarray(e, e + this.blockSize)), h = new Int32Array(t.subarray(e + 1, e + this.blockSize + 1));
+          yield { xs: o, ys: h };
         }
     }).bind(this);
-    return
-    const
-    return
-      xs:
-      ys:
+    return b(l).batch(r).map((e) => {
+      const o = e;
+      return y(() => ({
+        xs: o.xs.cast("int32"),
+        ys: o.ys.cast("int32")
        // this.tf.oneHot(batchData.ys.cast('int32'), this.tokenizer.vocabSize),
      }));
    }).prefetch(2);
  }
}
 export {
-
-
-
+  m as DatasetBuilder,
+  f as PAGE_FACTOR,
+  w as flattenTokens
 };
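The sampling scheme is easier to see deminified: the flat token buffer is divided into pages of `blockSize * PAGE_FACTOR` (PAGE_FACTOR is 8), random block starts are drawn, and a page-level mask decides whether a start is kept; with `invertMask` the sampler draws only from masked pages, which is how a validation stream is carved from the same buffer. A sketch of the accept test; the function and its names are mine:

```ts
// Accept/reject test applied to a random block start (reconstructed from the
// generator above; pageSize is blockSize * PAGE_FACTOR, i.e. blockSize * 8).
function acceptStart(
  start: number,
  pageSize: number,
  masked: Set<number>,
  invertMask: boolean
): boolean {
  const page = Math.floor(start / pageSize);
  const isMasked = masked.has(page);
  // train: keep only unmasked pages; validation (invertMask): only masked ones
  return invertMask ? isMasked : !isMasked;
}
```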
package/dist/training/FullTrainer.js
CHANGED

@@ -1,6 +1,6 @@
 import b from "./Trainer.js";
 import L from "./Evaluator.js";
-import { d as w } from "../index-
+import { d as w } from "../index-Duu1Lvvv.js";
 import y from "../utilities/profile.js";
 import { createTensorStatistics as D } from "../checks/weights.js";
 const T = {
package/dist/training/Trainer.d.ts
CHANGED

@@ -1,11 +1,12 @@
-import {
+import { ITokeniser } from '../tokeniser/type';
 import { DatasetBuilder } from './DatasetBuilder';
 import { default as AdamExt } from './AdamExt';
-import { NamedTensorMap
+import { NamedTensorMap } from '@tensorflow/tfjs-core/dist/tensor_types';
 import { Scalar, Tensor } from '@tensorflow/tfjs-core';
 import { Dataset } from '@tensorflow/tfjs-data';
 import { default as Model, ModelForwardAttributes } from '../models/model';
 import { TensorStatistics } from '../checks/weights';
+import { Task } from './tasks/Task';
 export interface TrainingLogEntry {
     loss: number;
     valLoss?: number;
@@ -93,7 +94,7 @@ export default abstract class GPTTrainer {
     log: TrainingLogEntry;
     progress: TrainingProgress;
 }>;
-    createTrainValidationSplit(
+    createTrainValidationSplit(tasks: Task[], batchSize?: number, validationSplit?: number): Promise<{
     trainDataset: Dataset<{
         xs: Tensor;
         ys: Tensor;
@@ -102,7 +103,7 @@ export default abstract class GPTTrainer {
         xs: Tensor;
         ys: Tensor;
     }>;
+    size: number;
 }>;
-    createDataset(textData: Conversation[][], batchSize?: number): Promise<Dataset<TensorContainer>>;
     dispose(): void;
 }
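Callers now hand the trainer `Task` objects instead of raw `Conversation[][]`, and `createDataset` is gone. A hypothetical call site; the deep `dist/` import paths mirror this package layout but are an assumption, as is the exact `Conversation` shape:

```ts
import PretrainingTask from "@genai-fi/nanogpt/dist/training/tasks/PretrainingTask";
import ConversationTask from "@genai-fi/nanogpt/dist/training/tasks/ConversationTask";

// A concrete GPTTrainer instance is assumed in scope; only the method under
// discussion is typed here.
declare const trainer: {
  createTrainValidationSplit(
    tasks: (PretrainingTask | ConversationTask)[],
    batchSize?: number,
    validationSplit?: number
  ): Promise<{ trainDataset: unknown; validationDataset: unknown; size: number }>;
};

const tasks = [
  new PretrainingTask(["Raw corpus text one.", "Raw corpus text two."]),
  new ConversationTask([[
    { role: "user", content: "Hi" },
    { role: "assistant", content: "Hello!" },
  ]]),
];
// batches of 32, 10% of token pages held out for validation
const { trainDataset, validationDataset, size } =
  await trainer.createTrainValidationSplit(tasks, 32, 0.1);
```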
package/dist/training/Trainer.js
CHANGED

@@ -1,10 +1,11 @@
-import { DatasetBuilder as
+import { DatasetBuilder as u, PAGE_FACTOR as f } from "./DatasetBuilder.js";
 import z from "./AdamExt.js";
-import { t as S, v as
-import {
-
+import { t as S, v as y, k, d as h, b as p } from "../index-Duu1Lvvv.js";
+import { tokensFromTasks as x } from "./tasks/Task.js";
+import { z as m } from "../zeros-5YROwwUH.js";
+class B {
   constructor(t, e, s = 1e-3) {
-    this.tokenizer = e, this.model = t, this.lossScaling = t.lossScaling, this.learningRate = s, this.resetOptimizer(), this.datasetBuilder = new
+    this.tokenizer = e, this.model = t, this.lossScaling = t.lossScaling, this.learningRate = s, this.resetOptimizer(), this.datasetBuilder = new u(e, t.config.blockSize);
   }
   model;
   optimizer;
@@ -53,8 +54,8 @@ class M {
   trainStep(t, e, s = !1, i = !1) {
     return S(() => {
       this.model.getProfiler()?.startMemory();
-      const { xs: a, ys: l } = e,
-        const [
+      const { xs: a, ys: l } = e, d = () => {
+        const [o, c] = this.model.forward(
         {
           training: !0,
           checkpointing: this._gradientCheckpointing,
@@ -63,15 +64,15 @@ class M {
         a,
         l
       );
-
-      const
-      return
-      }, { value:
-      return s ? this.model.getProfiler()?.endMemory("Training") : (this.optimizer.applyGradients(r), this.model.getProfiler()?.endMemory("Training"), i ? (t.gradients = r, Object.values(r).forEach((
+        o.dispose();
+        const g = c.mul(p(this.lossScaling));
+        return c.dispose(), g;
+      }, { value: n, grads: r } = y(d);
+      return s ? this.model.getProfiler()?.endMemory("Training") : (this.optimizer.applyGradients(r), this.model.getProfiler()?.endMemory("Training"), i ? (t.gradients = r, Object.values(r).forEach((o) => k(o))) : h(r)), n.mul(p(1 / this.lossScaling));
     });
   }
   async dummyPass() {
-    const t =
+    const t = m([1, this.model.config.blockSize], "int32"), e = m([1, this.model.config.blockSize], "int32");
     try {
       const s = this.trainStep({}, { xs: t, ys: e }, !0);
       await s.data(), s.dispose();
@@ -86,34 +87,30 @@ class M {
       const i = this.trainStep(t, e, !1, s);
       return e.xs.dispose(), e.ys.dispose(), t.step++, t.totalSteps++, i;
     } catch (i) {
-      throw console.error(`Error processing batch at step ${t.step}:`, i),
+      throw console.error(`Error processing batch at step ${t.step}:`, i), h(), i;
     }
   }
   async createTrainValidationSplit(t, e = 32, s = 0.1) {
-    const i = await
+    const i = await x(t, this.tokenizer), a = /* @__PURE__ */ new Set();
     if (s > 0) {
-      const
+      const n = Math.floor(i.length / (this.datasetBuilder.blockSize * f)), r = Math.max(1, Math.floor(n * s));
       for (; a.size < r; ) {
-        const
-        a.add(
+        const o = Math.floor(Math.random() * n);
+        a.add(o);
      }
    }
-    const l = await this.datasetBuilder.createTextDataset(i, e, a, !1),
+    const l = await this.datasetBuilder.createTextDataset(i, e, a, !1), d = await this.datasetBuilder.createTextDataset(
      i,
      e,
      a,
      !0
    );
-    return { trainDataset: l, validationDataset:
-  }
-  async createDataset(t, e = 32) {
-    const s = await h(t, this.tokenizer);
-    return await this.datasetBuilder.createTextDataset(s, e);
+    return { trainDataset: l, validationDataset: d, size: i.length };
  }
  dispose() {
    this.optimizer && this.optimizer.dispose();
  }
}
 export {
-
+  B as default
 };
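Buried in the minified `trainStep` is a standard mixed-precision loss-scaling pattern: the loss is multiplied by `lossScaling` before gradients are taken, and the value returned to the caller is multiplied by `1 / lossScaling`. A self-contained sketch of that pattern with tfjs; whether `AdamExt` unscales the gradients internally is my assumption:

```ts
import * as tf from "@tensorflow/tfjs-core";

// Scale the loss up before autodiff so small fp16 gradients don't underflow,
// then report the unscaled loss to the caller.
function scaledLossStep(lossFn: () => tf.Scalar, lossScaling: number) {
  const scaled = () => tf.mul(lossFn(), tf.scalar(lossScaling)) as tf.Scalar;
  const { value, grads } = tf.variableGrads(scaled);
  const reported = tf.mul(value, tf.scalar(1 / lossScaling)); // what callers see
  return { loss: reported, grads }; // grads are still scaled (assumed handled by the optimizer)
}
```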
package/dist/training/sparseCrossEntropy.js
CHANGED

@@ -1,8 +1,8 @@
 import { gatherSub as x } from "../ops/gatherSub.js";
 import { scatterSub as L } from "../ops/scatterSub.js";
-import {
-import { s as y } from "../softmax-
-import { m as z, l as v } from "../log_sum_exp-
+import { a1 as C, t as u, a2 as E, c as G } from "../index-Duu1Lvvv.js";
+import { s as y } from "../softmax-DfuYyjMh.js";
+import { m as z, l as v } from "../log_sum_exp-CVqLsVLl.js";
 function k(t, s) {
   return u(() => {
     const n = t.shape[t.shape.length - 1], c = t.shape.slice(0, -1).reduce((o, e) => o * e, 1), h = t.shape.length > 2 ? t.reshape([c, n]) : t, p = s.shape.length > 1 ? s.reshape([c]).cast("int32") : s.cast("int32"), r = z(h, -1, !0), a = G(h, r), d = v(a, -1);
package/dist/training/tasks/ConversationTask.d.ts
ADDED

@@ -0,0 +1,11 @@
+import { Conversation, ITokeniser } from '../../main';
+import { Task } from './Task';
+export default class ConversationTask extends Task {
+    private rawConvo;
+    private index;
+    get length(): number;
+    constructor(conversations: Conversation[][]);
+    hasMoreConversations(): boolean;
+    nextConversation(): Conversation[] | null;
+    estimateTokens(tokeniser: ITokeniser): Promise<number>;
+}
package/dist/training/tasks/ConversationTask.js
ADDED

@@ -0,0 +1,26 @@
+import { Task as t } from "./Task.js";
+class s extends t {
+  rawConvo;
+  index = 0;
+  get length() {
+    return this.rawConvo.length;
+  }
+  constructor(n) {
+    super(), this.rawConvo = n;
+  }
+  hasMoreConversations() {
+    return this.index < this.rawConvo.length;
+  }
+  nextConversation() {
+    if (this.index >= this.rawConvo.length)
+      return null;
+    const n = this.rawConvo[this.index];
+    return this.index++, n;
+  }
+  async estimateTokens(n) {
+    return (await n.encodeConversation(this.rawConvo[0])).length * this.length;
+  }
+}
+export {
+  s as default
+};
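All Task subclasses share the same pull-based protocol: check `hasMoreConversations()`, then take `nextConversation()` until it returns null. Note that `estimateTokens` encodes only the first conversation and multiplies by `length`, so it is a rough extrapolation rather than an exact count. A sketch of the drain loop; the `TaskLike` and `Conversation` shapes are mine:

```ts
// Shapes assumed for the sketch; Conversation mirrors the package's interface.
type Conversation = { role: "user" | "assistant" | "system"; content: string };
interface TaskLike {
  hasMoreConversations(): boolean;
  nextConversation(): Conversation[] | null;
}

// Pull conversations until the task is exhausted.
function drain(task: TaskLike): Conversation[][] {
  const out: Conversation[][] = [];
  while (task.hasMoreConversations()) {
    const convo = task.nextConversation();
    if (convo) out.push(convo);
  }
  return out;
}
```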
package/dist/training/tasks/PretrainingTask.d.ts
ADDED

@@ -0,0 +1,11 @@
+import { Conversation, ITokeniser } from '../../main';
+import { Task } from './Task';
+export default class PretrainingTask extends Task {
+    private rawText;
+    private index;
+    get length(): number;
+    constructor(texts: string[]);
+    hasMoreConversations(): boolean;
+    nextConversation(): Conversation[] | null;
+    estimateTokens(tokeniser: ITokeniser): Promise<number>;
+}
package/dist/training/tasks/PretrainingTask.js
ADDED

@@ -0,0 +1,34 @@
+import { Task as e } from "./Task.js";
+class r extends e {
+  rawText;
+  index = 0;
+  get length() {
+    return this.rawText.length;
+  }
+  constructor(t) {
+    super(), this.rawText = t;
+  }
+  hasMoreConversations() {
+    return this.index < this.rawText.length;
+  }
+  nextConversation() {
+    if (this.index >= this.rawText.length)
+      return null;
+    const t = {
+      role: "assistant",
+      content: this.rawText[this.index]
+    };
+    return this.index++, [t];
+  }
+  async estimateTokens(t) {
+    return (await t.encodeConversation([
+      {
+        role: "assistant",
+        content: this.rawText[0]
+      }
+    ])).length * this.length;
+  }
+}
+export {
+  r as default
+};
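PretrainingTask is the adapter that lets raw text flow through the conversation pipeline: each string becomes a one-turn assistant conversation. Assuming the deep `dist/` import path resolves as laid out above:

```ts
import PretrainingTask from "@genai-fi/nanogpt/dist/training/tasks/PretrainingTask";

const task = new PretrainingTask(["Once upon a time..."]);
task.nextConversation();
// → [{ role: "assistant", content: "Once upon a time..." }]
```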
package/dist/training/tasks/StartSentenceTask.d.ts
ADDED

@@ -0,0 +1,12 @@
+import { Conversation, ITokeniser } from '../../main';
+import { Task } from './Task';
+export default class StartSentenceTask extends Task {
+    private rawText;
+    private index;
+    get length(): number;
+    constructor(texts: string[]);
+    hasMoreConversations(): boolean;
+    nextConversation(): Conversation[] | null;
+    private conversationFromString;
+    estimateTokens(tokeniser: ITokeniser): Promise<number>;
+}