ruvector 0.2.20 → 0.2.22
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/cli.js +160 -0
- package/dist/core/router-wrapper.d.ts +20 -7
- package/dist/core/router-wrapper.d.ts.map +1 -1
- package/dist/core/router-wrapper.js +52 -18
- package/package.json +2 -1
- package/src/decompiler/api-prober.js +302 -0
- package/src/decompiler/index.js +57 -1
- package/src/decompiler/model-decompiler.js +423 -0
- package/wasm/package.json +27 -0
- package/wasm/ruvector_decompiler_wasm.d.ts +27 -0
- package/wasm/ruvector_decompiler_wasm.js +220 -0
- package/wasm/ruvector_decompiler_wasm_bg.wasm +0 -0
- package/wasm/ruvector_decompiler_wasm_bg.wasm.d.ts +16 -0
- package/dist/core/onnx/loader.js +0 -348
- package/dist/core/onnx/pkg/LICENSE +0 -21
- package/dist/core/onnx/pkg/loader.js +0 -348
- package/dist/core/onnx/pkg/ruvector_onnx_embeddings_wasm.d.ts +0 -112
- package/dist/core/onnx/pkg/ruvector_onnx_embeddings_wasm.js +0 -5
- package/dist/core/onnx/pkg/ruvector_onnx_embeddings_wasm_bg.js +0 -638
- package/dist/core/onnx/pkg/ruvector_onnx_embeddings_wasm_bg.wasm +0 -0
- package/dist/core/onnx/pkg/ruvector_onnx_embeddings_wasm_bg.wasm.d.ts +0 -29
- package/dist/core/onnx/pkg/ruvector_onnx_embeddings_wasm_cjs.js +0 -127
- package/dist/core/onnx-llm.d.ts +0 -206
- package/dist/core/onnx-llm.d.ts.map +0 -1
- package/dist/core/onnx-llm.js +0 -430
package/src/decompiler/index.js
CHANGED
|
@@ -27,6 +27,55 @@ const { computeMetrics, computeModuleMetrics } = require('./metrics');
|
|
|
27
27
|
const { reconstructCode, reconstructRunnable } = require('./reconstructor');
|
|
28
28
|
const { validateReconstruction } = require('./validator');
|
|
29
29
|
|
|
30
|
+
/**
 * Try the WASM Louvain decompiler (full graph-partitioning pipeline).
 * Returns null if the WASM module is not available or fails, so callers
 * can fall back to the pure-JS pipeline.
 *
 * @param {string} source - raw JavaScript source
 * @param {object} [options]
 * @param {number} [options.minConfidence=0.3] - minimum module confidence
 * @param {boolean} [options.witness=true] - emit a provenance witness
 * @returns {{modules: object[], metrics: object, witness: object|null}|null}
 */
function tryWasmDecompiler(source, options = {}) {
  try {
    const wasm = require('../../wasm/ruvector_decompiler_wasm');
    const configJson = JSON.stringify({
      target_modules: null,
      // `??` instead of `||`: an explicit minConfidence of 0 is a valid
      // setting and must not be silently replaced by the default.
      min_confidence: options.minConfidence ?? 0.3,
      generate_source_maps: false,
      generate_witness: options.witness !== false,
      output_filename: 'bundle.js',
      model_path: null,
      hierarchical_output: true,
      max_depth: 3,
      min_folder_size: 3,
    });
    const resultJson = wasm.decompile(source, configJson);
    const result = JSON.parse(resultJson);
    // The WASM side reports failures as {"error": ...} rather than throwing.
    if (result.error) return null;

    // Convert Rust DecompileResult to Node.js format
    return {
      modules: (result.modules || []).map((m) => ({
        name: m.name,
        content: m.source || '',
        declarations: (m.declarations && m.declarations.length) || 0,
        fragments: (m.declarations && m.declarations.length) || 0,
        confidence: 0.8,
      })),
      metrics: {
        source: { sizeBytes: source.length },
        modules: (result.modules || []).length,
        engine: 'wasm-louvain',
      },
      witness: result.witness || null,
      moduleTree: result.module_tree || null,
      beautifiedSource: source,
    };
  } catch {
    return null; // WASM not available, fall back
  }
}
|
|
78
|
+
|
|
30
79
|
/**
|
|
31
80
|
* Try to beautify source code using js-beautify (optional dep).
|
|
32
81
|
* Falls back to returning the source unchanged if not installed.
|
|
@@ -118,7 +167,13 @@ function decompileSource(source, options = {}) {
|
|
|
118
167
|
filePath,
|
|
119
168
|
} = options;
|
|
120
169
|
|
|
121
|
-
//
|
|
170
|
+
// Priority 1: WASM Louvain (full pipeline, works everywhere, no binary needed)
|
|
171
|
+
if (useRust !== false && source.length > 1000) {
|
|
172
|
+
const wasmResult = tryWasmDecompiler(source, options);
|
|
173
|
+
if (wasmResult) return wasmResult;
|
|
174
|
+
}
|
|
175
|
+
|
|
176
|
+
// Priority 2: Rust binary (full pipeline, requires cargo build)
|
|
122
177
|
if (useRust && filePath && source.length > 100000) {
|
|
123
178
|
const tmpDir = path.join(require('os').tmpdir(), 'ruvector-decompile-' + Date.now());
|
|
124
179
|
const rustResult = tryRustDecompiler(filePath, tmpDir);
|
|
@@ -404,4 +459,5 @@ module.exports = {
|
|
|
404
459
|
reconstructCode,
|
|
405
460
|
reconstructRunnable,
|
|
406
461
|
validateReconstruction,
|
|
462
|
+
tryWasmDecompiler,
|
|
407
463
|
};
|
|
@@ -0,0 +1,423 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* LLM model weight decompiler for Node.js.
|
|
5
|
+
* Parses GGUF and Safetensors files to reconstruct architecture info.
|
|
6
|
+
* See ADR-138.
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
const fs = require('fs');
|
|
10
|
+
const path = require('path');
|
|
11
|
+
const crypto = require('crypto');
|
|
12
|
+
|
|
13
|
+
// ── GGUF constants ───────────────────────────────────────────────────────
|
|
14
|
+
|
|
15
|
+
// 'GGUF' as a little-endian u32 (file bytes "GGUF" read as one LE word).
const GGUF_MAGIC = 0x46554747;

// GGML tensor dtype id → display name and average bits-per-weight.
// bpw includes per-block quantization overhead (scales/mins), hence the
// fractional values; used only for size estimation, never for decoding.
const QUANT_TYPES = {
  0: { name: 'F32', bpw: 32 }, 1: { name: 'F16', bpw: 16 },
  2: { name: 'Q4_0', bpw: 4.5 }, 3: { name: 'Q4_1', bpw: 5 },
  6: { name: 'Q5_0', bpw: 5.5 }, 7: { name: 'Q5_1', bpw: 6 },
  8: { name: 'Q8_0', bpw: 8.5 }, 9: { name: 'Q8_1', bpw: 9 },
  10: { name: 'Q2_K', bpw: 2.56 }, 11: { name: 'Q3_K', bpw: 3.44 },
  12: { name: 'Q4_K', bpw: 4.5 }, 13: { name: 'Q5_K', bpw: 5.5 },
  14: { name: 'Q6_K', bpw: 6.56 }, 15: { name: 'Q8_K', bpw: 8.5 },
  29: { name: 'BF16', bpw: 16 },
};
|
|
27
|
+
|
|
28
|
+
// ── Main entry ───────────────────────────────────────────────────────────
|
|
29
|
+
|
|
30
|
+
/**
 * Decompile a model weight file, dispatching on its extension.
 *
 * @param {string} filePath - path to a .gguf or .safetensors file
 * @returns {Promise<object>} normalized decompilation result
 * @throws {Error} when the extension is not a supported model format
 */
async function decompileModelFile(filePath) {
  const extension = path.extname(filePath).toLowerCase();
  switch (extension) {
    case '.gguf':
      return decompileGguf(filePath);
    case '.safetensors':
      return decompileSafetensors(filePath);
    default:
      throw new Error(`Unsupported model format: ${extension} (expected .gguf or .safetensors)`);
  }
}
|
|
36
|
+
|
|
37
|
+
// ── GGUF decompiler ──────────────────────────────────────────────────────
|
|
38
|
+
|
|
39
|
+
/**
 * Parse a GGUF file's header, metadata KV store and tensor directory.
 * Only the index is read — never the tensor payloads — so this is cheap
 * even for multi-gigabyte files.
 *
 * Fixes vs. the previous version:
 *  - bool array elements (elemType 7) are 1 byte; the generic size
 *    fallback skipped 8 bytes, corrupting everything parsed after them.
 *  - the file descriptor is now closed even when parsing throws.
 *
 * @param {string} filePath
 * @returns {object} normalized result from buildResult()
 */
function decompileGguf(filePath) {
  const fd = fs.openSync(filePath, 'r');
  try {
    const stat = fs.fstatSync(fd);
    let pos = 0; // absolute read cursor into the file

    // Sequential little-endian readers over the descriptor.
    function readBuf(n) {
      const buf = Buffer.alloc(n);
      fs.readSync(fd, buf, 0, n, pos);
      pos += n;
      return buf;
    }
    function readU32() { return readBuf(4).readUInt32LE(0); }
    function readU64() { return Number(readBuf(8).readBigUInt64LE(0)); }
    function readF32() { return readBuf(4).readFloatLE(0); }
    function readF64() { return readBuf(8).readDoubleLE(0); }
    function readStr() {
      const len = readU64();
      // Guard against corrupt/hostile files claiming absurd lengths.
      if (len > 65536) throw new Error(`String too long: ${len}`);
      return readBuf(len).toString('utf8');
    }

    // Read one typed metadata value (GGUF type tag + payload).
    function readValue() {
      const type = readU32();
      switch (type) {
        case 0: return readBuf(1).readUInt8(0);        // u8
        case 1: return readBuf(1).readInt8(0);         // i8
        case 2: return readBuf(2).readUInt16LE(0);     // u16
        case 3: return readBuf(2).readInt16LE(0);      // i16
        case 4: return readU32();                      // u32
        case 5: return readBuf(4).readInt32LE(0);      // i32
        case 6: return readF32();                      // f32
        case 7: return readBuf(1).readUInt8(0) !== 0;  // bool
        case 8: return readStr();                      // string
        case 9: { // Array
          const elemType = readU32();
          const count = readU64();
          const arr = [];
          for (let i = 0; i < Math.min(count, 10000); i++) {
            if (elemType === 8) arr.push(readStr());
            else if (elemType === 4) arr.push(readU32());
            else if (elemType === 0) arr.push(readBuf(1).readUInt8(0));
            else if (elemType === 5) arr.push(readBuf(4).readInt32LE(0));
            else if (elemType === 6) arr.push(readF32());
            else if (elemType === 10) arr.push(readU64());
            // BUGFIX: bool elements are 1 byte; the generic fallback
            // below would skip 8 bytes for elemType 7 and desync the
            // cursor for every subsequent read.
            else if (elemType === 7) arr.push(readBuf(1).readUInt8(0) !== 0);
            else readBuf(elemType <= 1 ? 1 : elemType <= 3 ? 2 : elemType <= 6 ? 4 : 8);
          }
          // Skip remaining if array was truncated
          if (count > 10000) {
            // Cannot reliably skip variable-size elements, just return what we have
          }
          return arr;
        }
        case 10: return readU64();                             // u64
        case 11: return Number(readBuf(8).readBigInt64LE(0));  // i64
        case 12: return readF64();                             // f64
        default: throw new Error(`Unknown value type: ${type}`);
      }
    }

    // Parse header
    const magic = readU32();
    if (magic !== GGUF_MAGIC) throw new Error(`Not a GGUF file (magic: 0x${magic.toString(16)})`);
    const version = readU32();
    const tensorCount = readU64();
    const metadataCount = readU64();

    // Parse metadata (key/value pairs)
    const metadata = {};
    for (let i = 0; i < metadataCount; i++) {
      const key = readStr();
      metadata[key] = readValue();
    }

    // Parse tensor infos (name, shape, dtype, data offset)
    const tensors = [];
    for (let i = 0; i < tensorCount; i++) {
      const name = readStr();
      const nDims = readU32();
      const shape = [];
      for (let d = 0; d < nDims; d++) shape.push(readU64());
      const quantType = readU32();
      const offset = readU64();
      const qt = QUANT_TYPES[quantType] || { name: `Unknown(${quantType})`, bpw: 0 };
      tensors.push({ name, shape, quantType, quantName: qt.name, bpw: qt.bpw, offset });
    }

    return buildResult({
      format: `GGUF v${version}`,
      metadata,
      tensors,
      fileSize: stat.size,
      filePath,
    });
  } finally {
    // Close even on parse errors (the descriptor previously leaked).
    fs.closeSync(fd);
  }
}
|
|
135
|
+
|
|
136
|
+
// ── Safetensors decompiler ───────────────────────────────────────────────
|
|
137
|
+
|
|
138
|
+
/**
 * Parse a safetensors file: an 8-byte LE header length, a JSON header
 * describing every tensor, then raw data (which we never read).
 *
 * Fixes vs. the previous version: the dtype→bits table is built once
 * instead of once per tensor, and the file descriptor is closed even
 * when the header length check or a read throws.
 *
 * @param {string} filePath
 * @returns {object} normalized result from buildResult()
 */
function decompileSafetensors(filePath) {
  const fd = fs.openSync(filePath, 'r');
  let headerBuf;
  let stat;
  try {
    stat = fs.fstatSync(fd);
    const lenBuf = Buffer.alloc(8);
    fs.readSync(fd, lenBuf, 0, 8, 0);
    const headerLen = Number(lenBuf.readBigUInt64LE(0));
    // Reject absurd headers before allocating a buffer for them.
    if (headerLen > 100 * 1024 * 1024) throw new Error(`Header too large: ${headerLen}`);

    headerBuf = Buffer.alloc(headerLen);
    fs.readSync(fd, headerBuf, 0, headerLen, 8);
  } finally {
    // Close even on failure (the descriptor previously leaked on error).
    fs.closeSync(fd);
  }

  const header = JSON.parse(headerBuf.toString('utf8'));
  const metadata = {};
  const tensors = [];

  // Bits per element by dtype; hoisted out of the loop (it was rebuilt
  // on every iteration). Unknown dtypes default to 32 below.
  const dtypeMap = { F32: 32, F16: 16, BF16: 16, F64: 64, I8: 8, I16: 16, I32: 32, I64: 64 };

  for (const [name, info] of Object.entries(header)) {
    if (name === '__metadata__') {
      // Free-form string map supplied by the exporter.
      Object.assign(metadata, info);
      continue;
    }
    if (!info || !info.dtype) continue;
    tensors.push({
      name,
      shape: info.shape || [],
      quantName: info.dtype,
      bpw: dtypeMap[info.dtype] || 32,
      offset: info.data_offsets ? info.data_offsets[0] : 0,
    });
  }

  // Present tensors in on-disk order.
  tensors.sort((a, b) => a.offset - b.offset);

  return buildResult({
    format: 'Safetensors',
    metadata,
    tensors,
    fileSize: stat.size,
    filePath,
  });
}
|
|
180
|
+
|
|
181
|
+
// ── Architecture inference ───────────────────────────────────────────────
|
|
182
|
+
|
|
183
|
+
/**
 * Assemble the normalized decompilation result shared by both formats.
 *
 * @param {{format: string, metadata: object, tensors: object[],
 *          fileSize: number, filePath: string}} parts
 * @returns {object} architecture, layers, tokenizer, quantization, witness
 */
function buildResult({ format, metadata, tensors, fileSize, filePath }) {
  const architecture = inferArchitecture(metadata, tensors);

  // Witness: SHA3 not available in Node crypto, use SHA256.
  const sourceHash = crypto.createHash('sha256').update(filePath).digest('hex');

  return {
    format,
    architecture,
    layers: extractLayers(tensors, architecture).slice(0, 50), // Limit output
    tokenizer: extractTokenizer(metadata),
    quantization: detectQuantization(tensors, architecture),
    witness: { source_hash: sourceHash, chain_root: sourceHash.slice(0, 32) },
    metadata: flattenMetadata(metadata),
    fileSize,
  };
}
|
|
202
|
+
|
|
203
|
+
/**
 * Reconstruct model hyperparameters from GGUF metadata keys (scoped as
 * "<architecture>.<field>"), falling back to tensor-shape heuristics
 * for any field the metadata does not supply.
 */
function inferArchitecture(metadata, tensors) {
  const archName = metadata['general.architecture'] || '';
  // Helper: look up a field under the architecture-scoped key prefix.
  const field = (suffix) => metadata[`${archName ? archName + '.' : ''}${suffix}`];

  const hiddenSize = Number(field('embedding_length')) || inferHiddenSize(tensors);
  const numLayers = Number(field('block_count')) || inferNumLayers(tensors);
  const numHeads = Number(field('attention.head_count')) || inferNumHeads(hiddenSize);
  const numKvHeads = Number(field('attention.head_count_kv')) || inferKvHeads(tensors, hiddenSize, numHeads);
  const ffnSize = Number(field('feed_forward_length')) || inferFfnSize(tensors);
  const totalParams = tensors.reduce((sum, t) => sum + t.shape.reduce((a, b) => a * b, 1), 0);

  return {
    name: archName || 'unknown',
    hidden_size: hiddenSize,
    num_layers: numLayers,
    num_heads: numHeads,
    num_kv_heads: numKvHeads,
    intermediate_size: ffnSize,
    vocab_size: inferVocabSize(tensors),
    max_sequence_length: Number(field('context_length')) || 0,
    total_params: totalParams,
    // FP16 baseline: 2 bytes per parameter.
    estimated_size_mb: (totalParams * 2) / (1024 * 1024),
  };
}
|
|
228
|
+
|
|
229
|
+
/**
 * Hidden size heuristic: second dimension of a 2-D embedding matrix
 * (stored as vocab × hidden). Returns 0 when no embedding is found.
 */
function inferHiddenSize(tensors) {
  const embedding = tensors.find(
    (t) => t.shape.length === 2 && (t.name.includes('embed') || t.name.includes('token_embd'))
  );
  return embedding ? embedding.shape[1] : 0;
}
|
|
237
|
+
|
|
238
|
+
/**
 * Layer count heuristic: highest zero-based block index appearing in
 * tensor names ("blk.N.", "layers.N.", "h.N.") plus one; 0 if none.
 */
function inferNumLayers(tensors) {
  const pattern = /(?:blk|layers|h)\.\s*(\d+)\./;
  const highest = tensors.reduce((acc, tensor) => {
    const match = pattern.exec(tensor.name);
    return match ? Math.max(acc, parseInt(match[1], 10)) : acc;
  }, -1);
  return highest < 0 ? 0 : highest + 1;
}
|
|
246
|
+
|
|
247
|
+
/**
 * Head-count heuristic: try common per-head dimensions (most common
 * first) and pick the first that divides the hidden size evenly.
 */
function inferNumHeads(hiddenSize) {
  if (!hiddenSize) return 0;
  const candidateHeadDims = [128, 64, 96, 256];
  const headDim = candidateHeadDims.find((hd) => hiddenSize % hd === 0);
  return headDim ? hiddenSize / headDim : 0;
}
|
|
254
|
+
|
|
255
|
+
/**
 * KV-head heuristic for grouped-query attention: derive the count from
 * the K-projection's output dimension divided by the per-head dim.
 * Falls back to numHeads (i.e. standard multi-head attention).
 */
function inferKvHeads(tensors, hiddenSize, numHeads) {
  if (!hiddenSize || !numHeads) return numHeads;
  const headDim = hiddenSize / numHeads;
  if (headDim > 0) {
    for (const tensor of tensors) {
      const isKeyProjection =
        tensor.shape.length === 2 &&
        (tensor.name.includes('attn_k') || tensor.name.includes('k_proj'));
      if (isKeyProjection && tensor.shape[0] % headDim === 0) {
        return tensor.shape[0] / headDim;
      }
    }
  }
  return numHeads;
}
|
|
265
|
+
|
|
266
|
+
/**
 * FFN width heuristic: first dimension of any 2-D up/gate projection.
 * Returns 0 when no such tensor exists.
 */
function inferFfnSize(tensors) {
  const markers = ['ffn_up', 'up_proj', 'ffn_gate', 'gate_proj'];
  const hit = tensors.find(
    (t) => t.shape.length === 2 && markers.some((marker) => t.name.includes(marker))
  );
  return hit ? hit.shape[0] : 0;
}
|
|
275
|
+
|
|
276
|
+
/**
 * Vocabulary size heuristic: first dimension of a 2-D embedding matrix
 * (stored as vocab × hidden). Returns 0 when no embedding is found.
 */
function inferVocabSize(tensors) {
  for (let i = 0; i < tensors.length; i++) {
    const { name, shape } = tensors[i];
    if (shape.length === 2 && (name.includes('embed') || name.includes('token_embd'))) {
      return shape[0];
    }
  }
  return 0;
}
|
|
284
|
+
|
|
285
|
+
/**
 * Determine the dominant quantization method by majority vote over
 * weight tensors (norms/embeddings are excluded — they are typically
 * stored at higher precision and would skew the tally).
 */
function detectQuantization(tensors, arch) {
  const formatVotes = {};
  for (const tensor of tensors) {
    const label = tensor.name;
    if (label.includes('norm') || label.includes('embed') || label.includes('embd')) continue;
    formatVotes[tensor.quantName] = (formatVotes[tensor.quantName] || 0) + 1;
  }

  // Pick the most frequent format name.
  let method = 'Unknown';
  let best = 0;
  for (const [name, votes] of Object.entries(formatVotes)) {
    if (votes > best) {
      method = name;
      best = votes;
    }
  }

  // Reverse-lookup the nominal bits-per-weight for the winning method.
  const typeKey = Object.keys(QUANT_TYPES).find((k) => QUANT_TYPES[k].name === method);
  const bpw = ((typeKey !== undefined ? QUANT_TYPES[typeKey] : {}).bpw) || 0;

  // Actual on-disk size from per-tensor bpw (includes ALL tensors).
  const totalBits = tensors.reduce(
    (sum, t) => sum + t.shape.reduce((a, b) => a * b, 1) * t.bpw,
    0
  );
  const quantizedMb = totalBits / 8 / (1024 * 1024);

  return {
    method,
    bits_per_weight: bpw,
    original_size_mb: arch.estimated_size_mb,
    quantized_size_mb: quantizedMb,
    compression_ratio: quantizedMb > 0 ? arch.estimated_size_mb / quantizedMb : 1,
  };
}
|
|
308
|
+
|
|
309
|
+
/**
 * Group tensors into per-layer summaries (index, tensor/param counts,
 * quantization of the first tensor in the block).
 *
 * Rewritten as a single pass keyed on the numeric layer index: the
 * previous version re-filtered the entire tensor list for every newly
 * seen layer (quadratic on large models) and keyed uniqueness on the
 * raw digit string, so "blk.1." and "blk.01." produced duplicate
 * entries for the same layer.
 *
 * @param {object[]} tensors
 * @param {object} arch - unused; kept for interface compatibility
 * @returns {{index:number,tensor_count:number,param_count:number,quantization:string}[]}
 */
function extractLayers(tensors, arch) {
  const layerPattern = /(?:blk|layers|h)\.\s*(\d+)\./;
  const byIndex = new Map(); // insertion order = first-seen order

  for (const t of tensors) {
    const m = t.name.match(layerPattern);
    if (!m) continue;
    const idx = parseInt(m[1], 10);
    let layer = byIndex.get(idx);
    if (!layer) {
      // First tensor seen for this layer defines its quantization label.
      layer = { index: idx, tensor_count: 0, param_count: 0, quantization: t.quantName };
      byIndex.set(idx, layer);
    }
    layer.tensor_count += 1;
    layer.param_count += t.shape.reduce((a, b) => a * b, 1);
  }

  return [...byIndex.values()];
}
|
|
332
|
+
|
|
333
|
+
/**
 * Summarize the embedded GGUF tokenizer: vocab size, special token ids,
 * and a small sample of the vocabulary. Returns null when the file has
 * no 'tokenizer.ggml.tokens' array (e.g. safetensors checkpoints).
 */
function extractTokenizer(metadata) {
  const tokens = metadata['tokenizer.ggml.tokens'];
  if (!Array.isArray(tokens)) return null;

  const specialKeys = [
    'tokenizer.ggml.bos_token_id',
    'tokenizer.ggml.eos_token_id',
    'tokenizer.ggml.padding_token_id',
    'tokenizer.ggml.unknown_token_id',
  ];
  const special = specialKeys
    .filter((key) => metadata[key] != null)
    .map((key) => ({ name: key.replace('tokenizer.ggml.', ''), id: metadata[key] }));

  return {
    vocab_size: tokens.length,
    special_tokens: special,
    sample_tokens: tokens.slice(0, 20).map((t, i) => ({ id: i, text: String(t) })),
  };
}
|
|
349
|
+
|
|
350
|
+
/**
 * Collapse metadata values to short display strings; arrays are
 * summarized by length instead of being dumped wholesale.
 */
function flattenMetadata(metadata) {
  return Object.fromEntries(
    Object.entries(metadata).map(([key, value]) => {
      if (Array.isArray(value)) return [key, `[${value.length} elements]`];
      if (typeof value === 'object' && value !== null) return [key, JSON.stringify(value)];
      return [key, String(value)];
    })
  );
}
|
|
359
|
+
|
|
360
|
+
// ── Pretty printer ───────────────────────────────────────────────────────
|
|
361
|
+
|
|
362
|
+
/**
 * Pretty-print a decompilation result (from buildResult) to stdout.
 * Output text is user-facing; every string below is exact.
 *
 * @param {object} result - must contain architecture and witness;
 *   quantization and tokenizer sections are printed only when present.
 */
function printModelResult(result) {
  // Handle both CJS and ESM builds of chalk (ESM exposes .default).
  const _chalk = require('chalk');
  const chalk = _chalk.default || _chalk;
  const a = result.architecture;

  console.log(chalk.bold.cyan('\n LLM Model Decompilation'));
  console.log(chalk.white(` Format: ${result.format}`));
  console.log(chalk.white(` Architecture: ${a.name}`));
  console.log(chalk.white(` Parameters: ${formatNumber(a.total_params)} (${formatSize(a.total_params)})`));
  console.log('');
  console.log(chalk.white(` Hidden size: ${a.hidden_size}`));
  console.log(chalk.white(` Layers: ${a.num_layers}`));
  console.log(chalk.white(` Attention heads: ${a.num_heads}`));
  // Grouped-query attention: only shown when KV heads differ from Q heads.
  // NOTE(review): ratio is Infinity when num_kv_heads is 0 — confirm
  // upstream inference can never produce 0 here.
  if (a.num_kv_heads !== a.num_heads) {
    const ratio = a.num_heads / a.num_kv_heads;
    console.log(chalk.white(` KV heads: ${a.num_kv_heads} (GQA ${ratio}:1)`));
  }
  console.log(chalk.white(` FFN size: ${a.intermediate_size}`));
  console.log(chalk.white(` Vocab size: ${a.vocab_size}`));
  if (a.max_sequence_length > 0) {
    console.log(chalk.white(` Max seq length: ${a.max_sequence_length}`));
  }

  if (result.quantization) {
    const q = result.quantization;
    console.log('');
    console.log(chalk.white(` Quantization: ${q.method}`));
    console.log(chalk.white(` Original size: ${q.original_size_mb.toFixed(0)} MB (FP16)`));
    console.log(chalk.white(` Quantized: ${q.quantized_size_mb.toFixed(0)} MB`));
    console.log(chalk.white(` Compression: ${q.compression_ratio.toFixed(1)}x`));
  }

  if (result.tokenizer) {
    console.log('');
    console.log(chalk.white(` Tokenizer:`));
    console.log(chalk.white(` Vocab: ${formatNumber(result.tokenizer.vocab_size)} tokens`));
    if (result.tokenizer.special_tokens.length > 0) {
      // e.g. "bos_token_id(1), eos_token_id(2)"
      const specials = result.tokenizer.special_tokens.map(s => `${s.name}(${s.id})`).join(', ');
      console.log(chalk.white(` Special: ${specials}`));
    }
  }

  console.log('');
  // Truncated SHA-256 provenance hash computed in buildResult.
  console.log(chalk.dim(` Witness: ${result.witness.source_hash.slice(0, 16)}...`));
  console.log('');
}
|
|
408
|
+
|
|
409
|
+
/**
 * Human-readable magnitude with one decimal place (K/M/B/T), or the
 * plain number below 1000.
 */
function formatNumber(n) {
  const scales = [
    [1e12, 'T'],
    [1e9, 'B'],
    [1e6, 'M'],
    [1e3, 'K'],
  ];
  for (const [threshold, suffix] of scales) {
    if (n >= threshold) return `${(n / threshold).toFixed(1)}${suffix}`;
  }
  return String(n);
}
|
|
416
|
+
|
|
417
|
+
/**
 * Parameter count → storage size string assuming FP16 (2 bytes/param).
 */
function formatSize(params) {
  const megabytes = (params * 2) / (1024 * 1024);
  return megabytes >= 1024
    ? `${(megabytes / 1024).toFixed(1)} GB FP16`
    : `${megabytes.toFixed(0)} MB FP16`;
}
|
|
422
|
+
|
|
423
|
+
module.exports = { decompileModelFile, decompileGguf, decompileSafetensors, printModelResult };
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "ruvector-decompiler-wasm",
|
|
3
|
+
"collaborators": [
|
|
4
|
+
"Ruvector Team"
|
|
5
|
+
],
|
|
6
|
+
"description": "WASM bindings for the RuVector JavaScript bundle decompiler (Louvain pipeline)",
|
|
7
|
+
"version": "2.1.0",
|
|
8
|
+
"license": "MIT",
|
|
9
|
+
"repository": {
|
|
10
|
+
"type": "git",
|
|
11
|
+
"url": "https://github.com/ruvnet/ruvector"
|
|
12
|
+
},
|
|
13
|
+
"files": [
|
|
14
|
+
"ruvector_decompiler_wasm_bg.wasm",
|
|
15
|
+
"ruvector_decompiler_wasm.js",
|
|
16
|
+
"ruvector_decompiler_wasm.d.ts"
|
|
17
|
+
],
|
|
18
|
+
"main": "ruvector_decompiler_wasm.js",
|
|
19
|
+
"types": "ruvector_decompiler_wasm.d.ts",
|
|
20
|
+
"keywords": [
|
|
21
|
+
"decompiler",
|
|
22
|
+
"javascript",
|
|
23
|
+
"wasm",
|
|
24
|
+
"mincut",
|
|
25
|
+
"louvain"
|
|
26
|
+
]
|
|
27
|
+
}
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
/* tslint:disable */
|
|
2
|
+
/* eslint-disable */
|
|
3
|
+
|
|
4
|
+
/**
|
|
5
|
+
* Decompile a minified JavaScript bundle using the full Louvain pipeline.
|
|
6
|
+
*
|
|
7
|
+
* # Arguments
|
|
8
|
+
*
|
|
9
|
+
* * `source` - The minified JavaScript source code.
|
|
10
|
+
* * `config_json` - JSON string of `DecompileConfig` fields. Pass `"{}"` for defaults.
|
|
11
|
+
*
|
|
12
|
+
* # Returns
|
|
13
|
+
*
|
|
14
|
+
* A JSON string containing the `DecompileResult` (modules, witness, inferred names, etc.)
|
|
15
|
+
* or a JSON object with an `"error"` field on failure.
|
|
16
|
+
*/
|
|
17
|
+
export function decompile(source: string, config_json: string): string;
|
|
18
|
+
|
|
19
|
+
/**
|
|
20
|
+
* Initialize the WASM module (sets up panic hook for better error messages).
|
|
21
|
+
*/
|
|
22
|
+
export function init(): void;
|
|
23
|
+
|
|
24
|
+
/**
|
|
25
|
+
* Return the version of the decompiler WASM module.
|
|
26
|
+
*/
|
|
27
|
+
export function version(): string;
|