079project 3.0.0 → 5.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/GroupStarter.cjs +396 -30
- package/forwarder.js +312 -55
- package/main_Serve.cjs +583 -105
- package/main_Study.cjs +581 -68
- package/notes.txt +241 -0
- package/package.json +7 -1
- package/note.txt +0 -5
- package/notebook.txt +0 -8
package/main_Serve.cjs
CHANGED
@@ -55,6 +55,71 @@ global.config = {
   peerServePorts: String(__args['peers'] || '').split(',').map(s => Number(s)).filter(n => Number.isFinite(n) && n > 0),
   isStudy: !!__args['study']
 };
+// ...existing code...
+const vm = require('vm'); // new: sandboxed compilation
+// ...existing code...
+
+// ==== Built-in activation/transfer function registry + safe compilation utilities ====
+const BuiltinActivations = {
+  identity: (x) => x,
+  relu: (x) => (x > 0 ? x : 0),
+  leaky_relu: (x) => (x > 0 ? x : 0.01 * x),
+  tanh: (x) => Math.tanh(x),
+  sigmoid: (x) => 1 / (1 + Math.exp(-x)),
+  elu: (x) => (x >= 0 ? x : (Math.exp(x) - 1)),
+  softplus: (x) => Math.log(1 + Math.exp(x)),
+  // approximate GELU
+  gelu: (x) => 0.5 * x * (1 + Math.tanh(Math.sqrt(2 / Math.PI) * (x + 0.044715 * Math.pow(x, 3))))
+};
+
+const BuiltinTransfers = {
+  // Linear decay: next = value - decayK*weight*(dirMult)
+  linear: (value, weight, decayK, ctx) => {
+    const dm = ctx?.direction === 0 ? (ctx?.bidirectionalMultiplier ?? 1.2) : (ctx?.directionalMultiplier ?? 0.7);
+    return value - (decayK * weight * dm);
+  },
+  // Exponential decay: next = value * exp(-decayK*weight*(dirMult))
+  exp: (value, weight, decayK, ctx) => {
+    const dm = ctx?.direction === 0 ? (ctx?.bidirectionalMultiplier ?? 1.2) : (ctx?.directionalMultiplier ?? 0.7);
+    return value * Math.exp(-(decayK * weight * dm));
+  },
+  // Inverse proportional: next = value / (1 + decayK*weight*(dirMult))
+  inverse: (value, weight, decayK, ctx) => {
+    const dm = ctx?.direction === 0 ? (ctx?.bidirectionalMultiplier ?? 1.2) : (ctx?.directionalMultiplier ?? 0.7);
+    return value / (1 + (decayK * weight * dm));
+  },
+  // Capped linear: linear decay, clamped below at 0 and above at value
+  capped: (value, weight, decayK, ctx) => {
+    const dm = ctx?.direction === 0 ? (ctx?.bidirectionalMultiplier ?? 1.2) : (ctx?.directionalMultiplier ?? 0.7);
+    const raw = value - (decayK * weight * dm);
+    return Math.max(0, Math.min(value, raw));
+  }
+};
+
+function compileCustomFunctionSafely(source, argNames, fallback) {
+  try {
+    const ctx = vm.createContext({ Math });
+    // If the user supplied a bare expression, wrap it in a return
+    const body = source.includes('return') || source.includes('=>') || source.includes('function')
+      ? source
+      : `return (${source});`;
+
+    // Wrap everything as a single function body
+    const wrapper = `(function(${argNames.join(',')}) { "use strict"; ${body} })`;
+    const script = new vm.Script(wrapper, { timeout: 50 });
+    const fn = script.runInContext(ctx, { timeout: 50 });
+    if (typeof fn !== 'function') return fallback;
+    // Wrap once more so bad inputs fall back instead of throwing
+    return (...args) => {
+      try { return fn(...args); } catch (_e) { return fallback(...args); }
+    };
+  } catch (_e) {
+    return fallback;
+  }
+}
+// ...existing code...
+
+// Add parameters to the top-level modelDefaults (kept consistent with the duplicated defaults later in this file)
 const modelDefaults = {
   decayFactor: 0.5,
   maxMemeWords: 100,
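
A minimal usage sketch of the helpers added above (illustrative only; swish and broken are hypothetical names, and BuiltinActivations and compileCustomFunctionSafely are assumed to be in scope):

// A bare expression is wrapped as "return (...)" before compilation in the sandbox.
const swish = compileCustomFunctionSafely('x / (1 + Math.exp(-x))', ['x'], BuiltinActivations.relu);
console.log(swish(2)); // ≈ 1.76

// A body that throws at call time falls back to the supplied built-in.
const broken = compileCustomFunctionSafely('return notDefined(x);', ['x'], BuiltinActivations.relu);
console.log(broken(2)); // 2 (relu fallback)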
@@ -66,7 +131,12 @@ const modelDefaults = {
   decay: 1,
   decayK: 1,
   maxLen: 16,
-  edgeWeight: 1
+  edgeWeight: 1,
+  // New: activation/transfer function selection and custom sources
+  activationType: 'relu', // identity|relu|leaky_relu|tanh|sigmoid|elu|softplus|gelu|custom
+  transferType: 'linear', // linear|exp|inverse|capped|custom
+  activationCustom: '', // custom activation function source/expression: f(x) or return ...
+  transferCustom: '' // custom transfer function source/expression: f(value, weight, decayK, ctx) or return ...
 };
 const currentModelParams = { ...modelDefaults };
 // Anti-trigger mechanism
@@ -1445,7 +1515,318 @@ class PartitionedGraphDB {
 // Note: wordGraph still uses the in-memory GraphDB; the meme graph uses PartitionedGraphDB
 
 // ...existing code...
+// ...existing code...
+
+// ===== Linear Algebra Backend (CSR + SpMM + Hash Embedding + PCA/UMAP) =====
+class CSR {
+  constructor(rowPtr, colIdx, values, nRows, nCols) {
+    this.rowPtr = rowPtr; // Uint32Array length nRows+1
+    this.colIdx = colIdx; // Uint32Array length nnz
+    this.values = values; // Float32Array length nnz
+    this.nRows = nRows | 0;
+    this.nCols = nCols | 0;
+    this.nnz = this.values.length | 0;
+  }
+}
+
+class TensorEngine {
+  // y = A x (A in CSR, x dense Float32Array)
+  spmm(csr, x, out = null) {
+    const { rowPtr, colIdx, values, nRows } = csr;
+    const y = out instanceof Float32Array && out.length === nRows ? out : new Float32Array(nRows);
+    for (let i = 0; i < nRows; i++) {
+      let s = 0.0;
+      const start = rowPtr[i], end = rowPtr[i + 1];
+      for (let p = start; p < end; p++) {
+        s += values[p] * x[colIdx[p]];
+      }
+      y[i] = s;
+    }
+    return y;
+  }
+
+  // x = a*x + b*y
+  axpby(a, x, b, y, out = null) {
+    const n = x.length | 0;
+    const z = out instanceof Float32Array && out.length === n ? out : new Float32Array(n);
+    for (let i = 0; i < n; i++) z[i] = a * x[i] + b * y[i];
+    return z;
+  }
+
+  l2NormalizeRows(mat, nRows, nCols) {
+    for (let i = 0; i < nRows; i++) {
+      let s = 0.0, base = i * nCols;
+      for (let j = 0; j < nCols; j++) { const v = mat[base + j]; s += v * v; }
+      s = Math.sqrt(s) || 1.0;
+      for (let j = 0; j < nCols; j++) mat[base + j] /= s;
+    }
+  }
+
+  dot(a, b) {
+    let s = 0.0;
+    for (let i = 0; i < a.length; i++) s += a[i] * b[i];
+    return s;
+  }
+
+  // Iterative propagation (no path tracking, built for speed)
+  // return Float32Array activation over rows
+  iteratePropagation(csr, seeds, steps, actFn, decayK, damp = 0.02) {
+    const n = csr.nRows | 0;
+    let x = new Float32Array(n);
+    for (const [row, v] of seeds) { if (row >= 0 && row < n) x[row] += v; }
+
+    let y = new Float32Array(n);
+    for (let t = 0; t < steps; t++) {
+      // y = A x
+      this.spmm(csr, x, y);
+      // x = act(x + y - decayK*damp*x)
+      for (let i = 0; i < n; i++) {
+        const raw = x[i] + y[i] - (decayK * damp * x[i]);
+        x[i] = actFn(raw);
+      }
+    }
+    return x; // final activation
+  }
+}
+
+// Word-meme hash embedding: fixed-D feature hashing + L2 normalize
+class GraphTensorBridge {
+  constructor(runtime) {
+    this.rt = runtime;
+    this.rowIndex = new Map(); // memeID -> row
+    this.rows = []; // row -> memeID
+    this.emb = null; // Float32Array [N*D]
+    this.dim = 0;
+    this.csrAll = null; // CSR (all directions)
+    this._multi = null; // {all,bi,out,in, id2row, row2id}
+  }
+
+  static fnv1a32(str) {
+    let h = 0x811c9dc5;
+    for (let i = 0; i < str.length; i++) {
+      h ^= str.charCodeAt(i);
+      h = (h + ((h << 1) + (h << 4) + (h << 7) + (h << 8) + (h << 24))) >>> 0;
+    }
+    return h >>> 0;
+  }
+
+  // Build the row index; only walks the current window
+  rebuildRowIndex() {
+    this.rowIndex.clear();
+    this.rows.length = 0;
+    const pts = this.rt.graph.getAllPoints();
+    for (let i = 0; i < pts.length; i++) {
+      const id = pts[i].pointID;
+      this.rowIndex.set(id, i);
+      this.rows.push(id);
+    }
+    return pts.length;
+  }
+
+  // Hash embedding: D defaults to 512
+  buildEmbeddings(D = 512) {
+    const N = this.rebuildRowIndex();
+    this.dim = D | 0;
+    this.emb = new Float32Array(N * D);
+    for (let r = 0; r < N; r++) {
+      const memeId = this.rows[r];
+      const words = this.rt.kvm.get(memeId) || [];
+      const base = r * D;
+      for (let k = 0; k < words.length; k++) {
+        const w = String(words[k] || '').toLowerCase();
+        const idx = GraphTensorBridge.fnv1a32(w) % D;
+        this.emb[base + idx] += 1.0;
+      }
+    }
+    this.rt.tensor.l2NormalizeRows(this.emb, N, D);
+    return { N, D };
+  }
+
+  // Build the multi-channel CSR (Top-K per row; all/bi/out, plus the transpose of out as in)
+  buildMultiOrderCSR(topK = 64) {
+    const pts = this.rt.graph.getAllPoints();
+    const N = pts.length | 0;
+    const id2row = this.rowIndex;
+    const row2id = this.rows.slice();
+
+    const buildChannel = (filterFn) => {
+      const rows = new Array(N);
+      let nnz = 0;
+      for (let i = 0; i < N; i++) {
+        const conns = (pts[i].connect || []).filter(filterFn);
+        // Keep the Top-K (by weight, descending)
+        conns.sort((a, b) => b[0] - a[0]);
+        const pruned = conns.slice(0, topK);
+        const cols = [];
+        const vals = [];
+        for (const [w, tgt] of pruned) {
+          const c = id2row.get(tgt);
+          if (c === undefined) continue;
+          cols.push(c);
+          vals.push((typeof w === 'number' && isFinite(w)) ? w : 1.0);
+        }
+        rows[i] = { cols, vals };
+        nnz += cols.length;
+      }
+      const rowPtr = new Uint32Array(N + 1);
+      const colIdx = new Uint32Array(nnz);
+      const values = new Float32Array(nnz);
+      let p = 0;
+      for (let i = 0; i < N; i++) {
+        rowPtr[i] = p;
+        const { cols, vals } = rows[i];
+        for (let j = 0; j < cols.length; j++) {
+          colIdx[p] = cols[j];
+          values[p] = vals[j];
+          p++;
+        }
+      }
+      rowPtr[N] = p;
+      return new CSR(rowPtr, colIdx, values, N, N);
+    };
+
+    const ALL = buildChannel(_ => true);
+    const BI = buildChannel(([, , d]) => d === 0);
+    const OUT = buildChannel(([, , d]) => d === 2);
+
+    // IN = transpose(OUT)
+    const IN = this.transposeCSR(OUT);
+
+    this._multi = { all: ALL, bi: BI, out: OUT, in: IN, id2row, row2id };
+    this.csrAll = ALL;
+    return this._multi;
+  }
+
+  transposeCSR(A) {
+    const { nRows, nCols, rowPtr, colIdx, values } = A;
+    const nnz = values.length;
+    const counts = new Uint32Array(nCols);
+    for (let p = 0; p < nnz; p++) counts[colIdx[p]]++;
+    const rowPtrT = new Uint32Array(nCols + 1);
+    for (let i = 0; i < nCols; i++) rowPtrT[i + 1] = rowPtrT[i] + counts[i];
+    const colIdxT = new Uint32Array(nnz);
+    const valuesT = new Float32Array(nnz);
+    const cursor = rowPtrT.slice();
+    for (let i = 0; i < nRows; i++) {
+      for (let p = rowPtr[i]; p < rowPtr[i + 1]; p++) {
+        const j = colIdx[p];
+        const q = cursor[j]++;
+        colIdxT[q] = i;
+        valuesT[q] = values[p];
+      }
+    }
+    return new CSR(rowPtrT, colIdxT, valuesT, nCols, nRows);
+  }
+
+  get multi() { return this._multi; }
+}
+
+class DimReducer {
+  // Fast approximate 2D PCA: power iteration + projection
+  pca2D(emb, N, D, iters = 6) {
+    // Mean-center
+    const mean = new Float32Array(D);
+    for (let i = 0; i < N; i++) {
+      const base = i * D;
+      for (let j = 0; j < D; j++) mean[j] += emb[base + j];
+    }
+    for (let j = 0; j < D; j++) mean[j] /= Math.max(1, N);
+    const X = new Float32Array(N * D);
+    for (let i = 0; i < N; i++) {
+      const base = i * D;
+      for (let j = 0; j < D; j++) X[base + j] = emb[base + j] - mean[j];
+    }
+    // Random initial vectors
+    let v1 = new Float32Array(D); for (let j = 0; j < D; j++) v1[j] = Math.random() - 0.5;
+    let v2 = new Float32Array(D); for (let j = 0; j < D; j++) v2[j] = Math.random() - 0.5;
+
+    const mulCov = (v) => { // X^T X v
+      const tmp = new Float32Array(D);
+      for (let i = 0; i < N; i++) {
+        const base = i * D;
+        // s = x_i dot v
+        let s = 0.0;
+        for (let j = 0; j < D; j++) s += X[base + j] * v[j];
+        for (let j = 0; j < D; j++) tmp[j] += X[base + j] * s;
+      }
+      return tmp;
+    };
+    const normalize = (v) => {
+      let s = 0.0; for (let j = 0; j < D; j++) s += v[j] * v[j];
+      s = Math.sqrt(s) || 1.0;
+      for (let j = 0; j < D; j++) v[j] /= s;
+    };
+
+    for (let t = 0; t < iters; t++) { v1 = mulCov(v1); normalize(v1); }
+    // Remove the v1 component from v2
+    for (let t = 0; t < iters; t++) {
+      v2 = mulCov(v2);
+      // Gram-Schmidt
+      let dot = 0.0; for (let j = 0; j < D; j++) dot += v2[j] * v1[j];
+      for (let j = 0; j < D; j++) v2[j] -= dot * v1[j];
+      normalize(v2);
+    }
+
+    // Project to 2D
+    const out = new Float32Array(N * 2);
+    for (let i = 0; i < N; i++) {
+      const base = i * D;
+      let x = 0.0, y = 0.0;
+      for (let j = 0; j < D; j++) {
+        const xv = X[base + j];
+        x += xv * v1[j];
+        y += xv * v2[j];
+      }
+      out[2 * i + 0] = x;
+      out[2 * i + 1] = y;
+    }
+    return out;
+  }
+
+  // Use UMAP if umap-js is installed, otherwise fall back to PCA
+  project2D(emb, N, D, method = 'auto') {
+    if (method === 'pca') return this.pca2D(emb, N, D);
+    if (method === 'umap' || method === 'auto') {
+      try {
+        // Load on demand
+        const { UMAP } = require('umap-js');
+        const umap = new UMAP({ nComponents: 2, nNeighbors: 15, minDist: 0.1 });
+        // umap-js expects plain arrays
+        const data = new Array(N);
+        for (let i = 0; i < N; i++) {
+          const row = new Array(D);
+          const base = i * D;
+          for (let j = 0; j < D; j++) row[j] = emb[base + j];
+          data[i] = row;
+        }
+        const coords = umap.fit(data);
+        const out = new Float32Array(N * 2);
+        for (let i = 0; i < N; i++) { out[2 * i] = coords[i][0]; out[2 * i + 1] = coords[i][1]; }
+        return out;
+      } catch (_) {
+        return this.pca2D(emb, N, D);
+      }
+    }
+    return this.pca2D(emb, N, D);
+  }
+}
+
+// Provide the multi-channel adjacency volume (all/bi/out/in) for the 3D tensor view
+class MultiOrderAdjacency {
+  constructor(runtime) {
+    this.rt = runtime;
+  }
+  rebuild(topK = 64, Demb = 512) {
+    if (!this.rt.tensorBridge) this.rt.tensorBridge = new GraphTensorBridge(this.rt);
+    const gb = this.rt.tensorBridge;
+    gb.buildEmbeddings(Demb);
+    const multi = gb.buildMultiOrderCSR(topK);
+    return multi;
+  }
+}
+// ===== End of Linear Algebra Backend =====
 
+// ...existing code...
 class KVM {
   // this KVM is the key-value memory
   constructor() {
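
An illustrative sketch of the CSR layout and spmm used by the backend above (not part of the package; the numbers are arbitrary and the CSR and TensorEngine classes from this hunk are assumed to be in scope):

// 3x3 adjacency: node0 -> node1 (weight 2), node1 -> node2 (weight 0.5), node2 has no out-edges.
const engine = new TensorEngine();
const A = new CSR(
  new Uint32Array([0, 1, 2, 2]), // rowPtr: row i owns entries [rowPtr[i], rowPtr[i+1])
  new Uint32Array([1, 2]),       // colIdx
  new Float32Array([2, 0.5]),    // values
  3, 3
);
const x = new Float32Array([1, 4, 0]); // per-node signal
console.log(engine.spmm(A, x)); // Float32Array [8, 0, 0]: row0 = 2 * x[1], rows 1 and 2 collect nothing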
@@ -1615,7 +1996,9 @@ class Runtime {
       baseDir: path.join(__dirname, 'graph_parts'),
       backend: this.config.graphBackend || 'fs' // options: 'fs' | 'lmdb' | 'level'
     });
-
+    this._act = BuiltinActivations.relu;
+    this._transfer = BuiltinTransfers.linear;
+    this._activationMeta = { activationType: 'relu', transferType: 'linear' };
     this.wordGraph = new GraphDB();
     this.kvm = new KVM();
     this.changer = new Changer();
@@ -1625,6 +2008,7 @@ class Runtime {
     this.wordAccessLog = new Map();
     this.initWordGraph();
     this.MAX_MEME_WORDS = 100; // maximum number of words per meme
+
     this.MIN_OVERLAP = 2; // require at least 2 overlapping words before merging
     // New inside the Runtime class
     this.activationStats = new Map(); // records activation relationships
@@ -1636,6 +2020,82 @@ class Runtime {
       batchSizeMultiplier: 1
     };
     this.memeBarrier = new memeBarrier(this);
+    this.tensor = new TensorEngine();
+    this.tensorBridge = new GraphTensorBridge(this);
+    this.dimReducer = new DimReducer();
+    this.multiAdj = new MultiOrderAdjacency(this);
+    this._laReady = false; // whether the linear algebra caches are ready
+  }
+  // Rebuild the linear algebra caches: embeddings + multi-channel CSR
+  rebuildLinearAlgebraCaches({ topK = 64, embDim = 512 } = {}) {
+    try {
+      this.multiAdj.rebuild(topK, embDim);
+      this._laReady = !!(this.tensorBridge?.multi?.all);
+      return {
+        ok: this._laReady,
+        nodes: this.tensorBridge?.rows?.length || 0,
+        embDim
+      };
+    } catch (e) {
+      this._laReady = false;
+      return { ok: false, error: e.message };
+    }
+  }
+
+  // Export the CSR ('all' channel)
+  exportSparseMatrix() {
+    const m = this.tensorBridge?.multi;
+    if (!m?.all) return null;
+    const A = m.all;
+    return {
+      nRows: A.nRows, nCols: A.nCols, nnz: A.nnz,
+      rowPtr: Array.from(A.rowPtr),
+      colIdx: Array.from(A.colIdx),
+      values: Array.from(A.values),
+      rows: this.tensorBridge.rows.slice()
+    };
+  }
+  // High-dimensional -> 2D projection (PCA/UMAP)
+  foldHighDimTo2D(method = 'auto') {
+    const emb = this.tensorBridge?.emb;
+    const N = this.tensorBridge?.rows?.length || 0;
+    const D = this.tensorBridge?.dim || 0;
+    if (!emb || !N || !D) return { ok: false, error: 'embedding not ready' };
+    const coords = this.dimReducer.project2D(emb, N, D, method);
+    const out = {};
+    for (let i = 0; i < N; i++) out[this.tensorBridge.rows[i]] = [coords[2 * i], coords[2 * i + 1]];
+    return { ok: true, dim: 2, points: out };
+  }
+  // Get/set the activation/transfer function configuration
+  getActivationConfig() {
+    return {
+      activationType: this._activationMeta.activationType,
+      transferType: this._activationMeta.transferType,
+      activationCustom: this.config?.activationCustom || '',
+      transferCustom: this.config?.transferCustom || ''
+    };
+  }
+
+  setActivationConfig({ activationType, transferType, activationCustom, transferCustom } = {}) {
+    const aType = String(activationType || this._activationMeta.activationType || 'relu');
+    const tType = String(transferType || this._activationMeta.transferType || 'linear');
+
+    let act = BuiltinActivations[aType] || BuiltinActivations.relu;
+    let tr = BuiltinTransfers[tType] || BuiltinTransfers.linear;
+
+    if (aType === 'custom' && activationCustom) {
+      act = compileCustomFunctionSafely(activationCustom, ['x'], BuiltinActivations.relu);
+    }
+    if (tType === 'custom' && transferCustom) {
+      tr = compileCustomFunctionSafely(transferCustom, ['value', 'weight', 'decayK', 'ctx'], BuiltinTransfers.linear);
+    }
+
+    this._act = (typeof act === 'function') ? act : BuiltinActivations.relu;
+    this._transfer = (typeof tr === 'function') ? tr : BuiltinTransfers.linear;
+    this._activationMeta = { activationType: aType, transferType: tType };
+    this.config = this.config || {};
+    this.config.activationCustom = activationCustom || this.config.activationCustom || '';
+    this.config.transferCustom = transferCustom || this.config.transferCustom || '';
   }
   // New resource monitoring method
   monitorSystemLoad() {
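
An illustrative configuration sketch for the setActivationConfig/getActivationConfig methods added above (not part of the package; rt stands for an already-constructed Runtime instance):

// Switch to tanh activation and a custom transfer expression; the expression is compiled
// via compileCustomFunctionSafely and falls back to the built-in linear transfer on error.
rt.setActivationConfig({
  activationType: 'tanh',
  transferType: 'custom',
  transferCustom: 'value * Math.exp(-decayK * weight)'
});
console.log(rt.getActivationConfig()); // { activationType: 'tanh', transferType: 'custom', ... }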
@@ -2075,112 +2535,48 @@ class Runtime {
    * options.bidirectionalMultiplier: decay multiplier for bidirectional connections
    * @returns {Object|Map} activation result
    */
+  // For multi-source diffusion: apply the transfer function + activation function at every step
+  // Linear-algebra version of multi-source diffusion (no path tracking)
   propagateSignalMultiSource(startIDs, strengths, decayK, maxStep, options = {}) {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    // Initialize the active queue, each element {id, value, from, connectionType}
-    let active = startIDs.map((id, i) => ({
-      id,
-      value: strengths[i],
-      from: null,
-      connectionType: -1 // start nodes have no connection type
-    }));
-
-    let step = 0;
-
-    while (active.length > 0 && step < maxStep) {
-      // Limit the number of active nodes, keeping the strongest signals first
-      if (active.length > maxActiveNodes) {
-        active.sort((a, b) => b.value - a.value);
-        active = active.slice(0, maxActiveNodes);
-        console.log(`[LIMIT] 多源扩散活跃节点数已限制为 ${maxActiveNodes}`);
+    // If the caller needs trackPath, keep using the old logic
+    if (options?.trackPath) {
+      return super.propagateSignalMultiSource
+        ? super.propagateSignalMultiSource(startIDs, strengths, decayK, maxStep, options)
+        : this._propagateFallback(startIDs, strengths, decayK, maxStep, options);
+    }
+
+    // Prefer the linear algebra backend
+    if (this._laReady && this.tensorBridge?.multi?.all) {
+      const A = this.tensorBridge.multi.all;
+      const id2row = this.tensorBridge.rowIndex;
+      const seeds = [];
+      for (let i = 0; i < startIDs.length; i++) {
+        const r = id2row.get(startIDs[i]);
+        if (r !== undefined) seeds.push([r, strengths[i] || 0]);
       }
+      if (!seeds.length) return new Map();
 
-      const
-
-
-
-        // Signal fusion: accumulate into signalMap
-        signalMap.set(id, (signalMap.get(id) || 0) + value);
-
-        // Record activation types
-        if (trackPath && connectionType !== -1) {
-          if (!activationTypes.has(id)) {
-            activationTypes.set(id, new Set());
-          }
-          activationTypes.get(id).add(connectionType);
-        }
-
-        // Path tracking
-        if (trackPath && from) {
-          if (!activationPaths.has(id)) {
-            activationPaths.set(id, []);
-          }
-          activationPaths.get(id).push({ from, connectionType, value });
-        }
-
-        // Propagate to neighbors (taking connection direction into account)
-        const point = this.graph.points.get(id);
-        if (!point) continue;
-
-        // Limit the number of neighbors processed per node
-        const MAX_NEIGHBORS = 30;
-        const neighbors = point.connect.slice(0, MAX_NEIGHBORS);
-
-        for (const [weight, neighborID, direction = 0] of neighbors) {
-          // Choose the decay coefficient based on connection type
-          let effectiveDecay = decayK;
+      const actFn = this._act || ((x) => (x > 0 ? x : 0));
+      const steps = Math.max(1, maxStep | 0);
+      const x = this.tensor.iteratePropagation(A, seeds, steps, actFn, (decayK ?? 1));
 
-
-
-
-
-          // Directed connection - smaller decay (strong semantic flow)
-          effectiveDecay *= directionalMultiplier;
-        }
-
-          // Compute the signal strength after propagation
-          const nextValue = value - effectiveDecay * weight;
-
-          // Only keep propagating when the signal is strong enough
-          if (nextValue >= minSignal) {
-            next.push({
-              id: neighborID,
-              value: nextValue,
-              from: id,
-              connectionType: direction
-            });
-          }
-        }
+      // Return Map<MemeID, value>
+      const out = new Map();
+      for (let i = 0; i < x.length; i++) {
+        if (x[i] > 0) out.set(this.tensorBridge.rows[i], x[i]);
       }
-
-      step++;
+      return out;
     }
 
-    //
-
-
-      signalMap,
-      activationPaths,
-      activationTypes
-    };
-  }
+    // Fall back to the old implementation
+    return this._propagateFallback(startIDs, strengths, decayK, maxStep, options);
+  }
 
-
+  _propagateFallback(startIDs, strengths, decayK, maxStep, options) {
+    // Use the existing version in this file (kept for compatibility)
+    // Directly invoke the original Runtime.propagateSignalMultiSource implementation defined earlier
+    const fn = Object.getPrototypeOf(this).propagateSignalMultiSource;
+    return fn.call(this, startIDs, strengths, decayK, maxStep, options);
   }
 
   // Main loop: continuous induction and signal propagation
@@ -3358,6 +3754,70 @@ async function main() {
 
   // Install the peer monitoring trigger
   installPeerFailoverMonitor();
+  // ...existing code...
+  app.post('/api/tensor/refresh', (req, res) => {
+    try {
+      const { topK = 64, embDim = 512 } = req.body || {};
+      const rt = global.ctrlA?.runtime;
+      if (!rt) return res.status(500).json({ ok: false, error: 'runtime missing' });
+      const ret = rt.rebuildLinearAlgebraCaches({ topK, embDim });
+      res.json({ ok: !!ret.ok, ...ret });
+    } catch (e) {
+      res.status(500).json({ ok: false, error: e.message });
+    }
+  });
+
+  app.get('/api/tensor/csr', (req, res) => {
+    try {
+      const rt = global.ctrlA?.runtime;
+      if (!rt) return res.status(500).json({ ok: false, error: 'runtime missing' });
+      const csr = rt.exportSparseMatrix();
+      if (!csr) return res.status(400).json({ ok: false, error: 'csr not ready' });
+      res.json({ ok: true, csr });
+    } catch (e) {
+      res.status(500).json({ ok: false, error: e.message });
+    }
+  });
+
+  app.get('/api/tensor/project2d', (req, res) => {
+    try {
+      const method = String(req.query?.method || 'auto');
+      const rt = global.ctrlA?.runtime;
+      if (!rt) return res.status(500).json({ ok: false, error: 'runtime missing' });
+      const ret = rt.foldHighDimTo2D(method);
+      res.json(ret);
+    } catch (e) {
+      res.status(500).json({ ok: false, error: e.message });
+    }
+  });
+
+  app.get('/api/tensor/topk', (req, res) => {
+    try {
+      const memeId = String(req.query?.memeId || '');
+      const k = Math.max(1, Math.min(50, Number(req.query?.k || 10)));
+      const rt = global.ctrlA?.runtime;
+      if (!rt?.tensorBridge?.emb) return res.status(400).json({ ok: false, error: 'embedding not ready' });
+      const row = rt.tensorBridge.rowIndex.get(memeId);
+      if (row === undefined) return res.status(404).json({ ok: false, error: 'meme not found' });
+
+      const N = rt.tensorBridge.rows.length, D = rt.tensorBridge.dim;
+      const base = row * D;
+      const q = new Float32Array(D);
+      for (let j = 0; j < D; j++) q[j] = rt.tensorBridge.emb[base + j];
+
+      const scores = new Array(N);
+      for (let i = 0; i < N; i++) {
+        const b = i * D;
+        let s = 0.0; for (let j = 0; j < D; j++) s += q[j] * rt.tensorBridge.emb[b + j];
+        scores[i] = [rt.tensorBridge.rows[i], s];
+      }
+      scores.sort((a, b) => b[1] - a[1]);
+      res.json({ ok: true, memeId, neighbors: scores.slice(0, k) });
+    } catch (e) {
+      res.status(500).json({ ok: false, error: e.message });
+    }
+  });
+  // ...existing code...
   app.get('/api/graph/partitions/status', async (req, res) => {
     try {
       const g = global.ctrlA?.runtime?.graph;
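
An illustrative client sketch for the /api/tensor/* routes added above (not part of the package; assumes Node 18+ global fetch inside an async function, with port and someMemeId as placeholders):

const base = `http://localhost:${port}`;
// Rebuild embeddings + multi-channel CSR, then query the derived views.
await fetch(`${base}/api/tensor/refresh`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ topK: 64, embDim: 512 })
});
const proj = await (await fetch(`${base}/api/tensor/project2d?method=pca`)).json();
const topk = await (await fetch(`${base}/api/tensor/topk?memeId=${someMemeId}&k=10`)).json();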
@@ -3504,7 +3964,11 @@ app.post('/api/graph/prefetch', async (req, res) => {
   decay: 1, // new
   decayK: 1, // new
   maxLen: 16, // new
-  edgeWeight: 1
+  edgeWeight: 1, // new
+  activationType: 'relu',
+  transferType: 'linear',
+  activationCustom: '',
+  transferCustom: ''
 };
 const currentModelParams = { ...modelDefaults };
 
@@ -3610,10 +4074,10 @@ app.post('/api/adversary/start', (req, res) => {
 
   console.log('已设置交错自主学习定时任务,每200s执行一次');
 }
-// plainObjToRuntime: sync a protobuf object into the runtime instance
-// extend applyModelParams
 function applyModelParams(runtime) {
   if (!runtime) return;
+
+  // Sync general parameters
   runtime.MAX_MEME_WORDS = currentModelParams.maxMemeWords;
   runtime.MIN_OVERLAP = currentModelParams.minOverlapThreshold;
   runtime.config = runtime.config || {};
@@ -3624,11 +4088,12 @@ function applyModelParams(runtime) {
   runtime.config.iteration = currentModelParams.iteration;
   runtime.config.threshold = currentModelParams.threshold;
   runtime.config.decay = currentModelParams.decay;
+
   // memeBarrier
   if (runtime.memeBarrier) {
     runtime.memeBarrier.maliciousThreshold = currentModelParams.maliciousThreshold;
   }
-  //
+  // Global edge weight
   if (runtime.graph && currentModelParams.edgeWeight !== undefined) {
     for (const point of runtime.graph.getAllPoints()) {
       for (const conn of point.connect) {
@@ -3636,7 +4101,20 @@ function applyModelParams(runtime) {
       }
     }
   }
-
+
+  // New: activation/transfer function configuration
+  runtime.setActivationConfig({
+    activationType: currentModelParams.activationType,
+    transferType: currentModelParams.transferType,
+    activationCustom: currentModelParams.activationCustom,
+    transferCustom: currentModelParams.transferCustom
+  });
+
+  console.log('[PARAMS] 已更新运行时参数:', {
+    ...currentModelParams,
+    activationType: runtime.getActivationConfig().activationType,
+    transferType: runtime.getActivationConfig().transferType
+  });
 }
 // plainObjToRuntime: sync a protobuf object into the runtime instance
 async function plainObjToRuntime(runtime, obj) {