079project 4.0.0 → 5.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/main_Serve.cjs CHANGED
@@ -1515,7 +1515,318 @@ class PartitionedGraphDB {
1515
1515
  // 说明:wordGraph 仍使用内存 GraphDB;模因图使用 PartitionedGraphDB
1516
1516
 
1517
1517
  // ...existing code...
1518
+ // ...existing code...
1519
+
1520
+ // ===== Linear Algebra Backend (CSR + SpMM + Hash Embedding + PCA/UMAP) =====
1521
// Compressed-sparse-row matrix container used by the tensor backend.
class CSR {
  /**
   * @param {Uint32Array} rowPtr - row offsets, length nRows+1
   * @param {Uint32Array} colIdx - column indices, length nnz
   * @param {Float32Array} values - nonzero values, length nnz
   * @param {number} nRows - row count
   * @param {number} nCols - column count
   */
  constructor(rowPtr, colIdx, values, nRows, nCols) {
    Object.assign(this, { rowPtr, colIdx, values });
    this.nRows = nRows | 0;
    this.nCols = nCols | 0;
    // nnz is derived from the value buffer, not passed in.
    this.nnz = this.values.length | 0;
  }
}
1531
+
1532
// Numeric kernels: sparse mat-vec, axpby, row normalization, dot product,
// and an iterative diffusion loop over a CSR adjacency matrix.
class TensorEngine {
  /**
   * Sparse matrix-vector product y = A * x (A in CSR form).
   * Reuses `out` when it is a Float32Array of the right length.
   * @param {CSR} csr
   * @param {Float32Array} x
   * @param {?Float32Array} out
   * @returns {Float32Array} y of length csr.nRows
   */
  spmm(csr, x, out = null) {
    const { rowPtr, colIdx, values, nRows } = csr;
    const reuse = out instanceof Float32Array && out.length === nRows;
    const y = reuse ? out : new Float32Array(nRows);
    for (let row = 0; row < nRows; row++) {
      let acc = 0.0;
      for (let k = rowPtr[row]; k < rowPtr[row + 1]; k++) {
        acc += values[k] * x[colIdx[k]];
      }
      y[row] = acc;
    }
    return y;
  }

  // Elementwise z = a*x + b*y; reuses `out` when shapes match.
  axpby(a, x, b, y, out = null) {
    const n = x.length | 0;
    const reuse = out instanceof Float32Array && out.length === n;
    const z = reuse ? out : new Float32Array(n);
    let i = 0;
    while (i < n) {
      z[i] = a * x[i] + b * y[i];
      i++;
    }
    return z;
  }

  // In-place L2 normalization of each row of a row-major [nRows x nCols]
  // buffer. A zero row is left untouched (norm falls back to 1).
  l2NormalizeRows(mat, nRows, nCols) {
    for (let r = 0; r < nRows; r++) {
      const off = r * nCols;
      let sq = 0.0;
      for (let c = 0; c < nCols; c++) {
        const v = mat[off + c];
        sq += v * v;
      }
      const norm = Math.sqrt(sq) || 1.0;
      for (let c = 0; c < nCols; c++) mat[off + c] /= norm;
    }
  }

  // Dense dot product over a.length entries.
  dot(a, b) {
    let acc = 0.0;
    for (let i = 0; i < a.length; i++) acc += a[i] * b[i];
    return acc;
  }

  /**
   * Iterative multi-source diffusion (speed-oriented, no path tracking).
   * Each step applies x <- act(x + A*x - decayK*damp*x).
   * @param {CSR} csr - adjacency matrix
   * @param {Iterable<[number, number]>} seeds - [row, initialValue] pairs
   * @param {number} steps - number of propagation steps
   * @param {function(number): number} actFn - per-node activation
   * @param {number} decayK - decay coefficient
   * @param {number} [damp=0.02] - damping multiplier on the decay term
   * @returns {Float32Array} final activation per row
   */
  iteratePropagation(csr, seeds, steps, actFn, decayK, damp = 0.02) {
    const n = csr.nRows | 0;
    const state = new Float32Array(n);
    for (const [row, v] of seeds) {
      if (row >= 0 && row < n) state[row] += v;
    }

    const spread = new Float32Array(n);
    for (let t = 0; t < steps; t++) {
      // spread = A * state
      this.spmm(csr, state, spread);
      for (let i = 0; i < n; i++) {
        const raw = state[i] + spread[i] - decayK * damp * state[i];
        state[i] = actFn(raw);
      }
    }
    return state;
  }
}
1591
+
1592
+ // 词-模因hash嵌入:fixed-D feature hashing + L2 normalize
1593
// Bridges the meme graph and the tensor backend: maps meme IDs to matrix
// rows, builds feature-hashed word embeddings, and materializes the
// multi-channel CSR adjacency matrices.
class GraphTensorBridge {
  constructor(runtime) {
    this.rt = runtime;
    this.rowIndex = new Map(); // memeID -> row number
    this.rows = []; // row number -> memeID
    this.emb = null; // Float32Array, row-major [N * dim]
    this.dim = 0;
    this.csrAll = null; // CSR over all edge directions
    this._multi = null; // { all, bi, out, in, id2row, row2id }
  }

  // 32-bit FNV-1a hash of a string (charCodeAt units).
  static fnv1a32(str) {
    let hash = 0x811c9dc5;
    for (let i = 0; i < str.length; i++) {
      hash ^= str.charCodeAt(i);
      // Shift-add form of hash * 16777619 (the FNV prime), kept in uint32.
      hash = (hash + ((hash << 1) + (hash << 4) + (hash << 7) + (hash << 8) + (hash << 24))) >>> 0;
    }
    return hash >>> 0;
  }

  // Rebuild the memeID<->row mapping from the current graph window.
  // Returns the number of points indexed.
  rebuildRowIndex() {
    this.rowIndex.clear();
    this.rows.length = 0;
    const points = this.rt.graph.getAllPoints();
    let row = 0;
    for (const pt of points) {
      this.rowIndex.set(pt.pointID, row);
      this.rows.push(pt.pointID);
      row++;
    }
    return points.length;
  }

  /**
   * Feature-hashing word embedding: each meme's words are lowercased,
   * FNV-hashed into D buckets, counted, then L2-normalized per row.
   * @param {number} [D=512] - embedding dimensionality
   * @returns {{N: number, D: number}}
   */
  buildEmbeddings(D = 512) {
    const N = this.rebuildRowIndex();
    this.dim = D | 0;
    this.emb = new Float32Array(N * D);
    for (let row = 0; row < N; row++) {
      const memeId = this.rows[row];
      const words = this.rt.kvm.get(memeId) || [];
      const offset = row * D;
      for (let k = 0; k < words.length; k++) {
        const token = String(words[k] || '').toLowerCase();
        const bucket = GraphTensorBridge.fnv1a32(token) % D;
        this.emb[offset + bucket] += 1.0;
      }
    }
    this.rt.tensor.l2NormalizeRows(this.emb, N, D);
    return { N, D };
  }

  /**
   * Build the multi-channel adjacency: `all` (every edge), `bi` (direction
   * flag === 0), `out` (flag === 2), and `in` as transpose(out). Each row
   * keeps at most topK edges, strongest weight first.
   * NOTE(review): edges whose third element (direction) is absent only show
   * up in `all` — `bi`/`out` compare strictly; confirm that is intended.
   * @param {number} [topK=64]
   */
  buildMultiOrderCSR(topK = 64) {
    const pts = this.rt.graph.getAllPoints();
    const N = pts.length | 0;
    const id2row = this.rowIndex;
    const row2id = this.rows.slice();

    const buildChannel = (filterFn) => {
      const perRow = new Array(N);
      let total = 0;
      for (let r = 0; r < N; r++) {
        const kept = (pts[r].connect || []).filter(filterFn);
        // Strongest edges first, then truncate to topK.
        kept.sort((a, b) => b[0] - a[0]);
        const top = kept.slice(0, topK);
        const cols = [];
        const vals = [];
        for (const [w, tgt] of top) {
          const col = id2row.get(tgt);
          if (col === undefined) continue; // target outside current window
          cols.push(col);
          vals.push((typeof w === 'number' && isFinite(w)) ? w : 1.0);
        }
        perRow[r] = { cols, vals };
        total += cols.length;
      }
      // Flatten per-row buffers into the CSR triplet.
      const rowPtr = new Uint32Array(N + 1);
      const colIdx = new Uint32Array(total);
      const values = new Float32Array(total);
      let write = 0;
      for (let r = 0; r < N; r++) {
        rowPtr[r] = write;
        const { cols, vals } = perRow[r];
        for (let j = 0; j < cols.length; j++) {
          colIdx[write] = cols[j];
          values[write] = vals[j];
          write++;
        }
      }
      rowPtr[N] = write;
      return new CSR(rowPtr, colIdx, values, N, N);
    };

    const ALL = buildChannel(() => true);
    const BI = buildChannel(([, , d]) => d === 0);
    const OUT = buildChannel(([, , d]) => d === 2);
    const IN = this.transposeCSR(OUT);

    this._multi = { all: ALL, bi: BI, out: OUT, in: IN, id2row, row2id };
    this.csrAll = ALL;
    return this._multi;
  }

  // Classic two-pass CSR transpose: count per-column entries, prefix-sum
  // into row pointers, then scatter with a moving cursor.
  transposeCSR(A) {
    const { nRows, nCols, rowPtr, colIdx, values } = A;
    const nnz = values.length;
    const counts = new Uint32Array(nCols);
    for (let p = 0; p < nnz; p++) counts[colIdx[p]]++;
    const rowPtrT = new Uint32Array(nCols + 1);
    for (let c = 0; c < nCols; c++) rowPtrT[c + 1] = rowPtrT[c] + counts[c];
    const colIdxT = new Uint32Array(nnz);
    const valuesT = new Float32Array(nnz);
    const cursor = rowPtrT.slice();
    for (let r = 0; r < nRows; r++) {
      for (let p = rowPtr[r]; p < rowPtr[r + 1]; p++) {
        const c = colIdx[p];
        const slot = cursor[c]++;
        colIdxT[slot] = r;
        valuesT[slot] = values[p];
      }
    }
    return new CSR(rowPtrT, colIdxT, valuesT, nCols, nRows);
  }

  get multi() { return this._multi; }
}
1723
+
1724
// 2-D projection of embeddings: fast approximate PCA via power iteration,
// with optional UMAP when the umap-js package is installed.
class DimReducer {
  /**
   * Approximate 2-D PCA using power iteration on the (uncentered-free)
   * covariance operator X^T X; the second axis is Gram-Schmidt-orthogonalized
   * against the first on every iteration.
   * @param {Float32Array} emb - row-major [N x D] matrix
   * @param {number} N - number of rows
   * @param {number} D - number of columns
   * @param {number} [iters=6] - power-iteration steps per component
   * @returns {Float32Array} row-major [N x 2] coordinates
   */
  pca2D(emb, N, D, iters = 6) {
    // Center columns around their mean.
    const centroid = new Float32Array(D);
    for (let i = 0; i < N; i++) {
      const off = i * D;
      for (let j = 0; j < D; j++) centroid[j] += emb[off + j];
    }
    for (let j = 0; j < D; j++) centroid[j] /= Math.max(1, N);
    const centered = new Float32Array(N * D);
    for (let i = 0; i < N; i++) {
      const off = i * D;
      for (let j = 0; j < D; j++) centered[off + j] = emb[off + j] - centroid[j];
    }

    // Random starting directions (power iteration converges from almost any init).
    let axis1 = new Float32Array(D);
    for (let j = 0; j < D; j++) axis1[j] = Math.random() - 0.5;
    let axis2 = new Float32Array(D);
    for (let j = 0; j < D; j++) axis2[j] = Math.random() - 0.5;

    // v -> (X^T X) v without forming the covariance matrix.
    const covTimes = (v) => {
      const acc = new Float32Array(D);
      for (let i = 0; i < N; i++) {
        const off = i * D;
        let proj = 0.0;
        for (let j = 0; j < D; j++) proj += centered[off + j] * v[j];
        for (let j = 0; j < D; j++) acc[j] += centered[off + j] * proj;
      }
      return acc;
    };
    // In-place normalization; zero vectors stay zero (norm falls back to 1).
    const toUnit = (v) => {
      let sq = 0.0;
      for (let j = 0; j < D; j++) sq += v[j] * v[j];
      const norm = Math.sqrt(sq) || 1.0;
      for (let j = 0; j < D; j++) v[j] /= norm;
    };

    for (let t = 0; t < iters; t++) {
      axis1 = covTimes(axis1);
      toUnit(axis1);
    }
    for (let t = 0; t < iters; t++) {
      axis2 = covTimes(axis2);
      // Remove the axis1 component (Gram-Schmidt) before renormalizing.
      let overlap = 0.0;
      for (let j = 0; j < D; j++) overlap += axis2[j] * axis1[j];
      for (let j = 0; j < D; j++) axis2[j] -= overlap * axis1[j];
      toUnit(axis2);
    }

    // Project every centered row onto the two axes.
    const coords = new Float32Array(N * 2);
    for (let i = 0; i < N; i++) {
      const off = i * D;
      let px = 0.0;
      let py = 0.0;
      for (let j = 0; j < D; j++) {
        const c = centered[off + j];
        px += c * axis1[j];
        py += c * axis2[j];
      }
      coords[2 * i + 0] = px;
      coords[2 * i + 1] = py;
    }
    return coords;
  }

  /**
   * Project to 2-D. 'umap'/'auto' attempt the optional umap-js dependency
   * and silently degrade to PCA when it is unavailable or fails.
   * @param {string} [method='auto'] - 'pca' | 'umap' | 'auto'
   */
  project2D(emb, N, D, method = 'auto') {
    if (method === 'pca') return this.pca2D(emb, N, D);
    if (method !== 'umap' && method !== 'auto') return this.pca2D(emb, N, D);
    try {
      // Lazy require keeps umap-js an optional dependency.
      const { UMAP } = require('umap-js');
      const umap = new UMAP({ nComponents: 2, nNeighbors: 15, minDist: 0.1 });
      // umap-js wants plain nested arrays, not typed arrays.
      const data = new Array(N);
      for (let i = 0; i < N; i++) {
        const row = new Array(D);
        const off = i * D;
        for (let j = 0; j < D; j++) row[j] = emb[off + j];
        data[i] = row;
      }
      const fitted = umap.fit(data);
      const flat = new Float32Array(N * 2);
      for (let i = 0; i < N; i++) {
        flat[2 * i] = fitted[i][0];
        flat[2 * i + 1] = fitted[i][1];
      }
      return flat;
    } catch (_) {
      // umap-js missing or errored: fall back to PCA.
      return this.pca2D(emb, N, D);
    }
  }
}
1813
+
1814
+ // 提供多通道邻接卷(all/bi/out/in),供三维张量视角
1815
// Thin facade that (re)builds the embedding matrix and the multi-channel
// adjacency CSRs (all/bi/out/in) through the runtime's GraphTensorBridge.
class MultiOrderAdjacency {
  constructor(runtime) {
    this.rt = runtime;
  }

  /**
   * Rebuild embeddings, then the multi-order CSR channels.
   * @param {number} [topK=64] - max edges kept per row
   * @param {number} [Demb=512] - embedding dimensionality
   * @returns {Object} the bridge's multi-channel bundle
   */
  rebuild(topK = 64, Demb = 512) {
    if (!this.rt.tensorBridge) {
      this.rt.tensorBridge = new GraphTensorBridge(this.rt);
    }
    const bridge = this.rt.tensorBridge;
    bridge.buildEmbeddings(Demb);
    return bridge.buildMultiOrderCSR(topK);
  }
}
1827
+ // ===== End of Linear Algebra Backend =====
1518
1828
 
1829
+ // ...existing code...
1519
1830
  class KVM {
1520
1831
  // this KVM is the key-value memory
1521
1832
  constructor() {
@@ -1697,6 +2008,7 @@ class Runtime {
1697
2008
  this.wordAccessLog = new Map();
1698
2009
  this.initWordGraph();
1699
2010
  this.MAX_MEME_WORDS = 100; // 单个模因最大词数
2011
+
1700
2012
  this.MIN_OVERLAP = 2; // 至少有2个词重叠才允许合并
1701
2013
  // Runtime类内新增
1702
2014
  this.activationStats = new Map(); // 记录激活关系
@@ -1708,6 +2020,51 @@ class Runtime {
1708
2020
  batchSizeMultiplier: 1
1709
2021
  };
1710
2022
  this.memeBarrier = new memeBarrier(this);
2023
+ this.tensor = new TensorEngine();
2024
+ this.tensorBridge = new GraphTensorBridge(this);
2025
+ this.dimReducer = new DimReducer();
2026
+ this.multiAdj = new MultiOrderAdjacency(this);
2027
+ this._laReady = false; // 线代缓存是否就绪
2028
+ }
2029
+ // 重建线代缓存:Embedding + 多通道CSR
2030
+ rebuildLinearAlgebraCaches({ topK = 64, embDim = 512 } = {}) {
2031
+ try {
2032
+ this.multiAdj.rebuild(topK, embDim);
2033
+ this._laReady = !!(this.tensorBridge?.multi?.all);
2034
+ return {
2035
+ ok: this._laReady,
2036
+ nodes: this.tensorBridge?.rows?.length || 0,
2037
+ embDim
2038
+ };
2039
+ } catch (e) {
2040
+ this._laReady = false;
2041
+ return { ok: false, error: e.message };
2042
+ }
2043
+ }
2044
+
2045
+ // 导出CSR(all通道)
2046
+ exportSparseMatrix() {
2047
+ const m = this.tensorBridge?.multi;
2048
+ if (!m?.all) return null;
2049
+ const A = m.all;
2050
+ return {
2051
+ nRows: A.nRows, nCols: A.nCols, nnz: A.nnz,
2052
+ rowPtr: Array.from(A.rowPtr),
2053
+ colIdx: Array.from(A.colIdx),
2054
+ values: Array.from(A.values),
2055
+ rows: this.tensorBridge.rows.slice()
2056
+ };
2057
+ }
2058
+ // 高维->2D投影(PCA/UMAP)
2059
+ foldHighDimTo2D(method = 'auto') {
2060
+ const emb = this.tensorBridge?.emb;
2061
+ const N = this.tensorBridge?.rows?.length || 0;
2062
+ const D = this.tensorBridge?.dim || 0;
2063
+ if (!emb || !N || !D) return { ok: false, error: 'embedding not ready' };
2064
+ const coords = this.dimReducer.project2D(emb, N, D, method);
2065
+ const out = {};
2066
+ for (let i = 0; i < N; i++) out[this.tensorBridge.rows[i]] = [coords[2 * i], coords[2 * i + 1]];
2067
+ return { ok: true, dim: 2, points: out };
1711
2068
  }
1712
2069
  // 获取/设置激活-传递函数配置
1713
2070
  getActivationConfig() {
@@ -2179,75 +2536,47 @@ class Runtime {
2179
2536
  * @returns {Object|Map} 激活结果
2180
2537
  */
2181
2538
  // 用于多源扩散:将“传递函数+激活函数”应用在每一步
2539
+ // 线代版多源扩散(不跟踪路径)
2182
2540
  propagateSignalMultiSource(startIDs, strengths, decayK, maxStep, options = {}) {
2183
- decayK = decayK !== undefined ? decayK : (this.config.decayK !== undefined ? this.config.decayK : 1);
2184
- maxStep = maxStep !== undefined ? maxStep : (this.config.maxStep !== undefined ? this.config.maxStep : 10);
2185
- const maxActiveNodes = options.maxActiveNodes || 5000;
2186
- const minSignal = options.minSignal || 0.01;
2187
- const trackPath = options.trackPath || false;
2188
- const directionalMultiplier = options.directionalMultiplier || 0.7;
2189
- const bidirectionalMultiplier = options.bidirectionalMultiplier || 1.2;
2190
-
2191
- const actFn = this._act || BuiltinActivations.relu;
2192
- const transferFn = this._transfer || BuiltinTransfers.linear;
2193
-
2194
- const signalMap = new Map();
2195
- const activationPaths = trackPath ? new Map() : null;
2196
- const activationTypes = trackPath ? new Map() : null;
2197
-
2198
- let active = startIDs.map((id, i) => ({
2199
- id, value: strengths[i], from: null, connectionType: -1
2200
- }));
2201
- let step = 0;
2202
-
2203
- while (active.length > 0 && step < maxStep) {
2204
- if (active.length > maxActiveNodes) {
2205
- active.sort((a, b) => b.value - a.value);
2206
- active = active.slice(0, maxActiveNodes);
2541
+ // 如果调用方需要trackPath,仍走旧逻辑
2542
+ if (options?.trackPath) {
2543
+ return super.propagateSignalMultiSource
2544
+ ? super.propagateSignalMultiSource(startIDs, strengths, decayK, maxStep, options)
2545
+ : this._propagateFallback(startIDs, strengths, decayK, maxStep, options);
2546
+ }
2547
+
2548
+ // 优先使用线代后端
2549
+ if (this._laReady && this.tensorBridge?.multi?.all) {
2550
+ const A = this.tensorBridge.multi.all;
2551
+ const id2row = this.tensorBridge.rowIndex;
2552
+ const seeds = [];
2553
+ for (let i = 0; i < startIDs.length; i++) {
2554
+ const r = id2row.get(startIDs[i]);
2555
+ if (r !== undefined) seeds.push([r, strengths[i] || 0]);
2207
2556
  }
2557
+ if (!seeds.length) return new Map();
2208
2558
 
2209
- const next = [];
2210
- for (const { id, value, from, connectionType } of active) {
2211
- if (value < minSignal) continue;
2212
-
2213
- // 节点处应用激活函数(融合累计)
2214
- const prev = signalMap.get(id) || 0;
2215
- const merged = actFn(prev + value);
2216
- signalMap.set(id, merged);
2559
+ const actFn = this._act || ((x) => (x > 0 ? x : 0));
2560
+ const steps = Math.max(1, maxStep | 0);
2561
+ const x = this.tensor.iteratePropagation(A, seeds, steps, actFn, (decayK ?? 1));
2217
2562
 
2218
- if (trackPath && connectionType !== -1) {
2219
- if (!activationTypes.has(id)) activationTypes.set(id, new Set());
2220
- activationTypes.get(id).add(connectionType);
2221
- }
2222
- if (trackPath && from) {
2223
- if (!activationPaths.has(id)) activationPaths.set(id, []);
2224
- activationPaths.get(id).push({ from, connectionType, value });
2225
- }
2226
-
2227
- const point = this.graph.points.get(id);
2228
- if (!point) continue;
2229
-
2230
- const MAX_NEIGHBORS = 30;
2231
- const neighbors = point.connect.slice(0, MAX_NEIGHBORS);
2232
-
2233
- for (const [weight, neighborID, direction = 0] of neighbors) {
2234
- const ctx = { direction, directionalMultiplier, bidirectionalMultiplier };
2235
- const rawNext = transferFn(value, weight, decayK, ctx);
2236
- const nextValue = actFn(rawNext);
2237
-
2238
- if (nextValue >= minSignal) {
2239
- next.push({ id: neighborID, value: nextValue, from: id, connectionType: direction });
2240
- }
2241
- }
2563
+ // 返回Map<MemeID,value>
2564
+ const out = new Map();
2565
+ for (let i = 0; i < x.length; i++) {
2566
+ if (x[i] > 0) out.set(this.tensorBridge.rows[i], x[i]);
2242
2567
  }
2243
- active = next;
2244
- step++;
2568
+ return out;
2245
2569
  }
2246
2570
 
2247
- if (trackPath) {
2248
- return { signalMap, activationPaths, activationTypes };
2249
- }
2250
- return signalMap;
2571
+ // 回退旧实现
2572
+ return this._propagateFallback(startIDs, strengths, decayK, maxStep, options);
2573
+ }
2574
+
2575
+ _propagateFallback(startIDs, strengths, decayK, maxStep, options) {
2576
+ // 使用文件里现有版本(保持兼容)
2577
+ // 这里直接调用之前定义的 Runtime.propagateSignalMultiSource 的原实现
2578
+ const fn = Object.getPrototypeOf(this).propagateSignalMultiSource;
2579
+ return fn.call(this, startIDs, strengths, decayK, maxStep, options);
2251
2580
  }
2252
2581
 
2253
2582
  // 主循环:持续归纳和信号传递
@@ -3425,6 +3754,70 @@ async function main() {
3425
3754
 
3426
3755
  // 安装对端监控触发器
3427
3756
  installPeerFailoverMonitor();
3757
+ // ...existing code...
3758
+ app.post('/api/tensor/refresh', (req, res) => {
3759
+ try {
3760
+ const { topK = 64, embDim = 512 } = req.body || {};
3761
+ const rt = global.ctrlA?.runtime;
3762
+ if (!rt) return res.status(500).json({ ok: false, error: 'runtime missing' });
3763
+ const ret = rt.rebuildLinearAlgebraCaches({ topK, embDim });
3764
+ res.json({ ok: !!ret.ok, ...ret });
3765
+ } catch (e) {
3766
+ res.status(500).json({ ok: false, error: e.message });
3767
+ }
3768
+ });
3769
+
3770
+ app.get('/api/tensor/csr', (req, res) => {
3771
+ try {
3772
+ const rt = global.ctrlA?.runtime;
3773
+ if (!rt) return res.status(500).json({ ok: false, error: 'runtime missing' });
3774
+ const csr = rt.exportSparseMatrix();
3775
+ if (!csr) return res.status(400).json({ ok: false, error: 'csr not ready' });
3776
+ res.json({ ok: true, csr });
3777
+ } catch (e) {
3778
+ res.status(500).json({ ok: false, error: e.message });
3779
+ }
3780
+ });
3781
+
3782
+ app.get('/api/tensor/project2d', (req, res) => {
3783
+ try {
3784
+ const method = String(req.query?.method || 'auto');
3785
+ const rt = global.ctrlA?.runtime;
3786
+ if (!rt) return res.status(500).json({ ok: false, error: 'runtime missing' });
3787
+ const ret = rt.foldHighDimTo2D(method);
3788
+ res.json(ret);
3789
+ } catch (e) {
3790
+ res.status(500).json({ ok: false, error: e.message });
3791
+ }
3792
+ });
3793
+
3794
+ app.get('/api/tensor/topk', (req, res) => {
3795
+ try {
3796
+ const memeId = String(req.query?.memeId || '');
3797
+ const k = Math.max(1, Math.min(50, Number(req.query?.k || 10)));
3798
+ const rt = global.ctrlA?.runtime;
3799
+ if (!rt?.tensorBridge?.emb) return res.status(400).json({ ok: false, error: 'embedding not ready' });
3800
+ const row = rt.tensorBridge.rowIndex.get(memeId);
3801
+ if (row === undefined) return res.status(404).json({ ok: false, error: 'meme not found' });
3802
+
3803
+ const N = rt.tensorBridge.rows.length, D = rt.tensorBridge.dim;
3804
+ const base = row * D;
3805
+ const q = new Float32Array(D);
3806
+ for (let j = 0; j < D; j++) q[j] = rt.tensorBridge.emb[base + j];
3807
+
3808
+ const scores = new Array(N);
3809
+ for (let i = 0; i < N; i++) {
3810
+ const b = i * D;
3811
+ let s = 0.0; for (let j = 0; j < D; j++) s += q[j] * rt.tensorBridge.emb[b + j];
3812
+ scores[i] = [rt.tensorBridge.rows[i], s];
3813
+ }
3814
+ scores.sort((a, b) => b[1] - a[1]);
3815
+ res.json({ ok: true, memeId, neighbors: scores.slice(0, k) });
3816
+ } catch (e) {
3817
+ res.status(500).json({ ok: false, error: e.message });
3818
+ }
3819
+ });
3820
+ // ...existing code...
3428
3821
  app.get('/api/graph/partitions/status', async (req, res) => {
3429
3822
  try {
3430
3823
  const g = global.ctrlA?.runtime?.graph;