graphwise 1.0.0 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +41 -0
- package/dist/expansion/base.d.ts +12 -0
- package/dist/expansion/base.d.ts.map +1 -0
- package/dist/expansion/base.unit.test.d.ts +2 -0
- package/dist/expansion/base.unit.test.d.ts.map +1 -0
- package/dist/expansion/dome.d.ts +16 -0
- package/dist/expansion/dome.d.ts.map +1 -0
- package/dist/expansion/dome.unit.test.d.ts +2 -0
- package/dist/expansion/dome.unit.test.d.ts.map +1 -0
- package/dist/expansion/edge.d.ts +15 -0
- package/dist/expansion/edge.d.ts.map +1 -0
- package/dist/expansion/edge.unit.test.d.ts +2 -0
- package/dist/expansion/edge.unit.test.d.ts.map +1 -0
- package/dist/expansion/hae.d.ts +22 -0
- package/dist/expansion/hae.d.ts.map +1 -0
- package/dist/expansion/hae.unit.test.d.ts +2 -0
- package/dist/expansion/hae.unit.test.d.ts.map +1 -0
- package/dist/expansion/index.d.ts +22 -0
- package/dist/expansion/index.d.ts.map +1 -0
- package/dist/expansion/maze.d.ts +25 -0
- package/dist/expansion/maze.d.ts.map +1 -0
- package/dist/expansion/maze.unit.test.d.ts +2 -0
- package/dist/expansion/maze.unit.test.d.ts.map +1 -0
- package/dist/expansion/pipe.d.ts +15 -0
- package/dist/expansion/pipe.d.ts.map +1 -0
- package/dist/expansion/pipe.unit.test.d.ts +2 -0
- package/dist/expansion/pipe.unit.test.d.ts.map +1 -0
- package/dist/expansion/reach.d.ts +26 -0
- package/dist/expansion/reach.d.ts.map +1 -0
- package/dist/expansion/reach.unit.test.d.ts +2 -0
- package/dist/expansion/reach.unit.test.d.ts.map +1 -0
- package/dist/expansion/sage.d.ts +24 -0
- package/dist/expansion/sage.d.ts.map +1 -0
- package/dist/expansion/sage.unit.test.d.ts +2 -0
- package/dist/expansion/sage.unit.test.d.ts.map +1 -0
- package/dist/expansion/types.d.ts +105 -0
- package/dist/expansion/types.d.ts.map +1 -0
- package/dist/extraction/ego-network.d.ts +32 -0
- package/dist/extraction/ego-network.d.ts.map +1 -0
- package/dist/extraction/ego-network.unit.test.d.ts +5 -0
- package/dist/extraction/ego-network.unit.test.d.ts.map +1 -0
- package/dist/extraction/index.d.ts +20 -0
- package/dist/extraction/index.d.ts.map +1 -0
- package/dist/extraction/induced-subgraph.d.ts +19 -0
- package/dist/extraction/induced-subgraph.d.ts.map +1 -0
- package/dist/extraction/induced-subgraph.unit.test.d.ts +5 -0
- package/dist/extraction/induced-subgraph.unit.test.d.ts.map +1 -0
- package/dist/extraction/k-core.d.ts +24 -0
- package/dist/extraction/k-core.d.ts.map +1 -0
- package/dist/extraction/k-core.unit.test.d.ts +5 -0
- package/dist/extraction/k-core.unit.test.d.ts.map +1 -0
- package/dist/extraction/motif.d.ts +50 -0
- package/dist/extraction/motif.d.ts.map +1 -0
- package/dist/extraction/motif.unit.test.d.ts +5 -0
- package/dist/extraction/motif.unit.test.d.ts.map +1 -0
- package/dist/extraction/node-filter.d.ts +35 -0
- package/dist/extraction/node-filter.d.ts.map +1 -0
- package/dist/extraction/node-filter.unit.test.d.ts +5 -0
- package/dist/extraction/node-filter.unit.test.d.ts.map +1 -0
- package/dist/extraction/truss.d.ts +41 -0
- package/dist/extraction/truss.d.ts.map +1 -0
- package/dist/extraction/truss.unit.test.d.ts +5 -0
- package/dist/extraction/truss.unit.test.d.ts.map +1 -0
- package/dist/gpu/context.d.ts +118 -0
- package/dist/gpu/context.d.ts.map +1 -0
- package/dist/gpu/context.unit.test.d.ts +2 -0
- package/dist/gpu/context.unit.test.d.ts.map +1 -0
- package/dist/gpu/csr.d.ts +97 -0
- package/dist/gpu/csr.d.ts.map +1 -0
- package/dist/gpu/csr.unit.test.d.ts +2 -0
- package/dist/gpu/csr.unit.test.d.ts.map +1 -0
- package/dist/gpu/detect.d.ts +25 -0
- package/dist/gpu/detect.d.ts.map +1 -0
- package/dist/gpu/detect.unit.test.d.ts +2 -0
- package/dist/gpu/detect.unit.test.d.ts.map +1 -0
- package/dist/gpu/index.cjs +6 -0
- package/dist/gpu/index.d.ts +11 -0
- package/dist/gpu/index.d.ts.map +1 -0
- package/dist/gpu/index.js +2 -0
- package/dist/gpu/types.d.ts +50 -0
- package/dist/gpu/types.d.ts.map +1 -0
- package/dist/gpu-BJRVYBjx.cjs +338 -0
- package/dist/gpu-BJRVYBjx.cjs.map +1 -0
- package/dist/gpu-BveuXugy.js +315 -0
- package/dist/gpu-BveuXugy.js.map +1 -0
- package/dist/graph/adjacency-map.d.ts +95 -0
- package/dist/graph/adjacency-map.d.ts.map +1 -0
- package/dist/graph/adjacency-map.unit.test.d.ts +2 -0
- package/dist/graph/adjacency-map.unit.test.d.ts.map +1 -0
- package/dist/graph/index.cjs +3 -0
- package/dist/graph/index.d.ts +9 -0
- package/dist/graph/index.d.ts.map +1 -0
- package/dist/graph/index.js +2 -0
- package/dist/graph/interfaces.d.ts +125 -0
- package/dist/graph/interfaces.d.ts.map +1 -0
- package/dist/graph/types.d.ts +72 -0
- package/dist/graph/types.d.ts.map +1 -0
- package/dist/graph-DLWiziLB.js +222 -0
- package/dist/graph-DLWiziLB.js.map +1 -0
- package/dist/graph-az06J1YV.cjs +227 -0
- package/dist/graph-az06J1YV.cjs.map +1 -0
- package/dist/index/index.cjs +1404 -0
- package/dist/index/index.cjs.map +1 -0
- package/dist/index/index.js +1356 -0
- package/dist/index/index.js.map +1 -0
- package/dist/index.d.ts +15 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/kmeans-B0HEOU6k.cjs +234 -0
- package/dist/kmeans-B0HEOU6k.cjs.map +1 -0
- package/dist/kmeans-DgbsOznU.js +223 -0
- package/dist/kmeans-DgbsOznU.js.map +1 -0
- package/dist/ranking/baselines/shortest.d.ts +15 -0
- package/dist/ranking/baselines/shortest.d.ts.map +1 -0
- package/dist/ranking/baselines/shortest.unit.test.d.ts +2 -0
- package/dist/ranking/baselines/shortest.unit.test.d.ts.map +1 -0
- package/dist/ranking/baselines/types.d.ts +30 -0
- package/dist/ranking/baselines/types.d.ts.map +1 -0
- package/dist/ranking/index.d.ts +15 -0
- package/dist/ranking/index.d.ts.map +1 -0
- package/dist/ranking/mi/adamic-adar.d.ts +13 -0
- package/dist/ranking/mi/adamic-adar.d.ts.map +1 -0
- package/dist/ranking/mi/adaptive.d.ts +16 -0
- package/dist/ranking/mi/adaptive.d.ts.map +1 -0
- package/dist/ranking/mi/etch.d.ts +7 -0
- package/dist/ranking/mi/etch.d.ts.map +1 -0
- package/dist/ranking/mi/index.d.ts +18 -0
- package/dist/ranking/mi/index.d.ts.map +1 -0
- package/dist/ranking/mi/jaccard.d.ts +13 -0
- package/dist/ranking/mi/jaccard.d.ts.map +1 -0
- package/dist/ranking/mi/mi-variants.unit.test.d.ts +2 -0
- package/dist/ranking/mi/mi-variants.unit.test.d.ts.map +1 -0
- package/dist/ranking/mi/notch.d.ts +7 -0
- package/dist/ranking/mi/notch.d.ts.map +1 -0
- package/dist/ranking/mi/scale.d.ts +7 -0
- package/dist/ranking/mi/scale.d.ts.map +1 -0
- package/dist/ranking/mi/skew.d.ts +7 -0
- package/dist/ranking/mi/skew.d.ts.map +1 -0
- package/dist/ranking/mi/span.d.ts +7 -0
- package/dist/ranking/mi/span.d.ts.map +1 -0
- package/dist/ranking/mi/types.d.ts +35 -0
- package/dist/ranking/mi/types.d.ts.map +1 -0
- package/dist/ranking/parse.d.ts +56 -0
- package/dist/ranking/parse.d.ts.map +1 -0
- package/dist/ranking/parse.unit.test.d.ts +2 -0
- package/dist/ranking/parse.unit.test.d.ts.map +1 -0
- package/dist/schemas/define.d.ts +18 -0
- package/dist/schemas/define.d.ts.map +1 -0
- package/dist/schemas/define.unit.test.d.ts +2 -0
- package/dist/schemas/define.unit.test.d.ts.map +1 -0
- package/dist/schemas/graph.d.ts +85 -0
- package/dist/schemas/graph.d.ts.map +1 -0
- package/dist/schemas/graph.unit.test.d.ts +2 -0
- package/dist/schemas/graph.unit.test.d.ts.map +1 -0
- package/dist/schemas/index.cjs +3791 -0
- package/dist/schemas/index.cjs.map +1 -0
- package/dist/schemas/index.d.ts +3 -0
- package/dist/schemas/index.d.ts.map +1 -0
- package/dist/schemas/index.js +3782 -0
- package/dist/schemas/index.js.map +1 -0
- package/dist/seeds/grasp.d.ts +79 -0
- package/dist/seeds/grasp.d.ts.map +1 -0
- package/dist/seeds/grasp.unit.test.d.ts +2 -0
- package/dist/seeds/grasp.unit.test.d.ts.map +1 -0
- package/dist/seeds/index.cjs +4 -0
- package/dist/seeds/index.d.ts +10 -0
- package/dist/seeds/index.d.ts.map +1 -0
- package/dist/seeds/index.js +2 -0
- package/dist/seeds/stratified.d.ts +85 -0
- package/dist/seeds/stratified.d.ts.map +1 -0
- package/dist/seeds/stratified.unit.test.d.ts +2 -0
- package/dist/seeds/stratified.unit.test.d.ts.map +1 -0
- package/dist/seeds-B6J9oJfU.cjs +404 -0
- package/dist/seeds-B6J9oJfU.cjs.map +1 -0
- package/dist/seeds-UNZxqm_U.js +393 -0
- package/dist/seeds-UNZxqm_U.js.map +1 -0
- package/dist/structures/index.cjs +3 -0
- package/dist/structures/index.d.ts +3 -0
- package/dist/structures/index.d.ts.map +1 -0
- package/dist/structures/index.js +2 -0
- package/dist/structures/priority-queue.d.ts +59 -0
- package/dist/structures/priority-queue.d.ts.map +1 -0
- package/dist/structures/priority-queue.unit.test.d.ts +2 -0
- package/dist/structures/priority-queue.unit.test.d.ts.map +1 -0
- package/dist/structures-BPfhfqNP.js +133 -0
- package/dist/structures-BPfhfqNP.js.map +1 -0
- package/dist/structures-CJ_S_7fs.cjs +138 -0
- package/dist/structures-CJ_S_7fs.cjs.map +1 -0
- package/dist/traversal/bfs.d.ts +50 -0
- package/dist/traversal/bfs.d.ts.map +1 -0
- package/dist/traversal/bfs.unit.test.d.ts +2 -0
- package/dist/traversal/bfs.unit.test.d.ts.map +1 -0
- package/dist/traversal/dfs.d.ts +50 -0
- package/dist/traversal/dfs.d.ts.map +1 -0
- package/dist/traversal/dfs.unit.test.d.ts +2 -0
- package/dist/traversal/dfs.unit.test.d.ts.map +1 -0
- package/dist/traversal/index.cjs +6 -0
- package/dist/traversal/index.d.ts +11 -0
- package/dist/traversal/index.d.ts.map +1 -0
- package/dist/traversal/index.js +2 -0
- package/dist/traversal-CQCjUwUJ.js +149 -0
- package/dist/traversal-CQCjUwUJ.js.map +1 -0
- package/dist/traversal-QeHaNUWn.cjs +172 -0
- package/dist/traversal-QeHaNUWn.cjs.map +1 -0
- package/dist/utils/clustering-coefficient.d.ts +38 -0
- package/dist/utils/clustering-coefficient.d.ts.map +1 -0
- package/dist/utils/clustering-coefficient.unit.test.d.ts +2 -0
- package/dist/utils/clustering-coefficient.unit.test.d.ts.map +1 -0
- package/dist/utils/entropy.d.ts +58 -0
- package/dist/utils/entropy.d.ts.map +1 -0
- package/dist/utils/entropy.unit.test.d.ts +2 -0
- package/dist/utils/entropy.unit.test.d.ts.map +1 -0
- package/dist/utils/index.cjs +13 -0
- package/dist/utils/index.d.ts +9 -0
- package/dist/utils/index.d.ts.map +1 -0
- package/dist/utils/index.js +3 -0
- package/dist/utils/kmeans.d.ts +73 -0
- package/dist/utils/kmeans.d.ts.map +1 -0
- package/dist/utils/kmeans.unit.test.d.ts +2 -0
- package/dist/utils/kmeans.unit.test.d.ts.map +1 -0
- package/dist/utils-Q_akvlMn.js +164 -0
- package/dist/utils-Q_akvlMn.js.map +1 -0
- package/dist/utils-spZa1ZvS.cjs +205 -0
- package/dist/utils-spZa1ZvS.cjs.map +1 -0
- package/package.json +136 -8
|
@@ -0,0 +1,172 @@
|
|
|
//#region src/traversal/bfs.ts
/**
 * Perform a breadth-first search traversal from a start node.
 *
 * This generator yields node IDs in BFS order. It uses lazy evaluation
 * via a generator, making it memory-efficient for large graphs.
 *
 * @param graph - The graph to traverse
 * @param start - The starting node ID
 * @yields Node IDs in BFS order
 *
 * @example
 * ```typescript
 * for (const nodeId of bfs(graph, "A")) {
 * console.log(nodeId);
 * }
 * ```
 */
function* bfs(graph, start) {
	if (!graph.hasNode(start)) return;
	const visited = new Set([start]);
	const queue = [start];
	// Dequeue via a moving head index rather than Array#shift(): shift()
	// re-indexes the whole array (O(n) per call), which made the original
	// traversal O(n^2) on large graphs. The visit order is unchanged.
	let head = 0;
	while (head < queue.length) {
		const current = queue[head];
		head += 1;
		yield current;
		for (const neighbour of graph.neighbours(current)) {
			if (!visited.has(neighbour)) {
				visited.add(neighbour);
				queue.push(neighbour);
			}
		}
	}
}
|
|
/**
 * Perform a breadth-first search traversal with path information.
 *
 * This generator yields detailed information about each visited node,
 * including its depth from the start and parent in the BFS tree.
 * Useful for reconstructing shortest paths.
 *
 * @param graph - The graph to traverse
 * @param start - The starting node ID
 * @yields Objects containing node ID, depth, and parent
 *
 * @example
 * ```typescript
 * for (const entry of bfsWithPath(graph, "A")) {
 * console.log(`${entry.node} at depth ${entry.depth}`);
 * }
 * ```
 */
function* bfsWithPath(graph, start) {
	if (!graph.hasNode(start)) return;
	const visited = new Set([start]);
	const queue = [{
		node: start,
		depth: 0,
		parent: void 0
	}];
	// Dequeue via a moving head index rather than Array#shift(): shift()
	// re-indexes the whole array (O(n) per call), which made the original
	// traversal O(n^2) on large graphs. The visit order is unchanged.
	let head = 0;
	while (head < queue.length) {
		const current = queue[head];
		head += 1;
		yield current;
		for (const neighbour of graph.neighbours(current.node)) {
			if (!visited.has(neighbour)) {
				visited.add(neighbour);
				queue.push({
					node: neighbour,
					depth: current.depth + 1,
					parent: current.node
				});
			}
		}
	}
}
|
|
//#endregion
//#region src/traversal/dfs.ts
/**
 * Perform a depth-first search traversal from a start node.
 *
 * Yields node IDs in pre-order DFS sequence, lazily via a generator, so
 * large graphs are handled without materialising the whole visit order.
 *
 * @param graph - The graph to traverse
 * @param start - The starting node ID
 * @yields Node IDs in DFS order
 *
 * @example
 * ```typescript
 * for (const nodeId of dfs(graph, "A")) {
 * console.log(nodeId);
 * }
 * ```
 */
function* dfs(graph, start) {
	if (!graph.hasNode(start)) return;
	const seen = /* @__PURE__ */ new Set();
	const pending = [start];
	while (pending.length > 0) {
		const node = pending.pop();
		if (node === void 0) break;
		if (seen.has(node)) continue;
		seen.add(node);
		yield node;
		// Push neighbours back-to-front so popping preserves the original
		// left-to-right visiting order.
		const adjacent = [...graph.neighbours(node)];
		for (let i = adjacent.length - 1; i >= 0; i -= 1) {
			const candidate = adjacent[i];
			if (!seen.has(candidate)) pending.push(candidate);
		}
	}
}
|
|
/**
 * Perform a depth-first search traversal with path information.
 *
 * Yields one entry per visited node carrying the node ID, its depth from
 * the start node, and its parent in the DFS tree (undefined for the start).
 *
 * @param graph - The graph to traverse
 * @param start - The starting node ID
 * @yields Objects containing node ID, depth, and parent
 *
 * @example
 * ```typescript
 * for (const entry of dfsWithPath(graph, "A")) {
 * console.log(`${entry.node} at depth ${entry.depth}`);
 * }
 * ```
 */
function* dfsWithPath(graph, start) {
	if (!graph.hasNode(start)) return;
	const seen = /* @__PURE__ */ new Set();
	const pending = [{
		node: start,
		depth: 0,
		parent: void 0
	}];
	while (pending.length > 0) {
		const entry = pending.pop();
		if (entry === void 0) break;
		if (seen.has(entry.node)) continue;
		seen.add(entry.node);
		yield entry;
		// Push neighbours back-to-front so popping preserves the original
		// left-to-right visiting order.
		const adjacent = [...graph.neighbours(entry.node)];
		for (let i = adjacent.length - 1; i >= 0; i -= 1) {
			const candidate = adjacent[i];
			if (!seen.has(candidate)) {
				pending.push({
					node: candidate,
					depth: entry.depth + 1,
					parent: entry.node
				});
			}
		}
	}
}
|
|
//#endregion
// Expose the traversal generators on the CommonJS exports object as
// enumerable getter bindings, matching the bundler's ESM-interop shape.
const traversalExports = {
	bfs,
	bfsWithPath,
	dfs,
	dfsWithPath
};
for (const exportName of Object.keys(traversalExports)) {
	Object.defineProperty(exports, exportName, {
		enumerable: true,
		get: function() {
			return traversalExports[exportName];
		}
	});
}

//# sourceMappingURL=traversal-QeHaNUWn.cjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"traversal-QeHaNUWn.cjs","names":[],"sources":["../src/traversal/bfs.ts","../src/traversal/dfs.ts"],"sourcesContent":["/**\n * Breadth-first search traversal algorithms.\n *\n * BFS explores nodes level by level, visiting all neighbours at the current\n * depth before moving to nodes at the next depth level. This guarantees\n * shortest path discovery in unweighted graphs.\n */\n\nimport type { ReadableGraph, NodeId } from \"../graph\";\n\n/**\n * Node visit information during BFS traversal.\n */\nexport interface BfsPathEntry {\n\t/** The visited node identifier */\n\tnode: NodeId;\n\t/** Distance from the start node (0 for start node) */\n\tdepth: number;\n\t/** Parent node in the BFS tree (undefined for start node) */\n\tparent: NodeId | undefined;\n}\n\n/**\n * Perform a breadth-first search traversal from a start node.\n *\n * This generator yields node IDs in BFS order. It uses lazy evaluation\n * via a generator, making it memory-efficient for large graphs.\n *\n * @param graph - The graph to traverse\n * @param start - The starting node ID\n * @yields Node IDs in BFS order\n *\n * @example\n * ```typescript\n * for (const nodeId of bfs(graph, \"A\")) {\n * console.log(nodeId);\n * }\n * ```\n */\nexport function* bfs(graph: ReadableGraph, start: NodeId): Iterable<NodeId> {\n\tif (!graph.hasNode(start)) {\n\t\treturn;\n\t}\n\n\tconst visited = new Set<NodeId>([start]);\n\tconst queue: NodeId[] = [start];\n\n\twhile (queue.length > 0) {\n\t\tconst current = queue.shift();\n\t\tif (current === undefined) break;\n\t\tyield current;\n\n\t\tfor (const neighbour of graph.neighbours(current)) {\n\t\t\tif (!visited.has(neighbour)) {\n\t\t\t\tvisited.add(neighbour);\n\t\t\t\tqueue.push(neighbour);\n\t\t\t}\n\t\t}\n\t}\n}\n\n/**\n * Perform a breadth-first search traversal with path information.\n *\n * This generator yields detailed information about each visited node,\n * including its depth from the start and parent in the BFS tree.\n * Useful 
for reconstructing shortest paths.\n *\n * @param graph - The graph to traverse\n * @param start - The starting node ID\n * @yields Objects containing node ID, depth, and parent\n *\n * @example\n * ```typescript\n * for (const entry of bfsWithPath(graph, \"A\")) {\n * console.log(`${entry.node} at depth ${entry.depth}`);\n * }\n * ```\n */\nexport function* bfsWithPath(\n\tgraph: ReadableGraph,\n\tstart: NodeId,\n): Iterable<BfsPathEntry> {\n\tif (!graph.hasNode(start)) {\n\t\treturn;\n\t}\n\n\tconst visited = new Set<NodeId>([start]);\n\tconst queue: { node: NodeId; depth: number; parent: NodeId | undefined }[] = [\n\t\t{ node: start, depth: 0, parent: undefined },\n\t];\n\n\twhile (queue.length > 0) {\n\t\tconst current = queue.shift();\n\t\tif (current === undefined) break;\n\t\tyield current;\n\n\t\tfor (const neighbour of graph.neighbours(current.node)) {\n\t\t\tif (!visited.has(neighbour)) {\n\t\t\t\tvisited.add(neighbour);\n\t\t\t\tqueue.push({\n\t\t\t\t\tnode: neighbour,\n\t\t\t\t\tdepth: current.depth + 1,\n\t\t\t\t\tparent: current.node,\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n}\n","/**\n * Depth-first search traversal algorithms.\n *\n * DFS explores as far as possible along each branch before backtracking.\n * This makes it useful for cycle detection, topological sorting, and\n * finding connected components.\n */\n\nimport type { ReadableGraph, NodeId } from \"../graph\";\n\n/**\n * Node visit information during DFS traversal.\n */\nexport interface DfsPathEntry {\n\t/** The visited node identifier */\n\tnode: NodeId;\n\t/** Distance from the start node (0 for start node) */\n\tdepth: number;\n\t/** Parent node in the DFS tree (undefined for start node) */\n\tparent: NodeId | undefined;\n}\n\n/**\n * Perform a depth-first search traversal from a start node.\n *\n * This generator yields node IDs in DFS order (pre-order traversal).\n * It uses lazy evaluation via a generator, making it memory-efficient\n * for large graphs.\n *\n * @param graph - The graph 
to traverse\n * @param start - The starting node ID\n * @yields Node IDs in DFS order\n *\n * @example\n * ```typescript\n * for (const nodeId of dfs(graph, \"A\")) {\n * console.log(nodeId);\n * }\n * ```\n */\nexport function* dfs(graph: ReadableGraph, start: NodeId): Iterable<NodeId> {\n\tif (!graph.hasNode(start)) {\n\t\treturn;\n\t}\n\n\tconst visited = new Set<NodeId>();\n\tconst stack: NodeId[] = [start];\n\n\twhile (stack.length > 0) {\n\t\tconst current = stack.pop();\n\t\tif (current === undefined) break;\n\n\t\tif (visited.has(current)) {\n\t\t\tcontinue;\n\t\t}\n\n\t\tvisited.add(current);\n\t\tyield current;\n\n\t\t// Add neighbours in reverse order to maintain left-to-right traversal\n\t\t// when popping from the stack\n\t\tconst neighbours = [...graph.neighbours(current)].reverse();\n\t\tfor (const neighbour of neighbours) {\n\t\t\tif (!visited.has(neighbour)) {\n\t\t\t\tstack.push(neighbour);\n\t\t\t}\n\t\t}\n\t}\n}\n\n/**\n * Perform a depth-first search traversal with path information.\n *\n * This generator yields detailed information about each visited node,\n * including its depth from the start and parent in the DFS tree.\n *\n * @param graph - The graph to traverse\n * @param start - The starting node ID\n * @yields Objects containing node ID, depth, and parent\n *\n * @example\n * ```typescript\n * for (const entry of dfsWithPath(graph, \"A\")) {\n * console.log(`${entry.node} at depth ${entry.depth}`);\n * }\n * ```\n */\nexport function* dfsWithPath(\n\tgraph: ReadableGraph,\n\tstart: NodeId,\n): Iterable<DfsPathEntry> {\n\tif (!graph.hasNode(start)) {\n\t\treturn;\n\t}\n\n\tconst visited = new Set<NodeId>();\n\tconst stack: { node: NodeId; depth: number; parent: NodeId | undefined }[] = [\n\t\t{ node: start, depth: 0, parent: undefined },\n\t];\n\n\twhile (stack.length > 0) {\n\t\tconst current = stack.pop();\n\t\tif (current === undefined) break;\n\n\t\tif (visited.has(current.node)) 
{\n\t\t\tcontinue;\n\t\t}\n\n\t\tvisited.add(current.node);\n\t\tyield current;\n\n\t\t// Add neighbours in reverse order to maintain left-to-right traversal\n\t\t// when popping from the stack\n\t\tconst neighbours = [...graph.neighbours(current.node)].reverse();\n\t\tfor (const neighbour of neighbours) {\n\t\t\tif (!visited.has(neighbour)) {\n\t\t\t\tstack.push({\n\t\t\t\t\tnode: neighbour,\n\t\t\t\t\tdepth: current.depth + 1,\n\t\t\t\t\tparent: current.node,\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAuCA,UAAiB,IAAI,OAAsB,OAAiC;AAC3E,KAAI,CAAC,MAAM,QAAQ,MAAM,CACxB;CAGD,MAAM,UAAU,IAAI,IAAY,CAAC,MAAM,CAAC;CACxC,MAAM,QAAkB,CAAC,MAAM;AAE/B,QAAO,MAAM,SAAS,GAAG;EACxB,MAAM,UAAU,MAAM,OAAO;AAC7B,MAAI,YAAY,KAAA,EAAW;AAC3B,QAAM;AAEN,OAAK,MAAM,aAAa,MAAM,WAAW,QAAQ,CAChD,KAAI,CAAC,QAAQ,IAAI,UAAU,EAAE;AAC5B,WAAQ,IAAI,UAAU;AACtB,SAAM,KAAK,UAAU;;;;;;;;;;;;;;;;;;;;;;AAwBzB,UAAiB,YAChB,OACA,OACyB;AACzB,KAAI,CAAC,MAAM,QAAQ,MAAM,CACxB;CAGD,MAAM,UAAU,IAAI,IAAY,CAAC,MAAM,CAAC;CACxC,MAAM,QAAuE,CAC5E;EAAE,MAAM;EAAO,OAAO;EAAG,QAAQ,KAAA;EAAW,CAC5C;AAED,QAAO,MAAM,SAAS,GAAG;EACxB,MAAM,UAAU,MAAM,OAAO;AAC7B,MAAI,YAAY,KAAA,EAAW;AAC3B,QAAM;AAEN,OAAK,MAAM,aAAa,MAAM,WAAW,QAAQ,KAAK,CACrD,KAAI,CAAC,QAAQ,IAAI,UAAU,EAAE;AAC5B,WAAQ,IAAI,UAAU;AACtB,SAAM,KAAK;IACV,MAAM;IACN,OAAO,QAAQ,QAAQ;IACvB,QAAQ,QAAQ;IAChB,CAAC;;;;;;;;;;;;;;;;;;;;;;;;AChEN,UAAiB,IAAI,OAAsB,OAAiC;AAC3E,KAAI,CAAC,MAAM,QAAQ,MAAM,CACxB;CAGD,MAAM,0BAAU,IAAI,KAAa;CACjC,MAAM,QAAkB,CAAC,MAAM;AAE/B,QAAO,MAAM,SAAS,GAAG;EACxB,MAAM,UAAU,MAAM,KAAK;AAC3B,MAAI,YAAY,KAAA,EAAW;AAE3B,MAAI,QAAQ,IAAI,QAAQ,CACvB;AAGD,UAAQ,IAAI,QAAQ;AACpB,QAAM;EAIN,MAAM,aAAa,CAAC,GAAG,MAAM,WAAW,QAAQ,CAAC,CAAC,SAAS;AAC3D,OAAK,MAAM,aAAa,WACvB,KAAI,CAAC,QAAQ,IAAI,UAAU,CAC1B,OAAM,KAAK,UAAU;;;;;;;;;;;;;;;;;;;;AAuBzB,UAAiB,YAChB,OACA,OACyB;AACzB,KAAI,CAAC,MAAM,QAAQ,MAAM,CACxB;CAGD,MAAM,0BAAU,IAAI,KAAa;CACjC,MAAM,QAAuE,CAC5E;EAAE,MAAM;EAAO,OAAO;EAAG,QAAQ,KAAA;EAAW,CAC5C;AAED,QAAO,MAAM,SAAS,GAAG;EACxB,MAAM,UAAU,MAAM,KAAK;AAC3B,MAAI,YAAY,KAAA,EAAW;AAE3
B,MAAI,QAAQ,IAAI,QAAQ,KAAK,CAC5B;AAGD,UAAQ,IAAI,QAAQ,KAAK;AACzB,QAAM;EAIN,MAAM,aAAa,CAAC,GAAG,MAAM,WAAW,QAAQ,KAAK,CAAC,CAAC,SAAS;AAChE,OAAK,MAAM,aAAa,WACvB,KAAI,CAAC,QAAQ,IAAI,UAAU,CAC1B,OAAM,KAAK;GACV,MAAM;GACN,OAAO,QAAQ,QAAQ;GACvB,QAAQ,QAAQ;GAChB,CAAC"}
|
|
@@ -0,0 +1,38 @@
|
|
|
// Type declarations for the clustering-coefficient utilities
// (generated alongside dist/utils; implementations live in src/utils).
import { ReadableGraph, NodeId } from '../graph';
/**
 * Compute the local clustering coefficient for a single node.
 *
 * The clustering coefficient is defined as:
 * CC(v) = (triangles through v) / (possible triangles)
 * CC(v) = 2 * |{(u,w) : u,w in N(v), (u,w) in E}| / (deg(v) * (deg(v) - 1))
 *
 * For nodes with degree < 2, the clustering coefficient is 0.
 *
 * @param graph - The graph to compute on
 * @param nodeId - The node to compute clustering coefficient for
 * @returns The clustering coefficient in [0, 1], or 0 if undefined
 */
export declare function localClusteringCoefficient(graph: ReadableGraph, nodeId: NodeId): number;
/**
 * Compute approximate local clustering coefficient using sampling.
 *
 * For nodes with many neighbours, this samples neighbour pairs rather than
 * checking all pairs. Useful for large graphs where exact computation is expensive.
 *
 * @param graph - The graph to compute on
 * @param nodeId - The node to compute clustering coefficient for
 * @param sampleSize - Maximum number of neighbour pairs to check (default: 100)
 * @returns The approximate clustering coefficient in [0, 1]
 */
export declare function approximateClusteringCoefficient(graph: ReadableGraph, nodeId: NodeId, sampleSize?: number): number;
/**
 * Compute clustering coefficients for multiple nodes efficiently.
 *
 * Reuses neighbour sets to avoid repeated iteration.
 *
 * @param graph - The graph to compute on
 * @param nodeIds - The nodes to compute clustering coefficients for
 * @returns Map from nodeId to clustering coefficient
 */
export declare function batchClusteringCoefficients(graph: ReadableGraph, nodeIds: readonly NodeId[]): Map<NodeId, number>;
//# sourceMappingURL=clustering-coefficient.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"clustering-coefficient.d.ts","sourceRoot":"","sources":["../../src/utils/clustering-coefficient.ts"],"names":[],"mappings":"AAAA;;;;;;;;GAQG;AAEH,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,EAAE,MAAM,UAAU,CAAC;AAEtD;;;;;;;;;;;;GAYG;AACH,wBAAgB,0BAA0B,CACzC,KAAK,EAAE,aAAa,EACpB,MAAM,EAAE,MAAM,GACZ,MAAM,CAmCR;AAED;;;;;;;;;;GAUG;AACH,wBAAgB,gCAAgC,CAC/C,KAAK,EAAE,aAAa,EACpB,MAAM,EAAE,MAAM,EACd,UAAU,SAAM,GACd,MAAM,CA2CR;AAED;;;;;;;;GAQG;AACH,wBAAgB,2BAA2B,CAC1C,KAAK,EAAE,aAAa,EACpB,OAAO,EAAE,SAAS,MAAM,EAAE,GACxB,GAAG,CAAC,MAAM,EAAE,MAAM,CAAC,CAQrB"}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"clustering-coefficient.unit.test.d.ts","sourceRoot":"","sources":["../../src/utils/clustering-coefficient.unit.test.ts"],"names":[],"mappings":""}
|
|
@@ -0,0 +1,58 @@
|
|
|
/**
 * Entropy computation utilities for graph analysis.
 *
 * Shannon entropy measures uncertainty or randomness in a distribution.
 * Used in EDGE and HAE algorithms for heterogeneity-aware expansion.
 *
 * @packageDocumentation
 */
/**
 * Compute Shannon entropy of a probability distribution.
 *
 * Shannon entropy is defined as:
 * H(X) = -Σ p(x) × log₂(p(x))
 *
 * A uniform distribution has maximum entropy.
 * A deterministic distribution (all probability on one value) has zero entropy.
 *
 * @param probabilities - Array of probabilities (should sum to 1)
 * @returns Entropy in bits (log base 2), or 0 if probabilities are invalid
 */
export declare function shannonEntropy(probabilities: readonly number[]): number;
/**
 * Compute normalised entropy (entropy divided by maximum possible entropy).
 *
 * Normalised entropy is in [0, 1], where:
 * - 0 means the distribution is deterministic (all mass on one value)
 * - 1 means the distribution is uniform (maximum uncertainty)
 *
 * This is useful for comparing entropy across distributions with different
 * numbers of possible values.
 *
 * @param probabilities - Array of probabilities (should sum to 1)
 * @returns Normalised entropy in [0, 1], or 0 if only one category
 */
export declare function normalisedEntropy(probabilities: readonly number[]): number;
/**
 * Compute entropy from a frequency count.
 *
 * Converts counts to probabilities and then computes entropy.
 * This is a convenience function when you have raw counts rather than
 * normalised probabilities.
 *
 * @param counts - Array of frequency counts
 * @returns Entropy in bits
 */
export declare function entropyFromCounts(counts: readonly number[]): number;
/**
 * Compute local type entropy for a node's neighbours.
 *
 * This measures the diversity of types among a node's neighbours.
 * High entropy = heterogeneous neighbourhood (diverse types).
 * Low entropy = homogeneous neighbourhood (similar types).
 *
 * NOTE(review): returns a normalised value per the declaration below;
 * exact handling of an empty neighbourTypes array is not visible here —
 * confirm against src/utils/entropy.ts.
 *
 * @param neighbourTypes - Array of type labels for neighbours
 * @returns Normalised entropy in [0, 1]
 */
export declare function localTypeEntropy(neighbourTypes: readonly string[]): number;
//# sourceMappingURL=entropy.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"entropy.d.ts","sourceRoot":"","sources":["../../src/utils/entropy.ts"],"names":[],"mappings":"AAAA;;;;;;;GAOG;AAEH;;;;;;;;;;;GAWG;AACH,wBAAgB,cAAc,CAAC,aAAa,EAAE,SAAS,MAAM,EAAE,GAAG,MAAM,CAcvE;AAED;;;;;;;;;;;;GAYG;AACH,wBAAgB,iBAAiB,CAAC,aAAa,EAAE,SAAS,MAAM,EAAE,GAAG,MAAM,CAa1E;AAED;;;;;;;;;GASG;AACH,wBAAgB,iBAAiB,CAAC,MAAM,EAAE,SAAS,MAAM,EAAE,GAAG,MAAM,CAYnE;AAED;;;;;;;;;GASG;AACH,wBAAgB,gBAAgB,CAAC,cAAc,EAAE,SAAS,MAAM,EAAE,GAAG,MAAM,CAwB1E"}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"entropy.unit.test.d.ts","sourceRoot":"","sources":["../../src/utils/entropy.unit.test.ts"],"names":[],"mappings":""}
|
|
@@ -0,0 +1,13 @@
|
|
|
// CommonJS entry point for the utils module: re-exports the clustering and
// entropy helpers from the shared utils chunk, and the k-means helpers from
// the kmeans chunk.
Object.defineProperty(exports, Symbol.toStringTag, { value: "Module" });
const require_kmeans = require("../kmeans-B0HEOU6k.cjs");
const require_utils = require("../utils-spZa1ZvS.cjs");
exports.approximateClusteringCoefficient = require_utils.approximateClusteringCoefficient;
exports.batchClusteringCoefficients = require_utils.batchClusteringCoefficients;
exports.entropyFromCounts = require_utils.entropyFromCounts;
exports.localClusteringCoefficient = require_utils.localClusteringCoefficient;
exports.localTypeEntropy = require_utils.localTypeEntropy;
exports.miniBatchKMeans = require_kmeans.miniBatchKMeans;
exports.normaliseFeatures = require_kmeans.normaliseFeatures;
exports.normalisedEntropy = require_utils.normalisedEntropy;
exports.shannonEntropy = require_utils.shannonEntropy;
// NOTE(review): zScoreNormalise is published as an alias of
// normaliseFeatures (the kmeans chunk exposes no separate zScoreNormalise
// binding) — confirm this aliasing is intentional.
exports.zScoreNormalise = require_kmeans.normaliseFeatures;
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
/**
 * Utility functions module.
 *
 * Re-exports clustering-coefficient, K-means, and entropy helpers.
 *
 * @packageDocumentation
 */
// Clustering-coefficient computation (exact, sampled, and batched).
export { localClusteringCoefficient, approximateClusteringCoefficient, batchClusteringCoefficients, } from './clustering-coefficient';
// Mini-batch K-means and feature normalisation (zScoreNormalise aliases normaliseFeatures).
export { miniBatchKMeans, normaliseFeatures, zScoreNormalise, type FeatureVector3D, type LabelledFeature, type KMeansResult, type KMeansOptions, } from './kmeans';
// Shannon-entropy utilities.
export { shannonEntropy, normalisedEntropy, entropyFromCounts, localTypeEntropy, } from './entropy';
//# sourceMappingURL=index.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/utils/index.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,OAAO,EACN,0BAA0B,EAC1B,gCAAgC,EAChC,2BAA2B,GAC3B,MAAM,0BAA0B,CAAC;AAElC,OAAO,EACN,eAAe,EACf,iBAAiB,EACjB,eAAe,EACf,KAAK,eAAe,EACpB,KAAK,eAAe,EACpB,KAAK,YAAY,EACjB,KAAK,aAAa,GAClB,MAAM,UAAU,CAAC;AAElB,OAAO,EACN,cAAc,EACd,iBAAiB,EACjB,iBAAiB,EACjB,gBAAgB,GAChB,MAAM,WAAW,CAAC"}
|
|
@@ -0,0 +1,3 @@
|
|
|
1
|
+
// ESM facade for the utils entry point: re-exports the public API from the
// shared bundled chunks; zScoreNormalise is an alias for normaliseFeatures.
import { n as normaliseFeatures, t as miniBatchKMeans } from "../kmeans-DgbsOznU.js";
import { a as approximateClusteringCoefficient, i as shannonEntropy, n as localTypeEntropy, o as batchClusteringCoefficients, r as normalisedEntropy, s as localClusteringCoefficient, t as entropyFromCounts } from "../utils-Q_akvlMn.js";
export { approximateClusteringCoefficient, batchClusteringCoefficients, entropyFromCounts, localClusteringCoefficient, localTypeEntropy, miniBatchKMeans, normaliseFeatures, normaliseFeatures as zScoreNormalise, normalisedEntropy, shannonEntropy };
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
/**
 * Minimal K-means clustering implementation for GRASP seed selection.
 *
 * This is a lightweight implementation specifically designed for 3D feature vectors
 * used in structural seed selection. For general-purpose clustering, consider
 * using a dedicated library.
 *
 * @packageDocumentation
 */
/**
 * A 3D feature vector representing node structural properties.
 */
export interface FeatureVector3D {
    /** First dimension (e.g., log-degree) */
    readonly f1: number;
    /** Second dimension (e.g., clustering coefficient) */
    readonly f2: number;
    /** Third dimension (e.g., approximate PageRank) */
    readonly f3: number;
}
/**
 * A labelled feature vector with associated node ID.
 */
export interface LabelledFeature extends FeatureVector3D {
    /** Node identifier */
    readonly nodeId: string;
}
/**
 * Result of K-means clustering.
 */
export interface KMeansResult {
    /** Cluster centroids */
    readonly centroids: readonly FeatureVector3D[];
    /** Cluster assignments: nodeId -> cluster index */
    readonly assignments: ReadonlyMap<string, number>;
    /** Number of clusters */
    readonly k: number;
}
/**
 * Options for K-means clustering.
 */
export interface KMeansOptions {
    /** Number of clusters */
    readonly k: number;
    /** Maximum iterations (default: 100) */
    readonly maxIterations?: number;
    /** Convergence threshold (default: 1e-6) */
    readonly tolerance?: number;
    /** Random seed for reproducibility */
    readonly seed?: number;
}
/**
 * Compute the mean of a set of feature vectors.
 * @internal - Used for testing
 */
export declare function _computeMean(vectors: readonly FeatureVector3D[]): FeatureVector3D;
/**
 * Z-score normalise features (zero mean, unit variance).
 * Alias for {@link normaliseFeatures}.
 */
export { normaliseFeatures as zScoreNormalise };
export declare function normaliseFeatures(features: readonly LabelledFeature[]): LabelledFeature[];
/**
 * Mini-batch K-means clustering for 3D feature vectors.
 *
 * Uses Mini-batch K-means for efficiency with large datasets.
 * This is specifically designed for the GRASP seed selection algorithm.
 *
 * @param features - Array of labelled feature vectors
 * @param options - Clustering options
 * @returns Clustering result with centroids and assignments
 */
export declare function miniBatchKMeans(features: readonly LabelledFeature[], options: KMeansOptions): KMeansResult;
//# sourceMappingURL=kmeans.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"kmeans.d.ts","sourceRoot":"","sources":["../../src/utils/kmeans.ts"],"names":[],"mappings":"AAAA;;;;;;;;GAQG;AAEH;;GAEG;AACH,MAAM,WAAW,eAAe;IAC/B,yCAAyC;IACzC,QAAQ,CAAC,EAAE,EAAE,MAAM,CAAC;IACpB,sDAAsD;IACtD,QAAQ,CAAC,EAAE,EAAE,MAAM,CAAC;IACpB,mDAAmD;IACnD,QAAQ,CAAC,EAAE,EAAE,MAAM,CAAC;CACpB;AAED;;GAEG;AACH,MAAM,WAAW,eAAgB,SAAQ,eAAe;IACvD,sBAAsB;IACtB,QAAQ,CAAC,MAAM,EAAE,MAAM,CAAC;CACxB;AAED;;GAEG;AACH,MAAM,WAAW,YAAY;IAC5B,wBAAwB;IACxB,QAAQ,CAAC,SAAS,EAAE,SAAS,eAAe,EAAE,CAAC;IAC/C,mDAAmD;IACnD,QAAQ,CAAC,WAAW,EAAE,WAAW,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAClD,yBAAyB;IACzB,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC;CACnB;AAED;;GAEG;AACH,MAAM,WAAW,aAAa;IAC7B,yBAAyB;IACzB,QAAQ,CAAC,CAAC,EAAE,MAAM,CAAC;IACnB,wCAAwC;IACxC,QAAQ,CAAC,aAAa,CAAC,EAAE,MAAM,CAAC;IAChC,4CAA4C;IAC5C,QAAQ,CAAC,SAAS,CAAC,EAAE,MAAM,CAAC;IAC5B,sCAAsC;IACtC,QAAQ,CAAC,IAAI,CAAC,EAAE,MAAM,CAAC;CACvB;AA4BD;;;GAGG;AACH,wBAAgB,YAAY,CAC3B,OAAO,EAAE,SAAS,eAAe,EAAE,GACjC,eAAe,CAcjB;AAED;;GAEG;AACH,OAAO,EAAE,iBAAiB,IAAI,eAAe,EAAE,CAAC;AAEhD,wBAAgB,iBAAiB,CAChC,QAAQ,EAAE,SAAS,eAAe,EAAE,GAClC,eAAe,EAAE,CAyCnB;AAED;;;;;;;;;GASG;AACH,wBAAgB,eAAe,CAC9B,QAAQ,EAAE,SAAS,eAAe,EAAE,EACpC,OAAO,EAAE,aAAa,GACpB,YAAY,CAiId"}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"kmeans.unit.test.d.ts","sourceRoot":"","sources":["../../src/utils/kmeans.unit.test.ts"],"names":[],"mappings":""}
|
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
//#region src/utils/clustering-coefficient.ts
/**
 * Local clustering coefficient of a single node.
 *
 * Measures how close the node's neighbourhood is to a clique:
 * CC(v) = 2 * |{(u,w) : u,w in N(v), (u,w) in E}| / (deg(v) * (deg(v) - 1))
 *
 * Nodes with fewer than two neighbours have a coefficient of 0.
 *
 * @param graph - The graph to compute on
 * @param nodeId - The node to compute the coefficient for
 * @returns The clustering coefficient in [0, 1], or 0 if undefined
 */
function localClusteringCoefficient(graph, nodeId) {
	const adjacent = [...graph.neighbours(nodeId, "both")];
	const degree = adjacent.length;
	// No pair of neighbours exists, so no triangle can close.
	if (degree < 2) return 0;
	let triangles = 0;
	for (let a = 0; a < adjacent.length; a++) {
		const first = adjacent[a];
		if (first === void 0) continue;
		for (let b = a + 1; b < adjacent.length; b++) {
			const second = adjacent[b];
			if (second === void 0) continue;
			// A triangle exists when the two neighbours share an edge (either direction).
			const connected = graph.getEdge(first, second) !== void 0 || graph.getEdge(second, first) !== void 0;
			if (connected) triangles++;
		}
	}
	// deg * (deg - 1) / 2 distinct neighbour pairs.
	return triangles / (degree * (degree - 1) / 2);
}
|
|
32
|
+
/**
 * Compute approximate local clustering coefficient using sampling.
 *
 * For nodes with many neighbours, this checks only up to `sampleSize`
 * neighbour pairs rather than all pairs. Useful for large graphs where exact
 * computation is expensive.
 *
 * @param graph - The graph to compute on
 * @param nodeId - The node to compute clustering coefficient for
 * @param sampleSize - Maximum number of neighbour pairs to check (default: 100)
 * @returns The approximate clustering coefficient in [0, 1]
 */
function approximateClusteringCoefficient(graph, nodeId, sampleSize = 100) {
	const neighbours = [...graph.neighbours(nodeId, "both")];
	const degree = neighbours.length;
	// Fewer than two neighbours: no triangle is possible.
	if (degree < 2) return 0;
	const possibleTriangles = degree * (degree - 1) / 2;
	// When all pairs fit inside the sampling budget, exact computation is as cheap.
	if (possibleTriangles <= sampleSize) return localClusteringCoefficient(graph, nodeId);
	let triangleCount = 0;
	let sampled = 0;
	for (let i = 0; i < neighbours.length && sampled < sampleSize; i++) {
		const u = neighbours[i];
		if (u === void 0) continue;
		for (let j = i + 1; j < neighbours.length && sampled < sampleSize; j++) {
			const w = neighbours[j];
			if (w === void 0) continue;
			sampled++;
			if (graph.getEdge(u, w) !== void 0 || graph.getEdge(w, u) !== void 0) triangleCount++;
		}
	}
	// The fraction of checked pairs that are connected IS the coefficient
	// estimate. The previous `* (possibleTriangles / possibleTriangles)` factor
	// was a no-op (always 1) that falsely implied extrapolation; removed.
	return triangleCount / sampled;
}
|
|
63
|
+
/**
 * Compute clustering coefficients for multiple nodes efficiently.
 *
 * Reuses neighbour sets to avoid repeated iteration.
 *
 * @param graph - The graph to compute on
 * @param nodeIds - The nodes to compute clustering coefficients for
 * @returns Map from nodeId to clustering coefficient
 */
function batchClusteringCoefficients(graph, nodeIds) {
	// Build [nodeId, coefficient] pairs, then materialise them as a Map.
	const entries = Array.from(nodeIds, (id) => [id, localClusteringCoefficient(graph, id)]);
	return new Map(entries);
}
//#endregion
|
|
78
|
+
//#region src/utils/entropy.ts
/**
 * Entropy computation utilities for graph analysis.
 *
 * Shannon entropy measures uncertainty or randomness in a distribution.
 * Used in EDGE and HAE algorithms for heterogeneity-aware expansion.
 *
 * @packageDocumentation
 */
/**
 * Shannon entropy of a probability distribution, in bits:
 * H(X) = -Σ p(x) × log₂(p(x))
 *
 * A uniform distribution maximises entropy; a point-mass distribution has
 * entropy zero.
 *
 * @param probabilities - Array of probabilities (should sum to 1)
 * @returns Entropy in bits (log base 2), or 0 if probabilities are invalid
 */
function shannonEntropy(probabilities) {
	if (probabilities.length === 0) return 0;
	let bits = 0;
	for (const p of probabilities) {
		// 0 · log₂(0) is taken to be 0, so non-positive entries contribute nothing.
		if (!(p > 0)) continue;
		bits -= p * Math.log2(p);
	}
	return bits;
}
|
|
105
|
+
/**
 * Entropy divided by the maximum entropy achievable over the same number of
 * categories, yielding a value in [0, 1]:
 * - 0: deterministic distribution (all mass on one value)
 * - 1: uniform distribution (maximum uncertainty)
 *
 * Useful for comparing entropy across distributions with different numbers
 * of possible values.
 *
 * @param probabilities - Array of probabilities (should sum to 1)
 * @returns Normalised entropy in [0, 1], or 0 if only one category
 */
function normalisedEntropy(probabilities) {
	const categories = probabilities.length;
	if (categories <= 1) return 0;
	const observed = shannonEntropy(probabilities);
	const maximum = Math.log2(categories);
	// Guard against division by zero (defensive; cannot occur for categories > 1).
	return maximum === 0 ? 0 : observed / maximum;
}
|
|
125
|
+
/**
 * Shannon entropy of a distribution given as raw frequency counts.
 *
 * Normalises the counts into probabilities first, so callers need not do so.
 *
 * @param counts - Array of frequency counts
 * @returns Entropy in bits (0 for empty input or when all counts are zero)
 */
function entropyFromCounts(counts) {
	if (counts.length === 0) return 0;
	const total = counts.reduce((acc, value) => acc + value, 0);
	// A zero total would make every probability undefined; treat as zero entropy.
	return total === 0 ? 0 : shannonEntropy(counts.map((value) => value / total));
}
|
|
141
|
+
/**
 * Type diversity of a node's neighbourhood, as normalised entropy.
 *
 * High values indicate a heterogeneous neighbourhood (many distinct types),
 * low values a homogeneous one (few types dominate).
 *
 * @param neighbourTypes - Array of type labels for neighbours
 * @returns Normalised entropy in [0, 1]
 */
function localTypeEntropy(neighbourTypes) {
	const total = neighbourTypes.length;
	if (total <= 1) return 0;
	// Tally occurrences per type label.
	const occurrences = new Map();
	for (const label of neighbourTypes) {
		occurrences.set(label, (occurrences.get(label) ?? 0) + 1);
	}
	// A single type means zero diversity by definition.
	if (occurrences.size === 1) return 0;
	const distribution = [...occurrences.values()].map((count) => count / total);
	return normalisedEntropy(distribution);
}
//#endregion
|
|
162
|
+
// Minified chunk export map: the single-letter aliases are the names the
// bundler's sibling chunks import; do not rename them by hand.
export { approximateClusteringCoefficient as a, shannonEntropy as i, localTypeEntropy as n, batchClusteringCoefficients as o, normalisedEntropy as r, localClusteringCoefficient as s, entropyFromCounts as t };

//# sourceMappingURL=utils-Q_akvlMn.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"utils-Q_akvlMn.js","names":[],"sources":["../src/utils/clustering-coefficient.ts","../src/utils/entropy.ts"],"sourcesContent":["/**\n * Clustering coefficient computation for graph nodes.\n *\n * The local clustering coefficient measures how close a node's neighbours\n * are to being a complete graph (clique). It is used in SPAN MI variant\n * and GRASP seed selection.\n *\n * @packageDocumentation\n */\n\nimport type { ReadableGraph, NodeId } from \"../graph\";\n\n/**\n * Compute the local clustering coefficient for a single node.\n *\n * The clustering coefficient is defined as:\n * CC(v) = (triangles through v) / (possible triangles)\n * CC(v) = 2 * |{(u,w) : u,w in N(v), (u,w) in E}| / (deg(v) * (deg(v) - 1))\n *\n * For nodes with degree < 2, the clustering coefficient is 0.\n *\n * @param graph - The graph to compute on\n * @param nodeId - The node to compute clustering coefficient for\n * @returns The clustering coefficient in [0, 1], or 0 if undefined\n */\nexport function localClusteringCoefficient(\n\tgraph: ReadableGraph,\n\tnodeId: NodeId,\n): number {\n\tconst neighbours = [...graph.neighbours(nodeId, \"both\")];\n\tconst degree = neighbours.length;\n\n\t// Nodes with degree < 2 have no possible triangles\n\tif (degree < 2) {\n\t\treturn 0;\n\t}\n\n\t// Count actual triangles: pairs of neighbours that are connected\n\tlet triangleCount = 0;\n\n\tfor (let i = 0; i < neighbours.length; i++) {\n\t\tconst u = neighbours[i];\n\t\tif (u === undefined) continue;\n\n\t\tfor (let j = i + 1; j < neighbours.length; j++) {\n\t\t\tconst w = neighbours[j];\n\t\t\tif (w === undefined) continue;\n\n\t\t\t// Check if u and w are connected\n\t\t\tif (\n\t\t\t\tgraph.getEdge(u, w) !== undefined ||\n\t\t\t\tgraph.getEdge(w, u) !== undefined\n\t\t\t) {\n\t\t\t\ttriangleCount++;\n\t\t\t}\n\t\t}\n\t}\n\n\t// Possible triangles: deg * (deg - 1) / 2 pairs\n\t// We multiply by 2 because each triangle is counted once\n\tconst possibleTriangles = (degree * 
(degree - 1)) / 2;\n\n\treturn triangleCount / possibleTriangles;\n}\n\n/**\n * Compute approximate local clustering coefficient using sampling.\n *\n * For nodes with many neighbours, this samples neighbour pairs rather than\n * checking all pairs. Useful for large graphs where exact computation is expensive.\n *\n * @param graph - The graph to compute on\n * @param nodeId - The node to compute clustering coefficient for\n * @param sampleSize - Maximum number of neighbour pairs to check (default: 100)\n * @returns The approximate clustering coefficient in [0, 1]\n */\nexport function approximateClusteringCoefficient(\n\tgraph: ReadableGraph,\n\tnodeId: NodeId,\n\tsampleSize = 100,\n): number {\n\tconst neighbours = [...graph.neighbours(nodeId, \"both\")];\n\tconst degree = neighbours.length;\n\n\tif (degree < 2) {\n\t\treturn 0;\n\t}\n\n\tconst possibleTriangles = (degree * (degree - 1)) / 2;\n\n\t// If all pairs can be checked within sample limit, use exact computation\n\tif (possibleTriangles <= sampleSize) {\n\t\treturn localClusteringCoefficient(graph, nodeId);\n\t}\n\n\t// Sample pairs uniformly\n\tlet triangleCount = 0;\n\tlet sampled = 0;\n\n\t// Use reservoir sampling style approach for pair selection\n\tfor (let i = 0; i < neighbours.length && sampled < sampleSize; i++) {\n\t\tconst u = neighbours[i];\n\t\tif (u === undefined) continue;\n\n\t\tfor (let j = i + 1; j < neighbours.length && sampled < sampleSize; j++) {\n\t\t\tconst w = neighbours[j];\n\t\t\tif (w === undefined) continue;\n\n\t\t\t// Decide whether to include this pair based on remaining budget\n\t\t\tsampled++;\n\n\t\t\t// Check if u and w are connected\n\t\t\tif (\n\t\t\t\tgraph.getEdge(u, w) !== undefined ||\n\t\t\t\tgraph.getEdge(w, u) !== undefined\n\t\t\t) {\n\t\t\t\ttriangleCount++;\n\t\t\t}\n\t\t}\n\t}\n\n\t// Extrapolate from sample\n\treturn (triangleCount / sampled) * (possibleTriangles / possibleTriangles);\n}\n\n/**\n * Compute clustering coefficients for multiple nodes 
efficiently.\n *\n * Reuses neighbour sets to avoid repeated iteration.\n *\n * @param graph - The graph to compute on\n * @param nodeIds - The nodes to compute clustering coefficients for\n * @returns Map from nodeId to clustering coefficient\n */\nexport function batchClusteringCoefficients(\n\tgraph: ReadableGraph,\n\tnodeIds: readonly NodeId[],\n): Map<NodeId, number> {\n\tconst results = new Map<NodeId, number>();\n\n\tfor (const nodeId of nodeIds) {\n\t\tresults.set(nodeId, localClusteringCoefficient(graph, nodeId));\n\t}\n\n\treturn results;\n}\n","/**\n * Entropy computation utilities for graph analysis.\n *\n * Shannon entropy measures uncertainty or randomness in a distribution.\n * Used in EDGE and HAE algorithms for heterogeneity-aware expansion.\n *\n * @packageDocumentation\n */\n\n/**\n * Compute Shannon entropy of a probability distribution.\n *\n * Shannon entropy is defined as:\n * H(X) = -Σ p(x) × log₂(p(x))\n *\n * A uniform distribution has maximum entropy.\n * A deterministic distribution (all probability on one value) has zero entropy.\n *\n * @param probabilities - Array of probabilities (should sum to 1)\n * @returns Entropy in bits (log base 2), or 0 if probabilities are invalid\n */\nexport function shannonEntropy(probabilities: readonly number[]): number {\n\tif (probabilities.length === 0) {\n\t\treturn 0;\n\t}\n\n\tlet entropy = 0;\n\tfor (const p of probabilities) {\n\t\t// Skip zero probabilities (log(0) is undefined, but 0 * log(0) = 0)\n\t\tif (p > 0) {\n\t\t\tentropy -= p * Math.log2(p);\n\t\t}\n\t}\n\n\treturn entropy;\n}\n\n/**\n * Compute normalised entropy (entropy divided by maximum possible entropy).\n *\n * Normalised entropy is in [0, 1], where:\n * - 0 means the distribution is deterministic (all mass on one value)\n * - 1 means the distribution is uniform (maximum uncertainty)\n *\n * This is useful for comparing entropy across distributions with different\n * numbers of possible values.\n *\n * @param probabilities - 
Array of probabilities (should sum to 1)\n * @returns Normalised entropy in [0, 1], or 0 if only one category\n */\nexport function normalisedEntropy(probabilities: readonly number[]): number {\n\tif (probabilities.length <= 1) {\n\t\treturn 0;\n\t}\n\n\tconst H = shannonEntropy(probabilities);\n\tconst Hmax = Math.log2(probabilities.length);\n\n\tif (Hmax === 0) {\n\t\treturn 0;\n\t}\n\n\treturn H / Hmax;\n}\n\n/**\n * Compute entropy from a frequency count.\n *\n * Converts counts to probabilities and then computes entropy.\n * This is a convenience function when you have raw counts rather than\n * normalised probabilities.\n *\n * @param counts - Array of frequency counts\n * @returns Entropy in bits\n */\nexport function entropyFromCounts(counts: readonly number[]): number {\n\tif (counts.length === 0) {\n\t\treturn 0;\n\t}\n\n\tconst total = counts.reduce((sum, c) => sum + c, 0);\n\tif (total === 0) {\n\t\treturn 0;\n\t}\n\n\tconst probabilities = counts.map((c) => c / total);\n\treturn shannonEntropy(probabilities);\n}\n\n/**\n * Compute local type entropy for a node's neighbours.\n *\n * This measures the diversity of types among a node's neighbours.\n * High entropy = heterogeneous neighbourhood (diverse types).\n * Low entropy = homogeneous neighbourhood (similar types).\n *\n * @param neighbourTypes - Array of type labels for neighbours\n * @returns Normalised entropy in [0, 1]\n */\nexport function localTypeEntropy(neighbourTypes: readonly string[]): number {\n\tif (neighbourTypes.length <= 1) {\n\t\treturn 0;\n\t}\n\n\t// Count occurrences of each type\n\tconst typeCounts = new Map<string, number>();\n\tfor (const t of neighbourTypes) {\n\t\ttypeCounts.set(t, (typeCounts.get(t) ?? 
0) + 1);\n\t}\n\n\t// If all neighbours are the same type, entropy is 0\n\tif (typeCounts.size === 1) {\n\t\treturn 0;\n\t}\n\n\t// Convert to probability array\n\tconst probabilities: number[] = [];\n\tconst total = neighbourTypes.length;\n\tfor (const count of typeCounts.values()) {\n\t\tprobabilities.push(count / total);\n\t}\n\n\treturn normalisedEntropy(probabilities);\n}\n"],"mappings":";;;;;;;;;;;;;;AAyBA,SAAgB,2BACf,OACA,QACS;CACT,MAAM,aAAa,CAAC,GAAG,MAAM,WAAW,QAAQ,OAAO,CAAC;CACxD,MAAM,SAAS,WAAW;AAG1B,KAAI,SAAS,EACZ,QAAO;CAIR,IAAI,gBAAgB;AAEpB,MAAK,IAAI,IAAI,GAAG,IAAI,WAAW,QAAQ,KAAK;EAC3C,MAAM,IAAI,WAAW;AACrB,MAAI,MAAM,KAAA,EAAW;AAErB,OAAK,IAAI,IAAI,IAAI,GAAG,IAAI,WAAW,QAAQ,KAAK;GAC/C,MAAM,IAAI,WAAW;AACrB,OAAI,MAAM,KAAA,EAAW;AAGrB,OACC,MAAM,QAAQ,GAAG,EAAE,KAAK,KAAA,KACxB,MAAM,QAAQ,GAAG,EAAE,KAAK,KAAA,EAExB;;;CAOH,MAAM,oBAAqB,UAAU,SAAS,KAAM;AAEpD,QAAO,gBAAgB;;;;;;;;;;;;;AAcxB,SAAgB,iCACf,OACA,QACA,aAAa,KACJ;CACT,MAAM,aAAa,CAAC,GAAG,MAAM,WAAW,QAAQ,OAAO,CAAC;CACxD,MAAM,SAAS,WAAW;AAE1B,KAAI,SAAS,EACZ,QAAO;CAGR,MAAM,oBAAqB,UAAU,SAAS,KAAM;AAGpD,KAAI,qBAAqB,WACxB,QAAO,2BAA2B,OAAO,OAAO;CAIjD,IAAI,gBAAgB;CACpB,IAAI,UAAU;AAGd,MAAK,IAAI,IAAI,GAAG,IAAI,WAAW,UAAU,UAAU,YAAY,KAAK;EACnE,MAAM,IAAI,WAAW;AACrB,MAAI,MAAM,KAAA,EAAW;AAErB,OAAK,IAAI,IAAI,IAAI,GAAG,IAAI,WAAW,UAAU,UAAU,YAAY,KAAK;GACvE,MAAM,IAAI,WAAW;AACrB,OAAI,MAAM,KAAA,EAAW;AAGrB;AAGA,OACC,MAAM,QAAQ,GAAG,EAAE,KAAK,KAAA,KACxB,MAAM,QAAQ,GAAG,EAAE,KAAK,KAAA,EAExB;;;AAMH,QAAQ,gBAAgB,WAAY,oBAAoB;;;;;;;;;;;AAYzD,SAAgB,4BACf,OACA,SACsB;CACtB,MAAM,0BAAU,IAAI,KAAqB;AAEzC,MAAK,MAAM,UAAU,QACpB,SAAQ,IAAI,QAAQ,2BAA2B,OAAO,OAAO,CAAC;AAG/D,QAAO;;;;;;;;;;;;;;;;;;;;;;;;AC3HR,SAAgB,eAAe,eAA0C;AACxE,KAAI,cAAc,WAAW,EAC5B,QAAO;CAGR,IAAI,UAAU;AACd,MAAK,MAAM,KAAK,cAEf,KAAI,IAAI,EACP,YAAW,IAAI,KAAK,KAAK,EAAE;AAI7B,QAAO;;;;;;;;;;;;;;;AAgBR,SAAgB,kBAAkB,eAA0C;AAC3E,KAAI,cAAc,UAAU,EAC3B,QAAO;CAGR,MAAM,IAAI,eAAe,cAAc;CACvC,MAAM,OAAO,KAAK,KAAK,cAAc,OAAO;AAE5C,KAAI,SAAS,EACZ,QAAO;AAGR,QAAO,IAAI;;;;;;;;;;;;AAaZ,SAAgB,kBAAkB,QAAmC;AACpE,KAAI,OAAO
,WAAW,EACrB,QAAO;CAGR,MAAM,QAAQ,OAAO,QAAQ,KAAK,MAAM,MAAM,GAAG,EAAE;AACnD,KAAI,UAAU,EACb,QAAO;AAIR,QAAO,eADe,OAAO,KAAK,MAAM,IAAI,MAAM,CACd;;;;;;;;;;;;AAarC,SAAgB,iBAAiB,gBAA2C;AAC3E,KAAI,eAAe,UAAU,EAC5B,QAAO;CAIR,MAAM,6BAAa,IAAI,KAAqB;AAC5C,MAAK,MAAM,KAAK,eACf,YAAW,IAAI,IAAI,WAAW,IAAI,EAAE,IAAI,KAAK,EAAE;AAIhD,KAAI,WAAW,SAAS,EACvB,QAAO;CAIR,MAAM,gBAA0B,EAAE;CAClC,MAAM,QAAQ,eAAe;AAC7B,MAAK,MAAM,SAAS,WAAW,QAAQ,CACtC,eAAc,KAAK,QAAQ,MAAM;AAGlC,QAAO,kBAAkB,cAAc"}
|