vue-hook-optimizer 0.0.82 → 0.0.84
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +177 -22
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +35 -7
- package/dist/index.d.ts +35 -7
- package/dist/index.js +177 -22
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.cjs
CHANGED
|
@@ -1691,17 +1691,164 @@ function getMermaidText(graph, nodesUsedInTemplate, nodesUsedInStyle = /* @__PUR
|
|
|
1691
1691
|
|
|
1692
1692
|
//#endregion
|
|
1693
1693
|
//#region src/suggest/community.ts
|
|
1694
|
-
|
|
1695
|
-
|
|
1694
|
+
// Prefixes commonly prepended to identifiers (verbs / boolean markers).
const COMMON_PREFIXES = [
	"handle",
	"on",
	"is",
	"has",
	"can",
	"should",
	"get",
	"set",
	"update",
	"toggle",
	"reset",
	"clear",
	"init",
	"fetch",
	"load",
	"save",
	"delete",
	"remove",
	"add",
	"create",
	"show",
	"hide",
	"open",
	"close",
	"enable",
	"disable",
	"validate",
	"check",
	"use"
];
// Suffixes commonly appended to identifiers (event/state/data nouns).
const COMMON_SUFFIXES = [
	"change",
	"changed",
	"handler",
	"callback",
	"listener",
	"state",
	"value",
	"data",
	"list",
	"items",
	"count",
	"index",
	"id",
	"name",
	"type",
	"status",
	"error",
	"loading",
	"visible",
	"disabled",
	"enabled",
	"active",
	"selected",
	"checked",
	"open",
	"closed"
];
/**
* Extract the base/root word(s) from an identifier by removing at most one
* common prefix (at the start) and one common suffix (at the end).
* e.g., "handleOpenChange" -> ["open"]
*       "isVisible" -> ["visible"]  (suffix stripped, fallback keeps it)
*       "userName" -> ["user"]      ("name" is a common suffix and is stripped)
* @param {string} identifier - camelCase / snake_case / kebab-case identifier.
* @returns {string[]} lowercase base words; empty only when `identifier`
*   contains no word tokens at all.
*/
function extractBaseWords(identifier) {
	// Insert "_" at camelCase boundaries (incl. acronym boundaries like
	// "XMLHttp" -> "XML_Http"), then split on _, - and whitespace.
	const tokens = identifier.replace(/([a-z])([A-Z])/g, "$1_$2").replace(/([A-Z]+)([A-Z][a-z])/g, "$1_$2").toLowerCase().split(/[_\-\s]+/).filter(Boolean);
	if (tokens.length === 0) return [];
	// A single token is its own base word, even if it is a known prefix/suffix.
	if (tokens.length === 1) return tokens;
	let start = 0;
	if (COMMON_PREFIXES.includes(tokens[0])) start = 1;
	let end = tokens.length;
	if (COMMON_SUFFIXES.includes(tokens[tokens.length - 1])) end = tokens.length - 1;
	const words = tokens.slice(start, end);
	// If stripping removed everything (e.g. "isOpen": prefix "is" removed and
	// "open" is also a suffix), fall back to the tokens after the prefix so we
	// never return an empty result for a multi-token identifier.
	// (The original `start > 0 ? start : 0` was a no-op ternary: it is always `start`.)
	if (words.length === 0) return tokens.slice(start);
	return words;
}
|
|
1772
|
+
/**
* Calculate semantic similarity between two labels using pre-computed base words.
* Scoring, in priority order:
*   1. identical labels -> 1;
*   2. one lowercased label contains the other -> length ratio (shorter/longer);
*   3. otherwise the Jaccard index of the two base-word sets.
* @param {string} labelA
* @param {string} labelB
* @param {string[]} wordsA - cached base words of labelA.
* @param {string[]} wordsB - cached base words of labelB.
* @returns {number} similarity in [0, 1].
*/
function calculateSemanticSimilarityCached(labelA, labelB, wordsA, wordsB) {
	// Exact match is maximally similar.
	if (labelA === labelB) return 1;
	const a = labelA.toLowerCase();
	const b = labelB.toLowerCase();
	// Substring containment: score by how much of the longer label is covered.
	if (a.includes(b) || b.includes(a)) return Math.min(a.length, b.length) / Math.max(a.length, b.length);
	// Without base words on both sides there is nothing left to compare.
	if (!wordsA.length || !wordsB.length) return 0;
	// Jaccard index over the unique base words: |A ∩ B| / |A ∪ B|.
	const uniqueA = new Set(wordsA);
	const uniqueB = new Set(wordsB);
	let shared = 0;
	uniqueA.forEach((word) => {
		if (uniqueB.has(word)) shared += 1;
	});
	return shared === 0 ? 0 : shared / (uniqueA.size + uniqueB.size - shared);
}
|
|
1793
|
+
/**
* Build a weighted, symmetric adjacency map that combines structural
* connections with semantic (identifier-name) similarity.
*
* Optimized algorithm:
* 1. Base words per node are computed once and cached.
* 2. Nodes are bucketed by shared base word, and only nodes within the same
*    bucket are compared. This reduces O(N²) to O(B × K²) where B = number of
*    buckets and K = average nodes per bucket.
*
* NOTE(review): the `typeof … === "number"` guards mean semantic similarity
* only *reinforces* pairs that already have a structural edge; it never adds a
* brand-new edge — confirm that is the intended behavior.
*
* @param {Map} graph - node -> iterable of edges ({ node }) adjacency.
* @param {object} [options]
* @param {number} [options.semanticWeight=1] - scale factor for semantic weights; 0 skips the semantic pass.
* @param {number} [options.similarityThreshold=0.3] - minimum similarity (exclusive) required to apply a semantic weight.
* @returns {Map} node -> Map(neighbor -> weight), with weights capped at 2.
*/
function buildWeightedGraph(graph, options = {}) {
	const { semanticWeight = 1, similarityThreshold = .3 } = options;
	const STRUCTURAL_WEIGHT = .85;
	const MAX_WEIGHT = 2;
	const weighted = /* @__PURE__ */ new Map();
	const allNodes = /* @__PURE__ */ new Set();
	// Collect every node that appears as a source or as an edge target.
	for (const [source, edges] of graph) {
		allNodes.add(source);
		for (const { node: target } of edges) allNodes.add(target);
	}
	for (const n of allNodes) weighted.set(n, /* @__PURE__ */ new Map());
	// Raise the weight of a directed pair, never lowering an existing weight.
	const reinforce = (from, to, w) => {
		const prev = weighted.get(from).get(to) ?? 0;
		weighted.get(from).set(to, Math.max(prev, w));
	};
	// Structural edges are symmetric: record both directions.
	for (const [source, edges] of graph) {
		for (const { node: target } of edges) {
			reinforce(source, target, STRUCTURAL_WEIGHT);
			reinforce(target, source, STRUCTURAL_WEIGHT);
		}
	}
	if (semanticWeight > 0) {
		const wordsOf = /* @__PURE__ */ new Map();
		const buckets = /* @__PURE__ */ new Map();
		// Cache base words and bucket nodes by each base word they contain.
		for (const n of allNodes) {
			const baseWords = extractBaseWords(n.label);
			wordsOf.set(n, baseWords);
			for (const word of baseWords) {
				const bucket = buckets.get(word);
				if (bucket) bucket.add(n);
				else buckets.set(word, /* @__PURE__ */ new Set([n]));
			}
		}
		// Nodes sharing several words appear in several buckets; dedupe pairs.
		const seenPairs = /* @__PURE__ */ new Set();
		for (const bucket of buckets.values()) {
			if (bucket.size < 2) continue;
			const members = Array.from(bucket);
			for (let i = 0; i < members.length; i++) {
				for (let j = i + 1; j < members.length; j++) {
					const nodeA = members[i];
					const nodeB = members[j];
					const pairKey = [nodeA.label, nodeB.label].sort().join("|");
					if (seenPairs.has(pairKey)) continue;
					seenPairs.add(pairKey);
					const similarity = calculateSemanticSimilarityCached(nodeA.label, nodeB.label, wordsOf.get(nodeA), wordsOf.get(nodeB));
					if (similarity > similarityThreshold) {
						const semanticEdgeWeight = similarity * semanticWeight;
						// Only strengthen edges that already exist structurally; cap at MAX_WEIGHT.
						const currentAB = weighted.get(nodeA).get(nodeB);
						if (typeof currentAB === "number") weighted.get(nodeA).set(nodeB, Math.min(Math.max(currentAB, semanticEdgeWeight), MAX_WEIGHT));
						const currentBA = weighted.get(nodeB).get(nodeA);
						if (typeof currentBA === "number") weighted.get(nodeB).set(nodeA, Math.min(Math.max(currentBA, semanticEdgeWeight), MAX_WEIGHT));
					}
				}
			}
		}
	}
	return weighted;
}
|
|
1706
1853
|
function createSeededRandom(seed) {
|
|
1707
1854
|
if (seed === void 0) return Math.random;
|
|
@@ -1720,20 +1867,28 @@ function shuffleArray(array, random = Math.random) {
|
|
|
1720
1867
|
return result;
|
|
1721
1868
|
}
|
|
1722
1869
|
/**
|
|
1723
|
-
* Label Propagation Algorithm for community detection.
|
|
1870
|
+
* Label Propagation Algorithm for community detection with semantic awareness.
|
|
1724
1871
|
*
|
|
1725
1872
|
* Each node starts with its own unique label. In each iteration,
|
|
1726
|
-
* nodes adopt the most frequent label among their neighbors
|
|
1727
|
-
*
|
|
1873
|
+
* nodes adopt the most frequent label among their neighbors,
|
|
1874
|
+
* weighted by both structural connections and semantic similarity.
|
|
1875
|
+
*
|
|
1876
|
+
* Semantic similarity considers:
|
|
1877
|
+
* - Shared base words (e.g., "open" in "isOpen" and "handleOpenChange")
|
|
1878
|
+
* - Substring relationships
|
|
1879
|
+
* - Common naming patterns (handler/state pairs)
|
|
1728
1880
|
*
|
|
1729
1881
|
* This helps identify groups of nodes that are tightly connected
|
|
1730
1882
|
* and could potentially be extracted into separate hooks.
|
|
1731
1883
|
*/
|
|
1732
1884
|
function detectCommunities(graph, options = {}) {
|
|
1733
|
-
const { maxIterations = 100 } = options;
|
|
1885
|
+
const { maxIterations = 100, semanticWeight = 1, similarityThreshold = .3 } = options;
|
|
1734
1886
|
const random = createSeededRandom(options.seed);
|
|
1735
|
-
const
|
|
1736
|
-
|
|
1887
|
+
const weightedGraph = buildWeightedGraph(graph, {
|
|
1888
|
+
semanticWeight,
|
|
1889
|
+
similarityThreshold
|
|
1890
|
+
});
|
|
1891
|
+
const nodes = Array.from(weightedGraph.keys());
|
|
1737
1892
|
if (nodes.length === 0) return {
|
|
1738
1893
|
communities: [],
|
|
1739
1894
|
nodeToCommuntiy: /* @__PURE__ */ new Map()
|
|
@@ -1749,19 +1904,19 @@ function detectCommunities(graph, options = {}) {
|
|
|
1749
1904
|
iterations++;
|
|
1750
1905
|
const shuffledNodes = shuffleArray(nodes, random);
|
|
1751
1906
|
for (const node of shuffledNodes) {
|
|
1752
|
-
const neighbors =
|
|
1907
|
+
const neighbors = weightedGraph.get(node);
|
|
1753
1908
|
if (!neighbors || neighbors.size === 0) continue;
|
|
1754
|
-
const
|
|
1755
|
-
for (const neighbor of neighbors) {
|
|
1909
|
+
const labelWeights = /* @__PURE__ */ new Map();
|
|
1910
|
+
for (const [neighbor, weight] of neighbors) {
|
|
1756
1911
|
const neighborLabel = labels.get(neighbor);
|
|
1757
|
-
|
|
1912
|
+
labelWeights.set(neighborLabel, (labelWeights.get(neighborLabel) || 0) + weight);
|
|
1758
1913
|
}
|
|
1759
|
-
let
|
|
1914
|
+
let maxWeight = 0;
|
|
1760
1915
|
let maxLabels = [];
|
|
1761
|
-
for (const [label,
|
|
1762
|
-
|
|
1916
|
+
for (const [label, weight] of labelWeights) if (weight > maxWeight) {
|
|
1917
|
+
maxWeight = weight;
|
|
1763
1918
|
maxLabels = [label];
|
|
1764
|
-
} else if (
|
|
1919
|
+
} else if (weight === maxWeight) maxLabels.push(label);
|
|
1765
1920
|
const currentLabel = labels.get(node);
|
|
1766
1921
|
if (maxLabels.includes(currentLabel)) continue;
|
|
1767
1922
|
const newLabel = maxLabels[Math.floor(random() * maxLabels.length)];
|