spec-up-t 1.1.52 → 1.1.54
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/assets/js/create-alphabet-index.js +8 -1
- package/assets/js/insert-trefs.js +1 -1
- package/branches.md +42 -0
- package/index.js +9 -1
- package/package.json +7 -1
- package/src/README.md +98 -0
- package/src/collect-external-references.js +108 -31
- package/src/collect-external-references.test.js +152 -0
- package/src/collectExternalReferences/fetchTermsFromGitHubRepository.js +39 -44
- package/src/collectExternalReferences/fetchTermsFromGitHubRepository.test.js +385 -0
- package/src/collectExternalReferences/matchTerm.js +10 -5
- package/src/collectExternalReferences/matchTerm.test.js +30 -0
- package/src/collectExternalReferences/octokitClient.js +96 -0
- package/src/collectExternalReferences/processXTrefsData.js +2 -0
- package/src/create-term-index.js +3 -1
- package/src/create-term-relations.js +3 -1
- package/src/fix-markdown-files.js +2 -1
- package/src/markdown-it-extensions.js +105 -0
- package/src/prepare-tref.js +2 -1
- package/src/utils/file-filter.js +36 -0
- package/src/utils/isLineWithDefinition.js +5 -10
- package/readme.md +0 -10
- package/src/collectExternalReferences/checkRateLimit.js +0 -17
- package/src/collectExternalReferences/setupFetchHeaders.js +0 -14
package/src/create-term-index.js
CHANGED
@@ -15,6 +15,8 @@
  * @since 2024-09-02
  */

+const { shouldProcessFile } = require('./utils/file-filter');
+
 function createTermIndex() {
   const fs = require('fs-extra');
   const path = require('path');
@@ -23,7 +25,7 @@ function createTermIndex() {
   const specTermDirectoryName = config.specs.map(spec => spec.spec_terms_directory);
   const outputPathJSON = path.join('output', 'term-index.json');
   const files = fs.readdirSync(path.join(specDirectories[0], specTermDirectoryName[0]))
-    .filter(
+    .filter(shouldProcessFile);

   const filePaths = files.map(file => specTermDirectoryName[0] + '/' + file);

package/src/create-term-relations.js
CHANGED

@@ -5,9 +5,11 @@
  * @since 2024-06-22
  */

+
 const fs = require('fs-extra');
 const path = require('path');
 const config = fs.readJsonSync('specs.json');
+const { shouldProcessFile } = require('./utils/file-filter');

 const specTermDirectoryName = config.specs.map(spec => spec.spec_directory + '/' + spec.spec_terms_directory);

@@ -41,7 +43,7 @@ function createTermRelations() {
   // read directory
   fs.readdirSync(specDirectory).forEach(file => {
     // read file
-    if (file
+    if (shouldProcessFile(file)) {
       const markdown = fs.readFileSync(`${specDirectory}/${file}`, 'utf8');

       let regexDef = /\[\[def:.*?\]\]/g;
package/src/fix-markdown-files.js
CHANGED

@@ -1,5 +1,6 @@
 const fs = require('fs');
 const path = require('path');
+const { shouldProcessFile } = require('./utils/file-filter');

 // Function to process markdown files in a directory recursively
 function fixMarkdownFiles(directory) {
@@ -15,7 +16,7 @@ function fixMarkdownFiles(directory) {
       if (item.isDirectory()) {
         // If the item is a directory, call processDirectory recursively
         processDirectory(itemPath);
-      } else if (item.isFile() &&
+      } else if (item.isFile() && shouldProcessFile(item.name)) {
         try {
           // Read the file synchronously
           let data = fs.readFileSync(itemPath, 'utf8');
package/src/markdown-it-extensions.js
CHANGED

@@ -94,6 +94,111 @@ module.exports = function (md, templates = {}) {
     if (targetIndex !== -1 && idx > targetIndex && !classAdded) {
       tokens[idx].attrPush(['class', 'terms-and-definitions-list']);
       classAdded = true;
+
+      /* Sort terms and definitions alphabetically
+         Sort dt/dd pairs case-insensitively based on dt content
+
+         1: Token-based Markdown Processing: Spec-Up-T uses a token-based approach to parse and render Markdown. When Markdown is processed, it's converted into a series of tokens that represent different elements (like dt_open, dt_content, dt_close, dd_open, dd_content, dd_close). We're not dealing with simple strings but with structured tokens.
+
+         2: Preserving Relationships: When sorting terms, we need to ensure that each definition term (<dt>) stays connected to its corresponding definition description (<dd>). It's not as simple as sorting an array of strings - we're sorting complex structures.
+
+         3: Implementation Details: The implementation includes:
+
+         - Finding the terminology section in the document
+         - Collecting term starts, ends, and their contents
+         - Creating a sorted index based on case-insensitive comparisons
+         - Rebuilding the token array in the correct order
+         - Ensuring all relationships between terms and definitions are preserved
+         - Handling special cases and edge conditions
+
+         The complexity is unavoidable because:
+
+         - We're working with the markdown-it rendering pipeline, not just manipulating DOM
+         - The terms and definitions exist as tokens before they become HTML
+         - We need to preserve all the token relationships while reordering
+         - We're intercepting the rendering process to modify the token structure
+
+         If we were just sorting DOM elements after the page rendered, it would be simpler. But by doing the sorting during the Markdown processing, we ensure the HTML output is correct from the beginning, which is more efficient and leads to better performance.
+      */
+      let dtStartIndices = [];
+      let dtEndIndices = [];
+      let dtContents = [];
+
+      // First pass: collect all dt blocks and their contents
+      for (let i = idx + 1; i < tokens.length; i++) {
+        if (tokens[i].type === 'dl_close') {
+          break;
+        }
+        if (tokens[i].type === 'dt_open') {
+          const startIdx = i;
+          let content = '';
+
+          // Find the end of this dt block and capture its content
+          for (let j = i + 1; j < tokens.length; j++) {
+            if (tokens[j].type === 'dt_close') {
+              dtStartIndices.push(startIdx);
+              dtEndIndices.push(j);
+              dtContents.push(content.toLowerCase()); // Store lowercase for case-insensitive sorting
+              break;
+            }
+            // Collect the content inside the dt (including spans with term IDs)
+            if (tokens[j].content) {
+              content += tokens[j].content;
+            }
+          }
+        }
+      }
+
+      // Create indices sorted by case-insensitive term content
+      const sortedIndices = dtContents.map((_, idx) => idx)
+        .sort((a, b) => dtContents[a].localeCompare(dtContents[b]));
+
+      // Reorder the tokens based on the sorted indices
+      if (sortedIndices.length > 0) {
+        // Create a new array of tokens
+        const newTokens = tokens.slice(0, idx + 1); // Include dl_open
+
+        // For each dt/dd pair in sorted order
+        for (let i = 0; i < sortedIndices.length; i++) {
+          const originalIndex = sortedIndices[i];
+          const dtStart = dtStartIndices[originalIndex];
+          const dtEnd = dtEndIndices[originalIndex];
+
+          // Add dt tokens
+          for (let j = dtStart; j <= dtEnd; j++) {
+            newTokens.push(tokens[j]);
+          }
+
+          // Find and add dd tokens
+          let ddFound = false;
+          for (let j = dtEnd + 1; j < tokens.length; j++) {
+            if (tokens[j].type === 'dt_open' || tokens[j].type === 'dl_close') {
+              break;
+            }
+            if (tokens[j].type === 'dd_open') {
+              ddFound = true;
+            }
+            if (ddFound) {
+              newTokens.push(tokens[j]);
+              if (tokens[j].type === 'dd_close') {
+                break;
+              }
+            }
+          }
+        }
+
+        // Add the closing dl token
+        for (let i = idx + 1; i < tokens.length; i++) {
+          if (tokens[i].type === 'dl_close') {
+            newTokens.push(tokens[i]);
+            break;
+          }
+        }
+
+        // Replace the old tokens with the new sorted ones
+        tokens.splice(idx, newTokens.length, ...newTokens);
+      }
+      // END Sort terms and definitions alphabetically
     }

     let lastDdIndex = -1;
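The long comment in the hunk above boils down to: collect the dt/dd pairs, sort them by a case-insensitive key, and rebuild the token stream in that order. A minimal sketch of the same sorting idea on plain objects (sample data invented for illustration, not code from the package; the real implementation must do this on markdown-it tokens rather than objects):

```js
// Illustration only: sort term/definition pairs case-insensitively,
// keeping each definition attached to its term (as the token reordering does).
const entries = [
  { term: 'Zebra', definition: 'striped animal' },
  { term: 'apple', definition: 'a fruit' },
  { term: 'Mango', definition: 'another fruit' },
];

const sorted = [...entries].sort((a, b) =>
  a.term.toLowerCase().localeCompare(b.term.toLowerCase())
);

console.log(sorted.map(e => e.term)); // [ 'apple', 'Mango', 'Zebra' ]
```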
package/src/prepare-tref.js
CHANGED
@@ -22,6 +22,7 @@
 const fs = require('fs');
 const path = require('path');
 const dedent = require('dedent');
+const { shouldProcessFile } = require('./utils/file-filter');

 function getLocalXTrefContent(externalSpec, term) {
   const filePath = path.join('output', 'xtrefs-data.json');
@@ -58,7 +59,7 @@ function prepareTref(directory) {
       if (item.isDirectory()) {
         // If the item is a directory, call processDirectory recursively
         processDirectory(itemPath);
-      } else if (item.isFile() &&
+      } else if (item.isFile() && shouldProcessFile(item.name)) {
         try {
           // Read the file synchronously
           let data = fs.readFileSync(itemPath, 'utf8');
package/src/utils/file-filter.js
ADDED

@@ -0,0 +1,36 @@
+/**
+ * @file Utility functions for filtering files consistently across the codebase
+ */
+
+/**
+ * Checks if a file is a Markdown file (ends with .md)
+ * @param {string} filename - The filename to check
+ * @returns {boolean} - True if the file is a Markdown file, false otherwise
+ */
+function isMarkdownFile(filename) {
+  return filename.endsWith('.md');
+}
+
+/**
+ * Checks if a file is hidden/excluded (starts with underscore)
+ * @param {string} filename - The filename to check
+ * @returns {boolean} - True if the file is hidden/excluded, false otherwise
+ */
+function isNotHiddenFile(filename) {
+  return !filename.startsWith('_');
+}
+
+/**
+ * Checks if a file should be processed (is a Markdown file and not hidden)
+ * @param {string} filename - The filename to check
+ * @returns {boolean} - True if the file should be processed, false otherwise
+ */
+function shouldProcessFile(filename) {
+  return isMarkdownFile(filename) && isNotHiddenFile(filename);
+}
+
+module.exports = {
+  isMarkdownFile,
+  isNotHiddenFile,
+  shouldProcessFile
+};
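The new helper centralises the filename checks that the changed files above now delegate to it. A sketch of typical usage, mirroring the readdir-and-filter pattern in create-term-index.js (the directory path is illustrative and the require path assumes the layout shown above):

```js
const fs = require('fs-extra');
const { shouldProcessFile } = require('./utils/file-filter');

// Keeps 'term.md', drops '_draft.md' and 'notes.txt' (hypothetical contents).
const files = fs.readdirSync('spec/terms-definitions').filter(shouldProcessFile);
console.log(files);
```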
package/src/utils/isLineWithDefinition.js
CHANGED

@@ -1,13 +1,8 @@
-
-line
-
-if
-
-return true;
-} else {
-  // console.log('String does not start with `[[def:` or end with `]]`');
-  return false;
-}
+function isLineWithDefinition(line) {
+  if (!line || typeof line !== 'string') return false;
+
+  // Check if the line starts with [[def: and contains ]]
+  return line.startsWith('[[def:') && line.includes(']]');
 }

 exports.isLineWithDefinition = isLineWithDefinition;
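The rewritten predicate is now a plain string check with an input guard. A few example calls (inputs invented for illustration) show the intended behaviour:

```js
const { isLineWithDefinition } = require('./utils/isLineWithDefinition');

isLineWithDefinition('[[def: key event, KEL entry]]'); // true
isLineWithDefinition('An ordinary line of prose.');    // false
isLineWithDefinition(null);                            // false (guarded by the type check)
```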
package/readme.md
DELETED
@@ -1,10 +0,0 @@
-# Spec-Up-T
-
-<div align="center">
-
-  <img src="./static/specup_logo.png">
-
-  <h2 style="display: block; margin: 0 auto; text-align: center;">Markdown » Spec-Up</h2>
-</div>
-
-There is a [special website that documents everything regarding Spec-Up-T](https://blockchainbird.github.io/spec-up-t-website/).
package/src/collectExternalReferences/checkRateLimit.js
DELETED

@@ -1,17 +0,0 @@
-// Function to check the rate limit of the GitHub API
-function checkRateLimit(response) {
-  const remaining = response.headers.get('X-RateLimit-Remaining');
-  const reset = response.headers.get('X-RateLimit-Reset');
-
-  if (response.status === 403 && remaining === '0') {
-    const resetTime = new Date(reset * 1000);
-    console.error(`❌ Github API rate limit exceeded. Try again after ${resetTime}. See https://blockchainbird.github.io/spec-up-t-website/docs/getting-started/github-token for more info.`);
-    return true;
-  } else if (remaining !== null) {
-    console.log(`ℹ️ Github API rate limit: ${remaining} requests remaining. See https://blockchainbird.github.io/spec-up-t-website/docs/getting-started/github-token for more info.`);
-  } else {
-    console.warn(`ℹ️ Unable to determine rate limit status. Check your GitHub API token and network connection.`);
-  }
-  return false;
-}
-exports.checkRateLimit = checkRateLimit;
package/src/collectExternalReferences/setupFetchHeaders.js
DELETED

@@ -1,14 +0,0 @@
-function setupFetchHeaders(GITHUB_API_TOKEN) {
-  const fetchHeaders = {
-    'Accept': 'application/vnd.github.v3+json'
-  };
-
-  if (GITHUB_API_TOKEN) {
-    fetchHeaders['Authorization'] = `token ${GITHUB_API_TOKEN}`;
-  } else {
-    console.log('ℹ️ There is no GitHub token set up. Therefore, you are more likely to be at your limit of GitHub API requests. If you run into the limit, create a token and search the documentation on this topic.');
-  }
-
-  return fetchHeaders;
-}
-exports.setupFetchHeaders = setupFetchHeaders;