skrypt-ai 0.4.2 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/auth/index.d.ts +13 -3
- package/dist/auth/index.js +94 -9
- package/dist/auth/keychain.d.ts +5 -0
- package/dist/auth/keychain.js +82 -0
- package/dist/auth/notices.d.ts +3 -0
- package/dist/auth/notices.js +42 -0
- package/dist/autofix/index.js +10 -3
- package/dist/cli.js +16 -3
- package/dist/commands/generate.js +37 -1
- package/dist/commands/import.d.ts +2 -0
- package/dist/commands/import.js +157 -0
- package/dist/commands/init.js +19 -7
- package/dist/commands/login.js +15 -4
- package/dist/commands/review-pr.js +10 -0
- package/dist/commands/security.d.ts +2 -0
- package/dist/commands/security.js +103 -0
- package/dist/generator/writer.js +12 -3
- package/dist/importers/confluence.d.ts +5 -0
- package/dist/importers/confluence.js +137 -0
- package/dist/importers/detect.d.ts +20 -0
- package/dist/importers/detect.js +121 -0
- package/dist/importers/docusaurus.d.ts +5 -0
- package/dist/importers/docusaurus.js +279 -0
- package/dist/importers/gitbook.d.ts +5 -0
- package/dist/importers/gitbook.js +189 -0
- package/dist/importers/github.d.ts +8 -0
- package/dist/importers/github.js +99 -0
- package/dist/importers/index.d.ts +15 -0
- package/dist/importers/index.js +30 -0
- package/dist/importers/markdown.d.ts +6 -0
- package/dist/importers/markdown.js +105 -0
- package/dist/importers/mintlify.d.ts +5 -0
- package/dist/importers/mintlify.js +172 -0
- package/dist/importers/notion.d.ts +5 -0
- package/dist/importers/notion.js +174 -0
- package/dist/importers/readme.d.ts +5 -0
- package/dist/importers/readme.js +184 -0
- package/dist/importers/transform.d.ts +90 -0
- package/dist/importers/transform.js +457 -0
- package/dist/importers/types.d.ts +37 -0
- package/dist/importers/types.js +1 -0
- package/dist/plugins/index.js +7 -0
- package/dist/scanner/index.js +37 -24
- package/dist/scanner/python.js +17 -0
- package/dist/template/public/search-index.json +1 -1
- package/dist/template/scripts/build-search-index.mjs +67 -9
- package/dist/template/src/lib/search-types.ts +4 -1
- package/dist/template/src/lib/search.ts +30 -7
- package/dist/utils/files.d.ts +9 -1
- package/dist/utils/files.js +59 -10
- package/package.json +4 -1
|
@@ -19,13 +19,15 @@ async function dirExists(dir) {
|
|
|
19
19
|
async function getAllMDXFiles(dir) {
|
|
20
20
|
const files = []
|
|
21
21
|
|
|
22
|
-
async function walk(currentDir) {
|
|
22
|
+
async function walk(currentDir, depth = 0) {
|
|
23
|
+
if (depth > 20) return
|
|
23
24
|
try {
|
|
24
25
|
const entries = await readdir(currentDir, { withFileTypes: true })
|
|
25
26
|
for (const entry of entries) {
|
|
26
27
|
const fullPath = join(currentDir, entry.name)
|
|
28
|
+
if (entry.isSymbolicLink?.()) continue
|
|
27
29
|
if (entry.isDirectory()) {
|
|
28
|
-
await walk(fullPath)
|
|
30
|
+
await walk(fullPath, depth + 1)
|
|
29
31
|
} else if (entry.name.endsWith('.md') || entry.name.endsWith('.mdx')) {
|
|
30
32
|
files.push(fullPath)
|
|
31
33
|
}
|
|
@@ -39,14 +41,30 @@ async function getAllMDXFiles(dir) {
|
|
|
39
41
|
return files
|
|
40
42
|
}
|
|
41
43
|
|
|
44
|
+
/**
|
|
45
|
+
* Extract headings (h2, h3) as a separate searchable field.
|
|
46
|
+
* This lets users find pages by section names.
|
|
47
|
+
*/
|
|
48
|
+
function extractHeadings(content) {
|
|
49
|
+
const headings = []
|
|
50
|
+
const regex = /^#{2,3}\s+(.+)$/gm
|
|
51
|
+
let match
|
|
52
|
+
while ((match = regex.exec(content)) !== null) {
|
|
53
|
+
// Strip any inline formatting
|
|
54
|
+
const clean = match[1].replace(/[*_`[\]]/g, '').trim()
|
|
55
|
+
if (clean) headings.push(clean)
|
|
56
|
+
}
|
|
57
|
+
return headings.join(' | ')
|
|
58
|
+
}
|
|
59
|
+
|
|
42
60
|
function extractPlainText(content) {
|
|
43
61
|
return content
|
|
44
62
|
// Remove import statements
|
|
45
63
|
.replace(/^import\s+.*$/gm, '')
|
|
46
64
|
// Remove export statements
|
|
47
65
|
.replace(/^export\s+.*$/gm, '')
|
|
48
|
-
// Remove MDX/JSX components
|
|
49
|
-
.replace(/<[^>]+>/g, '')
|
|
66
|
+
// Remove MDX/JSX components (but keep text content inside)
|
|
67
|
+
.replace(/<[^>]+>/g, ' ')
|
|
50
68
|
// Remove code blocks
|
|
51
69
|
.replace(/```[\s\S]*?```/g, '')
|
|
52
70
|
.replace(/`[^`]+`/g, '')
|
|
@@ -68,6 +86,35 @@ function extractPlainText(content) {
|
|
|
68
86
|
.trim()
|
|
69
87
|
}
|
|
70
88
|
|
|
89
|
+
/**
|
|
90
|
+
* Extract keywords from content — important terms that appear frequently
|
|
91
|
+
* or in headings, used to boost search relevance.
|
|
92
|
+
*/
|
|
93
|
+
function extractKeywords(content, title) {
|
|
94
|
+
const words = new Map()
|
|
95
|
+
|
|
96
|
+
// Title words get high weight
|
|
97
|
+
for (const word of title.toLowerCase().split(/\s+/)) {
|
|
98
|
+
if (word.length > 2) words.set(word, (words.get(word) || 0) + 3)
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
// Heading words get medium weight
|
|
102
|
+
const headingRegex = /^#{1,3}\s+(.+)$/gm
|
|
103
|
+
let match
|
|
104
|
+
while ((match = headingRegex.exec(content)) !== null) {
|
|
105
|
+
for (const word of match[1].toLowerCase().replace(/[^a-z0-9\s]/g, '').split(/\s+/)) {
|
|
106
|
+
if (word.length > 2) words.set(word, (words.get(word) || 0) + 2)
|
|
107
|
+
}
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
// Sort by weight and return top keywords
|
|
111
|
+
return [...words.entries()]
|
|
112
|
+
.sort((a, b) => b[1] - a[1])
|
|
113
|
+
.slice(0, 20)
|
|
114
|
+
.map(([word]) => word)
|
|
115
|
+
.join(' ')
|
|
116
|
+
}
|
|
117
|
+
|
|
71
118
|
function getSlugFromContentPath(filePath) {
|
|
72
119
|
const rel = relative(CONTENT_DIR, filePath)
|
|
73
120
|
const slug = rel
|
|
@@ -79,8 +126,6 @@ function getSlugFromContentPath(filePath) {
|
|
|
79
126
|
}
|
|
80
127
|
|
|
81
128
|
function getSlugFromAppPath(filePath) {
|
|
82
|
-
// For App Router: src/app/docs/quickstart/page.mdx -> /docs/quickstart
|
|
83
|
-
// src/app/docs/page.mdx -> /docs
|
|
84
129
|
const rel = relative(APP_DOCS_DIR, filePath)
|
|
85
130
|
const dir = dirname(rel)
|
|
86
131
|
const name = basename(rel)
|
|
@@ -99,11 +144,9 @@ function getSlugFromAppPath(filePath) {
|
|
|
99
144
|
}
|
|
100
145
|
|
|
101
146
|
function extractTitle(content, filePath) {
|
|
102
|
-
// Try to get title from first heading
|
|
103
147
|
const h1Match = content.match(/^#\s+(.+)$/m)
|
|
104
148
|
if (h1Match) return h1Match[1].trim()
|
|
105
149
|
|
|
106
|
-
// Derive from file path
|
|
107
150
|
const dir = dirname(filePath)
|
|
108
151
|
const folderName = basename(dir)
|
|
109
152
|
if (folderName && folderName !== 'docs' && folderName !== '.') {
|
|
@@ -122,10 +165,18 @@ async function buildSearchIndex() {
|
|
|
122
165
|
schema: {
|
|
123
166
|
id: 'string',
|
|
124
167
|
title: 'string',
|
|
168
|
+
headings: 'string',
|
|
169
|
+
keywords: 'string',
|
|
125
170
|
content: 'string',
|
|
126
171
|
href: 'string',
|
|
127
172
|
section: 'string',
|
|
128
173
|
},
|
|
174
|
+
components: {
|
|
175
|
+
tokenizer: {
|
|
176
|
+
stemming: true,
|
|
177
|
+
language: 'english',
|
|
178
|
+
},
|
|
179
|
+
},
|
|
129
180
|
})
|
|
130
181
|
|
|
131
182
|
const documents = []
|
|
@@ -141,6 +192,8 @@ async function buildSearchIndex() {
|
|
|
141
192
|
|
|
142
193
|
const title = data.title || extractTitle(content, file)
|
|
143
194
|
const plainContent = extractPlainText(content)
|
|
195
|
+
const headings = extractHeadings(content)
|
|
196
|
+
const keywords = extractKeywords(content, title)
|
|
144
197
|
const href = getSlugFromContentPath(file)
|
|
145
198
|
|
|
146
199
|
if (seen.has(href)) continue
|
|
@@ -149,6 +202,8 @@ async function buildSearchIndex() {
|
|
|
149
202
|
documents.push({
|
|
150
203
|
id: href,
|
|
151
204
|
title,
|
|
205
|
+
headings,
|
|
206
|
+
keywords,
|
|
152
207
|
content: plainContent.slice(0, 5000),
|
|
153
208
|
href,
|
|
154
209
|
section: data.section || data.category || '',
|
|
@@ -165,7 +220,6 @@ async function buildSearchIndex() {
|
|
|
165
220
|
if (await dirExists(APP_DOCS_DIR)) {
|
|
166
221
|
const appFiles = await getAllMDXFiles(APP_DOCS_DIR)
|
|
167
222
|
for (const file of appFiles) {
|
|
168
|
-
// Skip catch-all route files and layout files
|
|
169
223
|
if (file.includes('[...slug]') || file.includes('layout.')) continue
|
|
170
224
|
|
|
171
225
|
try {
|
|
@@ -174,6 +228,8 @@ async function buildSearchIndex() {
|
|
|
174
228
|
|
|
175
229
|
const title = data.title || extractTitle(content, file)
|
|
176
230
|
const plainContent = extractPlainText(content)
|
|
231
|
+
const headings = extractHeadings(content)
|
|
232
|
+
const keywords = extractKeywords(content, title)
|
|
177
233
|
const href = getSlugFromAppPath(file)
|
|
178
234
|
|
|
179
235
|
if (seen.has(href)) continue
|
|
@@ -182,6 +238,8 @@ async function buildSearchIndex() {
|
|
|
182
238
|
documents.push({
|
|
183
239
|
id: href,
|
|
184
240
|
title,
|
|
241
|
+
headings,
|
|
242
|
+
keywords,
|
|
185
243
|
content: plainContent.slice(0, 5000),
|
|
186
244
|
href,
|
|
187
245
|
section: data.section || data.category || '',
|
|
@@ -1,6 +1,5 @@
|
|
|
1
1
|
/**
|
|
2
2
|
* Type definitions for Orama search database
|
|
3
|
-
* Fixes P0: Removes `any` types from search functionality
|
|
4
3
|
*/
|
|
5
4
|
|
|
6
5
|
import type { Orama, Results, SearchParams } from '@orama/orama'
|
|
@@ -9,6 +8,8 @@ import type { Orama, Results, SearchParams } from '@orama/orama'
|
|
|
9
8
|
export interface SearchDocument {
|
|
10
9
|
id: string
|
|
11
10
|
title: string
|
|
11
|
+
headings: string
|
|
12
|
+
keywords: string
|
|
12
13
|
content: string
|
|
13
14
|
href: string
|
|
14
15
|
section: string
|
|
@@ -18,6 +19,8 @@ export interface SearchDocument {
|
|
|
18
19
|
export type SearchDatabase = Orama<{
|
|
19
20
|
id: 'string'
|
|
20
21
|
title: 'string'
|
|
22
|
+
headings: 'string'
|
|
23
|
+
keywords: 'string'
|
|
21
24
|
content: 'string'
|
|
22
25
|
href: 'string'
|
|
23
26
|
section: 'string'
|
|
@@ -7,6 +7,8 @@ let loadPromise: Promise<void> | null = null
|
|
|
7
7
|
const schema = {
|
|
8
8
|
id: 'string' as const,
|
|
9
9
|
title: 'string' as const,
|
|
10
|
+
headings: 'string' as const,
|
|
11
|
+
keywords: 'string' as const,
|
|
10
12
|
content: 'string' as const,
|
|
11
13
|
href: 'string' as const,
|
|
12
14
|
section: 'string' as const,
|
|
@@ -41,7 +43,15 @@ async function loadSearchIndex(): Promise<void> {
|
|
|
41
43
|
return
|
|
42
44
|
}
|
|
43
45
|
|
|
44
|
-
const newDb = await create({
|
|
46
|
+
const newDb = await create({
|
|
47
|
+
schema,
|
|
48
|
+
components: {
|
|
49
|
+
tokenizer: {
|
|
50
|
+
stemming: true,
|
|
51
|
+
language: 'english',
|
|
52
|
+
},
|
|
53
|
+
},
|
|
54
|
+
}) as SearchDatabase
|
|
45
55
|
await load(newDb, data as RawData)
|
|
46
56
|
db = newDb
|
|
47
57
|
} catch (err) {
|
|
@@ -55,10 +65,21 @@ async function loadSearchIndex(): Promise<void> {
|
|
|
55
65
|
}
|
|
56
66
|
|
|
57
67
|
/**
|
|
58
|
-
* Generate a snippet with highlighted search terms
|
|
68
|
+
* Generate a snippet with highlighted search terms, preferring heading matches
|
|
59
69
|
*/
|
|
60
|
-
function generateSnippet(content: string, query: string, maxLength: number = 150): string {
|
|
70
|
+
function generateSnippet(content: string, headings: string, query: string, maxLength: number = 150): string {
|
|
61
71
|
const terms = query.toLowerCase().split(/\s+/).filter(Boolean)
|
|
72
|
+
|
|
73
|
+
// Check if any headings match — show that context first
|
|
74
|
+
if (headings) {
|
|
75
|
+
const headingList = headings.split(' | ')
|
|
76
|
+
for (const heading of headingList) {
|
|
77
|
+
if (terms.some(term => heading.toLowerCase().includes(term))) {
|
|
78
|
+
return heading
|
|
79
|
+
}
|
|
80
|
+
}
|
|
81
|
+
}
|
|
82
|
+
|
|
62
83
|
const contentLower = content.toLowerCase()
|
|
63
84
|
|
|
64
85
|
// Find the best position to start the snippet (where most terms match)
|
|
@@ -110,11 +131,13 @@ export async function search(query: string): Promise<SearchResultWithHighlight[]
|
|
|
110
131
|
try {
|
|
111
132
|
const results = await oramaSearch(db, {
|
|
112
133
|
term: query,
|
|
113
|
-
properties: ['title', 'content'],
|
|
134
|
+
properties: ['title', 'headings', 'keywords', 'content'],
|
|
114
135
|
limit: 10,
|
|
115
|
-
tolerance:
|
|
136
|
+
tolerance: 2,
|
|
116
137
|
boost: {
|
|
117
|
-
title:
|
|
138
|
+
title: 5,
|
|
139
|
+
headings: 3,
|
|
140
|
+
keywords: 2,
|
|
118
141
|
content: 1,
|
|
119
142
|
},
|
|
120
143
|
})
|
|
@@ -124,7 +147,7 @@ export async function search(query: string): Promise<SearchResultWithHighlight[]
|
|
|
124
147
|
href: hit.document.href,
|
|
125
148
|
content: hit.document.content,
|
|
126
149
|
section: hit.document.section || undefined,
|
|
127
|
-
snippet: generateSnippet(hit.document.content, query),
|
|
150
|
+
snippet: generateSnippet(hit.document.content, hit.document.headings, query),
|
|
128
151
|
score: hit.score,
|
|
129
152
|
}))
|
|
130
153
|
} catch (err) {
|
package/dist/utils/files.d.ts
CHANGED
|
@@ -1,9 +1,17 @@
|
|
|
1
1
|
/**
|
|
2
2
|
* Recursively find all .md and .mdx files in a directory.
|
|
3
|
-
* Skips hidden directories and
|
|
3
|
+
* Skips hidden directories, node_modules, and symlinks.
|
|
4
4
|
*/
|
|
5
5
|
export declare function findMdxFiles(dir: string): string[];
|
|
6
6
|
/**
|
|
7
7
|
* Convert string to URL-safe slug
|
|
8
8
|
*/
|
|
9
9
|
export declare function slugify(str: string): string;
|
|
10
|
+
/**
|
|
11
|
+
* Simple YAML frontmatter parser — splits on --- markers.
|
|
12
|
+
* Returns parsed key-value data and remaining content body.
|
|
13
|
+
*/
|
|
14
|
+
export declare function parseFrontmatter(content: string): {
|
|
15
|
+
data: Record<string, unknown>;
|
|
16
|
+
content: string;
|
|
17
|
+
};
|
package/dist/utils/files.js
CHANGED
|
@@ -1,25 +1,41 @@
|
|
|
1
|
-
import { readdirSync, statSync } from 'fs';
|
|
1
|
+
import { readdirSync, statSync, lstatSync } from 'fs';
|
|
2
2
|
import { join, extname } from 'path';
|
|
3
3
|
/**
|
|
4
4
|
* Recursively find all .md and .mdx files in a directory.
|
|
5
|
-
* Skips hidden directories and
|
|
5
|
+
* Skips hidden directories, node_modules, and symlinks.
|
|
6
6
|
*/
|
|
7
7
|
export function findMdxFiles(dir) {
|
|
8
8
|
const files = [];
|
|
9
|
-
function walk(currentDir) {
|
|
10
|
-
|
|
9
|
+
function walk(currentDir, depth) {
|
|
10
|
+
if (depth > 30)
|
|
11
|
+
return;
|
|
12
|
+
let entries;
|
|
13
|
+
try {
|
|
14
|
+
entries = readdirSync(currentDir);
|
|
15
|
+
}
|
|
16
|
+
catch {
|
|
17
|
+
return;
|
|
18
|
+
}
|
|
11
19
|
for (const entry of entries) {
|
|
12
20
|
const fullPath = join(currentDir, entry);
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
21
|
+
try {
|
|
22
|
+
// Skip symlinks to prevent infinite loops
|
|
23
|
+
if (lstatSync(fullPath).isSymbolicLink())
|
|
24
|
+
continue;
|
|
25
|
+
const stat = statSync(fullPath);
|
|
26
|
+
if (stat.isDirectory() && !entry.startsWith('.') && entry !== 'node_modules') {
|
|
27
|
+
walk(fullPath, depth + 1);
|
|
28
|
+
}
|
|
29
|
+
else if (stat.isFile() && (extname(entry) === '.mdx' || extname(entry) === '.md')) {
|
|
30
|
+
files.push(fullPath);
|
|
31
|
+
}
|
|
16
32
|
}
|
|
17
|
-
|
|
18
|
-
|
|
33
|
+
catch {
|
|
34
|
+
continue;
|
|
19
35
|
}
|
|
20
36
|
}
|
|
21
37
|
}
|
|
22
|
-
walk(dir);
|
|
38
|
+
walk(dir, 0);
|
|
23
39
|
return files;
|
|
24
40
|
}
|
|
25
41
|
/**
|
|
@@ -31,3 +47,36 @@ export function slugify(str) {
|
|
|
31
47
|
.replace(/[^a-z0-9]+/g, '-')
|
|
32
48
|
.replace(/^-|-$/g, '');
|
|
33
49
|
}
|
|
50
|
+
/**
|
|
51
|
+
* Simple YAML frontmatter parser — splits on --- markers.
|
|
52
|
+
* Returns parsed key-value data and remaining content body.
|
|
53
|
+
*/
|
|
54
|
+
export function parseFrontmatter(content) {
|
|
55
|
+
const match = content.match(/^---\r?\n([\s\S]*?)\r?\n---\r?\n?([\s\S]*)$/);
|
|
56
|
+
if (!match)
|
|
57
|
+
return { data: {}, content };
|
|
58
|
+
const yamlStr = match[1];
|
|
59
|
+
const body = match[2];
|
|
60
|
+
const data = {};
|
|
61
|
+
for (const line of yamlStr.split('\n')) {
|
|
62
|
+
const kvMatch = line.match(/^(\w[\w-]*)\s*:\s*(.*)$/);
|
|
63
|
+
if (!kvMatch)
|
|
64
|
+
continue;
|
|
65
|
+
const key = kvMatch[1];
|
|
66
|
+
let value = kvMatch[2].trim();
|
|
67
|
+
if (typeof value === 'string') {
|
|
68
|
+
if ((value.startsWith('"') && value.endsWith('"')) ||
|
|
69
|
+
(value.startsWith("'") && value.endsWith("'"))) {
|
|
70
|
+
value = value.slice(1, -1);
|
|
71
|
+
}
|
|
72
|
+
else if (value === 'true')
|
|
73
|
+
value = true;
|
|
74
|
+
else if (value === 'false')
|
|
75
|
+
value = false;
|
|
76
|
+
else if (/^\d+$/.test(value))
|
|
77
|
+
value = parseInt(value, 10);
|
|
78
|
+
}
|
|
79
|
+
data[key] = value;
|
|
80
|
+
}
|
|
81
|
+
return { data, content: body };
|
|
82
|
+
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "skrypt-ai",
|
|
3
|
-
"version": "0.4.2",
|
|
3
|
+
"version": "0.5.0",
|
|
4
4
|
"description": "AI-powered documentation generator with code examples",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "dist/cli.js",
|
|
@@ -54,6 +54,9 @@
|
|
|
54
54
|
"openai": "^6.27.0",
|
|
55
55
|
"typescript": "^5.9.3"
|
|
56
56
|
},
|
|
57
|
+
"optionalDependencies": {
|
|
58
|
+
"@napi-rs/keyring": "^1.1.6"
|
|
59
|
+
},
|
|
57
60
|
"devDependencies": {
|
|
58
61
|
"@eslint/js": "^10.0.1",
|
|
59
62
|
"@types/js-yaml": "^4.0.9",
|