skill-any-code 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +48 -0
- package/dist/cli.js +319 -0
- package/dist/index.js +22 -0
- package/jest.config.js +27 -0
- package/package.json +59 -0
- package/src/adapters/command.schemas.ts +21 -0
- package/src/application/analysis.app.service.ts +272 -0
- package/src/application/bootstrap.ts +35 -0
- package/src/application/services/llm.analysis.service.ts +237 -0
- package/src/cli.ts +297 -0
- package/src/common/config.ts +209 -0
- package/src/common/constants.ts +8 -0
- package/src/common/errors.ts +34 -0
- package/src/common/logger.ts +82 -0
- package/src/common/types.ts +385 -0
- package/src/common/ui.ts +228 -0
- package/src/common/utils.ts +81 -0
- package/src/domain/index.ts +1 -0
- package/src/domain/interfaces.ts +188 -0
- package/src/domain/services/analysis.service.ts +735 -0
- package/src/domain/services/incremental.service.ts +50 -0
- package/src/index.ts +6 -0
- package/src/infrastructure/blacklist.service.ts +37 -0
- package/src/infrastructure/cache/file.hash.cache.ts +119 -0
- package/src/infrastructure/git/git.service.ts +120 -0
- package/src/infrastructure/git.service.ts +121 -0
- package/src/infrastructure/index.service.ts +94 -0
- package/src/infrastructure/llm/llm.usage.tracker.ts +65 -0
- package/src/infrastructure/llm/openai.client.ts +162 -0
- package/src/infrastructure/llm/prompt.template.ts +175 -0
- package/src/infrastructure/llm.service.ts +70 -0
- package/src/infrastructure/skill/skill.generator.ts +53 -0
- package/src/infrastructure/skill/templates/resolve.script.ts +97 -0
- package/src/infrastructure/skill/templates/skill.md.template.ts +45 -0
- package/src/infrastructure/splitter/code.splitter.ts +176 -0
- package/src/infrastructure/storage.service.ts +413 -0
- package/src/infrastructure/worker-pool/parse.worker.impl.ts +135 -0
- package/src/infrastructure/worker-pool/parse.worker.ts +9 -0
- package/src/infrastructure/worker-pool/worker-pool.service.ts +173 -0
- package/tsconfig.json +24 -0
- package/tsconfig.test.json +5 -0
package/README.md
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
# 🛠️ Skill Any Code (sac)
|
|
2
|
+
|
|
3
|
+
**Redefines codebase indexing by Agent Skills**.
|
|
4
|
+
|
|
5
|
+
**Skill Any Code** is a high-performance CLI tool designed to transform complex code repositories into "Skill Maps" that Large Language Models (LLMs) and AI Agents can actually navigate. By generating natural language summaries and adhering to the standard **Agent Skills** protocol, it enables LLMs to explore code through **Progressive Disclosure**. No more context-window bloat; no more AI hallucinations caused by overwhelming raw source code.
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
## ✨ Why Skill Any Code?
|
|
9
|
+
|
|
10
|
+
* **🗺️ Progressive Understanding, Zero Context Bloat**: Traditional methods "dump" the entire codebase into the LLM, wasting tokens and causing confusion. **sac** generates hierarchical Markdown summaries, allowing the LLM to navigate layer-by-layer—only diving deep when necessary.
|
|
11
|
+
* **🤖 Native Agent Skills Support**: Automatically generates `SKILL.md` and routing scripts compatible with the [Agent Skills](https://agentskills.io/) specification. Works out-of-the-box with AI-native editors like Cursor, Claude Code, and GitHub Copilot.
|
|
12
|
+
* **🧠 Natural Language Indexing**: Translates dry code files into rich descriptions—including overviews, core purposes, key functions, and class definitions—dramatically increasing the LLM's reasoning accuracy.
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
## 📦 Quick Start
|
|
16
|
+
|
|
17
|
+
### 1. Install the CLI:
|
|
18
|
+
|
|
19
|
+
```bash
|
|
20
|
+
npm i -g skill-any-code
|
|
21
|
+
|
|
22
|
+
sac init
|
|
23
|
+
# Config your LLM API Key in ~/.config/skill-any-code/config.yaml
|
|
24
|
+
```
|
|
25
|
+
|
|
26
|
+
### 2. Generate Summaries:
|
|
27
|
+
|
|
28
|
+
```bash
|
|
29
|
+
cd <your_project_path>
|
|
30
|
+
|
|
31
|
+
# Start generating summaries
|
|
32
|
+
sac
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
## 🚀 How it Works
|
|
37
|
+
|
|
38
|
+
1. **Bottom-Up Recursive Analysis**: The CLI starts from the deepest files in your project. It uses LLM power to extract **classes, functions, and core logic** from raw code. As it moves up, it aggregates these snippets into high-level directory summaries, ensuring every layer of your project has a concise "executive summary."
|
|
39
|
+
2. **Semantic Indexing**: The process results in a structured Markdown knowledge base. This is not just a file list; it’s a **Semantic Index** where top-level folders explain "Why" and "What," while leaf-node files explain "How."
|
|
40
|
+
3. **Skill Injection**: **sac** deploys a `SKILL.md` and a cross-platform routing script (`get_summary.py`) to your root. This grants any AI Agent the "Skill" to navigate your codebase intelligently without reading every line of code.
|
|
41
|
+
4. **Progressive Discovery**: Instead of a "Big Bang" context dump, the AI explores your repo layer-by-layer:
|
|
42
|
+
* **Identify**: Check the root summary to find the relevant module.
|
|
43
|
+
* **Drill-down**: Use the Skill script to fetch sub-directory summaries.
|
|
44
|
+
* **Locate**: Reach the target file summary with surgical precision.
|
|
45
|
+
|
|
46
|
+
<p align="center">
|
|
47
|
+
<img src="docs/readme_img.png" alt="Skill Any Code Bottom-Up Parsing and Navigation Workflow" width="90%" />
|
|
48
|
+
</p>
|
package/dist/cli.js
ADDED
|
@@ -0,0 +1,319 @@
|
|
|
1
|
+
#!/usr/bin/env node
"use strict";
// --- TypeScript-generated CommonJS interop helpers (emitted by tsc) ---
// These shims back the `import * as` / `import default` forms used by the
// TypeScript source; they are compiler output and should not be hand-edited.
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
      desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
    Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
    o["default"] = v;
});
var __importStar = (this && this.__importStar) || (function () {
    var ownKeys = function(o) {
        ownKeys = Object.getOwnPropertyNames || function (o) {
            var ar = [];
            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
            return ar;
        };
        return ownKeys(o);
    };
    return function (mod) {
        if (mod && mod.__esModule) return mod;
        var result = {};
        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
        __setModuleDefault(result, mod);
        return result;
    };
})();
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
// --- Dependencies ---
// Third-party: commander (CLI framework), picocolors (terminal colors), fs-extra.
const commander_1 = require("commander");
const picocolors_1 = __importDefault(require("picocolors"));
const fs_extra_1 = __importDefault(require("fs-extra"));
const path_1 = __importDefault(require("path"));
// Version string read from the package manifest for `--version`.
const package_json_1 = require("../package.json");
// Project-local layers: application service, config manager, logger, CLI
// renderer, error types, and shared constants.
const analysis_app_service_1 = require("./application/analysis.app.service");
const config_1 = require("./common/config");
const logger_1 = require("./common/logger");
const ui_1 = require("./common/ui");
const errors_1 = require("./common/errors");
const constants_1 = require("./common/constants");
// Root commander program; subcommands (`init`, `resolve`) and the default
// analyze action are registered on it below.
const program = new commander_1.Command();
|
|
52
|
+
// Main command configuration: name, version, and all top-level flags.
program
    .name('skill-any-code')
    .alias('sac')
    .description('Skill Any Code: a CLI for large codebase understanding and analysis')
    .version(package_json_1.version, '-v, --version', 'Show version number')
    .helpOption('-h, --help', 'Show help information')
    .option('--log-level <level>', 'Log level: debug/info/warn/error')
    .option('--path <path>', 'Project root path to analyze', process.cwd())
    .option('-m, --mode <mode>', 'Analysis mode: full/incremental/auto', 'auto')
    .option('-d, --depth <number>', 'Max directory depth (-1 = unlimited)', '-1')
    .option('-C, --concurrency <number>', 'Max concurrent workers (default: CPU*2, capped by analyze.max_concurrency)')
    .option('--output-dir <path>', 'Custom output directory for results')
    .option('--skills-providers <list>', 'Comma-separated AI tool providers (opencode/cursor/claude/codex)')
    .option('--no-skills', 'Skip skill generation')
    // LLM-related options. Commander invokes the coercion function as
    // (value, previous); passing parseInt/parseFloat directly would forward
    // `previous` as the second argument (the radix, for parseInt), so wrap
    // them and pin an explicit base-10 radix.
    .option('--llm-base-url <url>', 'LLM API base URL')
    .option('--llm-api-key <key>', 'LLM API key')
    .option('--llm-model <model>', 'LLM model name')
    .option('--llm-temperature <number>', 'LLM temperature (0-2)', (value) => Number.parseFloat(value))
    .option('--llm-max-tokens <number>', 'LLM max output tokens', (value) => Number.parseInt(value, 10))
    .option('--llm-timeout <ms>', 'LLM request timeout (ms)', (value) => Number.parseInt(value, 10))
    .option('--llm-max-retries <number>', 'LLM max retries', (value) => Number.parseInt(value, 10))
    .option('--llm-retry-delay <ms>', 'LLM retry delay (ms)', (value) => Number.parseInt(value, 10))
    .option('--llm-context-window-size <number>', 'LLM context window size', (value) => Number.parseInt(value, 10))
    .option('--no-llm-cache', 'Disable LLM result cache')
    .option('--llm-cache-dir <path>', 'LLM cache directory')
    .option('--clear-cache', 'Clear existing LLM cache before analyzing');
|
|
80
|
+
// `init` subcommand: explicitly create or reset the config file (V2.5).
program
    .command('init')
    .description('Initialize or reset the config file')
    .action(async () => {
    try {
        const resolvedPath = '~/.config/skill-any-code/config.yaml';
        // Expand only a LEADING "~". A plain String.replace('~', ...) would
        // substitute the first "~" appearing anywhere in the path; this regex
        // matches the same tilde-expansion pattern used for the cache dir.
        const fsPath = resolvedPath.replace(/^~(?=\/|\\|$)/, process.env.HOME || process.env.USERPROFILE || '');
        // Check for a pre-existing file only to pick the right success message.
        const exists = await fs_extra_1.default.pathExists(fsPath);
        await config_1.configManager.init();
        logger_1.logger.success(exists ? `Config file reset: ${fsPath}` : `Config file created: ${fsPath}`);
        process.exit(0);
    }
    catch (error) {
        logger_1.logger.error('Failed to initialize config', error);
        process.exit(1);
    }
});
|
|
98
|
+
// Default action: runs the full analysis when no subcommand is given.
program.action(async () => {
    const os = require('os');
    try {
        // Load configuration (V2.5: fail fast when the config is not yet
        // initialized, telling the user to run `init` first).
        let config;
        try {
            config = await config_1.configManager.load();
        }
        catch (e) {
            if (e instanceof errors_1.AppError && e.code === errors_1.ErrorCode.CONFIG_NOT_INITIALIZED) {
                process.stderr.write(`Config is not initialized. Run "skill-any-code init" to create: ~/.config/skill-any-code/config.yaml\n`);
                process.exit(1);
                return;
            }
            throw e;
        }
        // Apply --log-level only when it was actually supplied.
        const cliLogLevel = program.opts().logLevel;
        if (cliLogLevel) {
            logger_1.logger.setLevel(cliLogLevel);
        }
        // Overlay LLM-related CLI flags on top of the loaded configuration.
        // String options are truthy-checked; numeric/boolean options are
        // checked against `undefined` so 0/false are still applied.
        const options = program.opts();
        if (options.llmBaseUrl)
            config.llm.base_url = options.llmBaseUrl;
        if (options.llmApiKey)
            config.llm.api_key = options.llmApiKey;
        if (options.llmModel)
            config.llm.model = options.llmModel;
        if (options.llmTemperature !== undefined)
            config.llm.temperature = options.llmTemperature;
        if (options.llmMaxTokens !== undefined)
            config.llm.max_tokens = options.llmMaxTokens;
        if (options.llmTimeout !== undefined)
            config.llm.timeout = options.llmTimeout;
        if (options.llmMaxRetries !== undefined)
            config.llm.max_retries = options.llmMaxRetries;
        if (options.llmRetryDelay !== undefined)
            config.llm.retry_delay = options.llmRetryDelay;
        if (options.llmContextWindowSize !== undefined)
            config.llm.context_window_size = options.llmContextWindowSize;
        if (options.llmCache !== undefined)
            config.llm.cache_enabled = options.llmCache;
        if (options.llmCacheDir)
            config.llm.cache_dir = options.llmCacheDir;
        // Handle --clear-cache: wipe the on-disk LLM result cache before analyzing.
        if (options.clearCache) {
            const { FileHashCache } = await Promise.resolve().then(() => __importStar(require('./infrastructure/cache/file.hash.cache')));
            const homeDir = os.homedir();
            const cache = new FileHashCache({
                // Expand a leading "~" in the configured cache dir.
                cacheDir: config.llm.cache_dir.replace(/^~(?=\/|\\|$)/, homeDir),
                maxSizeMb: config.llm.cache_max_size_mb,
            });
            await cache.clear();
            logger_1.logger.info('LLM cache cleared');
        }
        // Default concurrency: CPU*2 (DEFAULT_CONCURRENCY), capped by the
        // configured analyze.max_concurrency when that cap is positive.
        const cpuBasedConcurrency = constants_1.DEFAULT_CONCURRENCY;
        const configuredMax = Number(config.analyze.max_concurrency ?? constants_1.DEFAULT_CONCURRENCY);
        const defaultConcurrency = configuredMax > 0 ? Math.min(cpuBasedConcurrency, configuredMax) : cpuBasedConcurrency;
        const analysisParams = {
            path: options.path,
            mode: options.mode,
            depth: Number(options.depth),
            concurrency: options.concurrency !== undefined ? Number(options.concurrency) : defaultConcurrency,
            outputDir: options.outputDir || config.global.output_dir,
            llmConfig: config.llm,
            skillsProviders: options.skillsProviders
                ? options.skillsProviders.split(',').map((s) => s.trim().toLowerCase())
                : undefined,
            // commander's --no-skills flag surfaces as options.skills === false.
            noSkills: options.skills === false,
        };
        // V2.5: validate LLM connectivity/config BEFORE analyzing; exit
        // immediately on failure (requirements 13.4.2 / test ST-LLM-CONNECT-001).
        const { OpenAIClient } = await Promise.resolve().then(() => __importStar(require('./infrastructure/llm/openai.client')));
        const llmClient = new OpenAIClient(config.llm);
        logger_1.logger.info(`LLM client initialized. Testing connectivity and config (url=${config.llm.base_url}, model=${config.llm.model})`);
        try {
            await llmClient.testConnection(config.llm);
        }
        catch (e) {
            const detail = e?.message || String(e);
            process.stderr.write(`LLM connectivity/config validation failed: ${detail}\n`);
            process.exit(1);
        }
        const analysisService = new analysis_app_service_1.AnalysisAppService();
        // For the lifetime of the analysis, route all logger output through the
        // CLI renderer so it stays pinned below the progress block, instead of
        // spawning extra progress/object/token display areas.
        logger_1.logger.setSink((line) => {
            ui_1.cliRenderer.logBelow(line);
        });
        // Wire progress callbacks into the CLI renderer.
        const paramsWithProgress = {
            ...analysisParams,
            onTotalKnown: (total) => {
                ui_1.cliRenderer.setTotal(total);
            },
            onProgress: (done, total, current) => {
                ui_1.cliRenderer.updateProgress(done, total, current?.path, analysisParams.concurrency);
            },
            onTokenUsageSnapshot: (stats) => {
                ui_1.cliRenderer.updateTokens(stats);
            },
            onScanProgress: (scannedFiles) => {
                ui_1.cliRenderer.updateScanProgress(scannedFiles);
            },
        };
        let result;
        // Run the analysis (V2.4+: no interactive error handling in the CLI;
        // all LLM errors are raised uniformly by the application layer).
        result = await analysisService.runAnalysis(paramsWithProgress);
        if (result.success) {
            const files = result.data?.analyzedFilesCount || 0;
            const dirs = result.data?.analyzedDirsCount || 0;
            const objects = files + dirs;
            logger_1.logger.success(`Analysis completed. Processed ${objects} object(s)`);
            const summaryPath = result.data?.summaryPath || '';
            const summaryLabel = summaryPath ? `Entry file: ${path_1.default.basename(summaryPath)}` : 'Entry file: index.md';
            logger_1.logger.success(`Project analysis result. ${summaryLabel}`);
            const usage = result.data?.tokenUsage;
            if (usage) {
                logger_1.logger.info(`LLM calls: ${usage.totalCalls}, prompt tokens: ${usage.totalPromptTokens}, ` +
                    `completion tokens: ${usage.totalCompletionTokens}, total tokens: ${usage.totalTokens}`);
            }
        }
        else {
            logger_1.logger.error(`Analysis failed: ${result.message}`);
            if (result.errors && result.errors.length > 0) {
                result.errors.forEach(err => logger_1.logger.error(`- ${err.path}: ${err.message}`));
            }
            process.exit(1);
        }
    }
    catch (error) {
        const err = error;
        // V2.5: on LLM connectivity/config validation failures, emit one
        // uniformly prefixed message (ST-LLM-CONNECT-001/002/003).
        if (err && err.code && (err.code === errors_1.ErrorCode.LLM_INVALID_CONFIG ||
            err.code === errors_1.ErrorCode.LLM_CALL_FAILED ||
            err.code === errors_1.ErrorCode.LLM_TIMEOUT)) {
            const detail = err.message || '';
            process.stderr.write(`LLM connectivity/config validation failed: ${detail}\n`);
        }
        else if (err instanceof errors_1.AppError) {
            logger_1.logger.error(`Execution failed: ${err.message}`, err);
        }
        else {
            logger_1.logger.error('Execution failed', err);
        }
        process.exit(1);
    }
});
|
|
246
|
+
// `resolve` subcommand (V2.6: derive the analysis-result Markdown path from a
// relative path, without relying on an index).
program
    .command('resolve')
    .description('Resolve the analysis result Markdown path for a file/directory')
    .argument('<relative-path>', 'Relative path of the file/directory (from project root)')
    .option('-p, --project <path>', 'Project root path', process.cwd())
    .action(async (relativePath, options) => {
    try {
        // Apply --log-level only when it was actually supplied; this mirrors
        // the guard in the default action and avoids calling setLevel(undefined)
        // when the global flag is absent.
        const cliLogLevel = program.opts().logLevel;
        if (cliLogLevel) {
            logger_1.logger.setLevel(cliLogLevel);
        }
        const projectRoot = options.project || process.cwd();
        const DEFAULT_OUTPUT_DIR = '.skill-any-code-result';
        // Normalize the user-supplied path to a POSIX-style relative path,
        // remembering whether it carried a trailing slash (a directory hint).
        const normalizeRel = (input) => {
            const raw = (input || '').trim();
            const rawPosix = raw.replace(/\\/g, '/');
            const rawHadTrailingSlash = rawPosix.endsWith('/') && rawPosix.length > 1;
            let rel = rawPosix;
            while (rel.startsWith('./'))
                rel = rel.slice(2);
            if (rel.endsWith('/') && rel.length > 1)
                rel = rel.slice(0, -1);
            if (rel === '')
                rel = '.';
            return { rel, rawHadTrailingSlash };
        };
        const { rel, rawHadTrailingSlash } = normalizeRel(relativePath);
        const targetAbs = path_1.default.resolve(projectRoot, rel);
        // Nonexistent targets resolve to "N/A" with a zero exit code.
        if (!(await fs_extra_1.default.pathExists(targetAbs))) {
            process.stdout.write('N/A\n');
            process.exit(0);
            return;
        }
        const stat = await fs_extra_1.default.stat(targetAbs);
        const isDir = stat.isDirectory() || rawHadTrailingSlash || rel === '.';
        let mdRel;
        if (isDir) {
            // Directories map to "<dir>/index.md" under the output dir.
            mdRel =
                rel === '.'
                    ? path_1.default.posix.join(DEFAULT_OUTPUT_DIR, 'index.md')
                    : path_1.default.posix.join(DEFAULT_OUTPUT_DIR, rel, 'index.md');
        }
        else {
            // Files map to "<name>.md"; a literal "index.<ext>" file keeps its
            // extension ("index.<ext>.md") so it cannot collide with the
            // directory-level index.md.
            const parsed = path_1.default.posix.parse(rel);
            const dirPart = parsed.dir;
            const name = parsed.name === 'index' && parsed.ext
                ? `index${parsed.ext}.md`
                : `${parsed.name}.md`;
            mdRel = dirPart ? path_1.default.posix.join(DEFAULT_OUTPUT_DIR, dirPart, name) : path_1.default.posix.join(DEFAULT_OUTPUT_DIR, name);
        }
        const mdAbs = path_1.default.resolve(projectRoot, mdRel);
        if (await fs_extra_1.default.pathExists(mdAbs)) {
            process.stdout.write(mdRel + '\n');
        }
        else {
            process.stdout.write('N/A\n');
        }
        process.exit(0);
    }
    catch (error) {
        const msg = error instanceof Error ? error.message : String(error);
        process.stderr.write(`Resolve failed: ${msg}\n`);
        process.exit(1);
    }
});
|
|
309
|
+
// Global help/error behavior, then kick off argument parsing.
program.showSuggestionAfterError();
program.configureHelp({ sortSubcommands: true, sortOptions: true });
// Top-level rejection handler: print the failure in red and exit non-zero.
void program.parseAsync(process.argv).catch((error) => {
    console.error(picocolors_1.default.red(`\nExecution failed: ${error.message}`));
    process.exit(1);
});
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
3
|
+
if (k2 === undefined) k2 = k;
|
|
4
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
5
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
6
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
7
|
+
}
|
|
8
|
+
Object.defineProperty(o, k2, desc);
|
|
9
|
+
}) : (function(o, m, k, k2) {
|
|
10
|
+
if (k2 === undefined) k2 = k;
|
|
11
|
+
o[k2] = m[k];
|
|
12
|
+
}));
|
|
13
|
+
var __exportStar = (this && this.__exportStar) || function(m, exports) {
|
|
14
|
+
for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
|
|
15
|
+
};
|
|
16
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
17
|
+
__exportStar(require("./common/types"), exports);
|
|
18
|
+
__exportStar(require("./common/errors"), exports);
|
|
19
|
+
__exportStar(require("./domain"), exports);
|
|
20
|
+
__exportStar(require("./application/analysis.app.service"), exports);
|
|
21
|
+
__exportStar(require("./infrastructure/git.service"), exports);
|
|
22
|
+
__exportStar(require("./infrastructure/storage.service"), exports);
|
package/jest.config.js
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
/** @type {import('jest').Config} */
module.exports = {
  preset: 'ts-jest',
  testEnvironment: 'node',
  rootDir: '.',
  // On Windows, heavy file/subprocess/temp-dir churn easily triggers EBUSY;
  // running test files serially is more stable.
  maxWorkers: 1,
  testMatch: [
    '<rootDir>/tests/**/*.test.ts'
  ],
  moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'],
  // Compile .ts/.tsx test inputs with ts-jest against the test tsconfig.
  transform: {
    '^.+\\.tsx?$': ['ts-jest', { tsconfig: 'tsconfig.test.json' }]
  },
  collectCoverageFrom: [
    'src/**/*.ts',
    '!src/**/*.d.ts'
  ],
  coverageDirectory: '<rootDir>/tests/coverage',
  coverageReporters: ['text', 'lcov', 'html'],
  cacheDirectory: '<rootDir>/tests/.cache/jest',
  testTimeout: 300000, // 5-minute timeout to accommodate large-project tests
  // Legacy OpenCode/Skill test helpers are deprecated (V2.x uses the CLI plus
  // an LLM mock server instead).
  // Map the "@/..." path alias onto src/ to match tsconfig paths.
  moduleNameMapper: {
    '^@/(.*)$': '<rootDir>/src/$1'
  }
};
|
package/package.json
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "skill-any-code",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "Skill Any Code: CLI tool for large codebase understanding and analysis",
|
|
5
|
+
"main": "dist/index.js",
|
|
6
|
+
"bin": {
|
|
7
|
+
"skill-any-code": "dist/cli.js",
|
|
8
|
+
"sac": "dist/cli.js"
|
|
9
|
+
},
|
|
10
|
+
"scripts": {
|
|
11
|
+
"build": "tsc",
|
|
12
|
+
"dev": "tsc --watch",
|
|
13
|
+
"start": "node dist/cli.js",
|
|
14
|
+
"test": "jest"
|
|
15
|
+
},
|
|
16
|
+
"keywords": [
|
|
17
|
+
"code-analysis",
|
|
18
|
+
"cli",
|
|
19
|
+
"code-understanding",
|
|
20
|
+
"project-analyzer"
|
|
21
|
+
],
|
|
22
|
+
"author": "",
|
|
23
|
+
"license": "MIT",
|
|
24
|
+
"dependencies": {
|
|
25
|
+
"@types/mustache": "^4.2.6",
|
|
26
|
+
"cli-progress": "^3.12.0",
|
|
27
|
+
"commander": "^14.0.3",
|
|
28
|
+
"crypto-js": "^4.2.0",
|
|
29
|
+
"fs-extra": "^11.3.4",
|
|
30
|
+
"ignore": "^5.3.0",
|
|
31
|
+
"inquirer": "^13.3.0",
|
|
32
|
+
"js-yaml": "^4.1.1",
|
|
33
|
+
"mustache": "^4.2.0",
|
|
34
|
+
"openai": "^6.27.0",
|
|
35
|
+
"picocolors": "^1.1.1",
|
|
36
|
+
"simple-git": "^3.22.0",
|
|
37
|
+
"workerpool": "^9.1.0",
|
|
38
|
+
"zod": "^3.22.4"
|
|
39
|
+
},
|
|
40
|
+
"devDependencies": {
|
|
41
|
+
"@types/cli-progress": "^3.11.6",
|
|
42
|
+
"@types/crypto-js": "^4.2.2",
|
|
43
|
+
"@types/fs-extra": "^11.0.4",
|
|
44
|
+
"@types/inquirer": "^9.0.9",
|
|
45
|
+
"@types/jest": "^29.5.11",
|
|
46
|
+
"@types/js-yaml": "^4.0.9",
|
|
47
|
+
"@types/node": "^20.11.0",
|
|
48
|
+
"@types/pidusage": "^2.0.5",
|
|
49
|
+
"@types/workerpool": "^6.4.7",
|
|
50
|
+
"jest": "^29.7.0",
|
|
51
|
+
"pidusage": "^4.0.1",
|
|
52
|
+
"ts-jest": "^29.4.6",
|
|
53
|
+
"typescript": "^5.3.3"
|
|
54
|
+
},
|
|
55
|
+
"directories": {
|
|
56
|
+
"test": "tests"
|
|
57
|
+
},
|
|
58
|
+
"type": "commonjs"
|
|
59
|
+
}
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
import { z } from 'zod'
|
|
2
|
+
|
|
3
|
+
export const AnalyzeProjectCommandSchema = z.object({
|
|
4
|
+
path: z.string().optional().describe('Project root path to analyze (default: current working directory)'),
|
|
5
|
+
mode: z.enum(['full', 'incremental', 'auto']).default('auto').describe('Analysis mode'),
|
|
6
|
+
depth: z.number().int().min(1).optional().describe('Max directory depth (-1 = unlimited)'),
|
|
7
|
+
concurrency: z.number().int().min(1).optional().describe('Max concurrency (default: CPU cores * 2)'),
|
|
8
|
+
outputDir: z.string().optional().describe('Output directory for analysis results'),
|
|
9
|
+
skillsProviders: z.array(z.string()).optional().describe('AI tool providers to deploy the skill to'),
|
|
10
|
+
noSkills: z.boolean().optional().describe('Skip skill generation')
|
|
11
|
+
})
|
|
12
|
+
|
|
13
|
+
export type AnalyzeProjectCommandParams = z.infer<typeof AnalyzeProjectCommandSchema>
|
|
14
|
+
|
|
15
|
+
export const ProjectCodeQuerySkillSchema = z.object({
|
|
16
|
+
path: z.string().describe('Relative path of the file/directory from the project root'),
|
|
17
|
+
type: z.enum(['summary', 'full', 'diagram']).default('summary').describe('Query type'),
|
|
18
|
+
projectSlug: z.string().optional().describe('Project identifier (default: current project)')
|
|
19
|
+
})
|
|
20
|
+
|
|
21
|
+
export type ProjectCodeQuerySkillParams = z.infer<typeof ProjectCodeQuerySkillSchema>
|