skill-any-code 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/README.md +48 -0
  2. package/dist/cli.js +319 -0
  3. package/dist/index.js +22 -0
  4. package/jest.config.js +27 -0
  5. package/package.json +59 -0
  6. package/src/adapters/command.schemas.ts +21 -0
  7. package/src/application/analysis.app.service.ts +272 -0
  8. package/src/application/bootstrap.ts +35 -0
  9. package/src/application/services/llm.analysis.service.ts +237 -0
  10. package/src/cli.ts +297 -0
  11. package/src/common/config.ts +209 -0
  12. package/src/common/constants.ts +8 -0
  13. package/src/common/errors.ts +34 -0
  14. package/src/common/logger.ts +82 -0
  15. package/src/common/types.ts +385 -0
  16. package/src/common/ui.ts +228 -0
  17. package/src/common/utils.ts +81 -0
  18. package/src/domain/index.ts +1 -0
  19. package/src/domain/interfaces.ts +188 -0
  20. package/src/domain/services/analysis.service.ts +735 -0
  21. package/src/domain/services/incremental.service.ts +50 -0
  22. package/src/index.ts +6 -0
  23. package/src/infrastructure/blacklist.service.ts +37 -0
  24. package/src/infrastructure/cache/file.hash.cache.ts +119 -0
  25. package/src/infrastructure/git/git.service.ts +120 -0
  26. package/src/infrastructure/git.service.ts +121 -0
  27. package/src/infrastructure/index.service.ts +94 -0
  28. package/src/infrastructure/llm/llm.usage.tracker.ts +65 -0
  29. package/src/infrastructure/llm/openai.client.ts +162 -0
  30. package/src/infrastructure/llm/prompt.template.ts +175 -0
  31. package/src/infrastructure/llm.service.ts +70 -0
  32. package/src/infrastructure/skill/skill.generator.ts +53 -0
  33. package/src/infrastructure/skill/templates/resolve.script.ts +97 -0
  34. package/src/infrastructure/skill/templates/skill.md.template.ts +45 -0
  35. package/src/infrastructure/splitter/code.splitter.ts +176 -0
  36. package/src/infrastructure/storage.service.ts +413 -0
  37. package/src/infrastructure/worker-pool/parse.worker.impl.ts +135 -0
  38. package/src/infrastructure/worker-pool/parse.worker.ts +9 -0
  39. package/src/infrastructure/worker-pool/worker-pool.service.ts +173 -0
  40. package/tsconfig.json +24 -0
  41. package/tsconfig.test.json +5 -0
@@ -0,0 +1,162 @@
1
+ import OpenAI from 'openai';
2
+ import { ILLMClient } from '../../domain/interfaces';
3
+ import { LLMConfig, LLMCallOptions, LLMResponse } from '../../common/types';
4
+ import { LLMUsageTracker } from './llm.usage.tracker';
5
+ import { AppError, ErrorCode } from '../../common/errors';
6
+ import { logger } from '../../common/logger';
7
+
8
+ export class OpenAIClient implements ILLMClient {
9
+ private client: OpenAI;
10
+ private config: LLMConfig;
11
+ private tracker?: LLMUsageTracker;
12
+
13
+ constructor(config: LLMConfig, tracker?: LLMUsageTracker) {
14
+ this.config = config;
15
+ this.tracker = tracker;
16
+ this.client = new OpenAI({
17
+ apiKey: config.api_key,
18
+ baseURL: config.base_url,
19
+ timeout: config.timeout,
20
+ dangerouslyAllowBrowser: true,
21
+ });
22
+ }
23
+
24
+ /**
25
+ * 连接可用性校验(V2.5)
26
+ * - 在进入任何解析流程前调用;
27
+ * - 配置不完整或服务不可用时抛出带有明确 ErrorCode 的 AppError。
28
+ */
29
+ async testConnection(config: LLMConfig): Promise<void> {
30
+ // 保持与最新配置一致(允许运行时通过 CLI/环境变量覆盖)
31
+ this.config = config;
32
+ // 基本配置校验:base_url / api_key / model 不能为空
33
+ if (!this.config.base_url || !this.config.api_key || !this.config.model) {
34
+ throw new AppError(
35
+ ErrorCode.LLM_INVALID_CONFIG,
36
+ 'Incomplete LLM config. Please set base_url/api_key/model via config file, env vars, or CLI options.',
37
+ {
38
+ missing: {
39
+ base_url: !this.config.base_url,
40
+ api_key: !this.config.api_key,
41
+ model: !this.config.model,
42
+ },
43
+ },
44
+ );
45
+ }
46
+
47
+ try {
48
+ const res = await this.client.chat.completions.create({
49
+ model: this.config.model,
50
+ temperature: 0,
51
+ max_tokens: 1,
52
+ messages: [{ role: 'system', content: 'health-check' }],
53
+ } as any);
54
+
55
+ const status = (res as any).status ?? 200;
56
+ if (status === 401) {
57
+ throw new AppError(ErrorCode.LLM_CALL_FAILED, 'LLM authentication failed (401)', { status });
58
+ }
59
+ if (status === 404) {
60
+ throw new AppError(ErrorCode.LLM_CALL_FAILED, 'LLM model not found (404)', { status });
61
+ }
62
+ if (status < 200 || status >= 300) {
63
+ throw new AppError(
64
+ ErrorCode.LLM_CALL_FAILED,
65
+ `LLM connectivity check returned non-2xx status: ${status}`,
66
+ { status },
67
+ );
68
+ }
69
+ } catch (e: any) {
70
+ if (e instanceof AppError) {
71
+ throw e;
72
+ }
73
+ const code = e?.code || e?.status;
74
+ if (code === 'ETIMEDOUT') {
75
+ throw new AppError(ErrorCode.LLM_TIMEOUT, 'LLM connectivity check timed out', e);
76
+ }
77
+ if (code === 'ENOTFOUND' || code === 'ECONNREFUSED' || code === 'ECONNRESET') {
78
+ throw new AppError(ErrorCode.LLM_CALL_FAILED, 'Unable to reach LLM service. Check network or base_url.', e);
79
+ }
80
+ throw new AppError(
81
+ ErrorCode.LLM_CALL_FAILED,
82
+ `LLM connectivity check failed: ${e?.message || String(e)}`,
83
+ e,
84
+ );
85
+ }
86
+ }
87
+
88
+ /**
89
+ * 向后兼容旧版本/测试中使用的 connectTest 名称。
90
+ * 内部直接代理到 V2.5 的 testConnection。
91
+ */
92
+ async connectTest(): Promise<void> {
93
+ await this.testConnection(this.config);
94
+ }
95
+
96
+ async call(prompt: string, options?: LLMCallOptions): Promise<LLMResponse> {
97
+ const startTime = Date.now();
98
+ const retries = options?.retries ?? this.config.max_retries;
99
+
100
+ for (let attempt = 0; attempt <= retries; attempt++) {
101
+ try {
102
+ const response = await this.client.chat.completions.create({
103
+ model: options?.model ?? this.config.model,
104
+ temperature: options?.temperature ?? this.config.temperature,
105
+ max_tokens: options?.maxTokens ?? this.config.max_tokens,
106
+ messages: [
107
+ { role: 'user', content: prompt }
108
+ ]
109
+ });
110
+
111
+ const content = response.choices[0].message.content || '';
112
+ const usage = response.usage || { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 };
113
+
114
+ const normalizedUsage = {
115
+ promptTokens: usage.prompt_tokens ?? 0,
116
+ completionTokens: usage.completion_tokens ?? 0,
117
+ totalTokens: usage.total_tokens ?? 0,
118
+ };
119
+
120
+ if (this.tracker) {
121
+ this.tracker.addUsage(normalizedUsage);
122
+ }
123
+
124
+ return {
125
+ content,
126
+ usage: normalizedUsage,
127
+ model: response.model,
128
+ responseTime: Date.now() - startTime,
129
+ };
130
+ } catch (error: any) {
131
+ const errorMessage = error?.message || 'Unknown LLM call error';
132
+ logger.debug(`LLM call attempt ${attempt + 1} failed: ${errorMessage}`);
133
+
134
+ if (attempt === retries) {
135
+ if (error?.status === 429) {
136
+ throw new AppError(ErrorCode.LLM_RATE_LIMITED, 'LLM service rate limited', error);
137
+ } else if (error?.code === 'ETIMEDOUT') {
138
+ throw new AppError(ErrorCode.LLM_TIMEOUT, 'LLM call timeout', error);
139
+ } else {
140
+ throw new AppError(ErrorCode.LLM_CALL_FAILED, `LLM call failed: ${errorMessage}`, error);
141
+ }
142
+ }
143
+
144
+ await this.sleep(this.config.retry_delay * Math.pow(2, attempt));
145
+ }
146
+ }
147
+
148
+ throw new AppError(ErrorCode.LLM_CALL_FAILED, 'Max retries exceeded');
149
+ }
150
+
151
+ async batchCall(prompts: string[], options?: LLMCallOptions): Promise<LLMResponse[]> {
152
+ const results: LLMResponse[] = [];
153
+ for (const prompt of prompts) {
154
+ results.push(await this.call(prompt, options));
155
+ }
156
+ return results;
157
+ }
158
+
159
+ private sleep(ms: number): Promise<void> {
160
+ return new Promise(resolve => setTimeout(resolve, ms));
161
+ }
162
+ }
@@ -0,0 +1,175 @@
1
// ===== Three-step protocol: single file =====

/**
 * Step 1: Extract structure only (classes / globals / functions).
 * Summary, description, and basic info are intentionally excluded here —
 * they are produced by FILE_DESCRIPTION_PROMPT / FILE_SUMMARY_PROMPT.
 * Placeholders: {{filePath}}, {{fileContent}}.
 */
export const FILE_STRUCTURE_PROMPT = `
Extract ONLY the following structure from the code file. Return STRICT JSON and nothing else.
Do NOT generate summary/description or any basic info (language, LOC, dependencies, etc.).

File path: {{filePath}}
File content:
{{fileContent}}

Return JSON with ONLY these fields:
{
  "classes": [
    {
      "name": "ClassName",
      "extends": "ParentClassName or null",
      "implements": ["InterfaceName", "..."],
      "methods": [
        { "name": "methodName", "signature": "methodSignature", "description": "what it does", "visibility": "public|private|protected" }
      ],
      "properties": [
        { "name": "propertyName", "type": "propertyType", "description": "what it represents", "visibility": "public|private|protected" }
      ]
    }
  ],
  "functions": [
    { "name": "functionName", "signature": "functionSignature", "description": "what it does" }
  ]
}

If nothing is found, return empty arrays. Do NOT output any other fields.
`;

/**
 * Step 2: Generate the description only (<= 200 words).
 * Placeholder: {{structureJson}} (output of step 1).
 */
export const FILE_DESCRIPTION_PROMPT = `
Based on the extracted structure JSON below, describe the overall purpose of this file in <= 200 words.
Return ONLY one JSON object: {"description": "..."}. No other text.

Structure JSON:
{{structureJson}}
`;

/**
 * Step 3: Generate the summary only (<= 100 words).
 * Placeholders: {{structureJson}}, {{description}} (outputs of steps 1-2).
 */
export const FILE_SUMMARY_PROMPT = `
Based on the structure and description below, write a one-sentence high-level summary in <= 100 words.
Return ONLY one JSON object: {"summary": "..."}. No other text.

Structure JSON:
{{structureJson}}

Description:
{{description}}
`;

/**
 * Retry hint appended to the original prompt when the model's previous
 * reply could not be parsed in the required format.
 * Starts with a blank line so it can be concatenated directly.
 */
export const PARSE_RETRY_HINT = `

[IMPORTANT] Your previous output did NOT match the required format.
Return STRICTLY the JSON specified above and NOTHING ELSE (no markdown, no explanations, no extra text).
`;

// ===== Merge stage =====

/**
 * Merge step: merge and deduplicate multiple chunk results into one structure.
 * Placeholders: {{filePath}}, {{chunkResults}}.
 */
export const MERGE_STRUCTURE_PROMPT = `
Below are analysis results for multiple chunks of the same file. Merge and deduplicate them into ONE complete structure (classes and global functions).
Return ONLY JSON. Do NOT generate description or summary.

File path: {{filePath}}
Chunk results:
{{chunkResults}}

Return JSON with ONLY these fields:
{
  "classes": [
    {
      "name": "ClassName",
      "extends": "ParentClassName or null",
      "implements": ["InterfaceName", "..."],
      "methods": [
        { "name": "methodName", "signature": "methodSignature", "description": "what it does", "visibility": "public|private|protected" }
      ],
      "properties": [
        { "name": "propertyName", "type": "propertyType", "description": "what it represents", "visibility": "public|private|protected" }
      ]
    }
  ],
  "functions": [
    { "name": "functionName", "signature": "functionSignature", "description": "what it does" }
  ]
}

Keep item formats consistent with the chunk results. Do NOT output any other fields.
`;

// ===== Directory two-step protocol =====

/**
 * Directory step 1: generate the description (<= 200 words).
 * Placeholder: {{childrenJson}} (direct children with brief summaries).
 */
export const DIRECTORY_DESCRIPTION_PROMPT = `
You are a codebase structure analysis assistant. Below is a JSON list of all direct child directories and files (with brief summaries).
Write an English paragraph (<= 200 words) describing the directory's role and responsibilities in the project.

Return ONLY one JSON object: {"description": "..."}. No other text.

Children (JSON):
{{childrenJson}}
`;

/**
 * Directory step 2: generate the summary (<= 100 words).
 * Placeholders: {{description}} (step 1 output), {{childrenJson}}.
 */
export const DIRECTORY_SUMMARY_PROMPT = `
Based on the directory description and children JSON below, write a one-sentence high-level summary in English (<= 100 words).
Focus on the big picture and avoid details.

Return ONLY one JSON object: {"summary": "..."}. No other text.

Directory description:
{{description}}

Children (JSON):
{{childrenJson}}
`;

/**
 * Deprecated legacy prompt, not used in the main pipeline.
 * NOTE(review): the text says "Kept empty for backward compatibility", but
 * the constant is NOT the empty string — callers must not rely on '' here.
 */
export const CODE_ANALYSIS_PROMPT = `
(Deprecated legacy prompt. Kept empty for backward compatibility; not used in the main pipeline.)
`;

/**
 * Chunk-level structure extraction (same JSON contract as
 * FILE_STRUCTURE_PROMPT, restricted to one chunk).
 * Placeholders: {{filePath}}, {{chunkId}}, {{chunkContent}}, {{context}}.
 */
export const CHUNK_ANALYSIS_PROMPT = `
Analyze the following code chunk. Extract ONLY the structure that is confidently present in THIS chunk (classes, global variables, global functions).
Return STRICT JSON and nothing else. Do NOT generate description, summary, or diagrams.

File path: {{filePath}}
Chunk ID: {{chunkId}}
Chunk content:
{{chunkContent}}
Context (reference only): {{context}}

Return JSON with ONLY these fields:
{
  "classes": [
    {
      "name": "ClassName",
      "extends": "ParentClassName or null",
      "implements": ["InterfaceName", "..."],
      "methods": [
        {
          "name": "methodName",
          "signature": "methodSignature",
          "description": "what it does",
          "visibility": "public|private|protected"
        }
      ],
      "properties": [
        {
          "name": "propertyName",
          "type": "propertyType",
          "description": "what it represents",
          "visibility": "public|private|protected"
        }
      ]
    }
  ],
  "functions": [
    {
      "name": "functionName",
      "signature": "functionSignature",
      "description": "what it does"
    }
  ]
}

Rules:
1) Output STRICT JSON only (no extra text)
2) If nothing is found, return empty arrays
3) Analyze ONLY this chunk; context is reference only
4) Do NOT output fields like basicInfo, partialDiagrams, summary, description, etc.
5) Write all descriptions in English
`;
@@ -0,0 +1,70 @@
1
+ import OpenAI from 'openai';
2
+ import { configManager } from '../common/config';
3
+ import { AppError, ErrorCode } from '../common/errors';
4
+ import { logger } from '../common/logger';
5
+
6
/**
 * Minimal LLM abstraction used by the analysis pipeline: turn a prompt
 * (plus an optional system prompt) into a completion string.
 */
export interface LLMService {
  /**
   * @param prompt user prompt sent to the model
   * @param systemPrompt optional system prompt; implementations may default it
   * @returns the model's completion text
   */
  generateCompletion(prompt: string, systemPrompt?: string): Promise<string>;
}
9
+
10
+ export class OpenAILLMService implements LLMService {
11
+ private client: OpenAI | null = null;
12
+
13
+ private async getClient(): Promise<OpenAI> {
14
+ if (!this.client) {
15
+ let config: any;
16
+ try {
17
+ config = configManager.getConfig();
18
+ } catch (e) {
19
+ await configManager.load();
20
+ config = configManager.getConfig();
21
+ }
22
+
23
+ if (!config.llm.api_key) {
24
+ throw new AppError(
25
+ ErrorCode.ANALYSIS_EXCEPTION,
26
+ 'LLM API key not configured. Please set it in config file or via SKILL_ANY_CODE_LLM_API_KEY environment variable.',
27
+ );
28
+ }
29
+
30
+ this.client = new OpenAI({
31
+ baseURL: config.llm.base_url,
32
+ apiKey: config.llm.api_key,
33
+ });
34
+ }
35
+ return this.client;
36
+ }
37
+
38
+ async generateCompletion(prompt: string, systemPrompt: string = 'You are a code analysis expert. You help analyze code files, generate summaries, class diagrams, and other analysis results. Be concise and accurate.'): Promise<string> {
39
+ const client = await this.getClient();
40
+ const config = configManager.getConfig();
41
+
42
+ try {
43
+ logger.debug(`Calling LLM model ${config.llm.model} with prompt length: ${prompt.length}`);
44
+
45
+ const response = await client.chat.completions.create({
46
+ model: config.llm.model,
47
+ messages: [
48
+ { role: 'system', content: systemPrompt },
49
+ { role: 'user', content: prompt },
50
+ ],
51
+ temperature: 0.1,
52
+ max_tokens: 2048,
53
+ });
54
+
55
+ const result = response.choices[0]?.message?.content?.trim() || '';
56
+
57
+ if (!result) {
58
+ throw new AppError(ErrorCode.ANALYSIS_EXCEPTION, 'LLM returned empty response');
59
+ }
60
+
61
+ logger.debug(`LLM response received, length: ${result.length}`);
62
+ return result;
63
+ } catch (error: any) {
64
+ logger.error('LLM call failed:', error);
65
+ throw new AppError(ErrorCode.ANALYSIS_EXCEPTION, `LLM call failed: ${error.message}`);
66
+ }
67
+ }
68
+ }
69
+
70
+ export const llmService = new OpenAILLMService();
@@ -0,0 +1,53 @@
1
+ import * as fs from 'fs-extra'
2
+ import * as path from 'path'
3
+ import { ISkillGenerator, SkillGenerateOptions, SkillProvider } from '../../domain/interfaces'
4
+ import { getSkillMdContent } from './templates/skill.md.template'
5
+ import { getResolveScriptContent } from './templates/resolve.script'
6
+ import { logger } from '../../common/logger'
7
+
8
// Maps each supported provider to the project-relative directory the skill
// bundle is deployed into. opencode/cursor/codex share the same `.agents`
// directory, so deploying for several of them writes that target only once
// (SkillGenerator dedupes via a Set).
const PROVIDER_DIRECTORY_MAP: Record<SkillProvider, string> = {
  opencode: '.agents/skills/skill-any-code',
  cursor: '.agents/skills/skill-any-code',
  codex: '.agents/skills/skill-any-code',
  claude: '.claude/skills/skill-any-code',
}
14
+
15
+ export class SkillGenerator implements ISkillGenerator {
16
+ async generate(options: SkillGenerateOptions): Promise<string[]> {
17
+ const { projectRoot, providers } = options
18
+ const deployedPaths: string[] = []
19
+
20
+ const uniqueDirs = new Set<string>()
21
+ for (const p of providers) {
22
+ const lowerProvider = p.toLowerCase() as SkillProvider
23
+ const dir = PROVIDER_DIRECTORY_MAP[lowerProvider]
24
+ if (dir) {
25
+ uniqueDirs.add(dir)
26
+ } else {
27
+ logger.warn(`Unknown provider: ${p}. Skipped.`)
28
+ }
29
+ }
30
+
31
+ const skillMd = getSkillMdContent()
32
+ const resolveScript = getResolveScriptContent()
33
+
34
+ for (const relativeDir of uniqueDirs) {
35
+ const targetDir = path.join(projectRoot, relativeDir)
36
+ try {
37
+ await fs.ensureDir(targetDir)
38
+ await fs.ensureDir(path.join(targetDir, 'scripts'))
39
+
40
+ await fs.writeFile(path.join(targetDir, 'SKILL.md'), skillMd, 'utf-8')
41
+ await fs.writeFile(path.join(targetDir, 'scripts', 'get-summary.py'), resolveScript, 'utf-8')
42
+
43
+ deployedPaths.push(targetDir)
44
+ logger.debug(`Skill deployed to: ${targetDir}`)
45
+ } catch (error: unknown) {
46
+ const msg = error instanceof Error ? error.message : String(error)
47
+ logger.warn(`Failed to deploy skill (${targetDir}): ${msg}`)
48
+ }
49
+ }
50
+
51
+ return deployedPaths
52
+ }
53
+ }
@@ -0,0 +1,97 @@
1
/**
 * Template for the content of scripts/get-summary.py — a standalone script
 * deployed into the skill directory that uses only the Python standard library.
 *
 * Contract:
 * - Input: argv[1], a file or directory path relative to the project root.
 * - Behavior: follows the main program's result-md naming rules to derive the
 *   result markdown path for the target; if that markdown file exists, its
 *   project-relative path is printed.
 * - Output:
 *   - hit: stdout prints the Markdown result file's path relative to the
 *     project root (single line)
 *   - miss: stdout prints the string "N/A" (single line)
 *   - bad arguments: stderr gets an error message; exit code 1
 */
export function getResolveScriptContent(): string {
  // NOTE: inside this TS template literal, "\\\\" emits a Python source
  // "\\" (one escaped backslash) and "\\n" emits a literal "\n" escape —
  // do not "simplify" the escapes below.
  return `#!/usr/bin/env python3
from __future__ import annotations

import os
import sys
from pathlib import Path, PurePosixPath


DEFAULT_OUTPUT_DIR = ".skill-any-code-result"


def _to_posix_rel(s: str) -> str:
    # 最大兼容:支持 \\、./、尾部 /
    v = (s or "").strip().replace("\\\\", "/")
    while v.startswith("./"):
        v = v[2:]
    # 保留根目录语义:"." / "" 视为项目根目录
    if v in (".", ""):
        return "."
    # 裁剪尾部斜杠(目录也允许输入 xxx/)
    if v.endswith("/") and len(v) > 1:
        v = v[:-1]
    return v


def _detect_project_root(script_path: Path) -> Path:
    # 约定:<projectRoot>/.agents/skills/skill-any-code/scripts/get-summary.py
    # parents: [scripts, skill-any-code, skills, .agents, projectRoot, ...]
    return script_path.resolve().parents[4]


def _file_md_rel(target_rel: str) -> PurePosixPath:
    p = PurePosixPath(target_rel)
    dir_part = str(p.parent) if str(p.parent) not in (".", "") else ""
    stem = p.stem
    suffix = p.suffix # includes leading '.' or ''

    if stem == "index" and suffix:
        name = f"index{suffix}.md"
    else:
        name = f"{stem}.md"

    if dir_part:
        return PurePosixPath(DEFAULT_OUTPUT_DIR) / dir_part / name
    return PurePosixPath(DEFAULT_OUTPUT_DIR) / name


def _dir_md_rel(target_rel: str) -> PurePosixPath:
    if target_rel in (".", ""):
        return PurePosixPath(DEFAULT_OUTPUT_DIR) / "index.md"
    return PurePosixPath(DEFAULT_OUTPUT_DIR) / target_rel / "index.md"


def main() -> int:
    if len(sys.argv) < 2 or not sys.argv[1]:
        sys.stderr.write("Usage: python get-summary.py <relative-path>\\n")
        return 1

    raw = sys.argv[1]
    raw_posix = raw.replace("\\\\", "/").strip()
    rel = _to_posix_rel(raw)

    project_root = _detect_project_root(Path(__file__))
    target_abs = (project_root / PurePosixPath(rel)).resolve()

    if not target_abs.exists():
        sys.stdout.write("N/A\\n")
        return 0

    # 输入可能是目录(含尾 /)或真实目录
    is_dir = target_abs.is_dir() or raw_posix.endswith("/")
    md_rel = _dir_md_rel(rel) if is_dir else _file_md_rel(rel)
    md_abs = (project_root / Path(os.fspath(md_rel))).resolve()

    if md_abs.exists():
        sys.stdout.write(str(md_rel).replace("\\\\", "/") + "\\n")
    else:
        sys.stdout.write("N/A\\n")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
`
}
@@ -0,0 +1,45 @@
1
+ export function getSkillMdContent(): string {
2
+ const lines = [
3
+ '---',
4
+ 'name: code-atlas-navigator',
5
+ 'description: Use this skill when you need to explore, understand, or search this codebase. Input a file or directory path relative to the project root to retrieve the path to its detailed natural language summary. Use this progressively to navigate from the root directory down to specific target files without reading the full raw source code.',
6
+ '---',
7
+ '',
8
+ '# Codebase Navigation Guide',
9
+ '',
10
+ 'This repository has been pre-analyzed and summarized into natural language Markdown files. To save context window and improve accuracy, **do not read the raw source code directly**. Instead, use this skill to progressively navigate the repository layer by layer.',
11
+ '',
12
+ '## 🧭 How to Explore the Codebase',
13
+ '',
14
+ "1. **Start High-Level**: If you don't know the exact file location, begin by querying the summary of the root directory (`.`).",
15
+ '2. **Progressive Disclosure**: Read the directory summary to understand its sub-components. Identify the next relevant sub-directory or file based on your current task.',
16
+ '3. **Drill Down**: Query the summaries of those specific sub-components. Repeat this until you locate the target function, class, or logic.',
17
+ '',
18
+ '## 🛠️ How to Locate a Summary File',
19
+ '',
20
+ 'Use the provided Python script to map the original codebase path to its corresponding Markdown summary path.',
21
+ '',
22
+ '**Script Specification:**',
23
+ '* **Input Parameter**: The relative path of the target file or directory with respect to the project root.',
24
+ '* **Output**: The relative path of the Markdown summary file with respect to the project root.',
25
+ '* **Fallback**: If the summary Markdown file cannot be found, the script will strictly output `N/A`.',
26
+ '',
27
+ '### Execution Commands',
28
+ '',
29
+ 'Since the execution environment may vary, please use the appropriate command based on your current operating system:',
30
+ '',
31
+ '**For Linux / macOS (Bash/Zsh):**',
32
+ '```bash',
33
+ 'python3 scripts/get_summary.py <relative/path/to/target>',
34
+ '```',
35
+ '(Note: If python3 is not found, fallback to python).',
36
+ '',
37
+ '**For Windows (CMD/PowerShell):**',
38
+ '```dos',
39
+ 'python scripts\\get_summary.py <relative\\path\\to\\target>',
40
+ '```',
41
+ '(Note: You may also use py or python3 depending on the Windows environment setup).',
42
+ ]
43
+
44
+ return lines.join('\n') + '\n'
45
+ }