skill_seekers-2.7.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- skill_seekers/__init__.py +22 -0
- skill_seekers/cli/__init__.py +39 -0
- skill_seekers/cli/adaptors/__init__.py +120 -0
- skill_seekers/cli/adaptors/base.py +221 -0
- skill_seekers/cli/adaptors/claude.py +485 -0
- skill_seekers/cli/adaptors/gemini.py +453 -0
- skill_seekers/cli/adaptors/markdown.py +269 -0
- skill_seekers/cli/adaptors/openai.py +503 -0
- skill_seekers/cli/ai_enhancer.py +310 -0
- skill_seekers/cli/api_reference_builder.py +373 -0
- skill_seekers/cli/architectural_pattern_detector.py +525 -0
- skill_seekers/cli/code_analyzer.py +1462 -0
- skill_seekers/cli/codebase_scraper.py +1225 -0
- skill_seekers/cli/config_command.py +563 -0
- skill_seekers/cli/config_enhancer.py +431 -0
- skill_seekers/cli/config_extractor.py +871 -0
- skill_seekers/cli/config_manager.py +452 -0
- skill_seekers/cli/config_validator.py +394 -0
- skill_seekers/cli/conflict_detector.py +528 -0
- skill_seekers/cli/constants.py +72 -0
- skill_seekers/cli/dependency_analyzer.py +757 -0
- skill_seekers/cli/doc_scraper.py +2332 -0
- skill_seekers/cli/enhance_skill.py +488 -0
- skill_seekers/cli/enhance_skill_local.py +1096 -0
- skill_seekers/cli/enhance_status.py +194 -0
- skill_seekers/cli/estimate_pages.py +433 -0
- skill_seekers/cli/generate_router.py +1209 -0
- skill_seekers/cli/github_fetcher.py +534 -0
- skill_seekers/cli/github_scraper.py +1466 -0
- skill_seekers/cli/guide_enhancer.py +723 -0
- skill_seekers/cli/how_to_guide_builder.py +1267 -0
- skill_seekers/cli/install_agent.py +461 -0
- skill_seekers/cli/install_skill.py +178 -0
- skill_seekers/cli/language_detector.py +614 -0
- skill_seekers/cli/llms_txt_detector.py +60 -0
- skill_seekers/cli/llms_txt_downloader.py +104 -0
- skill_seekers/cli/llms_txt_parser.py +150 -0
- skill_seekers/cli/main.py +558 -0
- skill_seekers/cli/markdown_cleaner.py +132 -0
- skill_seekers/cli/merge_sources.py +806 -0
- skill_seekers/cli/package_multi.py +77 -0
- skill_seekers/cli/package_skill.py +241 -0
- skill_seekers/cli/pattern_recognizer.py +1825 -0
- skill_seekers/cli/pdf_extractor_poc.py +1166 -0
- skill_seekers/cli/pdf_scraper.py +617 -0
- skill_seekers/cli/quality_checker.py +519 -0
- skill_seekers/cli/rate_limit_handler.py +438 -0
- skill_seekers/cli/resume_command.py +160 -0
- skill_seekers/cli/run_tests.py +230 -0
- skill_seekers/cli/setup_wizard.py +93 -0
- skill_seekers/cli/split_config.py +390 -0
- skill_seekers/cli/swift_patterns.py +560 -0
- skill_seekers/cli/test_example_extractor.py +1081 -0
- skill_seekers/cli/test_unified_simple.py +179 -0
- skill_seekers/cli/unified_codebase_analyzer.py +572 -0
- skill_seekers/cli/unified_scraper.py +932 -0
- skill_seekers/cli/unified_skill_builder.py +1605 -0
- skill_seekers/cli/upload_skill.py +162 -0
- skill_seekers/cli/utils.py +432 -0
- skill_seekers/mcp/__init__.py +33 -0
- skill_seekers/mcp/agent_detector.py +316 -0
- skill_seekers/mcp/git_repo.py +273 -0
- skill_seekers/mcp/server.py +231 -0
- skill_seekers/mcp/server_fastmcp.py +1249 -0
- skill_seekers/mcp/server_legacy.py +2302 -0
- skill_seekers/mcp/source_manager.py +285 -0
- skill_seekers/mcp/tools/__init__.py +115 -0
- skill_seekers/mcp/tools/config_tools.py +251 -0
- skill_seekers/mcp/tools/packaging_tools.py +826 -0
- skill_seekers/mcp/tools/scraping_tools.py +842 -0
- skill_seekers/mcp/tools/source_tools.py +828 -0
- skill_seekers/mcp/tools/splitting_tools.py +212 -0
- skill_seekers/py.typed +0 -0
- skill_seekers-2.7.3.dist-info/METADATA +2027 -0
- skill_seekers-2.7.3.dist-info/RECORD +79 -0
- skill_seekers-2.7.3.dist-info/WHEEL +5 -0
- skill_seekers-2.7.3.dist-info/entry_points.txt +19 -0
- skill_seekers-2.7.3.dist-info/licenses/LICENSE +21 -0
- skill_seekers-2.7.3.dist-info/top_level.txt +1 -0
skill_seekers/cli/ai_enhancer.py
@@ -0,0 +1,310 @@
#!/usr/bin/env python3
"""
AI Enhancement Module for Pattern Detection and Test Examples

Enhances C3.1 (Pattern Detection) and C3.2 (Test Example Extraction) with AI analysis.

Features:
- Explains why patterns were detected
- Suggests improvements and identifies issues
- Recommends related patterns
- Adds context to test examples
- Groups related examples into tutorials
- Identifies best practices

Credits:
- Uses Claude AI (Anthropic) for analysis
- Graceful degradation if API unavailable
"""

import logging
import os
from dataclasses import dataclass

logger = logging.getLogger(__name__)


@dataclass
class AIAnalysis:
    """AI analysis result for patterns or examples"""

    explanation: str
    issues: list[str]
    recommendations: list[str]
    related_items: list[str]  # Related patterns or examples
    best_practices: list[str]
    confidence_boost: float  # -0.2 to +0.2 adjustment to confidence


class AIEnhancer:
    """Base class for AI enhancement"""

    def __init__(self, api_key: str | None = None, enabled: bool = True, mode: str = "auto"):
        """
        Initialize AI enhancer.

        Args:
            api_key: Anthropic API key (uses ANTHROPIC_API_KEY env if None)
            enabled: Enable AI enhancement (default: True)
            mode: Enhancement mode - "auto" (default), "api", or "local"
                - "auto": Use API if key available, otherwise disable
                - "api": Force API mode (fails if no key)
                - "local": Use Claude Code local mode (opens terminal)
        """
        self.enabled = enabled
        self.mode = mode
        self.api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
        self.client = None

        # Determine actual mode
        if mode == "auto":
            if self.api_key:
                self.mode = "api"
            else:
                # For now, disable if no API key
                # LOCAL mode for batch processing is complex
                self.mode = "disabled"
                self.enabled = False
                logger.info("ℹ️ AI enhancement disabled (no API key found)")
                logger.info(
                    " Set ANTHROPIC_API_KEY to enable, or use 'skill-seekers enhance' for SKILL.md"
                )
                return

        if self.mode == "api" and self.enabled:
            try:
                import anthropic

                self.client = anthropic.Anthropic(api_key=self.api_key)
                logger.info("✅ AI enhancement enabled (using Claude API)")
            except ImportError:
                logger.warning("⚠️ anthropic package not installed. AI enhancement disabled.")
                logger.warning(" Install with: pip install anthropic")
                self.enabled = False
            except Exception as e:
                logger.warning(f"⚠️ Failed to initialize AI client: {e}")
                self.enabled = False
        elif self.mode == "local":
            # LOCAL mode requires Claude Code to be available
            # For patterns/examples, this is less practical than API mode
            logger.info("ℹ️ LOCAL mode not yet supported for pattern/example enhancement")
            logger.info(
                " Use API mode (set ANTHROPIC_API_KEY) or 'skill-seekers enhance' for SKILL.md"
            )
            self.enabled = False

    def _call_claude(self, prompt: str, max_tokens: int = 1000) -> str | None:
        """Call Claude API with error handling"""
        if not self.client:
            return None

        try:
            response = self.client.messages.create(
                model="claude-sonnet-4-20250514",
                max_tokens=max_tokens,
                messages=[{"role": "user", "content": prompt}],
            )
            return response.content[0].text
        except Exception as e:
            logger.warning(f"⚠️ AI API call failed: {e}")
            return None


class PatternEnhancer(AIEnhancer):
    """Enhance design pattern detection with AI analysis"""

    def enhance_patterns(self, patterns: list[dict]) -> list[dict]:
        """
        Enhance detected patterns with AI analysis.

        Args:
            patterns: List of detected pattern instances

        Returns:
            Enhanced patterns with AI analysis
        """
        if not self.enabled or not patterns:
            return patterns

        logger.info(f"🤖 Enhancing {len(patterns)} detected patterns with AI...")

        # Batch patterns to minimize API calls (max 5 per batch)
        batch_size = 5
        enhanced = []

        for i in range(0, len(patterns), batch_size):
            batch = patterns[i : i + batch_size]
            batch_results = self._enhance_pattern_batch(batch)
            enhanced.extend(batch_results)

        logger.info(f"✅ Enhanced {len(enhanced)} patterns")
        return enhanced

    def _enhance_pattern_batch(self, patterns: list[dict]) -> list[dict]:
        """Enhance a batch of patterns"""
        # Prepare prompt
        pattern_descriptions = []
        for idx, p in enumerate(patterns):
            desc = f"{idx + 1}. {p['pattern_type']} in {p.get('class_name', 'unknown')}"
            desc += f"\n Evidence: {', '.join(p.get('evidence', []))}"
            pattern_descriptions.append(desc)

        prompt = f"""Analyze these detected design patterns and provide insights:

{chr(10).join(pattern_descriptions)}

For EACH pattern, provide (in JSON format):
1. "explanation": Brief why this pattern was detected (1-2 sentences)
2. "issues": List of potential issues or anti-patterns (if any)
3. "recommendations": Suggestions for improvement (if any)
4. "related_patterns": Other patterns that might be relevant
5. "confidence_boost": Confidence adjustment from -0.2 to +0.2 based on evidence quality

Format as JSON array matching input order. Be concise and actionable.
"""

        response = self._call_claude(prompt, max_tokens=2000)

        if not response:
            # Return patterns unchanged if API fails
            return patterns

        try:
            import json

            analyses = json.loads(response)

            # Merge AI analysis into patterns
            for idx, pattern in enumerate(patterns):
                if idx < len(analyses):
                    analysis = analyses[idx]
                    pattern["ai_analysis"] = {
                        "explanation": analysis.get("explanation", ""),
                        "issues": analysis.get("issues", []),
                        "recommendations": analysis.get("recommendations", []),
                        "related_patterns": analysis.get("related_patterns", []),
                        "confidence_boost": analysis.get("confidence_boost", 0.0),
                    }

                    # Adjust confidence
                    boost = analysis.get("confidence_boost", 0.0)
                    if -0.2 <= boost <= 0.2:
                        pattern["confidence"] = min(1.0, max(0.0, pattern["confidence"] + boost))

            return patterns

        except json.JSONDecodeError:
            logger.warning("⚠️ Failed to parse AI response, returning patterns unchanged")
            return patterns
        except Exception as e:
            logger.warning(f"⚠️ Error processing AI analysis: {e}")
            return patterns


class TestExampleEnhancer(AIEnhancer):
    """Enhance test examples with AI analysis"""

    def enhance_examples(self, examples: list[dict]) -> list[dict]:
        """
        Enhance test examples with AI context and explanations.

        Args:
            examples: List of extracted test examples

        Returns:
            Enhanced examples with AI analysis
        """
        if not self.enabled or not examples:
            return examples

        logger.info(f"🤖 Enhancing {len(examples)} test examples with AI...")

        # Batch examples to minimize API calls
        batch_size = 5
        enhanced = []

        for i in range(0, len(examples), batch_size):
            batch = examples[i : i + batch_size]
            batch_results = self._enhance_example_batch(batch)
            enhanced.extend(batch_results)

        logger.info(f"✅ Enhanced {len(enhanced)} examples")
        return enhanced

    def _enhance_example_batch(self, examples: list[dict]) -> list[dict]:
        """Enhance a batch of examples"""
        # Prepare prompt
        example_descriptions = []
        for idx, ex in enumerate(examples):
            desc = f"{idx + 1}. {ex.get('category', 'unknown')} - {ex.get('test_name', 'unknown')}"
            desc += f"\n Code: {ex.get('code', '')[:100]}..."
            if ex.get("expected_behavior"):
                desc += f"\n Expected: {ex['expected_behavior']}"
            example_descriptions.append(desc)

        prompt = f"""Analyze these test examples and provide educational context:

{chr(10).join(example_descriptions)}

For EACH example, provide (in JSON format):
1. "explanation": What this example demonstrates (1-2 sentences, beginner-friendly)
2. "best_practices": List of best practices shown in this example
3. "common_mistakes": Common mistakes this example helps avoid
4. "related_examples": Related test scenarios or patterns
5. "tutorial_group": Suggested tutorial category (e.g., "User Authentication", "Database Operations")

Format as JSON array matching input order. Focus on educational value.
"""

        response = self._call_claude(prompt, max_tokens=2000)

        if not response:
            return examples

        try:
            import json

            analyses = json.loads(response)

            # Merge AI analysis into examples
            for idx, example in enumerate(examples):
                if idx < len(analyses):
                    analysis = analyses[idx]
                    example["ai_analysis"] = {
                        "explanation": analysis.get("explanation", ""),
                        "best_practices": analysis.get("best_practices", []),
                        "common_mistakes": analysis.get("common_mistakes", []),
                        "related_examples": analysis.get("related_examples", []),
                        "tutorial_group": analysis.get("tutorial_group", ""),
                    }

            return examples

        except json.JSONDecodeError:
            logger.warning("⚠️ Failed to parse AI response, returning examples unchanged")
            return examples
        except Exception as e:
            logger.warning(f"⚠️ Error processing AI analysis: {e}")
            return examples

    def generate_tutorials(self, examples: list[dict]) -> dict[str, list[dict]]:
        """
        Group enhanced examples into tutorial sections.

        Args:
            examples: Enhanced examples with AI analysis

        Returns:
            Dictionary mapping tutorial groups to examples
        """
        tutorials = {}

        for example in examples:
            ai_analysis = example.get("ai_analysis", {})
            group = ai_analysis.get("tutorial_group", "Miscellaneous")

            if group not in tutorials:
                tutorials[group] = []
            tutorials[group].append(example)

        return tutorials
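For orientation, a minimal usage sketch of PatternEnhancer follows. The pattern dict keys (pattern_type, class_name, evidence, confidence) are inferred from _enhance_pattern_batch above and are illustrative; the exact schema emitted by the package's pattern detector may differ.

    # Usage sketch (illustrative, not part of the package).
    from skill_seekers.cli.ai_enhancer import PatternEnhancer

    # Hand-written pattern instances; keys mirror what _enhance_pattern_batch reads.
    patterns = [
        {
            "pattern_type": "Singleton",
            "class_name": "ConfigManager",
            "evidence": ["private constructor", "get_instance() classmethod"],
            "confidence": 0.7,
        },
    ]

    # With mode="auto" the enhancer silently disables itself when ANTHROPIC_API_KEY
    # is not set and returns the input unchanged (graceful degradation).
    enhancer = PatternEnhancer(mode="auto")
    enhanced = enhancer.enhance_patterns(patterns)

    for p in enhanced:
        analysis = p.get("ai_analysis", {})
        print(p["pattern_type"], p["confidence"], analysis.get("explanation", "(no AI analysis)"))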
skill_seekers/cli/api_reference_builder.py
@@ -0,0 +1,373 @@
#!/usr/bin/env python3
"""
API Reference Builder

Generates markdown API documentation from code analysis results.
Supports Python, JavaScript/TypeScript, and C++.

Output Format:
- One .md file per analyzed source file
- Organized by: Classes → Methods, then standalone Functions
- Includes: Signatures, parameters, return types, docstrings

Usage:
    from skill_seekers.cli.api_reference_builder import APIReferenceBuilder

    builder = APIReferenceBuilder(code_analysis_results)
    builder.build_reference(output_dir)
"""

import json
from pathlib import Path
from typing import Any


class APIReferenceBuilder:
    """
    Builds markdown API reference from code analysis results.

    Processes code analysis data and generates well-formatted markdown
    documentation for each analyzed source file.
    """

    def __init__(self, code_analysis: dict[str, Any]):
        """
        Initialize builder with code analysis results.

        Args:
            code_analysis: Dictionary containing analyzed files and their code structures.
                Expected format: {'files': [{'file': 'path', 'classes': [...], 'functions': [...]}]}
        """
        self.code_analysis = code_analysis
        self.files_data = code_analysis.get("files", [])

    def build_reference(self, output_dir: Path) -> dict[str, Path]:
        """
        Generate markdown files for each analyzed source file.

        Args:
            output_dir: Directory to save generated markdown files

        Returns:
            Dictionary mapping source file paths to generated markdown file paths
        """
        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

        generated_files = {}

        for file_data in self.files_data:
            source_file = file_data.get("file", "unknown")
            language = file_data.get("language", "Unknown")

            # Skip files with no analysis
            if not file_data.get("classes") and not file_data.get("functions"):
                continue

            # Generate markdown content
            markdown_content = self._generate_file_reference(file_data, source_file, language)

            # Determine output filename
            output_filename = self._get_output_filename(source_file)
            output_path = output_dir / output_filename

            # Write markdown file
            output_path.write_text(markdown_content, encoding="utf-8")
            generated_files[source_file] = output_path

        return generated_files

    def _get_output_filename(self, source_file: str) -> str:
        """
        Generate output filename from source file path.

        Args:
            source_file: Path to source file

        Returns:
            Safe filename for markdown output
        """
        # Get base filename
        basename = Path(source_file).name

        # Replace extension with .md
        name_without_ext = basename.rsplit(".", 1)[0] if "." in basename else basename
        return f"{name_without_ext}.md"

    def _generate_file_reference(
        self, file_data: dict[str, Any], source_file: str, language: str
    ) -> str:
        """
        Generate complete markdown reference for a single file.

        Args:
            file_data: Analysis data for the file
            source_file: Path to source file
            language: Programming language

        Returns:
            Complete markdown content
        """
        lines = []

        # Header
        filename = Path(source_file).name
        lines.append(f"# API Reference: {filename}\n")
        lines.append(f"**Language**: {language}\n")
        lines.append(f"**Source**: `{source_file}`\n")
        lines.append("---\n")

        # Classes section
        classes = file_data.get("classes", [])
        if classes:
            lines.append("## Classes\n")
            for cls in classes:
                lines.append(self._format_class(cls))
                lines.append("\n")

        # Functions section
        functions = file_data.get("functions", [])
        if functions:
            lines.append("## Functions\n")
            for func in functions:
                lines.append(self._format_function(func))
                lines.append("\n")

        return "\n".join(lines)

    def _format_class(self, class_sig: dict[str, Any]) -> str:
        """
        Format class signature as markdown.

        Args:
            class_sig: Class signature dictionary

        Returns:
            Formatted markdown for class
        """
        lines = []

        # Class name
        class_name = class_sig.get("name", "Unknown")
        lines.append(f"### {class_name}\n")

        # Docstring
        docstring = class_sig.get("docstring")
        if docstring:
            lines.append(f"{docstring}\n")

        # Inheritance
        base_classes = class_sig.get("base_classes", [])
        if base_classes:
            bases_str = ", ".join(base_classes)
            lines.append(f"**Inherits from**: {bases_str}\n")
        else:
            lines.append("**Inherits from**: (none)\n")

        # Methods
        methods = class_sig.get("methods", [])
        if methods:
            lines.append("#### Methods\n")
            for method in methods:
                lines.append(self._format_method(method))
                lines.append("")

        return "\n".join(lines)

    def _format_method(self, method_sig: dict[str, Any]) -> str:
        """
        Format method signature as markdown.

        Args:
            method_sig: Method signature dictionary

        Returns:
            Formatted markdown for method
        """
        lines = []

        # Method signature
        signature = self._build_signature(method_sig)
        lines.append(f"##### {signature}\n")

        # Docstring
        docstring = method_sig.get("docstring")
        if docstring:
            lines.append(f"{docstring}\n")

        # Decorators
        decorators = method_sig.get("decorators", [])
        if decorators:
            dec_str = ", ".join(f"`@{d}`" for d in decorators)
            lines.append(f"**Decorators**: {dec_str}\n")

        # Parameters table
        params = method_sig.get("parameters", [])
        if params:
            lines.append(self._format_parameters(params))
            lines.append("")

        # Return type
        return_type = method_sig.get("return_type")
        if return_type:
            lines.append(f"**Returns**: `{return_type}`\n")

        return "\n".join(lines)

    def _format_function(self, func_sig: dict[str, Any]) -> str:
        """
        Format function signature as markdown.

        Args:
            func_sig: Function signature dictionary

        Returns:
            Formatted markdown for function
        """
        lines = []

        # Function signature
        signature = self._build_signature(func_sig)
        lines.append(f"### {signature}\n")

        # Async indicator
        if func_sig.get("is_async"):
            lines.append("**Async function**\n")

        # Docstring
        docstring = func_sig.get("docstring")
        if docstring:
            lines.append(f"{docstring}\n")

        # Parameters table
        params = func_sig.get("parameters", [])
        if params:
            lines.append(self._format_parameters(params))
            lines.append("")

        # Return type
        return_type = func_sig.get("return_type")
        if return_type:
            lines.append(f"**Returns**: `{return_type}`\n")
        else:
            lines.append("**Returns**: (none)\n")

        return "\n".join(lines)

    def _build_signature(self, sig: dict[str, Any]) -> str:
        """
        Build function/method signature string.

        Args:
            sig: Signature dictionary

        Returns:
            Formatted signature string
        """
        name = sig.get("name", "unknown")
        params = sig.get("parameters", [])
        return_type = sig.get("return_type")

        # Build parameter list
        param_strs = []
        for param in params:
            param_str = param.get("name", "")

            # Add type hint if available
            type_hint = param.get("type_hint")
            if type_hint:
                param_str += f": {type_hint}"

            # Add default value if available
            default = param.get("default")
            if default:
                param_str += f" = {default}"

            param_strs.append(param_str)

        params_str = ", ".join(param_strs)

        # Build full signature
        if return_type:
            return f"{name}({params_str}) → {return_type}"
        else:
            return f"{name}({params_str})"

    def _format_parameters(self, params: list[dict]) -> str:
        """
        Format parameter list as markdown table.

        Args:
            params: List of parameter dictionaries

        Returns:
            Formatted markdown table
        """
        if not params:
            return ""

        lines = []
        lines.append("**Parameters**:")
        lines.append("")
        lines.append("| Name | Type | Default | Description |")
        lines.append("|------|------|---------|-------------|")

        for param in params:
            name = param.get("name", "-")
            type_hint = param.get("type_hint", "-")
            default = param.get("default")

            # Show "-" for parameters without defaults
            default_str = default if default is not None else "-"

            # For description, use empty for now (would need JSDoc/docstring parsing)
            description = "-"

            lines.append(f"| {name} | {type_hint} | {default_str} | {description} |")

        return "\n".join(lines)


def main():
    """
    Command-line interface for API reference generation.

    Reads code analysis JSON and generates markdown API documentation.
    """
    import argparse

    parser = argparse.ArgumentParser(
        description="Generate API reference from code analysis results"
    )

    parser.add_argument("input_file", help="Code analysis JSON file")
    parser.add_argument("output_dir", help="Output directory for markdown files")

    args = parser.parse_args()

    # Read code analysis
    input_path = Path(args.input_file)
    if not input_path.exists():
        print(f"Error: Input file not found: {input_path}")
        return 1

    with open(input_path, encoding="utf-8") as f:
        code_analysis = json.load(f)

    # Build API reference
    builder = APIReferenceBuilder(code_analysis)
    generated_files = builder.build_reference(Path(args.output_dir))

    # Report results
    print(f"✅ Generated {len(generated_files)} API reference files")
    print(f"📁 Output directory: {args.output_dir}")
    for source, output in generated_files.items():
        print(f" • {output.name} (from {Path(source).name})")

    return 0


if __name__ == "__main__":
    import sys

    sys.exit(main())
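A short usage sketch of APIReferenceBuilder follows. The analysis dict is hand-written to match the keys this builder reads (file, language, functions, parameters, type_hint, default, return_type) and is only illustrative; real input would normally come from the package's code analyzer rather than being written by hand.

    # Usage sketch (illustrative, not part of the package).
    from pathlib import Path

    from skill_seekers.cli.api_reference_builder import APIReferenceBuilder

    # Minimal hand-written analysis result; key names mirror what build_reference,
    # _build_signature, and _format_parameters read above.
    code_analysis = {
        "files": [
            {
                "file": "example/calculator.py",
                "language": "Python",
                "classes": [],
                "functions": [
                    {
                        "name": "add",
                        "docstring": "Add two numbers.",
                        "parameters": [
                            {"name": "a", "type_hint": "int"},
                            {"name": "b", "type_hint": "int", "default": "0"},
                        ],
                        "return_type": "int",
                    },
                ],
            }
        ]
    }

    builder = APIReferenceBuilder(code_analysis)
    generated = builder.build_reference(Path("api_reference"))

    # Maps each source path to its generated markdown file,
    # e.g. {'example/calculator.py': Path('api_reference/calculator.md')},
    # with the signature rendered as "add(a: int, b: int = 0) → int".
    print(generated)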