skill_seekers-2.7.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. skill_seekers/__init__.py +22 -0
  2. skill_seekers/cli/__init__.py +39 -0
  3. skill_seekers/cli/adaptors/__init__.py +120 -0
  4. skill_seekers/cli/adaptors/base.py +221 -0
  5. skill_seekers/cli/adaptors/claude.py +485 -0
  6. skill_seekers/cli/adaptors/gemini.py +453 -0
  7. skill_seekers/cli/adaptors/markdown.py +269 -0
  8. skill_seekers/cli/adaptors/openai.py +503 -0
  9. skill_seekers/cli/ai_enhancer.py +310 -0
  10. skill_seekers/cli/api_reference_builder.py +373 -0
  11. skill_seekers/cli/architectural_pattern_detector.py +525 -0
  12. skill_seekers/cli/code_analyzer.py +1462 -0
  13. skill_seekers/cli/codebase_scraper.py +1225 -0
  14. skill_seekers/cli/config_command.py +563 -0
  15. skill_seekers/cli/config_enhancer.py +431 -0
  16. skill_seekers/cli/config_extractor.py +871 -0
  17. skill_seekers/cli/config_manager.py +452 -0
  18. skill_seekers/cli/config_validator.py +394 -0
  19. skill_seekers/cli/conflict_detector.py +528 -0
  20. skill_seekers/cli/constants.py +72 -0
  21. skill_seekers/cli/dependency_analyzer.py +757 -0
  22. skill_seekers/cli/doc_scraper.py +2332 -0
  23. skill_seekers/cli/enhance_skill.py +488 -0
  24. skill_seekers/cli/enhance_skill_local.py +1096 -0
  25. skill_seekers/cli/enhance_status.py +194 -0
  26. skill_seekers/cli/estimate_pages.py +433 -0
  27. skill_seekers/cli/generate_router.py +1209 -0
  28. skill_seekers/cli/github_fetcher.py +534 -0
  29. skill_seekers/cli/github_scraper.py +1466 -0
  30. skill_seekers/cli/guide_enhancer.py +723 -0
  31. skill_seekers/cli/how_to_guide_builder.py +1267 -0
  32. skill_seekers/cli/install_agent.py +461 -0
  33. skill_seekers/cli/install_skill.py +178 -0
  34. skill_seekers/cli/language_detector.py +614 -0
  35. skill_seekers/cli/llms_txt_detector.py +60 -0
  36. skill_seekers/cli/llms_txt_downloader.py +104 -0
  37. skill_seekers/cli/llms_txt_parser.py +150 -0
  38. skill_seekers/cli/main.py +558 -0
  39. skill_seekers/cli/markdown_cleaner.py +132 -0
  40. skill_seekers/cli/merge_sources.py +806 -0
  41. skill_seekers/cli/package_multi.py +77 -0
  42. skill_seekers/cli/package_skill.py +241 -0
  43. skill_seekers/cli/pattern_recognizer.py +1825 -0
  44. skill_seekers/cli/pdf_extractor_poc.py +1166 -0
  45. skill_seekers/cli/pdf_scraper.py +617 -0
  46. skill_seekers/cli/quality_checker.py +519 -0
  47. skill_seekers/cli/rate_limit_handler.py +438 -0
  48. skill_seekers/cli/resume_command.py +160 -0
  49. skill_seekers/cli/run_tests.py +230 -0
  50. skill_seekers/cli/setup_wizard.py +93 -0
  51. skill_seekers/cli/split_config.py +390 -0
  52. skill_seekers/cli/swift_patterns.py +560 -0
  53. skill_seekers/cli/test_example_extractor.py +1081 -0
  54. skill_seekers/cli/test_unified_simple.py +179 -0
  55. skill_seekers/cli/unified_codebase_analyzer.py +572 -0
  56. skill_seekers/cli/unified_scraper.py +932 -0
  57. skill_seekers/cli/unified_skill_builder.py +1605 -0
  58. skill_seekers/cli/upload_skill.py +162 -0
  59. skill_seekers/cli/utils.py +432 -0
  60. skill_seekers/mcp/__init__.py +33 -0
  61. skill_seekers/mcp/agent_detector.py +316 -0
  62. skill_seekers/mcp/git_repo.py +273 -0
  63. skill_seekers/mcp/server.py +231 -0
  64. skill_seekers/mcp/server_fastmcp.py +1249 -0
  65. skill_seekers/mcp/server_legacy.py +2302 -0
  66. skill_seekers/mcp/source_manager.py +285 -0
  67. skill_seekers/mcp/tools/__init__.py +115 -0
  68. skill_seekers/mcp/tools/config_tools.py +251 -0
  69. skill_seekers/mcp/tools/packaging_tools.py +826 -0
  70. skill_seekers/mcp/tools/scraping_tools.py +842 -0
  71. skill_seekers/mcp/tools/source_tools.py +828 -0
  72. skill_seekers/mcp/tools/splitting_tools.py +212 -0
  73. skill_seekers/py.typed +0 -0
  74. skill_seekers-2.7.3.dist-info/METADATA +2027 -0
  75. skill_seekers-2.7.3.dist-info/RECORD +79 -0
  76. skill_seekers-2.7.3.dist-info/WHEEL +5 -0
  77. skill_seekers-2.7.3.dist-info/entry_points.txt +19 -0
  78. skill_seekers-2.7.3.dist-info/licenses/LICENSE +21 -0
  79. skill_seekers-2.7.3.dist-info/top_level.txt +1 -0
skill_seekers/mcp/tools/packaging_tools.py (new file)
@@ -0,0 +1,826 @@
+ """
+ Packaging tools for MCP server.
+
+ This module contains tools for packaging, uploading, and installing skills.
+ Extracted from server.py for better modularity.
+ """
+
+ import json
+ import os
+ import re
+ import subprocess
+ import sys
+ import time
+ from pathlib import Path
+
+ try:
+     from mcp.types import TextContent
+ except ImportError:
+     # Graceful degradation: Create a simple fallback class for testing
+     class TextContent:
+         """Fallback TextContent for when MCP is not installed"""
+
+         def __init__(self, type: str, text: str):
+             self.type = type
+             self.text = text
+
+
+ # Path to CLI tools
+ CLI_DIR = Path(__file__).parent.parent.parent / "cli"
+
+
+ def run_subprocess_with_streaming(cmd: list[str], timeout: int = None) -> tuple[str, str, int]:
+     """
+     Run subprocess with real-time output streaming.
+
+     This solves the blocking issue where long-running processes (like scraping)
+     would cause MCP to appear frozen. Now we stream output as it comes.
+
+     Args:
+         cmd: Command to run as list of strings
+         timeout: Maximum time to wait in seconds (None for no timeout)
+
+     Returns:
+         Tuple of (stdout, stderr, returncode)
+     """
+     try:
+         process = subprocess.Popen(
+             cmd,
+             stdout=subprocess.PIPE,
+             stderr=subprocess.PIPE,
+             text=True,
+             bufsize=1,  # Line buffered
+             universal_newlines=True,
+         )
+
+         stdout_lines = []
+         stderr_lines = []
+         start_time = time.time()
+
+         # Read output line by line as it comes
+         while True:
+             # Check timeout
+             if timeout and (time.time() - start_time) > timeout:
+                 process.kill()
+                 stderr_lines.append(f"\n⚠️ Process killed after {timeout}s timeout")
+                 break
+
+             # Check if process finished
+             if process.poll() is not None:
+                 break
+
+             # Read available output (non-blocking)
+             try:
+                 import select
+
+                 readable, _, _ = select.select([process.stdout, process.stderr], [], [], 0.1)
+
+                 if process.stdout in readable:
+                     line = process.stdout.readline()
+                     if line:
+                         stdout_lines.append(line)
+
+                 if process.stderr in readable:
+                     line = process.stderr.readline()
+                     if line:
+                         stderr_lines.append(line)
+             except Exception:
+                 # Fallback for Windows (no select)
+                 time.sleep(0.1)
+
+         # Get any remaining output
+         remaining_stdout, remaining_stderr = process.communicate()
+         if remaining_stdout:
+             stdout_lines.append(remaining_stdout)
+         if remaining_stderr:
+             stderr_lines.append(remaining_stderr)
+
+         stdout = "".join(stdout_lines)
+         stderr = "".join(stderr_lines)
+         returncode = process.returncode
+
+         return stdout, stderr, returncode
+
+     except Exception as e:
+         return "", f"Error running subprocess: {str(e)}", 1
+
+
+ async def package_skill_tool(args: dict) -> list[TextContent]:
+     """
+     Package skill for target LLM platform and optionally auto-upload.
+
+     Args:
+         args: Dictionary with:
+             - skill_dir (str): Path to skill directory (e.g., output/react/)
+             - auto_upload (bool): Try to upload automatically if API key is available (default: True)
+             - target (str): Target platform (default: 'claude')
+               Options: 'claude', 'gemini', 'openai', 'markdown'
+
+     Returns:
+         List of TextContent with packaging results
+     """
+     from skill_seekers.cli.adaptors import get_adaptor
+
+     skill_dir = args["skill_dir"]
+     auto_upload = args.get("auto_upload", True)
+     target = args.get("target", "claude")
+
+     # Get platform adaptor
+     try:
+         adaptor = get_adaptor(target)
+     except ValueError as e:
+         return [
+             TextContent(
+                 type="text",
+                 text=f"❌ Invalid platform: {str(e)}\n\nSupported platforms: claude, gemini, openai, markdown",
+             )
+         ]
+
+     # Check if platform-specific API key exists - only upload if available
+     env_var_name = adaptor.get_env_var_name()
+     has_api_key = os.environ.get(env_var_name, "").strip() if env_var_name else False
+     should_upload = auto_upload and has_api_key
+
+     # Run package_skill.py with target parameter
+     cmd = [
+         sys.executable,
+         str(CLI_DIR / "package_skill.py"),
+         skill_dir,
+         "--no-open",  # Don't open folder in MCP context
+         "--skip-quality-check",  # Skip interactive quality checks in MCP context
+         "--target",
+         target,  # Add target platform
+     ]
+
+     # Add upload flag only if we have API key
+     if should_upload:
+         cmd.append("--upload")
+
+     # Timeout: 5 minutes for packaging + upload
+     timeout = 300
+
+     progress_msg = f"📦 Packaging skill for {adaptor.PLATFORM_NAME}...\n"
+     if should_upload:
+         progress_msg += f"📤 Will auto-upload to {adaptor.PLATFORM_NAME} if successful\n"
+     progress_msg += f"⏱️ Maximum time: {timeout // 60} minutes\n\n"
+
+     stdout, stderr, returncode = run_subprocess_with_streaming(cmd, timeout=timeout)
+
+     output = progress_msg + stdout
+
+     if returncode == 0:
+         if should_upload:
+             # Upload succeeded
+             output += f"\n\n✅ Skill packaged and uploaded to {adaptor.PLATFORM_NAME}!"
+             if target == "claude":
+                 output += "\n Your skill is now available in Claude!"
+                 output += "\n Go to https://claude.ai/skills to use it"
+             elif target == "gemini":
+                 output += "\n Your skill is now available in Gemini!"
+                 output += "\n Go to https://aistudio.google.com/ to use it"
+             elif target == "openai":
+                 output += "\n Your assistant is now available in OpenAI!"
+                 output += "\n Go to https://platform.openai.com/assistants/ to use it"
+         elif auto_upload and not has_api_key:
+             # User wanted upload but no API key
+             output += f"\n\n📝 Skill packaged successfully for {adaptor.PLATFORM_NAME}!"
+             output += "\n"
+             output += "\n💡 To enable automatic upload:"
+             if target == "claude":
+                 output += "\n 1. Get API key from https://console.anthropic.com/"
+                 output += "\n 2. Set: export ANTHROPIC_API_KEY=sk-ant-..."
+                 output += "\n\n📤 Manual upload:"
+                 output += "\n 1. Find the .zip file in your output/ folder"
+                 output += "\n 2. Go to https://claude.ai/skills"
+                 output += "\n 3. Click 'Upload Skill' and select the .zip file"
+             elif target == "gemini":
+                 output += "\n 1. Get API key from https://aistudio.google.com/"
+                 output += "\n 2. Set: export GOOGLE_API_KEY=AIza..."
+                 output += "\n\n📤 Manual upload:"
+                 output += "\n 1. Go to https://aistudio.google.com/"
+                 output += "\n 2. Upload the .tar.gz file from your output/ folder"
+             elif target == "openai":
+                 output += "\n 1. Get API key from https://platform.openai.com/"
+                 output += "\n 2. Set: export OPENAI_API_KEY=sk-proj-..."
+                 output += "\n\n📤 Manual upload:"
+                 output += "\n 1. Use OpenAI Assistants API"
+                 output += "\n 2. Upload the .zip file from your output/ folder"
+             elif target == "markdown":
+                 output += "\n (No API key needed - markdown is export only)"
+                 output += "\n Package created for manual distribution"
+         else:
+             # auto_upload=False, just packaged
+             output += f"\n\n✅ Skill packaged successfully for {adaptor.PLATFORM_NAME}!"
+             if target == "claude":
+                 output += "\n Upload manually to https://claude.ai/skills"
+             elif target == "gemini":
+                 output += "\n Upload manually to https://aistudio.google.com/"
+             elif target == "openai":
+                 output += "\n Upload manually via OpenAI Assistants API"
+             elif target == "markdown":
+                 output += "\n Package ready for manual distribution"
+
+         return [TextContent(type="text", text=output)]
+     else:
+         return [TextContent(type="text", text=f"{output}\n\n❌ Error:\n{stderr}")]
+
+
+ async def upload_skill_tool(args: dict) -> list[TextContent]:
+     """
+     Upload skill package to target LLM platform.
+
+     Args:
+         args: Dictionary with:
+             - skill_zip (str): Path to skill package (.zip or .tar.gz)
+             - target (str): Target platform (default: 'claude')
+               Options: 'claude', 'gemini', 'openai'
+               Note: 'markdown' does not support upload
+             - api_key (str, optional): API key (uses env var if not provided)
+
+     Returns:
+         List of TextContent with upload results
+     """
+     from skill_seekers.cli.adaptors import get_adaptor
+
+     skill_zip = args["skill_zip"]
+     target = args.get("target", "claude")
+     api_key = args.get("api_key")
+
+     # Get platform adaptor
+     try:
+         adaptor = get_adaptor(target)
+     except ValueError as e:
+         return [
+             TextContent(
+                 type="text",
+                 text=f"❌ Invalid platform: {str(e)}\n\nSupported platforms: claude, gemini, openai",
+             )
+         ]
+
+     # Check if upload is supported
+     if target == "markdown":
+         return [
+             TextContent(
+                 type="text",
+                 text="❌ Markdown export does not support upload. Use the packaged file manually.",
+             )
+         ]
+
+     # Run upload_skill.py with target parameter
+     cmd = [sys.executable, str(CLI_DIR / "upload_skill.py"), skill_zip, "--target", target]
+
+     # Add API key if provided
+     if api_key:
+         cmd.extend(["--api-key", api_key])
+
+     # Timeout: 5 minutes for upload
+     timeout = 300
+
+     progress_msg = f"📤 Uploading skill to {adaptor.PLATFORM_NAME}...\n"
+     progress_msg += f"⏱️ Maximum time: {timeout // 60} minutes\n\n"
+
+     stdout, stderr, returncode = run_subprocess_with_streaming(cmd, timeout=timeout)
+
+     output = progress_msg + stdout
+
+     if returncode == 0:
+         return [TextContent(type="text", text=output)]
+     else:
+         return [TextContent(type="text", text=f"{output}\n\n❌ Error:\n{stderr}")]
+
+
+ async def enhance_skill_tool(args: dict) -> list[TextContent]:
+     """
+     Enhance SKILL.md with AI using target platform's model.
+
+     Args:
+         args: Dictionary with:
+             - skill_dir (str): Path to skill directory
+             - target (str): Target platform (default: 'claude')
+               Options: 'claude', 'gemini', 'openai'
+               Note: 'markdown' does not support enhancement
+             - mode (str): Enhancement mode (default: 'local')
+               'local': Uses Claude Code Max (no API key)
+               'api': Uses platform API (requires API key)
+             - api_key (str, optional): API key for 'api' mode
+
+     Returns:
+         List of TextContent with enhancement results
+     """
+     from skill_seekers.cli.adaptors import get_adaptor
+
+     skill_dir = Path(args.get("skill_dir"))
+     target = args.get("target", "claude")
+     mode = args.get("mode", "local")
+     api_key = args.get("api_key")
+
+     # Validate skill directory
+     if not skill_dir.exists():
+         return [TextContent(type="text", text=f"❌ Skill directory not found: {skill_dir}")]
+
+     if not (skill_dir / "SKILL.md").exists():
+         return [TextContent(type="text", text=f"❌ SKILL.md not found in {skill_dir}")]
+
+     # Get platform adaptor
+     try:
+         adaptor = get_adaptor(target)
+     except ValueError as e:
+         return [
+             TextContent(
+                 type="text",
+                 text=f"❌ Invalid platform: {str(e)}\n\nSupported platforms: claude, gemini, openai",
+             )
+         ]
+
+     # Check if enhancement is supported
+     if not adaptor.supports_enhancement():
+         return [
+             TextContent(
+                 type="text", text=f"❌ {adaptor.PLATFORM_NAME} does not support AI enhancement"
+             )
+         ]
+
+     output_lines = []
+     output_lines.append(f"🚀 Enhancing skill with {adaptor.PLATFORM_NAME}")
+     output_lines.append("-" * 70)
+     output_lines.append(f"Skill directory: {skill_dir}")
+     output_lines.append(f"Mode: {mode}")
+     output_lines.append("")
+
+     if mode == "local":
+         # Use local enhancement (Claude Code)
+         output_lines.append("Using Claude Code Max (local, no API key required)")
+         output_lines.append("Running enhancement in headless mode...")
+         output_lines.append("")
+
+         cmd = [sys.executable, str(CLI_DIR / "enhance_skill_local.py"), str(skill_dir)]
+
+         try:
+             stdout, stderr, returncode = run_subprocess_with_streaming(cmd, timeout=900)
+
+             if returncode == 0:
+                 output_lines.append(stdout)
+                 output_lines.append("")
+                 output_lines.append("✅ Enhancement complete!")
+                 output_lines.append(f"Enhanced SKILL.md: {skill_dir / 'SKILL.md'}")
+                 output_lines.append(f"Backup: {skill_dir / 'SKILL.md.backup'}")
+             else:
+                 output_lines.append(f"❌ Enhancement failed (exit code {returncode})")
+                 output_lines.append(stderr if stderr else stdout)
+
+         except Exception as e:
+             output_lines.append(f"❌ Error: {str(e)}")
+
+     elif mode == "api":
+         # Use API enhancement
+         output_lines.append(f"Using {adaptor.PLATFORM_NAME} API")
+
+         # Get API key
+         if not api_key:
+             env_var = adaptor.get_env_var_name()
+             api_key = os.environ.get(env_var)
+
+             if not api_key:
+                 return [
+                     TextContent(
+                         type="text",
+                         text=f"❌ {env_var} not set. Set API key or pass via api_key parameter.",
+                     )
+                 ]
+
+         # Validate API key
+         if not adaptor.validate_api_key(api_key):
+             return [
+                 TextContent(
+                     type="text", text=f"❌ Invalid API key format for {adaptor.PLATFORM_NAME}"
+                 )
+             ]
+
+         output_lines.append("Calling API for enhancement...")
+         output_lines.append("")
+
+         try:
+             success = adaptor.enhance(skill_dir, api_key)
+
+             if success:
+                 output_lines.append("✅ Enhancement complete!")
+                 output_lines.append(f"Enhanced SKILL.md: {skill_dir / 'SKILL.md'}")
+                 output_lines.append(f"Backup: {skill_dir / 'SKILL.md.backup'}")
+             else:
+                 output_lines.append("❌ Enhancement failed")
+
+         except Exception as e:
+             output_lines.append(f"❌ Error: {str(e)}")
+
+     else:
+         return [TextContent(type="text", text=f"❌ Invalid mode: {mode}. Use 'local' or 'api'")]
+
+     return [TextContent(type="text", text="\n".join(output_lines))]
+
+
+ async def install_skill_tool(args: dict) -> list[TextContent]:
+     """
+     Complete skill installation workflow.
+
+     Orchestrates the complete workflow:
+     1. Fetch config (if config_name provided)
+     2. Scrape documentation
+     3. AI Enhancement (MANDATORY - no skip option)
+     4. Package for target platform (ZIP or tar.gz)
+     5. Upload to target platform (optional)
+
+     Args:
+         args: Dictionary with:
+             - config_name (str, optional): Config to fetch from API (mutually exclusive with config_path)
+             - config_path (str, optional): Path to existing config (mutually exclusive with config_name)
+             - destination (str): Output directory (default: "output")
+             - auto_upload (bool): Upload after packaging (default: True)
+             - unlimited (bool): Remove page limits (default: False)
+             - dry_run (bool): Preview only (default: False)
+             - target (str): Target LLM platform (default: "claude")
+
+     Returns:
+         List of TextContent with workflow progress and results
+     """
+     # Import these here to avoid circular imports
+     from skill_seekers.cli.adaptors import get_adaptor
+
+     from .scraping_tools import scrape_docs_tool
+     from .source_tools import fetch_config_tool
+
+     # Extract and validate inputs
+     config_name = args.get("config_name")
+     config_path = args.get("config_path")
+     destination = args.get("destination", "output")
+     auto_upload = args.get("auto_upload", True)
+     unlimited = args.get("unlimited", False)
+     dry_run = args.get("dry_run", False)
+     target = args.get("target", "claude")
+
+     # Get platform adaptor
+     try:
+         adaptor = get_adaptor(target)
+     except ValueError as e:
+         return [
+             TextContent(
+                 type="text",
+                 text=f"❌ Error: {str(e)}\n\nSupported platforms: claude, gemini, openai, markdown",
+             )
+         ]
+
+     # Validation: Must provide exactly one of config_name or config_path
+     if not config_name and not config_path:
+         return [
+             TextContent(
+                 type="text",
+                 text="❌ Error: Must provide either config_name or config_path\n\nExamples:\n install_skill(config_name='react')\n install_skill(config_path='configs/custom.json')",
+             )
+         ]
+
+     if config_name and config_path:
+         return [
+             TextContent(
+                 type="text",
+                 text="❌ Error: Cannot provide both config_name and config_path\n\nChoose one:\n - config_name: Fetch from API (e.g., 'react')\n - config_path: Use existing file (e.g., 'configs/custom.json')",
+             )
+         ]
+
+     # Initialize output
+     output_lines = []
+     output_lines.append("🚀 SKILL INSTALLATION WORKFLOW")
+     output_lines.append("=" * 70)
+     output_lines.append("")
+
+     if dry_run:
+         output_lines.append("🔍 DRY RUN MODE - Preview only, no actions taken")
+         output_lines.append("")
+
+     # Track workflow state
+     workflow_state = {
+         "config_path": config_path,
+         "skill_name": None,
+         "skill_dir": None,
+         "zip_path": None,
+         "phases_completed": [],
+     }
+
+     try:
+         # ===== PHASE 1: Fetch Config (if needed) =====
+         if config_name:
+             output_lines.append("📥 PHASE 1/5: Fetch Config")
+             output_lines.append("-" * 70)
+             output_lines.append(f"Config: {config_name}")
+             output_lines.append(f"Destination: {destination}/")
+             output_lines.append("")
+
+             if not dry_run:
+                 # Call fetch_config_tool directly
+                 fetch_result = await fetch_config_tool(
+                     {"config_name": config_name, "destination": destination}
+                 )
+
+                 # Parse result to extract config path
+                 fetch_output = fetch_result[0].text
+                 output_lines.append(fetch_output)
+                 output_lines.append("")
+
+                 # Extract config path from output
+                 # Expected format: "📂 Saved to: configs/react.json"
+                 match = re.search(r"(?i)saved to:\s*(.+\.json)", fetch_output)
+                 if match:
+                     workflow_state["config_path"] = match.group(1).strip()
+                     output_lines.append(f"✅ Config fetched: {workflow_state['config_path']}")
+                 else:
+                     return [
+                         TextContent(
+                             type="text",
+                             text="\n".join(output_lines) + "\n\n❌ Failed to fetch config",
+                         )
+                     ]
+
+                 workflow_state["phases_completed"].append("fetch_config")
+             else:
+                 output_lines.append(" [DRY RUN] Would fetch config from API")
+                 workflow_state["config_path"] = f"{destination}/{config_name}.json"
+
+         output_lines.append("")
+
+         # ===== PHASE 2: Scrape Documentation =====
+         phase_num = "2/5" if config_name else "1/4"
+         output_lines.append(f"📄 PHASE {phase_num}: Scrape Documentation")
+         output_lines.append("-" * 70)
+         output_lines.append(f"Config: {workflow_state['config_path']}")
+         output_lines.append(f"Unlimited mode: {unlimited}")
+         output_lines.append("")
+
+         if not dry_run:
+             # Load config to get skill name
+             try:
+                 with open(workflow_state["config_path"]) as f:
+                     config = json.load(f)
+                 workflow_state["skill_name"] = config.get("name", "unknown")
+             except Exception as e:
+                 return [
+                     TextContent(
+                         type="text",
+                         text="\n".join(output_lines) + f"\n\n❌ Failed to read config: {str(e)}",
+                     )
+                 ]
+
+             # Call scrape_docs_tool (does NOT include enhancement)
+             output_lines.append("Scraping documentation (this may take 20-45 minutes)...")
+             output_lines.append("")
+
+             scrape_result = await scrape_docs_tool(
+                 {
+                     "config_path": workflow_state["config_path"],
+                     "unlimited": unlimited,
+                     "enhance_local": False,  # Enhancement is separate phase
+                     "skip_scrape": False,
+                     "dry_run": False,
+                 }
+             )
+
+             scrape_output = scrape_result[0].text
+             output_lines.append(scrape_output)
+             output_lines.append("")
+
+             # Check for success
+             if "❌" in scrape_output:
+                 return [
+                     TextContent(
+                         type="text",
+                         text="\n".join(output_lines) + "\n\n❌ Scraping failed - see error above",
+                     )
+                 ]
+
+             workflow_state["skill_dir"] = f"{destination}/{workflow_state['skill_name']}"
+             workflow_state["phases_completed"].append("scrape_docs")
+         else:
+             output_lines.append(" [DRY RUN] Would scrape documentation")
+             workflow_state["skill_name"] = "example"
+             workflow_state["skill_dir"] = f"{destination}/example"
+
+         output_lines.append("")
+
+         # ===== PHASE 3: AI Enhancement (MANDATORY) =====
+         phase_num = "3/5" if config_name else "2/4"
+         output_lines.append(f"✨ PHASE {phase_num}: AI Enhancement (MANDATORY)")
+         output_lines.append("-" * 70)
+         output_lines.append("⚠️ Enhancement is REQUIRED for quality (3/10→9/10 boost)")
+         output_lines.append(f"Skill directory: {workflow_state['skill_dir']}")
+         output_lines.append("Mode: Headless (runs in background)")
+         output_lines.append("Estimated time: 30-60 seconds")
+         output_lines.append("")
+
+         if not dry_run:
+             # Run enhance_skill_local in headless mode
+             # Build command directly
+             cmd = [
+                 sys.executable,
+                 str(CLI_DIR / "enhance_skill_local.py"),
+                 workflow_state["skill_dir"],
+                 # Headless is default, no flag needed
+             ]
+
+             timeout = 900  # 15 minutes max for enhancement
+
+             output_lines.append("Running AI enhancement...")
+
+             stdout, stderr, returncode = run_subprocess_with_streaming(cmd, timeout=timeout)
+
+             if returncode != 0:
+                 output_lines.append(f"\n❌ Enhancement failed (exit code {returncode}):")
+                 output_lines.append(stderr if stderr else stdout)
+                 return [TextContent(type="text", text="\n".join(output_lines))]
+
+             output_lines.append(stdout)
+             workflow_state["phases_completed"].append("enhance_skill")
+         else:
+             output_lines.append(" [DRY RUN] Would enhance SKILL.md with Claude Code")
+
+         output_lines.append("")
+
+         # ===== PHASE 4: Package Skill =====
+         phase_num = "4/5" if config_name else "3/4"
+         output_lines.append(f"📦 PHASE {phase_num}: Package Skill for {adaptor.PLATFORM_NAME}")
+         output_lines.append("-" * 70)
+         output_lines.append(f"Skill directory: {workflow_state['skill_dir']}")
+         output_lines.append(f"Target platform: {adaptor.PLATFORM_NAME}")
+         output_lines.append("")
+
+         if not dry_run:
+             # Call package_skill_tool with target
+             package_result = await package_skill_tool(
+                 {
+                     "skill_dir": workflow_state["skill_dir"],
+                     "auto_upload": False,  # We handle upload in next phase
+                     "target": target,
+                 }
+             )
+
+             package_output = package_result[0].text
+             output_lines.append(package_output)
+             output_lines.append("")
+
+             # Extract package path from output (supports .zip and .tar.gz)
+             # Expected format: "Saved to: output/react.zip" or "Saved to: output/react-gemini.tar.gz"
+             match = re.search(r"(?i)saved to:\s*(.+\.(?:zip|tar\.gz))", package_output)
+             if match:
+                 workflow_state["zip_path"] = match.group(1).strip()
+             else:
+                 # Fallback: construct package path based on platform
+                 if target == "gemini":
+                     workflow_state["zip_path"] = (
+                         f"{destination}/{workflow_state['skill_name']}-gemini.tar.gz"
+                     )
+                 elif target == "openai":
+                     workflow_state["zip_path"] = (
+                         f"{destination}/{workflow_state['skill_name']}-openai.zip"
+                     )
+                 else:
+                     workflow_state["zip_path"] = f"{destination}/{workflow_state['skill_name']}.zip"
+
+             workflow_state["phases_completed"].append("package_skill")
+         else:
+             # Dry run - show expected package format
+             if target == "gemini":
+                 pkg_ext = "tar.gz"
+                 pkg_file = f"{destination}/{workflow_state['skill_name']}-gemini.tar.gz"
+             elif target == "openai":
+                 pkg_ext = "zip"
+                 pkg_file = f"{destination}/{workflow_state['skill_name']}-openai.zip"
+             else:
+                 pkg_ext = "zip"
+                 pkg_file = f"{destination}/{workflow_state['skill_name']}.zip"
+
+             output_lines.append(
+                 f" [DRY RUN] Would package to {pkg_ext} file for {adaptor.PLATFORM_NAME}"
+             )
+             workflow_state["zip_path"] = pkg_file
+
+         output_lines.append("")
+
+         # ===== PHASE 5: Upload (Optional) =====
+         if auto_upload:
+             phase_num = "5/5" if config_name else "4/4"
+             output_lines.append(f"📤 PHASE {phase_num}: Upload to {adaptor.PLATFORM_NAME}")
+             output_lines.append("-" * 70)
+             output_lines.append(f"Package file: {workflow_state['zip_path']}")
+             output_lines.append("")
+
+             # Check for platform-specific API key
+             env_var_name = adaptor.get_env_var_name()
+             has_api_key = os.environ.get(env_var_name, "").strip()
+
+             if not dry_run:
+                 if has_api_key:
+                     # Upload not supported for markdown platform
+                     if target == "markdown":
+                         output_lines.append("⚠️ Markdown export does not support upload")
+                         output_lines.append(" Package has been created - use manually")
+                     else:
+                         # Call upload_skill_tool with target
+                         upload_result = await upload_skill_tool(
+                             {"skill_zip": workflow_state["zip_path"], "target": target}
+                         )
+
+                         upload_output = upload_result[0].text
+                         output_lines.append(upload_output)
+
+                     workflow_state["phases_completed"].append("upload_skill")
+                 else:
+                     # Platform-specific instructions for missing API key
+                     output_lines.append(f"⚠️ {env_var_name} not set - skipping upload")
+                     output_lines.append("")
+                     output_lines.append("To enable automatic upload:")
+
+                     if target == "claude":
+                         output_lines.append(" 1. Get API key from https://console.anthropic.com/")
+                         output_lines.append(" 2. Set: export ANTHROPIC_API_KEY=sk-ant-...")
+                         output_lines.append("")
+                         output_lines.append("📤 Manual upload:")
+                         output_lines.append(" 1. Go to https://claude.ai/skills")
+                         output_lines.append(" 2. Click 'Upload Skill'")
+                         output_lines.append(f" 3. Select: {workflow_state['zip_path']}")
+                     elif target == "gemini":
+                         output_lines.append(" 1. Get API key from https://aistudio.google.com/")
+                         output_lines.append(" 2. Set: export GOOGLE_API_KEY=AIza...")
+                         output_lines.append("")
+                         output_lines.append("📤 Manual upload:")
+                         output_lines.append(" 1. Go to https://aistudio.google.com/")
+                         output_lines.append(f" 2. Upload package: {workflow_state['zip_path']}")
+                     elif target == "openai":
+                         output_lines.append(" 1. Get API key from https://platform.openai.com/")
+                         output_lines.append(" 2. Set: export OPENAI_API_KEY=sk-proj-...")
+                         output_lines.append("")
+                         output_lines.append("📤 Manual upload:")
+                         output_lines.append(" 1. Use OpenAI Assistants API")
+                         output_lines.append(f" 2. Upload package: {workflow_state['zip_path']}")
+                     elif target == "markdown":
+                         output_lines.append(" (No API key needed - markdown is export only)")
+                         output_lines.append(f" Package created: {workflow_state['zip_path']}")
+             else:
+                 output_lines.append(
+                     f" [DRY RUN] Would upload to {adaptor.PLATFORM_NAME} (if API key set)"
+                 )
+
+         output_lines.append("")
+
+         # ===== WORKFLOW SUMMARY =====
+         output_lines.append("=" * 70)
+         output_lines.append("✅ WORKFLOW COMPLETE")
+         output_lines.append("=" * 70)
+         output_lines.append("")
+
+         if not dry_run:
+             output_lines.append("Phases completed:")
+             for phase in workflow_state["phases_completed"]:
+                 output_lines.append(f" ✓ {phase}")
+             output_lines.append("")
+
+             output_lines.append("📁 Output:")
+             output_lines.append(f" Skill directory: {workflow_state['skill_dir']}")
+             if workflow_state["zip_path"]:
+                 output_lines.append(f" Skill package: {workflow_state['zip_path']}")
+             output_lines.append("")
+
+             if auto_upload and has_api_key and target != "markdown":
+                 # Platform-specific success message
+                 if target == "claude":
+                     output_lines.append("🎉 Your skill is now available in Claude!")
+                     output_lines.append(" Go to https://claude.ai/skills to use it")
+                 elif target == "gemini":
+                     output_lines.append("🎉 Your skill is now available in Gemini!")
+                     output_lines.append(" Go to https://aistudio.google.com/ to use it")
+                 elif target == "openai":
+                     output_lines.append("🎉 Your assistant is now available in OpenAI!")
+                     output_lines.append(
+                         " Go to https://platform.openai.com/assistants/ to use it"
+                     )
+             elif auto_upload:
+                 output_lines.append("📝 Manual upload required (see instructions above)")
+             else:
+                 output_lines.append("📤 To upload:")
+                 output_lines.append(
+                     f" skill-seekers upload {workflow_state['zip_path']} --target {target}"
+                 )
+         else:
+             output_lines.append("This was a dry run. No actions were taken.")
+             output_lines.append("")
+             output_lines.append("To execute for real, remove the --dry-run flag:")
+             if config_name:
+                 output_lines.append(f" install_skill(config_name='{config_name}')")
+             else:
+                 output_lines.append(f" install_skill(config_path='{config_path}')")
+
+         return [TextContent(type="text", text="\n".join(output_lines))]
+
+     except Exception as e:
+         output_lines.append("")
+         output_lines.append(f"❌ Workflow failed: {str(e)}")
+         output_lines.append("")
+         output_lines.append("Phases completed before failure:")
+         for phase in workflow_state["phases_completed"]:
+             output_lines.append(f" ✓ {phase}")
+         return [TextContent(type="text", text="\n".join(output_lines))]
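For orientation, the sketch below (not part of the package) shows how one of these tools could be called directly from Python, outside the MCP server. The module path and function name come from the diff above; the skill directory path and argument values are illustrative assumptions taken from the docstring examples.

# Sketch only: call the MCP packaging tool directly (assumes the wheel is installed).
import asyncio

from skill_seekers.mcp.tools.packaging_tools import package_skill_tool


async def main() -> None:
    # Package a hypothetical skill directory for Claude without attempting an upload.
    results = await package_skill_tool(
        {
            "skill_dir": "output/react",  # hypothetical path (docstring example)
            "auto_upload": False,         # no API key required when not uploading
            "target": "claude",           # or "gemini", "openai", "markdown"
        }
    )
    for item in results:
        print(item.text)


if __name__ == "__main__":
    asyncio.run(main())

The other tools in this module (upload_skill_tool, enhance_skill_tool, install_skill_tool) take the same kind of args dictionary, as described in their docstrings.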