iflow-mcp_anton-prosterity-documentation-search-enhanced 1.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26) hide show
  1. documentation_search_enhanced/__init__.py +14 -0
  2. documentation_search_enhanced/__main__.py +6 -0
  3. documentation_search_enhanced/config.json +1674 -0
  4. documentation_search_enhanced/config_manager.py +233 -0
  5. documentation_search_enhanced/config_validator.py +79 -0
  6. documentation_search_enhanced/content_enhancer.py +578 -0
  7. documentation_search_enhanced/docker_manager.py +87 -0
  8. documentation_search_enhanced/logger.py +179 -0
  9. documentation_search_enhanced/main.py +2170 -0
  10. documentation_search_enhanced/project_generator.py +260 -0
  11. documentation_search_enhanced/project_scanner.py +85 -0
  12. documentation_search_enhanced/reranker.py +230 -0
  13. documentation_search_enhanced/site_index_builder.py +274 -0
  14. documentation_search_enhanced/site_index_downloader.py +222 -0
  15. documentation_search_enhanced/site_search.py +1325 -0
  16. documentation_search_enhanced/smart_search.py +473 -0
  17. documentation_search_enhanced/snyk_integration.py +657 -0
  18. documentation_search_enhanced/vector_search.py +303 -0
  19. documentation_search_enhanced/version_resolver.py +189 -0
  20. documentation_search_enhanced/vulnerability_scanner.py +545 -0
  21. documentation_search_enhanced/web_scraper.py +117 -0
  22. iflow_mcp_anton_prosterity_documentation_search_enhanced-1.9.0.dist-info/METADATA +195 -0
  23. iflow_mcp_anton_prosterity_documentation_search_enhanced-1.9.0.dist-info/RECORD +26 -0
  24. iflow_mcp_anton_prosterity_documentation_search_enhanced-1.9.0.dist-info/WHEEL +4 -0
  25. iflow_mcp_anton_prosterity_documentation_search_enhanced-1.9.0.dist-info/entry_points.txt +2 -0
  26. iflow_mcp_anton_prosterity_documentation_search_enhanced-1.9.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,578 @@
1
+ """
2
+ Content enhancement features for documentation-search-enhanced MCP server.
3
+ Adds smart parsing, code extraction, version awareness, and contextual recommendations.
4
+ """
5
+
6
import os
import re
import sys
from dataclasses import dataclass, field
from datetime import datetime, timedelta, timezone
from typing import Any, Dict, List, Optional

import httpx
13
+
14
+
15
@dataclass
class CodeSnippet:
    """Represents an extracted code snippet"""

    language: str  # lower-cased language tag (e.g. "python", "text")
    code: str  # the snippet source text
    description: str  # human-readable summary of what the snippet shows
    line_number: Optional[int] = None  # source line, when known
    is_complete: bool = False  # heuristic: does this look runnable?
    # default_factory replaces the previous `imports: List[str] = None`,
    # which mis-declared the type and relied on __post_init__ to patch it.
    imports: List[str] = field(default_factory=list)

    def __post_init__(self):
        # Callers may still pass imports=None explicitly; normalize to [].
        if self.imports is None:
            self.imports = []
29
+
30
+
31
@dataclass
class DocumentationSection:
    """Represents a section of documentation"""

    title: str  # section heading (HTML tags already stripped by parse_sections)
    content: str  # raw body text of the section
    code_snippets: List[CodeSnippet]  # snippets extracted from the body
    cross_references: List[str]  # other libraries/tools mentioned in the body
    section_type: str  # "tutorial", "reference", "example", "guide"
    difficulty_level: str  # "beginner", "intermediate", "advanced"
41
+
42
+
43
# Optional external summarizer service. generate_summary() only calls it when
# BOTH variables are set; otherwise it falls back to a local extractive
# heuristic.
SUMMARIZER_ENDPOINT = os.getenv("SUMMARY_API_URL")
SUMMARIZER_KEY = os.getenv("SUMMARY_API_KEY")
45
+
46
+
47
class ContentEnhancer:
    """Enhances documentation content with smart parsing and features"""

    def __init__(self):
        # Per-library cache of (fetched_at, version_info) tuples; entries
        # expire after 24 hours (see get_version_info).
        self.version_cache = {}
        # Cache for cross-reference lookups; not read or written by any
        # method visible in this module — presumably reserved for future use.
        self.cross_ref_cache = {}
53
+
54
+ async def enhance_content(
55
+ self, content: str, library: str, query: str
56
+ ) -> Dict[str, Any]:
57
+ """Main content enhancement pipeline"""
58
+ enhanced = {
59
+ "original_content": content,
60
+ "library": library,
61
+ "query": query,
62
+ "enhanced_at": datetime.utcnow().isoformat(),
63
+ "enhancements": {},
64
+ }
65
+
66
+ # Extract and enhance code snippets
67
+ code_snippets = self.extract_code_snippets(content)
68
+ enhanced["enhancements"]["code_snippets"] = [
69
+ {
70
+ "language": snippet.language,
71
+ "code": snippet.code,
72
+ "description": snippet.description,
73
+ "is_complete": snippet.is_complete,
74
+ "imports": snippet.imports,
75
+ }
76
+ for snippet in code_snippets
77
+ ]
78
+
79
+ # Parse into structured sections
80
+ sections = self.parse_sections(content)
81
+ enhanced["enhancements"]["sections"] = [
82
+ {
83
+ "title": section.title,
84
+ "content": (
85
+ section.content[:500] + "..."
86
+ if len(section.content) > 500
87
+ else section.content
88
+ ),
89
+ "section_type": section.section_type,
90
+ "difficulty_level": section.difficulty_level,
91
+ "code_count": len(section.code_snippets),
92
+ }
93
+ for section in sections
94
+ ]
95
+
96
+ # Add contextual recommendations
97
+ enhanced["enhancements"][
98
+ "recommendations"
99
+ ] = await self.get_contextual_recommendations(library, query)
100
+
101
+ # Extract and resolve cross-references
102
+ enhanced["enhancements"]["cross_references"] = self.extract_cross_references(
103
+ content, library
104
+ )
105
+
106
+ # Add version information
107
+ enhanced["enhancements"]["version_info"] = await self.get_version_info(library)
108
+
109
+ # Generate quick summary
110
+ enhanced["enhancements"]["summary"] = self.generate_summary(content, query)
111
+
112
+ return enhanced
113
+
114
+ def extract_code_snippets(self, content: str) -> List[CodeSnippet]:
115
+ """Extract and analyze code snippets from content"""
116
+ snippets = []
117
+
118
+ # Patterns for different code block formats
119
+ patterns = [
120
+ r"```(\w+)?\n(.*?)```", # Markdown code blocks
121
+ r"<code[^>]*>(.*?)</code>", # HTML code tags
122
+ r"<pre[^>]*><code[^>]*>(.*?)</code></pre>", # HTML pre+code
123
+ r".. code-block:: (\w+)\n\n(.*?)(?=\n\S|\Z)", # reStructuredText
124
+ ]
125
+
126
+ for pattern in patterns:
127
+ matches = re.finditer(pattern, content, re.DOTALL | re.IGNORECASE)
128
+ for match in matches:
129
+ if len(match.groups()) == 2:
130
+ language = match.group(1) or "text"
131
+ code = match.group(2).strip()
132
+ else:
133
+ language = "text"
134
+ code = match.group(1).strip()
135
+
136
+ if len(code) > 10: # Filter out very short snippets
137
+ snippet = self.analyze_code_snippet(code, language)
138
+ snippets.append(snippet)
139
+
140
+ return snippets
141
+
142
+ def analyze_code_snippet(self, code: str, language: str) -> CodeSnippet:
143
+ """Analyze a code snippet for completeness and imports"""
144
+ description = self.generate_code_description(code, language)
145
+ imports = self.extract_imports(code, language)
146
+ is_complete = self.is_code_complete(code, language)
147
+
148
+ return CodeSnippet(
149
+ language=language.lower(),
150
+ code=code,
151
+ description=description,
152
+ is_complete=is_complete,
153
+ imports=imports,
154
+ )
155
+
156
+ def generate_code_description(self, code: str, language: str) -> str:
157
+ """Generate a description for a code snippet"""
158
+ # Common patterns and descriptions
159
+ patterns = {
160
+ # Python patterns
161
+ r"def\s+(\w+)": "Function definition: {}",
162
+ r"class\s+(\w+)": "Class definition: {}",
163
+ r"import\s+(\w+)": "Import: {}",
164
+ r"from\s+(\w+)\s+import": "Import from: {}",
165
+ r"@\w+": "Decorator usage",
166
+ r"async\s+def": "Async function definition",
167
+ r'if\s+__name__\s*==\s*["\']__main__["\']': "Main execution block",
168
+ # JavaScript patterns
169
+ r"function\s+(\w+)": "Function: {}",
170
+ r"const\s+(\w+)": "Constant: {}",
171
+ r"let\s+(\w+)": "Variable: {}",
172
+ r"export\s+": "Export statement",
173
+ r"import\s+.*from": "Import statement",
174
+ # FastAPI/web patterns
175
+ r"@app\.(get|post|put|delete)": "API endpoint definition",
176
+ r"FastAPI\(\)": "FastAPI application initialization",
177
+ r"app\s*=\s*FastAPI": "FastAPI app creation",
178
+ }
179
+
180
+ descriptions = []
181
+ for pattern, desc in patterns.items():
182
+ matches = re.finditer(pattern, code, re.IGNORECASE)
183
+ for match in matches:
184
+ if "{}" in desc and len(match.groups()) > 0:
185
+ descriptions.append(desc.format(match.group(1)))
186
+ else:
187
+ descriptions.append(desc)
188
+
189
+ if descriptions:
190
+ return "; ".join(descriptions[:3]) # Limit to first 3 descriptions
191
+ else:
192
+ return f"{language.title()} code snippet"
193
+
194
+ def extract_imports(self, code: str, language: str) -> List[str]:
195
+ """Extract import statements from code"""
196
+ imports = []
197
+
198
+ if language.lower() in ["python", "py"]:
199
+ # Python imports
200
+ import_patterns = [r"import\s+([^\s,\n]+)", r"from\s+([^\s,\n]+)\s+import"]
201
+ for pattern in import_patterns:
202
+ matches = re.finditer(pattern, code, re.MULTILINE)
203
+ imports.extend([match.group(1) for match in matches])
204
+
205
+ elif language.lower() in ["javascript", "js", "typescript", "ts"]:
206
+ # JavaScript/TypeScript imports
207
+ import_patterns = [
208
+ r'import\s+.*from\s+["\']([^"\']+)["\']',
209
+ r'require\(["\']([^"\']+)["\']\)',
210
+ ]
211
+ for pattern in import_patterns:
212
+ matches = re.finditer(pattern, code)
213
+ imports.extend([match.group(1) for match in matches])
214
+
215
+ return list(set(imports)) # Remove duplicates
216
+
217
+ def is_code_complete(self, code: str, language: str) -> bool:
218
+ """Determine if a code snippet is complete/runnable"""
219
+ code = code.strip()
220
+
221
+ # Check for common completeness indicators
222
+ completeness_indicators = {
223
+ "python": [
224
+ r'if\s+__name__\s*==\s*["\']__main__["\']', # Main block
225
+ r"def\s+\w+.*:\s*\n.*return", # Function with return
226
+ r"class\s+\w+.*:\s*\n.*def\s+__init__", # Class with constructor
227
+ ],
228
+ "javascript": [
229
+ r"function\s+\w+.*{.*}", # Complete function
230
+ r".*\.exports\s*=", # Module export
231
+ r"export\s+default", # ES6 export
232
+ ],
233
+ }
234
+
235
+ lang_key = language.lower()
236
+ if lang_key in completeness_indicators:
237
+ for pattern in completeness_indicators[lang_key]:
238
+ if re.search(pattern, code, re.DOTALL):
239
+ return True
240
+
241
+ # Basic completeness checks
242
+ if language.lower() in ["python", "py"]:
243
+ # Check for balanced brackets and basic structure
244
+ return (
245
+ code.count("(") == code.count(")")
246
+ and code.count("[") == code.count("]")
247
+ and code.count("{") == code.count("}")
248
+ and len(code.split("\n")) >= 3
249
+ )
250
+
251
+ return len(code) > 50 # Fallback: assume longer snippets are more complete
252
+
253
+ def parse_sections(self, content: str) -> List[DocumentationSection]:
254
+ """Parse content into structured sections"""
255
+ sections = []
256
+
257
+ # Split by headers (markdown and HTML)
258
+ header_patterns = [
259
+ r"^#{1,6}\s+(.+)$", # Markdown headers
260
+ r"<h[1-6][^>]*>(.*?)</h[1-6]>", # HTML headers
261
+ ]
262
+
263
+ current_section = ""
264
+ current_title = "Introduction"
265
+
266
+ lines = content.split("\n")
267
+ for line in lines:
268
+ is_header = False
269
+ for pattern in header_patterns:
270
+ match = re.match(pattern, line, re.IGNORECASE)
271
+ if match:
272
+ # Save previous section if it has content
273
+ if current_section.strip():
274
+ section = self.create_section(current_title, current_section)
275
+ sections.append(section)
276
+
277
+ # Start new section
278
+ current_title = re.sub(r"<[^>]+>", "", match.group(1)).strip()
279
+ current_section = ""
280
+ is_header = True
281
+ break
282
+
283
+ if not is_header:
284
+ current_section += line + "\n"
285
+
286
+ # Add final section
287
+ if current_section.strip():
288
+ section = self.create_section(current_title, current_section)
289
+ sections.append(section)
290
+
291
+ return sections
292
+
293
+ def create_section(self, title: str, content: str) -> DocumentationSection:
294
+ """Create a DocumentationSection with analysis"""
295
+ code_snippets = self.extract_code_snippets(content)
296
+ cross_refs = self.extract_cross_references(content, "")
297
+ section_type = self.classify_section_type(title, content)
298
+ difficulty = self.assess_difficulty(title, content, code_snippets)
299
+
300
+ return DocumentationSection(
301
+ title=title,
302
+ content=content,
303
+ code_snippets=code_snippets,
304
+ cross_references=cross_refs,
305
+ section_type=section_type,
306
+ difficulty_level=difficulty,
307
+ )
308
+
309
+ def classify_section_type(self, title: str, content: str) -> str:
310
+ """Classify the type of documentation section"""
311
+ title_lower = title.lower()
312
+ content_lower = content.lower()
313
+
314
+ # Classification patterns
315
+ if any(
316
+ word in title_lower for word in ["tutorial", "guide", "walkthrough", "step"]
317
+ ):
318
+ return "tutorial"
319
+ elif any(word in title_lower for word in ["example", "demo", "sample"]):
320
+ return "example"
321
+ elif any(word in title_lower for word in ["api", "reference", "documentation"]):
322
+ return "reference"
323
+ elif any(
324
+ word in content_lower
325
+ for word in ["first", "getting started", "quickstart", "introduction"]
326
+ ):
327
+ return "guide"
328
+ else:
329
+ return "guide" # Default
330
+
331
+ def assess_difficulty(
332
+ self, title: str, content: str, code_snippets: List[CodeSnippet]
333
+ ) -> str:
334
+ """Assess the difficulty level of a section"""
335
+ difficulty_score = 0
336
+
337
+ # Title indicators
338
+ title_lower = title.lower()
339
+ if any(
340
+ word in title_lower for word in ["advanced", "expert", "deep", "complex"]
341
+ ):
342
+ difficulty_score += 3
343
+ elif any(word in title_lower for word in ["intermediate", "moderate"]):
344
+ difficulty_score += 2
345
+ elif any(word in title_lower for word in ["basic", "simple", "intro", "quick"]):
346
+ difficulty_score += 1
347
+
348
+ # Content complexity indicators
349
+ content_lower = content.lower()
350
+
351
+ # Advanced concepts
352
+ advanced_terms = [
353
+ "async",
354
+ "concurrent",
355
+ "threading",
356
+ "multiprocessing",
357
+ "decorator",
358
+ "metaclass",
359
+ "inheritance",
360
+ "polymorphism",
361
+ "dependency injection",
362
+ ]
363
+ difficulty_score += (
364
+ sum(1 for term in advanced_terms if term in content_lower) * 0.5
365
+ )
366
+
367
+ # Code complexity
368
+ if code_snippets:
369
+ avg_code_length = sum(len(snippet.code) for snippet in code_snippets) / len(
370
+ code_snippets
371
+ )
372
+ if avg_code_length > 200:
373
+ difficulty_score += 2
374
+ elif avg_code_length > 100:
375
+ difficulty_score += 1
376
+
377
+ # Return difficulty level
378
+ if difficulty_score >= 4:
379
+ return "advanced"
380
+ elif difficulty_score >= 2:
381
+ return "intermediate"
382
+ else:
383
+ return "beginner"
384
+
385
+ def extract_cross_references(self, content: str, library: str) -> List[str]:
386
+ """Extract cross-references to other libraries or concepts"""
387
+ cross_refs = []
388
+
389
+ # Common library mentions
390
+ library_patterns = [
391
+ r"\b(fastapi|django|flask|express|react|vue|angular)\b",
392
+ r"\b(numpy|pandas|matplotlib|scikit-learn)\b",
393
+ r"\b(tensorflow|pytorch|keras)\b",
394
+ r"\b(docker|kubernetes|aws|azure|gcp)\b",
395
+ ]
396
+
397
+ for pattern in library_patterns:
398
+ matches = re.finditer(pattern, content, re.IGNORECASE)
399
+ cross_refs.extend([match.group(1).lower() for match in matches])
400
+
401
+ # Remove the current library from cross-references
402
+ cross_refs = [ref for ref in cross_refs if ref != library.lower()]
403
+
404
+ return list(set(cross_refs)) # Remove duplicates
405
+
406
+ async def get_contextual_recommendations(
407
+ self, library: str, query: str
408
+ ) -> List[Dict[str, str]]:
409
+ """Get contextual recommendations based on library and query"""
410
+ recommendations = []
411
+
412
+ # Library-specific recommendations
413
+ lib_recommendations = {
414
+ "fastapi": [
415
+ {
416
+ "type": "related_library",
417
+ "name": "pydantic",
418
+ "reason": "Data validation and settings",
419
+ },
420
+ {
421
+ "type": "related_library",
422
+ "name": "uvicorn",
423
+ "reason": "ASGI server for FastAPI",
424
+ },
425
+ {
426
+ "type": "concept",
427
+ "name": "async/await",
428
+ "reason": "Essential for FastAPI performance",
429
+ },
430
+ ],
431
+ "react": [
432
+ {
433
+ "type": "related_library",
434
+ "name": "typescript",
435
+ "reason": "Type safety for React applications",
436
+ },
437
+ {
438
+ "type": "related_library",
439
+ "name": "tailwind",
440
+ "reason": "Utility-first CSS framework",
441
+ },
442
+ {"type": "concept", "name": "hooks", "reason": "Modern React pattern"},
443
+ ],
444
+ "django": [
445
+ {
446
+ "type": "related_library",
447
+ "name": "django-rest-framework",
448
+ "reason": "API development",
449
+ },
450
+ {
451
+ "type": "related_library",
452
+ "name": "celery",
453
+ "reason": "Background tasks",
454
+ },
455
+ {"type": "concept", "name": "orm", "reason": "Database abstraction"},
456
+ ],
457
+ }
458
+
459
+ if library.lower() in lib_recommendations:
460
+ recommendations.extend(lib_recommendations[library.lower()])
461
+
462
+ # Query-specific recommendations
463
+ query_lower = query.lower()
464
+ if "auth" in query_lower:
465
+ recommendations.append(
466
+ {
467
+ "type": "security",
468
+ "name": "JWT tokens",
469
+ "reason": "Secure authentication method",
470
+ }
471
+ )
472
+ elif "database" in query_lower:
473
+ recommendations.append(
474
+ {
475
+ "type": "related_library",
476
+ "name": "sqlalchemy",
477
+ "reason": "Python SQL toolkit and ORM",
478
+ }
479
+ )
480
+ elif "api" in query_lower:
481
+ recommendations.append(
482
+ {
483
+ "type": "concept",
484
+ "name": "REST principles",
485
+ "reason": "API design best practices",
486
+ }
487
+ )
488
+
489
+ return recommendations[:5] # Limit to 5 recommendations
490
+
491
+ async def get_version_info(self, library: str) -> Dict[str, Any]:
492
+ """Get version information for a library"""
493
+ if library in self.version_cache:
494
+ cached_time, version_info = self.version_cache[library]
495
+ if datetime.now() - cached_time < timedelta(hours=24):
496
+ return version_info
497
+
498
+ version_info = {
499
+ "current_version": "unknown",
500
+ "release_date": "unknown",
501
+ "is_latest": True,
502
+ "changelog_url": None,
503
+ }
504
+
505
+ try:
506
+ # Try to get version info from PyPI for Python packages
507
+ if library in ["fastapi", "django", "flask", "pandas", "numpy"]:
508
+ async with httpx.AsyncClient() as client:
509
+ response = await client.get(
510
+ f"https://pypi.org/pypi/{library}/json", timeout=5.0
511
+ )
512
+ if response.status_code == 200:
513
+ data = response.json()
514
+ version_info.update(
515
+ {
516
+ "current_version": data["info"]["version"],
517
+ "release_date": data["releases"][
518
+ data["info"]["version"]
519
+ ][0]["upload_time"][:10],
520
+ "changelog_url": data["info"]
521
+ .get("project_urls", {})
522
+ .get("Changelog"),
523
+ }
524
+ )
525
+ except Exception:
526
+ pass # Fallback to unknown version
527
+
528
+ # Cache the result
529
+ self.version_cache[library] = (datetime.now(), version_info)
530
+ return version_info
531
+
532
+ def generate_summary(self, content: str, query: str) -> str:
533
+ """Generate a concise summary of the content"""
534
+ if SUMMARIZER_ENDPOINT and SUMMARIZER_KEY:
535
+ try:
536
+ payload = {"query": query, "context": content[:4000]}
537
+ headers = {
538
+ "Authorization": f"Bearer {SUMMARIZER_KEY}",
539
+ "Content-Type": "application/json",
540
+ }
541
+ response = httpx.post(
542
+ SUMMARIZER_ENDPOINT, json=payload, headers=headers, timeout=15
543
+ )
544
+ response.raise_for_status()
545
+ data = response.json()
546
+ summary = data.get("summary") or data.get("result")
547
+ if summary:
548
+ return summary
549
+ except Exception as exc:
550
+ print(f"⚠️ LLM summarization failed: {exc}", file=sys.stderr)
551
+
552
+ sentences = re.split(r"[.!?]+", content)
553
+ query_words = set(query.lower().split())
554
+ scored_sentences: List[tuple[int, str]] = []
555
+
556
+ for sentence in sentences[:10]:
557
+ sentence = sentence.strip()
558
+ if len(sentence) > 20:
559
+ words = set(sentence.lower().split())
560
+ score = len(query_words.intersection(words))
561
+ scored_sentences.append((score, sentence))
562
+
563
+ scored_sentences.sort(key=lambda x: x[0], reverse=True)
564
+ top_sentences = [sent for score, sent in scored_sentences[:3] if score > 0]
565
+
566
+ if top_sentences:
567
+ return ". ".join(top_sentences)[:300] + "..."
568
+
569
+ for sentence in sentences:
570
+ sentence = sentence.strip()
571
+ if len(sentence) > 20:
572
+ return sentence[:300] + "..."
573
+
574
+ return "Documentation content for " + query
575
+
576
+
577
# Global content enhancer instance: module-level singleton, so importers
# share its version/cross-reference caches.
content_enhancer = ContentEnhancer()
@@ -0,0 +1,87 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Manages docker-compose files for local development environments.
4
+ """
5
+
6
+ import os
7
+ import yaml
8
+ from typing import Dict
9
+
10
# docker-compose definitions keyed by service name. Credentials here are
# development-only defaults; change them for anything non-local.
TEMPLATES: Dict[str, Dict] = {
    "postgres": {
        "version": "3.8",
        "services": {
            "db": {
                "image": "postgres:15-alpine",
                "restart": "always",
                "environment": {
                    "POSTGRES_USER": "myuser",
                    "POSTGRES_PASSWORD": "mypassword",
                    "POSTGRES_DB": "mydatabase",
                },
                "ports": ["5432:5432"],
                "volumes": ["postgres_data:/var/lib/postgresql/data/"],
            }
        },
        "volumes": {"postgres_data": {}},
    },
    "redis": {
        "version": "3.8",
        "services": {
            "redis": {
                "image": "redis:7-alpine",
                "restart": "always",
                "ports": ["6379:6379"],
                "volumes": ["redis_data:/data"],
            }
        },
        "volumes": {"redis_data": {}},
    },
    "rabbitmq": {
        "version": "3.8",
        "services": {
            "rabbitmq": {
                "image": "rabbitmq:3-management-alpine",
                "restart": "always",
                # 5672 = AMQP, 15672 = management UI
                "ports": ["5672:5672", "15672:15672"],
                "environment": {
                    "RABBITMQ_DEFAULT_USER": "myuser",
                    # Fixed typo: was "RABBITA_DEFAULT_PASS", which the
                    # RabbitMQ image ignores, silently leaving the default
                    # "guest" password in place.
                    "RABBITMQ_DEFAULT_PASS": "mypassword",
                },
                "volumes": ["rabbitmq_data:/var/lib/rabbitmq/"],
            }
        },
        "volumes": {"rabbitmq_data": {}},
    },
}
57
+
58
+
59
def create_docker_compose(service: str, path: str = ".") -> str:
    """
    Creates a docker-compose.yml file for a given service in the specified path.

    Args:
        service: The name of the service (e.g., 'postgres').
        path: The directory where the file will be created.

    Returns:
        The full path to the created docker-compose.yml file.

    Raises:
        ValueError: If *service* has no template in TEMPLATES.
        FileExistsError: If a docker-compose.yml already exists at *path*.
    """
    template = TEMPLATES.get(service)
    if template is None:
        raise ValueError(
            f"Service '{service}' not supported. Available services: {list(TEMPLATES.keys())}"
        )

    compose_path = os.path.join(path, "docker-compose.yml")

    # Refuse to clobber an existing file rather than overwrite or merge,
    # to avoid accidental data loss.
    if os.path.exists(compose_path):
        raise FileExistsError(
            f"A 'docker-compose.yml' already exists at {path}. Please remove it first."
        )

    with open(compose_path, "w") as handle:
        yaml.dump(template, handle, default_flow_style=False, sort_keys=False)

    return compose_path