ai-coding-assistant 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. ai_coding_assistant-0.5.0.dist-info/METADATA +226 -0
  2. ai_coding_assistant-0.5.0.dist-info/RECORD +89 -0
  3. ai_coding_assistant-0.5.0.dist-info/WHEEL +4 -0
  4. ai_coding_assistant-0.5.0.dist-info/entry_points.txt +3 -0
  5. ai_coding_assistant-0.5.0.dist-info/licenses/LICENSE +21 -0
  6. coding_assistant/__init__.py +3 -0
  7. coding_assistant/__main__.py +19 -0
  8. coding_assistant/cli/__init__.py +1 -0
  9. coding_assistant/cli/app.py +158 -0
  10. coding_assistant/cli/commands/__init__.py +19 -0
  11. coding_assistant/cli/commands/ask.py +178 -0
  12. coding_assistant/cli/commands/config.py +438 -0
  13. coding_assistant/cli/commands/diagram.py +267 -0
  14. coding_assistant/cli/commands/document.py +410 -0
  15. coding_assistant/cli/commands/explain.py +192 -0
  16. coding_assistant/cli/commands/fix.py +249 -0
  17. coding_assistant/cli/commands/index.py +162 -0
  18. coding_assistant/cli/commands/refactor.py +245 -0
  19. coding_assistant/cli/commands/search.py +182 -0
  20. coding_assistant/cli/commands/serve_docs.py +128 -0
  21. coding_assistant/cli/repl.py +381 -0
  22. coding_assistant/cli/theme.py +90 -0
  23. coding_assistant/codebase/__init__.py +1 -0
  24. coding_assistant/codebase/crawler.py +93 -0
  25. coding_assistant/codebase/parser.py +266 -0
  26. coding_assistant/config/__init__.py +25 -0
  27. coding_assistant/config/config_manager.py +615 -0
  28. coding_assistant/config/settings.py +82 -0
  29. coding_assistant/context/__init__.py +19 -0
  30. coding_assistant/context/chunker.py +443 -0
  31. coding_assistant/context/enhanced_retriever.py +322 -0
  32. coding_assistant/context/hybrid_search.py +311 -0
  33. coding_assistant/context/ranker.py +355 -0
  34. coding_assistant/context/retriever.py +119 -0
  35. coding_assistant/context/window.py +362 -0
  36. coding_assistant/documentation/__init__.py +23 -0
  37. coding_assistant/documentation/agents/__init__.py +27 -0
  38. coding_assistant/documentation/agents/coordinator.py +510 -0
  39. coding_assistant/documentation/agents/module_documenter.py +111 -0
  40. coding_assistant/documentation/agents/synthesizer.py +139 -0
  41. coding_assistant/documentation/agents/task_delegator.py +100 -0
  42. coding_assistant/documentation/decomposition/__init__.py +21 -0
  43. coding_assistant/documentation/decomposition/context_preserver.py +477 -0
  44. coding_assistant/documentation/decomposition/module_detector.py +302 -0
  45. coding_assistant/documentation/decomposition/partitioner.py +621 -0
  46. coding_assistant/documentation/generators/__init__.py +14 -0
  47. coding_assistant/documentation/generators/dataflow_generator.py +440 -0
  48. coding_assistant/documentation/generators/diagram_generator.py +511 -0
  49. coding_assistant/documentation/graph/__init__.py +13 -0
  50. coding_assistant/documentation/graph/dependency_builder.py +468 -0
  51. coding_assistant/documentation/graph/module_analyzer.py +475 -0
  52. coding_assistant/documentation/writers/__init__.py +11 -0
  53. coding_assistant/documentation/writers/markdown_writer.py +322 -0
  54. coding_assistant/embeddings/__init__.py +0 -0
  55. coding_assistant/embeddings/generator.py +89 -0
  56. coding_assistant/embeddings/store.py +187 -0
  57. coding_assistant/exceptions/__init__.py +50 -0
  58. coding_assistant/exceptions/base.py +110 -0
  59. coding_assistant/exceptions/llm.py +249 -0
  60. coding_assistant/exceptions/recovery.py +263 -0
  61. coding_assistant/exceptions/storage.py +213 -0
  62. coding_assistant/exceptions/validation.py +230 -0
  63. coding_assistant/llm/__init__.py +1 -0
  64. coding_assistant/llm/client.py +277 -0
  65. coding_assistant/llm/gemini_client.py +181 -0
  66. coding_assistant/llm/groq_client.py +160 -0
  67. coding_assistant/llm/prompts.py +98 -0
  68. coding_assistant/llm/together_client.py +160 -0
  69. coding_assistant/operations/__init__.py +13 -0
  70. coding_assistant/operations/differ.py +369 -0
  71. coding_assistant/operations/generator.py +347 -0
  72. coding_assistant/operations/linter.py +430 -0
  73. coding_assistant/operations/validator.py +406 -0
  74. coding_assistant/storage/__init__.py +9 -0
  75. coding_assistant/storage/database.py +363 -0
  76. coding_assistant/storage/session.py +231 -0
  77. coding_assistant/utils/__init__.py +31 -0
  78. coding_assistant/utils/cache.py +477 -0
  79. coding_assistant/utils/hardware.py +132 -0
  80. coding_assistant/utils/keystore.py +206 -0
  81. coding_assistant/utils/logger.py +32 -0
  82. coding_assistant/utils/progress.py +311 -0
  83. coding_assistant/validation/__init__.py +13 -0
  84. coding_assistant/validation/files.py +305 -0
  85. coding_assistant/validation/inputs.py +335 -0
  86. coding_assistant/validation/params.py +280 -0
  87. coding_assistant/validation/sanitizers.py +243 -0
  88. coding_assistant/vcs/__init__.py +5 -0
  89. coding_assistant/vcs/git.py +269 -0
coding_assistant/documentation/agents/coordinator.py
@@ -0,0 +1,510 @@
+ """Coordinate multiple LLM agents for parallel documentation generation.
+
+ This module implements the multi-agent coordination system that enables
+ scalable, parallel documentation generation across repository partitions.
+ """
+
+ from typing import List, Dict, Optional, Set
+ from dataclasses import dataclass, field
+ from pathlib import Path
+ import asyncio
+ from concurrent.futures import ThreadPoolExecutor
+ import networkx as nx
+
+ from coding_assistant.documentation.decomposition.partitioner import Partition
+ from coding_assistant.documentation.decomposition.context_preserver import ModuleContext
+ from coding_assistant.llm.client import LLMClientFactory
+ from coding_assistant.utils.logger import get_logger
+
+ logger = get_logger(__name__)
+
+
+ @dataclass
+ class DocumentationTask:
+     """
+     Represents a documentation generation task for an LLM agent.
+
+     Tasks are executed by individual agents and may have dependencies
+     on other tasks completing first.
+     """
+     task_id: str
+     partition: Partition
+     context: ModuleContext
+     priority: int = 0
+     dependencies: List[str] = field(default_factory=list)  # Task IDs that must complete first
+     status: str = 'pending'  # pending, running, completed, failed
+     result: Optional[str] = None
+     error: Optional[str] = None
+     metadata: Dict = field(default_factory=dict)
+
+     def __repr__(self):
+         return f"DocumentationTask({self.task_id}, status={self.status}, deps={len(self.dependencies)})"
+
+
+ class MultiAgentCoordinator:
+     """
+     Coordinate multiple LLM agents for parallel documentation generation.
+
+     Inspired by CodeWiki's recursive multi-agent system, this coordinator:
+     - Manages task dependencies and scheduling
+     - Executes tasks in parallel waves
+     - Handles failures and retries
+     - Synthesizes results into coherent documentation
+     """
+
+     def __init__(self,
+                  max_concurrent_agents: int = 3,
+                  llm_client=None,
+                  use_async: bool = True):
+         """
+         Initialize the multi-agent coordinator.
+
+         Args:
+             max_concurrent_agents: Maximum number of parallel agents
+             llm_client: LLM client instance (creates default if None)
+             use_async: Whether to use async execution (True) or threaded (False)
+         """
+         self.max_concurrent = max_concurrent_agents
+         self.llm_client = llm_client or LLMClientFactory.create_client()
+         self.use_async = use_async
+         self.executor = ThreadPoolExecutor(max_workers=max_concurrent_agents)
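+         # Note: this pool backs only the threaded execution path; it is not
+         # shut down here, so long-lived callers may want to call
+         # self.executor.shutdown() once documentation generation is done.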
+
+         # Task tracking
+         self.tasks: Dict[str, DocumentationTask] = {}
+         self.completed_tasks: Set[str] = set()
+
+         logger.info(f"MultiAgentCoordinator initialized with {max_concurrent_agents} concurrent agents")
+
+     async def generate_documentation(self,
+                                      partitions: List[Partition],
+                                      contexts: Dict[str, ModuleContext],
+                                      parsed_files: Optional[Dict[str, Dict]] = None) -> Dict[str, str]:
+         """
+         Generate documentation for all partitions in parallel.
+
+         Args:
+             partitions: List of repository partitions
+             contexts: Dictionary mapping partition names to their contexts
+             parsed_files: Optional dictionary of parsed file information
+
+         Returns:
+             Dictionary mapping partition names to generated documentation (markdown)
+         """
+         logger.info(f"Starting parallel documentation generation for {len(partitions)} partitions")
+
+         # Step 1: Create tasks
+         tasks = self._create_tasks(partitions, contexts)
+         self.tasks = {task.task_id: task for task in tasks}
+
+         logger.info(f"Created {len(tasks)} documentation tasks")
+
+         # Step 2: Topologically sort tasks based on dependencies
+         sorted_tasks = self._topological_sort(tasks)
+
+         logger.info(f"Tasks sorted into {len(sorted_tasks)} execution waves")
+
+         # Step 3: Execute tasks in waves (respecting dependencies)
+         results = await self._execute_tasks_parallel(sorted_tasks, parsed_files)
+
+         logger.info(f"Documentation generation complete: {len(results)} partitions documented")
+
+         return results
+
+     def _create_tasks(self,
+                       partitions: List[Partition],
+                       contexts: Dict[str, ModuleContext]) -> List[DocumentationTask]:
+         """
+         Create documentation tasks with priorities and dependencies.
+
+         Args:
+             partitions: List of partitions to document
+             contexts: Context information for partitions
+
+         Returns:
+             List of DocumentationTask objects
+         """
+         tasks = []
+
+         for partition in partitions:
+             # Compute priority based on dependencies:
+             # partitions with fewer dependencies get higher priority
+             priority = len(partition.dependencies)
+
+             task = DocumentationTask(
+                 task_id=partition.name,
+                 partition=partition,
+                 context=contexts.get(partition.name, ModuleContext(partition.name)),
+                 priority=priority,
+                 dependencies=partition.dependencies.copy()  # Copy to avoid mutation
+             )
+
+             tasks.append(task)
+
+         return tasks
+
+     def _topological_sort(self, tasks: List[DocumentationTask]) -> List[List[DocumentationTask]]:
+         """
+         Topologically sort tasks into execution waves.
+
+         Tasks in the same wave have no dependencies on each other and
+         can be executed in parallel.
+
+         Args:
+             tasks: List of tasks to sort
+
+         Returns:
+             List of task waves (each wave is a list of tasks)
+         """
+         # Build dependency graph
+         graph = nx.DiGraph()
+         task_ids = {task.task_id for task in tasks}
+
+         for task in tasks:
+             graph.add_node(task.task_id)
+
+             for dep in task.dependencies:
+                 if dep in task_ids:
+                     graph.add_edge(dep, task.task_id)
+
+         # Group into waves by dependency level
+         waves = []
+         remaining = set(graph.nodes())
+         processed = set()
+
+         while remaining:
+             # Find nodes with no unprocessed dependencies
+             current_wave_ids = []
+
+             for node in remaining:
+                 predecessors = set(graph.predecessors(node))
+                 if not predecessors or predecessors.issubset(processed):
+                     current_wave_ids.append(node)
+
+             if not current_wave_ids:
+                 # Circular dependency: no node is ready, so add all
+                 # remaining nodes to a final wave rather than looping forever
+                 logger.warning("Circular dependencies detected, adding remaining tasks to final wave")
+                 current_wave_ids = list(remaining)
+
+             # Get task objects for this wave
+             current_wave = [task for task in tasks if task.task_id in current_wave_ids]
+
+             # Sort wave by priority (lower priority value = higher priority)
+             current_wave.sort(key=lambda t: t.priority)
+
+             waves.append(current_wave)
+             remaining -= set(current_wave_ids)
+             processed.update(current_wave_ids)
+
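+         # Example: with tasks A, B, C where B and C each depend on A, the
+         # result is [[A], [B, C]] -- A runs alone in the first wave, then
+         # B and C run in parallel in the second.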
+         return waves
+
+     async def _execute_tasks_parallel(self,
+                                       task_waves: List[List[DocumentationTask]],
+                                       parsed_files: Optional[Dict[str, Dict]]) -> Dict[str, str]:
+         """
+         Execute tasks in parallel waves.
+
+         Args:
+             task_waves: List of task waves to execute
+             parsed_files: Optional parsed file information
+
+         Returns:
+             Dictionary mapping task IDs to generated documentation
+         """
+         results = {}
+
+         for wave_num, wave_tasks in enumerate(task_waves):
+             logger.info(f"Executing wave {wave_num + 1}/{len(task_waves)} with {len(wave_tasks)} tasks")
+
+             # Execute all tasks in this wave concurrently
+             if self.use_async:
+                 wave_results = await self._execute_wave_async(wave_tasks, results, parsed_files)
+             else:
+                 wave_results = await self._execute_wave_threaded(wave_tasks, results, parsed_files)
+
+             # Add results
+             for task_id, doc in wave_results.items():
+                 results[task_id] = doc
+                 self.completed_tasks.add(task_id)
+
+         return results
+
+     async def _execute_wave_async(self,
+                                   tasks: List[DocumentationTask],
+                                   dependency_results: Dict[str, str],
+                                   parsed_files: Optional[Dict[str, Dict]]) -> Dict[str, str]:
+         """Execute a wave of tasks asynchronously."""
+         # Create coroutines for all tasks in the wave
+         coroutines = [
+             self._generate_partition_docs(task, dependency_results, parsed_files)
+             for task in tasks
+         ]
+
+         # Execute in parallel with a semaphore to limit concurrency
+         semaphore = asyncio.Semaphore(self.max_concurrent)
+
+         async def bounded_task(coro, task):
+             async with semaphore:
+                 try:
+                     result = await coro
+                     task.status = 'completed'
+                     task.result = result
+                     return task.task_id, result
+                 except Exception as e:
+                     logger.error(f"Task {task.task_id} failed: {e}")
+                     task.status = 'failed'
+                     task.error = str(e)
+                     return task.task_id, f"# Error\n\nDocumentation generation failed: {e}"
+
+         bounded_coroutines = [
+             bounded_task(coro, task)
+             for coro, task in zip(coroutines, tasks)
+         ]
+
+         results = await asyncio.gather(*bounded_coroutines)
+
+         return dict(results)
+
+     async def _execute_wave_threaded(self,
+                                      tasks: List[DocumentationTask],
+                                      dependency_results: Dict[str, str],
+                                      parsed_files: Optional[Dict[str, Dict]]) -> Dict[str, str]:
+         """Execute a wave of tasks using a thread pool (fallback for non-async LLMs)."""
+         loop = asyncio.get_running_loop()
+
+         def sync_generate(task):
+             try:
+                 # Run the async function in a fresh event loop on this worker thread
+                 result = asyncio.run(self._generate_partition_docs(task, dependency_results, parsed_files))
+                 task.status = 'completed'
+                 task.result = result
+                 return task.task_id, result
+             except Exception as e:
+                 logger.error(f"Task {task.task_id} failed: {e}")
+                 task.status = 'failed'
+                 task.error = str(e)
+                 return task.task_id, f"# Error\n\nDocumentation generation failed: {e}"
+
+         # Execute in the thread pool
+         futures = [
+             loop.run_in_executor(self.executor, sync_generate, task)
+             for task in tasks
+         ]
+
+         results = await asyncio.gather(*futures)
+
+         return dict(results)
+
+     async def _generate_partition_docs(self,
+                                        task: DocumentationTask,
+                                        dependency_docs: Dict[str, str],
+                                        parsed_files: Optional[Dict[str, Dict]]) -> str:
+         """
+         Generate documentation for a single partition.
+
+         Args:
+             task: Documentation task
+             dependency_docs: Documentation from dependent partitions
+             parsed_files: Optional parsed file information
+
+         Returns:
+             Generated documentation as markdown string
+         """
+         logger.debug(f"Generating documentation for partition: {task.partition.name}")
+
+         task.status = 'running'
+
+         # Build comprehensive prompt
+         prompt = self._build_documentation_prompt(task, dependency_docs, parsed_files)
+
+         # Generate documentation using the LLM
+         try:
+             # LLM clients use the messages format and return generators
+             messages = [{"role": "user", "content": prompt}]
+
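+             # Caveat: if the client's generate() yields from a synchronous
+             # generator, iterating it here blocks the event loop and wave
+             # parallelism degrades; the threaded path above avoids this.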
+             # Generate and collect response chunks
+             response_chunks = []
+             for chunk in self.llm_client.generate(messages=messages, stream=True):
+                 response_chunks.append(chunk)
+
+             response = ''.join(response_chunks)
+             logger.debug(f"Documentation generated for {task.partition.name} ({len(response)} chars)")
+
+             return response
+
+         except Exception as e:
+             logger.error(f"LLM generation failed for {task.partition.name}: {e}")
+             # Return placeholder documentation
+             return self._generate_fallback_documentation(task)
+
+     def _build_documentation_prompt(self,
+                                     task: DocumentationTask,
+                                     dependency_docs: Dict[str, str],
+                                     parsed_files: Optional[Dict[str, Dict]]) -> str:
+         """
+         Build a comprehensive prompt for partition documentation.
+
+         Args:
+             task: Documentation task
+             dependency_docs: Documentation from dependencies
+             parsed_files: Optional parsed files
+
+         Returns:
+             Prompt string for the LLM
+         """
+         partition = task.partition
+         context = task.context
+
+         # Build file content summary
+         file_summaries = []
+
+         for file_path in partition.files[:10]:  # Limit to first 10 files
+             file_name = Path(file_path).name
+
+             if parsed_files and file_path in parsed_files:
+                 parsed = parsed_files[file_path]
+                 functions = parsed.get('functions', [])
+                 classes = parsed.get('classes', [])
+
+                 summary = f"**{file_name}**\n"
+                 if classes:
+                     summary += f"  - Classes: {', '.join(c['name'] for c in classes[:5])}\n"
+                 if functions:
+                     summary += f"  - Functions: {', '.join(f['name'] for f in functions[:5])}\n"
+
+                 file_summaries.append(summary)
+             else:
+                 file_summaries.append(f"**{file_name}**\n")
+
+         if len(partition.files) > 10:
+             file_summaries.append(f"\n... and {len(partition.files) - 10} more files")
+
+         # Build dependency context
+         dep_context = ""
+         if task.dependencies and dependency_docs:
+             dep_context = "\n### Dependencies\n\nThis module depends on:\n\n"
+             for dep_name in task.dependencies[:3]:
+                 if dep_name in dependency_docs:
+                     dep_doc = dependency_docs[dep_name]
+                     # Extract the first paragraph
+                     first_para = dep_doc.split('\n\n')[0] if dep_doc else "No documentation"
+                     dep_context += f"**{dep_name}**: {first_para[:200]}...\n\n"
+
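+         # chr(10) is "\n": backslash escapes are not allowed inside
+         # f-string expressions before Python 3.12, hence the chr(10)
+         # workaround in the template below.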
+         prompt = f"""Generate comprehensive documentation for the following code module.
+
+ ## Module: {partition.name}
+
+ ### Overview
+ - **Files**: {len(partition.files)}
+ - **Lines of Code**: {partition.size_loc}
+ - **Cohesion Score**: {partition.cohesion_score:.2f}
+ - **Architectural Role**: {context.architectural_role}
+ - **Level**: {partition.level} (0=top-level, 1=module, 2=component)
+
+ ### Architectural Context
+ - **Related Modules**: {', '.join(context.related_partitions[:5]) if context.related_partitions else 'None'}
+ - **Design Patterns**: {', '.join(context.common_patterns) if context.common_patterns else 'None detected'}
+ - **Public Interfaces**: {len(context.exports)} exports
+
+ ### Files in This Module
+ {chr(10).join(file_summaries)}
+
+ {dep_context}
+
+ ## Task
+ Generate detailed, professional documentation in Markdown format that includes:
+
+ 1. **Module Overview** (2-3 sentences)
+    - What this module does
+    - Its purpose in the overall system
+
+ 2. **Architecture** (1 paragraph)
+    - How it's organized internally
+    - Key structural decisions
+
+ 3. **Key Components**
+    - Main classes/functions and their responsibilities
+    - Important data structures
+
+ 4. **Dependencies & Relationships**
+    - What this module depends on
+    - What depends on this module
+    - How it fits in the larger architecture
+
+ 5. **Public API** (if applicable)
+    - Exported functions/classes
+    - Usage examples
+    - API contracts
+
+ 6. **Design Decisions**
+    - Notable patterns or architectural choices
+    - Trade-offs made
+
+ Format: Use clear Markdown with headers, bullet points, and code examples where appropriate.
+ Tone: Professional, concise, developer-focused.
+ """
+
+         return prompt
+
+     def _generate_fallback_documentation(self, task: DocumentationTask) -> str:
+         """Generate basic fallback documentation when the LLM fails."""
+         partition = task.partition
+         context = task.context
+
+         doc = f"""# {partition.name}
+
+ ## Overview
+
+ This module contains {len(partition.files)} files with approximately {partition.size_loc} lines of code.
+
+ **Architectural Role**: {context.architectural_role}
+
+ ## Files
+
+ """
+         for file_path in partition.files[:20]:
+             doc += f"- `{Path(file_path).name}`\n"
+
+         if len(partition.files) > 20:
+             doc += f"\n... and {len(partition.files) - 20} more files\n"
+
+         if context.common_patterns:
+             doc += "\n## Design Patterns\n\n"
+             for pattern in context.common_patterns:
+                 doc += f"- {pattern}\n"
+
+         if context.related_partitions:
+             doc += "\n## Related Modules\n\n"
+             for related in context.related_partitions[:10]:
+                 doc += f"- {related}\n"
+
+         doc += "\n## Metrics\n\n"
+         doc += f"- **Cohesion**: {partition.cohesion_score:.2f}\n"
+         doc += f"- **Dependencies**: {len(partition.dependencies)}\n"
+
+         doc += "\n---\n\n*Note: Detailed documentation could not be generated. This is a placeholder.*\n"
+
+         return doc
+
+     def get_task_status(self, task_id: str) -> Optional[DocumentationTask]:
+         """Get the status of a specific task."""
+         return self.tasks.get(task_id)
+
+     def get_completion_stats(self) -> Dict:
+         """Get statistics about task completion."""
+         total = len(self.tasks)
+         completed = sum(1 for t in self.tasks.values() if t.status == 'completed')
+         failed = sum(1 for t in self.tasks.values() if t.status == 'failed')
+         running = sum(1 for t in self.tasks.values() if t.status == 'running')
+         pending = sum(1 for t in self.tasks.values() if t.status == 'pending')
+
+         return {
+             'total': total,
+             'completed': completed,
+             'failed': failed,
+             'running': running,
+             'pending': pending,
+             'completion_rate': completed / total if total > 0 else 0
+         }
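
For orientation, a minimal driver for this coordinator might look like the sketch below. This is an assumption-laden illustration, not part of the package: `partitions` and `contexts` would come from the partitioner and context preserver elsewhere in this diff, and their construction is elided here.

    import asyncio

    from coding_assistant.documentation.agents.coordinator import MultiAgentCoordinator

    async def main(partitions, contexts):
        # Up to three agents run per wave; waves follow partition dependencies.
        coordinator = MultiAgentCoordinator(max_concurrent_agents=3)
        docs = await coordinator.generate_documentation(partitions, contexts)
        for name, markdown in docs.items():
            print(f"{name}: {len(markdown)} chars")
        print(coordinator.get_completion_stats())

    # asyncio.run(main(partitions, contexts))  # with partitions/contexts built upstream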
coding_assistant/documentation/agents/module_documenter.py
@@ -0,0 +1,111 @@
+ """Per-module documentation generation.
+
+ This module provides utilities for generating documentation for individual
+ code modules/partitions.
+ """
+
+ from typing import Dict, List, Optional
+ from pathlib import Path
+
+ from coding_assistant.documentation.decomposition.partitioner import Partition
+ from coding_assistant.documentation.decomposition.context_preserver import ModuleContext
+ from coding_assistant.utils.logger import get_logger
+
+ logger = get_logger(__name__)
+
+
+ class ModuleDocumenter:
+     """
+     Generate documentation for individual modules.
+
+     This class encapsulates the logic for documenting a single module/partition,
+     which is then used by the coordinator for parallel execution.
+     """
+
+     def __init__(self, llm_client=None):
+         """
+         Initialize module documenter.
+
+         Args:
+             llm_client: LLM client instance
+         """
+         from coding_assistant.llm.client import LLMClientFactory
+         self.llm_client = llm_client or LLMClientFactory.create_client()
+
+     def generate(self,
+                  partition: Partition,
+                  context: ModuleContext,
+                  parsed_files: Optional[Dict[str, Dict]] = None) -> str:
+         """
+         Generate documentation for a single partition.
+
+         Args:
+             partition: Partition to document
+             context: Architectural context
+             parsed_files: Optional parsed file data
+
+         Returns:
+             Generated documentation as markdown
+         """
+         logger.info(f"Generating documentation for module: {partition.name}")
+
+         prompt = self._build_prompt(partition, context, parsed_files)
+
+         try:
+             # LLM clients use the messages format and return generators
+             messages = [{"role": "user", "content": prompt}]
+
+             # Generate and collect response chunks
+             response_chunks = []
+             for chunk in self.llm_client.generate(messages=messages, stream=True):
+                 response_chunks.append(chunk)
+
+             response = ''.join(response_chunks)
+             return response
+
+         except Exception as e:
+             logger.error(f"Documentation generation failed: {e}")
+             return self._generate_fallback(partition, context)
+
+     def _build_prompt(self,
+                       partition: Partition,
+                       context: ModuleContext,
+                       parsed_files: Optional[Dict[str, Dict]]) -> str:
+         """Build prompt for module documentation."""
+         # Similar to the coordinator's prompt builder
+         file_list = "\n".join(f"- {Path(f).name}" for f in partition.files[:15])
+
+         if len(partition.files) > 15:
+             file_list += f"\n- ... and {len(partition.files) - 15} more files"
+
+         return f"""Document the following code module:
+
+ **Module**: {partition.name}
+ **Role**: {context.architectural_role}
+ **Files**: {len(partition.files)}
+ **LOC**: {partition.size_loc}
+
+ **Files**:
+ {file_list}
+
+ **Design Patterns**: {', '.join(context.common_patterns) if context.common_patterns else 'None'}
+
+ Generate concise, professional documentation covering:
+ 1. Purpose and overview
+ 2. Key components
+ 3. Architecture
+ 4. Public API (if any)
+
+ Format as markdown."""
+
+     def _generate_fallback(self, partition: Partition, context: ModuleContext) -> str:
+         """Generate fallback documentation."""
+         return f"""# {partition.name}
+
+ ## Overview
+ Module with {len(partition.files)} files ({partition.size_loc} LOC).
+
+ **Role**: {context.architectural_role}
+
+ *Detailed documentation could not be generated.*
+ """