ai-coding-assistant 0.5.0 (ai_coding_assistant-0.5.0-py3-none-any.whl)

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
Files changed (89)
  1. ai_coding_assistant-0.5.0.dist-info/METADATA +226 -0
  2. ai_coding_assistant-0.5.0.dist-info/RECORD +89 -0
  3. ai_coding_assistant-0.5.0.dist-info/WHEEL +4 -0
  4. ai_coding_assistant-0.5.0.dist-info/entry_points.txt +3 -0
  5. ai_coding_assistant-0.5.0.dist-info/licenses/LICENSE +21 -0
  6. coding_assistant/__init__.py +3 -0
  7. coding_assistant/__main__.py +19 -0
  8. coding_assistant/cli/__init__.py +1 -0
  9. coding_assistant/cli/app.py +158 -0
  10. coding_assistant/cli/commands/__init__.py +19 -0
  11. coding_assistant/cli/commands/ask.py +178 -0
  12. coding_assistant/cli/commands/config.py +438 -0
  13. coding_assistant/cli/commands/diagram.py +267 -0
  14. coding_assistant/cli/commands/document.py +410 -0
  15. coding_assistant/cli/commands/explain.py +192 -0
  16. coding_assistant/cli/commands/fix.py +249 -0
  17. coding_assistant/cli/commands/index.py +162 -0
  18. coding_assistant/cli/commands/refactor.py +245 -0
  19. coding_assistant/cli/commands/search.py +182 -0
  20. coding_assistant/cli/commands/serve_docs.py +128 -0
  21. coding_assistant/cli/repl.py +381 -0
  22. coding_assistant/cli/theme.py +90 -0
  23. coding_assistant/codebase/__init__.py +1 -0
  24. coding_assistant/codebase/crawler.py +93 -0
  25. coding_assistant/codebase/parser.py +266 -0
  26. coding_assistant/config/__init__.py +25 -0
  27. coding_assistant/config/config_manager.py +615 -0
  28. coding_assistant/config/settings.py +82 -0
  29. coding_assistant/context/__init__.py +19 -0
  30. coding_assistant/context/chunker.py +443 -0
  31. coding_assistant/context/enhanced_retriever.py +322 -0
  32. coding_assistant/context/hybrid_search.py +311 -0
  33. coding_assistant/context/ranker.py +355 -0
  34. coding_assistant/context/retriever.py +119 -0
  35. coding_assistant/context/window.py +362 -0
  36. coding_assistant/documentation/__init__.py +23 -0
  37. coding_assistant/documentation/agents/__init__.py +27 -0
  38. coding_assistant/documentation/agents/coordinator.py +510 -0
  39. coding_assistant/documentation/agents/module_documenter.py +111 -0
  40. coding_assistant/documentation/agents/synthesizer.py +139 -0
  41. coding_assistant/documentation/agents/task_delegator.py +100 -0
  42. coding_assistant/documentation/decomposition/__init__.py +21 -0
  43. coding_assistant/documentation/decomposition/context_preserver.py +477 -0
  44. coding_assistant/documentation/decomposition/module_detector.py +302 -0
  45. coding_assistant/documentation/decomposition/partitioner.py +621 -0
  46. coding_assistant/documentation/generators/__init__.py +14 -0
  47. coding_assistant/documentation/generators/dataflow_generator.py +440 -0
  48. coding_assistant/documentation/generators/diagram_generator.py +511 -0
  49. coding_assistant/documentation/graph/__init__.py +13 -0
  50. coding_assistant/documentation/graph/dependency_builder.py +468 -0
  51. coding_assistant/documentation/graph/module_analyzer.py +475 -0
  52. coding_assistant/documentation/writers/__init__.py +11 -0
  53. coding_assistant/documentation/writers/markdown_writer.py +322 -0
  54. coding_assistant/embeddings/__init__.py +0 -0
  55. coding_assistant/embeddings/generator.py +89 -0
  56. coding_assistant/embeddings/store.py +187 -0
  57. coding_assistant/exceptions/__init__.py +50 -0
  58. coding_assistant/exceptions/base.py +110 -0
  59. coding_assistant/exceptions/llm.py +249 -0
  60. coding_assistant/exceptions/recovery.py +263 -0
  61. coding_assistant/exceptions/storage.py +213 -0
  62. coding_assistant/exceptions/validation.py +230 -0
  63. coding_assistant/llm/__init__.py +1 -0
  64. coding_assistant/llm/client.py +277 -0
  65. coding_assistant/llm/gemini_client.py +181 -0
  66. coding_assistant/llm/groq_client.py +160 -0
  67. coding_assistant/llm/prompts.py +98 -0
  68. coding_assistant/llm/together_client.py +160 -0
  69. coding_assistant/operations/__init__.py +13 -0
  70. coding_assistant/operations/differ.py +369 -0
  71. coding_assistant/operations/generator.py +347 -0
  72. coding_assistant/operations/linter.py +430 -0
  73. coding_assistant/operations/validator.py +406 -0
  74. coding_assistant/storage/__init__.py +9 -0
  75. coding_assistant/storage/database.py +363 -0
  76. coding_assistant/storage/session.py +231 -0
  77. coding_assistant/utils/__init__.py +31 -0
  78. coding_assistant/utils/cache.py +477 -0
  79. coding_assistant/utils/hardware.py +132 -0
  80. coding_assistant/utils/keystore.py +206 -0
  81. coding_assistant/utils/logger.py +32 -0
  82. coding_assistant/utils/progress.py +311 -0
  83. coding_assistant/validation/__init__.py +13 -0
  84. coding_assistant/validation/files.py +305 -0
  85. coding_assistant/validation/inputs.py +335 -0
  86. coding_assistant/validation/params.py +280 -0
  87. coding_assistant/validation/sanitizers.py +243 -0
  88. coding_assistant/vcs/__init__.py +5 -0
  89. coding_assistant/vcs/git.py +269 -0
coding_assistant/documentation/generators/diagram_generator.py
@@ -0,0 +1,511 @@
+"""Generate Mermaid diagrams from code structure.
+
+This module provides generation of various diagram types in Mermaid format:
+- Architecture diagrams (module/partition dependencies)
+- Class diagrams (classes, inheritance, relationships)
+- Sequence diagrams (function call flows)
+- Dependency graphs (file/module dependencies)
+"""
+
+from typing import List, Dict, Set, Optional, Tuple
+from pathlib import Path
+import networkx as nx
+import re
+
+from coding_assistant.documentation.decomposition.partitioner import Partition
+from coding_assistant.utils.logger import get_logger
+
+logger = get_logger(__name__)
+
+
+class MermaidDiagramGenerator:
+    """
+    Generate various Mermaid diagrams from code structure.
+
+    Supports:
+    - Architecture diagrams: High-level module relationships
+    - Class diagrams: OOP structure and inheritance
+    - Sequence diagrams: Function call sequences
+    - Dependency graphs: File/module dependencies
+    """
+
+    def __init__(self):
+        """Initialize the diagram generator."""
+        pass
+
+    def generate_architecture_diagram(self,
+                                      partitions: List[Partition],
+                                      dependency_graph: Optional[nx.DiGraph] = None,
+                                      direction: str = "TB") -> str:
+        """
+        Generate high-level architecture diagram showing modules and their relationships.
+
+        Args:
+            partitions: List of repository partitions
+            dependency_graph: Optional dependency graph for additional context
+            direction: Graph direction (TB=top-bottom, LR=left-right, RL, BT)
+
+        Returns:
+            Mermaid diagram as string
+        """
+        logger.info(f"Generating architecture diagram for {len(partitions)} partitions")
+
+        mermaid = [f"graph {direction}"]
+
+        # Add nodes for each partition
+        for partition in partitions:
+            node_id = self._sanitize_id(partition.name)
+            label = self._format_partition_label(partition.name, partition.size_loc)
+
+            # Choose node shape based on architectural role (if available)
+            node_shape = self._get_node_shape_for_partition(partition)
+            mermaid.append(f" {node_id}{node_shape[0]}{label}{node_shape[1]}")
+
+        # Add edges for dependencies
+        added_edges = set()
+        for partition in partitions:
+            node_id = self._sanitize_id(partition.name)
+
+            for dep in partition.dependencies:
+                dep_id = self._sanitize_id(dep)
+                edge_key = (node_id, dep_id)
+
+                # Avoid duplicate edges
+                if edge_key not in added_edges:
+                    mermaid.append(f" {node_id} --> {dep_id}")
+                    added_edges.add(edge_key)
+
+        # Add styling
+        mermaid.extend(self._add_architecture_styling(partitions))
+
+        result = "\n".join(mermaid)
+        logger.debug(f"Architecture diagram generated ({len(mermaid)} lines)")
+
+        return result
+
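A minimal usage sketch for generate_architecture_diagram (illustrative, not shipped in the wheel). It assumes only the partition attributes the method reads (name, size_loc, dependencies, metadata), so SimpleNamespace stand-ins are used in place of real Partition objects, whose constructor is not shown in this diff.

# Hypothetical stand-ins for Partition; only the attributes read above are provided.
from types import SimpleNamespace
from coding_assistant.documentation.generators.diagram_generator import MermaidDiagramGenerator

cli = SimpleNamespace(name="coding_assistant/cli", size_loc=1800,
                      dependencies=["coding_assistant/llm"],
                      metadata={"architectural_role": "api"})
llm = SimpleNamespace(name="coding_assistant/llm", size_loc=900,
                      dependencies=[], metadata={})

gen = MermaidDiagramGenerator()
print(gen.generate_architecture_diagram([cli, llm], direction="LR"))
# Emits a "graph LR" block: one node per partition (stadium shape for the
# "api" role), one edge per dependency, plus the size-based classDef styling.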
+    def generate_class_diagram(self,
+                               parsed_files: Dict[str, Dict],
+                               file_filter: Optional[List[str]] = None,
+                               max_classes: int = 20) -> str:
+        """
+        Generate class diagram showing classes, methods, and inheritance.
+
+        Args:
+            parsed_files: Dictionary of file_path -> parsed code data
+            file_filter: Optional list of files to include (None = all)
+            max_classes: Maximum number of classes to include
+
+        Returns:
+            Mermaid class diagram as string
+        """
+        logger.info(f"Generating class diagram from {len(parsed_files)} files")
+
+        mermaid = ["classDiagram"]
+
+        # Extract all classes from parsed files
+        all_classes = []
+        class_relationships = []
+
+        for file_path, parsed_data in parsed_files.items():
+            # Apply file filter
+            if file_filter and file_path not in file_filter:
+                continue
+
+            classes = parsed_data.get('classes', [])
+
+            for cls in classes:
+                all_classes.append({
+                    'file': file_path,
+                    'data': cls
+                })
+
+                # Track inheritance relationships
+                if cls.get('bases'):
+                    for base in cls['bases']:
+                        class_relationships.append((base, cls['name'], 'inherits'))
+
+        # Limit to max_classes (prioritize classes with relationships)
+        if len(all_classes) > max_classes:
+            # Sort by number of relationships (methods count as a heuristic)
+            all_classes.sort(
+                key=lambda c: len(c['data'].get('methods', [])),
+                reverse=True
+            )
+            all_classes = all_classes[:max_classes]
+
+        # Generate class definitions
+        included_classes = set()
+        for cls_info in all_classes:
+            cls = cls_info['data']
+            class_name = cls['name']
+            included_classes.add(class_name)
+
+            mermaid.append(f" class {class_name} {{")
+
+            # Add attributes (if available)
+            attributes = cls.get('attributes', [])
+            for attr in attributes[:10]: # Limit attributes
+                attr_name = attr.get('name', attr) if isinstance(attr, dict) else attr
+                attr_type = attr.get('type', '') if isinstance(attr, dict) else ''
+                if attr_type:
+                    mermaid.append(f" +{attr_type} {attr_name}")
+                else:
+                    mermaid.append(f" +{attr_name}")
+
+            # Add methods
+            methods = cls.get('methods', [])
+            for method in methods[:15]: # Limit methods
+                method_name = method.get('name', method) if isinstance(method, dict) else method
+
+                # Skip private methods starting with _
+                if method_name.startswith('_') and not method_name.startswith('__'):
+                    continue
+
+                # Get method signature if available
+                params = method.get('parameters', []) if isinstance(method, dict) else []
+                return_type = method.get('return_type', '') if isinstance(method, dict) else ''
+
+                if params:
+                    param_str = ', '.join(params[:3]) # Limit params shown
+                    if len(params) > 3:
+                        param_str += ', ...'
+                else:
+                    param_str = ''
+
+                if return_type:
+                    mermaid.append(f" +{method_name}({param_str}) {return_type}")
+                else:
+                    mermaid.append(f" +{method_name}({param_str})")
+
+            mermaid.append(" }")
+
+        # Add inheritance relationships
+        for base, derived, rel_type in class_relationships:
+            # Only include if both classes are in the diagram
+            if base in included_classes and derived in included_classes:
+                if rel_type == 'inherits':
+                    mermaid.append(f" {base} <|-- {derived}")
+
+        result = "\n".join(mermaid)
+        logger.debug(f"Class diagram generated with {len(included_classes)} classes")
+
+        return result
+
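A sketch of the input shape generate_class_diagram expects (illustrative). The dictionary keys ('classes', 'name', 'bases', 'attributes', 'methods', 'parameters', 'return_type') are the ones the method reads; the class and file names below are invented, not taken from the package's own parser output.

from coding_assistant.documentation.generators.diagram_generator import MermaidDiagramGenerator

# Hypothetical parsed data keyed by file path.
parsed_files = {
    "coding_assistant/llm/client.py": {
        "classes": [
            {"name": "BaseLLMClient",
             "bases": [],
             "attributes": [{"name": "model", "type": "str"}],
             "methods": [{"name": "complete", "parameters": ["prompt"], "return_type": "str"}]},
            {"name": "GroqClient",
             "bases": ["BaseLLMClient"],
             "attributes": [],
             "methods": [{"name": "complete", "parameters": ["prompt"], "return_type": "str"}]},
        ]
    }
}

gen = MermaidDiagramGenerator()
print(gen.generate_class_diagram(parsed_files))
# Yields a classDiagram with both classes and the edge "BaseLLMClient <|-- GroqClient".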
+    def generate_sequence_diagram(self,
+                                  function_name: str,
+                                  call_chain: List[Dict],
+                                  max_depth: int = 10) -> str:
+        """
+        Generate sequence diagram for a function's execution flow.
+
+        Args:
+            function_name: Name of the main function
+            call_chain: List of function calls with structure:
+                [{'caller': 'A', 'callee': 'B', 'message': 'method()'}]
+            max_depth: Maximum call depth to visualize
+
+        Returns:
+            Mermaid sequence diagram as string
+        """
+        logger.info(f"Generating sequence diagram for {function_name}")
+
+        mermaid = ["sequenceDiagram"]
+
+        # Add title
+        mermaid.append(f" title {function_name} Execution Flow")
+
+        # Extract unique participants
+        participants = self._extract_participants(call_chain)
+
+        # Add participants
+        for participant in participants[:max_depth]:
+            mermaid.append(f" participant {participant}")
+
+        # Add interactions
+        call_depth = 0
+        for call in call_chain[:max_depth * 2]: # Limit total calls
+            caller = call.get('caller', 'Unknown')
+            callee = call.get('callee', 'Unknown')
+            message = call.get('message', callee)
+            call_type = call.get('type', 'sync') # sync, async, return
+
+            # Skip if participants not included
+            if caller not in participants or callee not in participants:
+                continue
+
+            # Different arrow types based on call type
+            if call_type == 'async':
+                mermaid.append(f" {caller}-))+{callee}: {message}")
+            elif call_type == 'return':
+                mermaid.append(f" {callee}-->>-{caller}: return")
+                call_depth -= 1
+            else:
+                mermaid.append(f" {caller}->>+{callee}: {message}")
+                call_depth += 1
+
+            # Prevent excessive nesting
+            if call_depth > max_depth:
+                break
+
+        result = "\n".join(mermaid)
+        logger.debug(f"Sequence diagram generated with {len(participants)} participants")
+
+        return result
+
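A sketch of the call_chain structure documented above (illustrative; the function names are invented). The keys 'caller', 'callee', 'message' and 'type' are exactly the ones the method reads, with 'type' selecting the arrow style.

from coding_assistant.documentation.generators.diagram_generator import MermaidDiagramGenerator

call_chain = [
    {"caller": "ask_command", "callee": "Retriever", "message": "retrieve(query)", "type": "sync"},
    {"caller": "Retriever", "callee": "LLMClient", "message": "complete(prompt)", "type": "async"},
    {"caller": "ask_command", "callee": "Retriever", "message": "answer", "type": "return"},
]

gen = MermaidDiagramGenerator()
print(gen.generate_sequence_diagram("ask_command", call_chain))
# Declares the three participants, then emits one arrow per call in order.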
+    def generate_dependency_graph(self,
+                                  dependency_graph: nx.DiGraph,
+                                  max_nodes: int = 50,
+                                  show_files: bool = True) -> str:
+        """
+        Generate dependency graph visualization.
+
+        Args:
+            dependency_graph: NetworkX directed graph of dependencies
+            max_nodes: Maximum nodes to include (most important)
+            show_files: If True, show file names; otherwise show modules
+
+        Returns:
+            Mermaid dependency graph as string
+        """
+        logger.info(f"Generating dependency graph with {dependency_graph.number_of_nodes()} nodes")
+
+        # Limit to most important nodes if graph is too large
+        if dependency_graph.number_of_nodes() > max_nodes:
+            important_nodes = self._get_important_nodes(dependency_graph, max_nodes)
+            subgraph = dependency_graph.subgraph(important_nodes).copy()
+        else:
+            subgraph = dependency_graph
+
+        mermaid = ["graph LR"]
+
+        # Add nodes
+        for node in subgraph.nodes():
+            node_id = self._sanitize_id(node)
+
+            # Format label based on show_files
+            if show_files:
+                label = Path(node).name if '/' in node or '\\' in node else node
+            else:
+                label = node
+
+            # Determine node styling based on centrality
+            node_data = subgraph.nodes.get(node, {})
+            is_central = node_data.get('is_central', False)
+
+            if is_central:
+                mermaid.append(f" {node_id}[[\"{label}\"]]") # Double border for central nodes
+            else:
+                mermaid.append(f" {node_id}[\"{label}\"]")
+
+        # Add edges
+        for source, target in subgraph.edges():
+            source_id = self._sanitize_id(source)
+            target_id = self._sanitize_id(target)
+
+            # Check for edge data (weight, type, etc.)
+            edge_data = subgraph.edges.get((source, target), {})
+            weight = edge_data.get('weight', 1)
+
+            # Thicker arrows for stronger dependencies
+            if weight > 5:
+                mermaid.append(f" {source_id} ==> {target_id}")
+            else:
+                mermaid.append(f" {source_id} --> {target_id}")
+
+        # Add styling for central nodes
+        mermaid.append(" classDef central fill:#ffd700,stroke:#ff6347,stroke-width:3px")
+        central_nodes = [
+            self._sanitize_id(n) for n in subgraph.nodes()
+            if subgraph.nodes.get(n, {}).get('is_central', False)
+        ]
+        if central_nodes:
+            mermaid.append(f" class {','.join(central_nodes)} central")
+
+        result = "\n".join(mermaid)
+        logger.debug(f"Dependency graph generated with {subgraph.number_of_nodes()} nodes")
+
+        return result
+
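A sketch for generate_dependency_graph (illustrative; the file names are invented). It builds a small NetworkX DiGraph whose 'weight' edge attribute is what the method inspects when choosing between --> and ==> arrows.

import networkx as nx
from coding_assistant.documentation.generators.diagram_generator import MermaidDiagramGenerator

g = nx.DiGraph()
g.add_edge("coding_assistant/cli/app.py", "coding_assistant/llm/client.py", weight=8)
g.add_edge("coding_assistant/cli/app.py", "coding_assistant/config/settings.py", weight=2)

gen = MermaidDiagramGenerator()
print(gen.generate_dependency_graph(g, max_nodes=50, show_files=True))
# Three nodes labelled by file name, a thick ==> edge for the weight-8 dependency and
# a normal --> edge for the other; the central-node classDef is appended but unused
# here because the graph is small enough to skip _get_important_nodes.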
+    # Helper methods
+
+    def _sanitize_id(self, name: str) -> str:
+        """
+        Sanitize name for use as Mermaid ID.
+
+        Creates clean, valid Mermaid node IDs:
+        - Uses basename for paths (shorter, more readable)
+        - Replaces special characters with underscores
+        - Ensures ID starts with a letter
+        - Collapses multiple underscores
+        """
+        # Extract basename if it's a path (use last meaningful component)
+        if '/' in name or '\\' in name:
+            # Get the last non-empty path component
+            parts = re.split(r'[/\\]', name)
+            meaningful_parts = [p for p in parts if p and p not in ('.', '..')]
+            if meaningful_parts:
+                # Use last 2 parts for context (e.g., "module/file" instead of just "file")
+                name = '_'.join(meaningful_parts[-2:]) if len(meaningful_parts) > 1 else meaningful_parts[-1]
+
+        # Replace path separators, dots, dashes, spaces with underscores
+        sanitized = re.sub(r'[/\\.\-\s]', '_', name)
+
+        # Remove any remaining special characters (keep alphanumeric and underscore)
+        sanitized = re.sub(r'[^a-zA-Z0-9_]', '', sanitized)
+
+        # Collapse multiple consecutive underscores into one
+        sanitized = re.sub(r'_+', '_', sanitized)
+
+        # Remove leading/trailing underscores
+        sanitized = sanitized.strip('_')
+
+        # Ensure ID starts with a letter (Mermaid requirement)
+        if sanitized and not sanitized[0].isalpha():
+            sanitized = 'n_' + sanitized
+
+        # Handle empty result
+        if not sanitized:
+            sanitized = 'unknown'
+
+        return sanitized
+
+    def _format_partition_label(self, name: str, size_loc: int) -> str:
+        """Format partition node label with size info."""
+        # Extract meaningful name from path
+        if '/' in name or '\\' in name:
+            parts = re.split(r'[/\\]', name)
+            meaningful_parts = [p for p in parts if p and p not in ('.', '..', '')]
+            if meaningful_parts:
+                # Use last 2 meaningful parts for context
+                name = '/'.join(meaningful_parts[-2:]) if len(meaningful_parts) > 1 else meaningful_parts[-1]
+
+        # Remove leading dots/special chars from name
+        name = name.lstrip('./\\')
+
+        # Shorten long names
+        if len(name) > 30:
+            name = name[:27] + '...'
+
+        # Escape quotes in name
+        name = name.replace('"', "'")
+
+        return f'"{name}\\n({size_loc} LOC)"'
+
+    def _get_node_shape_for_partition(self, partition: Partition) -> Tuple[str, str]:
+        """
+        Determine node shape based on partition characteristics.
+
+        Returns:
+            Tuple of (opening bracket, closing bracket) for node shape
+        """
+        # Check if partition has metadata with architectural role
+        role = partition.metadata.get('architectural_role', '').lower()
+
+        if 'api' in role or 'interface' in role:
+            return ('([', '])') # Stadium shape for APIs
+        elif 'data' in role or 'model' in role:
+            return ('[(', ')]') # Cylindrical shape for data
+        elif 'ui' in role or 'view' in role:
+            return ('{{', '}}') # Hexagon for UI
+        else:
+            return ('[', ']') # Rectangle for general
+
+    def _add_architecture_styling(self, partitions: List[Partition]) -> List[str]:
+        """Add styling for architecture diagram."""
+        styles = []
+
+        # Define color scheme based on size
+        large_partitions = []
+        medium_partitions = []
+        small_partitions = []
+
+        for partition in partitions:
+            node_id = self._sanitize_id(partition.name)
+
+            if partition.size_loc > 2000:
+                large_partitions.append(node_id)
+            elif partition.size_loc > 500:
+                medium_partitions.append(node_id)
+            else:
+                small_partitions.append(node_id)
+
+        # Define class styles
+        styles.append(" classDef large fill:#ff6b6b,stroke:#c92a2a,stroke-width:2px")
+        styles.append(" classDef medium fill:#4dabf7,stroke:#1971c2,stroke-width:2px")
+        styles.append(" classDef small fill:#51cf66,stroke:#2f9e44,stroke-width:2px")
+
+        # Apply styles
+        if large_partitions:
+            styles.append(f" class {','.join(large_partitions)} large")
+        if medium_partitions:
+            styles.append(f" class {','.join(medium_partitions)} medium")
+        if small_partitions:
+            styles.append(f" class {','.join(small_partitions)} small")
+
+        return styles
+
+    def _extract_participants(self, call_chain: List[Dict]) -> List[str]:
+        """Extract unique participants from call chain."""
+        participants = []
+        seen = set()
+
+        for call in call_chain:
+            caller = call.get('caller', 'Unknown')
+            callee = call.get('callee', 'Unknown')
+
+            if caller not in seen:
+                participants.append(caller)
+                seen.add(caller)
+
+            if callee not in seen:
+                participants.append(callee)
+                seen.add(callee)
+
+        return participants
+
+    def _get_important_nodes(self,
+                             graph: nx.DiGraph,
+                             max_nodes: int) -> List[str]:
+        """
+        Get most important nodes from graph based on centrality metrics.
+
+        Uses PageRank and degree centrality to identify key nodes.
+        """
+        try:
+            # Calculate PageRank
+            pagerank = nx.pagerank(graph)
+
+            # Calculate degree centrality
+            degree_centrality = nx.degree_centrality(graph)
+
+            # Combine scores (weighted average)
+            combined_scores = {}
+            for node in graph.nodes():
+                combined_scores[node] = (
+                    0.7 * pagerank.get(node, 0) +
+                    0.3 * degree_centrality.get(node, 0)
+                )
+
+            # Sort by score and take top max_nodes
+            sorted_nodes = sorted(
+                combined_scores.items(),
+                key=lambda x: x[1],
+                reverse=True
+            )
+
+            important_nodes = [node for node, score in sorted_nodes[:max_nodes]]
+
+            # Mark central nodes in graph
+            for node in important_nodes[:max_nodes // 5]: # Top 20% are "central"
+                if node in graph.nodes():
+                    graph.nodes[node]['is_central'] = True
+
+            return important_nodes
+
+        except Exception as e:
+            logger.warning(f"Error calculating node importance: {e}, using degree instead")
+
+            # Fallback: just use degree
+            degree_dict = dict(graph.degree())
+            sorted_nodes = sorted(degree_dict.items(), key=lambda x: x[1], reverse=True)
+            return [node for node, degree in sorted_nodes[:max_nodes]]
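A spot-check of the helpers above, with expected results worked out by reading the code rather than by running the wheel (treat them as assumptions).

from coding_assistant.documentation.generators.diagram_generator import MermaidDiagramGenerator

gen = MermaidDiagramGenerator()
print(gen._sanitize_id("coding_assistant/llm/client.py"))  # expected: "llm_client_py"
print(gen._sanitize_id("3rd-party module"))                # expected: "n_3rd_party_module"
print(gen._format_partition_label("./coding_assistant/cli", 1234))
# expected: "coding_assistant/cli\n(1234 LOC)" (quoted, with a literal backslash-n
# that Mermaid renders as a line break)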
coding_assistant/documentation/graph/__init__.py
@@ -0,0 +1,13 @@
+"""Graph analysis for code dependencies and module relationships."""
+
+from coding_assistant.documentation.graph.dependency_builder import (
+    DependencyGraphBuilder,
+    CodeEntity,
+)
+from coding_assistant.documentation.graph.module_analyzer import ModuleAnalyzer
+
+__all__ = [
+    'DependencyGraphBuilder',
+    'CodeEntity',
+    'ModuleAnalyzer',
+]