kairo-code 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (144)
  1. image-service/main.py +178 -0
  2. infra/chat/app/main.py +84 -0
  3. kairo/backend/__init__.py +0 -0
  4. kairo/backend/api/__init__.py +0 -0
  5. kairo/backend/api/admin/__init__.py +23 -0
  6. kairo/backend/api/admin/audit.py +54 -0
  7. kairo/backend/api/admin/content.py +142 -0
  8. kairo/backend/api/admin/incidents.py +148 -0
  9. kairo/backend/api/admin/stats.py +125 -0
  10. kairo/backend/api/admin/system.py +87 -0
  11. kairo/backend/api/admin/users.py +279 -0
  12. kairo/backend/api/agents.py +94 -0
  13. kairo/backend/api/api_keys.py +85 -0
  14. kairo/backend/api/auth.py +116 -0
  15. kairo/backend/api/billing.py +41 -0
  16. kairo/backend/api/chat.py +72 -0
  17. kairo/backend/api/conversations.py +125 -0
  18. kairo/backend/api/device_auth.py +100 -0
  19. kairo/backend/api/files.py +83 -0
  20. kairo/backend/api/health.py +36 -0
  21. kairo/backend/api/images.py +80 -0
  22. kairo/backend/api/openai_compat.py +225 -0
  23. kairo/backend/api/projects.py +102 -0
  24. kairo/backend/api/usage.py +32 -0
  25. kairo/backend/api/webhooks.py +79 -0
  26. kairo/backend/app.py +297 -0
  27. kairo/backend/config.py +179 -0
  28. kairo/backend/core/__init__.py +0 -0
  29. kairo/backend/core/admin_auth.py +24 -0
  30. kairo/backend/core/api_key_auth.py +55 -0
  31. kairo/backend/core/database.py +28 -0
  32. kairo/backend/core/dependencies.py +70 -0
  33. kairo/backend/core/logging.py +23 -0
  34. kairo/backend/core/rate_limit.py +73 -0
  35. kairo/backend/core/security.py +29 -0
  36. kairo/backend/models/__init__.py +19 -0
  37. kairo/backend/models/agent.py +30 -0
  38. kairo/backend/models/api_key.py +25 -0
  39. kairo/backend/models/api_usage.py +29 -0
  40. kairo/backend/models/audit_log.py +26 -0
  41. kairo/backend/models/conversation.py +48 -0
  42. kairo/backend/models/device_code.py +30 -0
  43. kairo/backend/models/feature_flag.py +21 -0
  44. kairo/backend/models/image_generation.py +24 -0
  45. kairo/backend/models/incident.py +28 -0
  46. kairo/backend/models/project.py +28 -0
  47. kairo/backend/models/uptime_record.py +24 -0
  48. kairo/backend/models/usage.py +24 -0
  49. kairo/backend/models/user.py +49 -0
  50. kairo/backend/schemas/__init__.py +0 -0
  51. kairo/backend/schemas/admin/__init__.py +0 -0
  52. kairo/backend/schemas/admin/audit.py +28 -0
  53. kairo/backend/schemas/admin/content.py +53 -0
  54. kairo/backend/schemas/admin/stats.py +77 -0
  55. kairo/backend/schemas/admin/system.py +44 -0
  56. kairo/backend/schemas/admin/users.py +48 -0
  57. kairo/backend/schemas/agent.py +42 -0
  58. kairo/backend/schemas/api_key.py +30 -0
  59. kairo/backend/schemas/auth.py +57 -0
  60. kairo/backend/schemas/chat.py +26 -0
  61. kairo/backend/schemas/conversation.py +39 -0
  62. kairo/backend/schemas/device_auth.py +40 -0
  63. kairo/backend/schemas/image.py +15 -0
  64. kairo/backend/schemas/openai_compat.py +76 -0
  65. kairo/backend/schemas/project.py +21 -0
  66. kairo/backend/schemas/status.py +81 -0
  67. kairo/backend/schemas/usage.py +15 -0
  68. kairo/backend/services/__init__.py +0 -0
  69. kairo/backend/services/admin/__init__.py +0 -0
  70. kairo/backend/services/admin/audit_service.py +78 -0
  71. kairo/backend/services/admin/content_service.py +119 -0
  72. kairo/backend/services/admin/incident_service.py +94 -0
  73. kairo/backend/services/admin/stats_service.py +281 -0
  74. kairo/backend/services/admin/system_service.py +126 -0
  75. kairo/backend/services/admin/user_service.py +157 -0
  76. kairo/backend/services/agent_service.py +107 -0
  77. kairo/backend/services/api_key_service.py +66 -0
  78. kairo/backend/services/api_usage_service.py +126 -0
  79. kairo/backend/services/auth_service.py +101 -0
  80. kairo/backend/services/chat_service.py +501 -0
  81. kairo/backend/services/conversation_service.py +264 -0
  82. kairo/backend/services/device_auth_service.py +193 -0
  83. kairo/backend/services/email_service.py +55 -0
  84. kairo/backend/services/image_service.py +181 -0
  85. kairo/backend/services/llm_service.py +186 -0
  86. kairo/backend/services/project_service.py +109 -0
  87. kairo/backend/services/status_service.py +167 -0
  88. kairo/backend/services/stripe_service.py +78 -0
  89. kairo/backend/services/usage_service.py +150 -0
  90. kairo/backend/services/web_search_service.py +96 -0
  91. kairo/migrations/env.py +60 -0
  92. kairo/migrations/versions/001_initial.py +55 -0
  93. kairo/migrations/versions/002_usage_tracking_and_indexes.py +66 -0
  94. kairo/migrations/versions/003_username_to_email.py +21 -0
  95. kairo/migrations/versions/004_add_plans_and_verification.py +67 -0
  96. kairo/migrations/versions/005_add_projects.py +52 -0
  97. kairo/migrations/versions/006_add_image_generation.py +63 -0
  98. kairo/migrations/versions/007_add_admin_portal.py +107 -0
  99. kairo/migrations/versions/008_add_device_code_auth.py +76 -0
  100. kairo/migrations/versions/009_add_status_page.py +65 -0
  101. kairo/tools/extract_claude_data.py +465 -0
  102. kairo/tools/filter_claude_data.py +303 -0
  103. kairo/tools/generate_curated_data.py +157 -0
  104. kairo/tools/mix_training_data.py +295 -0
  105. kairo_code/__init__.py +3 -0
  106. kairo_code/agents/__init__.py +25 -0
  107. kairo_code/agents/architect.py +98 -0
  108. kairo_code/agents/audit.py +100 -0
  109. kairo_code/agents/base.py +463 -0
  110. kairo_code/agents/coder.py +155 -0
  111. kairo_code/agents/database.py +77 -0
  112. kairo_code/agents/docs.py +88 -0
  113. kairo_code/agents/explorer.py +62 -0
  114. kairo_code/agents/guardian.py +80 -0
  115. kairo_code/agents/planner.py +66 -0
  116. kairo_code/agents/reviewer.py +91 -0
  117. kairo_code/agents/security.py +94 -0
  118. kairo_code/agents/terraform.py +88 -0
  119. kairo_code/agents/testing.py +97 -0
  120. kairo_code/agents/uiux.py +88 -0
  121. kairo_code/auth.py +232 -0
  122. kairo_code/config.py +172 -0
  123. kairo_code/conversation.py +173 -0
  124. kairo_code/heartbeat.py +63 -0
  125. kairo_code/llm.py +291 -0
  126. kairo_code/logging_config.py +156 -0
  127. kairo_code/main.py +818 -0
  128. kairo_code/router.py +217 -0
  129. kairo_code/sandbox.py +248 -0
  130. kairo_code/settings.py +183 -0
  131. kairo_code/tools/__init__.py +51 -0
  132. kairo_code/tools/analysis.py +509 -0
  133. kairo_code/tools/base.py +417 -0
  134. kairo_code/tools/code.py +58 -0
  135. kairo_code/tools/definitions.py +617 -0
  136. kairo_code/tools/files.py +315 -0
  137. kairo_code/tools/review.py +390 -0
  138. kairo_code/tools/search.py +185 -0
  139. kairo_code/ui.py +418 -0
  140. kairo_code-0.1.0.dist-info/METADATA +13 -0
  141. kairo_code-0.1.0.dist-info/RECORD +144 -0
  142. kairo_code-0.1.0.dist-info/WHEEL +5 -0
  143. kairo_code-0.1.0.dist-info/entry_points.txt +2 -0
  144. kairo_code-0.1.0.dist-info/top_level.txt +4 -0
@@ -0,0 +1,315 @@
1
"""File operation tools for Kairo Code"""

import os
from pathlib import Path
from glob import glob

from ..sandbox import get_sandbox


# Constants for file operations
MAX_READ_LINES = 500      # default number of lines returned by read_file per call
MAX_LINE_LENGTH = 1000    # individual lines longer than this are truncated in output
TRUNCATION_WARNING = "\n... [File truncated. Use offset/limit to read more] ..."
14
+
15
+
16
def read_file(
    path: str,
    offset: int = 0,
    limit: int | None = None,
    show_line_numbers: bool = True
) -> str:
    """
    Read and return file contents with line numbers.

    Args:
        path: Path to the file
        offset: Line number to start from (0-indexed); negative values are clamped to 0
        limit: Maximum number of lines to read (default: MAX_READ_LINES)
        show_line_numbers: Whether to prefix lines with numbers

    Returns:
        File contents with an info header, optional line numbers, and a
        continuation hint when more lines remain.

    Raises:
        PermissionError: If the sandbox rejects the path.
        FileNotFoundError: If the file does not exist.
        ValueError: If the path is not a regular file or exceeds 10MB.
    """
    # Validate path through sandbox before touching the filesystem
    sandbox = get_sandbox()
    valid, error, filepath = sandbox.validate_path(path, "read")
    if not valid:
        raise PermissionError(error)

    if not filepath.exists():
        raise FileNotFoundError(f"File not found: {path}")

    if not filepath.is_file():
        raise ValueError(f"Not a file: {path}")

    # Check file size first so a huge file is never loaded into memory
    file_size = filepath.stat().st_size
    if file_size > 10 * 1024 * 1024:  # 10MB
        raise ValueError(f"File too large ({file_size // 1024 // 1024}MB). Max is 10MB.")

    limit = limit or MAX_READ_LINES
    # Clamp negative offsets: a negative slice start would silently read
    # from the end of the file and render a misleading "Lines:" header.
    offset = max(offset, 0)

    # Explicit UTF-8 keeps reads deterministic across platforms (the
    # implicit default is the locale encoding, e.g. cp1252 on Windows).
    with open(filepath, 'r', encoding='utf-8', errors='replace') as f:
        lines = f.readlines()

    total_lines = len(lines)

    # Apply offset and limit
    selected_lines = lines[offset:offset + limit]
    truncated = total_lines > offset + limit

    # Format output
    output_lines = []

    # Add file info header
    output_lines.append(f"# File: {filepath}")
    output_lines.append(f"# Lines: {offset + 1}-{min(offset + len(selected_lines), total_lines)} of {total_lines}")
    output_lines.append("")

    for i, line in enumerate(selected_lines, start=offset + 1):
        # Truncate long lines so one pathological line cannot flood the output
        line = line.rstrip('\n\r')
        if len(line) > MAX_LINE_LENGTH:
            line = line[:MAX_LINE_LENGTH] + "... [line truncated]"

        if show_line_numbers:
            output_lines.append(f"{i:4d} | {line}")
        else:
            output_lines.append(line)

    if truncated:
        output_lines.append(TRUNCATION_WARNING)
        output_lines.append(f"# To continue: read_file(\"{path}\", offset={offset + limit})")

    return "\n".join(output_lines)
86
+
87
+
88
def write_file(path: str, content: str) -> str:
    """Write content to a file. Creates parent directories if needed."""
    # All writes are delegated to the sandbox, which validates the path
    # and performs the actual write.
    return get_sandbox().safe_write(path, content)
93
+
94
+
95
def edit_file(path: str, old_string: str, new_string: str) -> str:
    """
    Make a targeted edit to a file by replacing old_string with new_string.

    This is the PREFERRED way to modify files - more reliable than full rewrites.

    Args:
        path: Path to the file
        old_string: Exact string to find and replace (must be unique in file)
        new_string: String to replace it with

    Returns:
        Confirmation message with diff preview

    Raises:
        PermissionError: If the sandbox rejects the path.
        FileNotFoundError: If the file does not exist.
        ValueError: If old_string is missing from the file or not unique.
    """
    sandbox = get_sandbox()
    valid, error, filepath = sandbox.validate_path(path, "write")
    if not valid:
        raise PermissionError(error)

    if not filepath.exists():
        raise FileNotFoundError(f"File not found: {path}")

    content = filepath.read_text()

    # Check that old_string exists and is unique
    count = content.count(old_string)

    if count == 0:
        # Try to provide helpful feedback. Probe with the first line of
        # old_string: a multi-line old_string contains '\n' and so could
        # never match a single file line, which made the previous
        # old_string[:50] probe miss every near-match.
        probe = old_string.splitlines()[0][:50] if old_string else ""
        lines = content.split('\n')
        for i, line in enumerate(lines, 1):
            if probe and probe in line:  # Check partial match
                raise ValueError(
                    f"Exact match not found, but similar content at line {i}. "
                    f"Make sure old_string matches exactly including whitespace."
                )
        raise ValueError("old_string not found in file. Read the file first to see current content.")

    if count > 1:
        raise ValueError(
            f"old_string appears {count} times in file. "
            f"Provide more context to make it unique."
        )

    # Perform the replacement (first occurrence only; uniqueness verified above)
    new_content = content.replace(old_string, new_string, 1)
    filepath.write_text(new_content)

    # Generate a simple diff preview
    old_preview = old_string[:100] + ("..." if len(old_string) > 100 else "")
    new_preview = new_string[:100] + ("..." if len(new_string) > 100 else "")

    return f"""Edit successful: {filepath}

Changed:
- {old_preview}
+ {new_preview}
"""
153
+
154
+
155
+ def list_files(pattern: str, root: str | None = None, max_results: int = 100) -> list[str]:
156
+ """
157
+ List files matching a glob pattern.
158
+
159
+ Args:
160
+ pattern: Glob pattern (e.g., "*.py", "**/*.js")
161
+ root: Root directory to search from (defaults to cwd)
162
+ max_results: Maximum number of results to return
163
+
164
+ Returns:
165
+ List of matching file paths
166
+ """
167
+ root_path = Path(root).resolve() if root else Path.cwd()
168
+ matches = glob(str(root_path / pattern), recursive=True)
169
+
170
+ # Sort by modification time (newest first) and limit
171
+ matches.sort(key=lambda x: os.path.getmtime(x), reverse=True)
172
+
173
+ return matches[:max_results]
174
+
175
+
176
+ def search_files(
177
+ query: str,
178
+ pattern: str = "**/*",
179
+ root: str | None = None,
180
+ context_lines: int = 0,
181
+ max_results: int = 50
182
+ ) -> list[dict]:
183
+ """
184
+ Search for text in files matching a pattern.
185
+
186
+ Args:
187
+ query: Text to search for (case-insensitive)
188
+ pattern: Glob pattern for files to search
189
+ root: Root directory
190
+ context_lines: Number of lines of context before/after match
191
+ max_results: Maximum number of matches to return
192
+
193
+ Returns:
194
+ List of dicts with file, line_number, content, and context
195
+ """
196
+ root_path = Path(root).resolve() if root else Path.cwd()
197
+ results = []
198
+
199
+ # Skip binary and large files
200
+ skip_extensions = {'.pyc', '.pyo', '.so', '.dylib', '.dll', '.exe', '.bin',
201
+ '.jpg', '.jpeg', '.png', '.gif', '.ico', '.pdf', '.zip',
202
+ '.tar', '.gz', '.node', '.woff', '.woff2', '.ttf'}
203
+
204
+ for filepath in glob(str(root_path / pattern), recursive=True):
205
+ path = Path(filepath)
206
+
207
+ if not path.is_file():
208
+ continue
209
+
210
+ if path.suffix.lower() in skip_extensions:
211
+ continue
212
+
213
+ # Skip large files
214
+ try:
215
+ if path.stat().st_size > 1024 * 1024: # 1MB
216
+ continue
217
+ except OSError:
218
+ continue
219
+
220
+ try:
221
+ lines = path.read_text(errors='replace').splitlines()
222
+
223
+ for i, line in enumerate(lines):
224
+ if query.lower() in line.lower():
225
+ result = {
226
+ "file": str(path),
227
+ "line": i + 1,
228
+ "content": line.strip()[:200]
229
+ }
230
+
231
+ # Add context if requested
232
+ if context_lines > 0:
233
+ start = max(0, i - context_lines)
234
+ end = min(len(lines), i + context_lines + 1)
235
+ result["context"] = "\n".join(
236
+ f"{j + 1}: {lines[j]}"
237
+ for j in range(start, end)
238
+ )
239
+
240
+ results.append(result)
241
+
242
+ if len(results) >= max_results:
243
+ return results
244
+
245
+ except (UnicodeDecodeError, PermissionError, OSError):
246
+ continue
247
+
248
+ return results
249
+
250
+
251
def tree(path: str = ".", max_depth: int = 3, max_files: int = 100) -> str:
    """
    Generate a tree view of directory structure.

    Args:
        path: Root directory to display
        max_depth: Maximum depth to traverse
        max_files: Maximum number of files to show

    Returns:
        Formatted tree string
    """
    root_dir = Path(path).expanduser().resolve()

    if not root_dir.exists():
        raise FileNotFoundError(f"Directory not found: {path}")

    if not root_dir.is_dir():
        raise ValueError(f"Not a directory: {path}")

    # Names never shown in the tree (VCS metadata, caches, build output, ...)
    ignore_patterns = {'.git', '__pycache__', 'node_modules', '.venv', 'venv',
                       '.idea', '.vscode', '.pytest_cache', 'dist', 'build'}

    rendered = [f"{root_dir.name}/"]
    shown_files = 0  # only files count toward max_files; directories are free

    def _walk(folder: Path, prefix: str, depth: int) -> None:
        nonlocal shown_files

        if depth > max_depth or shown_files >= max_files:
            return

        # Directories first, then files, each group alphabetical
        try:
            entries = sorted(
                folder.iterdir(),
                key=lambda p: (not p.is_dir(), p.name.lower())
            )
        except PermissionError:
            rendered.append(f"{prefix}[permission denied]")
            return

        # Drop hidden entries and well-known noise directories
        entries = [e for e in entries
                   if e.name not in ignore_patterns
                   and not e.name.startswith('.')]

        last_index = len(entries) - 1
        for pos, entry in enumerate(entries):
            if shown_files >= max_files:
                rendered.append(f"{prefix}... [truncated, {max_files} items shown]")
                return

            at_end = pos == last_index
            branch = "└── " if at_end else "├── "
            child_prefix = prefix + ("    " if at_end else "│   ")

            if entry.is_dir():
                rendered.append(f"{prefix}{branch}{entry.name}/")
                _walk(entry, child_prefix, depth + 1)
            else:
                rendered.append(f"{prefix}{branch}{entry.name}")
                shown_files += 1

    _walk(root_dir, "", 1)
    return "\n".join(rendered)
@@ -0,0 +1,390 @@
1
+ """Code review tools - checks for quality, modularity, and best practices"""
2
+
3
+ import ast
4
+ import os
5
+ from pathlib import Path
6
+ from dataclasses import dataclass
7
+ from typing import Generator
8
+
9
+ from .base import Tool, ToolResult
10
+
11
+
12
@dataclass
class CodeIssue:
    """A code quality issue found by a review tool."""
    severity: str  # "error", "warning", or "info"
    file: str      # path of the file the issue was found in
    line: int      # 1-based line number (0 for file-level issues such as parse failures)
    message: str   # human-readable description of the problem
    rule: str      # short rule identifier, e.g. "file-too-long"
+
21
+
22
class CodeReviewTool(Tool):
    """Review code for quality, modularity, and best practices.

    Walks Python source (a single file, or every ``*.py`` under a
    directory, capped at 50 files) and reports issues for oversized
    files/functions/classes, long lines, deep nesting, missing
    docstrings, and syntax errors. Each threshold is a
    (normal, strict) pair selected by the ``strict`` flag.
    """

    name = "code_review"
    description = "Review code for quality issues: long files, complex functions, missing docs, etc."
    parameters = {
        "type": "object",
        "properties": {
            "path": {"type": "string", "description": "File or directory to review"},
            "strict": {"type": "boolean", "description": "Use stricter thresholds (default: False)"},
        },
        "required": ["path"],
    }

    # Thresholds (normal / strict)
    THRESHOLDS = {
        "max_file_lines": (300, 200),
        "max_function_lines": (50, 30),
        "max_function_args": (6, 4),
        "max_class_methods": (15, 10),
        "max_nesting_depth": (4, 3),
        "max_line_length": (100, 88),
        "min_docstring_coverage": (0.5, 0.8),  # NOTE(review): not consulted by any check yet
    }

    def execute(self, path: str, strict: bool = False) -> ToolResult:
        """Review `path` and return a per-file issue report.

        success is True only when no error-severity issues were found.
        """
        path_obj = Path(path)
        issues: list[CodeIssue] = []

        if not path_obj.exists():
            return ToolResult(success=False, output="", error=f"Path not found: {path}")

        files = [path_obj] if path_obj.is_file() else list(path_obj.rglob("*.py"))

        for f in files[:50]:  # Limit files to review
            try:
                issues.extend(self._review_file(f, strict))
            except Exception as e:
                # A file that cannot be read/analyzed still shows up in the report
                issues.append(CodeIssue("error", str(f), 0, f"Failed to parse: {e}", "parse-error"))

        if not issues:
            return ToolResult(success=True, output="Code review passed! No issues found.")

        # Format output
        output_lines = [f"Found {len(issues)} issues:\n"]

        # Group by file
        by_file: dict[str, list[CodeIssue]] = {}
        for issue in issues:
            by_file.setdefault(issue.file, []).append(issue)

        for file, file_issues in by_file.items():
            output_lines.append(f"\n{file}:")
            for issue in sorted(file_issues, key=lambda x: x.line):
                icon = {"error": "❌", "warning": "⚠️", "info": "ℹ️"}.get(issue.severity, "•")
                output_lines.append(f"  {icon} L{issue.line}: [{issue.rule}] {issue.message}")

        # Summary
        errors = sum(1 for i in issues if i.severity == "error")
        warnings = sum(1 for i in issues if i.severity == "warning")
        output_lines.append(f"\nSummary: {errors} errors, {warnings} warnings")

        return ToolResult(
            success=errors == 0,
            output="\n".join(output_lines)[:8000]  # keep tool output bounded
        )

    def _review_file(self, file_path: Path, strict: bool) -> Generator[CodeIssue, None, None]:
        """Review a single file, yielding issues as they are found."""
        # Explicit UTF-8 with replacement: previously a file that did not
        # decode under the platform default encoding aborted as "Failed to parse"
        with open(file_path, encoding="utf-8", errors="replace") as f:
            content = f.read()
        lines = content.split("\n")

        str_path = str(file_path)
        idx = 1 if strict else 0  # column selector into the (normal, strict) pairs

        # Check file length
        max_lines = self.THRESHOLDS["max_file_lines"][idx]
        if len(lines) > max_lines:
            yield CodeIssue(
                "warning", str_path, 1,
                f"File has {len(lines)} lines (max: {max_lines}). Consider splitting into modules.",
                "file-too-long"
            )

        # Check line lengths (threshold hoisted out of the loop)
        max_len = self.THRESHOLDS["max_line_length"][idx]
        for i, line in enumerate(lines, 1):
            if len(line) > max_len:
                yield CodeIssue(
                    "info", str_path, i,
                    # BUG FIX: message previously hard-coded "max: 61,242"
                    # instead of reporting the configured threshold.
                    f"Line is {len(line)} chars (max: {max_len})",
                    "line-too-long"
                )

        # Parse AST for deeper analysis
        try:
            tree = ast.parse(content)
        except SyntaxError as e:
            yield CodeIssue("error", str_path, e.lineno or 1, f"Syntax error: {e.msg}", "syntax-error")
            return

        # Analyze functions and classes (async defs are reviewed like sync ones;
        # they were previously skipped entirely)
        for node in ast.walk(tree):
            if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
                yield from self._review_function(node, str_path, strict)
            elif isinstance(node, ast.ClassDef):
                yield from self._review_class(node, str_path, strict)

    def _review_function(
        self,
        node: ast.FunctionDef | ast.AsyncFunctionDef,
        file_path: str,
        strict: bool,
    ) -> Generator[CodeIssue, None, None]:
        """Review a (sync or async) function definition."""
        idx = 1 if strict else 0

        # Function length
        func_lines = node.end_lineno - node.lineno if node.end_lineno else 0
        max_func_lines = self.THRESHOLDS["max_function_lines"][idx]
        if func_lines > max_func_lines:
            yield CodeIssue(
                "warning", file_path, node.lineno,
                f"Function '{node.name}' has {func_lines} lines (max: {max_func_lines}). Consider breaking it up.",
                "function-too-long"
            )

        # Argument count (positional + keyword-only)
        arg_count = len(node.args.args) + len(node.args.kwonlyargs)
        max_args = self.THRESHOLDS["max_function_args"][idx]
        if arg_count > max_args:
            yield CodeIssue(
                "warning", file_path, node.lineno,
                f"Function '{node.name}' has {arg_count} arguments (max: {max_args}). Consider using a config object.",
                "too-many-args"
            )

        # Missing docstring (for public functions only)
        if not node.name.startswith("_"):
            if not (node.body and isinstance(node.body[0], ast.Expr) and isinstance(node.body[0].value, ast.Constant)):
                yield CodeIssue(
                    "info", file_path, node.lineno,
                    f"Function '{node.name}' has no docstring",
                    "missing-docstring"
                )

        # Check nesting depth
        max_depth = self._get_max_nesting(node)
        max_allowed = self.THRESHOLDS["max_nesting_depth"][idx]
        if max_depth > max_allowed:
            yield CodeIssue(
                "warning", file_path, node.lineno,
                f"Function '{node.name}' has nesting depth {max_depth} (max: {max_allowed}). Consider early returns.",
                "deep-nesting"
            )

    def _review_class(self, node: ast.ClassDef, file_path: str, strict: bool) -> Generator[CodeIssue, None, None]:
        """Review a class definition."""
        idx = 1 if strict else 0

        # Method count
        methods = [n for n in node.body if isinstance(n, ast.FunctionDef)]
        max_methods = self.THRESHOLDS["max_class_methods"][idx]
        if len(methods) > max_methods:
            yield CodeIssue(
                "warning", file_path, node.lineno,
                f"Class '{node.name}' has {len(methods)} methods (max: {max_methods}). Consider splitting responsibilities.",
                "class-too-large"
            )

        # Missing class docstring
        if not (node.body and isinstance(node.body[0], ast.Expr) and isinstance(node.body[0].value, ast.Constant)):
            yield CodeIssue(
                "info", file_path, node.lineno,
                f"Class '{node.name}' has no docstring",
                "missing-docstring"
            )

    def _get_max_nesting(self, node: ast.AST, current_depth: int = 0) -> int:
        """Get maximum nesting depth in a node.

        Only control-flow statements (if/for/while/with/try) increase depth.
        """
        max_depth = current_depth

        for child in ast.iter_child_nodes(node):
            if isinstance(child, (ast.If, ast.For, ast.While, ast.With, ast.Try)):
                child_depth = self._get_max_nesting(child, current_depth + 1)
                max_depth = max(max_depth, child_depth)
            else:
                child_depth = self._get_max_nesting(child, current_depth)
                max_depth = max(max_depth, child_depth)

        return max_depth
+ return max_depth
210
+
211
+
212
class ModularityCheckTool(Tool):
    """Check code modularity and suggest improvements."""

    name = "check_modularity"
    description = "Analyze code structure and suggest how to make it more modular and reusable"
    parameters = {
        "type": "object",
        "properties": {
            "path": {"type": "string", "description": "Directory to analyze"},
        },
        "required": ["path"],
    }

    def execute(self, path: str) -> ToolResult:
        """Scan a directory of Python files and report modularity smells.

        Flags files over 300 lines, classes with more than 15 methods,
        and functions longer than 50 lines, then emits matching
        suggestions. Unreadable or unparseable files are skipped.
        """
        path_obj = Path(path)

        if not path_obj.exists():
            return ToolResult(success=False, output="", error=f"Path not found: {path}")

        if not path_obj.is_dir():
            return ToolResult(success=False, output="", error="Path must be a directory")

        suggestions = []
        # Collected findings (the previous "circular_imports" entry was
        # dead: never populated and never reported, so it was removed)
        stats = {
            "files": 0,
            "large_files": [],     # (relative path, line count)
            "god_classes": [],     # (relative path, class name, method count)
            "long_functions": [],  # (relative path, function name, line count)
        }

        py_files = list(path_obj.rglob("*.py"))
        stats["files"] = len(py_files)

        for f in py_files:
            try:
                # Explicit UTF-8 with replacement so one stray byte does
                # not knock the whole file out of the analysis
                with open(f, encoding="utf-8", errors="replace") as fp:
                    content = fp.read()
                lines = len(content.split("\n"))

                if lines > 300:
                    stats["large_files"].append((str(f.relative_to(path_obj)), lines))

                tree = ast.parse(content)
                for node in ast.walk(tree):
                    if isinstance(node, ast.ClassDef):
                        methods = [n for n in node.body if isinstance(n, ast.FunctionDef)]
                        if len(methods) > 15:
                            stats["god_classes"].append((str(f.relative_to(path_obj)), node.name, len(methods)))

                    if isinstance(node, ast.FunctionDef):
                        func_lines = (node.end_lineno or node.lineno) - node.lineno
                        if func_lines > 50:
                            stats["long_functions"].append((str(f.relative_to(path_obj)), node.name, func_lines))

            except Exception:
                # Best-effort scan: skip files that fail to read or parse
                continue

        # Generate report
        output_lines = ["# Modularity Analysis\n"]

        output_lines.append(f"**Files analyzed:** {stats['files']}\n")

        if stats["large_files"]:
            output_lines.append("## Large Files (>300 lines)")
            output_lines.append("Consider splitting these into smaller modules:\n")
            for file, lines in sorted(stats["large_files"], key=lambda x: -x[1])[:10]:
                output_lines.append(f"  - {file}: {lines} lines")

            suggestions.append("Split large files into focused modules with single responsibilities")

        if stats["god_classes"]:
            output_lines.append("\n## Large Classes (>15 methods)")
            output_lines.append("These might have too many responsibilities:\n")
            for file, cls, methods in sorted(stats["god_classes"], key=lambda x: -x[2])[:10]:
                output_lines.append(f"  - {file}: class {cls} ({methods} methods)")

            suggestions.append("Extract related methods into separate classes or mixins")

        if stats["long_functions"]:
            output_lines.append("\n## Long Functions (>50 lines)")
            output_lines.append("Consider breaking these into smaller helper functions:\n")
            for file, func, lines in sorted(stats["long_functions"], key=lambda x: -x[2])[:10]:
                output_lines.append(f"  - {file}: {func}() ({lines} lines)")

            suggestions.append("Extract repeated logic into helper functions")

        if suggestions:
            output_lines.append("\n## Suggestions")
            for i, s in enumerate(suggestions, 1):
                output_lines.append(f"{i}. {s}")
        else:
            output_lines.append("\n✅ Code structure looks good!")

        return ToolResult(success=True, output="\n".join(output_lines))
307
+
308
+
309
class DependencyGraphTool(Tool):
    """Visualize module dependencies."""

    name = "dependency_graph"
    description = "Show import dependencies between modules to identify coupling"
    parameters = {
        "type": "object",
        "properties": {
            "path": {"type": "string", "description": "Directory to analyze"},
        },
        "required": ["path"],
    }

    def execute(self, path: str) -> ToolResult:
        """Build a per-module import map and report internal coupling."""
        target = Path(path)

        if not target.exists():
            return ToolResult(success=False, output="", error=f"Path not found: {path}")

        # Module stem -> top-level names it imports.
        # NOTE(review): keyed by filename stem, so same-named files in
        # different directories collapse into one entry — confirm acceptable.
        imports: dict[str, set[str]] = {}
        py_files = list(target.rglob("*.py"))

        for source in py_files:
            try:
                with open(source) as handle:
                    parsed = ast.parse(handle.read())
            except Exception:
                continue

            deps: set[str] = set()
            for node in ast.walk(parsed):
                if isinstance(node, ast.Import):
                    deps.update(alias.name.split(".")[0] for alias in node.names)
                elif isinstance(node, ast.ImportFrom) and node.module:
                    deps.add(node.module.split(".")[0])
            imports[source.stem] = deps

        # Only dependencies on modules we actually scanned are interesting
        internal_modules = set(imports)

        report = ["# Module Dependency Graph\n"]

        # Modules ranked by internal-dependency count (coupling hotspots first)
        dep_counts = [
            (mod, len(deps & internal_modules), deps & internal_modules)
            for mod, deps in imports.items()
            if deps & internal_modules
        ]

        if dep_counts:
            report.append("## Dependencies (internal modules only)\n")
            for mod, count, deps in sorted(dep_counts, key=lambda item: -item[1]):
                report.append(f"**{mod}** ({count} deps)")
                report.extend(f"  └─ {d}" for d in sorted(deps))
                report.append("")

            # Flag mutually-importing module pairs
            report.append("## Potential Issues\n")
            circular: list[tuple[str, str]] = []
            for mod, deps in imports.items():
                for dep in deps & internal_modules:
                    if dep in imports and mod in imports[dep]:
                        pair = tuple(sorted([mod, dep]))
                        if pair not in circular:
                            circular.append(pair)

            if circular:
                report.append("⚠️ Possible circular imports:")
                report.extend(f"  - {a} ↔ {b}" for a, b in circular)
            else:
                report.append("✅ No circular imports detected")
        else:
            report.append("No internal dependencies found.")

        return ToolResult(success=True, output="\n".join(report))