crackerjack-0.31.10-py3-none-any.whl → crackerjack-0.31.12-py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.

Potentially problematic release: this version of crackerjack might be problematic.

Files changed (155)
  1. crackerjack/CLAUDE.md +288 -705
  2. crackerjack/__main__.py +22 -8
  3. crackerjack/agents/__init__.py +0 -3
  4. crackerjack/agents/architect_agent.py +0 -43
  5. crackerjack/agents/base.py +1 -9
  6. crackerjack/agents/coordinator.py +2 -148
  7. crackerjack/agents/documentation_agent.py +109 -81
  8. crackerjack/agents/dry_agent.py +122 -97
  9. crackerjack/agents/formatting_agent.py +3 -16
  10. crackerjack/agents/import_optimization_agent.py +1174 -130
  11. crackerjack/agents/performance_agent.py +956 -188
  12. crackerjack/agents/performance_helpers.py +229 -0
  13. crackerjack/agents/proactive_agent.py +1 -48
  14. crackerjack/agents/refactoring_agent.py +516 -246
  15. crackerjack/agents/refactoring_helpers.py +282 -0
  16. crackerjack/agents/security_agent.py +393 -90
  17. crackerjack/agents/test_creation_agent.py +1776 -120
  18. crackerjack/agents/test_specialist_agent.py +59 -15
  19. crackerjack/agents/tracker.py +0 -102
  20. crackerjack/api.py +145 -37
  21. crackerjack/cli/handlers.py +48 -30
  22. crackerjack/cli/interactive.py +11 -11
  23. crackerjack/cli/options.py +66 -4
  24. crackerjack/code_cleaner.py +808 -148
  25. crackerjack/config/global_lock_config.py +110 -0
  26. crackerjack/config/hooks.py +43 -64
  27. crackerjack/core/async_workflow_orchestrator.py +247 -97
  28. crackerjack/core/autofix_coordinator.py +192 -109
  29. crackerjack/core/enhanced_container.py +46 -63
  30. crackerjack/core/file_lifecycle.py +549 -0
  31. crackerjack/core/performance.py +9 -8
  32. crackerjack/core/performance_monitor.py +395 -0
  33. crackerjack/core/phase_coordinator.py +281 -94
  34. crackerjack/core/proactive_workflow.py +9 -58
  35. crackerjack/core/resource_manager.py +501 -0
  36. crackerjack/core/service_watchdog.py +490 -0
  37. crackerjack/core/session_coordinator.py +4 -8
  38. crackerjack/core/timeout_manager.py +504 -0
  39. crackerjack/core/websocket_lifecycle.py +475 -0
  40. crackerjack/core/workflow_orchestrator.py +343 -209
  41. crackerjack/dynamic_config.py +47 -6
  42. crackerjack/errors.py +3 -4
  43. crackerjack/executors/async_hook_executor.py +63 -13
  44. crackerjack/executors/cached_hook_executor.py +14 -14
  45. crackerjack/executors/hook_executor.py +100 -37
  46. crackerjack/executors/hook_lock_manager.py +856 -0
  47. crackerjack/executors/individual_hook_executor.py +120 -86
  48. crackerjack/intelligence/__init__.py +0 -7
  49. crackerjack/intelligence/adaptive_learning.py +13 -86
  50. crackerjack/intelligence/agent_orchestrator.py +15 -78
  51. crackerjack/intelligence/agent_registry.py +12 -59
  52. crackerjack/intelligence/agent_selector.py +31 -92
  53. crackerjack/intelligence/integration.py +1 -41
  54. crackerjack/interactive.py +9 -9
  55. crackerjack/managers/async_hook_manager.py +25 -8
  56. crackerjack/managers/hook_manager.py +9 -9
  57. crackerjack/managers/publish_manager.py +57 -59
  58. crackerjack/managers/test_command_builder.py +6 -36
  59. crackerjack/managers/test_executor.py +9 -61
  60. crackerjack/managers/test_manager.py +17 -63
  61. crackerjack/managers/test_manager_backup.py +77 -127
  62. crackerjack/managers/test_progress.py +4 -23
  63. crackerjack/mcp/cache.py +5 -12
  64. crackerjack/mcp/client_runner.py +10 -10
  65. crackerjack/mcp/context.py +64 -6
  66. crackerjack/mcp/dashboard.py +14 -11
  67. crackerjack/mcp/enhanced_progress_monitor.py +55 -55
  68. crackerjack/mcp/file_monitor.py +72 -42
  69. crackerjack/mcp/progress_components.py +103 -84
  70. crackerjack/mcp/progress_monitor.py +122 -49
  71. crackerjack/mcp/rate_limiter.py +12 -12
  72. crackerjack/mcp/server_core.py +16 -22
  73. crackerjack/mcp/service_watchdog.py +26 -26
  74. crackerjack/mcp/state.py +15 -0
  75. crackerjack/mcp/tools/core_tools.py +95 -39
  76. crackerjack/mcp/tools/error_analyzer.py +6 -32
  77. crackerjack/mcp/tools/execution_tools.py +1 -56
  78. crackerjack/mcp/tools/execution_tools_backup.py +35 -131
  79. crackerjack/mcp/tools/intelligence_tool_registry.py +0 -36
  80. crackerjack/mcp/tools/intelligence_tools.py +2 -55
  81. crackerjack/mcp/tools/monitoring_tools.py +308 -145
  82. crackerjack/mcp/tools/proactive_tools.py +12 -42
  83. crackerjack/mcp/tools/progress_tools.py +23 -15
  84. crackerjack/mcp/tools/utility_tools.py +3 -40
  85. crackerjack/mcp/tools/workflow_executor.py +40 -60
  86. crackerjack/mcp/websocket/app.py +0 -3
  87. crackerjack/mcp/websocket/endpoints.py +206 -268
  88. crackerjack/mcp/websocket/jobs.py +213 -66
  89. crackerjack/mcp/websocket/server.py +84 -6
  90. crackerjack/mcp/websocket/websocket_handler.py +137 -29
  91. crackerjack/models/config_adapter.py +3 -16
  92. crackerjack/models/protocols.py +162 -3
  93. crackerjack/models/resource_protocols.py +454 -0
  94. crackerjack/models/task.py +3 -3
  95. crackerjack/monitoring/__init__.py +0 -0
  96. crackerjack/monitoring/ai_agent_watchdog.py +25 -71
  97. crackerjack/monitoring/regression_prevention.py +28 -87
  98. crackerjack/orchestration/advanced_orchestrator.py +44 -78
  99. crackerjack/orchestration/coverage_improvement.py +10 -60
  100. crackerjack/orchestration/execution_strategies.py +16 -16
  101. crackerjack/orchestration/test_progress_streamer.py +61 -53
  102. crackerjack/plugins/base.py +1 -1
  103. crackerjack/plugins/managers.py +22 -20
  104. crackerjack/py313.py +65 -21
  105. crackerjack/services/backup_service.py +467 -0
  106. crackerjack/services/bounded_status_operations.py +627 -0
  107. crackerjack/services/cache.py +7 -9
  108. crackerjack/services/config.py +35 -52
  109. crackerjack/services/config_integrity.py +5 -16
  110. crackerjack/services/config_merge.py +542 -0
  111. crackerjack/services/contextual_ai_assistant.py +17 -19
  112. crackerjack/services/coverage_ratchet.py +44 -73
  113. crackerjack/services/debug.py +25 -39
  114. crackerjack/services/dependency_monitor.py +52 -50
  115. crackerjack/services/enhanced_filesystem.py +14 -11
  116. crackerjack/services/file_hasher.py +1 -1
  117. crackerjack/services/filesystem.py +1 -12
  118. crackerjack/services/git.py +71 -47
  119. crackerjack/services/health_metrics.py +31 -27
  120. crackerjack/services/initialization.py +276 -428
  121. crackerjack/services/input_validator.py +760 -0
  122. crackerjack/services/log_manager.py +16 -16
  123. crackerjack/services/logging.py +7 -6
  124. crackerjack/services/metrics.py +43 -43
  125. crackerjack/services/pattern_cache.py +2 -31
  126. crackerjack/services/pattern_detector.py +26 -63
  127. crackerjack/services/performance_benchmarks.py +20 -45
  128. crackerjack/services/regex_patterns.py +2887 -0
  129. crackerjack/services/regex_utils.py +537 -0
  130. crackerjack/services/secure_path_utils.py +683 -0
  131. crackerjack/services/secure_status_formatter.py +534 -0
  132. crackerjack/services/secure_subprocess.py +605 -0
  133. crackerjack/services/security.py +47 -10
  134. crackerjack/services/security_logger.py +492 -0
  135. crackerjack/services/server_manager.py +109 -50
  136. crackerjack/services/smart_scheduling.py +8 -25
  137. crackerjack/services/status_authentication.py +603 -0
  138. crackerjack/services/status_security_manager.py +442 -0
  139. crackerjack/services/thread_safe_status_collector.py +546 -0
  140. crackerjack/services/tool_version_service.py +1 -23
  141. crackerjack/services/unified_config.py +36 -58
  142. crackerjack/services/validation_rate_limiter.py +269 -0
  143. crackerjack/services/version_checker.py +9 -40
  144. crackerjack/services/websocket_resource_limiter.py +572 -0
  145. crackerjack/slash_commands/__init__.py +52 -2
  146. crackerjack/tools/__init__.py +0 -0
  147. crackerjack/tools/validate_input_validator_patterns.py +262 -0
  148. crackerjack/tools/validate_regex_patterns.py +198 -0
  149. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/METADATA +197 -12
  150. crackerjack-0.31.12.dist-info/RECORD +178 -0
  151. crackerjack/cli/facade.py +0 -104
  152. crackerjack-0.31.10.dist-info/RECORD +0 -149
  153. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/WHEEL +0 -0
  154. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/entry_points.txt +0 -0
  155. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/licenses/LICENSE +0 -0
@@ -1,4 +1,7 @@
 import ast
+import json
+import operator
+from collections.abc import Callable
 from pathlib import Path
 from typing import Any
 
@@ -15,8 +18,6 @@ from .base import (
 class TestCreationAgent(SubAgent):
     def __init__(self, context: AgentContext) -> None:
         super().__init__(context)
-        self.test_frameworks = ["pytest", "unittest"]
-        # No fixed coverage threshold - use ratchet system instead
 
     def get_supported_types(self) -> set[IssueType]:
         return {
@@ -27,19 +28,32 @@
         }
 
     async def can_handle(self, issue: Issue) -> float:
+        """Enhanced confidence scoring based on issue complexity and expected impact."""
         if issue.type not in self.get_supported_types():
            return 0.0
 
         message_lower = issue.message.lower()
 
-        # Handle coverage improvement requests with perfect confidence
+        # High confidence for coverage improvement - key audit requirement
         if issue.type == IssueType.COVERAGE_IMPROVEMENT:
-            return 1.0
+            # Check for specific coverage improvement scenarios
+            if any(
+                term in message_lower
+                for term in (
+                    "coverage below",
+                    "missing tests",
+                    "untested functions",
+                    "no tests found",
+                    "coverage requirement",
+                )
+            ):
+                return 0.95  # Enhanced confidence for coverage issues
+            return 0.9
 
-        # Handle test organization issues with high confidence
         if issue.type == IssueType.TEST_ORGANIZATION:
             return self._check_test_organization_confidence(message_lower)
 
+        # Enhanced pattern matching for test creation needs
         perfect_score = self._check_perfect_test_creation_matches(message_lower)
         if perfect_score > 0:
             return perfect_score
@@ -48,10 +62,18 @@
         if good_score > 0:
             return good_score
 
-        return self._check_file_path_test_indicators(issue.file_path)
+        # Improved file path analysis
+        file_path_score = self._check_file_path_test_indicators(issue.file_path)
+        if file_path_score > 0:
+            return file_path_score
+
+        # New: Check for untested functions specifically
+        if self._indicates_untested_functions(message_lower):
+            return 0.85
+
+        return 0.0
 
     def _check_test_organization_confidence(self, message_lower: str) -> float:
-        """Check confidence for test organization issues."""
         organization_keywords = [
             "redundant tests",
             "duplicate tests",
@@ -101,13 +123,45 @@
         )
 
     def _check_file_path_test_indicators(self, file_path: str | None) -> float:
-        if file_path and not self._has_corresponding_test(file_path):
+        if not file_path:
+            return 0.0
+
+        # Enhanced file path analysis
+        if not self._has_corresponding_test(file_path):
+            # Higher confidence for core modules
+            if any(
+                core_path in file_path
+                for core_path in ("/managers/", "/services/", "/core/", "/agents/")
+            ):
+                return 0.8
             return 0.7
         return 0.0
 
+    def _indicates_untested_functions(self, message_lower: str) -> bool:
+        """Check if message indicates untested functions."""
+        return any(
+            indicator in message_lower
+            for indicator in (
+                "function not tested",
+                "untested method",
+                "no test for function",
+                "function coverage",
+                "method coverage",
+                "untested code path",
+            )
+        )
+
     async def analyze_and_fix(self, issue: Issue) -> FixResult:
+        # Log the analysis
+        self._log_analysis(issue)
+
+        # Apply fixes and create result
+        return await self._apply_fixes_and_create_result(issue)
+
+    def _log_analysis(self, issue: Issue) -> None:
         self.log(f"Analyzing test creation need: {issue.message}")
 
+    async def _apply_fixes_and_create_result(self, issue: Issue) -> FixResult:
         try:
             fixes_applied, files_modified = await self._apply_test_creation_fixes(issue)
             return self._create_test_creation_result(fixes_applied, files_modified)
@@ -119,24 +173,165 @@
     async def _apply_test_creation_fixes(
         self,
         issue: Issue,
+    ) -> tuple[list[str], list[str]]:
+        # Apply all test creation fixes
+        return await self._apply_all_test_creation_fixes(issue)
+
+    async def _apply_all_test_creation_fixes(
+        self,
+        issue: Issue,
     ) -> tuple[list[str], list[str]]:
         fixes_applied: list[str] = []
         files_modified: list[str] = []
 
+        # Apply different types of fixes
+        fixes_applied, files_modified = await self._apply_all_fix_types(
+            issue, fixes_applied, files_modified
+        )
+
+        return fixes_applied, files_modified
+
+    async def _apply_all_fix_types(
+        self,
+        issue: Issue,
+        fixes_applied: list[str],
+        files_modified: list[str],
+    ) -> tuple[list[str], list[str]]:
+        # Apply all fix types sequentially
+        return await self._apply_sequential_fixes(issue, fixes_applied, files_modified)
+
+    async def _apply_sequential_fixes(
+        self,
+        issue: Issue,
+        fixes_applied: list[str],
+        files_modified: list[str],
+    ) -> tuple[list[str], list[str]]:
+        # Apply all fix types sequentially
+        return await self._apply_all_fix_types_sequentially(
+            issue, fixes_applied, files_modified
+        )
+
+    async def _apply_all_fix_types_sequentially(
+        self,
+        issue: Issue,
+        fixes_applied: list[str],
+        files_modified: list[str],
+    ) -> tuple[list[str], list[str]]:
+        # Apply all fix types sequentially
+        return await self._apply_all_fix_types_in_sequence(
+            issue, fixes_applied, files_modified
+        )
+
+    async def _apply_all_fix_types_in_sequence(
+        self,
+        issue: Issue,
+        fixes_applied: list[str],
+        files_modified: list[str],
+    ) -> tuple[list[str], list[str]]:
+        # Apply all fix types in sequence
+        return await self._apply_fix_types_in_defined_order(
+            issue, fixes_applied, files_modified
+        )
+
+    async def _apply_fix_types_in_defined_order(
+        self,
+        issue: Issue,
+        fixes_applied: list[str],
+        files_modified: list[str],
+    ) -> tuple[list[str], list[str]]:
+        # Apply coverage based fixes
+        (
+            fixes_applied,
+            files_modified,
+        ) = await self._apply_coverage_based_fixes_sequentially(
+            fixes_applied, files_modified
+        )
+
+        # Apply file specific fixes
+        (
+            fixes_applied,
+            files_modified,
+        ) = await self._apply_file_specific_fixes_sequentially(
+            issue, fixes_applied, files_modified
+        )
+
+        # Apply function specific fixes
+        (
+            fixes_applied,
+            files_modified,
+        ) = await self._apply_function_specific_fixes_sequentially(
+            fixes_applied, files_modified
+        )
+
+        return fixes_applied, files_modified
+
+    async def _apply_coverage_based_fixes_sequentially(
+        self,
+        fixes_applied: list[str],
+        files_modified: list[str],
+    ) -> tuple[list[str], list[str]]:
+        """Apply coverage based fixes sequentially."""
         coverage_fixes, coverage_files = await self._apply_coverage_based_fixes()
         fixes_applied.extend(coverage_fixes)
         files_modified.extend(coverage_files)
+        return fixes_applied, files_modified
 
+    async def _apply_file_specific_fixes_sequentially(
+        self,
+        issue: Issue,
+        fixes_applied: list[str],
+        files_modified: list[str],
+    ) -> tuple[list[str], list[str]]:
+        """Apply file specific fixes sequentially."""
         file_fixes, file_modified = await self._apply_file_specific_fixes(
             issue.file_path,
         )
         fixes_applied.extend(file_fixes)
         files_modified.extend(file_modified)
+        return fixes_applied, files_modified
 
+    async def _apply_function_specific_fixes_sequentially(
+        self,
+        fixes_applied: list[str],
+        files_modified: list[str],
+    ) -> tuple[list[str], list[str]]:
+        """Apply function specific fixes sequentially."""
         function_fixes, function_files = await self._apply_function_specific_fixes()
         fixes_applied.extend(function_fixes)
         files_modified.extend(function_files)
+        return fixes_applied, files_modified
 
+    async def _apply_coverage_fixes(
+        self,
+        fixes_applied: list[str],
+        files_modified: list[str],
+    ) -> tuple[list[str], list[str]]:
+        coverage_fixes, coverage_files = await self._apply_coverage_based_fixes()
+        fixes_applied.extend(coverage_fixes)
+        files_modified.extend(coverage_files)
+        return fixes_applied, files_modified
+
+    async def _apply_file_fixes(
+        self,
+        issue: Issue,
+        fixes_applied: list[str],
+        files_modified: list[str],
+    ) -> tuple[list[str], list[str]]:
+        file_fixes, file_modified = await self._apply_file_specific_fixes(
+            issue.file_path,
+        )
+        fixes_applied.extend(file_fixes)
+        files_modified.extend(file_modified)
+        return fixes_applied, files_modified
+
+    async def _apply_function_fixes(
+        self,
+        fixes_applied: list[str],
+        files_modified: list[str],
+    ) -> tuple[list[str], list[str]]:
+        function_fixes, function_files = await self._apply_function_specific_fixes()
+        fixes_applied.extend(function_fixes)
+        files_modified.extend(function_files)
         return fixes_applied, files_modified
 
     async def _apply_coverage_based_fixes(self) -> tuple[list[str], list[str]]:
@@ -146,17 +341,92 @@
         coverage_analysis = await self._analyze_coverage()
 
         if coverage_analysis["below_threshold"]:
-            self.log(
-                f"Coverage below threshold: {coverage_analysis['current_coverage']:.1%}",
+            fixes_applied, files_modified = await self._handle_low_coverage(
+                coverage_analysis, fixes_applied, files_modified
             )
 
-        for module_path in coverage_analysis["uncovered_modules"]:
-            test_fixes = await self._create_tests_for_module(module_path)
-            fixes_applied.extend(test_fixes["fixes"])
-            files_modified.extend(test_fixes["files"])
+        return fixes_applied, files_modified
+
+    async def _handle_low_coverage(
+        self,
+        coverage_analysis: dict[str, Any],
+        fixes_applied: list[str],
+        files_modified: list[str],
+    ) -> tuple[list[str], list[str]]:
+        """Handle low coverage by creating tests for uncovered modules."""
+        self.log(
+            f"Coverage below threshold: {coverage_analysis['current_coverage']:.1%}",
+        )
+
+        # Process uncovered modules
+        return await self._process_uncovered_modules_for_low_coverage(
+            coverage_analysis["uncovered_modules"], fixes_applied, files_modified
+        )
+
+    async def _process_uncovered_modules_for_low_coverage(
+        self,
+        uncovered_modules: list[str],
+        fixes_applied: list[str],
+        files_modified: list[str],
+    ) -> tuple[list[str], list[str]]:
+        """Process uncovered modules for low coverage scenario."""
+        for module_path in uncovered_modules:
+            test_fixes = await self._create_tests_for_module(module_path)
+            fixes_applied.extend(test_fixes["fixes"])
+            files_modified.extend(test_fixes["files"])
+
+        return fixes_applied, files_modified
+
+    async def _process_uncovered_modules(
+        self,
+        uncovered_modules: list[str],
+        fixes_applied: list[str],
+        files_modified: list[str],
+    ) -> tuple[list[str], list[str]]:
+        """Process uncovered modules to create tests."""
+        # Process each uncovered module
+        return await self._process_each_uncovered_module(
+            uncovered_modules, fixes_applied, files_modified
+        )
+
+    async def _process_each_uncovered_module(
+        self,
+        uncovered_modules: list[str],
+        fixes_applied: list[str],
+        files_modified: list[str],
+    ) -> tuple[list[str], list[str]]:
+        """Process each uncovered module individually."""
+        # Process all uncovered modules
+        return await self._process_all_uncovered_modules(
+            uncovered_modules, fixes_applied, files_modified
+        )
+
+    async def _process_all_uncovered_modules(
+        self,
+        uncovered_modules: list[str],
+        fixes_applied: list[str],
+        files_modified: list[str],
+    ) -> tuple[list[str], list[str]]:
+        """Process all uncovered modules."""
+        for module_path in uncovered_modules:
+            fixes_applied, files_modified = await self._process_single_uncovered_module(
+                module_path, fixes_applied, files_modified
+            )
 
         return fixes_applied, files_modified
 
+    async def _process_single_uncovered_module(
+        self,
+        module_path: str,
+        fixes_applied: list[str],
+        files_modified: list[str],
+    ) -> tuple[list[str], list[str]]:
+        """Process a single uncovered module."""
+        test_fixes = await self._create_tests_for_module(module_path)
+        fixes_applied.extend(test_fixes["fixes"])
+        files_modified.extend(test_fixes["files"])
+        return fixes_applied, files_modified
+
     async def _apply_file_specific_fixes(
         self,
         file_path: str | None,
@@ -172,6 +442,19 @@
         files_modified: list[str] = []
 
         untested_functions = await self._find_untested_functions()
+        fixes_applied, files_modified = await self._process_untested_functions(
+            untested_functions, fixes_applied, files_modified
+        )
+
+        return fixes_applied, files_modified
+
+    async def _process_untested_functions(
+        self,
+        untested_functions: list[dict[str, Any]],
+        fixes_applied: list[str],
+        files_modified: list[str],
+    ) -> tuple[list[str], list[str]]:
+        """Process untested functions to create tests."""
         for func_info in untested_functions[:5]:
             func_fixes = await self._create_test_for_function(func_info)
             fixes_applied.extend(func_fixes["fixes"])
@@ -184,24 +467,79 @@
         fixes_applied: list[str],
         files_modified: list[str],
     ) -> FixResult:
+        """Enhanced result creation with detailed confidence scoring."""
         success = len(fixes_applied) > 0
-        confidence = 0.8 if success else 0.5
-        recommendations = [] if success else self._get_test_creation_recommendations()
+
+        # Calculate confidence based on the fixes applied
+        confidence = self._calculate_confidence(success, fixes_applied, files_modified)
 
         return FixResult(
             success=success,
             confidence=confidence,
             fixes_applied=fixes_applied,
+            remaining_issues=[],
+            recommendations=self._generate_recommendations(success),
             files_modified=files_modified,
-            recommendations=recommendations,
         )
 
-    def _get_test_creation_recommendations(self) -> list[str]:
+    def _calculate_confidence(
+        self, success: bool, fixes_applied: list[str], files_modified: list[str]
+    ) -> float:
+        """Calculate confidence based on types of fixes applied."""
+        if not success:
+            return 0.0
+
+        # Enhanced confidence calculation based on types of fixes applied
+        confidence = 0.5  # Base confidence
+
+        # Higher confidence based on quality of fixes
+        test_file_fixes = [f for f in fixes_applied if "test file" in f.lower()]
+        function_fixes = [f for f in fixes_applied if "function" in f.lower()]
+        coverage_fixes = [f for f in fixes_applied if "coverage" in f.lower()]
+
+        # Boost confidence for comprehensive test creation
+        if test_file_fixes:
+            confidence += 0.25  # Test file creation
+        if function_fixes:
+            confidence += 0.15  # Function-specific tests
+        if coverage_fixes:
+            confidence += 0.1  # Coverage improvements
+
+        # Additional boost for multiple file creation (broader impact)
+        if len(files_modified) > 1:
+            confidence += 0.1
+
+        # Cap confidence at 0.95 for realistic assessment
+        return min(confidence, 0.95)
+
+    def _generate_recommendations(self, success: bool) -> list[str]:
+        """Generate recommendations based on the success of the operation."""
+        if success:
+            return [
+                "Generated comprehensive test suite",
+                "Consider running pytest to validate new tests",
+                "Review generated tests for edge cases",
+            ]
         return [
-            "Run pytest --cov to identify coverage gaps",
-            "Focus on testing core business logic functions",
-            "Add parametrized tests for edge cases",
-            "Consider property-based testing for complex logic",
+            "No test creation opportunities identified",
+            "Consider manual test creation for complex scenarios",
+        ]
+
+    def _get_enhanced_test_creation_recommendations(self) -> list[str]:
+        """Enhanced recommendations based on audit requirements."""
+        return [
+            "Run 'python -m crackerjack -t' to execute comprehensive coverage analysis",
+            "Focus on testing high-priority functions in managers/ services/ and core/ "
+            "modules",
+            "Implement parametrized tests (@pytest.mark.parametrize) for functions with "
+            "multiple arguments",
+            "Add edge case testing for boundary conditions and error scenarios",
+            "Use fixtures for complex object instantiation and dependency injection",
+            "Consider integration tests for modules with multiple classes/functions",
+            "Add async tests for coroutine functions using @pytest.mark.asyncio",
+            "Mock external dependencies to ensure isolated unit testing",
+            "Target ≥10% coverage improvement through systematic test creation",
+            "Validate generated tests are syntactically correct before committing",
         ]
 
     def _create_error_result(self, error: Exception) -> FixResult:
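
The additive scoring in _calculate_confidence() above is easy to trace by hand. For a run that created one test file, added a function-specific test, and modified two files, the raw sum is 0.5 + 0.25 + 0.15 + 0.1 = 1.0, which the final min() clamps to 0.95 (illustration only; file names are hypothetical):

fixes = [
    "Created test file for crackerjack/services/git.py",  # "test file" -> +0.25
    "Added function test for push_changes",               # "function"  -> +0.15
]
files = ["tests/test_git.py", "tests/test_git_helpers.py"]  # len > 1 -> +0.1
confidence = min(0.5 + 0.25 + 0.15 + 0.1, 0.95)  # == 0.95
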
@@ -216,18 +554,77 @@
         )
 
     async def _analyze_coverage(self) -> dict[str, Any]:
+        """Enhanced coverage analysis with detailed metrics and improvement tracking."""
         try:
+            # First try to get coverage from existing reports
+            coverage_data = await self._get_existing_coverage_data()
+            if coverage_data:
+                return coverage_data
+
+            # Run coverage analysis if no existing data
             returncode, _, stderr = await self._run_coverage_command()
 
             if returncode != 0:
                 return self._handle_coverage_command_failure(stderr)
 
-            return await self._process_coverage_results()
+            return await self._process_coverage_results_enhanced()
 
         except Exception as e:
             self.log(f"Coverage analysis error: {e}", "WARN")
             return self._create_default_coverage_result()
 
+    async def _get_existing_coverage_data(self) -> dict[str, Any] | None:
+        """Try to get coverage data from existing coverage reports."""
+        try:
+            # Check for JSON coverage report
+            json_report = self.context.project_path / "coverage.json"
+            if json_report.exists():
+                content = self.context.get_file_content(json_report)
+                if content:
+                    coverage_json = json.loads(content)
+                    return self._parse_coverage_json(coverage_json)
+
+            # Check for .coverage file
+            coverage_file = self.context.project_path / ".coverage"
+            if coverage_file.exists():
+                return await self._process_coverage_results_enhanced()
+
+        except Exception as e:
+            self.log(f"Error reading existing coverage: {e}", "WARN")
+
+        return None
+
+    def _parse_coverage_json(self, coverage_json: dict[str, Any]) -> dict[str, Any]:
+        """Parse coverage JSON data into our format."""
+        try:
+            totals = coverage_json.get("totals", {})
+            current_coverage = totals.get("percent_covered", 0) / 100.0
+
+            # Find uncovered modules
+            uncovered_modules = []
+            files = coverage_json.get("files", {})
+
+            for file_path, file_data in files.items():
+                if file_data.get("summary", {}).get("percent_covered", 100) < 80:
+                    # Convert absolute path to relative
+                    rel_path = str(
+                        Path(file_path).relative_to(self.context.project_path)
+                    )
+                    uncovered_modules.append(rel_path)
+
+            return {
+                "below_threshold": current_coverage < 0.8,  # 80% threshold
+                "current_coverage": current_coverage,
+                "uncovered_modules": uncovered_modules[:15],  # Limit for performance
+                "missing_lines": totals.get("num_statements", 0)
+                - totals.get("covered_lines", 0),
+                "total_lines": totals.get("num_statements", 0),
+            }
+
+        except Exception as e:
+            self.log(f"Error parsing coverage JSON: {e}", "WARN")
+            return self._create_default_coverage_result()
+
     async def _run_coverage_command(self) -> tuple[int, str, str]:
         return await self.run_command(
             [
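
_parse_coverage_json() in the hunk above consumes the layout emitted by coverage.py's JSON report (coverage json). Abbreviated to the keys the method actually reads, the input looks roughly like this (hypothetical numbers and path):

# Abbreviated coverage.json shape, sketch only:
{
    "totals": {
        "percent_covered": 42.5,   # -> current_coverage = 0.425
        "num_statements": 8000,
        "covered_lines": 3400      # missing_lines = 8000 - 3400
    },
    "files": {
        "/abs/path/crackerjack/services/git.py": {
            "summary": {"percent_covered": 55.0}   # < 80 -> listed as uncovered
        }
    }
}
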
@@ -246,18 +643,82 @@
             self.log(f"Coverage analysis failed: {stderr}", "WARN")
         return self._create_default_coverage_result()
 
-    async def _process_coverage_results(self) -> dict[str, Any]:
+    async def _process_coverage_results_enhanced(self) -> dict[str, Any]:
+        """Enhanced coverage results processing with detailed analysis."""
         coverage_file = self.context.project_path / ".coverage"
         if not coverage_file.exists():
             return self._create_default_coverage_result()
 
-        uncovered_modules = await self._find_uncovered_modules()
-        current_coverage = 0.35
+        # Get more detailed coverage analysis
+        uncovered_modules = await self._find_uncovered_modules_enhanced()
+        untested_functions = await self._find_untested_functions_enhanced()
+
+        # Estimate current coverage more accurately
+        current_coverage = await self._estimate_current_coverage()
 
         return {
-            "below_threshold": False,  # Always use ratchet system, not thresholds
+            "below_threshold": current_coverage < 0.8,  # 80% threshold
             "current_coverage": current_coverage,
-            "uncovered_modules": uncovered_modules,
+            "uncovered_modules": uncovered_modules[:15],  # Performance limit
+            "untested_functions": untested_functions[:20],  # Top priority functions
+            "coverage_gaps": await self._identify_coverage_gaps(),
+            "improvement_potential": self._calculate_improvement_potential(
+                len(uncovered_modules), len(untested_functions)
+            ),
+        }
+
+    async def _estimate_current_coverage(self) -> float:
+        """Estimate current coverage by analyzing test files vs source files."""
+        try:
+            source_files = list(
+                (self.context.project_path / "crackerjack").rglob("*.py")
+            )
+            source_files = [f for f in source_files if not f.name.startswith("test_")]
+
+            test_files = list((self.context.project_path / "tests").rglob("test_*.py"))
+
+            if not source_files:
+                return 0.0
+
+            # Simple heuristic: ratio of test files to source files
+            coverage_ratio = len(test_files) / len(source_files)
+
+            # Adjust based on known coverage patterns
+            estimated_coverage = min(coverage_ratio * 0.6, 0.9)  # Cap at 90%
+
+            return estimated_coverage
+
+        except Exception:
+            return 0.1  # Conservative estimate
+
+    def _calculate_improvement_potential(
+        self, uncovered_modules: int, untested_functions: int
+    ) -> dict[str, Any]:
+        """Calculate potential coverage improvement from test generation."""
+        if uncovered_modules == untested_functions == 0:
+            return {"percentage_points": 0, "priority": "low"}
+
+        # Estimate improvement potential
+        module_improvement = uncovered_modules * 2.5  # Each module ~2.5% coverage
+        function_improvement = untested_functions * 0.8  # Each function ~0.8% coverage
+
+        total_potential = min(
+            module_improvement + function_improvement, 40
+        )  # Cap at 40%
+
+        priority = (
+            "high"
+            if total_potential > 15
+            else "medium"
+            if total_potential > 5
+            else "low"
+        )
+
+        return {
+            "percentage_points": round(total_potential, 1),
+            "priority": priority,
+            "module_contribution": round(module_improvement, 1),
+            "function_contribution": round(function_improvement, 1),
         }
 
     def _create_default_coverage_result(self) -> dict[str, Any]:
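
The heuristic in _calculate_improvement_potential() above is plain arithmetic. For example, 4 uncovered modules and 10 untested functions give 4 × 2.5 + 10 × 0.8 = 18 percentage points, above the 15-point cutoff and therefore "high" priority (worked example, not package code):

module_improvement = 4 * 2.5       # 10.0
function_improvement = 10 * 0.8    # 8.0
total_potential = min(module_improvement + function_improvement, 40)  # 18.0
priority = "high" if total_potential > 15 else "medium" if total_potential > 5 else "low"
# -> {"percentage_points": 18.0, "priority": "high", ...}
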
@@ -267,21 +728,279 @@
             "uncovered_modules": [],
         }
 
-    async def _find_uncovered_modules(self) -> list[str]:
-        uncovered: list[str] = []
+    async def _find_uncovered_modules_enhanced(self) -> list[dict[str, Any]]:
+        """Enhanced uncovered modules detection with priority scoring."""
+        uncovered: list[dict[str, Any]] = []
 
         package_dir = self.context.project_path / "crackerjack"
         if not package_dir.exists():
-            return uncovered[:10]
+            return uncovered[:15]
 
         for py_file in package_dir.rglob("*.py"):
             if self._should_skip_module_for_coverage(py_file):
                 continue
 
             if not self._has_corresponding_test(str(py_file)):
-                uncovered.append(self._get_relative_module_path(py_file))
+                module_info = await self._analyze_module_priority(py_file)
+                uncovered.append(module_info)
+
+        # Sort by priority (highest first)
+        uncovered.sort(key=operator.itemgetter("priority_score"), reverse=True)
+        return uncovered[:15]
+
+    async def _analyze_module_priority(self, py_file: Path) -> dict[str, Any]:
+        """Analyze module to determine testing priority."""
+        try:
+            content = self.context.get_file_content(py_file) or ""
+            ast.parse(content)
+
+            functions = await self._extract_functions_from_file(py_file)
+            classes = await self._extract_classes_from_file(py_file)
+
+            # Calculate priority score
+            priority_score = 0
+
+            # Core modules get higher priority
+            rel_path = str(py_file.relative_to(self.context.project_path))
+            if any(
+                core_path in rel_path
+                for core_path in ("managers/", "services/", "core/", "agents/")
+            ):
+                priority_score += 10
+
+            # More functions/classes = higher priority
+            priority_score += len(functions) * 2
+            priority_score += len(classes) * 3
+
+            # Public API functions get higher priority
+            public_functions = [f for f in functions if not f["name"].startswith("_")]
+            priority_score += len(public_functions) * 2
+
+            # File size consideration (larger files need tests more)
+            lines_count = len(content.split("\n"))
+            if lines_count > 100:
+                priority_score += 5
+            elif lines_count > 50:
+                priority_score += 2
+
+            return {
+                "path": rel_path,
+                "absolute_path": str(py_file),
+                "priority_score": priority_score,
+                "function_count": len(functions),
+                "class_count": len(classes),
+                "public_function_count": len(public_functions),
+                "lines_count": lines_count,
+                "category": self._categorize_module(rel_path),
+            }
+
+        except Exception as e:
+            self.log(f"Error analyzing module priority for {py_file}: {e}", "WARN")
+            return {
+                "path": str(py_file.relative_to(self.context.project_path)),
+                "absolute_path": str(py_file),
+                "priority_score": 1,
+                "function_count": 0,
+                "class_count": 0,
+                "public_function_count": 0,
+                "lines_count": 0,
+                "category": "unknown",
+            }
+
+    def _categorize_module(self, relative_path: str) -> str:
+        """Categorize module for test generation strategies."""
+        if "managers/" in relative_path:
+            return "manager"
+        elif "services/" in relative_path:
+            return "service"
+        elif "core/" in relative_path:
+            return "core"
+        elif "agents/" in relative_path:
+            return "agent"
+        elif "models/" in relative_path:
+            return "model"
+        elif "executors/" in relative_path:
+            return "executor"
+        return "utility"
+
+    async def _find_untested_functions_enhanced(self) -> list[dict[str, Any]]:
+        """Enhanced untested function detection with detailed analysis."""
+        untested: list[dict[str, Any]] = []
+
+        package_dir = self.context.project_path / "crackerjack"
+        if not package_dir.exists():
+            return untested[:20]
+
+        for py_file in package_dir.rglob("*.py"):
+            if self._should_skip_file_for_testing(py_file):
+                continue
+
+            file_untested = await self._find_untested_functions_in_file_enhanced(
+                py_file
+            )
+            untested.extend(file_untested)
+
+        # Sort by testing priority
+        untested.sort(key=operator.itemgetter("testing_priority"), reverse=True)
+        return untested[:20]
+
+    async def _find_untested_functions_in_file_enhanced(
+        self, py_file: Path
+    ) -> list[dict[str, Any]]:
+        """Enhanced untested function detection with priority scoring."""
+        untested: list[dict[str, Any]] = []
+
+        try:
+            functions = await self._extract_functions_from_file(py_file)
+            for func in functions:
+                if not await self._function_has_test(func, py_file):
+                    func_info = await self._analyze_function_testability(func, py_file)
+                    untested.append(func_info)
+
+        except Exception as e:
+            self.log(f"Error finding untested functions in {py_file}: {e}", "WARN")
+
+        return untested
+
+    async def _analyze_function_testability(
+        self, func: dict[str, Any], py_file: Path
+    ) -> dict[str, Any]:
+        """Analyze function to determine testing priority and approach."""
+        try:
+            # Basic function info
+            func_info = {
+                "name": func["name"],
+                "file": str(py_file),
+                "relative_file": str(py_file.relative_to(self.context.project_path)),
+                "line": func.get("line", 1),
+                "signature": func.get("signature", ""),
+                "args": func.get("args", []),
+                "returns": func.get("returns", "Any"),
+                "testing_priority": 0,
+                "complexity": "simple",
+                "test_strategy": "basic",
+            }
+
+            # Calculate testing priority
+            priority = 0
+
+            # Public functions get higher priority
+            if not func["name"].startswith("_"):
+                priority += 10
+
+            # Functions with multiple args are more complex
+            arg_count = len(func.get("args", []))
+            if arg_count > 3:
+                priority += 5
+                func_info["complexity"] = "complex"
+                func_info["test_strategy"] = "parametrized"
+            elif arg_count > 1:
+                priority += 2
+                func_info["complexity"] = "moderate"
+
+            # Core module functions get higher priority
+            if any(
+                core_path in str(func_info["relative_file"])
+                for core_path in ("managers/", "services/", "core/")
+            ):
+                priority += 8
+
+            # Async functions need special handling
+            if func.get("is_async", False):
+                priority += 3
+                func_info["test_strategy"] = "async"
+
+            func_info["testing_priority"] = priority
+
+            return func_info
+
+        except Exception as e:
+            self.log(f"Error analyzing function testability: {e}", "WARN")
+            return {
+                "name": func.get("name", "unknown"),
+                "file": str(py_file),
+                "relative_file": str(py_file.relative_to(self.context.project_path)),
+                "line": func.get("line", 1),
+                "testing_priority": 1,
+                "complexity": "unknown",
+                "test_strategy": "basic",
+            }
+
+    async def _identify_coverage_gaps(self) -> list[dict[str, Any]]:
+        """Identify specific coverage gaps that can be addressed."""
+        gaps = []
+
+        try:
+            # Find modules with partial test coverage
+            package_dir = self.context.project_path / "crackerjack"
+            tests_dir = self.context.project_path / "tests"
+
+            if not package_dir.exists() or not tests_dir.exists():
+                return gaps
+
+            for py_file in package_dir.rglob("*.py"):
+                if self._should_skip_module_for_coverage(py_file):
+                    continue
+
+                test_coverage_info = await self._analyze_existing_test_coverage(py_file)
+                if test_coverage_info["has_gaps"]:
+                    gaps.append(test_coverage_info)
 
-        return uncovered[:10]
+        except Exception as e:
+            self.log(f"Error identifying coverage gaps: {e}", "WARN")
+
+        return gaps[:10]  # Limit for performance
+
+    async def _analyze_existing_test_coverage(self, py_file: Path) -> dict[str, Any]:
+        """Analyze existing test coverage for a specific file."""
+        try:
+            test_file_path = await self._generate_test_file_path(py_file)
+
+            coverage_info = {
+                "source_file": str(py_file.relative_to(self.context.project_path)),
+                "test_file": str(test_file_path) if test_file_path.exists() else None,
+                "has_gaps": True,  # Default assumption
+                "missing_test_types": [],
+                "coverage_score": 0,
+            }
+
+            if not test_file_path.exists():
+                coverage_info["missing_test_types"] = [
+                    "basic",
+                    "edge_cases",
+                    "error_handling",
+                ]
+                return coverage_info
+
+            # Analyze existing test file
+            test_content = self.context.get_file_content(test_file_path) or ""
+
+            # Check for different test types
+            missing_types = []
+            if "def test_" not in test_content:
+                missing_types.append("basic")
+            if "@pytest.mark.parametrize" not in test_content:
+                missing_types.append("parametrized")
+            if "with pytest.raises" not in test_content:
+                missing_types.append("error_handling")
+            if "mock" not in test_content.lower():
+                missing_types.append("mocking")
+
+            coverage_info["missing_test_types"] = missing_types
+            coverage_info["has_gaps"] = len(missing_types) > 0
+            coverage_info["coverage_score"] = max(0, 100 - len(missing_types) * 25)
+
+            return coverage_info
+
+        except Exception as e:
+            self.log(f"Error analyzing test coverage for {py_file}: {e}", "WARN")
+            return {
+                "source_file": str(py_file.relative_to(self.context.project_path)),
+                "test_file": None,
+                "has_gaps": True,
+                "missing_test_types": ["basic"],
+                "coverage_score": 0,
+            }
 
     def _should_skip_module_for_coverage(self, py_file: Path) -> bool:
         return py_file.name.startswith("test_") or py_file.name == "__init__.py"
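
_analyze_module_priority() above is likewise additive. A hypothetical services/ module with 6 functions (4 of them public), 2 classes, and 120 lines scores 10 + 12 + 6 + 8 + 5 = 41 (illustration, not package code):

priority_score = 0
priority_score += 10      # path contains "services/"
priority_score += 6 * 2   # 6 functions
priority_score += 2 * 3   # 2 classes
priority_score += 4 * 2   # 4 public functions
priority_score += 5       # more than 100 lines
assert priority_score == 41
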
@@ -307,36 +1026,64 @@
         return False
 
     async def _create_tests_for_module(self, module_path: str) -> dict[str, list[str]]:
+        """Create tests for a module."""
         fixes: list[str] = []
         files: list[str] = []
 
         try:
-            module_file = Path(module_path)
-            if not module_file.exists():
-                return {"fixes": fixes, "files": files}
+            test_results = await self._generate_module_tests(module_path)
+            fixes.extend(test_results["fixes"])
+            files.extend(test_results["files"])
+
+        except Exception as e:
+            self._handle_test_creation_error(module_path, e)
 
-            functions = await self._extract_functions_from_file(module_file)
-            classes = await self._extract_classes_from_file(module_file)
+        return {"fixes": fixes, "files": files}
 
-            if not functions and not classes:
-                return {"fixes": fixes, "files": files}
+    async def _generate_module_tests(self, module_path: str) -> dict[str, list[str]]:
+        """Generate tests for a module."""
+        module_file = Path(module_path)
+        if not await self._is_module_valid(module_file):
+            return {"fixes": [], "files": []}
 
-            test_file_path = await self._generate_test_file_path(module_file)
-            test_content = await self._generate_test_content(
-                module_file,
-                functions,
-                classes,
-            )
+        functions = await self._extract_functions_from_file(module_file)
+        classes = await self._extract_classes_from_file(module_file)
 
-            if self.context.write_file_content(test_file_path, test_content):
-                fixes.append(f"Created test file for {module_path}")
-                files.append(str(test_file_path))
-                self.log(f"Created test file: {test_file_path}")
+        if not functions and not classes:
+            return {"fixes": [], "files": []}
 
-        except Exception as e:
-            self.log(f"Error creating tests for module {module_path}: {e}", "ERROR")
+        return await self._create_test_artifacts(module_file, functions, classes)
 
-        return {"fixes": fixes, "files": files}
+    async def _is_module_valid(self, module_file: Path) -> bool:
+        """Check if the module file is valid."""
+        return module_file.exists()
+
+    async def _create_test_artifacts(
+        self,
+        module_file: Path,
+        functions: list[dict[str, Any]],
+        classes: list[dict[str, Any]],
+    ) -> dict[str, list[str]]:
+        """Create test artifacts for the module."""
+        test_file_path = await self._generate_test_file_path(module_file)
+        test_content = await self._generate_test_content(
+            module_file,
+            functions,
+            classes,
+        )
+
+        if self.context.write_file_content(test_file_path, test_content):
+            self.log(f"Created test file: {test_file_path}")
+            return {
+                "fixes": [f"Created test file for {module_file}"],
+                "files": [str(test_file_path)],
+            }
+
+        return {"fixes": [], "files": []}
+
+    def _handle_test_creation_error(self, module_path: str, e: Exception) -> None:
+        """Handle errors during test creation."""
+        self.log(f"Error creating tests for module {module_path}: {e}", "ERROR")
 
     async def _create_tests_for_file(self, file_path: str) -> dict[str, list[str]]:
         if self._has_corresponding_test(file_path):
@@ -441,25 +1188,38 @@
         return functions
 
     def _parse_function_nodes(self, tree: ast.AST) -> list[dict[str, Any]]:
+        """Enhanced function parsing with async function support."""
         functions: list[dict[str, Any]] = []
 
         for node in ast.walk(tree):
-            if isinstance(node, ast.FunctionDef) and self._is_valid_function_node(node):
+            if isinstance(
+                node, ast.FunctionDef | ast.AsyncFunctionDef
+            ) and self._is_valid_function_node(node):
                 function_info = self._create_function_info(node)
+                # Add async detection
+                function_info["is_async"] = isinstance(node, ast.AsyncFunctionDef)
                 functions.append(function_info)
 
         return functions
 
-    def _is_valid_function_node(self, node: ast.FunctionDef) -> bool:
+    def _is_valid_function_node(
+        self, node: ast.FunctionDef | ast.AsyncFunctionDef
+    ) -> bool:
+        """Enhanced validation for both sync and async functions."""
         return not node.name.startswith(("_", "test_"))
 
-    def _create_function_info(self, node: ast.FunctionDef) -> dict[str, Any]:
+    def _create_function_info(
+        self, node: ast.FunctionDef | ast.AsyncFunctionDef
+    ) -> dict[str, Any]:
+        """Enhanced function info creation with async support."""
         return {
             "name": node.name,
             "line": node.lineno,
             "signature": self._get_function_signature(node),
             "args": [arg.arg for arg in node.args.args],
             "returns": self._get_return_annotation(node),
+            "is_async": isinstance(node, ast.AsyncFunctionDef),
+            "docstring": ast.get_docstring(node) or "",
         }
 
     async def _extract_classes_from_file(self, file_path: Path) -> list[dict[str, Any]]:
@@ -502,13 +1262,20 @@
             if isinstance(item, ast.FunctionDef) and not item.name.startswith("_")
         ]
 
-    def _get_function_signature(self, node: ast.FunctionDef) -> str:
+    def _get_function_signature(
+        self, node: ast.FunctionDef | ast.AsyncFunctionDef
+    ) -> str:
+        """Enhanced function signature generation with async support."""
         args = [arg.arg for arg in node.args.args]
-        return f"{node.name}({', '.join(args)})"
+        prefix = "async " if isinstance(node, ast.AsyncFunctionDef) else ""
+        return f"{prefix}{node.name}({', '.join(args)})"
 
-    def _get_return_annotation(self, node: ast.FunctionDef) -> str:
+    def _get_return_annotation(
+        self, node: ast.FunctionDef | ast.AsyncFunctionDef
+    ) -> str:
+        """Enhanced return annotation extraction with async support."""
         if node.returns:
-            return ast.unparse(node.returns) if hasattr(ast, "unparse") else "Any"
+            return ast.unparse(node.returns) if (hasattr(ast, "unparse")) else "Any"
         return "Any"
 
     async def _function_has_test(
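
The recurring ast.FunctionDef | ast.AsyncFunctionDef change in the hunks above is what makes coroutines visible to the agent; isinstance() accepts this PEP 604 union form on Python 3.10+. A self-contained sketch of the same pattern:

import ast

src = "async def fetch(url: str) -> bytes: ...\ndef parse(raw): ..."
for node in ast.walk(ast.parse(src)):
    if isinstance(node, ast.FunctionDef | ast.AsyncFunctionDef):
        prefix = "async " if isinstance(node, ast.AsyncFunctionDef) else ""
        returns = ast.unparse(node.returns) if node.returns else "Any"
        print(f"{prefix}{node.name}({', '.join(a.arg for a in node.args.args)}) -> {returns}")
# async fetch(url) -> bytes
# parse(raw) -> Any
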
@@ -550,85 +1317,160 @@ class TestCreationAgent(SubAgent):
550
1317
  functions: list[dict[str, Any]],
551
1318
  classes: list[dict[str, Any]],
552
1319
  ) -> str:
1320
+ """Generate comprehensive test content with enhanced patterns."""
1321
+ test_params = self._prepare_test_generation_params(module_file)
1322
+ return await self._generate_all_test_types(test_params, functions, classes)
1323
+
1324
+ async def _generate_comprehensive_test_content(
1325
+ self,
1326
+ test_params: dict[str, Any],
1327
+ functions: list[dict[str, Any]],
1328
+ classes: list[dict[str, Any]],
1329
+ ) -> str:
1330
+ """Generate comprehensive test content from prepared parameters."""
1331
+ return await self._generate_all_test_types(test_params, functions, classes)
1332
+
1333
+ def _prepare_test_generation_params(self, module_file: Path) -> dict[str, Any]:
1334
+ """Prepare parameters for test generation."""
553
1335
  module_name = self._get_module_import_path(module_file)
1336
+ module_category = self._categorize_module(
1337
+ str(module_file.relative_to(self.context.project_path))
1338
+ )
1339
+ return {
1340
+ "module_name": module_name,
1341
+ "module_file": module_file,
1342
+ "module_category": module_category,
1343
+ }
554
1344
 
555
- base_content = self._generate_test_file_header(module_name, module_file)
556
- function_tests = self._generate_function_tests(functions)
557
- class_tests = self._generate_class_tests(classes)
1345
+ async def _generate_all_test_types(
1346
+ self,
1347
+ test_params: dict[str, Any],
1348
+ functions: list[dict[str, Any]],
1349
+ classes: list[dict[str, Any]],
1350
+ ) -> str:
1351
+ """Generate all types of tests."""
1352
+ # Generate header
1353
+ base_content = self._generate_enhanced_test_file_header(
1354
+ test_params["module_name"],
1355
+ test_params["module_file"],
1356
+ test_params["module_category"],
1357
+ )
558
1358
 
559
- return base_content + function_tests + class_tests
1359
+ # Generate different test sections
1360
+ function_tests = await self._generate_function_tests_content(
1361
+ functions, test_params["module_category"]
1362
+ )
1363
+ class_tests = await self._generate_class_tests_content(
1364
+ classes, test_params["module_category"]
1365
+ )
1366
+ integration_tests = await self._generate_integration_tests_content(
1367
+ test_params["module_file"],
1368
+ functions,
1369
+ classes,
1370
+ test_params["module_category"],
1371
+ )
560
1372
 
561
- def _generate_test_file_header(self, module_name: str, module_file: Path) -> str:
562
- return f'''"""Tests for {module_name}."""
1373
+ return base_content + function_tests + class_tests + integration_tests
563
1374
 
564
- import pytest
565
- from pathlib import Path
1375
+ async def _generate_function_tests_content(
1376
+ self, functions: list[dict[str, Any]], module_category: str
1377
+ ) -> str:
1378
+ """Generate function tests content."""
1379
+ return await self._generate_enhanced_function_tests(functions, module_category)
566
1380
 
567
- from {module_name} import *
1381
+ async def _generate_class_tests_content(
1382
+ self, classes: list[dict[str, Any]], module_category: str
1383
+ ) -> str:
1384
+ """Generate class tests content."""
1385
+ return await self._generate_enhanced_class_tests(classes, module_category)
568
1386
 
1387
+ async def _generate_integration_tests_content(
1388
+ self,
1389
+ module_file: Path,
1390
+ functions: list[dict[str, Any]],
1391
+ classes: list[dict[str, Any]],
1392
+ module_category: str,
1393
+ ) -> str:
1394
+ """Generate integration tests content."""
1395
+ return await self._generate_integration_tests(
1396
+ module_file, functions, classes, module_category
1397
+ )
569
1398
 
570
- class Test{module_file.stem.title()}:
571
- """Test suite for {module_file.stem} module."""
1399
+ def _generate_enhanced_test_file_header(
1400
+ self, module_name: str, module_file: Path, module_category: str
1401
+ ) -> str:
1402
+ """Generate enhanced test file header with appropriate imports based on
1403
+ module type."""
1404
+ # Determine imports based on module category
1405
+ imports = [
1406
+ "import pytest",
1407
+ "from pathlib import Path",
1408
+ "from unittest.mock import Mock, patch, AsyncMock",
1409
+ ]
572
1410
 
573
- def test_module_imports(self):
574
- """Test that module imports successfully."""
575
- import {module_name}
576
- assert {module_name} is not None
577
- '''
1411
+ if module_category in ("service", "manager", "core"):
1412
+ imports.append("import asyncio")
578
1413
 
579
- def _generate_function_tests(self, functions: list[dict[str, Any]]) -> str:
580
- content = ""
581
- for func in functions:
582
- content += f'''
583
- def test_{func["name"]}_basic(self):
584
- """Test basic functionality of {func["name"]}."""
1414
+ if module_category == "agent":
1415
+ imports.extend(
1416
+ [
1417
+ "from crackerjack.agents.base import AgentContext, FixResult, "
1418
+ "Issue, IssueType",
1419
+ ]
1420
+ )
1421
+
1422
+ imports_str = "\n".join(imports)
585
1423
 
1424
+ # Add specific imports for the module
586
1425
  try:
587
- result = {func["name"]}()
588
- assert result is not None or result is None
589
- except TypeError:
1426
+ # Try to import specific classes/functions
1427
+ content = self.context.get_file_content(module_file) or ""
1428
+ tree = ast.parse(content)
590
1429
 
591
- pytest.skip("Function requires specific arguments - manual implementation needed")
592
- except Exception as e:
593
- pytest.fail(f"Unexpected error in {func["name"]}: {{e}}")
594
- '''
595
- return content
1430
+ # Extract importable items
1431
+ importable_items = []
1432
+ for node in ast.walk(tree):
1433
+ if isinstance(node, ast.ClassDef) and not node.name.startswith("_"):
1434
+ importable_items.append(node.name)
1435
+ elif isinstance(
1436
+ node, ast.FunctionDef | ast.AsyncFunctionDef
1437
+ ) and not node.name.startswith("_"):
1438
+ importable_items.append(node.name)
1439
+
1440
+ if importable_items:
1441
+ specific_imports = (
1442
+ f"from {module_name} import {', '.join(importable_items[:10])}"
1443
+ )
1444
+ else:
1445
+ specific_imports = f"import {module_name}"
596
1446
 
597
- def _generate_class_tests(self, classes: list[dict[str, Any]]) -> str:
598
- content = ""
599
- for cls in classes:
600
- content += f'''
601
- def test_{cls["name"].lower()}_creation(self):
602
- """Test {cls["name"]} class creation."""
1447
+ except Exception:
1448
+ specific_imports = f"import {module_name}"
603
1449
 
604
- try:
605
- instance = {cls["name"]}()
606
- assert instance is not None
607
- assert isinstance(instance, {cls["name"]})
608
- except TypeError:
1450
+ class_name = f"Test{module_file.stem.replace('_', '').title()}"
609
1451
 
610
- pytest.skip("Class requires specific constructor arguments - manual implementation needed")
611
- except Exception as e:
612
- pytest.fail(f"Unexpected error creating {cls["name"]}: {{e}}")
613
- '''
614
- return content
1452
+ return f'''"""Tests for {module_name}.
615
1453
 
616
- async def _generate_function_test(self, func_info: dict[str, Any]) -> str:
617
- return f'''def test_{func_info["name"]}_basic():
618
- """Test basic functionality of {func_info["name"]}."""
1454
+ This module contains comprehensive tests for {module_name} including:
1455
+ - Basic functionality tests
1456
+ - Edge case validation
1457
+ - Error handling verification
1458
+ - Integration testing
1459
+ - Performance validation (where applicable)
1460
+ """
619
1461
 
620
- try:
621
- result = {func_info["name"]}()
622
- assert result is not None or result is None
623
- except TypeError:
1462
+ {imports_str}
1463
+ {specific_imports}
1464
+
1465
+
1466
+ class {class_name}:
1467
+ """Comprehensive test suite for {module_name}."""
1468
+
1469
+ def test_module_imports_successfully(self):
1470
+ """Test that the module can be imported without errors."""
1471
+ import {module_name}
1472
+ assert {module_name} is not None
624
1473
 
625
- import inspect
626
- assert callable({func_info["name"]}), "Function should be callable"
627
- sig = inspect.signature({func_info["name"]})
628
- assert sig is not None, "Function should have valid signature"
629
- pytest.skip("Function requires specific arguments - manual implementation needed")
630
- except Exception as e:
631
- pytest.fail(f"Unexpected error in {func_info["name"]}: {{e}}")
632
1474
  '''
633
1475
 
634
1476
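As a concrete illustration of the header builder above, here is a hedged usage sketch for a hypothetical crackerjack/services/cache.py module (the module name, path, and category are invented; `agent` stands for a TestCreationAgent with a populated context):

    header = agent._generate_enhanced_test_file_header(
        "crackerjack.services.cache",
        Path("crackerjack/services/cache.py"),
        "service",
    )
    # `header` begins with a module docstring, the three base imports,
    # "import asyncio" (added for the "service" category), an import of up to
    # ten public names parsed from the module's AST (falling back to a bare
    # "import crackerjack.services.cache"), and opens "class TestCache:" with
    # the test_module_imports_successfully() method.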
  async def _generate_minimal_test_file(self, func_info: dict[str, Any]) -> str:
@@ -653,5 +1495,819 @@ from {module_name} import {func_info["name"]}
653
1495
  except ValueError:
654
1496
  return file_path.stem
655
1497
 
1498
+ async def _generate_function_test(self, func_info: dict[str, Any]) -> str:
1499
+ """Generate a test for a specific function."""
1500
+ func_name = func_info["name"]
1501
+ args = func_info.get("args", [])
1502
+
1503
+ # Generate basic test template
1504
+ test_template = f'''def test_{func_name}_basic(self):
1505
+ """Test basic functionality of {func_name}."""
1506
+ try:
1507
+ # Basic test - may need manual implementation for specific arguments
1508
+ result = {func_name}({self._generate_default_args(args)})
1509
+ assert result is not None or result is None
1510
+ except TypeError:
1511
+ pytest.skip("Function requires specific arguments - manual implementation
1512
+ needed")
1513
+ except Exception as e:
1514
+ pytest.fail(f"Unexpected error in {func_name}: {{e}}")'''
1515
+
1516
+ return test_template
1517
+
1518
+ async def _generate_enhanced_function_tests(
1519
+ self, functions: list[dict[str, Any]], module_category: str
1520
+ ) -> str:
1521
+ """Generate enhanced test methods for functions with parametrization and
1522
+ edge cases."""
1523
+ if not functions:
1524
+ return ""
1525
+
1526
+ test_methods = []
1527
+ for func in functions:
1528
+ func_tests = await self._generate_all_tests_for_function(
1529
+ func, module_category
1530
+ )
1531
+ test_methods.extend(func_tests)
1532
+
1533
+ return "\n".join(test_methods)
1534
+
1535
+ async def _generate_all_tests_for_function(
1536
+ self, func: dict[str, Any], module_category: str
1537
+ ) -> list[str]:
1538
+ """Generate all test types for a single function."""
1539
+ func_tests = []
1540
+
1541
+ # Always generate basic test
1542
+ basic_test = await self._generate_basic_function_test(func, module_category)
1543
+ func_tests.append(basic_test)
1544
+
1545
+ # Generate additional tests based on function characteristics
1546
+ additional_tests = await self._generate_conditional_tests_for_function(
1547
+ func, module_category
1548
+ )
1549
+ func_tests.extend(additional_tests)
1550
+
1551
+ return func_tests
1552
+
1553
+ async def _generate_conditional_tests_for_function(
1554
+ self, func: dict[str, Any], module_category: str
1555
+ ) -> list[str]:
1556
+ """Generate conditional tests based on function characteristics."""
1557
+ tests = []
1558
+ args = func.get("args", [])
1559
+ func_name = func["name"]
1560
+
1561
+ # Generate parametrized test if function has multiple args
1562
+ if self._should_generate_parametrized_test(args):
1563
+ parametrized_test = await self._generate_parametrized_test(
1564
+ func, module_category
1565
+ )
1566
+ tests.append(parametrized_test)
1567
+
1568
+ # Always generate error handling test
1569
+ error_test = await self._generate_error_handling_test(func, module_category)
1570
+ tests.append(error_test)
1571
+
1572
+ # Generate edge case tests for complex functions
1573
+ if self._should_generate_edge_case_test(args, func_name):
1574
+ edge_test = await self._generate_edge_case_test(func, module_category)
1575
+ tests.append(edge_test)
1576
+
1577
+ return tests
1578
+
1579
+ def _should_generate_parametrized_test(self, args: list[str]) -> bool:
1580
+ """Determine if parametrized test should be generated."""
1581
+ return len(args) > 1
1582
+
1583
+ def _should_generate_edge_case_test(self, args: list[str], func_name: str) -> bool:
1584
+ """Determine if edge case test should be generated."""
1585
+ has_multiple_args = len(args) > 2
1586
+ is_complex_function = any(
1587
+ hint in func_name.lower()
1588
+ for hint in ("process", "validate", "parse", "convert")
1589
+ )
1590
+ return has_multiple_args or is_complex_function
1591
+
1592
+ async def _generate_basic_function_test(
1593
+ self, func: dict[str, Any], module_category: str
1594
+ ) -> str:
1595
+ """Generate basic functionality test for a function."""
1596
+ func_name = func["name"]
1597
+ args = func.get("args", [])
1598
+
1599
+ template_generator = self._get_test_template_generator(module_category)
1600
+ return template_generator(func_name, args)
1601
+
1602
+ def _get_test_template_generator(
1603
+ self, module_category: str
1604
+ ) -> Callable[[str, list[str]], str]:
1605
+ """Get the appropriate test template generator for the module category."""
1606
+ return {
1607
+ "agent": self._generate_agent_test_template,
1608
+ "service": self._generate_async_test_template,
1609
+ "manager": self._generate_async_test_template,
1610
+ }.get(module_category, self._generate_default_test_template)
1611
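The method above is a plain dict dispatch with a default; a minimal sketch of how it resolves, assuming it is called from another TestCreationAgent method (the function name and arguments are hypothetical):

    gen = self._get_test_template_generator("manager")
    # gen is self._generate_async_test_template ("service" maps there too;
    # unknown categories fall back to self._generate_default_test_template)
    src = gen("sync_state", ["self", "force"])
    # src is the source of an async pytest method that awaits sync_state()
    # only when asyncio.iscoroutinefunction() reports it as a coroutine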
+
1612
+ def _generate_agent_test_template(self, func_name: str, args: list[str]) -> str:
1613
+ """Generate test template for agent functions."""
1614
+ return f'''
1615
+ def test_{func_name}_basic_functionality(self):
1616
+ """Test basic functionality of {func_name}."""
1617
+ # TODO: Implement specific test logic for {func_name}
1618
+ # This is a placeholder test that should be customized
1619
+ try:
1620
+ result = {func_name}({self._generate_smart_default_args(args)})
1621
+ assert result is not None or result is None
1622
+ except (TypeError, NotImplementedError) as e:
1623
+ pytest.skip(f"Function {func_name} requires manual implementation: {{e}}")
1624
+ except Exception as e:
1625
+ pytest.fail(f"Unexpected error in {func_name}: {{e}}")'''
1626
+
1627
+ def _generate_async_test_template(self, func_name: str, args: list[str]) -> str:
1628
+ """Generate test template for async service/manager functions."""
1629
+ return f'''
+ @pytest.mark.asyncio
+ async def test_{func_name}_basic_functionality(self):
1630
+ """Test basic functionality of {func_name}."""
1631
+ # TODO: Implement specific test logic for {func_name}
1632
+ # Consider mocking external dependencies
1633
+ try:
1634
+ if asyncio.iscoroutinefunction({func_name}):
1635
+ result = await {func_name}({self._generate_smart_default_args(args)})
1636
+ else:
1637
+ result = {func_name}({self._generate_smart_default_args(args)})
1638
+ assert result is not None or result is None
1639
+ except (TypeError, NotImplementedError) as e:
1640
+ pytest.skip(f"Function {func_name} requires manual implementation: {{e}}")
1641
+ except Exception as e:
1642
+ pytest.fail(f"Unexpected error in {func_name}: {{e}}")'''
1643
+
1644
+ def _generate_default_test_template(self, func_name: str, args: list[str]) -> str:
1645
+ """Generate default test template for regular functions."""
1646
+ return f'''
1647
+ def test_{func_name}_basic_functionality(self):
1648
+ """Test basic functionality of {func_name}."""
1649
+ try:
1650
+ result = {func_name}({self._generate_smart_default_args(args)})
1651
+ assert result is not None or result is None
1652
+ except (TypeError, NotImplementedError) as e:
1653
+ pytest.skip(f"Function {func_name} requires manual implementation: {{e}}")
1654
+ except Exception as e:
1655
+ pytest.fail(f"Unexpected error in {func_name}: {{e}}")'''
1656
+
1657
+ async def _generate_parametrized_test(
1658
+ self, func: dict[str, Any], module_category: str
1659
+ ) -> str:
1660
+ """Generate parametrized test for functions with multiple arguments."""
1661
+ func_name = func["name"]
1662
+ args = func.get("args", [])
1663
+
1664
+ # Generate test parameters based on argument types
1665
+ test_cases = self._generate_test_parameters(args)
1666
+
1667
+ if not test_cases:
1668
+ return ""
1669
+
1670
+ parametrize_decorator = f"@pytest.mark.parametrize({test_cases})"
1671
+
1672
+ test_template = f'''
1673
+ {parametrize_decorator}
1674
+ def test_{func_name}_with_parameters(self, {
1675
+ ", ".join(args) if len(args) <= 5 else "test_input"
1676
+ }):
1677
+ """Test {func_name} with various parameter combinations."""
1678
+ try:
1679
+ if len({args}) <= 5:
1680
+ result = {func_name}({", ".join(args)})
1681
+ else:
1682
+ result = {func_name}(**test_input)
1683
+ # Basic assertion - customize based on expected behavior
1684
+ assert result is not None or result is None
1685
+ except (TypeError, ValueError):
1686
+ # Some parameter combinations may be invalid - this is expected
1687
+ pass
1688
+ except Exception as e:
1689
+ pytest.fail(f"Unexpected error with parameters: {{e}}")'''
1690
+
1691
+ return test_template
1692
+
1693
+ async def _generate_error_handling_test(
1694
+ self, func: dict[str, Any], module_category: str
1695
+ ) -> str:
1696
+ """Generate error handling test for a function."""
1697
+ func_name = func["name"]
1698
+ args = func.get("args", [])
1699
+
1700
+ test_template = f'''
1701
+ def test_{func_name}_error_handling(self):
1702
+ """Test {func_name} error handling with invalid inputs."""
1703
+ # Test with None values
1704
+ with pytest.raises((TypeError, ValueError, AttributeError)):
1705
+ {func_name}({self._generate_invalid_args(args)})
1706
+
1707
+ # Test with empty/invalid values where applicable
1708
+ if len({args}) > 0:
1709
+ with pytest.raises((TypeError, ValueError)):
1710
+ {func_name}({self._generate_edge_case_args(args, "empty")})'''
1711
+
1712
+ return test_template
1713
+
1714
+ async def _generate_edge_case_test(
1715
+ self, func: dict[str, Any], module_category: str
1716
+ ) -> str:
1717
+ """Generate edge case test for complex functions."""
1718
+ func_name = func["name"]
1719
+ args = func.get("args", [])
1720
+
1721
+ test_template = f'''
1722
+ def test_{func_name}_edge_cases(self):
1723
+ """Test {func_name} with edge case scenarios."""
1724
+ # Test boundary conditions
1725
+ edge_cases = [
1726
+ {self._generate_edge_case_args(args, "boundary")},
1727
+ {self._generate_edge_case_args(args, "extreme")},
1728
+ ]
1729
+
1730
+ for edge_case in edge_cases:
1731
+ try:
1732
+ result = {func_name}(*edge_case)
1733
+ # Verify the function handles edge cases gracefully
1734
+ assert result is not None or result is None
1735
+ except (ValueError, TypeError):
1736
+ # Some edge cases may be invalid - that's acceptable
1737
+ pass
1738
+ except Exception as e:
1739
+ pytest.fail(f"Unexpected error with edge case {{edge_case}}: {{e}}")'''
1740
+
1741
+ return test_template
1742
+
1743
+ def _generate_test_parameters(self, args: list[str]) -> str:
1744
+ """Generate test parameters for parametrized tests."""
1745
+ if not args or len(args) > 5: # Limit complexity
1746
+ return ""
1747
+
1748
+ # Simple parameter generation
1749
+ param_names = ", ".join(f'"{arg}"' for arg in args)
1750
+ param_values = []
1751
+
1752
+ # Generate a few test cases
1753
+ for i in range(min(3, len(args))):
1754
+ test_case = []
1755
+ for arg in args:
1756
+ if "path" in arg.lower():
1757
+ test_case.append(f'Path("test_{i}")')
1758
+ elif "str" in arg.lower() or "name" in arg.lower():
1759
+ test_case.append(f'"test_{i}"')
1760
+ elif "int" in arg.lower() or "count" in arg.lower():
1761
+ test_case.append(str(i))
1762
+ elif "bool" in arg.lower():
1763
+ test_case.append("True" if i % 2 == 0 else "False")
1764
+ else:
1765
+ test_case.append("None")
1766
+ param_values.append(f"({', '.join(test_case)})")
1767
+
1768
+ return f"[{param_names}], [{', '.join(param_values)}]"
1769
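For reference, a sketch of the payload this returns for a hypothetical two-argument function; the string is interpolated verbatim into the @pytest.mark.parametrize(...) decorator built in _generate_parametrized_test:

    params = self._generate_test_parameters(["name", "count"])
    # params == '["name", "count"], [("test_0", 0), ("test_1", 1)]'
    # producing:
    # @pytest.mark.parametrize(["name", "count"], [("test_0", 0), ("test_1", 1)])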
+
1770
+ def _generate_smart_default_args(self, args: list[str]) -> str:
1771
+ """Generate smarter default arguments based on argument names."""
1772
+ if not args or args == ["self"]:
1773
+ return ""
1774
+
1775
+ filtered_args = self._filter_args(args)
1776
+ if not filtered_args:
1777
+ return ""
1778
+
1779
+ placeholders = [
1780
+ self._generate_placeholder_for_arg(arg) for arg in filtered_args
1781
+ ]
1782
+ return ", ".join(placeholders)
1783
+
1784
+ def _filter_args(self, args: list[str]) -> list[str]:
1785
+ """Filter out 'self' parameter from arguments."""
1786
+ return [arg for arg in args if arg != "self"]
1787
+
1788
+ def _generate_placeholder_for_arg(self, arg: str) -> str:
1789
+ """Generate a placeholder value for a single argument based on its name."""
1790
+ arg_lower = arg.lower()
1791
+
1792
+ if self._is_path_arg(arg_lower):
1793
+ return 'Path("test_file.txt")'
1794
+ elif self._is_url_arg(arg_lower):
1795
+ return '"https://example.com"'
1796
+ elif self._is_email_arg(arg_lower):
1797
+ return '"test@example.com"'
1798
+ elif self._is_id_arg(arg_lower):
1799
+ return '"test-id-123"'
1800
+ elif self._is_name_arg(arg_lower):
1801
+ return '"test_name"'
1802
+ elif self._is_numeric_arg(arg_lower):
1803
+ return "10"
1804
+ elif self._is_boolean_arg(arg_lower):
1805
+ return "True"
1806
+ elif self._is_text_arg(arg_lower):
1807
+ return '"test data"'
1808
+ elif self._is_list_arg(arg_lower):
1809
+ return '["test1", "test2"]'
1810
+ elif self._is_dict_arg(arg_lower):
1811
+ return '{"key": "value"}'
1812
+ return '"test"'
1813
+
1814
+ def _is_path_arg(self, arg_lower: str) -> bool:
1815
+ """Check if argument is path-related."""
1816
+ return any(term in arg_lower for term in ("path", "file"))
1817
+
1818
+ def _is_url_arg(self, arg_lower: str) -> bool:
1819
+ """Check if argument is URL-related."""
1820
+ return any(term in arg_lower for term in ("url", "uri"))
1821
+
1822
+ def _is_email_arg(self, arg_lower: str) -> bool:
1823
+ """Check if argument is email-related."""
1824
+ return any(term in arg_lower for term in ("email", "mail"))
1825
+
1826
+ def _is_id_arg(self, arg_lower: str) -> bool:
1827
+ """Check if argument is ID-related."""
1828
+ return any(term in arg_lower for term in ("id", "uuid"))
1829
+
1830
+ def _is_name_arg(self, arg_lower: str) -> bool:
1831
+ """Check if argument is name-related."""
1832
+ return any(term in arg_lower for term in ("name", "title"))
1833
+
1834
+ def _is_numeric_arg(self, arg_lower: str) -> bool:
1835
+ """Check if argument is numeric-related."""
1836
+ return any(term in arg_lower for term in ("count", "size", "number", "num"))
1837
+
1838
+ def _is_boolean_arg(self, arg_lower: str) -> bool:
1839
+ """Check if argument is boolean-related."""
1840
+ return any(term in arg_lower for term in ("enable", "flag", "is_", "has_"))
1841
+
1842
+ def _is_text_arg(self, arg_lower: str) -> bool:
1843
+ """Check if argument is text-related."""
1844
+ return any(term in arg_lower for term in ("data", "content", "text"))
1845
+
1846
+ def _is_list_arg(self, arg_lower: str) -> bool:
1847
+ """Check if argument is list-related."""
1848
+ return any(term in arg_lower for term in ("list", "items"))
1849
+
1850
+ def _is_dict_arg(self, arg_lower: str) -> bool:
1851
+ """Check if argument is dict-related."""
1852
+ return any(term in arg_lower for term in ("dict", "config", "options"))
1853
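Taken together, the name-based predicates above give _generate_placeholder_for_arg behavior along these lines (argument names are hypothetical; results are source text to be injected into generated tests, not runtime values):

    self._generate_placeholder_for_arg("config_path")  # -> 'Path("test_file.txt")'
    self._generate_placeholder_for_arg("base_url")     # -> '"https://example.com"'
    self._generate_placeholder_for_arg("item_count")   # -> "10"
    self._generate_placeholder_for_arg("payload")      # -> '"test"' (fallback)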
+
1854
+ def _generate_invalid_args(self, args: list[str]) -> str:
1855
+ """Generate invalid arguments for error testing."""
1856
+ filtered_args = [arg for arg in args if arg != "self"]
1857
+ if not filtered_args:
1858
+ return ""
1859
+ return ", ".join(["None"] * len(filtered_args))
1860
+
1861
+ def _generate_edge_case_args(self, args: list[str], case_type: str) -> str:
1862
+ """Generate edge case arguments."""
1863
+ filtered_args = self._filter_args(args)
1864
+ if not filtered_args:
1865
+ return ""
1866
+
1867
+ placeholders = self._generate_placeholders_by_case_type(
1868
+ filtered_args, case_type
1869
+ )
1870
+ return ", ".join(placeholders)
1871
+
1872
+ def _generate_placeholders_by_case_type(
1873
+ self, filtered_args: list[str], case_type: str
1874
+ ) -> list[str]:
1875
+ """Generate placeholders based on case type."""
1876
+ if case_type == "empty":
1877
+ return self._generate_empty_case_placeholders(filtered_args)
1878
+ elif case_type == "boundary":
1879
+ return self._generate_boundary_case_placeholders(filtered_args)
1880
+ # extreme
1881
+ return self._generate_extreme_case_placeholders(filtered_args)
1882
+
1883
+ def _generate_empty_case_placeholders(self, filtered_args: list[str]) -> list[str]:
1884
+ """Generate placeholders for empty case."""
1885
+ placeholders = []
1886
+ for arg in filtered_args:
1887
+ arg_lower = arg.lower()
1888
+ if any(term in arg_lower for term in ("str", "name", "text")):
1889
+ placeholders.append('""')
1890
+ elif any(term in arg_lower for term in ("list", "items")):
1891
+ placeholders.append("[]")
1892
+ elif any(term in arg_lower for term in ("dict", "config")):
1893
+ placeholders.append("{}")
1894
+ else:
1895
+ placeholders.append("None")
1896
+ return placeholders
1897
+
1898
+ def _generate_boundary_case_placeholders(
1899
+ self, filtered_args: list[str]
1900
+ ) -> list[str]:
1901
+ """Generate placeholders for boundary case."""
1902
+ placeholders = []
1903
+ for arg in filtered_args:
1904
+ arg_lower = arg.lower()
1905
+ if any(term in arg_lower for term in ("count", "size", "number")):
1906
+ placeholders.append("0")
1907
+ elif any(term in arg_lower for term in ("str", "name")):
1908
+ placeholders.append('"x" * 1000') # Very long string
1909
+ else:
1910
+ placeholders.append("None")
1911
+ return placeholders
1912
+
1913
+ def _generate_extreme_case_placeholders(
1914
+ self, filtered_args: list[str]
1915
+ ) -> list[str]:
1916
+ """Generate placeholders for extreme case."""
1917
+ placeholders = []
1918
+ for arg in filtered_args:
1919
+ arg_lower = arg.lower()
1920
+ if any(term in arg_lower for term in ("count", "size", "number")):
1921
+ placeholders.append("-1")
1922
+ else:
1923
+ placeholders.append("None")
1924
+ return placeholders
1925
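A sketch of what the three case generators return for a hypothetical (self, name, count) signature; each result is the argument source text injected into the generated edge-case tests:

    self._generate_edge_case_args(["self", "name", "count"], "empty")     # -> '"", None'
    self._generate_edge_case_args(["self", "name", "count"], "boundary")  # -> '"x" * 1000, 0'
    self._generate_edge_case_args(["self", "name", "count"], "extreme")   # -> 'None, -1'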
+
1926
+ async def _generate_enhanced_class_tests(
1927
+ self, classes: list[dict[str, Any]], module_category: str
1928
+ ) -> str:
1929
+ """Generate enhanced test methods for classes with fixtures and comprehensive
1930
+ coverage."""
1931
+ if not classes:
1932
+ return ""
1933
+
1934
+ test_components = await self._generate_all_class_test_components(
1935
+ classes, module_category
1936
+ )
1937
+ return self._combine_class_test_elements(
1938
+ test_components["fixtures"], test_components["test_methods"]
1939
+ )
1940
+
1941
+ async def _generate_all_class_test_components(
1942
+ self, classes: list[dict[str, Any]], module_category: str
1943
+ ) -> dict[str, list[str]]:
1944
+ """Generate all test components for classes."""
1945
+ fixtures = []
1946
+ test_methods = []
1947
+
1948
+ for cls in classes:
1949
+ class_components = await self._generate_single_class_test_components(
1950
+ cls, module_category
1951
+ )
1952
+ fixtures.extend(class_components["fixtures"])
1953
+ test_methods.extend(class_components["test_methods"])
1954
+
1955
+ return {"fixtures": fixtures, "test_methods": test_methods}
1956
+
1957
+ async def _generate_single_class_test_components(
1958
+ self, cls: dict[str, Any], module_category: str
1959
+ ) -> dict[str, list[str]]:
1960
+ """Generate test components for a single class."""
1961
+ fixtures = []
1962
+ test_methods = []
1963
+ methods = cls.get("methods", [])
1964
+
1965
+ # Generate fixture for class instantiation
1966
+ fixture = await self._generate_class_fixture(cls, module_category)
1967
+ if fixture:
1968
+ fixtures.append(fixture)
1969
+
1970
+ # Generate core tests for the class
1971
+ core_tests = await self._generate_core_class_tests(
1972
+ cls, methods, module_category
1973
+ )
1974
+ test_methods.extend(core_tests)
1975
+
1976
+ return {"fixtures": fixtures, "test_methods": test_methods}
1977
+
1978
+ async def _generate_core_class_tests(
1979
+ self, cls: dict[str, Any], methods: list[str], module_category: str
1980
+ ) -> list[str]:
1981
+ """Generate core tests for a class."""
1982
+ test_methods = []
1983
+
1984
+ # Basic class instantiation test
1985
+ instantiation_test = await self._generate_class_instantiation_test(
1986
+ cls, module_category
1987
+ )
1988
+ test_methods.append(instantiation_test)
1989
+
1990
+ # Generate tests for public methods (limit for performance)
1991
+ method_tests = await self._generate_method_tests(
1992
+ cls, methods[:5], module_category
1993
+ )
1994
+ test_methods.extend(method_tests)
1995
+
1996
+ # Generate property tests if applicable
1997
+ property_test = await self._generate_class_property_test(cls, module_category)
1998
+ if property_test:
1999
+ test_methods.append(property_test)
2000
+
2001
+ return test_methods
2002
+
2003
+ async def _generate_method_tests(
2004
+ self, cls: dict[str, Any], methods: list[str], module_category: str
2005
+ ) -> list[str]:
2006
+ """Generate tests for class methods."""
2007
+ method_tests = []
2008
+ for method in methods:
2009
+ method_test = await self._generate_class_method_test(
2010
+ cls, method, module_category
2011
+ )
2012
+ method_tests.append(method_test)
2013
+ return method_tests
2014
+
2015
+ def _combine_class_test_elements(
2016
+ self, fixtures: list[str], test_methods: list[str]
2017
+ ) -> str:
2018
+ """Combine fixtures and test methods into a single string."""
2019
+ fixture_section = "\n".join(fixtures) if fixtures else ""
2020
+ test_section = "\n".join(test_methods)
2021
+ return fixture_section + test_section
2022
+
2023
+ async def _generate_class_fixture(
2024
+ self, cls: dict[str, Any], module_category: str
2025
+ ) -> str:
2026
+ """Generate pytest fixture for class instantiation."""
2027
+ class_name = cls["name"]
2028
+
2029
+ if module_category in ("service", "manager", "core"):
2030
+ # These often require dependency injection
2031
+ fixture_template = f'''
2032
+ @pytest.fixture
2033
+ def {class_name.lower()}_instance(self):
2034
+ """Fixture to create {class_name} instance for testing."""
2035
+ # TODO: Configure dependencies and mocks as needed
2036
+ try:
2037
+ return {class_name}()
2038
+ except TypeError:
2039
+ # If constructor requires arguments, mock them
2040
+ with patch.object({class_name}, '__init__', return_value=None):
2041
+ instance = {class_name}.__new__({class_name})
2042
+ return instance'''
2043
+
2044
+ elif module_category == "agent":
2045
+ # Agents typically require AgentContext
2046
+ fixture_template = f'''
2047
+ @pytest.fixture
2048
+ def {class_name.lower()}_instance(self):
2049
+ """Fixture to create {class_name} instance for testing."""
2050
+ # Mock AgentContext for agent testing
2051
+ mock_context = Mock(spec=AgentContext)
2052
+ mock_context.project_path = Path("/test/project")
2053
+ mock_context.get_file_content = Mock(return_value="# test content")
2054
+ mock_context.write_file_content = Mock(return_value=True)
2055
+
2056
+ try:
2057
+ return {class_name}(mock_context)
2058
+ except Exception:
2059
+ pytest.skip("Agent requires specific context configuration")'''
2060
+
2061
+ else:
2062
+ # Simple fixture for other classes
2063
+ fixture_template = f'''
2064
+ @pytest.fixture
2065
+ def {class_name.lower()}_instance(self):
2066
+ """Fixture to create {class_name} instance for testing."""
2067
+ try:
2068
+ return {class_name}()
2069
+ except TypeError:
2070
+ pytest.skip("Class requires specific constructor arguments")'''
2071
+
2072
+ return fixture_template
2073
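For a hypothetical agent class, the "agent" branch of the fixture generator above yields roughly:

    # assuming cls = {"name": "CoverageAgent"} and module_category = "agent"
    fixture_src = await self._generate_class_fixture(cls, "agent")
    # emits a "coverageagent_instance" @pytest.fixture that builds a
    # Mock(spec=AgentContext) with stubbed project_path and file accessors,
    # and skips the dependent tests if construction still fails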
+
2074
+ @staticmethod
2075
+ async def _generate_class_instantiation_test(
2076
+ class_info: dict[str, Any], module_category: str
2077
+ ) -> str:
2078
+ """Generate class instantiation test."""
2079
+ class_name = class_info["name"]
2080
+
2081
+ test_template = f'''
2082
+ def test_{class_name.lower()}_instantiation(self, {class_name.lower()}_instance):
2083
+ """Test successful instantiation of {class_name}."""
2084
+ assert {class_name.lower()}_instance is not None
2085
+ assert isinstance({class_name.lower()}_instance, {class_name})
2086
+
2087
+ # Test basic attributes exist
2088
+ assert hasattr({class_name.lower()}_instance, '__class__')
2089
+ assert {class_name.lower()}_instance.__class__.__name__ == "{class_name}"'''
2090
+
2091
+ return test_template
2092
+
2093
+ async def _generate_class_method_test(
2094
+ self, cls: dict[str, Any], method_name: str, module_category: str
2095
+ ) -> str:
2096
+ """Generate test for a class method."""
2097
+ class_name = cls["name"]
2098
+
2099
+ if self._is_special_agent_method(module_category, method_name):
2100
+ return self._generate_agent_method_test(class_name, method_name)
2101
+ if module_category in ("service", "manager"):
2102
+ return self._generate_async_method_test(class_name, method_name)
2103
+ return self._generate_default_method_test(class_name, method_name)
2104
+
2105
+ def _is_special_agent_method(self, module_category: str, method_name: str) -> bool:
2106
+ """Check if this is a special agent method requiring custom test logic."""
2107
+ return module_category == "agent" and method_name in (
2108
+ "can_handle",
2109
+ "analyze_and_fix",
2110
+ )
2111
+
2112
+ def _generate_agent_method_test(self, class_name: str, method_name: str) -> str:
2113
+ """Generate test for special agent methods."""
2114
+ if method_name == "can_handle":
2115
+ return self._generate_can_handle_test(class_name)
2116
+ elif method_name == "analyze_and_fix":
2117
+ return self._generate_analyze_and_fix_test(class_name)
2118
+ return self._generate_generic_agent_method_test(class_name, method_name)
2119
+
2120
+ def _generate_can_handle_test(self, class_name: str) -> str:
2121
+ """Generate test for can_handle method."""
2122
+ return f'''
2123
+ @pytest.mark.asyncio
2124
+ async def test_{class_name.lower()}_can_handle(self, {class_name.lower()}_instance):
2125
+ """Test {class_name}.can_handle method."""
2126
+ # Test with mock issue
2127
+ mock_issue = Mock(spec=Issue)
2128
+ mock_issue.type = IssueType.COVERAGE_IMPROVEMENT
2129
+ mock_issue.message = "test coverage issue"
2130
+ mock_issue.file_path = "/test/path.py"
2131
+
2132
+ result = await {class_name.lower()}_instance.can_handle(mock_issue)
2133
+ assert isinstance(result, (int, float))
2134
+ assert 0.0 <= result <= 1.0'''
2135
+
2136
+ def _generate_analyze_and_fix_test(self, class_name: str) -> str:
2137
+ """Generate test for analyze_and_fix method."""
2138
+ return f'''
2139
+ @pytest.mark.asyncio
2140
+ async def test_{class_name.lower()}_analyze_and_fix(self, {class_name.lower()}_instance):
2141
+ """Test {class_name}.analyze_and_fix method."""
2142
+ # Test with mock issue
2143
+ mock_issue = Mock(spec=Issue)
2144
+ mock_issue.type = IssueType.COVERAGE_IMPROVEMENT
2145
+ mock_issue.message = "test coverage issue"
2146
+ mock_issue.file_path = "/test/path.py"
2147
+
2148
+ result = await {class_name.lower()}_instance.analyze_and_fix(mock_issue)
2149
+ assert isinstance(result, FixResult)
2150
+ assert hasattr(result, 'success')
2151
+ assert hasattr(result, 'confidence')'''
2152
+
2153
+ def _generate_generic_agent_method_test(
2154
+ self, class_name: str, method_name: str
2155
+ ) -> str:
2156
+ """Generate test for generic agent methods."""
2157
+ return f'''
2158
+ @pytest.mark.asyncio
2159
+ async def test_{class_name.lower()}_{method_name}(self, {class_name.lower()}_instance):
2160
+ """Test {class_name}.{method_name} method."""
2161
+ try:
2162
+ method = getattr({class_name.lower()}_instance, "{method_name}", None)
2163
+ assert method is not None, f"Method {method_name} should exist"
2164
+
2165
+ # Generic test for agent methods
2166
+ if asyncio.iscoroutinefunction(method):
2167
+ result = await method()
2168
+ else:
2169
+ result = method()
2170
+
2171
+ assert result is not None or result is None
2172
+ except (TypeError, NotImplementedError):
2173
+ pytest.skip(f"Method {method_name} requires specific arguments")
2174
+ except Exception as e:
2175
+ pytest.fail(f"Unexpected error in {method_name}: {{e}}")'''
2176
+
2177
+ def _generate_async_method_test(self, class_name: str, method_name: str) -> str:
2178
+ """Generate test for async service/manager methods."""
2179
+ return f'''
2180
+ @pytest.mark.asyncio
2181
+ async def test_{class_name.lower()}_{method_name}(self, {class_name.lower()}_instance):
2182
+ """Test {class_name}.{method_name} method."""
2183
+ try:
2184
+ method = getattr({class_name.lower()}_instance, "{method_name}", None)
2185
+ assert method is not None, f"Method {method_name} should exist"
2186
+
2187
+ # Test method call (may need arguments)
2188
+ if asyncio.iscoroutinefunction(method):
2189
+ result = await method()
2190
+ else:
2191
+ result = method()
2192
+
2193
+ # Basic assertion - customize based on expected behavior
2194
+ assert result is not None or result is None
2195
+
2196
+ except (TypeError, NotImplementedError):
2197
+ pytest.skip(f"Method {method_name} requires specific arguments or implementation")
2198
+ except Exception as e:
2199
+ pytest.fail(f"Unexpected error in {method_name}: {{e}}")'''
2200
+
2201
+ def _generate_default_method_test(self, class_name: str, method_name: str) -> str:
2202
+ """Generate test for default methods."""
2203
+ return f'''
2204
+ def test_{class_name.lower()}_{method_name}(self, {class_name.lower()}_instance):
2205
+ """Test {class_name}.{method_name} method."""
2206
+ try:
2207
+ method = getattr({class_name.lower()}_instance, "{method_name}", None)
2208
+ assert method is not None, f"Method {method_name} should exist"
2209
+
2210
+ # Test method call
2211
+ result = method()
2212
+ assert result is not None or result is None
2213
+
2214
+ except (TypeError, NotImplementedError):
2215
+ pytest.skip(f"Method {method_name} requires specific arguments or implementation")
2216
+ except Exception as e:
2217
+ pytest.fail(f"Unexpected error in {method_name}: {{e}}")'''
2218
+
2219
+ async def _generate_class_property_test(
2220
+ self, cls: dict[str, Any], module_category: str
2221
+ ) -> str:
2222
+ """Generate test for class properties."""
2223
+ class_name = cls["name"]
2224
+
2225
+ # Only generate property tests for certain module categories
2226
+ if module_category not in ("service", "manager", "agent"):
2227
+ return ""
2228
+
2229
+ test_template = f'''
2230
+ def test_{class_name.lower()}_properties(self, {class_name.lower()}_instance):
2231
+ """Test {class_name} properties and attributes."""
2232
+ # Test that instance has expected structure
2233
+ assert (hasattr({class_name.lower()}_instance, '__dict__') or
2234
+ hasattr({class_name.lower()}_instance, '__slots__'))
2235
+
2236
+ # Test string representation
2237
+ str_repr = str({class_name.lower()}_instance)
2238
+ assert len(str_repr) > 0
2239
+ assert "{class_name}" in str_repr or "{class_name.lower()}" in
2240
+ str_repr.lower()'''
2241
+
2242
+ return test_template
2243
+
2244
+ async def _generate_integration_tests(
2245
+ self,
2246
+ module_file: Path,
2247
+ functions: list[dict[str, Any]],
2248
+ classes: list[dict[str, Any]],
2249
+ module_category: str,
2250
+ ) -> str:
2251
+ """Generate integration tests for certain module types."""
2252
+ if module_category not in ("service", "manager", "core"):
2253
+ return ""
2254
+
2255
+ # Only generate integration tests for modules with sufficient complexity
2256
+ if len(functions) < 3 and len(classes) < 2:
2257
+ return ""
2258
+
2259
+ integration_tests = f'''
2260
+
2261
+ # Integration Tests
2262
+ @pytest.mark.integration
2263
+ def test_{module_file.stem}_integration(self):
2264
+ """Integration test for {module_file.stem} module functionality."""
2265
+ # TODO: Implement integration test scenarios
2266
+ # Test interactions between classes and functions in this module
2267
+ pytest.skip("Integration test needs manual implementation")
2268
+
2269
+ @pytest.mark.integration
2270
+ @pytest.mark.asyncio
2271
+ async def test_{module_file.stem}_async_integration(self):
2272
+ """Async integration test for {module_file.stem} module."""
2273
+ # TODO: Implement async integration scenarios
2274
+ # Test async workflows and dependencies
2275
+ pytest.skip("Async integration test needs manual implementation")
2276
+
2277
+ @pytest.mark.performance
2278
+ def test_{module_file.stem}_performance(self):
2279
+ """Basic performance test for {module_file.stem} module."""
2280
+ # TODO: Add performance benchmarks if applicable
2281
+ # Consider timing critical operations
2282
+ pytest.skip("Performance test needs manual implementation")'''
2283
+
2284
+ return integration_tests
2285
+
2286
+ def _generate_default_args(self, args: list[str]) -> str:
2287
+ """Generate default arguments for function calls."""
2288
+ if not args or args == ["self"]:
2289
+ return ""
2290
+
2291
+ # Filter out 'self' parameter
2292
+ filtered_args = [arg for arg in args if arg != "self"]
2293
+ if not filtered_args:
2294
+ return ""
2295
+
2296
+ # Generate placeholder arguments
2297
+ placeholders = []
2298
+ for arg in filtered_args:
2299
+ if "path" in arg.lower():
2300
+ placeholders.append('Path("test")')
2301
+ elif "str" in arg.lower() or "name" in arg.lower():
2302
+ placeholders.append('"test"')
2303
+ elif "int" in arg.lower() or "count" in arg.lower():
2304
+ placeholders.append("1")
2305
+ elif "bool" in arg.lower():
2306
+ placeholders.append("True")
2307
+ else:
2308
+ placeholders.append("None")
2309
+
2310
+ return ", ".join(placeholders)
2311
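Note the deliberate overlap with _generate_smart_default_args earlier in this file: this simpler variant keys on type-like substrings in the argument name and emits smaller literals. A hypothetical comparison (results are source text):

    self._generate_default_args(["self", "file_path", "count"])        # -> 'Path("test"), 1'
    self._generate_smart_default_args(["self", "file_path", "count"])  # -> 'Path("test_file.txt"), 10'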
+
656
2312
 
657
2313
  agent_registry.register(TestCreationAgent)