crackerjack 0.33.0__py3-none-any.whl → 0.33.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crackerjack might be problematic.
- crackerjack/__main__.py +1350 -34
- crackerjack/adapters/__init__.py +17 -0
- crackerjack/adapters/lsp_client.py +358 -0
- crackerjack/adapters/rust_tool_adapter.py +194 -0
- crackerjack/adapters/rust_tool_manager.py +193 -0
- crackerjack/adapters/skylos_adapter.py +231 -0
- crackerjack/adapters/zuban_adapter.py +560 -0
- crackerjack/agents/base.py +7 -3
- crackerjack/agents/coordinator.py +271 -33
- crackerjack/agents/documentation_agent.py +9 -15
- crackerjack/agents/dry_agent.py +3 -15
- crackerjack/agents/formatting_agent.py +1 -1
- crackerjack/agents/import_optimization_agent.py +36 -180
- crackerjack/agents/performance_agent.py +17 -98
- crackerjack/agents/performance_helpers.py +7 -31
- crackerjack/agents/proactive_agent.py +1 -3
- crackerjack/agents/refactoring_agent.py +16 -85
- crackerjack/agents/refactoring_helpers.py +7 -42
- crackerjack/agents/security_agent.py +9 -48
- crackerjack/agents/test_creation_agent.py +356 -513
- crackerjack/agents/test_specialist_agent.py +0 -4
- crackerjack/api.py +6 -25
- crackerjack/cli/cache_handlers.py +204 -0
- crackerjack/cli/cache_handlers_enhanced.py +683 -0
- crackerjack/cli/facade.py +100 -0
- crackerjack/cli/handlers.py +224 -9
- crackerjack/cli/interactive.py +6 -4
- crackerjack/cli/options.py +642 -55
- crackerjack/cli/utils.py +2 -1
- crackerjack/code_cleaner.py +58 -117
- crackerjack/config/global_lock_config.py +8 -48
- crackerjack/config/hooks.py +53 -62
- crackerjack/core/async_workflow_orchestrator.py +24 -34
- crackerjack/core/autofix_coordinator.py +3 -17
- crackerjack/core/enhanced_container.py +4 -13
- crackerjack/core/file_lifecycle.py +12 -89
- crackerjack/core/performance.py +2 -2
- crackerjack/core/performance_monitor.py +15 -55
- crackerjack/core/phase_coordinator.py +104 -204
- crackerjack/core/resource_manager.py +14 -90
- crackerjack/core/service_watchdog.py +62 -95
- crackerjack/core/session_coordinator.py +149 -0
- crackerjack/core/timeout_manager.py +14 -72
- crackerjack/core/websocket_lifecycle.py +13 -78
- crackerjack/core/workflow_orchestrator.py +171 -174
- crackerjack/docs/INDEX.md +11 -0
- crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
- crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
- crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
- crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
- crackerjack/docs/generated/api/SERVICES.md +1252 -0
- crackerjack/documentation/__init__.py +31 -0
- crackerjack/documentation/ai_templates.py +756 -0
- crackerjack/documentation/dual_output_generator.py +765 -0
- crackerjack/documentation/mkdocs_integration.py +518 -0
- crackerjack/documentation/reference_generator.py +977 -0
- crackerjack/dynamic_config.py +55 -50
- crackerjack/executors/async_hook_executor.py +10 -15
- crackerjack/executors/cached_hook_executor.py +117 -43
- crackerjack/executors/hook_executor.py +8 -34
- crackerjack/executors/hook_lock_manager.py +26 -183
- crackerjack/executors/individual_hook_executor.py +13 -11
- crackerjack/executors/lsp_aware_hook_executor.py +270 -0
- crackerjack/executors/tool_proxy.py +417 -0
- crackerjack/hooks/lsp_hook.py +79 -0
- crackerjack/intelligence/adaptive_learning.py +25 -10
- crackerjack/intelligence/agent_orchestrator.py +2 -5
- crackerjack/intelligence/agent_registry.py +34 -24
- crackerjack/intelligence/agent_selector.py +5 -7
- crackerjack/interactive.py +17 -6
- crackerjack/managers/async_hook_manager.py +0 -1
- crackerjack/managers/hook_manager.py +79 -1
- crackerjack/managers/publish_manager.py +44 -8
- crackerjack/managers/test_command_builder.py +1 -15
- crackerjack/managers/test_executor.py +1 -3
- crackerjack/managers/test_manager.py +98 -7
- crackerjack/managers/test_manager_backup.py +10 -9
- crackerjack/mcp/cache.py +2 -2
- crackerjack/mcp/client_runner.py +1 -1
- crackerjack/mcp/context.py +191 -68
- crackerjack/mcp/dashboard.py +7 -5
- crackerjack/mcp/enhanced_progress_monitor.py +31 -28
- crackerjack/mcp/file_monitor.py +30 -23
- crackerjack/mcp/progress_components.py +31 -21
- crackerjack/mcp/progress_monitor.py +50 -53
- crackerjack/mcp/rate_limiter.py +6 -6
- crackerjack/mcp/server_core.py +17 -16
- crackerjack/mcp/service_watchdog.py +2 -1
- crackerjack/mcp/state.py +4 -7
- crackerjack/mcp/task_manager.py +11 -9
- crackerjack/mcp/tools/core_tools.py +173 -32
- crackerjack/mcp/tools/error_analyzer.py +3 -2
- crackerjack/mcp/tools/execution_tools.py +8 -10
- crackerjack/mcp/tools/execution_tools_backup.py +42 -30
- crackerjack/mcp/tools/intelligence_tool_registry.py +7 -5
- crackerjack/mcp/tools/intelligence_tools.py +5 -2
- crackerjack/mcp/tools/monitoring_tools.py +33 -70
- crackerjack/mcp/tools/proactive_tools.py +24 -11
- crackerjack/mcp/tools/progress_tools.py +5 -8
- crackerjack/mcp/tools/utility_tools.py +20 -14
- crackerjack/mcp/tools/workflow_executor.py +62 -40
- crackerjack/mcp/websocket/app.py +8 -0
- crackerjack/mcp/websocket/endpoints.py +352 -357
- crackerjack/mcp/websocket/jobs.py +40 -57
- crackerjack/mcp/websocket/monitoring_endpoints.py +2935 -0
- crackerjack/mcp/websocket/server.py +7 -25
- crackerjack/mcp/websocket/websocket_handler.py +6 -17
- crackerjack/mixins/__init__.py +0 -2
- crackerjack/mixins/error_handling.py +1 -70
- crackerjack/models/config.py +12 -1
- crackerjack/models/config_adapter.py +49 -1
- crackerjack/models/protocols.py +122 -122
- crackerjack/models/resource_protocols.py +55 -210
- crackerjack/monitoring/ai_agent_watchdog.py +13 -13
- crackerjack/monitoring/metrics_collector.py +426 -0
- crackerjack/monitoring/regression_prevention.py +8 -8
- crackerjack/monitoring/websocket_server.py +643 -0
- crackerjack/orchestration/advanced_orchestrator.py +11 -6
- crackerjack/orchestration/coverage_improvement.py +3 -3
- crackerjack/orchestration/execution_strategies.py +26 -6
- crackerjack/orchestration/test_progress_streamer.py +8 -5
- crackerjack/plugins/base.py +2 -2
- crackerjack/plugins/hooks.py +7 -0
- crackerjack/plugins/managers.py +11 -8
- crackerjack/security/__init__.py +0 -1
- crackerjack/security/audit.py +6 -35
- crackerjack/services/anomaly_detector.py +392 -0
- crackerjack/services/api_extractor.py +615 -0
- crackerjack/services/backup_service.py +2 -2
- crackerjack/services/bounded_status_operations.py +15 -152
- crackerjack/services/cache.py +127 -1
- crackerjack/services/changelog_automation.py +395 -0
- crackerjack/services/config.py +15 -9
- crackerjack/services/config_merge.py +19 -80
- crackerjack/services/config_template.py +506 -0
- crackerjack/services/contextual_ai_assistant.py +48 -22
- crackerjack/services/coverage_badge_service.py +171 -0
- crackerjack/services/coverage_ratchet.py +27 -25
- crackerjack/services/debug.py +3 -3
- crackerjack/services/dependency_analyzer.py +460 -0
- crackerjack/services/dependency_monitor.py +14 -11
- crackerjack/services/documentation_generator.py +491 -0
- crackerjack/services/documentation_service.py +675 -0
- crackerjack/services/enhanced_filesystem.py +6 -5
- crackerjack/services/enterprise_optimizer.py +865 -0
- crackerjack/services/error_pattern_analyzer.py +676 -0
- crackerjack/services/file_hasher.py +1 -1
- crackerjack/services/git.py +8 -25
- crackerjack/services/health_metrics.py +10 -8
- crackerjack/services/heatmap_generator.py +735 -0
- crackerjack/services/initialization.py +11 -30
- crackerjack/services/input_validator.py +5 -97
- crackerjack/services/intelligent_commit.py +327 -0
- crackerjack/services/log_manager.py +15 -12
- crackerjack/services/logging.py +4 -3
- crackerjack/services/lsp_client.py +628 -0
- crackerjack/services/memory_optimizer.py +19 -87
- crackerjack/services/metrics.py +42 -33
- crackerjack/services/parallel_executor.py +9 -67
- crackerjack/services/pattern_cache.py +1 -1
- crackerjack/services/pattern_detector.py +6 -6
- crackerjack/services/performance_benchmarks.py +18 -59
- crackerjack/services/performance_cache.py +20 -81
- crackerjack/services/performance_monitor.py +27 -95
- crackerjack/services/predictive_analytics.py +510 -0
- crackerjack/services/quality_baseline.py +234 -0
- crackerjack/services/quality_baseline_enhanced.py +646 -0
- crackerjack/services/quality_intelligence.py +785 -0
- crackerjack/services/regex_patterns.py +605 -524
- crackerjack/services/regex_utils.py +43 -123
- crackerjack/services/secure_path_utils.py +5 -164
- crackerjack/services/secure_status_formatter.py +30 -141
- crackerjack/services/secure_subprocess.py +11 -92
- crackerjack/services/security.py +9 -41
- crackerjack/services/security_logger.py +12 -24
- crackerjack/services/server_manager.py +124 -16
- crackerjack/services/status_authentication.py +16 -159
- crackerjack/services/status_security_manager.py +4 -131
- crackerjack/services/thread_safe_status_collector.py +19 -125
- crackerjack/services/unified_config.py +21 -13
- crackerjack/services/validation_rate_limiter.py +5 -54
- crackerjack/services/version_analyzer.py +459 -0
- crackerjack/services/version_checker.py +1 -1
- crackerjack/services/websocket_resource_limiter.py +10 -144
- crackerjack/services/zuban_lsp_service.py +390 -0
- crackerjack/slash_commands/__init__.py +2 -7
- crackerjack/slash_commands/run.md +2 -2
- crackerjack/tools/validate_input_validator_patterns.py +14 -40
- crackerjack/tools/validate_regex_patterns.py +19 -48
- {crackerjack-0.33.0.dist-info → crackerjack-0.33.1.dist-info}/METADATA +196 -25
- crackerjack-0.33.1.dist-info/RECORD +229 -0
- crackerjack/CLAUDE.md +0 -207
- crackerjack/RULES.md +0 -380
- crackerjack/py313.py +0 -234
- crackerjack-0.33.0.dist-info/RECORD +0 -187
- {crackerjack-0.33.0.dist-info → crackerjack-0.33.1.dist-info}/WHEEL +0 -0
- {crackerjack-0.33.0.dist-info → crackerjack-0.33.1.dist-info}/entry_points.txt +0 -0
- {crackerjack-0.33.0.dist-info → crackerjack-0.33.1.dist-info}/licenses/LICENSE +0 -0
crackerjack/agents/test_creation_agent.py

@@ -28,15 +28,12 @@ class TestCreationAgent(SubAgent):
         }

     async def can_handle(self, issue: Issue) -> float:
-        """Enhanced confidence scoring based on issue complexity and expected impact."""
         if issue.type not in self.get_supported_types():
             return 0.0

         message_lower = issue.message.lower()

-        # High confidence for coverage improvement - key audit requirement
         if issue.type == IssueType.COVERAGE_IMPROVEMENT:
-            # Check for specific coverage improvement scenarios
             if any(
                 term in message_lower
                 for term in (
@@ -47,13 +44,12 @@ class TestCreationAgent(SubAgent):
                     "coverage requirement",
                 )
             ):
-                return 0.95
+                return 0.95
             return 0.9

         if issue.type == IssueType.TEST_ORGANIZATION:
             return self._check_test_organization_confidence(message_lower)

-        # Enhanced pattern matching for test creation needs
         perfect_score = self._check_perfect_test_creation_matches(message_lower)
         if perfect_score > 0:
             return perfect_score
@@ -62,12 +58,10 @@ class TestCreationAgent(SubAgent):
         if good_score > 0:
             return good_score

-        # Improved file path analysis
         file_path_score = self._check_file_path_test_indicators(issue.file_path)
         if file_path_score > 0:
             return file_path_score

-        # New: Check for untested functions specifically
         if self._indicates_untested_functions(message_lower):
             return 0.85

@@ -126,9 +120,7 @@ class TestCreationAgent(SubAgent):
         if not file_path:
             return 0.0

-        # Enhanced file path analysis
         if not self._has_corresponding_test(file_path):
-            # Higher confidence for core modules
             if any(
                 core_path in file_path
                 for core_path in ("/managers/", "/services/", "/core/", "/agents/")
@@ -138,7 +130,6 @@ class TestCreationAgent(SubAgent):
         return 0.0

     def _indicates_untested_functions(self, message_lower: str) -> bool:
-        """Check if message indicates untested functions."""
         return any(
             indicator in message_lower
             for indicator in (
@@ -152,10 +143,8 @@ class TestCreationAgent(SubAgent):
         )

     async def analyze_and_fix(self, issue: Issue) -> FixResult:
-        # Log the analysis
         self._log_analysis(issue)

-        # Apply fixes and create result
         return await self._apply_fixes_and_create_result(issue)

     def _log_analysis(self, issue: Issue) -> None:
@@ -174,7 +163,6 @@ class TestCreationAgent(SubAgent):
         self,
         issue: Issue,
     ) -> tuple[list[str], list[str]]:
-        # Apply all test creation fixes
         return await self._apply_all_test_creation_fixes(issue)

     async def _apply_all_test_creation_fixes(
@@ -184,7 +172,6 @@ class TestCreationAgent(SubAgent):
         fixes_applied: list[str] = []
         files_modified: list[str] = []

-        # Apply different types of fixes
         fixes_applied, files_modified = await self._apply_all_fix_types(
             issue, fixes_applied, files_modified
         )
@@ -197,7 +184,6 @@ class TestCreationAgent(SubAgent):
         fixes_applied: list[str],
         files_modified: list[str],
     ) -> tuple[list[str], list[str]]:
-        # Apply all fix types sequentially
         return await self._apply_sequential_fixes(issue, fixes_applied, files_modified)

     async def _apply_sequential_fixes(
@@ -206,7 +192,6 @@ class TestCreationAgent(SubAgent):
         fixes_applied: list[str],
         files_modified: list[str],
     ) -> tuple[list[str], list[str]]:
-        # Apply all fix types sequentially
         return await self._apply_all_fix_types_sequentially(
             issue, fixes_applied, files_modified
         )
@@ -217,7 +202,6 @@ class TestCreationAgent(SubAgent):
         fixes_applied: list[str],
         files_modified: list[str],
     ) -> tuple[list[str], list[str]]:
-        # Apply all fix types sequentially
         return await self._apply_all_fix_types_in_sequence(
             issue, fixes_applied, files_modified
         )
@@ -228,7 +212,6 @@ class TestCreationAgent(SubAgent):
         fixes_applied: list[str],
         files_modified: list[str],
     ) -> tuple[list[str], list[str]]:
-        # Apply all fix types in sequence
         return await self._apply_fix_types_in_defined_order(
             issue, fixes_applied, files_modified
         )
@@ -239,7 +222,6 @@ class TestCreationAgent(SubAgent):
         fixes_applied: list[str],
         files_modified: list[str],
     ) -> tuple[list[str], list[str]]:
-        # Apply coverage based fixes
         (
             fixes_applied,
             files_modified,
@@ -247,7 +229,6 @@ class TestCreationAgent(SubAgent):
             fixes_applied, files_modified
         )

-        # Apply file specific fixes
         (
             fixes_applied,
             files_modified,
@@ -255,7 +236,6 @@ class TestCreationAgent(SubAgent):
             issue, fixes_applied, files_modified
         )

-        # Apply function specific fixes
         (
             fixes_applied,
             files_modified,
@@ -270,7 +250,6 @@ class TestCreationAgent(SubAgent):
         fixes_applied: list[str],
         files_modified: list[str],
     ) -> tuple[list[str], list[str]]:
-        """Apply coverage based fixes sequentially."""
         coverage_fixes, coverage_files = await self._apply_coverage_based_fixes()
         fixes_applied.extend(coverage_fixes)
         files_modified.extend(coverage_files)
@@ -282,7 +261,6 @@ class TestCreationAgent(SubAgent):
         fixes_applied: list[str],
         files_modified: list[str],
     ) -> tuple[list[str], list[str]]:
-        """Apply file specific fixes sequentially."""
         file_fixes, file_modified = await self._apply_file_specific_fixes(
             issue.file_path,
         )
@@ -295,7 +273,6 @@ class TestCreationAgent(SubAgent):
         fixes_applied: list[str],
         files_modified: list[str],
     ) -> tuple[list[str], list[str]]:
-        """Apply function specific fixes sequentially."""
         function_fixes, function_files = await self._apply_function_specific_fixes()
         fixes_applied.extend(function_fixes)
         files_modified.extend(function_files)
@@ -353,12 +330,10 @@ class TestCreationAgent(SubAgent):
         fixes_applied: list[str],
         files_modified: list[str],
     ) -> tuple[list[str], list[str]]:
-        """Handle low coverage by creating tests for uncovered modules."""
         self.log(
-            f"Coverage below threshold: {coverage_analysis['current_coverage']
+            f"Coverage below threshold: {coverage_analysis['current_coverage']: .1%}",
         )

-        # Process uncovered modules
         return await self._process_uncovered_modules_for_low_coverage(
             coverage_analysis["uncovered_modules"], fixes_applied, files_modified
         )
@@ -369,7 +344,6 @@ class TestCreationAgent(SubAgent):
         fixes_applied: list[str],
         files_modified: list[str],
     ) -> tuple[list[str], list[str]]:
-        """Process uncovered modules for low coverage scenario."""
         for module_path in uncovered_modules:
             test_fixes = await self._create_tests_for_module(module_path)
             fixes_applied.extend(test_fixes["fixes"])
@@ -383,8 +357,6 @@ class TestCreationAgent(SubAgent):
         fixes_applied: list[str],
         files_modified: list[str],
     ) -> tuple[list[str], list[str]]:
-        """Process uncovered modules to create tests."""
-        # Process each uncovered module
         return await self._process_each_uncovered_module(
             uncovered_modules, fixes_applied, files_modified
         )
@@ -395,8 +367,6 @@ class TestCreationAgent(SubAgent):
         fixes_applied: list[str],
         files_modified: list[str],
     ) -> tuple[list[str], list[str]]:
-        """Process each uncovered module individually."""
-        # Process all uncovered modules
        return await self._process_all_uncovered_modules(
             uncovered_modules, fixes_applied, files_modified
         )
@@ -407,7 +377,6 @@ class TestCreationAgent(SubAgent):
         fixes_applied: list[str],
         files_modified: list[str],
     ) -> tuple[list[str], list[str]]:
-        """Process all uncovered modules."""
         for module_path in uncovered_modules:
             fixes_applied, files_modified = await self._process_single_uncovered_module(
                 module_path, fixes_applied, files_modified
@@ -421,7 +390,6 @@ class TestCreationAgent(SubAgent):
         fixes_applied: list[str],
         files_modified: list[str],
     ) -> tuple[list[str], list[str]]:
-        """Process a single uncovered module."""
         test_fixes = await self._create_tests_for_module(module_path)
         fixes_applied.extend(test_fixes["fixes"])
         files_modified.extend(test_fixes["files"])
@@ -454,7 +422,6 @@ class TestCreationAgent(SubAgent):
         fixes_applied: list[str],
         files_modified: list[str],
     ) -> tuple[list[str], list[str]]:
-        """Process untested functions to create tests."""
         for func_info in untested_functions[:5]:
             func_fixes = await self._create_test_for_function(func_info)
             fixes_applied.extend(func_fixes["fixes"])
@@ -467,10 +434,8 @@ class TestCreationAgent(SubAgent):
         fixes_applied: list[str],
         files_modified: list[str],
     ) -> FixResult:
-        """Enhanced result creation with detailed confidence scoring."""
         success = len(fixes_applied) > 0

-        # Calculate confidence based on the fixes applied
         confidence = self._calculate_confidence(success, fixes_applied, files_modified)

         return FixResult(
@@ -485,35 +450,28 @@ class TestCreationAgent(SubAgent):
     def _calculate_confidence(
         self, success: bool, fixes_applied: list[str], files_modified: list[str]
     ) -> float:
-        """Calculate confidence based on types of fixes applied."""
         if not success:
             return 0.0

-
-        confidence = 0.5  # Base confidence
+        confidence = 0.5

-        # Higher confidence based on quality of fixes
         test_file_fixes = [f for f in fixes_applied if "test file" in f.lower()]
         function_fixes = [f for f in fixes_applied if "function" in f.lower()]
         coverage_fixes = [f for f in fixes_applied if "coverage" in f.lower()]

-        # Boost confidence for comprehensive test creation
         if test_file_fixes:
-            confidence += 0.25
+            confidence += 0.25
         if function_fixes:
-            confidence += 0.15
+            confidence += 0.15
         if coverage_fixes:
-            confidence += 0.1
+            confidence += 0.1

-        # Additional boost for multiple file creation (broader impact)
         if len(files_modified) > 1:
             confidence += 0.1

-        # Cap confidence at 0.95 for realistic assessment
         return min(confidence, 0.95)

     def _generate_recommendations(self, success: bool) -> list[str]:
-        """Generate recommendations based on the success of the operation."""
         if success:
             return [
                 "Generated comprehensive test suite",
@@ -526,13 +484,16 @@ class TestCreationAgent(SubAgent):
         ]

     def _get_enhanced_test_creation_recommendations(self) -> list[str]:
-        """Enhanced recommendations based on audit requirements."""
         return [
             "Run 'python -m crackerjack -t' to execute comprehensive coverage analysis",
-
-
-
-
+            (
+                "Focus on testing high-priority functions in managers/ services/ "
+                "and core/ modules"
+            ),
+            (
+                "Implement parametrized tests (@pytest.mark.parametrize) "
+                "for functions with multiple arguments"
+            ),
             "Add edge case testing for boundary conditions and error scenarios",
             "Use fixtures for complex object instantiation and dependency injection",
             "Consider integration tests for modules with multiple classes/functions",
@@ -554,14 +515,11 @@ class TestCreationAgent(SubAgent):
         )

     async def _analyze_coverage(self) -> dict[str, Any]:
-        """Enhanced coverage analysis with detailed metrics and improvement tracking."""
         try:
-            # First try to get coverage from existing reports
             coverage_data = await self._get_existing_coverage_data()
             if coverage_data:
                 return coverage_data

-            # Run coverage analysis if no existing data
             returncode, _, stderr = await self._run_coverage_command()

             if returncode != 0:
@@ -574,9 +532,7 @@ class TestCreationAgent(SubAgent):
             return self._create_default_coverage_result()

     async def _get_existing_coverage_data(self) -> dict[str, Any] | None:
-        """Try to get coverage data from existing coverage reports."""
         try:
-            # Check for JSON coverage report
             json_report = self.context.project_path / "coverage.json"
             if json_report.exists():
                 content = self.context.get_file_content(json_report)
@@ -584,7 +540,6 @@ class TestCreationAgent(SubAgent):
                 coverage_json = json.loads(content)
                 return self._parse_coverage_json(coverage_json)

-            # Check for .coverage file
             coverage_file = self.context.project_path / ".coverage"
             if coverage_file.exists():
                 return await self._process_coverage_results_enhanced()
@@ -595,27 +550,24 @@ class TestCreationAgent(SubAgent):
         return None

     def _parse_coverage_json(self, coverage_json: dict[str, Any]) -> dict[str, Any]:
-        """Parse coverage JSON data into our format."""
         try:
             totals = coverage_json.get("totals", {})
             current_coverage = totals.get("percent_covered", 0) / 100.0

-            # Find uncovered modules
             uncovered_modules = []
             files = coverage_json.get("files", {})

             for file_path, file_data in files.items():
                 if file_data.get("summary", {}).get("percent_covered", 100) < 80:
-                    # Convert absolute path to relative
                     rel_path = str(
                         Path(file_path).relative_to(self.context.project_path)
                     )
                     uncovered_modules.append(rel_path)

             return {
-                "below_threshold": current_coverage < 0.8,
+                "below_threshold": current_coverage < 0.8,
                 "current_coverage": current_coverage,
-                "uncovered_modules": uncovered_modules[:15],
+                "uncovered_modules": uncovered_modules[:15],
                 "missing_lines": totals.get("num_statements", 0)
                 - totals.get("covered_lines", 0),
                 "total_lines": totals.get("num_statements", 0),
@@ -644,23 +596,20 @@ class TestCreationAgent(SubAgent):
             return self._create_default_coverage_result()

     async def _process_coverage_results_enhanced(self) -> dict[str, Any]:
-        """Enhanced coverage results processing with detailed analysis."""
         coverage_file = self.context.project_path / ".coverage"
         if not coverage_file.exists():
             return self._create_default_coverage_result()

-        # Get more detailed coverage analysis
         uncovered_modules = await self._find_uncovered_modules_enhanced()
         untested_functions = await self._find_untested_functions_enhanced()

-        # Estimate current coverage more accurately
         current_coverage = await self._estimate_current_coverage()

         return {
-            "below_threshold": current_coverage < 0.8,
+            "below_threshold": current_coverage < 0.8,
             "current_coverage": current_coverage,
-            "uncovered_modules": uncovered_modules[:15],
-            "untested_functions": untested_functions[:20],
+            "uncovered_modules": uncovered_modules[:15],
+            "untested_functions": untested_functions[:20],
             "coverage_gaps": await self._identify_coverage_gaps(),
             "improvement_potential": self._calculate_improvement_potential(
                 len(uncovered_modules), len(untested_functions)
@@ -668,43 +617,38 @@ class TestCreationAgent(SubAgent):
         }

     async def _estimate_current_coverage(self) -> float:
-        """Estimate current coverage by analyzing test files vs source files."""
         try:
-            source_files = list(
+            source_files: list[Path] = list(
                 (self.context.project_path / "crackerjack").rglob("*.py")
             )
             source_files = [f for f in source_files if not f.name.startswith("test_")]

-            test_files = list(
+            test_files: list[Path] = list(
+                (self.context.project_path / "tests").rglob("test_*.py")
+            )

             if not source_files:
                 return 0.0

-            # Simple heuristic: ratio of test files to source files
             coverage_ratio = len(test_files) / len(source_files)

-
-            estimated_coverage = min(coverage_ratio * 0.6, 0.9)  # Cap at 90%
+            estimated_coverage = min(coverage_ratio * 0.6, 0.9)

             return estimated_coverage

         except Exception:
-            return 0.1
+            return 0.1

     def _calculate_improvement_potential(
         self, uncovered_modules: int, untested_functions: int
     ) -> dict[str, Any]:
-        """Calculate potential coverage improvement from test generation."""
         if uncovered_modules == untested_functions == 0:
             return {"percentage_points": 0, "priority": "low"}

-
-
-        function_improvement = untested_functions * 0.8  # Each function ~0.8% coverage
+        module_improvement = uncovered_modules * 2.5
+        function_improvement = untested_functions * 0.8

-        total_potential = min(
-            module_improvement + function_improvement, 40
-        )  # Cap at 40%
+        total_potential = min(module_improvement + function_improvement, 40)

         priority = (
             "high"
@@ -729,7 +673,6 @@ class TestCreationAgent(SubAgent):
         }

     async def _find_uncovered_modules_enhanced(self) -> list[dict[str, Any]]:
-        """Enhanced uncovered modules detection with priority scoring."""
         uncovered: list[dict[str, Any]] = []

         package_dir = self.context.project_path / "crackerjack"
@@ -744,12 +687,10 @@ class TestCreationAgent(SubAgent):
             module_info = await self._analyze_module_priority(py_file)
             uncovered.append(module_info)

-        # Sort by priority (highest first)
         uncovered.sort(key=operator.itemgetter("priority_score"), reverse=True)
         return uncovered[:15]

     async def _analyze_module_priority(self, py_file: Path) -> dict[str, Any]:
-        """Analyze module to determine testing priority."""
         try:
             content = self.context.get_file_content(py_file) or ""
             ast.parse(content)
@@ -757,10 +698,8 @@ class TestCreationAgent(SubAgent):
             functions = await self._extract_functions_from_file(py_file)
             classes = await self._extract_classes_from_file(py_file)

-            # Calculate priority score
             priority_score = 0

-            # Core modules get higher priority
             rel_path = str(py_file.relative_to(self.context.project_path))
             if any(
                 core_path in rel_path
@@ -768,15 +707,12 @@ class TestCreationAgent(SubAgent):
             ):
                 priority_score += 10

-            # More functions/classes = higher priority
             priority_score += len(functions) * 2
             priority_score += len(classes) * 3

-            # Public API functions get higher priority
             public_functions = [f for f in functions if not f["name"].startswith("_")]
             priority_score += len(public_functions) * 2

-            # File size consideration (larger files need tests more)
             lines_count = len(content.split("\n"))
             if lines_count > 100:
                 priority_score += 5
@@ -808,7 +744,6 @@ class TestCreationAgent(SubAgent):
             }

     def _categorize_module(self, relative_path: str) -> str:
-        """Categorize module for test generation strategies."""
         if "managers/" in relative_path:
             return "manager"
         elif "services/" in relative_path:
@@ -824,7 +759,6 @@ class TestCreationAgent(SubAgent):
             return "utility"

     async def _find_untested_functions_enhanced(self) -> list[dict[str, Any]]:
-        """Enhanced untested function detection with detailed analysis."""
         untested: list[dict[str, Any]] = []

         package_dir = self.context.project_path / "crackerjack"
@@ -840,14 +774,12 @@ class TestCreationAgent(SubAgent):
             )
             untested.extend(file_untested)

-        # Sort by testing priority
         untested.sort(key=operator.itemgetter("testing_priority"), reverse=True)
         return untested[:20]

     async def _find_untested_functions_in_file_enhanced(
         self, py_file: Path
     ) -> list[dict[str, Any]]:
-        """Enhanced untested function detection with priority scoring."""
         untested: list[dict[str, Any]] = []

         try:
@@ -865,9 +797,7 @@ class TestCreationAgent(SubAgent):
     async def _analyze_function_testability(
         self, func: dict[str, Any], py_file: Path
     ) -> dict[str, Any]:
-        """Analyze function to determine testing priority and approach."""
         try:
-            # Basic function info
             func_info = {
                 "name": func["name"],
                 "file": str(py_file),
@@ -881,14 +811,11 @@ class TestCreationAgent(SubAgent):
                 "test_strategy": "basic",
             }

-            # Calculate testing priority
             priority = 0

-            # Public functions get higher priority
             if not func["name"].startswith("_"):
                 priority += 10

-            # Functions with multiple args are more complex
             arg_count = len(func.get("args", []))
             if arg_count > 3:
                 priority += 5
@@ -898,14 +825,12 @@ class TestCreationAgent(SubAgent):
                 priority += 2
                 func_info["complexity"] = "moderate"

-            # Core module functions get higher priority
             if any(
                 core_path in str(func_info["relative_file"])
                 for core_path in ("managers/", "services/", "core/")
             ):
                 priority += 8

-            # Async functions need special handling
             if func.get("is_async", False):
                 priority += 3
                 func_info["test_strategy"] = "async"
@@ -927,11 +852,9 @@ class TestCreationAgent(SubAgent):
             }

     async def _identify_coverage_gaps(self) -> list[dict[str, Any]]:
-
-        gaps = []
+        gaps: list[dict[str, Any]] = []

         try:
-            # Find modules with partial test coverage
             package_dir = self.context.project_path / "crackerjack"
             tests_dir = self.context.project_path / "tests"

@@ -949,17 +872,16 @@ class TestCreationAgent(SubAgent):
         except Exception as e:
             self.log(f"Error identifying coverage gaps: {e}", "WARN")

-        return gaps[:10]
+        return gaps[:10]

     async def _analyze_existing_test_coverage(self, py_file: Path) -> dict[str, Any]:
-        """Analyze existing test coverage for a specific file."""
         try:
             test_file_path = await self._generate_test_file_path(py_file)

-            coverage_info = {
+            coverage_info: dict[str, Any] = {
                 "source_file": str(py_file.relative_to(self.context.project_path)),
                 "test_file": str(test_file_path) if test_file_path.exists() else None,
-                "has_gaps": True,
+                "has_gaps": True,
                 "missing_test_types": [],
                 "coverage_score": 0,
             }
@@ -972,10 +894,8 @@ class TestCreationAgent(SubAgent):
                 ]
                 return coverage_info

-            # Analyze existing test file
             test_content = self.context.get_file_content(test_file_path) or ""

-            # Check for different test types
             missing_types = []
             if "def test_" not in test_content:
                 missing_types.append("basic")
@@ -1026,7 +946,6 @@ class TestCreationAgent(SubAgent):
         return False

     async def _create_tests_for_module(self, module_path: str) -> dict[str, list[str]]:
-        """Create tests for a module."""
         fixes: list[str] = []
         files: list[str] = []

@@ -1041,7 +960,6 @@ class TestCreationAgent(SubAgent):
         return {"fixes": fixes, "files": files}

     async def _generate_module_tests(self, module_path: str) -> dict[str, list[str]]:
-        """Generate tests for a module."""
         module_file = Path(module_path)
         if not await self._is_module_valid(module_file):
             return {"fixes": [], "files": []}
@@ -1055,7 +973,6 @@ class TestCreationAgent(SubAgent):
         return await self._create_test_artifacts(module_file, functions, classes)

     async def _is_module_valid(self, module_file: Path) -> bool:
-        """Check if the module file is valid."""
         return module_file.exists()

     async def _create_test_artifacts(
@@ -1064,7 +981,6 @@ class TestCreationAgent(SubAgent):
         functions: list[dict[str, Any]],
         classes: list[dict[str, Any]],
     ) -> dict[str, list[str]]:
-        """Create test artifacts for the module."""
         test_file_path = await self._generate_test_file_path(module_file)
         test_content = await self._generate_test_content(
             module_file,
@@ -1082,7 +998,6 @@ class TestCreationAgent(SubAgent):
             return {"fixes": [], "files": []}

     def _handle_test_creation_error(self, module_path: str, e: Exception) -> None:
-        """Handle errors during test creation."""
         self.log(f"Error creating tests for module {module_path}: {e}", "ERROR")

     async def _create_tests_for_file(self, file_path: str) -> dict[str, list[str]]:
@@ -1155,7 +1070,7 @@ class TestCreationAgent(SubAgent):
             fixes.append(f"Added test for function {func_info['name']}")
             files.append(str(test_file_path))
         else:
-            test_content = await self.
+            test_content = await self._generate_function_test(func_info)
             if self.context.write_file_content(test_file_path, test_content):
                 fixes.append(f"Created test file with test for {func_info['name']}")
                 files.append(str(test_file_path))
@@ -1172,7 +1087,7 @@ class TestCreationAgent(SubAgent):
         self,
         file_path: Path,
     ) -> list[dict[str, Any]]:
-        functions = []
+        functions: list[dict[str, Any]] = []

         try:
             content = self.context.get_file_content(file_path)
@@ -1188,7 +1103,6 @@ class TestCreationAgent(SubAgent):
         return functions

     def _parse_function_nodes(self, tree: ast.AST) -> list[dict[str, Any]]:
-        """Enhanced function parsing with async function support."""
         functions: list[dict[str, Any]] = []

         for node in ast.walk(tree):
@@ -1196,7 +1110,7 @@ class TestCreationAgent(SubAgent):
                 node, ast.FunctionDef | ast.AsyncFunctionDef
             ) and self._is_valid_function_node(node):
                 function_info = self._create_function_info(node)
-
+
                 function_info["is_async"] = isinstance(node, ast.AsyncFunctionDef)
                 functions.append(function_info)
@@ -1205,13 +1119,11 @@ class TestCreationAgent(SubAgent):
     def _is_valid_function_node(
         self, node: ast.FunctionDef | ast.AsyncFunctionDef
     ) -> bool:
-        """Enhanced validation for both sync and async functions."""
         return not node.name.startswith(("_", "test_"))

     def _create_function_info(
         self, node: ast.FunctionDef | ast.AsyncFunctionDef
     ) -> dict[str, Any]:
-        """Enhanced function info creation with async support."""
         return {
             "name": node.name,
             "line": node.lineno,
@@ -1223,7 +1135,7 @@ class TestCreationAgent(SubAgent):
         }

     async def _extract_classes_from_file(self, file_path: Path) -> list[dict[str, Any]]:
-        classes = []
+        classes: list[dict[str, Any]] = []

         try:
             content = self.context.get_file_content(file_path)
@@ -1265,7 +1177,6 @@ class TestCreationAgent(SubAgent):
     def _get_function_signature(
         self, node: ast.FunctionDef | ast.AsyncFunctionDef
     ) -> str:
-        """Enhanced function signature generation with async support."""
         args = [arg.arg for arg in node.args.args]
         prefix = "async " if isinstance(node, ast.AsyncFunctionDef) else ""
         return f"{prefix}{node.name}({', '.join(args)})"
@@ -1273,7 +1184,6 @@ class TestCreationAgent(SubAgent):
     def _get_return_annotation(
         self, node: ast.FunctionDef | ast.AsyncFunctionDef
     ) -> str:
-        """Enhanced return annotation extraction with async support."""
         if node.returns:
             return ast.unparse(node.returns) if (hasattr(ast, "unparse")) else "Any"
         return "Any"
@@ -1317,7 +1227,6 @@ class TestCreationAgent(SubAgent):
         functions: list[dict[str, Any]],
         classes: list[dict[str, Any]],
     ) -> str:
-        """Generate comprehensive test content with enhanced patterns."""
         test_params = self._prepare_test_generation_params(module_file)
         return await self._generate_all_test_types(test_params, functions, classes)

@@ -1327,11 +1236,9 @@ class TestCreationAgent(SubAgent):
         functions: list[dict[str, Any]],
         classes: list[dict[str, Any]],
     ) -> str:
-        """Generate comprehensive test content from prepared parameters."""
         return await self._generate_all_test_types(test_params, functions, classes)

     def _prepare_test_generation_params(self, module_file: Path) -> dict[str, Any]:
-        """Prepare parameters for test generation."""
         module_name = self._get_module_import_path(module_file)
         module_category = self._categorize_module(
             str(module_file.relative_to(self.context.project_path))
@@ -1348,15 +1255,12 @@ class TestCreationAgent(SubAgent):
         functions: list[dict[str, Any]],
         classes: list[dict[str, Any]],
     ) -> str:
-        """Generate all types of tests."""
-        # Generate header
         base_content = self._generate_enhanced_test_file_header(
             test_params["module_name"],
             test_params["module_file"],
             test_params["module_category"],
         )

-        # Generate different test sections
         function_tests = await self._generate_function_tests_content(
             functions, test_params["module_category"]
         )
@@ -1375,13 +1279,11 @@ class TestCreationAgent(SubAgent):
     async def _generate_function_tests_content(
         self, functions: list[dict[str, Any]], module_category: str
     ) -> str:
-        """Generate function tests content."""
         return await self._generate_enhanced_function_tests(functions, module_category)

     async def _generate_class_tests_content(
         self, classes: list[dict[str, Any]], module_category: str
     ) -> str:
-        """Generate class tests content."""
         return await self._generate_enhanced_class_tests(classes, module_category)

     async def _generate_integration_tests_content(
@@ -1391,7 +1293,6 @@ class TestCreationAgent(SubAgent):
         classes: list[dict[str, Any]],
         module_category: str,
     ) -> str:
-        """Generate integration tests content."""
         return await self._generate_integration_tests(
             module_file, functions, classes, module_category
         )
@@ -1399,9 +1300,6 @@ class TestCreationAgent(SubAgent):
     def _generate_enhanced_test_file_header(
         self, module_name: str, module_file: Path, module_category: str
     ) -> str:
-        """Generate enhanced test file header with appropriate imports based on
-        module type."""
-        # Determine imports based on module category
         imports = [
             "import pytest",
             "from pathlib import Path",
@@ -1421,13 +1319,10 @@ class TestCreationAgent(SubAgent):

         imports_str = "\n".join(imports)

-        # Add specific imports for the module
         try:
-            # Try to import specific classes/functions
             content = self.context.get_file_content(module_file) or ""
             tree = ast.parse(content)

-            # Extract importable items
             importable_items = []
             for node in ast.walk(tree):
                 if isinstance(node, ast.ClassDef) and not node.name.startswith("_"):
@@ -1449,43 +1344,28 @@ class TestCreationAgent(SubAgent):

         class_name = f"Test{module_file.stem.replace('_', '').title()}"

-        return
-
-
-
-
-
-
-
-        ""
-
-
-
-
-
-
-        """
-
-    def test_module_imports_successfully(self)
-        """Test that the module can be imported without errors."""
-        import {module_name}
-        assert {module_name} is not None
-
-'''
-
-    async def _generate_minimal_test_file(self, func_info: dict[str, Any]) -> str:
-        file_path = Path(func_info["file"])
-        module_name = self._get_module_import_path(file_path)
-
-        return f'''"""Tests for {func_info["name"]} function."""
-
-import pytest
-
-from {module_name} import {func_info["name"]}
-
-
-{await self._generate_function_test(func_info)}
-'''
+        return (
+            f'"""{imports_str}\n'
+            f"{specific_imports}\n"
+            "\n"
+            "\n"
+            f"class {class_name}:\n"
+            f'    """Tests for {module_name}.\n'
+            "\n"
+            f"    This module contains comprehensive tests for {module_name}\n"
+            "    including:\n"
+            "    - Basic functionality tests\n"
+            "    - Edge case validation\n"
+            "    - Error handling verification\n"
+            "    - Integration testing\n"
+            "    - Performance validation (where applicable)\n"
+            '    """\n'
+            "\n"
+            "    def test_module_imports_successfully(self):\n"
+            '        """Test that the module can be imported without errors."""\n'
+            f"        import {module_name}\n"
+            f"        assert {module_name} is not None\n"
+        )

     def _get_module_import_path(self, file_path: Path) -> str:
         try:
@@ -1496,30 +1376,26 @@ from {module_name} import {func_info["name"]}
|
|
|
1496
1376
|
return file_path.stem
|
|
1497
1377
|
|
|
1498
1378
|
async def _generate_function_test(self, func_info: dict[str, Any]) -> str:
|
|
1499
|
-
"""Generate a test for a specific function."""
|
|
1500
1379
|
func_name = func_info["name"]
|
|
1501
1380
|
args = func_info.get("args", [])
|
|
1502
1381
|
|
|
1503
|
-
|
|
1504
|
-
|
|
1505
|
-
"""Test basic functionality of {func_name}."""
|
|
1382
|
+
test_template = f"""def test_{func_name}_basic(self):
|
|
1383
|
+
\"\"\"Test basic functionality of {func_name}.\"\"\"
|
|
1506
1384
|
try:
|
|
1507
|
-
# Basic test - may need manual implementation for specific arguments
|
|
1508
1385
|
result = {func_name}({self._generate_default_args(args)})
|
|
1509
1386
|
assert result is not None or result is None
|
|
1510
1387
|
except TypeError:
|
|
1511
|
-
pytest.skip(
|
|
1512
|
-
|
|
1388
|
+
pytest.skip(
|
|
1389
|
+
"Function requires specific arguments - manual implementation needed"
|
|
1390
|
+
)
|
|
1513
1391
|
except Exception as e:
|
|
1514
|
-
pytest.fail(f"Unexpected error in {func_name}: {{e}}")
|
|
1392
|
+
pytest.fail(f"Unexpected error in {func_name}: {{e}}")"""
|
|
1515
1393
|
|
|
1516
1394
|
return test_template
|
|
1517
1395
|
|
|
1518
1396
|
async def _generate_enhanced_function_tests(
|
|
1519
1397
|
self, functions: list[dict[str, Any]], module_category: str
|
|
1520
1398
|
) -> str:
|
|
1521
|
-
"""Generate enhanced test methods for functions with parametrization and
|
|
1522
|
-
edge cases."""
|
|
1523
1399
|
if not functions:
|
|
1524
1400
|
return ""
|
|
1525
1401
|
|
|
@@ -1535,14 +1411,11 @@ from {module_name} import {func_info["name"]}
|
|
|
1535
1411
|
async def _generate_all_tests_for_function(
|
|
1536
1412
|
self, func: dict[str, Any], module_category: str
|
|
1537
1413
|
) -> list[str]:
|
|
1538
|
-
"""Generate all test types for a single function."""
|
|
1539
1414
|
func_tests = []
|
|
1540
1415
|
|
|
1541
|
-
# Always generate basic test
|
|
1542
1416
|
basic_test = await self._generate_basic_function_test(func, module_category)
|
|
1543
1417
|
func_tests.append(basic_test)
|
|
1544
1418
|
|
|
1545
|
-
# Generate additional tests based on function characteristics
|
|
1546
1419
|
additional_tests = await self._generate_conditional_tests_for_function(
|
|
1547
1420
|
func, module_category
|
|
1548
1421
|
)
|
|
@@ -1553,23 +1426,19 @@ from {module_name} import {func_info["name"]}
|
|
|
1553
1426
|
async def _generate_conditional_tests_for_function(
|
|
1554
1427
|
self, func: dict[str, Any], module_category: str
|
|
1555
1428
|
) -> list[str]:
|
|
1556
|
-
"""Generate conditional tests based on function characteristics."""
|
|
1557
1429
|
tests = []
|
|
1558
1430
|
args = func.get("args", [])
|
|
1559
1431
|
func_name = func["name"]
|
|
1560
1432
|
|
|
1561
|
-
# Generate parametrized test if function has multiple args
|
|
1562
1433
|
if self._should_generate_parametrized_test(args):
|
|
1563
1434
|
parametrized_test = await self._generate_parametrized_test(
|
|
1564
1435
|
func, module_category
|
|
1565
1436
|
)
|
|
1566
1437
|
tests.append(parametrized_test)
|
|
1567
1438
|
|
|
1568
|
-
# Always generate error handling test
|
|
1569
1439
|
error_test = await self._generate_error_handling_test(func, module_category)
|
|
1570
1440
|
tests.append(error_test)
|
|
1571
1441
|
|
|
1572
|
-
# Generate edge case tests for complex functions
|
|
1573
1442
|
if self._should_generate_edge_case_test(args, func_name):
|
|
1574
1443
|
edge_test = await self._generate_edge_case_test(func, module_category)
|
|
1575
1444
|
tests.append(edge_test)
|
|
@@ -1577,11 +1446,9 @@ from {module_name} import {func_info["name"]}
|
|
|
1577
1446
|
return tests
|
|
1578
1447
|
|
|
1579
1448
|
def _should_generate_parametrized_test(self, args: list[str]) -> bool:
|
|
1580
|
-
"""Determine if parametrized test should be generated."""
|
|
1581
1449
|
return len(args) > 1
|
|
1582
1450
|
|
|
1583
1451
|
def _should_generate_edge_case_test(self, args: list[str], func_name: str) -> bool:
|
|
1584
|
-
"""Determine if edge case test should be generated."""
|
|
1585
1452
|
has_multiple_args = len(args) > 2
|
|
1586
1453
|
is_complex_function = any(
|
|
1587
1454
|
hint in func_name.lower()
|
|
@@ -1592,7 +1459,6 @@ from {module_name} import {func_info["name"]}
|
|
|
1592
1459
|
async def _generate_basic_function_test(
|
|
1593
1460
|
self, func: dict[str, Any], module_category: str
|
|
1594
1461
|
) -> str:
|
|
1595
|
-
"""Generate basic functionality test for a function."""
|
|
1596
1462
|
func_name = func["name"]
|
|
1597
1463
|
args = func.get("args", [])
|
|
1598
1464
|
|
|
@@ -1602,7 +1468,6 @@ from {module_name} import {func_info["name"]}
|
|
|
1602
1468
|
def _get_test_template_generator(
|
|
1603
1469
|
self, module_category: str
|
|
1604
1470
|
) -> Callable[[str, list[str]], str]:
|
|
1605
|
-
"""Get the appropriate test template generator for the module category."""
|
|
1606
1471
|
return {
|
|
1607
1472
|
"agent": self._generate_agent_test_template,
|
|
1608
1473
|
"service": self._generate_async_test_template,
|
|
@@ -1610,58 +1475,79 @@ from {module_name} import {func_info["name"]}
         }.get(module_category, self._generate_default_test_template)

     def _generate_agent_test_template(self, func_name: str, args: list[str]) -> str:
-
-
-
-
-
-
-
-result
-
-
-pytest.skip(
-
-
+        template = (
+            "    def test_FUNC_NAME_basic_functionality(self):\n"
+            '        """Test basic functionality of FUNC_NAME."""\n'
+            "\n"
+            "\n"
+            "        try:\n"
+            "            result = FUNC_NAME(ARGS)\n"
+            "            assert result is not None or result is None\n"
+            "        except (TypeError, NotImplementedError) as e:\n"
+            + (
+                "            pytest.skip('Function FUNC_NAME requires manual "
+                "implementation: ' + str(e))\n"
+            )
+            + "        except Exception as e:\n"
+            "            pytest.fail('Unexpected error in FUNC_NAME: ' + str(e))"
+        )
+
+        return template.replace("FUNC_NAME", func_name).replace(
+            "ARGS", self._generate_smart_default_args(args)
+        )

     def _generate_async_test_template(self, func_name: str, args: list[str]) -> str:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-pytest.
+        template = (
+            "    @pytest.mark.asyncio\n"
+            "    async def test_FUNC_NAME_basic_functionality(self):\n"
+            '        """Test basic functionality of FUNC_NAME."""\n'
+            "\n"
+            "\n"
+            "        try:\n"
+            "            if asyncio.iscoroutinefunction(FUNC_NAME):\n"
+            "                result = await FUNC_NAME(ARGS)\n"
+            "            else:\n"
+            "                result = FUNC_NAME(ARGS)\n"
+            "            assert result is not None or result is None\n"
+            "        except (TypeError, NotImplementedError) as e:\n"
+            + (
+                "            pytest.skip('Function FUNC_NAME requires manual "
+                "implementation: ' + str(e))\n"
+            )
+            + "        except Exception as e:\n"
+            "            pytest.fail('Unexpected error in FUNC_NAME: ' + str(e))"
+        )
+
+        return template.replace("FUNC_NAME", func_name).replace(
+            "ARGS", self._generate_smart_default_args(args)
+        )

     def _generate_default_test_template(self, func_name: str, args: list[str]) -> str:
-
-
-
-"
-
-result
-
-
-pytest.skip(
-
-
+        template = (
+            "    def test_FUNC_NAME_basic_functionality(self):\n"
+            '        """Test basic functionality of FUNC_NAME."""\n'
+            "        try:\n"
+            "            result = FUNC_NAME(ARGS)\n"
+            "            assert result is not None or result is None\n"
+            "        except (TypeError, NotImplementedError) as e:\n"
+            + (
+                "            pytest.skip('Function FUNC_NAME requires manual "
+                "implementation: ' + str(e))\n"
+            )
+            + "        except Exception as e:\n"
+            "            pytest.fail('Unexpected error in FUNC_NAME: ' + str(e))"
+        )
+
+        return template.replace("FUNC_NAME", func_name).replace(
+            "ARGS", self._generate_smart_default_args(args)
+        )

     async def _generate_parametrized_test(
         self, func: dict[str, Any], module_category: str
     ) -> str:
-        """Generate parametrized test for functions with multiple arguments."""
         func_name = func["name"]
         args = func.get("args", [])

-        # Generate test parameters based on argument types
         test_cases = self._generate_test_parameters(args)

         if not test_cases:
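The three builders above drop the old triple-quoted templates in favor of plain concatenated literals that are rendered with `str.replace`. As a rough, self-contained sketch of what the new default path produces for a hypothetical function named `load_settings` (the real `ARGS` string comes from `_generate_smart_default_args`):

    template = (
        "    def test_FUNC_NAME_basic_functionality(self):\n"
        '        """Test basic functionality of FUNC_NAME."""\n'
        "        try:\n"
        "            result = FUNC_NAME(ARGS)\n"
        "            assert result is not None or result is None\n"
        "        except (TypeError, NotImplementedError) as e:\n"
        "            pytest.skip('Function FUNC_NAME requires manual implementation: ' + str(e))\n"
        "        except Exception as e:\n"
        "            pytest.fail('Unexpected error in FUNC_NAME: ' + str(e))"
    )

    # Hypothetical inputs; in the agent both values come from parsed module metadata.
    print(template.replace("FUNC_NAME", "load_settings").replace("ARGS", '"config.toml"'))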
@@ -1669,87 +1555,86 @@ from {module_name} import {func_info["name"]}

         parametrize_decorator = f"@pytest.mark.parametrize({test_cases})"

-test_template =
-{parametrize_decorator}
-def test_{func_name}_with_parameters(self,
-",
-}
-"
-
-
-
-
-
-
-
-
-
-
-
-
+        test_template = (
+            f"    {parametrize_decorator}\n"
+            f"    def test_{func_name}_with_parameters(self, "
+            f"{', '.join(args) if len(args) <= 5 else 'test_input'}):\n"
+            f'        """Test {func_name} with various parameter combinations."""\n'
+            "        try:\n"
+            f"            if len({args}) <= 5:\n"
+            f"                result = {func_name}({', '.join(args)})\n"
+            "            else:\n"
+            f"                result = {func_name}(**test_input)\n"
+            "\n"
+            "            assert result is not None or result is None\n"
+            "        except (TypeError, ValueError) as expected_error:\n"
+            "\n"
+            "            pass\n"
+            "        except Exception as e:\n"
+            '            pytest.fail(f"Unexpected error with parameters: {e}")'
+        )

         return test_template

     async def _generate_error_handling_test(
         self, func: dict[str, Any], module_category: str
     ) -> str:
-        """Generate error handling test for a function."""
         func_name = func["name"]
         args = func.get("args", [])

-        test_template =
-    def test_{func_name}_error_handling(self)
-        """Test {func_name} error handling with invalid inputs."""
-
-        with pytest.raises((TypeError, ValueError, AttributeError))
-            {func_name}({self._generate_invalid_args(args)})
-
-
-        if len({args}) > 0
-            with pytest.raises((TypeError, ValueError))
-                {func_name}(
+        test_template = (
+            f"    def test_{func_name}_error_handling(self):\n"
+            f'        """Test {func_name} error handling with invalid inputs."""\n'
+            "\n"
+            "        with pytest.raises((TypeError, ValueError, AttributeError)):\n"
+            f"            {func_name}({self._generate_invalid_args(args)})\n"
+            "\n"
+            "\n"
+            f"        if len({args}) > 0:\n"
+            "            with pytest.raises((TypeError, ValueError)):\n"
+            f"                {func_name}("
+            f"{self._generate_edge_case_args(args, 'empty')})"
+        )

         return test_template

     async def _generate_edge_case_test(
         self, func: dict[str, Any], module_category: str
     ) -> str:
-        """Generate edge case test for complex functions."""
         func_name = func["name"]
         args = func.get("args", [])

-        test_template =
-    def test_{func_name}_edge_cases(self)
-        """Test {func_name} with edge case scenarios."""
-
-        edge_cases = [
-            {self._generate_edge_case_args(args,
-            {self._generate_edge_case_args(args,
-        ]
-
-        for edge_case in edge_cases
-            try
-                result = {func_name}(*edge_case)
-
-                assert result is not None or result is None
-            except (ValueError, TypeError)
-
-                pass
-            except Exception as e
-                pytest.fail(f"Unexpected error with edge case {
+        test_template = (
+            f"    def test_{func_name}_edge_cases(self):\n"
+            f'        """Test {func_name} with edge case scenarios."""\n'
+            "\n"
+            "        edge_cases = [\n"
+            f"            {self._generate_edge_case_args(args, 'boundary')},\n"
+            f"            {self._generate_edge_case_args(args, 'extreme')},\n"
+            "        ]\n"
+            "\n"
+            "        for edge_case in edge_cases:\n"
+            "            try:\n"
+            f"                result = {func_name}(*edge_case)\n"
+            "\n"
+            "                assert result is not None or result is None\n"
+            "            except (ValueError, TypeError):\n"
+            "\n"
+            "                pass\n"
+            "            except Exception as e:\n"
+            '                pytest.fail(f"Unexpected error with edge case {edge_case}: '
+            '{e}")'
+        )

         return test_template

     def _generate_test_parameters(self, args: list[str]) -> str:
-
-        if not args or len(args) > 5:  # Limit complexity
+        if not args or len(args) > 5:
             return ""

-        # Simple parameter generation
         param_names = ", ".join(f'"{arg}"' for arg in args)
         param_values = []

-        # Generate a few test cases
         for i in range(min(3, len(args))):
             test_case = []
             for arg in args:
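The parametrized builder splices whatever `_generate_test_parameters` returns straight into a literal `@pytest.mark.parametrize(...)` line; per the `return f"[{param_names}], [{', '.join(param_values)}]"` visible in the following hunk, that string carries a quoted name list followed by a value list. A sketch of the resulting decorator text for two hypothetical arguments (the concrete placeholder values are assumptions):

    # Assumed placeholder values; the agent derives them per argument name.
    test_cases = '["width", "height"], [(0, 0), (1, 1), (2, 2)]'
    parametrize_decorator = f"@pytest.mark.parametrize({test_cases})"
    print(parametrize_decorator)
    # @pytest.mark.parametrize(["width", "height"], [(0, 0), (1, 1), (2, 2)])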
@@ -1768,7 +1653,6 @@ from {module_name} import {func_info["name"]}
         return f"[{param_names}], [{', '.join(param_values)}]"

     def _generate_smart_default_args(self, args: list[str]) -> str:
-        """Generate smarter default arguments based on argument names."""
         if not args or args == ["self"]:
             return ""

@@ -1782,17 +1666,15 @@ from {module_name} import {func_info["name"]}
         return ", ".join(placeholders)

     def _filter_args(self, args: list[str]) -> list[str]:
-        """Filter out 'self' parameter from arguments."""
         return [arg for arg in args if arg != "self"]

     def _generate_placeholder_for_arg(self, arg: str) -> str:
-        """Generate a placeholder value for a single argument based on its name."""
         arg_lower = arg.lower()

         if self._is_path_arg(arg_lower):
             return 'Path("test_file.txt")'
         elif self._is_url_arg(arg_lower):
-            return '"https
+            return '"https: //example.com"'
         elif self._is_email_arg(arg_lower):
             return '"test@example.com"'
         elif self._is_id_arg(arg_lower):
@@ -1812,54 +1694,44 @@ from {module_name} import {func_info["name"]}
         return '"test"'

     def _is_path_arg(self, arg_lower: str) -> bool:
-        """Check if argument is path-related."""
         return any(term in arg_lower for term in ("path", "file"))

     def _is_url_arg(self, arg_lower: str) -> bool:
-        """Check if argument is URL-related."""
         return any(term in arg_lower for term in ("url", "uri"))

     def _is_email_arg(self, arg_lower: str) -> bool:
-        """Check if argument is email-related."""
         return any(term in arg_lower for term in ("email", "mail"))

     def _is_id_arg(self, arg_lower: str) -> bool:
-        """Check if argument is ID-related."""
         return any(term in arg_lower for term in ("id", "uuid"))

     def _is_name_arg(self, arg_lower: str) -> bool:
-        """Check if argument is name-related."""
         return any(term in arg_lower for term in ("name", "title"))

     def _is_numeric_arg(self, arg_lower: str) -> bool:
-        """Check if argument is numeric-related."""
         return any(term in arg_lower for term in ("count", "size", "number", "num"))

     def _is_boolean_arg(self, arg_lower: str) -> bool:
-        """Check if argument is boolean-related."""
         return any(term in arg_lower for term in ("enable", "flag", "is_", "has_"))

     def _is_text_arg(self, arg_lower: str) -> bool:
-        """Check if argument is text-related."""
         return any(term in arg_lower for term in ("data", "content", "text"))

     def _is_list_arg(self, arg_lower: str) -> bool:
-
-        return any(term in arg_lower for term in ("list", "items"))
+        return any(term in arg_lower for term in ("list[t.Any]", "items"))

     def _is_dict_arg(self, arg_lower: str) -> bool:
-
-
+        return any(
+            term in arg_lower for term in ("dict[str, t.Any]", "config", "options")
+        )

     def _generate_invalid_args(self, args: list[str]) -> str:
-        """Generate invalid arguments for error testing."""
         filtered_args = [arg for arg in args if arg != "self"]
         if not filtered_args:
             return ""
         return ", ".join(["None"] * len(filtered_args))

     def _generate_edge_case_args(self, args: list[str], case_type: str) -> str:
-        """Generate edge case arguments."""
         filtered_args = self._filter_args(args)
         if not filtered_args:
             return ""
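One side effect worth noting in the hunk above: the term tuples in `_is_list_arg` and `_is_dict_arg` changed from plain `"list"`/`"dict"` to `"list[t.Any]"`/`"dict[str, t.Any]"`. Those literals are matched against lower-cased argument names, and Python identifiers cannot contain brackets or commas, so after this change the helpers can only ever match on their remaining terms. A self-contained check:

    def is_list_arg(arg_lower: str) -> bool:
        # The 0.33.1 version; "list[t.Any]" can never appear in a lower-cased identifier.
        return any(term in arg_lower for term in ("list[t.Any]", "items"))

    assert is_list_arg("my_list") is False  # matched in 0.33.0 via the old "list" term
    assert is_list_arg("items") is True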
@@ -1872,24 +1744,22 @@ from {module_name} import {func_info["name"]}
     def _generate_placeholders_by_case_type(
         self, filtered_args: list[str], case_type: str
     ) -> list[str]:
-        """Generate placeholders based on case type."""
         if case_type == "empty":
             return self._generate_empty_case_placeholders(filtered_args)
         elif case_type == "boundary":
             return self._generate_boundary_case_placeholders(filtered_args)
-
+
         return self._generate_extreme_case_placeholders(filtered_args)

     def _generate_empty_case_placeholders(self, filtered_args: list[str]) -> list[str]:
-        """Generate placeholders for empty case."""
         placeholders = []
         for arg in filtered_args:
             arg_lower = arg.lower()
             if any(term in arg_lower for term in ("str", "name", "text")):
                 placeholders.append('""')
-            elif any(term in arg_lower for term in ("list", "items")):
+            elif any(term in arg_lower for term in ("list[t.Any]", "items")):
                 placeholders.append("[]")
-            elif any(term in arg_lower for term in ("dict", "config")):
+            elif any(term in arg_lower for term in ("dict[str, t.Any]", "config")):
                 placeholders.append("{}")
             else:
                 placeholders.append("None")
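The case-type helpers pick literal edge values by sniffing argument names. A compact, runnable condensation of the empty-case mapping as it stands after this diff (carrying the same `list[t.Any]`/`dict[str, t.Any]` literal issue noted above):

    def empty_placeholder(arg: str) -> str:
        # Condensed from _generate_empty_case_placeholders in 0.33.1.
        arg_lower = arg.lower()
        if any(t in arg_lower for t in ("str", "name", "text")):
            return '""'
        if any(t in arg_lower for t in ("list[t.Any]", "items")):
            return "[]"
        if any(t in arg_lower for t in ("dict[str, t.Any]", "config")):
            return "{}"
        return "None"

    print([empty_placeholder(a) for a in ("name", "items", "config", "count")])
    # ['""', '[]', '{}', 'None']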
@@ -1898,14 +1768,13 @@ from {module_name} import {func_info["name"]}
     def _generate_boundary_case_placeholders(
         self, filtered_args: list[str]
     ) -> list[str]:
-        """Generate placeholders for boundary case."""
         placeholders = []
         for arg in filtered_args:
             arg_lower = arg.lower()
             if any(term in arg_lower for term in ("count", "size", "number")):
                 placeholders.append("0")
             elif any(term in arg_lower for term in ("str", "name")):
-                placeholders.append('"x" * 1000')
+                placeholders.append('"x" * 1000')
             else:
                 placeholders.append("None")
         return placeholders
@@ -1913,7 +1782,6 @@ from {module_name} import {func_info["name"]}
     def _generate_extreme_case_placeholders(
         self, filtered_args: list[str]
     ) -> list[str]:
-        """Generate placeholders for extreme case."""
         placeholders = []
         for arg in filtered_args:
             arg_lower = arg.lower()
@@ -1926,8 +1794,6 @@ from {module_name} import {func_info["name"]}
     async def _generate_enhanced_class_tests(
         self, classes: list[dict[str, Any]], module_category: str
     ) -> str:
-        """Generate enhanced test methods for classes with fixtures and comprehensive
-        coverage."""
         if not classes:
             return ""

@@ -1941,7 +1807,6 @@ from {module_name} import {func_info["name"]}
     async def _generate_all_class_test_components(
         self, classes: list[dict[str, Any]], module_category: str
     ) -> dict[str, list[str]]:
-        """Generate all test components for classes."""
         fixtures = []
         test_methods = []

@@ -1957,17 +1822,14 @@ from {module_name} import {func_info["name"]}
     async def _generate_single_class_test_components(
         self, cls: dict[str, Any], module_category: str
     ) -> dict[str, list[str]]:
-        """Generate test components for a single class."""
         fixtures = []
         test_methods = []
         methods = cls.get("methods", [])

-        # Generate fixture for class instantiation
         fixture = await self._generate_class_fixture(cls, module_category)
         if fixture:
             fixtures.append(fixture)

-        # Generate core tests for the class
         core_tests = await self._generate_core_class_tests(
             cls, methods, module_category
         )
@@ -1978,22 +1840,18 @@ from {module_name} import {func_info["name"]}
     async def _generate_core_class_tests(
         self, cls: dict[str, Any], methods: list[str], module_category: str
     ) -> list[str]:
-        """Generate core tests for a class."""
         test_methods = []

-        # Basic class instantiation test
         instantiation_test = await self._generate_class_instantiation_test(
             cls, module_category
         )
         test_methods.append(instantiation_test)

-        # Generate tests for public methods (limit for performance)
         method_tests = await self._generate_method_tests(
             cls, methods[:5], module_category
         )
         test_methods.extend(method_tests)

-        # Generate property tests if applicable
         property_test = await self._generate_class_property_test(cls, module_category)
         if property_test:
             test_methods.append(property_test)
@@ -2003,7 +1861,6 @@ from {module_name} import {func_info["name"]}
     async def _generate_method_tests(
         self, cls: dict[str, Any], methods: list[str], module_category: str
     ) -> list[str]:
-        """Generate tests for class methods."""
         method_tests = []
         for method in methods:
             method_test = await self._generate_class_method_test(
@@ -2015,7 +1872,6 @@ from {module_name} import {func_info["name"]}
     def _combine_class_test_elements(
         self, fixtures: list[str], test_methods: list[str]
     ) -> str:
-        """Combine fixtures and test methods into a single string."""
         fixture_section = "\n".join(fixtures) if fixtures else ""
         test_section = "\n".join(test_methods)
         return fixture_section + test_section
@@ -2023,51 +1879,50 @@ from {module_name} import {func_info["name"]}
     async def _generate_class_fixture(
         self, cls: dict[str, Any], module_category: str
     ) -> str:
-        """Generate pytest fixture for class instantiation."""
         class_name = cls["name"]

         if module_category in ("service", "manager", "core"):
-
-
-
-
-
-
-
-
-
-
-
-instance
-
+            fixture_template = (
+                "    @pytest.fixture\n"
+                f"    def {class_name.lower()}_instance(self):\n"
+                f'        """Fixture to create {class_name} instance for testing."""\n'
+                "\n"
+                "        try:\n"
+                f"            return {class_name}()\n"
+                "        except TypeError:\n"
+                "\n"
+                f"            with patch.object({class_name}, '__init__', return_value=None):\n"
+                f"                instance = {class_name}.__new__({class_name})\n"
+                "                return instance"
+            )

         elif module_category == "agent":
-
-
-
-
-
-
-mock_context =
-mock_context.
-mock_context.
-
-
-
-
-
-
+            fixture_template = (
+                "    @pytest.fixture\n"
+                f"    def {class_name.lower()}_instance(self):\n"
+                f'        """Fixture to create {class_name} instance for testing."""\n'
+                "\n"
+                "        mock_context = Mock(spec=AgentContext)\n"
+                '        mock_context.project_path = Path("/test/project")\n'
+                '        mock_context.get_file_content = Mock(return_value="# test content")\n'
+                "        mock_context.write_file_content = Mock(return_value=True)\n"
+                "\n"
+                "        try:\n"
+                f"            return {class_name}(mock_context)\n"
+                "        except Exception:\n"
+                '            pytest.skip("Agent requires specific context configuration")'
+            )

         else:
-
-
-
-
-"
-
-
-
-
+            fixture_template = (
+                "    @pytest.fixture\n"
+                f"    def {class_name.lower()}_instance(self):\n"
+                f'        """Fixture to create {class_name} instance for testing."""\n'
+                "        try:\n"
+                f"            return {class_name}()\n"
+                "        except TypeError:\n"
+                '            pytest.skip("Class requires specific constructor arguments")'
+            )

         return fixture_template

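The service/manager/core branch above generates a fixture that first tries a zero-argument constructor and, on `TypeError`, allocates the object without running `__init__` at all. A self-contained sketch of that fallback, using a hypothetical `CacheService` whose constructor requires an argument:

    from unittest.mock import patch

    class CacheService:  # hypothetical class; its __init__ needs an argument
        def __init__(self, backend):
            self.backend = backend

    try:
        instance = CacheService()
    except TypeError:
        # Same trick as the generated fixture: __new__ allocates without calling
        # __init__; patching __init__ mirrors the template, though __new__ alone
        # already skips initialization.
        with patch.object(CacheService, "__init__", return_value=None):
            instance = CacheService.__new__(CacheService)

    assert isinstance(instance, CacheService)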
@@ -2075,25 +1930,23 @@ from {module_name} import {func_info["name"]}
     async def _generate_class_instantiation_test(
         self, class_info: dict[str, Any], module_category: str
     ) -> str:
-        """Generate class instantiation test."""
         class_name = class_info["name"]

-        test_template =
-    def test_{class_name.lower()}_instantiation(self, {class_name.lower()}_instance)
-        """Test successful instantiation of {class_name}."""
-        assert {class_name.lower()}_instance is not None
-        assert isinstance({class_name.lower()}_instance, {class_name})
-
-
-        assert
-
+        test_template = (
+            f"    def test_{class_name.lower()}_instantiation(self, {class_name.lower()}_instance):\n"
+            f'        """Test successful instantiation of {class_name}."""\n'
+            f"        assert {class_name.lower()}_instance is not None\n"
+            f"        assert isinstance({class_name.lower()}_instance, {class_name})\n"
+            "\n"
+            f"        assert hasattr({class_name.lower()}_instance, '__class__')\n"
+            f'        assert {class_name.lower()}_instance.__class__.__name__ == "{class_name}"'
+        )

         return test_template

     async def _generate_class_method_test(
         self, cls: dict[str, Any], method_name: str, module_category: str
     ) -> str:
-        """Generate test for a class method."""
         class_name = cls["name"]

         if self._is_special_agent_method(module_category, method_name):
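To make the f-string assembly above concrete, here is how the instantiation template renders for a hypothetical class name (runnable as-is; only the rendering step is shown, not execution of the generated test):

    class_name = "ReportService"  # hypothetical
    test_template = (
        f"    def test_{class_name.lower()}_instantiation(self, {class_name.lower()}_instance):\n"
        f'        """Test successful instantiation of {class_name}."""\n'
        f"        assert {class_name.lower()}_instance is not None\n"
        f"        assert isinstance({class_name.lower()}_instance, {class_name})"
    )
    print(test_template)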
@@ -2103,14 +1956,12 @@ from {module_name} import {func_info["name"]}
         return self._generate_default_method_test(class_name, method_name)

     def _is_special_agent_method(self, module_category: str, method_name: str) -> bool:
-        """Check if this is a special agent method requiring custom test logic."""
         return module_category == "agent" and method_name in (
             "can_handle",
             "analyze_and_fix",
         )

     def _generate_agent_method_test(self, class_name: str, method_name: str) -> str:
-        """Generate test for special agent methods."""
         if method_name == "can_handle":
             return self._generate_can_handle_test(class_name)
         elif method_name == "analyze_and_fix":
@@ -2118,126 +1969,126 @@ from {module_name} import {func_info["name"]}
         return self._generate_generic_agent_method_test(class_name, method_name)

     def _generate_can_handle_test(self, class_name: str) -> str:
-
-
-
-
-
-
-mock_issue =
-mock_issue.
-mock_issue.
-
-
-result
-assert
-
+        return (
+            "    @pytest.mark.asyncio\n"
+            f"    async def test_{class_name.lower()}_can_handle(self, {class_name.lower()}_instance):\n"
+            f'        """Test {class_name}.can_handle method."""\n'
+            "\n"
+            "        mock_issue = Mock(spec=Issue)\n"
+            "        mock_issue.type = IssueType.COVERAGE_IMPROVEMENT\n"
+            '        mock_issue.message = "test coverage issue"\n'
+            '        mock_issue.file_path = "/test/path.py"\n'
+            "\n"
+            f"        result = await {class_name.lower()}_instance.can_handle(mock_issue)\n"
+            "        assert isinstance(result, (int, float))\n"
+            "        assert 0.0 <= result <= 1.0"
+        )

     def _generate_analyze_and_fix_test(self, class_name: str) -> str:
-
-
-
-
-
-
-mock_issue =
-mock_issue.
-mock_issue.
-
-
-result
-assert
-assert hasattr(result, '
-
+        return (
+            "    @pytest.mark.asyncio\n"
+            f"    async def test_{class_name.lower()}_analyze_and_fix(self, {class_name.lower()}_instance):\n"
+            f'        """Test {class_name}.analyze_and_fix method."""\n'
+            "\n"
+            "        mock_issue = Mock(spec=Issue)\n"
+            "        mock_issue.type = IssueType.COVERAGE_IMPROVEMENT\n"
+            '        mock_issue.message = "test coverage issue"\n'
+            '        mock_issue.file_path = "/test/path.py"\n'
+            "\n"
+            f"        result = await {class_name.lower()}_instance.analyze_and_fix(mock_issue)\n"
+            "        assert isinstance(result, FixResult)\n"
+            "        assert hasattr(result, 'success')\n"
+            "        assert hasattr(result, 'confidence')"
+        )

     def _generate_generic_agent_method_test(
         self, class_name: str, method_name: str
     ) -> str:
-
-
-
-
-"
-
-
-assert method is not None,
-
-
-if asyncio.iscoroutinefunction(method)
-    result = await method()
-else
-    result = method()
-
-assert result is not None or result is None
-except (TypeError, NotImplementedError)
-    pytest.skip(f"Method {method_name} requires specific arguments")
-except Exception as e
-    pytest.fail(f"Unexpected error in {method_name}: {{e}}")'
+        return (
+            "    @pytest.mark.asyncio\n"
+            f"    async def test_{class_name.lower()}_{method_name}(self, {class_name.lower()}_instance):\n"
+            f'        """Test {class_name}.{method_name} method."""\n'
+            "        try:\n"
+            f"            method = getattr({class_name.lower()}_instance, "
+            f'"{method_name}", None)\n'
+            f"            assert method is not None, "
+            f'f"Method {method_name} should exist"\n'
+            "\n"
+            "            if asyncio.iscoroutinefunction(method):\n"
+            "                result = await method()\n"
+            "            else:\n"
+            "                result = method()\n"
+            "\n"
+            "            assert result is not None or result is None\n"
+            "        except (TypeError, NotImplementedError):\n"
+            f'            pytest.skip(f"Method {method_name} requires specific arguments")\n'
+            "        except Exception as e:\n"
+            f'            pytest.fail(f"Unexpected error in {method_name}: {{e}}")'
+        )

     def _generate_async_method_test(self, class_name: str, method_name: str) -> str:
-
-
-
-
-"
-
-
-assert method is not None,
-
-
-if asyncio.iscoroutinefunction(method)
-    result = await method()
-else
-    result = method()
-
-
-
-
-
-
-
+        return (
+            "    @pytest.mark.asyncio\n"
+            f"    async def test_{class_name.lower()}_{method_name}(self, {class_name.lower()}_instance):\n"
+            f'        """Test {class_name}.{method_name} method."""\n'
+            "        try:\n"
+            f"            method = getattr({class_name.lower()}_instance, "
+            f'"{method_name}", None)\n'
+            f"            assert method is not None, "
+            f'f"Method {method_name} should exist"\n'
+            "\n"
+            "            if asyncio.iscoroutinefunction(method):\n"
+            "                result = await method()\n"
+            "            else:\n"
+            "                result = method()\n"
+            "\n"
+            "            assert result is not None or result is None\n"
+            "\n"
+            "        except (TypeError, NotImplementedError):\n"
+            f'            pytest.skip(f"Method {method_name} requires specific arguments or implementation")\n'
+            "        except Exception as e:\n"
+            f'            pytest.fail(f"Unexpected error in {method_name}: {{e}}")'
+        )

     def _generate_default_method_test(self, class_name: str, method_name: str) -> str:
-
-
-
-"
-
-
-assert method is not None,
-
-
-    result = method()
-    assert result is not None or result is None
-
-except (TypeError, NotImplementedError)
-    pytest.skip(f"Method {method_name} requires specific arguments or implementation")
-except Exception as e
-    pytest.fail(f"Unexpected error in {method_name}: {{e}}")'
+        return (
+            f"    def test_{class_name.lower()}_{method_name}(self, {class_name.lower()}_instance):\n"
+            f'        """Test {class_name}.{method_name} method."""\n'
+            "        try:\n"
+            f"            method = getattr({class_name.lower()}_instance, "
+            f'"{method_name}", None)\n'
+            f"            assert method is not None, "
+            f'f"Method {method_name} should exist"\n'
+            "\n"
+            "            result = method()\n"
+            "            assert result is not None or result is None\n"
+            "\n"
+            "        except (TypeError, NotImplementedError):\n"
+            f'            pytest.skip(f"Method {method_name} requires specific arguments or implementation")\n'
+            "        except Exception as e:\n"
+            f'            pytest.fail(f"Unexpected error in {method_name}: {{e}}")'
+        )

     async def _generate_class_property_test(
         self, cls: dict[str, Any], module_category: str
     ) -> str:
-        """Generate test for class properties."""
         class_name = cls["name"]

-        # Only generate property tests for certain module categories
         if module_category not in ("service", "manager", "agent"):
             return ""

-        test_template =
-    def test_{class_name.lower()}_properties(self, {class_name.lower()}_instance)
-        """Test {class_name} properties and attributes."""
-
-        assert hasattr({class_name.lower()}_instance, '__dict__') or
-            hasattr({class_name.lower()}_instance, '__slots__')
-
-
-        str_repr
-        assert
-
-
+        test_template = (
+            f"    def test_{class_name.lower()}_properties(self, {class_name.lower()}_instance):\n"
+            f'        """Test {class_name} properties and attributes."""\n'
+            "\n"
+            f"        assert hasattr({class_name.lower()}_instance, '__dict__') or \\\n"
+            f"            hasattr({class_name.lower()}_instance, '__slots__')\n"
+            "\n"
+            f"        str_repr = str({class_name.lower()}_instance)\n"
+            "        assert len(str_repr) > 0\n"
+            f'        assert "{class_name}" in str_repr or "{class_name.lower()}" in \\\n'
+            "            str_repr.lower()"
+        )

         return test_template

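All three method-test templates above share the same call pattern: look the method up with `getattr`, then await it only if `asyncio.iscoroutinefunction` says so. A self-contained sketch of that dispatch, using a hypothetical class with one sync and one async method:

    import asyncio

    class Sample:  # hypothetical stand-in for the instance under test
        def ping(self):
            return "pong"

        async def fetch(self):
            return "data"

    async def call_either(obj, name: str):
        # Same dispatch as the generated tests: getattr, then await only for coroutine functions.
        method = getattr(obj, name, None)
        assert method is not None, f"Method {name} should exist"
        if asyncio.iscoroutinefunction(method):
            return await method()
        return method()

    print(asyncio.run(call_either(Sample(), "ping")))   # pong
    print(asyncio.run(call_either(Sample(), "fetch")))  # data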
@@ -2248,52 +2099,44 @@ from {module_name} import {func_info["name"]}
         classes: list[dict[str, Any]],
         module_category: str,
     ) -> str:
-        """Generate integration tests for certain module types."""
         if module_category not in ("service", "manager", "core"):
             return ""

-        # Only generate integration tests for modules with sufficient complexity
         if len(functions) < 3 and len(classes) < 2:
             return ""

-        integration_tests =
-
-
-
-
-
-
-
-
-
-
-
-"
-
-
-
-
-
-
-
-        # TODO: Add performance benchmarks if applicable
-        # Consider timing critical operations
-        pytest.skip("Performance test needs manual implementation")'''
+        integration_tests = (
+            "\n\n"
+            "    @pytest.mark.integration\n"
+            f"    def test_{module_file.stem}_integration(self):\n"
+            f'        """Integration test for {module_file.stem} module functionality."""\n'
+            "\n"
+            '        pytest.skip("Integration test needs manual implementation")\n'
+            "\n"
+            "    @pytest.mark.integration\n"
+            "    @pytest.mark.asyncio\n"
+            f"    async def test_{module_file.stem}_async_integration(self):\n"
+            f'        """Async integration test for {module_file.stem} module."""\n'
+            "\n"
+            '        pytest.skip("Async integration test needs manual implementation")\n'
+            "\n"
+            "    @pytest.mark.performance\n"
+            f"    def test_{module_file.stem}_performance(self):\n"
+            f'        """Basic performance test for {module_file.stem} module."""\n'
+            "\n"
+            '        pytest.skip("Performance test needs manual implementation")'
+        )

         return integration_tests

     def _generate_default_args(self, args: list[str]) -> str:
-        """Generate default arguments for function calls."""
         if not args or args == ["self"]:
             return ""

-        # Filter out 'self' parameter
         filtered_args = [arg for arg in args if arg != "self"]
         if not filtered_args:
             return ""

-        # Generate placeholder arguments
         placeholders = []
         for arg in filtered_args:
             if "path" in arg.lower():