crackerjack 0.37.9__py3-none-any.whl → 0.45.2__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- crackerjack/README.md +19 -0
- crackerjack/__init__.py +30 -1
- crackerjack/__main__.py +342 -1263
- crackerjack/adapters/README.md +18 -0
- crackerjack/adapters/__init__.py +27 -5
- crackerjack/adapters/_output_paths.py +167 -0
- crackerjack/adapters/_qa_adapter_base.py +309 -0
- crackerjack/adapters/_tool_adapter_base.py +706 -0
- crackerjack/adapters/ai/README.md +65 -0
- crackerjack/adapters/ai/__init__.py +5 -0
- crackerjack/adapters/ai/claude.py +853 -0
- crackerjack/adapters/complexity/README.md +53 -0
- crackerjack/adapters/complexity/__init__.py +10 -0
- crackerjack/adapters/complexity/complexipy.py +641 -0
- crackerjack/adapters/dependency/__init__.py +22 -0
- crackerjack/adapters/dependency/pip_audit.py +418 -0
- crackerjack/adapters/format/README.md +72 -0
- crackerjack/adapters/format/__init__.py +11 -0
- crackerjack/adapters/format/mdformat.py +313 -0
- crackerjack/adapters/format/ruff.py +516 -0
- crackerjack/adapters/lint/README.md +47 -0
- crackerjack/adapters/lint/__init__.py +11 -0
- crackerjack/adapters/lint/codespell.py +273 -0
- crackerjack/adapters/lsp/README.md +49 -0
- crackerjack/adapters/lsp/__init__.py +27 -0
- crackerjack/adapters/{rust_tool_manager.py → lsp/_manager.py} +3 -3
- crackerjack/adapters/{skylos_adapter.py → lsp/skylos.py} +59 -7
- crackerjack/adapters/{zuban_adapter.py → lsp/zuban.py} +3 -6
- crackerjack/adapters/refactor/README.md +59 -0
- crackerjack/adapters/refactor/__init__.py +12 -0
- crackerjack/adapters/refactor/creosote.py +318 -0
- crackerjack/adapters/refactor/refurb.py +406 -0
- crackerjack/adapters/refactor/skylos.py +494 -0
- crackerjack/adapters/sast/README.md +132 -0
- crackerjack/adapters/sast/__init__.py +32 -0
- crackerjack/adapters/sast/_base.py +201 -0
- crackerjack/adapters/sast/bandit.py +423 -0
- crackerjack/adapters/sast/pyscn.py +405 -0
- crackerjack/adapters/sast/semgrep.py +241 -0
- crackerjack/adapters/security/README.md +111 -0
- crackerjack/adapters/security/__init__.py +17 -0
- crackerjack/adapters/security/gitleaks.py +339 -0
- crackerjack/adapters/type/README.md +52 -0
- crackerjack/adapters/type/__init__.py +12 -0
- crackerjack/adapters/type/pyrefly.py +402 -0
- crackerjack/adapters/type/ty.py +402 -0
- crackerjack/adapters/type/zuban.py +522 -0
- crackerjack/adapters/utility/README.md +51 -0
- crackerjack/adapters/utility/__init__.py +10 -0
- crackerjack/adapters/utility/checks.py +884 -0
- crackerjack/agents/README.md +264 -0
- crackerjack/agents/__init__.py +40 -12
- crackerjack/agents/base.py +1 -0
- crackerjack/agents/claude_code_bridge.py +641 -0
- crackerjack/agents/coordinator.py +49 -53
- crackerjack/agents/dry_agent.py +187 -3
- crackerjack/agents/enhanced_coordinator.py +279 -0
- crackerjack/agents/enhanced_proactive_agent.py +185 -0
- crackerjack/agents/error_middleware.py +53 -0
- crackerjack/agents/formatting_agent.py +6 -8
- crackerjack/agents/helpers/__init__.py +9 -0
- crackerjack/agents/helpers/performance/__init__.py +22 -0
- crackerjack/agents/helpers/performance/performance_ast_analyzer.py +357 -0
- crackerjack/agents/helpers/performance/performance_pattern_detector.py +909 -0
- crackerjack/agents/helpers/performance/performance_recommender.py +572 -0
- crackerjack/agents/helpers/refactoring/__init__.py +22 -0
- crackerjack/agents/helpers/refactoring/code_transformer.py +536 -0
- crackerjack/agents/helpers/refactoring/complexity_analyzer.py +344 -0
- crackerjack/agents/helpers/refactoring/dead_code_detector.py +437 -0
- crackerjack/agents/helpers/test_creation/__init__.py +19 -0
- crackerjack/agents/helpers/test_creation/test_ast_analyzer.py +216 -0
- crackerjack/agents/helpers/test_creation/test_coverage_analyzer.py +643 -0
- crackerjack/agents/helpers/test_creation/test_template_generator.py +1031 -0
- crackerjack/agents/performance_agent.py +121 -1152
- crackerjack/agents/refactoring_agent.py +156 -655
- crackerjack/agents/semantic_agent.py +479 -0
- crackerjack/agents/semantic_helpers.py +356 -0
- crackerjack/agents/test_creation_agent.py +19 -1605
- crackerjack/api.py +5 -7
- crackerjack/cli/README.md +394 -0
- crackerjack/cli/__init__.py +1 -1
- crackerjack/cli/cache_handlers.py +23 -18
- crackerjack/cli/cache_handlers_enhanced.py +1 -4
- crackerjack/cli/facade.py +70 -8
- crackerjack/cli/formatting.py +13 -0
- crackerjack/cli/handlers/__init__.py +85 -0
- crackerjack/cli/handlers/advanced.py +103 -0
- crackerjack/cli/handlers/ai_features.py +62 -0
- crackerjack/cli/handlers/analytics.py +479 -0
- crackerjack/cli/handlers/changelog.py +271 -0
- crackerjack/cli/handlers/config_handlers.py +16 -0
- crackerjack/cli/handlers/coverage.py +84 -0
- crackerjack/cli/handlers/documentation.py +280 -0
- crackerjack/cli/handlers/main_handlers.py +497 -0
- crackerjack/cli/handlers/monitoring.py +371 -0
- crackerjack/cli/handlers.py +249 -49
- crackerjack/cli/interactive.py +8 -5
- crackerjack/cli/options.py +203 -110
- crackerjack/cli/semantic_handlers.py +292 -0
- crackerjack/cli/version.py +19 -0
- crackerjack/code_cleaner.py +60 -24
- crackerjack/config/README.md +472 -0
- crackerjack/config/__init__.py +256 -0
- crackerjack/config/global_lock_config.py +191 -54
- crackerjack/config/hooks.py +188 -16
- crackerjack/config/loader.py +239 -0
- crackerjack/config/settings.py +141 -0
- crackerjack/config/tool_commands.py +331 -0
- crackerjack/core/README.md +393 -0
- crackerjack/core/async_workflow_orchestrator.py +79 -53
- crackerjack/core/autofix_coordinator.py +22 -9
- crackerjack/core/container.py +10 -9
- crackerjack/core/enhanced_container.py +9 -9
- crackerjack/core/performance.py +1 -1
- crackerjack/core/performance_monitor.py +5 -3
- crackerjack/core/phase_coordinator.py +1018 -634
- crackerjack/core/proactive_workflow.py +3 -3
- crackerjack/core/retry.py +275 -0
- crackerjack/core/service_watchdog.py +167 -23
- crackerjack/core/session_coordinator.py +187 -382
- crackerjack/core/timeout_manager.py +161 -44
- crackerjack/core/workflow/__init__.py +21 -0
- crackerjack/core/workflow/workflow_ai_coordinator.py +863 -0
- crackerjack/core/workflow/workflow_event_orchestrator.py +1107 -0
- crackerjack/core/workflow/workflow_issue_parser.py +714 -0
- crackerjack/core/workflow/workflow_phase_executor.py +1158 -0
- crackerjack/core/workflow/workflow_security_gates.py +400 -0
- crackerjack/core/workflow_orchestrator.py +1247 -953
- crackerjack/data/README.md +11 -0
- crackerjack/data/__init__.py +8 -0
- crackerjack/data/models.py +79 -0
- crackerjack/data/repository.py +210 -0
- crackerjack/decorators/README.md +180 -0
- crackerjack/decorators/__init__.py +35 -0
- crackerjack/decorators/error_handling.py +649 -0
- crackerjack/decorators/error_handling_decorators.py +334 -0
- crackerjack/decorators/helpers.py +58 -0
- crackerjack/decorators/patterns.py +281 -0
- crackerjack/decorators/utils.py +58 -0
- crackerjack/docs/README.md +11 -0
- crackerjack/docs/generated/api/CLI_REFERENCE.md +1 -1
- crackerjack/documentation/README.md +11 -0
- crackerjack/documentation/ai_templates.py +1 -1
- crackerjack/documentation/dual_output_generator.py +11 -9
- crackerjack/documentation/reference_generator.py +104 -59
- crackerjack/dynamic_config.py +52 -61
- crackerjack/errors.py +1 -1
- crackerjack/events/README.md +11 -0
- crackerjack/events/__init__.py +16 -0
- crackerjack/events/telemetry.py +175 -0
- crackerjack/events/workflow_bus.py +346 -0
- crackerjack/exceptions/README.md +301 -0
- crackerjack/exceptions/__init__.py +5 -0
- crackerjack/exceptions/config.py +4 -0
- crackerjack/exceptions/tool_execution_error.py +245 -0
- crackerjack/executors/README.md +591 -0
- crackerjack/executors/__init__.py +2 -0
- crackerjack/executors/async_hook_executor.py +539 -77
- crackerjack/executors/cached_hook_executor.py +3 -3
- crackerjack/executors/hook_executor.py +967 -102
- crackerjack/executors/hook_lock_manager.py +31 -22
- crackerjack/executors/individual_hook_executor.py +66 -32
- crackerjack/executors/lsp_aware_hook_executor.py +136 -57
- crackerjack/executors/progress_hook_executor.py +282 -0
- crackerjack/executors/tool_proxy.py +23 -7
- crackerjack/hooks/README.md +485 -0
- crackerjack/hooks/lsp_hook.py +8 -9
- crackerjack/intelligence/README.md +557 -0
- crackerjack/interactive.py +37 -10
- crackerjack/managers/README.md +369 -0
- crackerjack/managers/async_hook_manager.py +41 -57
- crackerjack/managers/hook_manager.py +449 -79
- crackerjack/managers/publish_manager.py +81 -36
- crackerjack/managers/test_command_builder.py +290 -12
- crackerjack/managers/test_executor.py +93 -8
- crackerjack/managers/test_manager.py +1082 -75
- crackerjack/managers/test_progress.py +118 -26
- crackerjack/mcp/README.md +374 -0
- crackerjack/mcp/cache.py +25 -2
- crackerjack/mcp/client_runner.py +35 -18
- crackerjack/mcp/context.py +9 -9
- crackerjack/mcp/dashboard.py +24 -8
- crackerjack/mcp/enhanced_progress_monitor.py +34 -23
- crackerjack/mcp/file_monitor.py +27 -6
- crackerjack/mcp/progress_components.py +45 -34
- crackerjack/mcp/progress_monitor.py +6 -9
- crackerjack/mcp/rate_limiter.py +11 -7
- crackerjack/mcp/server.py +2 -0
- crackerjack/mcp/server_core.py +187 -55
- crackerjack/mcp/service_watchdog.py +12 -9
- crackerjack/mcp/task_manager.py +2 -2
- crackerjack/mcp/tools/README.md +27 -0
- crackerjack/mcp/tools/__init__.py +2 -0
- crackerjack/mcp/tools/core_tools.py +75 -52
- crackerjack/mcp/tools/execution_tools.py +87 -31
- crackerjack/mcp/tools/intelligence_tools.py +2 -2
- crackerjack/mcp/tools/proactive_tools.py +1 -1
- crackerjack/mcp/tools/semantic_tools.py +584 -0
- crackerjack/mcp/tools/utility_tools.py +180 -132
- crackerjack/mcp/tools/workflow_executor.py +87 -46
- crackerjack/mcp/websocket/README.md +31 -0
- crackerjack/mcp/websocket/app.py +11 -1
- crackerjack/mcp/websocket/event_bridge.py +188 -0
- crackerjack/mcp/websocket/jobs.py +27 -4
- crackerjack/mcp/websocket/monitoring/__init__.py +25 -0
- crackerjack/mcp/websocket/monitoring/api/__init__.py +19 -0
- crackerjack/mcp/websocket/monitoring/api/dependencies.py +141 -0
- crackerjack/mcp/websocket/monitoring/api/heatmap.py +154 -0
- crackerjack/mcp/websocket/monitoring/api/intelligence.py +199 -0
- crackerjack/mcp/websocket/monitoring/api/metrics.py +203 -0
- crackerjack/mcp/websocket/monitoring/api/telemetry.py +101 -0
- crackerjack/mcp/websocket/monitoring/dashboard.py +18 -0
- crackerjack/mcp/websocket/monitoring/factory.py +109 -0
- crackerjack/mcp/websocket/monitoring/filters.py +10 -0
- crackerjack/mcp/websocket/monitoring/metrics.py +64 -0
- crackerjack/mcp/websocket/monitoring/models.py +90 -0
- crackerjack/mcp/websocket/monitoring/utils.py +171 -0
- crackerjack/mcp/websocket/monitoring/websocket_manager.py +78 -0
- crackerjack/mcp/websocket/monitoring/websockets/__init__.py +17 -0
- crackerjack/mcp/websocket/monitoring/websockets/dependencies.py +126 -0
- crackerjack/mcp/websocket/monitoring/websockets/heatmap.py +176 -0
- crackerjack/mcp/websocket/monitoring/websockets/intelligence.py +291 -0
- crackerjack/mcp/websocket/monitoring/websockets/metrics.py +291 -0
- crackerjack/mcp/websocket/monitoring_endpoints.py +16 -2930
- crackerjack/mcp/websocket/server.py +1 -3
- crackerjack/mcp/websocket/websocket_handler.py +107 -6
- crackerjack/models/README.md +308 -0
- crackerjack/models/__init__.py +10 -1
- crackerjack/models/config.py +639 -22
- crackerjack/models/config_adapter.py +6 -6
- crackerjack/models/protocols.py +1167 -23
- crackerjack/models/pydantic_models.py +320 -0
- crackerjack/models/qa_config.py +145 -0
- crackerjack/models/qa_results.py +134 -0
- crackerjack/models/results.py +35 -0
- crackerjack/models/semantic_models.py +258 -0
- crackerjack/models/task.py +19 -3
- crackerjack/models/test_models.py +60 -0
- crackerjack/monitoring/README.md +11 -0
- crackerjack/monitoring/ai_agent_watchdog.py +5 -4
- crackerjack/monitoring/metrics_collector.py +4 -3
- crackerjack/monitoring/regression_prevention.py +4 -3
- crackerjack/monitoring/websocket_server.py +4 -241
- crackerjack/orchestration/README.md +340 -0
- crackerjack/orchestration/__init__.py +43 -0
- crackerjack/orchestration/advanced_orchestrator.py +20 -67
- crackerjack/orchestration/cache/README.md +312 -0
- crackerjack/orchestration/cache/__init__.py +37 -0
- crackerjack/orchestration/cache/memory_cache.py +338 -0
- crackerjack/orchestration/cache/tool_proxy_cache.py +340 -0
- crackerjack/orchestration/config.py +297 -0
- crackerjack/orchestration/coverage_improvement.py +13 -6
- crackerjack/orchestration/execution_strategies.py +6 -6
- crackerjack/orchestration/hook_orchestrator.py +1398 -0
- crackerjack/orchestration/strategies/README.md +401 -0
- crackerjack/orchestration/strategies/__init__.py +39 -0
- crackerjack/orchestration/strategies/adaptive_strategy.py +630 -0
- crackerjack/orchestration/strategies/parallel_strategy.py +237 -0
- crackerjack/orchestration/strategies/sequential_strategy.py +299 -0
- crackerjack/orchestration/test_progress_streamer.py +1 -1
- crackerjack/plugins/README.md +11 -0
- crackerjack/plugins/hooks.py +3 -2
- crackerjack/plugins/loader.py +3 -3
- crackerjack/plugins/managers.py +1 -1
- crackerjack/py313.py +191 -0
- crackerjack/security/README.md +11 -0
- crackerjack/services/README.md +374 -0
- crackerjack/services/__init__.py +8 -21
- crackerjack/services/ai/README.md +295 -0
- crackerjack/services/ai/__init__.py +7 -0
- crackerjack/services/ai/advanced_optimizer.py +878 -0
- crackerjack/services/{contextual_ai_assistant.py → ai/contextual_ai_assistant.py} +5 -3
- crackerjack/services/ai/embeddings.py +444 -0
- crackerjack/services/ai/intelligent_commit.py +328 -0
- crackerjack/services/ai/predictive_analytics.py +510 -0
- crackerjack/services/api_extractor.py +5 -3
- crackerjack/services/bounded_status_operations.py +45 -5
- crackerjack/services/cache.py +249 -318
- crackerjack/services/changelog_automation.py +7 -3
- crackerjack/services/command_execution_service.py +305 -0
- crackerjack/services/config_integrity.py +83 -39
- crackerjack/services/config_merge.py +9 -6
- crackerjack/services/config_service.py +198 -0
- crackerjack/services/config_template.py +13 -26
- crackerjack/services/coverage_badge_service.py +6 -4
- crackerjack/services/coverage_ratchet.py +53 -27
- crackerjack/services/debug.py +18 -7
- crackerjack/services/dependency_analyzer.py +4 -4
- crackerjack/services/dependency_monitor.py +13 -13
- crackerjack/services/documentation_generator.py +4 -2
- crackerjack/services/documentation_service.py +62 -33
- crackerjack/services/enhanced_filesystem.py +81 -27
- crackerjack/services/enterprise_optimizer.py +1 -1
- crackerjack/services/error_pattern_analyzer.py +10 -10
- crackerjack/services/file_filter.py +221 -0
- crackerjack/services/file_hasher.py +5 -7
- crackerjack/services/file_io_service.py +361 -0
- crackerjack/services/file_modifier.py +615 -0
- crackerjack/services/filesystem.py +80 -109
- crackerjack/services/git.py +99 -5
- crackerjack/services/health_metrics.py +4 -6
- crackerjack/services/heatmap_generator.py +12 -3
- crackerjack/services/incremental_executor.py +380 -0
- crackerjack/services/initialization.py +101 -49
- crackerjack/services/log_manager.py +2 -2
- crackerjack/services/logging.py +120 -68
- crackerjack/services/lsp_client.py +12 -12
- crackerjack/services/memory_optimizer.py +27 -22
- crackerjack/services/monitoring/README.md +30 -0
- crackerjack/services/monitoring/__init__.py +9 -0
- crackerjack/services/monitoring/dependency_monitor.py +678 -0
- crackerjack/services/monitoring/error_pattern_analyzer.py +676 -0
- crackerjack/services/monitoring/health_metrics.py +716 -0
- crackerjack/services/monitoring/metrics.py +587 -0
- crackerjack/services/{performance_benchmarks.py → monitoring/performance_benchmarks.py} +100 -14
- crackerjack/services/{performance_cache.py → monitoring/performance_cache.py} +21 -15
- crackerjack/services/{performance_monitor.py → monitoring/performance_monitor.py} +10 -6
- crackerjack/services/parallel_executor.py +166 -55
- crackerjack/services/patterns/__init__.py +142 -0
- crackerjack/services/patterns/agents.py +107 -0
- crackerjack/services/patterns/code/__init__.py +15 -0
- crackerjack/services/patterns/code/detection.py +118 -0
- crackerjack/services/patterns/code/imports.py +107 -0
- crackerjack/services/patterns/code/paths.py +159 -0
- crackerjack/services/patterns/code/performance.py +119 -0
- crackerjack/services/patterns/code/replacement.py +36 -0
- crackerjack/services/patterns/core.py +212 -0
- crackerjack/services/patterns/documentation/__init__.py +14 -0
- crackerjack/services/patterns/documentation/badges_markdown.py +96 -0
- crackerjack/services/patterns/documentation/comments_blocks.py +83 -0
- crackerjack/services/patterns/documentation/docstrings.py +89 -0
- crackerjack/services/patterns/formatting.py +226 -0
- crackerjack/services/patterns/operations.py +339 -0
- crackerjack/services/patterns/security/__init__.py +23 -0
- crackerjack/services/patterns/security/code_injection.py +122 -0
- crackerjack/services/patterns/security/credentials.py +190 -0
- crackerjack/services/patterns/security/path_traversal.py +221 -0
- crackerjack/services/patterns/security/unsafe_operations.py +216 -0
- crackerjack/services/patterns/templates.py +62 -0
- crackerjack/services/patterns/testing/__init__.py +18 -0
- crackerjack/services/patterns/testing/error_patterns.py +107 -0
- crackerjack/services/patterns/testing/pytest_output.py +126 -0
- crackerjack/services/patterns/tool_output/__init__.py +16 -0
- crackerjack/services/patterns/tool_output/bandit.py +72 -0
- crackerjack/services/patterns/tool_output/other.py +97 -0
- crackerjack/services/patterns/tool_output/pyright.py +67 -0
- crackerjack/services/patterns/tool_output/ruff.py +44 -0
- crackerjack/services/patterns/url_sanitization.py +114 -0
- crackerjack/services/patterns/utilities.py +42 -0
- crackerjack/services/patterns/utils.py +339 -0
- crackerjack/services/patterns/validation.py +46 -0
- crackerjack/services/patterns/versioning.py +62 -0
- crackerjack/services/predictive_analytics.py +21 -8
- crackerjack/services/profiler.py +280 -0
- crackerjack/services/quality/README.md +415 -0
- crackerjack/services/quality/__init__.py +11 -0
- crackerjack/services/quality/anomaly_detector.py +392 -0
- crackerjack/services/quality/pattern_cache.py +333 -0
- crackerjack/services/quality/pattern_detector.py +479 -0
- crackerjack/services/quality/qa_orchestrator.py +491 -0
- crackerjack/services/{quality_baseline.py → quality/quality_baseline.py} +163 -2
- crackerjack/services/{quality_baseline_enhanced.py → quality/quality_baseline_enhanced.py} +4 -1
- crackerjack/services/{quality_intelligence.py → quality/quality_intelligence.py} +180 -16
- crackerjack/services/regex_patterns.py +58 -2987
- crackerjack/services/regex_utils.py +55 -29
- crackerjack/services/secure_status_formatter.py +42 -15
- crackerjack/services/secure_subprocess.py +35 -2
- crackerjack/services/security.py +16 -8
- crackerjack/services/server_manager.py +40 -51
- crackerjack/services/smart_scheduling.py +46 -6
- crackerjack/services/status_authentication.py +3 -3
- crackerjack/services/thread_safe_status_collector.py +1 -0
- crackerjack/services/tool_filter.py +368 -0
- crackerjack/services/tool_version_service.py +9 -5
- crackerjack/services/unified_config.py +43 -351
- crackerjack/services/vector_store.py +689 -0
- crackerjack/services/version_analyzer.py +6 -4
- crackerjack/services/version_checker.py +14 -8
- crackerjack/services/zuban_lsp_service.py +5 -4
- crackerjack/slash_commands/README.md +11 -0
- crackerjack/slash_commands/init.md +2 -12
- crackerjack/slash_commands/run.md +84 -50
- crackerjack/tools/README.md +11 -0
- crackerjack/tools/__init__.py +30 -0
- crackerjack/tools/_git_utils.py +105 -0
- crackerjack/tools/check_added_large_files.py +139 -0
- crackerjack/tools/check_ast.py +105 -0
- crackerjack/tools/check_json.py +103 -0
- crackerjack/tools/check_jsonschema.py +297 -0
- crackerjack/tools/check_toml.py +103 -0
- crackerjack/tools/check_yaml.py +110 -0
- crackerjack/tools/codespell_wrapper.py +72 -0
- crackerjack/tools/end_of_file_fixer.py +202 -0
- crackerjack/tools/format_json.py +128 -0
- crackerjack/tools/mdformat_wrapper.py +114 -0
- crackerjack/tools/trailing_whitespace.py +198 -0
- crackerjack/tools/validate_regex_patterns.py +7 -3
- crackerjack/ui/README.md +11 -0
- crackerjack/ui/dashboard_renderer.py +28 -0
- crackerjack/ui/templates/README.md +11 -0
- crackerjack/utils/console_utils.py +13 -0
- crackerjack/utils/dependency_guard.py +230 -0
- crackerjack/utils/retry_utils.py +275 -0
- crackerjack/workflows/README.md +590 -0
- crackerjack/workflows/__init__.py +46 -0
- crackerjack/workflows/actions.py +811 -0
- crackerjack/workflows/auto_fix.py +444 -0
- crackerjack/workflows/container_builder.py +499 -0
- crackerjack/workflows/definitions.py +443 -0
- crackerjack/workflows/engine.py +177 -0
- crackerjack/workflows/event_bridge.py +242 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/METADATA +678 -98
- crackerjack-0.45.2.dist-info/RECORD +478 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/WHEEL +1 -1
- crackerjack/managers/test_manager_backup.py +0 -1075
- crackerjack/mcp/tools/execution_tools_backup.py +0 -1011
- crackerjack/mixins/__init__.py +0 -3
- crackerjack/mixins/error_handling.py +0 -145
- crackerjack/services/config.py +0 -358
- crackerjack/ui/server_panels.py +0 -125
- crackerjack-0.37.9.dist-info/RECORD +0 -231
- /crackerjack/adapters/{rust_tool_adapter.py → lsp/_base.py} +0 -0
- /crackerjack/adapters/{lsp_client.py → lsp/_client.py} +0 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/entry_points.txt +0 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/licenses/LICENSE +0 -0
crackerjack/agents/test_creation_agent.py
@@ -1,7 +1,3 @@
-import ast
-import json
-import operator
-from collections.abc import Callable
 from pathlib import Path
 from typing import Any
 
@@ -13,11 +9,17 @@ from .base import (
     SubAgent,
     agent_registry,
 )
+from .helpers.test_creation.test_ast_analyzer import TestASTAnalyzer
+from .helpers.test_creation.test_coverage_analyzer import TestCoverageAnalyzer
+from .helpers.test_creation.test_template_generator import TestTemplateGenerator
 
 
 class TestCreationAgent(SubAgent):
     def __init__(self, context: AgentContext) -> None:
         super().__init__(context)
+        self._ast_analyzer = TestASTAnalyzer(context)
+        self._template_generator = TestTemplateGenerator(context)
+        self._coverage_analyzer = TestCoverageAnalyzer(context)
 
     def get_supported_types(self) -> set[IssueType]:
         return {
@@ -515,711 +517,44 @@ class TestCreationAgent(SubAgent):
         )
 
     async def _analyze_coverage(self) -> dict[str, Any]:
-
-            coverage_data = await self._get_existing_coverage_data()
-            if coverage_data:
-                return coverage_data
-
-            returncode, _, stderr = await self._run_coverage_command()
-
-            if returncode != 0:
-                return self._handle_coverage_command_failure(stderr)
-
-            return await self._process_coverage_results_enhanced()
-
-        except Exception as e:
-            self.log(f"Coverage analysis error: {e}", "WARN")
-            return self._create_default_coverage_result()
-
-    async def _get_existing_coverage_data(self) -> dict[str, Any] | None:
-        try:
-            json_report = self.context.project_path / "coverage.json"
-            if json_report.exists():
-                content = self.context.get_file_content(json_report)
-                if content:
-                    coverage_json = json.loads(content)
-                    return self._parse_coverage_json(coverage_json)
-
-            coverage_file = self.context.project_path / ".coverage"
-            if coverage_file.exists():
-                return await self._process_coverage_results_enhanced()
-
-        except Exception as e:
-            self.log(f"Error reading existing coverage: {e}", "WARN")
-
-        return None
-
-    def _parse_coverage_json(self, coverage_json: dict[str, Any]) -> dict[str, Any]:
-        try:
-            totals = coverage_json.get("totals", {})
-            current_coverage = totals.get("percent_covered", 0) / 100.0
-
-            uncovered_modules = []
-            files = coverage_json.get("files", {})
-
-            for file_path, file_data in files.items():
-                if file_data.get("summary", {}).get("percent_covered", 100) < 80:
-                    rel_path = str(
-                        Path(file_path).relative_to(self.context.project_path)
-                    )
-                    uncovered_modules.append(rel_path)
-
-            return {
-                "below_threshold": current_coverage < 0.8,
-                "current_coverage": current_coverage,
-                "uncovered_modules": uncovered_modules[:15],
-                "missing_lines": totals.get("num_statements", 0)
-                - totals.get("covered_lines", 0),
-                "total_lines": totals.get("num_statements", 0),
-            }
-
-        except Exception as e:
-            self.log(f"Error parsing coverage JSON: {e}", "WARN")
-            return self._create_default_coverage_result()
-
-    async def _run_coverage_command(self) -> tuple[int, str, str]:
-        return await self.run_command(
-            [
-                "uv",
-                "run",
-                "python",
-                "-m",
-                "pytest",
-                "--cov=crackerjack",
-                "--cov-report=json",
-                "-q",
-            ],
-        )
-
-    def _handle_coverage_command_failure(self, stderr: str) -> dict[str, Any]:
-        self.log(f"Coverage analysis failed: {stderr}", "WARN")
-        return self._create_default_coverage_result()
-
-    async def _process_coverage_results_enhanced(self) -> dict[str, Any]:
-        coverage_file = self.context.project_path / ".coverage"
-        if not coverage_file.exists():
-            return self._create_default_coverage_result()
-
-        uncovered_modules = await self._find_uncovered_modules_enhanced()
-        untested_functions = await self._find_untested_functions_enhanced()
-
-        current_coverage = await self._estimate_current_coverage()
-
-        return {
-            "below_threshold": current_coverage < 0.8,
-            "current_coverage": current_coverage,
-            "uncovered_modules": uncovered_modules[:15],
-            "untested_functions": untested_functions[:20],
-            "coverage_gaps": await self._identify_coverage_gaps(),
-            "improvement_potential": self._calculate_improvement_potential(
-                len(uncovered_modules), len(untested_functions)
-            ),
-        }
-
-    async def _estimate_current_coverage(self) -> float:
-        try:
-            source_files: list[Path] = list(
-                (self.context.project_path / "crackerjack").rglob("*.py")
-            )
-            source_files = [f for f in source_files if not f.name.startswith("test_")]
-
-            test_files: list[Path] = list(
-                (self.context.project_path / "tests").rglob("test_*.py")
-            )
-
-            if not source_files:
-                return 0.0
-
-            coverage_ratio = len(test_files) / len(source_files)
-
-            estimated_coverage = min(coverage_ratio * 0.6, 0.9)
-
-            return estimated_coverage
-
-        except Exception:
-            return 0.1
-
-    def _calculate_improvement_potential(
-        self, uncovered_modules: int, untested_functions: int
-    ) -> dict[str, Any]:
-        if uncovered_modules == untested_functions == 0:
-            return {"percentage_points": 0, "priority": "low"}
-
-        module_improvement = uncovered_modules * 2.5
-        function_improvement = untested_functions * 0.8
-
-        total_potential = min(module_improvement + function_improvement, 40)
-
-        priority = (
-            "high"
-            if total_potential > 15
-            else "medium"
-            if total_potential > 5
-            else "low"
-        )
-
-        return {
-            "percentage_points": round(total_potential, 1),
-            "priority": priority,
-            "module_contribution": round(module_improvement, 1),
-            "function_contribution": round(function_improvement, 1),
-        }
-
-    def _create_default_coverage_result(self) -> dict[str, Any]:
-        return {
-            "below_threshold": True,
-            "current_coverage": 0.0,
-            "uncovered_modules": [],
-        }
-
-    async def _find_uncovered_modules_enhanced(self) -> list[dict[str, Any]]:
-        uncovered: list[dict[str, Any]] = []
-
-        package_dir = self.context.project_path / "crackerjack"
-        if not package_dir.exists():
-            return uncovered[:15]
-
-        for py_file in package_dir.rglob("*.py"):
-            if self._should_skip_module_for_coverage(py_file):
-                continue
-
-            if not self._has_corresponding_test(str(py_file)):
-                module_info = await self._analyze_module_priority(py_file)
-                uncovered.append(module_info)
-
-        uncovered.sort(key=operator.itemgetter("priority_score"), reverse=True)
-        return uncovered[:15]
-
-    async def _analyze_module_priority(self, py_file: Path) -> dict[str, Any]:
-        try:
-            content = self.context.get_file_content(py_file) or ""
-            ast.parse(content)
-
-            functions = await self._extract_functions_from_file(py_file)
-            classes = await self._extract_classes_from_file(py_file)
-
-            priority_score = 0
-
-            rel_path = str(py_file.relative_to(self.context.project_path))
-            if any(
-                core_path in rel_path
-                for core_path in ("managers/", "services/", "core/", "agents/")
-            ):
-                priority_score += 10
-
-            priority_score += len(functions) * 2
-            priority_score += len(classes) * 3
-
-            public_functions = [f for f in functions if not f["name"].startswith("_")]
-            priority_score += len(public_functions) * 2
-
-            lines_count = len(content.split("\n"))
-            if lines_count > 100:
-                priority_score += 5
-            elif lines_count > 50:
-                priority_score += 2
-
-            return {
-                "path": rel_path,
-                "absolute_path": str(py_file),
-                "priority_score": priority_score,
-                "function_count": len(functions),
-                "class_count": len(classes),
-                "public_function_count": len(public_functions),
-                "lines_count": lines_count,
-                "category": self._categorize_module(rel_path),
-            }
-
-        except Exception as e:
-            self.log(f"Error analyzing module priority for {py_file}: {e}", "WARN")
-            return {
-                "path": str(py_file.relative_to(self.context.project_path)),
-                "absolute_path": str(py_file),
-                "priority_score": 1,
-                "function_count": 0,
-                "class_count": 0,
-                "public_function_count": 0,
-                "lines_count": 0,
-                "category": "unknown",
-            }
-
-    def _categorize_module(self, relative_path: str) -> str:
-        if "managers/" in relative_path:
-            return "manager"
-        elif "services/" in relative_path:
-            return "service"
-        elif "core/" in relative_path:
-            return "core"
-        elif "agents/" in relative_path:
-            return "agent"
-        elif "models/" in relative_path:
-            return "model"
-        elif "executors/" in relative_path:
-            return "executor"
-        return "utility"
-
-    async def _find_untested_functions_enhanced(self) -> list[dict[str, Any]]:
-        untested: list[dict[str, Any]] = []
-
-        package_dir = self.context.project_path / "crackerjack"
-        if not package_dir.exists():
-            return untested[:20]
-
-        for py_file in package_dir.rglob("*.py"):
-            if self._should_skip_file_for_testing(py_file):
-                continue
-
-            file_untested = await self._find_untested_functions_in_file_enhanced(
-                py_file
-            )
-            untested.extend(file_untested)
-
-        untested.sort(key=operator.itemgetter("testing_priority"), reverse=True)
-        return untested[:20]
-
-    async def _find_untested_functions_in_file_enhanced(
-        self, py_file: Path
-    ) -> list[dict[str, Any]]:
-        untested: list[dict[str, Any]] = []
-
-        try:
-            functions = await self._extract_functions_from_file(py_file)
-            for func in functions:
-                if not await self._function_has_test(func, py_file):
-                    func_info = await self._analyze_function_testability(func, py_file)
-                    untested.append(func_info)
-
-        except Exception as e:
-            self.log(f"Error finding untested functions in {py_file}: {e}", "WARN")
-
-        return untested
-
-    async def _analyze_function_testability(
-        self, func: dict[str, Any], py_file: Path
-    ) -> dict[str, Any]:
-        try:
-            func_info = {
-                "name": func["name"],
-                "file": str(py_file),
-                "relative_file": str(py_file.relative_to(self.context.project_path)),
-                "line": func.get("line", 1),
-                "signature": func.get("signature", ""),
-                "args": func.get("args", []),
-                "returns": func.get("returns", "Any"),
-                "testing_priority": 0,
-                "complexity": "simple",
-                "test_strategy": "basic",
-            }
-
-            priority = 0
-
-            if not func["name"].startswith("_"):
-                priority += 10
-
-            arg_count = len(func.get("args", []))
-            if arg_count > 3:
-                priority += 5
-                func_info["complexity"] = "complex"
-                func_info["test_strategy"] = "parametrized"
-            elif arg_count > 1:
-                priority += 2
-                func_info["complexity"] = "moderate"
-
-            if any(
-                core_path in str(func_info["relative_file"])
-                for core_path in ("managers/", "services/", "core/")
-            ):
-                priority += 8
-
-            if func.get("is_async", False):
-                priority += 3
-                func_info["test_strategy"] = "async"
-
-            func_info["testing_priority"] = priority
-
-            return func_info
-
-        except Exception as e:
-            self.log(f"Error analyzing function testability: {e}", "WARN")
-            return {
-                "name": func.get("name", "unknown"),
-                "file": str(py_file),
-                "relative_file": str(py_file.relative_to(self.context.project_path)),
-                "line": func.get("line", 1),
-                "testing_priority": 1,
-                "complexity": "unknown",
-                "test_strategy": "basic",
-            }
-
-    async def _identify_coverage_gaps(self) -> list[dict[str, Any]]:
-        gaps: list[dict[str, Any]] = []
-
-        try:
-            package_dir = self.context.project_path / "crackerjack"
-            tests_dir = self.context.project_path / "tests"
-
-            if not package_dir.exists() or not tests_dir.exists():
-                return gaps
-
-            for py_file in package_dir.rglob("*.py"):
-                if self._should_skip_module_for_coverage(py_file):
-                    continue
-
-                test_coverage_info = await self._analyze_existing_test_coverage(py_file)
-                if test_coverage_info["has_gaps"]:
-                    gaps.append(test_coverage_info)
-
-        except Exception as e:
-            self.log(f"Error identifying coverage gaps: {e}", "WARN")
-
-        return gaps[:10]
-
-    async def _analyze_existing_test_coverage(self, py_file: Path) -> dict[str, Any]:
-        try:
-            test_file_path = await self._generate_test_file_path(py_file)
-
-            coverage_info: dict[str, Any] = {
-                "source_file": str(py_file.relative_to(self.context.project_path)),
-                "test_file": str(test_file_path) if test_file_path.exists() else None,
-                "has_gaps": True,
-                "missing_test_types": [],
-                "coverage_score": 0,
-            }
-
-            if not test_file_path.exists():
-                coverage_info["missing_test_types"] = [
-                    "basic",
-                    "edge_cases",
-                    "error_handling",
-                ]
-                return coverage_info
-
-            test_content = self.context.get_file_content(test_file_path) or ""
-
-            missing_types = []
-            if "def test_" not in test_content:
-                missing_types.append("basic")
-            if "@pytest.mark.parametrize" not in test_content:
-                missing_types.append("parametrized")
-            if "with pytest.raises" not in test_content:
-                missing_types.append("error_handling")
-            if "mock" not in test_content.lower():
-                missing_types.append("mocking")
-
-            coverage_info["missing_test_types"] = missing_types
-            coverage_info["has_gaps"] = len(missing_types) > 0
-            coverage_info["coverage_score"] = max(0, 100 - len(missing_types) * 25)
-
-            return coverage_info
-
-        except Exception as e:
-            self.log(f"Error analyzing test coverage for {py_file}: {e}", "WARN")
-            return {
-                "source_file": str(py_file.relative_to(self.context.project_path)),
-                "test_file": None,
-                "has_gaps": True,
-                "missing_test_types": ["basic"],
-                "coverage_score": 0,
-            }
-
-    def _should_skip_module_for_coverage(self, py_file: Path) -> bool:
-        return py_file.name.startswith("test_") or py_file.name == "__init__.py"
-
-    def _get_relative_module_path(self, py_file: Path) -> str:
-        return str(py_file.relative_to(self.context.project_path))
-
-    def _has_corresponding_test(self, file_path: str) -> bool:
-        path = Path(file_path)
-
-        test_patterns = [
-            f"test_{path.stem}.py",
-            f"{path.stem}_test.py",
-            f"test_{path.stem}_*.py",
-        ]
-
-        tests_dir = self.context.project_path / "tests"
-        if tests_dir.exists():
-            for pattern in test_patterns:
-                if list(tests_dir.glob(pattern)):
-                    return True
-
-        return False
+        return await self._coverage_analyzer.analyze_coverage()
 
     async def _create_tests_for_module(self, module_path: str) -> dict[str, list[str]]:
-
-        files: list[str] = []
-
-        try:
-            test_results = await self._generate_module_tests(module_path)
-            fixes.extend(test_results["fixes"])
-            files.extend(test_results["files"])
-
-        except Exception as e:
-            self._handle_test_creation_error(module_path, e)
-
-        return {"fixes": fixes, "files": files}
-
-    async def _generate_module_tests(self, module_path: str) -> dict[str, list[str]]:
-        module_file = Path(module_path)
-        if not await self._is_module_valid(module_file):
-            return {"fixes": [], "files": []}
-
-        functions = await self._extract_functions_from_file(module_file)
-        classes = await self._extract_classes_from_file(module_file)
-
-        if not functions and not classes:
-            return {"fixes": [], "files": []}
-
-        return await self._create_test_artifacts(module_file, functions, classes)
-
-    async def _is_module_valid(self, module_file: Path) -> bool:
-        return module_file.exists()
-
-    async def _create_test_artifacts(
-        self,
-        module_file: Path,
-        functions: list[dict[str, Any]],
-        classes: list[dict[str, Any]],
-    ) -> dict[str, list[str]]:
-        test_file_path = await self._generate_test_file_path(module_file)
-        test_content = await self._generate_test_content(
-            module_file,
-            functions,
-            classes,
-        )
-
-        if self.context.write_file_content(test_file_path, test_content):
-            self.log(f"Created test file: {test_file_path}")
-            return {
-                "fixes": [f"Created test file for {module_file}"],
-                "files": [str(test_file_path)],
-            }
-
-        return {"fixes": [], "files": []}
-
-    def _handle_test_creation_error(self, module_path: str, e: Exception) -> None:
-        self.log(f"Error creating tests for module {module_path}: {e}", "ERROR")
+        return await self._coverage_analyzer.create_tests_for_module(module_path)
 
     async def _create_tests_for_file(self, file_path: str) -> dict[str, list[str]]:
-
-            return {"fixes": [], "files": []}
-
-        return await self._create_tests_for_module(file_path)
+        return await self._coverage_analyzer.create_tests_for_file(file_path)
 
     async def _find_untested_functions(self) -> list[dict[str, Any]]:
-
-
-        package_dir = self.context.project_path / "crackerjack"
-        if not package_dir.exists():
-            return untested[:10]
-
-        for py_file in package_dir.rglob("*.py"):
-            if self._should_skip_file_for_testing(py_file):
-                continue
-
-            file_untested = await self._find_untested_functions_in_file(py_file)
-            untested.extend(file_untested)
-
-        return untested[:10]
-
-    def _should_skip_file_for_testing(self, py_file: Path) -> bool:
-        return py_file.name.startswith("test_")
-
-    async def _find_untested_functions_in_file(
-        self,
-        py_file: Path,
-    ) -> list[dict[str, Any]]:
-        untested: list[dict[str, Any]] = []
-
-        functions = await self._extract_functions_from_file(py_file)
-        for func in functions:
-            if not await self._function_has_test(func, py_file):
-                untested.append(self._create_untested_function_info(func, py_file))
-
-        return untested
-
-    def _create_untested_function_info(
-        self,
-        func: dict[str, Any],
-        py_file: Path,
-    ) -> dict[str, Any]:
-        return {
-            "name": func["name"],
-            "file": str(py_file),
-            "line": func.get("line", 1),
-            "signature": func.get("signature", ""),
-        }
+        return await self._coverage_analyzer.find_untested_functions()
 
     async def _create_test_for_function(
         self,
         func_info: dict[str, Any],
     ) -> dict[str, list[str]]:
-
-        files: list[str] = []
-
-        try:
-            func_file = Path(func_info["file"])
-            test_file_path = await self._generate_test_file_path(func_file)
-
-            if test_file_path.exists():
-                existing_content = self.context.get_file_content(test_file_path) or ""
-                new_test = await self._generate_function_test(func_info)
-
-                updated_content = existing_content.rstrip() + "\n\n" + new_test
-                if self.context.write_file_content(test_file_path, updated_content):
-                    fixes.append(f"Added test for function {func_info['name']}")
-                    files.append(str(test_file_path))
-            else:
-                test_content = await self._generate_function_test(func_info)
-                if self.context.write_file_content(test_file_path, test_content):
-                    fixes.append(f"Created test file with test for {func_info['name']}")
-                    files.append(str(test_file_path))
-
-        except Exception as e:
-            self.log(
-                f"Error creating test for function {func_info['name']}: {e}",
-                "ERROR",
-            )
-
-        return {"fixes": fixes, "files": files}
+        return await self._coverage_analyzer.create_test_for_function(func_info)
 
     async def _extract_functions_from_file(
        self,
         file_path: Path,
     ) -> list[dict[str, Any]]:
-
-
-        try:
-            content = self.context.get_file_content(file_path)
-            if not content:
-                return functions
-
-            tree = ast.parse(content)
-            functions = self._parse_function_nodes(tree)
-
-        except Exception as e:
-            self.log(f"Error parsing file {file_path}: {e}", "WARN")
-
-        return functions
-
-    def _parse_function_nodes(self, tree: ast.AST) -> list[dict[str, Any]]:
-        functions: list[dict[str, Any]] = []
-
-        for node in ast.walk(tree):
-            if isinstance(
-                node, ast.FunctionDef | ast.AsyncFunctionDef
-            ) and self._is_valid_function_node(node):
-                function_info = self._create_function_info(node)
-
-                function_info["is_async"] = isinstance(node, ast.AsyncFunctionDef)
-                functions.append(function_info)
-
-        return functions
-
-    def _is_valid_function_node(
-        self, node: ast.FunctionDef | ast.AsyncFunctionDef
-    ) -> bool:
-        return not node.name.startswith(("_", "test_"))
-
-    def _create_function_info(
-        self, node: ast.FunctionDef | ast.AsyncFunctionDef
-    ) -> dict[str, Any]:
-        return {
-            "name": node.name,
-            "line": node.lineno,
-            "signature": self._get_function_signature(node),
-            "args": [arg.arg for arg in node.args.args],
-            "returns": self._get_return_annotation(node),
-            "is_async": isinstance(node, ast.AsyncFunctionDef),
-            "docstring": ast.get_docstring(node) or "",
-        }
+        return await self._ast_analyzer.extract_functions_from_file(file_path)
 
     async def _extract_classes_from_file(self, file_path: Path) -> list[dict[str, Any]]:
-
-
-        try:
-            content = self.context.get_file_content(file_path)
-            if not content:
-                return classes
-
-            tree = ast.parse(content)
-            classes = self._process_ast_nodes_for_classes(tree)
-
-        except Exception as e:
-            self.log(f"Error parsing classes from {file_path}: {e}", "WARN")
-
-        return classes
-
-    def _process_ast_nodes_for_classes(self, tree: ast.AST) -> list[dict[str, Any]]:
-        classes: list[dict[str, Any]] = []
-
-        for node in ast.walk(tree):
-            if isinstance(node, ast.ClassDef) and self._should_include_class(node):
-                class_info = self._create_class_info(node)
-                classes.append(class_info)
-
-        return classes
-
-    def _should_include_class(self, node: ast.ClassDef) -> bool:
-        return not node.name.startswith("_")
-
-    def _create_class_info(self, node: ast.ClassDef) -> dict[str, Any]:
-        methods = self._extract_public_methods_from_class(node)
-        return {"name": node.name, "line": node.lineno, "methods": methods}
-
-    def _extract_public_methods_from_class(self, node: ast.ClassDef) -> list[str]:
-        return [
-            item.name
-            for item in node.body
-            if isinstance(item, ast.FunctionDef) and not item.name.startswith("_")
-        ]
-
-    def _get_function_signature(
-        self, node: ast.FunctionDef | ast.AsyncFunctionDef
-    ) -> str:
-        args = [arg.arg for arg in node.args.args]
-        prefix = "async " if isinstance(node, ast.AsyncFunctionDef) else ""
-        return f"{prefix}{node.name}({', '.join(args)})"
-
-    def _get_return_annotation(
-        self, node: ast.FunctionDef | ast.AsyncFunctionDef
-    ) -> str:
-        if node.returns:
-            return ast.unparse(node.returns) if (hasattr(ast, "unparse")) else "Any"
-        return "Any"
+        return await self._ast_analyzer.extract_classes_from_file(file_path)
 
     async def _function_has_test(
         self,
         func_info: dict[str, Any],
         file_path: Path,
     ) -> bool:
-
-
-        if not test_file_path.exists():
-            return False
-
-        test_content = self.context.get_file_content(test_file_path)
-        if not test_content:
-            return False
-
-        test_patterns = [
-            f"test_{func_info['name']}",
-            f"test_{func_info['name']}_",
-            f"def test_{func_info['name']}",
-        ]
-
-        return any(pattern in test_content for pattern in test_patterns)
+        return await self._ast_analyzer.function_has_test(func_info, file_path)
 
     async def _generate_test_file_path(self, source_file: Path) -> Path:
-
-        tests_dir.mkdir(exist_ok=True)
+        return await self._ast_analyzer.generate_test_file_path(source_file)
 
-
-
-        )
-        test_name = f"test_{relative_path.stem}.py"
-
-        return tests_dir / test_name
+    def _has_corresponding_test(self, file_path: str) -> bool:
+        return self._ast_analyzer.has_corresponding_test(file_path)
 
     async def _generate_test_content(
         self,
@@ -1227,930 +562,9 @@ class TestCreationAgent(SubAgent):
         functions: list[dict[str, Any]],
         classes: list[dict[str, Any]],
     ) -> str:
-
-
-
-    async def _generate_comprehensive_test_content(
-        self,
-        test_params: dict[str, Any],
-        functions: list[dict[str, Any]],
-        classes: list[dict[str, Any]],
-    ) -> str:
-        return await self._generate_all_test_types(test_params, functions, classes)
-
-    def _prepare_test_generation_params(self, module_file: Path) -> dict[str, Any]:
-        module_name = self._get_module_import_path(module_file)
-        module_category = self._categorize_module(
-            str(module_file.relative_to(self.context.project_path))
-        )
-        return {
-            "module_name": module_name,
-            "module_file": module_file,
-            "module_category": module_category,
-        }
-
-    async def _generate_all_test_types(
-        self,
-        test_params: dict[str, Any],
-        functions: list[dict[str, Any]],
-        classes: list[dict[str, Any]],
-    ) -> str:
-        base_content = self._generate_enhanced_test_file_header(
-            test_params["module_name"],
-            test_params["module_file"],
-            test_params["module_category"],
-        )
-
-        function_tests = await self._generate_function_tests_content(
-            functions, test_params["module_category"]
-        )
-        class_tests = await self._generate_class_tests_content(
-            classes, test_params["module_category"]
-        )
-        integration_tests = await self._generate_integration_tests_content(
-            test_params["module_file"],
-            functions,
-            classes,
-            test_params["module_category"],
-        )
-
-        return base_content + function_tests + class_tests + integration_tests
-
-    async def _generate_function_tests_content(
-        self, functions: list[dict[str, Any]], module_category: str
-    ) -> str:
-        return await self._generate_enhanced_function_tests(functions, module_category)
-
-    async def _generate_class_tests_content(
-        self, classes: list[dict[str, Any]], module_category: str
-    ) -> str:
-        return await self._generate_enhanced_class_tests(classes, module_category)
-
-    async def _generate_integration_tests_content(
-        self,
-        module_file: Path,
-        functions: list[dict[str, Any]],
-        classes: list[dict[str, Any]],
-        module_category: str,
-    ) -> str:
-        return await self._generate_integration_tests(
-            module_file, functions, classes, module_category
-        )
-
-    def _generate_enhanced_test_file_header(
-        self, module_name: str, module_file: Path, module_category: str
-    ) -> str:
-        imports = [
-            "import pytest",
-            "from pathlib import Path",
-            "from unittest.mock import Mock, patch, AsyncMock",
-        ]
-
-        if module_category in ("service", "manager", "core"):
-            imports.append("import asyncio")
-
-        if module_category == "agent":
-            imports.extend(
-                [
-                    "from crackerjack.agents.base import AgentContext, FixResult, "
-                    "Issue, IssueType",
-                ]
-            )
-
-        imports_str = "\n".join(imports)
-
-        try:
-            content = self.context.get_file_content(module_file) or ""
-            tree = ast.parse(content)
-
-            importable_items = []
-            for node in ast.walk(tree):
-                if isinstance(node, ast.ClassDef) and not node.name.startswith("_"):
-                    importable_items.append(node.name)
-                elif isinstance(
-                    node, ast.FunctionDef | ast.AsyncFunctionDef
-                ) and not node.name.startswith("_"):
-                    importable_items.append(node.name)
-
-            if importable_items:
-                specific_imports = (
-                    f"from {module_name} import {', '.join(importable_items[:10])}"
-                )
-            else:
-                specific_imports = f"import {module_name}"
-
-        except Exception:
-            specific_imports = f"import {module_name}"
-
-        class_name = f"Test{module_file.stem.replace('_', '').title()}"
-
-        return (
-            f'"""{imports_str}\n'
-            f"{specific_imports}\n"
-            "\n"
-            "\n"
-            f"class {class_name}:\n"
-            f' """Tests for {module_name}.\n'
-            "\n"
-            f" This module contains comprehensive tests for {module_name}\n"
-            " including:\n"
-            " - Basic functionality tests\n"
-            " - Edge case validation\n"
-            " - Error handling verification\n"
-            " - Integration testing\n"
-            " - Performance validation (where applicable)\n"
-            ' """\n'
-            "\n"
-            " def test_module_imports_successfully(self):\n"
-            ' """Test that the module can be imported without errors."""\n'
-            f" import {module_name}\n"
-            f" assert {module_name} is not None\n"
-        )
-
-    def _get_module_import_path(self, file_path: Path) -> str:
-        try:
-            relative_path = file_path.relative_to(self.context.project_path)
-            parts = (*relative_path.parts[:-1], relative_path.stem)
-            return ".".join(parts)
-        except ValueError:
-            return file_path.stem
-
-    async def _generate_function_test(self, func_info: dict[str, Any]) -> str:
-        func_name = func_info["name"]
-        args = func_info.get("args", [])
-
-        test_template = f"""def test_{func_name}_basic(self):
-    \"\"\"Test basic functionality of {func_name}.\"\"\"
-    try:
-        result = {func_name}({self._generate_default_args(args)})
-        assert result is not None or result is None
-    except TypeError:
-        pytest.skip(
-            "Function requires specific arguments - manual implementation needed"
-        )
-    except Exception as e:
-        pytest.fail(f"Unexpected error in {func_name}: {{e}}")"""
-
-        return test_template
-
-    async def _generate_enhanced_function_tests(
-        self, functions: list[dict[str, Any]], module_category: str
-    ) -> str:
-        if not functions:
-            return ""
-
-        test_methods = []
-        for func in functions:
-            func_tests = await self._generate_all_tests_for_function(
-                func, module_category
-            )
-            test_methods.extend(func_tests)
-
-        return "\n".join(test_methods)
-
-    async def _generate_all_tests_for_function(
-        self, func: dict[str, Any], module_category: str
-    ) -> list[str]:
-        func_tests = []
-
-        basic_test = await self._generate_basic_function_test(func, module_category)
-        func_tests.append(basic_test)
-
-        additional_tests = await self._generate_conditional_tests_for_function(
-            func, module_category
-        )
-        func_tests.extend(additional_tests)
-
-        return func_tests
-
-    async def _generate_conditional_tests_for_function(
-        self, func: dict[str, Any], module_category: str
-    ) -> list[str]:
-        tests = []
-        args = func.get("args", [])
-        func_name = func["name"]
-
-        if self._should_generate_parametrized_test(args):
-            parametrized_test = await self._generate_parametrized_test(
-                func, module_category
-            )
-            tests.append(parametrized_test)
-
-        error_test = await self._generate_error_handling_test(func, module_category)
-        tests.append(error_test)
-
-        if self._should_generate_edge_case_test(args, func_name):
-            edge_test = await self._generate_edge_case_test(func, module_category)
|
|
1444
|
-
tests.append(edge_test)
|
|
1445
|
-
|
|
1446
|
-
return tests
|
|
1447
|
-
|
|
1448
|
-
def _should_generate_parametrized_test(self, args: list[str]) -> bool:
|
|
1449
|
-
return len(args) > 1
|
|
1450
|
-
|
|
1451
|
-
def _should_generate_edge_case_test(self, args: list[str], func_name: str) -> bool:
|
|
1452
|
-
has_multiple_args = len(args) > 2
|
|
1453
|
-
is_complex_function = any(
|
|
1454
|
-
hint in func_name.lower()
|
|
1455
|
-
for hint in ("process", "validate", "parse", "convert")
|
|
1456
|
-
)
|
|
1457
|
-
return has_multiple_args or is_complex_function
|
|
1458
|
-
|
|
1459
|
-
async def _generate_basic_function_test(
|
|
1460
|
-
self, func: dict[str, Any], module_category: str
|
|
1461
|
-
) -> str:
|
|
1462
|
-
func_name = func["name"]
|
|
1463
|
-
args = func.get("args", [])
|
|
1464
|
-
|
|
1465
|
-
template_generator = self._get_test_template_generator(module_category)
|
|
1466
|
-
return template_generator(func_name, args)
|
|
1467
|
-
|
|
1468
|
-
def _get_test_template_generator(
|
|
1469
|
-
self, module_category: str
|
|
1470
|
-
) -> Callable[[str, list[str]], str]:
|
|
1471
|
-
return {
|
|
1472
|
-
"agent": self._generate_agent_test_template,
|
|
1473
|
-
"service": self._generate_async_test_template,
|
|
1474
|
-
"manager": self._generate_async_test_template,
|
|
1475
|
-
}.get(module_category, self._generate_default_test_template)
|
|
1476
|
-
|
|
1477
|
-
def _generate_agent_test_template(self, func_name: str, args: list[str]) -> str:
|
|
1478
|
-
template = (
|
|
1479
|
-
" def test_FUNC_NAME_basic_functionality(self):\n"
|
|
1480
|
-
' """Test basic functionality of FUNC_NAME."""\n'
|
|
1481
|
-
"\n"
|
|
1482
|
-
"\n"
|
|
1483
|
-
" try:\n"
|
|
1484
|
-
" result = FUNC_NAME(ARGS)\n"
|
|
1485
|
-
" assert result is not None or result is None\n"
|
|
1486
|
-
" except (TypeError, NotImplementedError) as e:\n"
|
|
1487
|
-
+ (
|
|
1488
|
-
" pytest.skip('Function FUNC_NAME requires manual "
|
|
1489
|
-
"implementation: ' + str(e))\n"
|
|
1490
|
-
)
|
|
1491
|
-
+ " except Exception as e:\n"
|
|
1492
|
-
" pytest.fail('Unexpected error in FUNC_NAME: ' + str(e))"
|
|
1493
|
-
)
|
|
1494
|
-
|
|
1495
|
-
return template.replace("FUNC_NAME", func_name).replace(
|
|
1496
|
-
"ARGS", self._generate_smart_default_args(args)
|
|
1497
|
-
)
|
|
1498
|
-
|
|
1499
|
-
def _generate_async_test_template(self, func_name: str, args: list[str]) -> str:
|
|
1500
|
-
template = (
|
|
1501
|
-
" @pytest.mark.asyncio\n"
|
|
1502
|
-
" async def test_FUNC_NAME_basic_functionality(self):\n"
|
|
1503
|
-
' """Test basic functionality of FUNC_NAME."""\n'
|
|
1504
|
-
"\n"
|
|
1505
|
-
"\n"
|
|
1506
|
-
" try:\n"
|
|
1507
|
-
" if asyncio.iscoroutinefunction(FUNC_NAME):\n"
|
|
1508
|
-
" result = await FUNC_NAME(ARGS)\n"
|
|
1509
|
-
" else:\n"
|
|
1510
|
-
" result = FUNC_NAME(ARGS)\n"
|
|
1511
|
-
" assert result is not None or result is None\n"
|
|
1512
|
-
" except (TypeError, NotImplementedError) as e:\n"
|
|
1513
|
-
+ (
|
|
1514
|
-
" pytest.skip('Function FUNC_NAME requires manual "
|
|
1515
|
-
"implementation: ' + str(e))\n"
|
|
1516
|
-
)
|
|
1517
|
-
+ " except Exception as e:\n"
|
|
1518
|
-
" pytest.fail('Unexpected error in FUNC_NAME: ' + str(e))"
|
|
1519
|
-
)
|
|
1520
|
-
|
|
1521
|
-
return template.replace("FUNC_NAME", func_name).replace(
|
|
1522
|
-
"ARGS", self._generate_smart_default_args(args)
|
|
1523
|
-
)
|
|
1524
|
-
|
|
1525
|
-
def _generate_default_test_template(self, func_name: str, args: list[str]) -> str:
|
|
1526
|
-
template = (
|
|
1527
|
-
" def test_FUNC_NAME_basic_functionality(self):\n"
|
|
1528
|
-
' """Test basic functionality of FUNC_NAME."""\n'
|
|
1529
|
-
" try:\n"
|
|
1530
|
-
" result = FUNC_NAME(ARGS)\n"
|
|
1531
|
-
" assert result is not None or result is None\n"
|
|
1532
|
-
" except (TypeError, NotImplementedError) as e:\n"
|
|
1533
|
-
+ (
|
|
1534
|
-
" pytest.skip('Function FUNC_NAME requires manual "
|
|
1535
|
-
"implementation: ' + str(e))\n"
|
|
1536
|
-
)
|
|
1537
|
-
+ " except Exception as e:\n"
|
|
1538
|
-
" pytest.fail('Unexpected error in FUNC_NAME: ' + str(e))"
|
|
1539
|
-
)
|
|
1540
|
-
|
|
1541
|
-
return template.replace("FUNC_NAME", func_name).replace(
|
|
1542
|
-
"ARGS", self._generate_smart_default_args(args)
|
|
565
|
+
return await self._template_generator.generate_test_content(
|
|
566
|
+
module_file, functions, classes
|
|
1543
567
|
)
|
|
1544
568
|
|
|
1545
|
-
async def _generate_parametrized_test(
|
|
1546
|
-
self, func: dict[str, Any], module_category: str
|
|
1547
|
-
) -> str:
|
|
1548
|
-
func_name = func["name"]
|
|
1549
|
-
args = func.get("args", [])
|
|
1550
|
-
|
|
1551
|
-
test_cases = self._generate_test_parameters(args)
|
|
1552
|
-
|
|
1553
|
-
if not test_cases:
|
|
1554
|
-
return ""
|
|
1555
|
-
|
|
1556
|
-
parametrize_decorator = f"@pytest.mark.parametrize({test_cases})"
|
|
1557
|
-
|
|
1558
|
-
test_template = (
|
|
1559
|
-
f" {parametrize_decorator}\n"
|
|
1560
|
-
f" def test_{func_name}_with_parameters(self, "
|
|
1561
|
-
f"{', '.join(args) if len(args) <= 5 else 'test_input'}):\n"
|
|
1562
|
-
f' """Test {func_name} with various parameter combinations."""\n'
|
|
1563
|
-
" try:\n"
|
|
1564
|
-
f" if len({args}) <= 5:\n"
|
|
1565
|
-
f" result = {func_name}({', '.join(args)})\n"
|
|
1566
|
-
" else:\n"
|
|
1567
|
-
f" result = {func_name}(**test_input)\n"
|
|
1568
|
-
"\n"
|
|
1569
|
-
" assert result is not None or result is None\n"
|
|
1570
|
-
" except (TypeError, ValueError) as expected_error:\n"
|
|
1571
|
-
"\n"
|
|
1572
|
-
" pass\n"
|
|
1573
|
-
" except Exception as e:\n"
|
|
1574
|
-
' pytest.fail(f"Unexpected error with parameters: {e}")'
|
|
1575
|
-
)
|
|
1576
|
-
|
|
1577
|
-
return test_template
|
|
1578
|
-
|
|
1579
|
-
async def _generate_error_handling_test(
|
|
1580
|
-
self, func: dict[str, Any], module_category: str
|
|
1581
|
-
) -> str:
|
|
1582
|
-
func_name = func["name"]
|
|
1583
|
-
args = func.get("args", [])
|
|
1584
|
-
|
|
1585
|
-
test_template = (
|
|
1586
|
-
f" def test_{func_name}_error_handling(self):\n"
|
|
1587
|
-
f' """Test {func_name} error handling with invalid inputs."""\n'
|
|
1588
|
-
"\n"
|
|
1589
|
-
" with pytest.raises((TypeError, ValueError, AttributeError)):\n"
|
|
1590
|
-
f" {func_name}({self._generate_invalid_args(args)})\n"
|
|
1591
|
-
"\n"
|
|
1592
|
-
"\n"
|
|
1593
|
-
f" if len({args}) > 0:\n"
|
|
1594
|
-
" with pytest.raises((TypeError, ValueError)):\n"
|
|
1595
|
-
f" {func_name}("
|
|
1596
|
-
f"{self._generate_edge_case_args(args, 'empty')})"
|
|
1597
|
-
)
|
|
1598
|
-
|
|
1599
|
-
return test_template
|
|
1600
|
-
|
|
1601
|
-
async def _generate_edge_case_test(
|
|
1602
|
-
self, func: dict[str, Any], module_category: str
|
|
1603
|
-
) -> str:
|
|
1604
|
-
func_name = func["name"]
|
|
1605
|
-
args = func.get("args", [])
|
|
1606
|
-
|
|
1607
|
-
test_template = (
|
|
1608
|
-
f" def test_{func_name}_edge_cases(self):\n"
|
|
1609
|
-
f' """Test {func_name} with edge case scenarios."""\n'
|
|
1610
|
-
"\n"
|
|
1611
|
-
" edge_cases = [\n"
|
|
1612
|
-
f" {self._generate_edge_case_args(args, 'boundary')},\n"
|
|
1613
|
-
f" {self._generate_edge_case_args(args, 'extreme')},\n"
|
|
1614
|
-
" ]\n"
|
|
1615
|
-
"\n"
|
|
1616
|
-
" for edge_case in edge_cases:\n"
|
|
1617
|
-
" try:\n"
|
|
1618
|
-
f" result = {func_name}(*edge_case)\n"
|
|
1619
|
-
"\n"
|
|
1620
|
-
" assert result is not None or result is None\n"
|
|
1621
|
-
" except (ValueError, TypeError):\n"
|
|
1622
|
-
"\n"
|
|
1623
|
-
" pass\n"
|
|
1624
|
-
" except Exception as e:\n"
|
|
1625
|
-
' pytest.fail(f"Unexpected error with edge case {edge_case}: '
|
|
1626
|
-
'{e}")'
|
|
1627
|
-
)
|
|
1628
|
-
|
|
1629
|
-
return test_template
|
|
1630
|
-
|
|
1631
|
-
def _generate_test_parameters(self, args: list[str]) -> str:
|
|
1632
|
-
if not args or len(args) > 5:
|
|
1633
|
-
return ""
|
|
1634
|
-
|
|
1635
|
-
param_names = ", ".join(f'"{arg}"' for arg in args)
|
|
1636
|
-
param_values = []
|
|
1637
|
-
|
|
1638
|
-
for i in range(min(3, len(args))):
|
|
1639
|
-
test_case = []
|
|
1640
|
-
for arg in args:
|
|
1641
|
-
if "path" in arg.lower():
|
|
1642
|
-
test_case.append(f'Path("test_{i}")')
|
|
1643
|
-
elif "str" in arg.lower() or "name" in arg.lower():
|
|
1644
|
-
test_case.append(f'"test_{i}"')
|
|
1645
|
-
elif "int" in arg.lower() or "count" in arg.lower():
|
|
1646
|
-
test_case.append(str(i))
|
|
1647
|
-
elif "bool" in arg.lower():
|
|
1648
|
-
test_case.append("True" if i % 2 == 0 else "False")
|
|
1649
|
-
else:
|
|
1650
|
-
test_case.append("None")
|
|
1651
|
-
param_values.append(f"({', '.join(test_case)})")
|
|
1652
|
-
|
|
1653
|
-
return f"[{param_names}], [{', '.join(param_values)}]"
|
|
1654
|
-
|
|
1655
|
-
def _generate_smart_default_args(self, args: list[str]) -> str:
|
|
1656
|
-
if not args or args == ["self"]:
|
|
1657
|
-
return ""
|
|
1658
|
-
|
|
1659
|
-
filtered_args = self._filter_args(args)
|
|
1660
|
-
if not filtered_args:
|
|
1661
|
-
return ""
|
|
1662
|
-
|
|
1663
|
-
placeholders = [
|
|
1664
|
-
self._generate_placeholder_for_arg(arg) for arg in filtered_args
|
|
1665
|
-
]
|
|
1666
|
-
return ", ".join(placeholders)
|
|
1667
|
-
|
|
1668
|
-
def _filter_args(self, args: list[str]) -> list[str]:
|
|
1669
|
-
return [arg for arg in args if arg != "self"]
|
|
1670
|
-
|
|
1671
|
-
def _generate_placeholder_for_arg(self, arg: str) -> str:
|
|
1672
|
-
arg_lower = arg.lower()
|
|
1673
|
-
|
|
1674
|
-
if self._is_path_arg(arg_lower):
|
|
1675
|
-
return 'Path("test_file.txt")'
|
|
1676
|
-
elif self._is_url_arg(arg_lower):
|
|
1677
|
-
return '"https: //example.com"'
|
|
1678
|
-
elif self._is_email_arg(arg_lower):
|
|
1679
|
-
return '"test@example.com"'
|
|
1680
|
-
elif self._is_id_arg(arg_lower):
|
|
1681
|
-
return '"test-id-123"'
|
|
1682
|
-
elif self._is_name_arg(arg_lower):
|
|
1683
|
-
return '"test_name"'
|
|
1684
|
-
elif self._is_numeric_arg(arg_lower):
|
|
1685
|
-
return "10"
|
|
1686
|
-
elif self._is_boolean_arg(arg_lower):
|
|
1687
|
-
return "True"
|
|
1688
|
-
elif self._is_text_arg(arg_lower):
|
|
1689
|
-
return '"test data"'
|
|
1690
|
-
elif self._is_list_arg(arg_lower):
|
|
1691
|
-
return '["test1", "test2"]'
|
|
1692
|
-
elif self._is_dict_arg(arg_lower):
|
|
1693
|
-
return '{"key": "value"}'
|
|
1694
|
-
return '"test"'
|
|
1695
|
-
|
|
1696
|
-
def _is_path_arg(self, arg_lower: str) -> bool:
|
|
1697
|
-
return any(term in arg_lower for term in ("path", "file"))
|
|
1698
|
-
|
|
1699
|
-
def _is_url_arg(self, arg_lower: str) -> bool:
|
|
1700
|
-
return any(term in arg_lower for term in ("url", "uri"))
|
|
1701
|
-
|
|
1702
|
-
def _is_email_arg(self, arg_lower: str) -> bool:
|
|
1703
|
-
return any(term in arg_lower for term in ("email", "mail"))
|
|
1704
|
-
|
|
1705
|
-
def _is_id_arg(self, arg_lower: str) -> bool:
|
|
1706
|
-
return any(term in arg_lower for term in ("id", "uuid"))
|
|
1707
|
-
|
|
1708
|
-
def _is_name_arg(self, arg_lower: str) -> bool:
|
|
1709
|
-
return any(term in arg_lower for term in ("name", "title"))
|
|
1710
|
-
|
|
1711
|
-
def _is_numeric_arg(self, arg_lower: str) -> bool:
|
|
1712
|
-
return any(term in arg_lower for term in ("count", "size", "number", "num"))
|
|
1713
|
-
|
|
1714
|
-
def _is_boolean_arg(self, arg_lower: str) -> bool:
|
|
1715
|
-
return any(term in arg_lower for term in ("enable", "flag", "is_", "has_"))
|
|
1716
|
-
|
|
1717
|
-
def _is_text_arg(self, arg_lower: str) -> bool:
|
|
1718
|
-
return any(term in arg_lower for term in ("data", "content", "text"))
|
|
1719
|
-
|
|
1720
|
-
def _is_list_arg(self, arg_lower: str) -> bool:
|
|
1721
|
-
return any(term in arg_lower for term in ("list[t.Any]", "items"))
|
|
1722
|
-
|
|
1723
|
-
def _is_dict_arg(self, arg_lower: str) -> bool:
|
|
1724
|
-
return any(
|
|
1725
|
-
term in arg_lower for term in ("dict[str, t.Any]", "config", "options")
|
|
1726
|
-
)
|
|
1727
|
-
|
|
1728
|
-
def _generate_invalid_args(self, args: list[str]) -> str:
|
|
1729
|
-
filtered_args = [arg for arg in args if arg != "self"]
|
|
1730
|
-
if not filtered_args:
|
|
1731
|
-
return ""
|
|
1732
|
-
return ", ".join(["None"] * len(filtered_args))
|
|
1733
|
-
|
|
1734
|
-
def _generate_edge_case_args(self, args: list[str], case_type: str) -> str:
|
|
1735
|
-
filtered_args = self._filter_args(args)
|
|
1736
|
-
if not filtered_args:
|
|
1737
|
-
return ""
|
|
1738
|
-
|
|
1739
|
-
placeholders = self._generate_placeholders_by_case_type(
|
|
1740
|
-
filtered_args, case_type
|
|
1741
|
-
)
|
|
1742
|
-
return ", ".join(placeholders)
|
|
1743
|
-
|
|
1744
|
-
def _generate_placeholders_by_case_type(
|
|
1745
|
-
self, filtered_args: list[str], case_type: str
|
|
1746
|
-
) -> list[str]:
|
|
1747
|
-
if case_type == "empty":
|
|
1748
|
-
return self._generate_empty_case_placeholders(filtered_args)
|
|
1749
|
-
elif case_type == "boundary":
|
|
1750
|
-
return self._generate_boundary_case_placeholders(filtered_args)
|
|
1751
|
-
|
|
1752
|
-
return self._generate_extreme_case_placeholders(filtered_args)
|
|
1753
|
-
|
|
1754
|
-
def _generate_empty_case_placeholders(self, filtered_args: list[str]) -> list[str]:
|
|
1755
|
-
placeholders = []
|
|
1756
|
-
for arg in filtered_args:
|
|
1757
|
-
arg_lower = arg.lower()
|
|
1758
|
-
if any(term in arg_lower for term in ("str", "name", "text")):
|
|
1759
|
-
placeholders.append('""')
|
|
1760
|
-
elif any(term in arg_lower for term in ("list[t.Any]", "items")):
|
|
1761
|
-
placeholders.append("[]")
|
|
1762
|
-
elif any(term in arg_lower for term in ("dict[str, t.Any]", "config")):
|
|
1763
|
-
placeholders.append("{}")
|
|
1764
|
-
else:
|
|
1765
|
-
placeholders.append("None")
|
|
1766
|
-
return placeholders
|
|
1767
|
-
|
|
1768
|
-
def _generate_boundary_case_placeholders(
|
|
1769
|
-
self, filtered_args: list[str]
|
|
1770
|
-
) -> list[str]:
|
|
1771
|
-
placeholders = []
|
|
1772
|
-
for arg in filtered_args:
|
|
1773
|
-
arg_lower = arg.lower()
|
|
1774
|
-
if any(term in arg_lower for term in ("count", "size", "number")):
|
|
1775
|
-
placeholders.append("0")
|
|
1776
|
-
elif any(term in arg_lower for term in ("str", "name")):
|
|
1777
|
-
placeholders.append('"x" * 1000')
|
|
1778
|
-
else:
|
|
1779
|
-
placeholders.append("None")
|
|
1780
|
-
return placeholders
|
|
1781
|
-
|
|
1782
|
-
def _generate_extreme_case_placeholders(
|
|
1783
|
-
self, filtered_args: list[str]
|
|
1784
|
-
) -> list[str]:
|
|
1785
|
-
placeholders = []
|
|
1786
|
-
for arg in filtered_args:
|
|
1787
|
-
arg_lower = arg.lower()
|
|
1788
|
-
if any(term in arg_lower for term in ("count", "size", "number")):
|
|
1789
|
-
placeholders.append("-1")
|
|
1790
|
-
else:
|
|
1791
|
-
placeholders.append("None")
|
|
1792
|
-
return placeholders
|
|
1793
|
-
|
|
1794
|
-
async def _generate_enhanced_class_tests(
|
|
1795
|
-
self, classes: list[dict[str, Any]], module_category: str
|
|
1796
|
-
) -> str:
|
|
1797
|
-
if not classes:
|
|
1798
|
-
return ""
|
|
1799
|
-
|
|
1800
|
-
test_components = await self._generate_all_class_test_components(
|
|
1801
|
-
classes, module_category
|
|
1802
|
-
)
|
|
1803
|
-
return self._combine_class_test_elements(
|
|
1804
|
-
test_components["fixtures"], test_components["test_methods"]
|
|
1805
|
-
)
|
|
1806
|
-
|
|
1807
|
-
async def _generate_all_class_test_components(
|
|
1808
|
-
self, classes: list[dict[str, Any]], module_category: str
|
|
1809
|
-
) -> dict[str, list[str]]:
|
|
1810
|
-
fixtures = []
|
|
1811
|
-
test_methods = []
|
|
1812
|
-
|
|
1813
|
-
for cls in classes:
|
|
1814
|
-
class_components = await self._generate_single_class_test_components(
|
|
1815
|
-
cls, module_category
|
|
1816
|
-
)
|
|
1817
|
-
fixtures.extend(class_components["fixtures"])
|
|
1818
|
-
test_methods.extend(class_components["test_methods"])
|
|
1819
|
-
|
|
1820
|
-
return {"fixtures": fixtures, "test_methods": test_methods}
|
|
1821
|
-
|
|
1822
|
-
async def _generate_single_class_test_components(
|
|
1823
|
-
self, cls: dict[str, Any], module_category: str
|
|
1824
|
-
) -> dict[str, list[str]]:
|
|
1825
|
-
fixtures = []
|
|
1826
|
-
test_methods = []
|
|
1827
|
-
methods = cls.get("methods", [])
|
|
1828
|
-
|
|
1829
|
-
fixture = await self._generate_class_fixture(cls, module_category)
|
|
1830
|
-
if fixture:
|
|
1831
|
-
fixtures.append(fixture)
|
|
1832
|
-
|
|
1833
|
-
core_tests = await self._generate_core_class_tests(
|
|
1834
|
-
cls, methods, module_category
|
|
1835
|
-
)
|
|
1836
|
-
test_methods.extend(core_tests)
|
|
1837
|
-
|
|
1838
|
-
return {"fixtures": fixtures, "test_methods": test_methods}
|
|
1839
|
-
|
|
1840
|
-
async def _generate_core_class_tests(
|
|
1841
|
-
self, cls: dict[str, Any], methods: list[str], module_category: str
|
|
1842
|
-
) -> list[str]:
|
|
1843
|
-
test_methods = []
|
|
1844
|
-
|
|
1845
|
-
instantiation_test = await self._generate_class_instantiation_test(
|
|
1846
|
-
cls, module_category
|
|
1847
|
-
)
|
|
1848
|
-
test_methods.append(instantiation_test)
|
|
1849
|
-
|
|
1850
|
-
method_tests = await self._generate_method_tests(
|
|
1851
|
-
cls, methods[:5], module_category
|
|
1852
|
-
)
|
|
1853
|
-
test_methods.extend(method_tests)
|
|
1854
|
-
|
|
1855
|
-
property_test = await self._generate_class_property_test(cls, module_category)
|
|
1856
|
-
if property_test:
|
|
1857
|
-
test_methods.append(property_test)
|
|
1858
|
-
|
|
1859
|
-
return test_methods
|
|
1860
|
-
|
|
1861
|
-
async def _generate_method_tests(
|
|
1862
|
-
self, cls: dict[str, Any], methods: list[str], module_category: str
|
|
1863
|
-
) -> list[str]:
|
|
1864
|
-
method_tests = []
|
|
1865
|
-
for method in methods:
|
|
1866
|
-
method_test = await self._generate_class_method_test(
|
|
1867
|
-
cls, method, module_category
|
|
1868
|
-
)
|
|
1869
|
-
method_tests.append(method_test)
|
|
1870
|
-
return method_tests
|
|
1871
|
-
|
|
1872
|
-
def _combine_class_test_elements(
|
|
1873
|
-
self, fixtures: list[str], test_methods: list[str]
|
|
1874
|
-
) -> str:
|
|
1875
|
-
fixture_section = "\n".join(fixtures) if fixtures else ""
|
|
1876
|
-
test_section = "\n".join(test_methods)
|
|
1877
|
-
return fixture_section + test_section
|
|
1878
|
-
|
|
1879
|
-
async def _generate_class_fixture(
|
|
1880
|
-
self, cls: dict[str, Any], module_category: str
|
|
1881
|
-
) -> str:
|
|
1882
|
-
class_name = cls["name"]
|
|
1883
|
-
|
|
1884
|
-
if module_category in ("service", "manager", "core"):
|
|
1885
|
-
fixture_template = (
|
|
1886
|
-
" @pytest.fixture\n"
|
|
1887
|
-
f" def {class_name.lower()}_instance(self):\n"
|
|
1888
|
-
f' """Fixture to create {class_name} instance for testing."""\n'
|
|
1889
|
-
"\n"
|
|
1890
|
-
" try:\n"
|
|
1891
|
-
f" return {class_name}()\n"
|
|
1892
|
-
" except TypeError:\n"
|
|
1893
|
-
"\n"
|
|
1894
|
-
f" with patch.object({class_name}, '__init__', return_value=None):\n"
|
|
1895
|
-
f" instance = {class_name}.__new__({class_name})\n"
|
|
1896
|
-
" return instance"
|
|
1897
|
-
)
|
|
1898
|
-
|
|
1899
|
-
elif module_category == "agent":
|
|
1900
|
-
fixture_template = (
|
|
1901
|
-
" @pytest.fixture\n"
|
|
1902
|
-
f" def {class_name.lower()}_instance(self):\n"
|
|
1903
|
-
f' """Fixture to create {class_name} instance for testing."""\n'
|
|
1904
|
-
"\n"
|
|
1905
|
-
" mock_context = Mock(spec=AgentContext)\n"
|
|
1906
|
-
' mock_context.project_path = Path("/test/project")\n'
|
|
1907
|
-
' mock_context.get_file_content = Mock(return_value="# test content")\n'
|
|
1908
|
-
" mock_context.write_file_content = Mock(return_value=True)\n"
|
|
1909
|
-
"\n"
|
|
1910
|
-
" try:\n"
|
|
1911
|
-
f" return {class_name}(mock_context)\n"
|
|
1912
|
-
" except Exception:\n"
|
|
1913
|
-
' pytest.skip("Agent requires specific context configuration")'
|
|
1914
|
-
)
|
|
1915
|
-
|
|
1916
|
-
else:
|
|
1917
|
-
fixture_template = (
|
|
1918
|
-
" @pytest.fixture\n"
|
|
1919
|
-
f" def {class_name.lower()}_instance(self):\n"
|
|
1920
|
-
f' """Fixture to create {class_name} instance for testing."""\n'
|
|
1921
|
-
" try:\n"
|
|
1922
|
-
f" return {class_name}()\n"
|
|
1923
|
-
" except TypeError:\n"
|
|
1924
|
-
' pytest.skip("Class requires specific constructor arguments")'
|
|
1925
|
-
)
|
|
1926
|
-
|
|
1927
|
-
return fixture_template
|
|
1928
|
-
|
|
1929
|
-
@staticmethod
|
|
1930
|
-
async def _generate_class_instantiation_test(
|
|
1931
|
-
class_info: dict[str, Any], module_category: str
|
|
1932
|
-
) -> str:
|
|
1933
|
-
class_name = class_info["name"]
|
|
1934
|
-
|
|
1935
|
-
test_template = (
|
|
1936
|
-
f" def test_{class_name.lower()}_instantiation(self, {class_name.lower()}_instance):\n"
|
|
1937
|
-
f' """Test successful instantiation of {class_name}."""\n'
|
|
1938
|
-
f" assert {class_name.lower()}_instance is not None\n"
|
|
1939
|
-
f" assert isinstance({class_name.lower()}_instance, {class_name})\n"
|
|
1940
|
-
"\n"
|
|
1941
|
-
f" assert hasattr({class_name.lower()}_instance, '__class__')\n"
|
|
1942
|
-
f' assert {class_name.lower()}_instance.__class__.__name__ == "{class_name}"'
|
|
1943
|
-
)
|
|
1944
|
-
|
|
1945
|
-
return test_template
|
|
1946
|
-
|
|
1947
|
-
async def _generate_class_method_test(
|
|
1948
|
-
self, cls: dict[str, Any], method_name: str, module_category: str
|
|
1949
|
-
) -> str:
|
|
1950
|
-
class_name = cls["name"]
|
|
1951
|
-
|
|
1952
|
-
if self._is_special_agent_method(module_category, method_name):
|
|
1953
|
-
return self._generate_agent_method_test(class_name, method_name)
|
|
1954
|
-
if module_category in ("service", "manager"):
|
|
1955
|
-
return self._generate_async_method_test(class_name, method_name)
|
|
1956
|
-
return self._generate_default_method_test(class_name, method_name)
|
|
1957
|
-
|
|
1958
|
-
def _is_special_agent_method(self, module_category: str, method_name: str) -> bool:
|
|
1959
|
-
return module_category == "agent" and method_name in (
|
|
1960
|
-
"can_handle",
|
|
1961
|
-
"analyze_and_fix",
|
|
1962
|
-
)
|
|
1963
|
-
|
|
1964
|
-
def _generate_agent_method_test(self, class_name: str, method_name: str) -> str:
|
|
1965
|
-
if method_name == "can_handle":
|
|
1966
|
-
return self._generate_can_handle_test(class_name)
|
|
1967
|
-
elif method_name == "analyze_and_fix":
|
|
1968
|
-
return self._generate_analyze_and_fix_test(class_name)
|
|
1969
|
-
return self._generate_generic_agent_method_test(class_name, method_name)
|
|
1970
|
-
|
|
1971
|
-
def _generate_can_handle_test(self, class_name: str) -> str:
|
|
1972
|
-
return (
|
|
1973
|
-
" @pytest.mark.asyncio\n"
|
|
1974
|
-
f" async def test_{class_name.lower()}_can_handle(self, {class_name.lower()}_instance):\n"
|
|
1975
|
-
f' """Test {class_name}.can_handle method."""\n'
|
|
1976
|
-
"\n"
|
|
1977
|
-
" mock_issue = Mock(spec=Issue)\n"
|
|
1978
|
-
" mock_issue.type = IssueType.COVERAGE_IMPROVEMENT\n"
|
|
1979
|
-
' mock_issue.message = "test coverage issue"\n'
|
|
1980
|
-
' mock_issue.file_path = "/test/path.py"\n'
|
|
1981
|
-
"\n"
|
|
1982
|
-
f" result = await {class_name.lower()}_instance.can_handle(mock_issue)\n"
|
|
1983
|
-
" assert isinstance(result, (int, float))\n"
|
|
1984
|
-
" assert 0.0 <= result <= 1.0"
|
|
1985
|
-
)
|
|
1986
|
-
|
|
1987
|
-
def _generate_analyze_and_fix_test(self, class_name: str) -> str:
|
|
1988
|
-
return (
|
|
1989
|
-
" @pytest.mark.asyncio\n"
|
|
1990
|
-
f" async def test_{class_name.lower()}_analyze_and_fix(self, {class_name.lower()}_instance):\n"
|
|
1991
|
-
f' """Test {class_name}.analyze_and_fix method."""\n'
|
|
1992
|
-
"\n"
|
|
1993
|
-
" mock_issue = Mock(spec=Issue)\n"
|
|
1994
|
-
" mock_issue.type = IssueType.COVERAGE_IMPROVEMENT\n"
|
|
1995
|
-
' mock_issue.message = "test coverage issue"\n'
|
|
1996
|
-
' mock_issue.file_path = "/test/path.py"\n'
|
|
1997
|
-
"\n"
|
|
1998
|
-
f" result = await {class_name.lower()}_instance.analyze_and_fix(mock_issue)\n"
|
|
1999
|
-
" assert isinstance(result, FixResult)\n"
|
|
2000
|
-
" assert hasattr(result, 'success')\n"
|
|
2001
|
-
" assert hasattr(result, 'confidence')"
|
|
2002
|
-
)
|
|
2003
|
-
|
|
2004
|
-
def _generate_generic_agent_method_test(
|
|
2005
|
-
self, class_name: str, method_name: str
|
|
2006
|
-
) -> str:
|
|
2007
|
-
return (
|
|
2008
|
-
" @pytest.mark.asyncio\n"
|
|
2009
|
-
f" async def test_{class_name.lower()}_{method_name}(self, {class_name.lower()}_instance):\n"
|
|
2010
|
-
f' """Test {class_name}.{method_name} method."""\n'
|
|
2011
|
-
" try:\n"
|
|
2012
|
-
f" method = getattr({class_name.lower()}_instance, "
|
|
2013
|
-
f'"{method_name}", None)\n'
|
|
2014
|
-
f" assert method is not None, "
|
|
2015
|
-
f'f"Method {method_name} should exist"\n'
|
|
2016
|
-
"\n"
|
|
2017
|
-
" if asyncio.iscoroutinefunction(method):\n"
|
|
2018
|
-
" result = await method()\n"
|
|
2019
|
-
" else:\n"
|
|
2020
|
-
" result = method()\n"
|
|
2021
|
-
"\n"
|
|
2022
|
-
" assert result is not None or result is None\n"
|
|
2023
|
-
" except (TypeError, NotImplementedError):\n"
|
|
2024
|
-
f' pytest.skip(f"Method {method_name} requires specific arguments")\n'
|
|
2025
|
-
" except Exception as e:\n"
|
|
2026
|
-
f' pytest.fail(f"Unexpected error in {method_name}: {{e}}")'
|
|
2027
|
-
)
|
|
2028
|
-
|
|
2029
|
-
def _generate_async_method_test(self, class_name: str, method_name: str) -> str:
|
|
2030
|
-
return (
|
|
2031
|
-
" @pytest.mark.asyncio\n"
|
|
2032
|
-
f" async def test_{class_name.lower()}_{method_name}(self, {class_name.lower()}_instance):\n"
|
|
2033
|
-
f' """Test {class_name}.{method_name} method."""\n'
|
|
2034
|
-
" try:\n"
|
|
2035
|
-
f" method = getattr({class_name.lower()}_instance, "
|
|
2036
|
-
f'"{method_name}", None)\n'
|
|
2037
|
-
f" assert method is not None, "
|
|
2038
|
-
f'f"Method {method_name} should exist"\n'
|
|
2039
|
-
"\n"
|
|
2040
|
-
" if asyncio.iscoroutinefunction(method):\n"
|
|
2041
|
-
" result = await method()\n"
|
|
2042
|
-
" else:\n"
|
|
2043
|
-
" result = method()\n"
|
|
2044
|
-
"\n"
|
|
2045
|
-
" assert result is not None or result is None\n"
|
|
2046
|
-
"\n"
|
|
2047
|
-
" except (TypeError, NotImplementedError):\n"
|
|
2048
|
-
f' pytest.skip(f"Method {method_name} requires specific arguments or implementation")\n'
|
|
2049
|
-
" except Exception as e:\n"
|
|
2050
|
-
f' pytest.fail(f"Unexpected error in {method_name}: {{e}}")'
|
|
2051
|
-
)
|
|
2052
|
-
|
|
2053
|
-
def _generate_default_method_test(self, class_name: str, method_name: str) -> str:
|
|
2054
|
-
return (
|
|
2055
|
-
f" def test_{class_name.lower()}_{method_name}(self, {class_name.lower()}_instance):\n"
|
|
2056
|
-
f' """Test {class_name}.{method_name} method."""\n'
|
|
2057
|
-
" try:\n"
|
|
2058
|
-
f" method = getattr({class_name.lower()}_instance, "
|
|
2059
|
-
f'"{method_name}", None)\n'
|
|
2060
|
-
f" assert method is not None, "
|
|
2061
|
-
f'f"Method {method_name} should exist"\n'
|
|
2062
|
-
"\n"
|
|
2063
|
-
" result = method()\n"
|
|
2064
|
-
" assert result is not None or result is None\n"
|
|
2065
|
-
"\n"
|
|
2066
|
-
" except (TypeError, NotImplementedError):\n"
|
|
2067
|
-
f' pytest.skip(f"Method {method_name} requires specific arguments or implementation")\n'
|
|
2068
|
-
" except Exception as e:\n"
|
|
2069
|
-
f' pytest.fail(f"Unexpected error in {method_name}: {{e}}")'
|
|
2070
|
-
)
|
|
2071
|
-
|
|
2072
|
-
async def _generate_class_property_test(
|
|
2073
|
-
self, cls: dict[str, Any], module_category: str
|
|
2074
|
-
) -> str:
|
|
2075
|
-
class_name = cls["name"]
|
|
2076
|
-
|
|
2077
|
-
if module_category not in ("service", "manager", "agent"):
|
|
2078
|
-
return ""
|
|
2079
|
-
|
|
2080
|
-
test_template = (
|
|
2081
|
-
f" def test_{class_name.lower()}_properties(self, {class_name.lower()}_instance):\n"
|
|
2082
|
-
f' """Test {class_name} properties and attributes."""\n'
|
|
2083
|
-
"\n"
|
|
2084
|
-
f" assert hasattr({class_name.lower()}_instance, '__dict__') or \\\n"
|
|
2085
|
-
f" hasattr({class_name.lower()}_instance, '__slots__')\n"
|
|
2086
|
-
"\n"
|
|
2087
|
-
f" str_repr = str({class_name.lower()}_instance)\n"
|
|
2088
|
-
" assert len(str_repr) > 0\n"
|
|
2089
|
-
f' assert "{class_name}" in str_repr or "{class_name.lower()}" in \\\n'
|
|
2090
|
-
" str_repr.lower()"
|
|
2091
|
-
)
|
|
2092
|
-
|
|
2093
|
-
return test_template
|
|
2094
|
-
|
|
2095
|
-
async def _generate_integration_tests(
|
|
2096
|
-
self,
|
|
2097
|
-
module_file: Path,
|
|
2098
|
-
functions: list[dict[str, Any]],
|
|
2099
|
-
classes: list[dict[str, Any]],
|
|
2100
|
-
module_category: str,
|
|
2101
|
-
) -> str:
|
|
2102
|
-
if module_category not in ("service", "manager", "core"):
|
|
2103
|
-
return ""
|
|
2104
|
-
|
|
2105
|
-
if len(functions) < 3 and len(classes) < 2:
|
|
2106
|
-
return ""
|
|
2107
|
-
|
|
2108
|
-
integration_tests = (
|
|
2109
|
-
"\n\n"
|
|
2110
|
-
" @pytest.mark.integration\n"
|
|
2111
|
-
f" def test_{module_file.stem}_integration(self):\n"
|
|
2112
|
-
f' """Integration test for {module_file.stem} module functionality."""\n'
|
|
2113
|
-
"\n"
|
|
2114
|
-
' pytest.skip("Integration test needs manual implementation")\n'
|
|
2115
|
-
"\n"
|
|
2116
|
-
" @pytest.mark.integration\n"
|
|
2117
|
-
" @pytest.mark.asyncio\n"
|
|
2118
|
-
f" async def test_{module_file.stem}_async_integration(self):\n"
|
|
2119
|
-
f' """Async integration test for {module_file.stem} module."""\n'
|
|
2120
|
-
"\n"
|
|
2121
|
-
' pytest.skip("Async integration test needs manual implementation")\n'
|
|
2122
|
-
"\n"
|
|
2123
|
-
" @pytest.mark.performance\n"
|
|
2124
|
-
f" def test_{module_file.stem}_performance(self):\n"
|
|
2125
|
-
f' """Basic performance test for {module_file.stem} module."""\n'
|
|
2126
|
-
"\n"
|
|
2127
|
-
' pytest.skip("Performance test needs manual implementation")'
|
|
2128
|
-
)
|
|
2129
|
-
|
|
2130
|
-
return integration_tests
|
|
2131
|
-
|
|
2132
|
-
def _generate_default_args(self, args: list[str]) -> str:
|
|
2133
|
-
if not args or args == ["self"]:
|
|
2134
|
-
return ""
|
|
2135
|
-
|
|
2136
|
-
filtered_args = [arg for arg in args if arg != "self"]
|
|
2137
|
-
if not filtered_args:
|
|
2138
|
-
return ""
|
|
2139
|
-
|
|
2140
|
-
placeholders = []
|
|
2141
|
-
for arg in filtered_args:
|
|
2142
|
-
if "path" in arg.lower():
|
|
2143
|
-
placeholders.append('Path("test")')
|
|
2144
|
-
elif "str" in arg.lower() or "name" in arg.lower():
|
|
2145
|
-
placeholders.append('"test"')
|
|
2146
|
-
elif "int" in arg.lower() or "count" in arg.lower():
|
|
2147
|
-
placeholders.append("1")
|
|
2148
|
-
elif "bool" in arg.lower():
|
|
2149
|
-
placeholders.append("True")
|
|
2150
|
-
else:
|
|
2151
|
-
placeholders.append("None")
|
|
2152
|
-
|
|
2153
|
-
return ", ".join(placeholders)
|
|
2154
|
-
|
|
2155
569
|
|
|
2156
570
|
agent_registry.register(TestCreationAgent)
|
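The removed block above is the agent's inline test-template machinery; the two added lines replace it with a single delegation to a `self._template_generator` collaborator. The sketch below is only an illustration of that delegation pattern, not the implementation shipped in 0.45.2: the diff confirms just the call `self._template_generator.generate_test_content(module_file, functions, classes)`, while the `TestTemplateGenerator` class, its constructor wiring, and the stub bodies here are assumed for the example.

```python
# Illustrative sketch only. The generate_test_content(module_file, functions,
# classes) call mirrors the two added lines in the diff; everything else
# (class bodies, constructor wiring) is a hypothetical stand-in.
from pathlib import Path
from typing import Any


class TestTemplateGenerator:
    """Assumed collaborator that builds the generated test-file text."""

    async def generate_test_content(
        self,
        module_file: Path,
        functions: list[dict[str, Any]],
        classes: list[dict[str, Any]],
    ) -> str:
        # Minimal placeholder output: a header plus skipped stubs per symbol.
        header = f"# Auto-generated tests for {module_file.stem}\nimport pytest\n"
        func_stubs = "".join(
            f"\n\ndef test_{f['name']}_basic():\n    pytest.skip('implement me')\n"
            for f in functions
        )
        class_stubs = "".join(
            f"\n\nclass Test{c['name']}:\n    def test_placeholder(self):\n"
            f"        pytest.skip('implement me')\n"
            for c in classes
        )
        return header + func_stubs + class_stubs


class TestCreationAgentSketch:
    """Slimmed-down stand-in for the agent after the refactor."""

    def __init__(self) -> None:
        # In the real package this is presumably injected or constructed
        # elsewhere; here it is created directly for the sketch.
        self._template_generator = TestTemplateGenerator()

    async def _generate_test_content(
        self,
        module_file: Path,
        functions: list[dict[str, Any]],
        classes: list[dict[str, Any]],
    ) -> str:
        # This mirrors the two "+" lines in the diff above.
        return await self._template_generator.generate_test_content(
            module_file, functions, classes
        )
```

The net effect visible in the diff is a design change rather than a behavioral tweak: roughly 890 lines of template assembly, placeholder-argument heuristics, and fixture generation move out of `TestCreationAgent` into a dedicated generator, leaving the agent with a thin delegation call.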