tapps-agents 3.5.41__py3-none-any.whl → 3.6.0__py3-none-any.whl
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.
- tapps_agents/__init__.py +2 -2
- tapps_agents/agents/__init__.py +22 -22
- tapps_agents/agents/analyst/__init__.py +5 -5
- tapps_agents/agents/architect/__init__.py +5 -5
- tapps_agents/agents/architect/agent.py +1033 -1033
- tapps_agents/agents/architect/pattern_detector.py +75 -75
- tapps_agents/agents/cleanup/__init__.py +7 -7
- tapps_agents/agents/cleanup/agent.py +445 -445
- tapps_agents/agents/debugger/__init__.py +7 -7
- tapps_agents/agents/debugger/agent.py +310 -310
- tapps_agents/agents/debugger/error_analyzer.py +437 -437
- tapps_agents/agents/designer/__init__.py +5 -5
- tapps_agents/agents/designer/agent.py +786 -786
- tapps_agents/agents/designer/visual_designer.py +638 -638
- tapps_agents/agents/documenter/__init__.py +7 -7
- tapps_agents/agents/documenter/agent.py +531 -531
- tapps_agents/agents/documenter/doc_generator.py +472 -472
- tapps_agents/agents/documenter/doc_validator.py +393 -393
- tapps_agents/agents/documenter/framework_doc_updater.py +493 -493
- tapps_agents/agents/enhancer/__init__.py +7 -7
- tapps_agents/agents/evaluator/__init__.py +7 -7
- tapps_agents/agents/evaluator/agent.py +443 -443
- tapps_agents/agents/evaluator/priority_evaluator.py +641 -641
- tapps_agents/agents/evaluator/quality_analyzer.py +147 -147
- tapps_agents/agents/evaluator/report_generator.py +344 -344
- tapps_agents/agents/evaluator/usage_analyzer.py +192 -192
- tapps_agents/agents/evaluator/workflow_analyzer.py +189 -189
- tapps_agents/agents/implementer/__init__.py +7 -7
- tapps_agents/agents/implementer/agent.py +798 -798
- tapps_agents/agents/implementer/auto_fix.py +1119 -1119
- tapps_agents/agents/implementer/code_generator.py +73 -73
- tapps_agents/agents/improver/__init__.py +1 -1
- tapps_agents/agents/improver/agent.py +753 -753
- tapps_agents/agents/ops/__init__.py +1 -1
- tapps_agents/agents/ops/agent.py +619 -619
- tapps_agents/agents/ops/dependency_analyzer.py +600 -600
- tapps_agents/agents/orchestrator/__init__.py +5 -5
- tapps_agents/agents/orchestrator/agent.py +522 -522
- tapps_agents/agents/planner/__init__.py +7 -7
- tapps_agents/agents/planner/agent.py +1127 -1127
- tapps_agents/agents/reviewer/__init__.py +24 -24
- tapps_agents/agents/reviewer/agent.py +3513 -3513
- tapps_agents/agents/reviewer/aggregator.py +213 -213
- tapps_agents/agents/reviewer/batch_review.py +448 -448
- tapps_agents/agents/reviewer/cache.py +443 -443
- tapps_agents/agents/reviewer/context7_enhancer.py +630 -630
- tapps_agents/agents/reviewer/context_detector.py +203 -203
- tapps_agents/agents/reviewer/docker_compose_validator.py +158 -158
- tapps_agents/agents/reviewer/dockerfile_validator.py +176 -176
- tapps_agents/agents/reviewer/error_handling.py +126 -126
- tapps_agents/agents/reviewer/feedback_generator.py +490 -490
- tapps_agents/agents/reviewer/influxdb_validator.py +316 -316
- tapps_agents/agents/reviewer/issue_tracking.py +169 -169
- tapps_agents/agents/reviewer/library_detector.py +295 -295
- tapps_agents/agents/reviewer/library_patterns.py +268 -268
- tapps_agents/agents/reviewer/maintainability_scorer.py +593 -593
- tapps_agents/agents/reviewer/metric_strategies.py +276 -276
- tapps_agents/agents/reviewer/mqtt_validator.py +160 -160
- tapps_agents/agents/reviewer/output_enhancer.py +105 -105
- tapps_agents/agents/reviewer/pattern_detector.py +241 -241
- tapps_agents/agents/reviewer/performance_scorer.py +357 -357
- tapps_agents/agents/reviewer/phased_review.py +516 -516
- tapps_agents/agents/reviewer/progressive_review.py +435 -435
- tapps_agents/agents/reviewer/react_scorer.py +331 -331
- tapps_agents/agents/reviewer/score_constants.py +228 -228
- tapps_agents/agents/reviewer/score_validator.py +507 -507
- tapps_agents/agents/reviewer/scorer_registry.py +373 -373
- tapps_agents/agents/reviewer/scoring.py +1566 -1566
- tapps_agents/agents/reviewer/service_discovery.py +534 -534
- tapps_agents/agents/reviewer/tools/__init__.py +41 -41
- tapps_agents/agents/reviewer/tools/parallel_executor.py +581 -581
- tapps_agents/agents/reviewer/tools/ruff_grouping.py +250 -250
- tapps_agents/agents/reviewer/tools/scoped_mypy.py +284 -284
- tapps_agents/agents/reviewer/typescript_scorer.py +1142 -1142
- tapps_agents/agents/reviewer/validation.py +208 -208
- tapps_agents/agents/reviewer/websocket_validator.py +132 -132
- tapps_agents/agents/tester/__init__.py +7 -7
- tapps_agents/agents/tester/accessibility_auditor.py +309 -309
- tapps_agents/agents/tester/agent.py +1080 -1080
- tapps_agents/agents/tester/batch_generator.py +54 -54
- tapps_agents/agents/tester/context_learner.py +51 -51
- tapps_agents/agents/tester/coverage_analyzer.py +386 -386
- tapps_agents/agents/tester/coverage_test_generator.py +290 -290
- tapps_agents/agents/tester/debug_enhancer.py +238 -238
- tapps_agents/agents/tester/device_emulator.py +241 -241
- tapps_agents/agents/tester/integration_generator.py +62 -62
- tapps_agents/agents/tester/network_recorder.py +300 -300
- tapps_agents/agents/tester/performance_monitor.py +320 -320
- tapps_agents/agents/tester/test_fixer.py +316 -316
- tapps_agents/agents/tester/test_generator.py +632 -632
- tapps_agents/agents/tester/trace_manager.py +234 -234
- tapps_agents/agents/tester/visual_regression.py +291 -291
- tapps_agents/analysis/pattern_detector.py +36 -36
- tapps_agents/beads/hydration.py +213 -213
- tapps_agents/beads/parse.py +32 -32
- tapps_agents/beads/specs.py +206 -206
- tapps_agents/cli/__init__.py +9 -9
- tapps_agents/cli/__main__.py +8 -8
- tapps_agents/cli/base.py +478 -478
- tapps_agents/cli/command_classifier.py +72 -72
- tapps_agents/cli/commands/__init__.py +2 -2
- tapps_agents/cli/commands/analyst.py +173 -173
- tapps_agents/cli/commands/architect.py +109 -109
- tapps_agents/cli/commands/cleanup_agent.py +92 -92
- tapps_agents/cli/commands/common.py +126 -126
- tapps_agents/cli/commands/debugger.py +90 -90
- tapps_agents/cli/commands/designer.py +112 -112
- tapps_agents/cli/commands/documenter.py +136 -136
- tapps_agents/cli/commands/enhancer.py +110 -110
- tapps_agents/cli/commands/evaluator.py +255 -255
- tapps_agents/cli/commands/health.py +665 -665
- tapps_agents/cli/commands/implementer.py +301 -301
- tapps_agents/cli/commands/improver.py +91 -91
- tapps_agents/cli/commands/knowledge.py +111 -111
- tapps_agents/cli/commands/learning.py +172 -172
- tapps_agents/cli/commands/observability.py +283 -283
- tapps_agents/cli/commands/ops.py +135 -135
- tapps_agents/cli/commands/orchestrator.py +116 -116
- tapps_agents/cli/commands/planner.py +237 -237
- tapps_agents/cli/commands/reviewer.py +1872 -1872
- tapps_agents/cli/commands/status.py +285 -285
- tapps_agents/cli/commands/task.py +227 -227
- tapps_agents/cli/commands/tester.py +191 -191
- tapps_agents/cli/commands/top_level.py +3586 -3586
- tapps_agents/cli/feedback.py +936 -936
- tapps_agents/cli/formatters.py +608 -608
- tapps_agents/cli/help/__init__.py +7 -7
- tapps_agents/cli/help/static_help.py +425 -425
- tapps_agents/cli/network_detection.py +110 -110
- tapps_agents/cli/output_compactor.py +274 -274
- tapps_agents/cli/parsers/__init__.py +2 -2
- tapps_agents/cli/parsers/analyst.py +186 -186
- tapps_agents/cli/parsers/architect.py +167 -167
- tapps_agents/cli/parsers/cleanup_agent.py +228 -228
- tapps_agents/cli/parsers/debugger.py +116 -116
- tapps_agents/cli/parsers/designer.py +182 -182
- tapps_agents/cli/parsers/documenter.py +134 -134
- tapps_agents/cli/parsers/enhancer.py +113 -113
- tapps_agents/cli/parsers/evaluator.py +213 -213
- tapps_agents/cli/parsers/implementer.py +168 -168
- tapps_agents/cli/parsers/improver.py +132 -132
- tapps_agents/cli/parsers/ops.py +159 -159
- tapps_agents/cli/parsers/orchestrator.py +98 -98
- tapps_agents/cli/parsers/planner.py +145 -145
- tapps_agents/cli/parsers/reviewer.py +462 -462
- tapps_agents/cli/parsers/tester.py +124 -124
- tapps_agents/cli/progress_heartbeat.py +254 -254
- tapps_agents/cli/streaming_progress.py +336 -336
- tapps_agents/cli/utils/__init__.py +6 -6
- tapps_agents/cli/utils/agent_lifecycle.py +48 -48
- tapps_agents/cli/utils/error_formatter.py +82 -82
- tapps_agents/cli/utils/error_recovery.py +188 -188
- tapps_agents/cli/utils/output_handler.py +59 -59
- tapps_agents/cli/utils/prompt_enhancer.py +319 -319
- tapps_agents/cli/validators/__init__.py +9 -9
- tapps_agents/cli/validators/command_validator.py +81 -81
- tapps_agents/context7/__init__.py +112 -112
- tapps_agents/context7/agent_integration.py +869 -869
- tapps_agents/context7/analytics.py +382 -382
- tapps_agents/context7/analytics_dashboard.py +299 -299
- tapps_agents/context7/async_cache.py +681 -681
- tapps_agents/context7/backup_client.py +958 -958
- tapps_agents/context7/cache_locking.py +194 -194
- tapps_agents/context7/cache_metadata.py +214 -214
- tapps_agents/context7/cache_prewarm.py +488 -488
- tapps_agents/context7/cache_structure.py +168 -168
- tapps_agents/context7/cache_warming.py +604 -604
- tapps_agents/context7/circuit_breaker.py +376 -376
- tapps_agents/context7/cleanup.py +461 -461
- tapps_agents/context7/commands.py +858 -858
- tapps_agents/context7/credential_validation.py +276 -276
- tapps_agents/context7/cross_reference_resolver.py +168 -168
- tapps_agents/context7/cross_references.py +424 -424
- tapps_agents/context7/doc_manager.py +225 -225
- tapps_agents/context7/fuzzy_matcher.py +369 -369
- tapps_agents/context7/kb_cache.py +404 -404
- tapps_agents/context7/language_detector.py +219 -219
- tapps_agents/context7/library_detector.py +725 -725
- tapps_agents/context7/lookup.py +738 -738
- tapps_agents/context7/metadata.py +258 -258
- tapps_agents/context7/refresh_queue.py +300 -300
- tapps_agents/context7/security.py +373 -373
- tapps_agents/context7/staleness_policies.py +278 -278
- tapps_agents/context7/tiles_integration.py +47 -47
- tapps_agents/continuous_bug_fix/__init__.py +20 -20
- tapps_agents/continuous_bug_fix/bug_finder.py +306 -306
- tapps_agents/continuous_bug_fix/bug_fix_coordinator.py +177 -177
- tapps_agents/continuous_bug_fix/commit_manager.py +178 -178
- tapps_agents/continuous_bug_fix/continuous_bug_fixer.py +322 -322
- tapps_agents/continuous_bug_fix/proactive_bug_finder.py +285 -285
- tapps_agents/core/__init__.py +298 -298
- tapps_agents/core/adaptive_cache_config.py +432 -432
- tapps_agents/core/agent_base.py +647 -647
- tapps_agents/core/agent_cache.py +466 -466
- tapps_agents/core/agent_learning.py +1865 -1865
- tapps_agents/core/analytics_dashboard.py +563 -563
- tapps_agents/core/analytics_enhancements.py +597 -597
- tapps_agents/core/anonymization.py +274 -274
- tapps_agents/core/artifact_context_builder.py +293 -0
- tapps_agents/core/ast_parser.py +228 -228
- tapps_agents/core/async_file_ops.py +402 -402
- tapps_agents/core/best_practice_consultant.py +299 -299
- tapps_agents/core/brownfield_analyzer.py +299 -299
- tapps_agents/core/brownfield_review.py +541 -541
- tapps_agents/core/browser_controller.py +513 -513
- tapps_agents/core/capability_registry.py +418 -418
- tapps_agents/core/change_impact_analyzer.py +190 -190
- tapps_agents/core/checkpoint_manager.py +377 -377
- tapps_agents/core/code_generator.py +329 -329
- tapps_agents/core/code_validator.py +276 -276
- tapps_agents/core/command_registry.py +327 -327
- tapps_agents/core/config.py +33 -0
- tapps_agents/core/context_gathering/__init__.py +2 -2
- tapps_agents/core/context_gathering/repository_explorer.py +28 -28
- tapps_agents/core/context_intelligence/__init__.py +2 -2
- tapps_agents/core/context_intelligence/relevance_scorer.py +24 -24
- tapps_agents/core/context_intelligence/token_budget_manager.py +27 -27
- tapps_agents/core/context_manager.py +240 -240
- tapps_agents/core/cursor_feedback_monitor.py +146 -146
- tapps_agents/core/cursor_verification.py +290 -290
- tapps_agents/core/customization_loader.py +280 -280
- tapps_agents/core/customization_schema.py +260 -260
- tapps_agents/core/customization_template.py +238 -238
- tapps_agents/core/debug_logger.py +124 -124
- tapps_agents/core/design_validator.py +298 -298
- tapps_agents/core/diagram_generator.py +226 -226
- tapps_agents/core/docker_utils.py +232 -232
- tapps_agents/core/document_generator.py +617 -617
- tapps_agents/core/domain_detector.py +30 -30
- tapps_agents/core/error_envelope.py +454 -454
- tapps_agents/core/error_handler.py +270 -270
- tapps_agents/core/estimation_tracker.py +189 -189
- tapps_agents/core/eval_prompt_engine.py +116 -116
- tapps_agents/core/evaluation_base.py +119 -119
- tapps_agents/core/evaluation_models.py +320 -320
- tapps_agents/core/evaluation_orchestrator.py +225 -225
- tapps_agents/core/evaluators/__init__.py +7 -7
- tapps_agents/core/evaluators/architectural_evaluator.py +205 -205
- tapps_agents/core/evaluators/behavioral_evaluator.py +160 -160
- tapps_agents/core/evaluators/performance_profile_evaluator.py +160 -160
- tapps_agents/core/evaluators/security_posture_evaluator.py +148 -148
- tapps_agents/core/evaluators/spec_compliance_evaluator.py +181 -181
- tapps_agents/core/exceptions.py +107 -107
- tapps_agents/core/expert_config_generator.py +293 -293
- tapps_agents/core/export_schema.py +202 -202
- tapps_agents/core/external_feedback_models.py +102 -102
- tapps_agents/core/external_feedback_storage.py +213 -213
- tapps_agents/core/fallback_strategy.py +314 -314
- tapps_agents/core/feedback_analyzer.py +162 -162
- tapps_agents/core/feedback_collector.py +178 -178
- tapps_agents/core/git_operations.py +445 -445
- tapps_agents/core/hardware_profiler.py +151 -151
- tapps_agents/core/instructions.py +324 -324
- tapps_agents/core/io_guardrails.py +69 -69
- tapps_agents/core/issue_manifest.py +249 -249
- tapps_agents/core/issue_schema.py +139 -139
- tapps_agents/core/json_utils.py +128 -128
- tapps_agents/core/knowledge_graph.py +446 -446
- tapps_agents/core/language_detector.py +296 -296
- tapps_agents/core/learning_confidence.py +242 -242
- tapps_agents/core/learning_dashboard.py +246 -246
- tapps_agents/core/learning_decision.py +384 -384
- tapps_agents/core/learning_explainability.py +578 -578
- tapps_agents/core/learning_export.py +287 -287
- tapps_agents/core/learning_integration.py +228 -228
- tapps_agents/core/llm_behavior.py +232 -232
- tapps_agents/core/long_duration_support.py +786 -786
- tapps_agents/core/mcp_setup.py +106 -106
- tapps_agents/core/memory_integration.py +396 -396
- tapps_agents/core/meta_learning.py +666 -666
- tapps_agents/core/module_path_sanitizer.py +199 -199
- tapps_agents/core/multi_agent_orchestrator.py +382 -382
- tapps_agents/core/network_errors.py +125 -125
- tapps_agents/core/nfr_validator.py +336 -336
- tapps_agents/core/offline_mode.py +158 -158
- tapps_agents/core/output_contracts.py +300 -300
- tapps_agents/core/output_formatter.py +300 -300
- tapps_agents/core/path_normalizer.py +174 -174
- tapps_agents/core/path_validator.py +322 -322
- tapps_agents/core/pattern_library.py +250 -250
- tapps_agents/core/performance_benchmark.py +301 -301
- tapps_agents/core/performance_monitor.py +184 -184
- tapps_agents/core/playwright_mcp_controller.py +771 -771
- tapps_agents/core/policy_loader.py +135 -135
- tapps_agents/core/progress.py +166 -166
- tapps_agents/core/project_profile.py +354 -354
- tapps_agents/core/project_type_detector.py +454 -454
- tapps_agents/core/prompt_base.py +223 -223
- tapps_agents/core/prompt_learning/__init__.py +2 -2
- tapps_agents/core/prompt_learning/learning_loop.py +24 -24
- tapps_agents/core/prompt_learning/project_prompt_store.py +25 -25
- tapps_agents/core/prompt_learning/skills_prompt_analyzer.py +35 -35
- tapps_agents/core/prompt_optimization/__init__.py +6 -6
- tapps_agents/core/prompt_optimization/ab_tester.py +114 -114
- tapps_agents/core/prompt_optimization/correlation_analyzer.py +160 -160
- tapps_agents/core/prompt_optimization/progressive_refiner.py +129 -129
- tapps_agents/core/prompt_optimization/prompt_library.py +37 -37
- tapps_agents/core/requirements_evaluator.py +431 -431
- tapps_agents/core/resource_aware_executor.py +449 -449
- tapps_agents/core/resource_monitor.py +343 -343
- tapps_agents/core/resume_handler.py +298 -298
- tapps_agents/core/retry_handler.py +197 -197
- tapps_agents/core/review_checklists.py +479 -479
- tapps_agents/core/role_loader.py +201 -201
- tapps_agents/core/role_template_loader.py +201 -201
- tapps_agents/core/runtime_mode.py +60 -60
- tapps_agents/core/security_scanner.py +342 -342
- tapps_agents/core/skill_agent_registry.py +194 -194
- tapps_agents/core/skill_integration.py +208 -208
- tapps_agents/core/skill_loader.py +492 -492
- tapps_agents/core/skill_template.py +341 -341
- tapps_agents/core/skill_validator.py +478 -478
- tapps_agents/core/stack_analyzer.py +35 -35
- tapps_agents/core/startup.py +174 -174
- tapps_agents/core/storage_manager.py +397 -397
- tapps_agents/core/storage_models.py +166 -166
- tapps_agents/core/story_evaluator.py +410 -410
- tapps_agents/core/subprocess_utils.py +170 -170
- tapps_agents/core/task_duration.py +296 -296
- tapps_agents/core/task_memory.py +582 -582
- tapps_agents/core/task_state.py +226 -226
- tapps_agents/core/tech_stack_priorities.py +208 -208
- tapps_agents/core/temp_directory.py +194 -194
- tapps_agents/core/template_merger.py +600 -600
- tapps_agents/core/template_selector.py +280 -280
- tapps_agents/core/test_generator.py +286 -286
- tapps_agents/core/tiered_context.py +253 -253
- tapps_agents/core/token_monitor.py +345 -345
- tapps_agents/core/traceability.py +254 -254
- tapps_agents/core/trajectory_tracker.py +50 -50
- tapps_agents/core/unicode_safe.py +143 -143
- tapps_agents/core/unified_cache_config.py +170 -170
- tapps_agents/core/unified_state.py +324 -324
- tapps_agents/core/validate_cursor_setup.py +237 -237
- tapps_agents/core/validation_registry.py +136 -136
- tapps_agents/core/validators/__init__.py +4 -4
- tapps_agents/core/validators/python_validator.py +87 -87
- tapps_agents/core/verification_agent.py +90 -90
- tapps_agents/core/visual_feedback.py +644 -644
- tapps_agents/core/workflow_validator.py +197 -197
- tapps_agents/core/worktree.py +367 -367
- tapps_agents/docker/__init__.py +10 -10
- tapps_agents/docker/analyzer.py +186 -186
- tapps_agents/docker/debugger.py +229 -229
- tapps_agents/docker/error_patterns.py +216 -216
- tapps_agents/epic/__init__.py +22 -22
- tapps_agents/epic/beads_sync.py +115 -115
- tapps_agents/epic/markdown_sync.py +105 -105
- tapps_agents/epic/models.py +96 -96
- tapps_agents/experts/__init__.py +163 -163
- tapps_agents/experts/agent_integration.py +243 -243
- tapps_agents/experts/auto_generator.py +331 -331
- tapps_agents/experts/base_expert.py +536 -536
- tapps_agents/experts/builtin_registry.py +261 -261
- tapps_agents/experts/business_metrics.py +565 -565
- tapps_agents/experts/cache.py +266 -266
- tapps_agents/experts/confidence_breakdown.py +306 -306
- tapps_agents/experts/confidence_calculator.py +336 -336
- tapps_agents/experts/confidence_metrics.py +236 -236
- tapps_agents/experts/domain_config.py +311 -311
- tapps_agents/experts/domain_detector.py +550 -550
- tapps_agents/experts/domain_utils.py +84 -84
- tapps_agents/experts/expert_config.py +113 -113
- tapps_agents/experts/expert_engine.py +465 -465
- tapps_agents/experts/expert_registry.py +744 -744
- tapps_agents/experts/expert_synthesizer.py +70 -70
- tapps_agents/experts/governance.py +197 -197
- tapps_agents/experts/history_logger.py +312 -312
- tapps_agents/experts/knowledge/README.md +180 -180
- tapps_agents/experts/knowledge/accessibility/accessible-forms.md +331 -331
- tapps_agents/experts/knowledge/accessibility/aria-patterns.md +344 -344
- tapps_agents/experts/knowledge/accessibility/color-contrast.md +285 -285
- tapps_agents/experts/knowledge/accessibility/keyboard-navigation.md +332 -332
- tapps_agents/experts/knowledge/accessibility/screen-readers.md +282 -282
- tapps_agents/experts/knowledge/accessibility/semantic-html.md +355 -355
- tapps_agents/experts/knowledge/accessibility/testing-accessibility.md +369 -369
- tapps_agents/experts/knowledge/accessibility/wcag-2.1.md +296 -296
- tapps_agents/experts/knowledge/accessibility/wcag-2.2.md +211 -211
- tapps_agents/experts/knowledge/agent-learning/best-practices.md +715 -715
- tapps_agents/experts/knowledge/agent-learning/pattern-extraction.md +282 -282
- tapps_agents/experts/knowledge/agent-learning/prompt-optimization.md +320 -320
- tapps_agents/experts/knowledge/ai-frameworks/model-optimization.md +90 -90
- tapps_agents/experts/knowledge/ai-frameworks/openvino-patterns.md +260 -260
- tapps_agents/experts/knowledge/api-design-integration/api-gateway-patterns.md +309 -309
- tapps_agents/experts/knowledge/api-design-integration/api-security-patterns.md +521 -521
- tapps_agents/experts/knowledge/api-design-integration/api-versioning.md +421 -421
- tapps_agents/experts/knowledge/api-design-integration/async-protocol-patterns.md +61 -61
- tapps_agents/experts/knowledge/api-design-integration/contract-testing.md +221 -221
- tapps_agents/experts/knowledge/api-design-integration/external-api-integration.md +489 -489
- tapps_agents/experts/knowledge/api-design-integration/fastapi-patterns.md +360 -360
- tapps_agents/experts/knowledge/api-design-integration/fastapi-testing.md +262 -262
- tapps_agents/experts/knowledge/api-design-integration/graphql-patterns.md +582 -582
- tapps_agents/experts/knowledge/api-design-integration/grpc-best-practices.md +499 -499
- tapps_agents/experts/knowledge/api-design-integration/mqtt-patterns.md +455 -455
- tapps_agents/experts/knowledge/api-design-integration/rate-limiting.md +507 -507
- tapps_agents/experts/knowledge/api-design-integration/restful-api-design.md +618 -618
- tapps_agents/experts/knowledge/api-design-integration/websocket-patterns.md +480 -480
- tapps_agents/experts/knowledge/cloud-infrastructure/cloud-native-patterns.md +175 -175
- tapps_agents/experts/knowledge/cloud-infrastructure/container-health-checks.md +261 -261
- tapps_agents/experts/knowledge/cloud-infrastructure/containerization.md +222 -222
- tapps_agents/experts/knowledge/cloud-infrastructure/cost-optimization.md +122 -122
- tapps_agents/experts/knowledge/cloud-infrastructure/disaster-recovery.md +153 -153
- tapps_agents/experts/knowledge/cloud-infrastructure/dockerfile-patterns.md +285 -285
- tapps_agents/experts/knowledge/cloud-infrastructure/infrastructure-as-code.md +187 -187
- tapps_agents/experts/knowledge/cloud-infrastructure/kubernetes-patterns.md +253 -253
- tapps_agents/experts/knowledge/cloud-infrastructure/multi-cloud-strategies.md +155 -155
- tapps_agents/experts/knowledge/cloud-infrastructure/serverless-architecture.md +200 -200
- tapps_agents/experts/knowledge/code-quality-analysis/README.md +16 -16
- tapps_agents/experts/knowledge/code-quality-analysis/code-metrics.md +137 -137
- tapps_agents/experts/knowledge/code-quality-analysis/complexity-analysis.md +181 -181
- tapps_agents/experts/knowledge/code-quality-analysis/technical-debt-patterns.md +191 -191
- tapps_agents/experts/knowledge/data-privacy-compliance/anonymization.md +313 -313
- tapps_agents/experts/knowledge/data-privacy-compliance/ccpa.md +255 -255
- tapps_agents/experts/knowledge/data-privacy-compliance/consent-management.md +282 -282
- tapps_agents/experts/knowledge/data-privacy-compliance/data-minimization.md +275 -275
- tapps_agents/experts/knowledge/data-privacy-compliance/data-retention.md +297 -297
- tapps_agents/experts/knowledge/data-privacy-compliance/data-subject-rights.md +383 -383
- tapps_agents/experts/knowledge/data-privacy-compliance/encryption-privacy.md +285 -285
- tapps_agents/experts/knowledge/data-privacy-compliance/gdpr.md +344 -344
- tapps_agents/experts/knowledge/data-privacy-compliance/hipaa.md +385 -385
- tapps_agents/experts/knowledge/data-privacy-compliance/privacy-by-design.md +280 -280
- tapps_agents/experts/knowledge/database-data-management/acid-vs-cap.md +164 -164
- tapps_agents/experts/knowledge/database-data-management/backup-and-recovery.md +182 -182
- tapps_agents/experts/knowledge/database-data-management/data-modeling.md +172 -172
- tapps_agents/experts/knowledge/database-data-management/database-design.md +187 -187
- tapps_agents/experts/knowledge/database-data-management/flux-query-optimization.md +342 -342
- tapps_agents/experts/knowledge/database-data-management/influxdb-connection-patterns.md +432 -432
- tapps_agents/experts/knowledge/database-data-management/influxdb-patterns.md +442 -442
- tapps_agents/experts/knowledge/database-data-management/migration-strategies.md +216 -216
- tapps_agents/experts/knowledge/database-data-management/nosql-patterns.md +259 -259
- tapps_agents/experts/knowledge/database-data-management/scalability-patterns.md +184 -184
- tapps_agents/experts/knowledge/database-data-management/sql-optimization.md +175 -175
- tapps_agents/experts/knowledge/database-data-management/time-series-modeling.md +444 -444
- tapps_agents/experts/knowledge/development-workflow/README.md +16 -16
- tapps_agents/experts/knowledge/development-workflow/automation-best-practices.md +216 -216
- tapps_agents/experts/knowledge/development-workflow/build-strategies.md +198 -198
- tapps_agents/experts/knowledge/development-workflow/deployment-patterns.md +205 -205
- tapps_agents/experts/knowledge/development-workflow/git-workflows.md +205 -205
- tapps_agents/experts/knowledge/documentation-knowledge-management/README.md +16 -16
- tapps_agents/experts/knowledge/documentation-knowledge-management/api-documentation-patterns.md +231 -231
- tapps_agents/experts/knowledge/documentation-knowledge-management/documentation-standards.md +191 -191
- tapps_agents/experts/knowledge/documentation-knowledge-management/knowledge-management.md +171 -171
- tapps_agents/experts/knowledge/documentation-knowledge-management/technical-writing-guide.md +192 -192
- tapps_agents/experts/knowledge/observability-monitoring/alerting-patterns.md +461 -461
- tapps_agents/experts/knowledge/observability-monitoring/apm-tools.md +459 -459
- tapps_agents/experts/knowledge/observability-monitoring/distributed-tracing.md +367 -367
- tapps_agents/experts/knowledge/observability-monitoring/logging-strategies.md +478 -478
- tapps_agents/experts/knowledge/observability-monitoring/metrics-and-monitoring.md +510 -510
- tapps_agents/experts/knowledge/observability-monitoring/observability-best-practices.md +492 -492
- tapps_agents/experts/knowledge/observability-monitoring/open-telemetry.md +573 -573
- tapps_agents/experts/knowledge/observability-monitoring/slo-sli-sla.md +419 -419
- tapps_agents/experts/knowledge/performance/anti-patterns.md +284 -284
- tapps_agents/experts/knowledge/performance/api-performance.md +256 -256
- tapps_agents/experts/knowledge/performance/caching.md +327 -327
- tapps_agents/experts/knowledge/performance/database-performance.md +252 -252
- tapps_agents/experts/knowledge/performance/optimization-patterns.md +327 -327
- tapps_agents/experts/knowledge/performance/profiling.md +297 -297
- tapps_agents/experts/knowledge/performance/resource-management.md +293 -293
- tapps_agents/experts/knowledge/performance/scalability.md +306 -306
- tapps_agents/experts/knowledge/security/owasp-top10.md +209 -209
- tapps_agents/experts/knowledge/security/secure-coding-practices.md +207 -207
- tapps_agents/experts/knowledge/security/threat-modeling.md +220 -220
- tapps_agents/experts/knowledge/security/vulnerability-patterns.md +342 -342
- tapps_agents/experts/knowledge/software-architecture/docker-compose-patterns.md +314 -314
- tapps_agents/experts/knowledge/software-architecture/microservices-patterns.md +379 -379
- tapps_agents/experts/knowledge/software-architecture/service-communication.md +316 -316
- tapps_agents/experts/knowledge/testing/best-practices.md +310 -310
- tapps_agents/experts/knowledge/testing/coverage-analysis.md +293 -293
- tapps_agents/experts/knowledge/testing/mocking.md +256 -256
- tapps_agents/experts/knowledge/testing/test-automation.md +276 -276
- tapps_agents/experts/knowledge/testing/test-data.md +271 -271
- tapps_agents/experts/knowledge/testing/test-design-patterns.md +280 -280
- tapps_agents/experts/knowledge/testing/test-maintenance.md +236 -236
- tapps_agents/experts/knowledge/testing/test-strategies.md +311 -311
- tapps_agents/experts/knowledge/user-experience/information-architecture.md +325 -325
- tapps_agents/experts/knowledge/user-experience/interaction-design.md +363 -363
- tapps_agents/experts/knowledge/user-experience/prototyping.md +293 -293
- tapps_agents/experts/knowledge/user-experience/usability-heuristics.md +337 -337
- tapps_agents/experts/knowledge/user-experience/usability-testing.md +311 -311
- tapps_agents/experts/knowledge/user-experience/user-journeys.md +296 -296
- tapps_agents/experts/knowledge/user-experience/user-research.md +373 -373
- tapps_agents/experts/knowledge/user-experience/ux-principles.md +340 -340
- tapps_agents/experts/knowledge_freshness.py +321 -321
- tapps_agents/experts/knowledge_ingestion.py +438 -438
- tapps_agents/experts/knowledge_need_detector.py +93 -93
- tapps_agents/experts/knowledge_validator.py +382 -382
- tapps_agents/experts/observability.py +440 -440
- tapps_agents/experts/passive_notifier.py +238 -238
- tapps_agents/experts/proactive_orchestrator.py +32 -32
- tapps_agents/experts/rag_chunker.py +205 -205
- tapps_agents/experts/rag_embedder.py +152 -152
- tapps_agents/experts/rag_evaluation.py +299 -299
- tapps_agents/experts/rag_index.py +303 -303
- tapps_agents/experts/rag_metrics.py +293 -293
- tapps_agents/experts/rag_safety.py +263 -263
- tapps_agents/experts/report_generator.py +296 -296
- tapps_agents/experts/setup_wizard.py +441 -441
- tapps_agents/experts/simple_rag.py +431 -431
- tapps_agents/experts/vector_rag.py +354 -354
- tapps_agents/experts/weight_distributor.py +304 -304
- tapps_agents/health/__init__.py +24 -24
- tapps_agents/health/base.py +75 -75
- tapps_agents/health/checks/__init__.py +22 -22
- tapps_agents/health/checks/automation.py +127 -127
- tapps_agents/health/checks/context7_cache.py +210 -210
- tapps_agents/health/checks/environment.py +116 -116
- tapps_agents/health/checks/execution.py +170 -170
- tapps_agents/health/checks/knowledge_base.py +187 -187
- tapps_agents/health/checks/outcomes.py +324 -324
- tapps_agents/health/collector.py +280 -280
- tapps_agents/health/dashboard.py +137 -137
- tapps_agents/health/metrics.py +151 -151
- tapps_agents/health/orchestrator.py +271 -271
- tapps_agents/health/registry.py +166 -166
- tapps_agents/hooks/__init__.py +33 -33
- tapps_agents/hooks/config.py +140 -140
- tapps_agents/hooks/events.py +135 -135
- tapps_agents/hooks/executor.py +128 -128
- tapps_agents/hooks/manager.py +143 -143
- tapps_agents/integration/__init__.py +8 -8
- tapps_agents/integration/service_integrator.py +121 -121
- tapps_agents/integrations/__init__.py +10 -10
- tapps_agents/integrations/clawdbot.py +525 -525
- tapps_agents/integrations/memory_bridge.py +356 -356
- tapps_agents/mcp/__init__.py +18 -18
- tapps_agents/mcp/gateway.py +112 -112
- tapps_agents/mcp/servers/__init__.py +13 -13
- tapps_agents/mcp/servers/analysis.py +204 -204
- tapps_agents/mcp/servers/context7.py +198 -198
- tapps_agents/mcp/servers/filesystem.py +218 -218
- tapps_agents/mcp/servers/git.py +201 -201
- tapps_agents/mcp/tool_registry.py +115 -115
- tapps_agents/quality/__init__.py +54 -54
- tapps_agents/quality/coverage_analyzer.py +379 -379
- tapps_agents/quality/enforcement.py +82 -82
- tapps_agents/quality/gates/__init__.py +37 -37
- tapps_agents/quality/gates/approval_gate.py +255 -255
- tapps_agents/quality/gates/base.py +84 -84
- tapps_agents/quality/gates/exceptions.py +43 -43
- tapps_agents/quality/gates/policy_gate.py +195 -195
- tapps_agents/quality/gates/registry.py +239 -239
- tapps_agents/quality/gates/security_gate.py +156 -156
- tapps_agents/quality/quality_gates.py +369 -369
- tapps_agents/quality/secret_scanner.py +335 -335
- tapps_agents/session/__init__.py +19 -19
- tapps_agents/session/manager.py +256 -256
- tapps_agents/simple_mode/__init__.py +66 -66
- tapps_agents/simple_mode/agent_contracts.py +357 -357
- tapps_agents/simple_mode/beads_hooks.py +151 -151
- tapps_agents/simple_mode/code_snippet_handler.py +382 -382
- tapps_agents/simple_mode/documentation_manager.py +395 -395
- tapps_agents/simple_mode/documentation_reader.py +187 -187
- tapps_agents/simple_mode/file_inference.py +292 -292
- tapps_agents/simple_mode/framework_change_detector.py +268 -268
- tapps_agents/simple_mode/intent_parser.py +510 -510
- tapps_agents/simple_mode/learning_progression.py +358 -358
- tapps_agents/simple_mode/nl_handler.py +700 -700
- tapps_agents/simple_mode/onboarding.py +253 -253
- tapps_agents/simple_mode/orchestrators/__init__.py +38 -38
- tapps_agents/simple_mode/orchestrators/base.py +185 -185
- tapps_agents/simple_mode/orchestrators/breakdown_orchestrator.py +49 -49
- tapps_agents/simple_mode/orchestrators/brownfield_orchestrator.py +135 -135
- tapps_agents/simple_mode/orchestrators/build_orchestrator.py +2700 -2667
- tapps_agents/simple_mode/orchestrators/deliverable_checklist.py +349 -349
- tapps_agents/simple_mode/orchestrators/enhance_orchestrator.py +53 -53
- tapps_agents/simple_mode/orchestrators/epic_orchestrator.py +122 -122
- tapps_agents/simple_mode/orchestrators/explore_orchestrator.py +184 -184
- tapps_agents/simple_mode/orchestrators/fix_orchestrator.py +723 -723
- tapps_agents/simple_mode/orchestrators/plan_analysis_orchestrator.py +206 -206
- tapps_agents/simple_mode/orchestrators/pr_orchestrator.py +237 -237
- tapps_agents/simple_mode/orchestrators/refactor_orchestrator.py +222 -222
- tapps_agents/simple_mode/orchestrators/requirements_tracer.py +262 -262
- tapps_agents/simple_mode/orchestrators/resume_orchestrator.py +210 -210
- tapps_agents/simple_mode/orchestrators/review_orchestrator.py +161 -161
- tapps_agents/simple_mode/orchestrators/test_orchestrator.py +82 -82
- tapps_agents/simple_mode/output_aggregator.py +340 -340
- tapps_agents/simple_mode/result_formatters.py +598 -598
- tapps_agents/simple_mode/step_dependencies.py +382 -382
- tapps_agents/simple_mode/step_results.py +276 -276
- tapps_agents/simple_mode/streaming.py +388 -388
- tapps_agents/simple_mode/variations.py +129 -129
- tapps_agents/simple_mode/visual_feedback.py +238 -238
- tapps_agents/simple_mode/zero_config.py +274 -274
- tapps_agents/suggestions/__init__.py +8 -8
- tapps_agents/suggestions/inline_suggester.py +52 -52
- tapps_agents/templates/__init__.py +8 -8
- tapps_agents/templates/microservice_generator.py +274 -274
- tapps_agents/utils/env_validator.py +291 -291
- tapps_agents/workflow/__init__.py +171 -171
- tapps_agents/workflow/acceptance_verifier.py +132 -132
- tapps_agents/workflow/agent_handlers/__init__.py +41 -41
- tapps_agents/workflow/agent_handlers/analyst_handler.py +75 -75
- tapps_agents/workflow/agent_handlers/architect_handler.py +107 -107
- tapps_agents/workflow/agent_handlers/base.py +84 -84
- tapps_agents/workflow/agent_handlers/debugger_handler.py +100 -100
- tapps_agents/workflow/agent_handlers/designer_handler.py +110 -110
- tapps_agents/workflow/agent_handlers/documenter_handler.py +94 -94
- tapps_agents/workflow/agent_handlers/implementer_handler.py +235 -235
- tapps_agents/workflow/agent_handlers/ops_handler.py +62 -62
- tapps_agents/workflow/agent_handlers/orchestrator_handler.py +43 -43
- tapps_agents/workflow/agent_handlers/planner_handler.py +98 -98
- tapps_agents/workflow/agent_handlers/registry.py +119 -119
- tapps_agents/workflow/agent_handlers/reviewer_handler.py +119 -119
- tapps_agents/workflow/agent_handlers/tester_handler.py +69 -69
- tapps_agents/workflow/analytics_accessor.py +337 -337
- tapps_agents/workflow/analytics_alerts.py +416 -416
- tapps_agents/workflow/analytics_dashboard_cursor.py +281 -281
- tapps_agents/workflow/analytics_dual_write.py +103 -103
- tapps_agents/workflow/analytics_integration.py +119 -119
- tapps_agents/workflow/analytics_query_parser.py +278 -278
- tapps_agents/workflow/analytics_visualizer.py +259 -259
- tapps_agents/workflow/artifact_helper.py +204 -204
- tapps_agents/workflow/audit_logger.py +263 -263
- tapps_agents/workflow/auto_execution_config.py +340 -340
- tapps_agents/workflow/auto_progression.py +586 -586
- tapps_agents/workflow/branch_cleanup.py +349 -349
- tapps_agents/workflow/checkpoint.py +256 -256
- tapps_agents/workflow/checkpoint_manager.py +178 -178
- tapps_agents/workflow/code_artifact.py +179 -179
- tapps_agents/workflow/common_enums.py +96 -96
- tapps_agents/workflow/confirmation_handler.py +130 -130
- tapps_agents/workflow/context_analyzer.py +222 -222
- tapps_agents/workflow/context_artifact.py +230 -230
- tapps_agents/workflow/cursor_chat.py +94 -94
- tapps_agents/workflow/cursor_executor.py +2337 -2337
- tapps_agents/workflow/cursor_skill_helper.py +516 -516
- tapps_agents/workflow/dependency_resolver.py +244 -244
- tapps_agents/workflow/design_artifact.py +156 -156
- tapps_agents/workflow/detector.py +751 -751
- tapps_agents/workflow/direct_execution_fallback.py +301 -301
- tapps_agents/workflow/docs_artifact.py +168 -168
- tapps_agents/workflow/enforcer.py +389 -389
- tapps_agents/workflow/enhancement_artifact.py +142 -142
- tapps_agents/workflow/error_recovery.py +806 -806
- tapps_agents/workflow/event_bus.py +183 -183
- tapps_agents/workflow/event_log.py +612 -612
- tapps_agents/workflow/events.py +63 -63
- tapps_agents/workflow/exceptions.py +43 -43
- tapps_agents/workflow/execution_graph.py +498 -498
- tapps_agents/workflow/execution_plan.py +126 -126
- tapps_agents/workflow/file_utils.py +186 -186
- tapps_agents/workflow/gate_evaluator.py +182 -182
- tapps_agents/workflow/gate_integration.py +200 -200
- tapps_agents/workflow/graph_visualizer.py +130 -130
- tapps_agents/workflow/health_checker.py +206 -206
- tapps_agents/workflow/logging_helper.py +243 -243
- tapps_agents/workflow/manifest.py +582 -582
- tapps_agents/workflow/marker_writer.py +250 -250
- tapps_agents/workflow/message_formatter.py +188 -188
- tapps_agents/workflow/messaging.py +325 -325
- tapps_agents/workflow/metadata_models.py +91 -91
- tapps_agents/workflow/metrics_integration.py +226 -226
- tapps_agents/workflow/migration_utils.py +116 -116
- tapps_agents/workflow/models.py +148 -148
- tapps_agents/workflow/nlp_config.py +198 -198
- tapps_agents/workflow/nlp_error_handler.py +207 -207
- tapps_agents/workflow/nlp_executor.py +163 -163
- tapps_agents/workflow/nlp_parser.py +528 -528
- tapps_agents/workflow/observability_dashboard.py +451 -451
- tapps_agents/workflow/observer.py +170 -170
- tapps_agents/workflow/ops_artifact.py +257 -257
- tapps_agents/workflow/output_passing.py +214 -214
- tapps_agents/workflow/parallel_executor.py +463 -463
- tapps_agents/workflow/planning_artifact.py +179 -179
- tapps_agents/workflow/preset_loader.py +285 -285
- tapps_agents/workflow/preset_recommender.py +270 -270
- tapps_agents/workflow/progress_logger.py +145 -145
- tapps_agents/workflow/progress_manager.py +303 -303
- tapps_agents/workflow/progress_monitor.py +186 -186
- tapps_agents/workflow/progress_updates.py +423 -423
- tapps_agents/workflow/quality_artifact.py +158 -158
- tapps_agents/workflow/quality_loopback.py +101 -101
- tapps_agents/workflow/recommender.py +387 -387
- tapps_agents/workflow/remediation_loop.py +166 -166
- tapps_agents/workflow/result_aggregator.py +300 -300
- tapps_agents/workflow/review_artifact.py +185 -185
- tapps_agents/workflow/schema_validator.py +522 -522
- tapps_agents/workflow/session_handoff.py +178 -178
- tapps_agents/workflow/skill_invoker.py +648 -648
- tapps_agents/workflow/state_manager.py +756 -756
- tapps_agents/workflow/state_persistence_config.py +331 -331
- tapps_agents/workflow/status_monitor.py +449 -449
- tapps_agents/workflow/step_checkpoint.py +314 -314
- tapps_agents/workflow/step_details.py +201 -201
- tapps_agents/workflow/story_models.py +147 -147
- tapps_agents/workflow/streaming.py +416 -416
- tapps_agents/workflow/suggestion_engine.py +552 -552
- tapps_agents/workflow/testing_artifact.py +186 -186
- tapps_agents/workflow/timeline.py +158 -158
- tapps_agents/workflow/token_integration.py +209 -209
- tapps_agents/workflow/validation.py +217 -217
- tapps_agents/workflow/visual_feedback.py +391 -391
- tapps_agents/workflow/workflow_chain.py +95 -95
- tapps_agents/workflow/workflow_summary.py +219 -219
- tapps_agents/workflow/worktree_manager.py +724 -724
- {tapps_agents-3.5.41.dist-info → tapps_agents-3.6.0.dist-info}/METADATA +672 -672
- tapps_agents-3.6.0.dist-info/RECORD +758 -0
- {tapps_agents-3.5.41.dist-info → tapps_agents-3.6.0.dist-info}/licenses/LICENSE +22 -22
- tapps_agents/health/checks/outcomes.backup_20260204_064058.py +0 -324
- tapps_agents/health/checks/outcomes.backup_20260204_064256.py +0 -324
- tapps_agents/health/checks/outcomes.backup_20260204_064600.py +0 -324
- tapps_agents-3.5.41.dist-info/RECORD +0 -760
- {tapps_agents-3.5.41.dist-info → tapps_agents-3.6.0.dist-info}/WHEEL +0 -0
- {tapps_agents-3.5.41.dist-info → tapps_agents-3.6.0.dist-info}/entry_points.txt +0 -0
- {tapps_agents-3.5.41.dist-info → tapps_agents-3.6.0.dist-info}/top_level.txt +0 -0
|
@@ -1,1872 +1,1872 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Reviewer agent command handlers
|
|
3
|
-
|
|
4
|
-
Performance-optimized with:
|
|
5
|
-
- Result caching for 90%+ speedup on unchanged files
|
|
6
|
-
- Streaming progress for batch operations
|
|
7
|
-
- Async I/O for better concurrency
|
|
8
|
-
"""
|
|
9
|
-
import asyncio
|
|
10
|
-
import sys
|
|
11
|
-
import time
|
|
12
|
-
from pathlib import Path
|
|
13
|
-
from typing import Any
|
|
14
|
-
|
|
15
|
-
from ...agents.reviewer.agent import ReviewerAgent
|
|
16
|
-
from ...agents.reviewer.cache import get_reviewer_cache, ReviewerResultCache
|
|
17
|
-
from ..base import normalize_command, run_async_command
|
|
18
|
-
from ..feedback import get_feedback, ProgressTracker
|
|
19
|
-
from .common import check_result_error, format_json_output
|
|
20
|
-
from ..formatters import format_json, format_markdown, format_html
|
|
21
|
-
|
|
22
|
-
# Use cache version from the cache module for consistency
|
|
23
|
-
REVIEWER_CACHE_VERSION = ReviewerResultCache.CACHE_VERSION
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
def _infer_output_format(output_format: str, output_file: str | None) -> str:
|
|
27
|
-
"""Infer output format from output file extension, otherwise keep explicit format."""
|
|
28
|
-
if not output_file:
|
|
29
|
-
return output_format
|
|
30
|
-
|
|
31
|
-
suffix = Path(output_file).suffix.lower()
|
|
32
|
-
if suffix == ".html":
|
|
33
|
-
return "html"
|
|
34
|
-
if suffix in {".md", ".markdown"}:
|
|
35
|
-
return "markdown"
|
|
36
|
-
if suffix == ".json":
|
|
37
|
-
return "json"
|
|
38
|
-
if suffix in {".txt", ".log"}:
|
|
39
|
-
return "text"
|
|
40
|
-
return output_format
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
def _write_output(output_file: str, content: str) -> None:
|
|
44
|
-
"""Write output content to a file (UTF-8), creating parent directories."""
|
|
45
|
-
output_path = Path(output_file)
|
|
46
|
-
output_path.parent.mkdir(parents=True, exist_ok=True)
|
|
47
|
-
output_path.write_text(content, encoding="utf-8")
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
def _format_text_review_result(result: dict[str, Any]) -> str:
|
|
51
|
-
"""Create a human-readable review summary string."""
|
|
52
|
-
lines: list[str] = []
|
|
53
|
-
file_path = result.get("file", "unknown")
|
|
54
|
-
lines.append(f"Results for: {file_path}")
|
|
55
|
-
|
|
56
|
-
scoring = result.get("scoring") or {}
|
|
57
|
-
if scoring:
|
|
58
|
-
lines.append("")
|
|
59
|
-
lines.append(f"Score: {scoring.get('overall_score', 0.0):.1f}/100")
|
|
60
|
-
lines.append(f" Complexity: {scoring.get('complexity_score', 0.0):.1f}/10")
|
|
61
|
-
lines.append(f" Security: {scoring.get('security_score', 0.0):.1f}/10")
|
|
62
|
-
lines.append(f" Maintainability: {scoring.get('maintainability_score', 0.0):.1f}/10")
|
|
63
|
-
|
|
64
|
-
# P1 Improvement: Show linting and type checking scores with issue counts
|
|
65
|
-
linting_score = scoring.get('linting_score', 0.0)
|
|
66
|
-
linting_count = scoring.get('linting_issue_count', 0)
|
|
67
|
-
type_score = scoring.get('type_checking_score', 0.0)
|
|
68
|
-
type_count = scoring.get('type_issue_count', 0)
|
|
69
|
-
|
|
70
|
-
linting_suffix = f" ({linting_count} issues)" if linting_count > 0 else ""
|
|
71
|
-
type_suffix = f" ({type_count} issues)" if type_count > 0 else ""
|
|
72
|
-
|
|
73
|
-
lines.append(f" Linting: {linting_score:.1f}/10{linting_suffix}")
|
|
74
|
-
lines.append(f" Type Checking: {type_score:.1f}/10{type_suffix}")
|
|
75
|
-
|
|
76
|
-
if "threshold" in result:
|
|
77
|
-
lines.append(f" Threshold: {result.get('threshold')}")
|
|
78
|
-
if "passed" in result:
|
|
79
|
-
lines.append(f" Status: {'Passed' if result.get('passed') else 'Failed'}")
|
|
80
|
-
|
|
81
|
-
# P1 Improvement: Show actual linting issues
|
|
82
|
-
linting_issues = scoring.get('linting_issues', [])
|
|
83
|
-
if linting_issues:
|
|
84
|
-
lines.append("")
|
|
85
|
-
lines.append(f"Linting Issues ({len(linting_issues)}):")
|
|
86
|
-
for issue in linting_issues[:10]: # Show top 10
|
|
87
|
-
code = issue.get('code', '???')
|
|
88
|
-
msg = issue.get('message', 'Unknown issue')
|
|
89
|
-
line = issue.get('line', 0)
|
|
90
|
-
lines.append(f" Line {line}: [{code}] {msg}")
|
|
91
|
-
if len(linting_issues) > 10:
|
|
92
|
-
lines.append(f" ... and {len(linting_issues) - 10} more")
|
|
93
|
-
|
|
94
|
-
# P1 Improvement: Show actual type checking issues
|
|
95
|
-
type_issues = scoring.get('type_issues', [])
|
|
96
|
-
if type_issues:
|
|
97
|
-
lines.append("")
|
|
98
|
-
lines.append(f"Type Issues ({len(type_issues)}):")
|
|
99
|
-
for issue in type_issues[:10]: # Show top 10
|
|
100
|
-
msg = issue.get('message', 'Unknown issue')
|
|
101
|
-
line = issue.get('line', 0)
|
|
102
|
-
error_code = issue.get('error_code', '')
|
|
103
|
-
code_suffix = f" [{error_code}]" if error_code else ""
|
|
104
|
-
lines.append(f" Line {line}: {msg}{code_suffix}")
|
|
105
|
-
if len(type_issues) > 10:
|
|
106
|
-
lines.append(f" ... and {len(type_issues) - 10} more")
|
|
107
|
-
|
|
108
|
-
feedback = result.get("feedback") or {}
|
|
109
|
-
|
|
110
|
-
# Handle feedback structure: could be instruction object or parsed feedback
|
|
111
|
-
feedback_text = None
|
|
112
|
-
feedback_summary = None
|
|
113
|
-
|
|
114
|
-
# Check if feedback is an instruction object (Cursor Skills format)
|
|
115
|
-
if isinstance(feedback, dict):
|
|
116
|
-
if "instruction" in feedback:
|
|
117
|
-
# Extract prompt from instruction as fallback
|
|
118
|
-
instruction = feedback.get("instruction", {})
|
|
119
|
-
feedback_text = instruction.get("prompt", "")
|
|
120
|
-
# Try to get actual feedback if it was executed
|
|
121
|
-
if "summary" in feedback:
|
|
122
|
-
feedback_summary = feedback.get("summary")
|
|
123
|
-
elif "feedback_text" in feedback:
|
|
124
|
-
feedback_text = feedback.get("feedback_text")
|
|
125
|
-
elif "summary" in feedback:
|
|
126
|
-
feedback_summary = feedback.get("summary")
|
|
127
|
-
elif "feedback_text" in feedback:
|
|
128
|
-
feedback_text = feedback.get("feedback_text")
|
|
129
|
-
|
|
130
|
-
# Parse feedback text if available
|
|
131
|
-
if feedback_text and not feedback_summary:
|
|
132
|
-
from ...agents.reviewer.feedback_generator import FeedbackGenerator
|
|
133
|
-
parsed = FeedbackGenerator.parse_feedback_text(feedback_text)
|
|
134
|
-
feedback_summary = parsed.get("summary") or feedback_text[:500]
|
|
135
|
-
|
|
136
|
-
# Display structured feedback with priorities
|
|
137
|
-
if parsed.get("security_concerns") or parsed.get("critical_issues") or parsed.get("improvements"):
|
|
138
|
-
lines.append("")
|
|
139
|
-
lines.append("Feedback:")
|
|
140
|
-
if feedback_summary:
|
|
141
|
-
lines.append(feedback_summary)
|
|
142
|
-
lines.append("")
|
|
143
|
-
|
|
144
|
-
# Security concerns (highest priority)
|
|
145
|
-
if parsed.get("security_concerns"):
|
|
146
|
-
lines.append("🔒 Security Concerns:")
|
|
147
|
-
for concern in parsed["security_concerns"][:5]: # Top 5
|
|
148
|
-
lines.append(f" • {concern}")
|
|
149
|
-
lines.append("")
|
|
150
|
-
|
|
151
|
-
# Critical issues
|
|
152
|
-
if parsed.get("critical_issues"):
|
|
153
|
-
lines.append("⚠️ Critical Issues:")
|
|
154
|
-
for issue in parsed["critical_issues"][:5]: # Top 5
|
|
155
|
-
lines.append(f" • {issue}")
|
|
156
|
-
lines.append("")
|
|
157
|
-
|
|
158
|
-
# Improvements
|
|
159
|
-
if parsed.get("improvements"):
|
|
160
|
-
lines.append("💡 Improvements:")
|
|
161
|
-
for improvement in parsed["improvements"][:5]: # Top 5
|
|
162
|
-
lines.append(f" • {improvement}")
|
|
163
|
-
lines.append("")
|
|
164
|
-
|
|
165
|
-
# Style suggestions (only if no other feedback)
|
|
166
|
-
if not (parsed.get("security_concerns") or parsed.get("critical_issues") or parsed.get("improvements")):
|
|
167
|
-
if parsed.get("style_suggestions"):
|
|
168
|
-
lines.append("📝 Style Suggestions:")
|
|
169
|
-
for suggestion in parsed["style_suggestions"][:5]:
|
|
170
|
-
lines.append(f" • {suggestion}")
|
|
171
|
-
lines.append("")
|
|
172
|
-
else:
|
|
173
|
-
# Fallback: just show summary
|
|
174
|
-
if feedback_summary:
|
|
175
|
-
lines.append("")
|
|
176
|
-
lines.append("Feedback:")
|
|
177
|
-
lines.append(feedback_summary)
|
|
178
|
-
elif feedback_summary:
|
|
179
|
-
# Direct summary available
|
|
180
|
-
lines.append("")
|
|
181
|
-
lines.append("Feedback:")
|
|
182
|
-
lines.append(str(feedback_summary))
|
|
183
|
-
|
|
184
|
-
# Surface quality gate signals if present
|
|
185
|
-
if result.get("quality_gate_blocked"):
|
|
186
|
-
lines.append("")
|
|
187
|
-
lines.append("Quality Gate: BLOCKED")
|
|
188
|
-
|
|
189
|
-
return "\n".join(lines) + "\n"
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
def _format_text_batch_summary(result: dict[str, Any], title: str) -> str:
|
|
193
|
-
"""Create a human-readable batch summary string."""
|
|
194
|
-
lines: list[str] = []
|
|
195
|
-
lines.append(f"{title} Results")
|
|
196
|
-
lines.append("")
|
|
197
|
-
lines.append(f" Total files: {result.get('total', 0)}")
|
|
198
|
-
lines.append(f" Successful: {result.get('successful', 0)}")
|
|
199
|
-
lines.append(f" Failed: {result.get('failed', 0)}")
|
|
200
|
-
|
|
201
|
-
errors = result.get("errors") or []
|
|
202
|
-
if errors:
|
|
203
|
-
lines.append("")
|
|
204
|
-
lines.append("Errors:")
|
|
205
|
-
for err in errors[:25]:
|
|
206
|
-
f = err.get("file", "unknown")
|
|
207
|
-
msg = err.get("error", "unknown error")
|
|
208
|
-
lines.append(f" {f}: {msg}")
|
|
209
|
-
if len(errors) > 25:
|
|
210
|
-
lines.append(f" ... and {len(errors) - 25} more")
|
|
211
|
-
|
|
212
|
-
return "\n".join(lines) + "\n"
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
async def review_command(
|
|
216
|
-
file_path: str | None = None,
|
|
217
|
-
files: list[str] | None = None,
|
|
218
|
-
pattern: str | None = None,
|
|
219
|
-
output_format: str = "json",
|
|
220
|
-
max_workers: int = 4,
|
|
221
|
-
output_file: str | None = None,
|
|
222
|
-
fail_under: float | None = None,
|
|
223
|
-
verbose_output: bool = False,
|
|
224
|
-
):
|
|
225
|
-
"""
|
|
226
|
-
Review code file(s) (supports both *review and review commands).
|
|
227
|
-
|
|
228
|
-
Supports single file (backward compatible) or batch processing.
|
|
229
|
-
"""
|
|
230
|
-
from ..command_classifier import CommandClassifier, CommandNetworkRequirement
|
|
231
|
-
from ..network_detection import NetworkDetector
|
|
232
|
-
from ...core.network_errors import NetworkRequiredError, NetworkOptionalError
|
|
233
|
-
from ..base import handle_network_error
|
|
234
|
-
|
|
235
|
-
feedback = get_feedback()
|
|
236
|
-
output_format = _infer_output_format(output_format, output_file)
|
|
237
|
-
feedback.format_type = output_format
|
|
238
|
-
|
|
239
|
-
# Check network requirement
|
|
240
|
-
requirement = CommandClassifier.get_network_requirement("reviewer", "review")
|
|
241
|
-
offline_mode = False
|
|
242
|
-
|
|
243
|
-
if requirement == CommandNetworkRequirement.OFFLINE:
|
|
244
|
-
offline_mode = True
|
|
245
|
-
elif requirement == CommandNetworkRequirement.OPTIONAL:
|
|
246
|
-
# Try offline first if network unavailable
|
|
247
|
-
if not NetworkDetector.is_network_available():
|
|
248
|
-
offline_mode = True
|
|
249
|
-
feedback.info("Network unavailable, continuing in offline mode with reduced functionality")
|
|
250
|
-
else:
|
|
251
|
-
# Network required - check availability
|
|
252
|
-
if not NetworkDetector.is_network_available():
|
|
253
|
-
try:
|
|
254
|
-
raise NetworkRequiredError(
|
|
255
|
-
operation_name="reviewer review",
|
|
256
|
-
message="Network is required for this command"
|
|
257
|
-
)
|
|
258
|
-
except NetworkRequiredError as e:
|
|
259
|
-
handle_network_error(e, format_type=output_format)
|
|
260
|
-
return
|
|
261
|
-
|
|
262
|
-
# Handle backward compatibility: single file argument
|
|
263
|
-
if file_path and not files and not pattern:
|
|
264
|
-
files = [file_path]
|
|
265
|
-
|
|
266
|
-
    # Normalize file paths before processing (fixes Windows absolute path issues)
    files = _normalize_file_paths(files)

    # Resolve file list
    try:
        resolved_files = _resolve_file_list(files, pattern)
    except ValueError as e:
        feedback.error(
            str(e),
            error_code="no_files_found",
            context={"files": files, "pattern": pattern},
            remediation="Specify files as arguments or use --pattern with a glob pattern",
            exit_code=1,
        )
        return

    # Validate files exist
    missing_files = [f for f in resolved_files if not f.exists()]
    if missing_files:
        feedback.error(
            f"Files not found: {', '.join(str(f) for f in missing_files)}",
            error_code="file_not_found",
            context={"missing_files": [str(f) for f in missing_files]},
            remediation="Check that the files exist and paths are correct",
            exit_code=1,
        )
        return

    feedback.start_operation("Review")
    if len(resolved_files) == 1:
        feedback.info(f"Reviewing {resolved_files[0]}...")
    else:
        feedback.info(f"Reviewing {len(resolved_files)} files (max {max_workers} concurrent)...")

    reviewer = ReviewerAgent()
    cache = get_reviewer_cache()

    try:
        # Activate agent (load configs, etc.)
        if feedback.verbosity.value == "verbose":
            feedback.info("Initializing ReviewerAgent...")
        await reviewer.activate(offline_mode=offline_mode)

        # Single file - use existing flow for backward compatibility
        if len(resolved_files) == 1:
            file_path_obj = resolved_files[0]

            # Execute review command (with caching)
            if feedback.verbosity.value == "verbose":
                feedback.info("Running code analysis...")

            # Check cache first
            cached_result = await cache.get_cached_result(
                file_path_obj, "review", REVIEWER_CACHE_VERSION
            )
            if cached_result is not None:
                result = cached_result
                feedback.info("Using cached result (file unchanged)")
            else:
                result = await reviewer.run("review", file=str(file_path_obj))
                check_result_error(result)
                # Cache the result
                await cache.save_result(
                    file_path_obj, "review", REVIEWER_CACHE_VERSION, result
                )

            feedback.clear_progress()

            # Output handling (stdout vs file)
            if output_file:
                if output_format == "json":
                    output_content = format_json(result)
                elif output_format == "markdown":
                    output_content = format_markdown(result)
                elif output_format == "html":
                    output_content = format_html(result, title="Code Review")
                else:
                    output_content = _format_text_review_result(result)
                _write_output(output_file, output_content)
                feedback.success(f"Results written to {output_file}")
            else:
                if output_format == "json":
                    feedback.output_result(result, message="Review completed successfully", warnings=None, compact=not verbose_output)
                elif output_format in ("markdown", "html"):
                    # Print raw content to stdout
                    output_content = (
                        format_markdown(result)
                        if output_format == "markdown"
                        else format_html(result, title="Code Review")
                    )
                    print(output_content)
                else:
                    feedback.success("Review completed")
                    print(_format_text_review_result(result))

            # CI-style failure handling
            if fail_under is not None:
                scoring = result.get("scoring") or {}
                overall = float(scoring.get("overall_score", 0.0))
                if overall < fail_under:
                    sys.exit(1)
            elif result.get("passed") is False:
                # If the agent evaluated a threshold and failed, return non-zero (useful in CI)
                sys.exit(1)
        else:
            # Batch processing
            result = await _process_file_batch(reviewer, resolved_files, "review", max_workers)
            feedback.clear_progress()

            if output_file:
                if output_format == "json":
                    output_content = format_json(result)
                elif output_format == "markdown":
                    output_content = format_markdown(result.get("files", []))
                elif output_format == "html":
                    output_content = format_html(result.get("files", []), title="Batch Code Review")
                else:
                    output_content = _format_text_batch_summary(result, title="Batch Review")
                _write_output(output_file, output_content)
                feedback.success(f"Results written to {output_file}")
            else:
                if output_format == "json":
                    feedback.output_result(
                        result,
                        message=f"Review completed: {result['successful']}/{result['total']} files successful",
                        compact=not verbose_output,
                    )
                elif output_format in ("markdown", "html"):
                    output_content = (
                        format_markdown(result.get("files", []))
                        if output_format == "markdown"
                        else format_html(result.get("files", []), title="Batch Code Review")
                    )
                    print(output_content)
                else:
                    feedback.success(f"Review completed: {result['successful']}/{result['total']} files successful")
                    print(_format_text_batch_summary(result, title="Batch Review"))

            # Fail if any file failed, or if fail_under is set and any score < threshold
            if fail_under is not None:
                for file_result in result.get("files", []):
                    scoring = file_result.get("scoring") or {}
                    overall = float(scoring.get("overall_score", 0.0))
                    if overall < fail_under:
                        sys.exit(1)
            if int(result.get("failed", 0)) > 0:
                sys.exit(1)
    finally:
        await reviewer.close()

def _normalize_file_paths(files: list[str] | None) -> list[str]:
    """
    Normalize file paths to handle Windows absolute paths.

    Args:
        files: List of file paths (can be None or empty)

    Returns:
        List of normalized file paths
    """
    if not files:
        return []

    from ...core.path_normalizer import normalize_for_cli, normalize_project_root
    project_root = normalize_project_root(Path.cwd())
    normalized_files = []

    for f in files:
        try:
            # Normalize Windows absolute paths to relative paths
            normalized = normalize_for_cli(f, project_root)
            normalized_files.append(normalized)
        except Exception:
            # If normalization fails, use original path
            normalized_files.append(f)

    return normalized_files

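The helper is deliberately forgiving: any path that cannot be normalized is passed through untouched. A minimal sketch of the intended effect, assuming the current working directory is the project root; the example paths are hypothetical, not taken from the package:

# Illustrative sketch only - not part of the package source.
# Assumption: the project root is the current working directory, so an absolute
# Windows path under it normalizes to a relative path, while anything that
# fails normalization comes back unchanged.
normalized = _normalize_file_paths([r"C:\proj\src\app.py", "tests/test_app.py"])
print(normalized)  # e.g. ["src/app.py", "tests/test_app.py"] under that assumption
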
def _resolve_file_list(files: list[str] | None, pattern: str | None) -> list[Path]:
    """
    Resolve file list from files and/or pattern.

    Args:
        files: List of file paths (can be None or empty)
        pattern: Glob pattern (can be None)

    Returns:
        List of resolved Path objects

    Raises:
        ValueError: If no files found
    """
    resolved_files: list[Path] = []

    exclude_dir_names = {
        ".git",
        "__pycache__",
        ".pytest_cache",
        ".mypy_cache",
        ".ruff_cache",
        ".venv",
        "venv",
        "env",
        "node_modules",
        "dist",
        "build",
        "htmlcov",
        "reports",
        ".tapps-agents",
        "tapps_agents.egg-info",
        ".egg-info",
    }
    allowed_suffixes = {
        ".py",
        ".ts",
        ".tsx",
        ".js",
        ".jsx",
        ".java",
        ".go",
        ".rs",
        ".yaml",
        ".yml",
        ".json",
        ".md",
        ".dockerfile",
    }

    def _is_excluded(path: Path) -> bool:
        return any(part in exclude_dir_names for part in path.parts)

    def _discover_from_dir(root: Path, max_files: int = 200) -> list[Path]:
        """
        Discover code files from a directory.

        Args:
            root: Directory to search
            max_files: Maximum number of files to discover (default: 200)

        Returns:
            List of discovered file paths
        """
        discovered: list[Path] = []
        for pat in ["*.py", "*.ts", "*.tsx", "*.js", "*.jsx", "*.java", "*.go", "*.rs", "*.yaml", "*.yml"]:
            if len(discovered) >= max_files:
                break
            for p in root.rglob(pat):
                if len(discovered) >= max_files:
                    break
                if _is_excluded(p):
                    continue
                if p.is_file() and p.suffix.lower() in allowed_suffixes:
                    discovered.append(p)
        return discovered

    # Handle glob pattern
    if pattern:
        cwd = Path.cwd()
        matched = []
        for p in cwd.glob(pattern):
            if len(matched) >= 200:  # Limit pattern matches to prevent too many files
                break
            if p.is_file() and not _is_excluded(p):
                matched.append(p)
        resolved_files.extend(matched)

    # Handle explicit file list
    if files:
        for file_path in files:
            # Support passing glob patterns directly as positional args (e.g. "src/**/*.py")
            if any(ch in file_path for ch in ["*", "?", "["]):
                matched_count = 0
                for p in Path.cwd().glob(file_path):
                    if matched_count >= 200:  # Limit glob matches to prevent too many files
                        break
                    if p.is_file() and not _is_excluded(p):
                        resolved_files.append(p)
                        matched_count += 1
                continue

            path = Path(file_path)
            if not path.is_absolute():
                # Use resolve() to properly normalize path and eliminate directory duplication
                path = (Path.cwd() / path).resolve()
            if path.exists() and path.is_dir():
                resolved_files.extend(_discover_from_dir(path))
            elif path.exists():
                if path.is_file() and (path.suffix.lower() in allowed_suffixes or path.suffix == ""):
                    resolved_files.append(path)
            else:
                # Try relative to cwd (with proper resolution to eliminate duplication)
                cwd_path = (Path.cwd() / file_path).resolve()
                if cwd_path.exists() and cwd_path.is_dir():
                    resolved_files.extend(_discover_from_dir(cwd_path))
                elif cwd_path.exists():
                    if cwd_path.is_file() and (cwd_path.suffix.lower() in allowed_suffixes or cwd_path.suffix == ""):
                        resolved_files.append(cwd_path)
                else:
                    # Keep it anyway - let the agent handle the error
                    resolved_files.append(path)

    # Remove duplicates while preserving order
    seen = set()
    unique_files = []
    for f in resolved_files:
        if f not in seen:
            seen.add(f)
            unique_files.append(f)

    if not unique_files:
        raise ValueError("No files found. Specify files or use --pattern to match files.")

    # Warn if too many files discovered
    if len(unique_files) > 200:
        from ..feedback import get_feedback
        feedback = get_feedback()
        feedback.warning(
            f"Large number of files discovered ({len(unique_files)}). Processing may take a while. "
            f"Consider using --pattern to target specific files or directories. "
            f"Only the first 200 files will be processed."
        )
        # Limit to 200 files to prevent connection errors
        unique_files = unique_files[:200]

    return unique_files

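Resolution accepts explicit paths, positional glob patterns, and directories, deduplicates the result, and caps it at 200 files. A minimal sketch of a call, with hypothetical arguments:

# Illustrative sketch only - not part of the package source.
# "src" is discovered recursively, the glob is expanded from the current
# working directory, the explicit file is kept as-is; duplicates are dropped
# and at most 200 paths are returned.
targets = _resolve_file_list(["src", "scripts/*.py", "tests/test_cli.py"], pattern=None)
print(len(targets))
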
class CircuitBreaker:
    """Circuit breaker to prevent cascading failures."""

    def __init__(self, failure_threshold: int = 5, reset_timeout: float = 60.0):
        self.failure_threshold = failure_threshold
        self.reset_timeout = reset_timeout
        self.failure_count = 0
        self.last_failure_time: float | None = None
        self.is_open = False

    def record_success(self) -> None:
        """Record successful operation."""
        self.failure_count = 0
        self.is_open = False

    def record_failure(self) -> None:
        """Record failure and check if circuit should open."""
        self.failure_count += 1
        if self.failure_count >= self.failure_threshold:
            self.is_open = True
            self.last_failure_time = time.time()

    def should_allow(self) -> bool:
        """Check if operation should be allowed."""
        if not self.is_open:
            return True

        # Check if reset timeout has passed
        if self.last_failure_time:
            elapsed = time.time() - self.last_failure_time
            if elapsed >= self.reset_timeout:
                self.is_open = False
                self.failure_count = 0
                return True

        return False

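For context, a minimal sketch of how a breaker with this interface behaves; the threshold and timeout values below are illustrative:

# Illustrative sketch only - not part of the package source.
breaker = CircuitBreaker(failure_threshold=2, reset_timeout=30.0)
breaker.record_failure()
breaker.record_failure()                # threshold reached -> circuit opens
assert breaker.should_allow() is False  # further work is rejected while open
# Once reset_timeout seconds have passed since the last failure,
# should_allow() resets the breaker and returns True again.
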
def is_retryable_error(error: Exception) -> bool:
    """
    Check if error is retryable (connection-related).

    Implements error taxonomy to distinguish between:
    - Retryable: Transient issues (network timeouts, connection errors)
    - Non-retryable: Permanent issues (file not found, invalid input)

    Based on best practices for AI agent error handling.

    Args:
        error: Exception to check

    Returns:
        True if error is retryable (connection-related)
    """
    retryable_types = (
        ConnectionError,
        TimeoutError,
        OSError,
    )

    # Check for requests library errors
    try:
        import requests
        retryable_types = retryable_types + (
            requests.exceptions.RequestException,
            requests.exceptions.ConnectionError,
            requests.exceptions.Timeout,
            requests.exceptions.ReadTimeout,
            requests.exceptions.ConnectTimeout,
        )
    except ImportError:
        pass

    # Check for aiohttp errors (common in async Python)
    try:
        import aiohttp
        retryable_types = retryable_types + (
            aiohttp.ClientError,
            aiohttp.ClientConnectionError,
            aiohttp.ClientConnectorError,
            aiohttp.ServerTimeoutError,
        )
    except ImportError:
        pass

    error_str = str(error).lower()
    retryable_keywords = [
        "connection",
        "timeout",
        "network",
        "unreachable",
        "refused",
        "reset",
        "connection error",
        "connection failed",
        "temporary failure",
        "service unavailable",
        "rate limit",  # Rate limits are often temporary
    ]

    # Non-retryable keywords (permanent errors)
    non_retryable_keywords = [
        "file not found",
        "permission denied",
        "invalid",
        "malformed",
        "syntax error",
    ]

    # Check for non-retryable errors first
    if any(keyword in error_str for keyword in non_retryable_keywords):
        return False

    return (
        isinstance(error, retryable_types) or
        any(keyword in error_str for keyword in retryable_keywords)
    )

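This taxonomy is meant to be paired with bounded retries. A minimal sketch of that pairing, mirroring the backoff constants the batch processor below uses (base 2.0, capped at 10 seconds, 3 attempts); the wrapper name is hypothetical:

# Illustrative sketch only - not part of the package source.
import asyncio


async def call_with_retries(op, max_retries: int = 3):
    """Retry an awaitable factory only for errors the taxonomy deems transient."""
    for attempt in range(1, max_retries + 1):
        try:
            return await op()
        except Exception as e:
            if not is_retryable_error(e) or attempt == max_retries:
                raise
            await asyncio.sleep(min(2.0 ** attempt, 10.0))  # exponential backoff, capped
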
async def _process_file_batch(
    reviewer: ReviewerAgent,
    files: list[Path],
    command: str,
    max_workers: int = 4,
) -> dict[str, Any]:
    """
    Process multiple files concurrently in batches with retry logic and circuit breaker.

    Performance optimizations:
    - Result caching for 90%+ speedup on unchanged files
    - Circuit breaker to prevent cascading failures
    - Retry logic with exponential backoff

    Args:
        reviewer: ReviewerAgent instance
        files: List of file paths to process
        command: Command to run ('score', 'review', 'lint', 'type-check')
        max_workers: Maximum concurrent operations

    Returns:
        Dictionary with aggregated results
    """
    from ..feedback import get_feedback
    feedback = get_feedback()
    cache = get_reviewer_cache()

    # Configuration
    BATCH_SIZE = 10  # Process 10 files per batch
    MAX_CONCURRENT = max(1, min(max_workers, 2))  # Limit to max 2 concurrent
    BATCH_DELAY = 1.0  # Delay between batches
    FILE_DELAY = 0.2  # Small delay between individual files
    MAX_RETRIES = 3  # Maximum retry attempts for connection errors
    RETRY_BACKOFF_BASE = 2.0  # Exponential backoff base
    MAX_RETRY_BACKOFF = 10.0  # Maximum backoff time in seconds
    RETRY_TIMEOUT = 120.0  # Timeout per retry attempt (2 minutes)

    # Track cache statistics for this batch
    cache_hits = 0
    cache_misses = 0

    # Progress tracking for long operations
    total_files = len(files)
    processed_count = 0
    start_time = asyncio.get_event_loop().time()
    last_progress_update = start_time
    PROGRESS_UPDATE_INTERVAL = 5.0  # Update progress every 5 seconds for long operations

    # Initialize circuit breaker
    circuit_breaker = CircuitBreaker(failure_threshold=5, reset_timeout=60.0)
    semaphore = asyncio.Semaphore(MAX_CONCURRENT)

    async def process_single_file(file_path: Path) -> tuple[Path, dict[str, Any]]:
        """Process a single file with caching, retry logic, circuit breaker, and semaphore limiting."""
        nonlocal cache_hits, cache_misses

        # Check cache first (before circuit breaker)
        cached_result = await cache.get_cached_result(
            file_path, command, REVIEWER_CACHE_VERSION
        )
        if cached_result is not None:
            cache_hits += 1
            cached_result["_from_cache"] = True
            return (file_path, cached_result)

        cache_misses += 1

        # Check circuit breaker before processing
        if not circuit_breaker.should_allow():
            return (file_path, {
                "error": "Circuit breaker open - too many failures",
                "file": str(file_path),
                "circuit_breaker": True
            })

        async with semaphore:
            await asyncio.sleep(FILE_DELAY)

            # Retry logic for connection errors with per-attempt timeout
            last_error: Exception | None = None
            RETRY_TIMEOUT = 120.0  # 2 minutes per retry attempt

            for attempt in range(1, MAX_RETRIES + 1):
                try:
                    # Wrap each retry attempt in a timeout to prevent hanging
                    result = await asyncio.wait_for(
                        reviewer.run(command, file=str(file_path)),
                        timeout=RETRY_TIMEOUT
                    )
                    # Ensure result is always a dict (defensive check)
                    if not isinstance(result, dict):
                        return (file_path, {
                            "error": f"Unexpected result type: {type(result).__name__}. Result: {str(result)[:200]}",
                            "file": str(file_path)
                        })

                    # Success - record in circuit breaker and cache result
                    circuit_breaker.record_success()

                    # Cache successful results (non-error results only)
                    if "error" not in result:
                        await cache.save_result(
                            file_path, command, REVIEWER_CACHE_VERSION, result
                        )

                    return (file_path, result)

                except asyncio.TimeoutError:
                    # Per-attempt timeout - treat as retryable connection issue
                    last_error = TimeoutError(f"Operation timed out after {RETRY_TIMEOUT}s")
                    if attempt < MAX_RETRIES:
                        backoff = min(RETRY_BACKOFF_BASE ** attempt, MAX_RETRY_BACKOFF)
                        if feedback.verbosity.value == "verbose":
                            feedback.info(
                                f"Retrying {file_path.name} after timeout "
                                f"(attempt {attempt + 1}/{MAX_RETRIES}, backoff {backoff:.1f}s)..."
                            )
                        await asyncio.sleep(backoff)
                        continue
                    else:
                        circuit_breaker.record_failure()
                        return (file_path, {
                            "error": f"Operation timed out after {RETRY_TIMEOUT}s (attempt {attempt}/{MAX_RETRIES})",
                            "file": str(file_path),
                            "retryable": True,
                            "attempts": attempt,
                            "timeout": True
                        })

                except Exception as e:
                    last_error = e

                    # Check if error is retryable
                    if is_retryable_error(e) and attempt < MAX_RETRIES:
                        # Exponential backoff
                        backoff = min(RETRY_BACKOFF_BASE ** attempt, MAX_RETRY_BACKOFF)
                        if feedback.verbosity.value == "verbose":
                            feedback.info(
                                f"Retrying {file_path.name} after connection error "
                                f"(attempt {attempt + 1}/{MAX_RETRIES}, backoff {backoff:.1f}s)..."
                            )
                        await asyncio.sleep(backoff)
                        continue
                    else:
                        # Non-retryable error or max retries reached
                        if is_retryable_error(e):
                            circuit_breaker.record_failure()
                        return (file_path, {
                            "error": str(e),
                            "file": str(file_path),
                            "retryable": is_retryable_error(e),
                            "attempts": attempt,
                            "error_type": type(e).__name__
                        })

            # All retries exhausted
            circuit_breaker.record_failure()
            return (file_path, {
                "error": f"Failed after {MAX_RETRIES} attempts: {str(last_error)}",
                "file": str(file_path),
                "retryable": True,
                "attempts": MAX_RETRIES,
                "error_type": type(last_error).__name__ if last_error else "Unknown"
            })

    # Process files in batches with circuit breaker protection
    all_results = []
    total_batches = (len(files) + BATCH_SIZE - 1) // BATCH_SIZE

    for batch_idx in range(total_batches):
        # Check circuit breaker before processing batch
        if not circuit_breaker.should_allow():
            remaining_count = len(files) - batch_idx * BATCH_SIZE
            feedback.warning(
                f"Circuit breaker open - skipping remaining {remaining_count} files "
                f"(too many connection failures)"
            )
            # Mark remaining files as failed
            for remaining_file in files[batch_idx * BATCH_SIZE:]:
                all_results.append((remaining_file, {
                    "error": "Circuit breaker open - skipped due to too many failures",
                    "file": str(remaining_file),
                    "circuit_breaker": True
                }))
            break

        start_idx = batch_idx * BATCH_SIZE
        end_idx = min(start_idx + BATCH_SIZE, len(files))
        batch_files = files[start_idx:end_idx]

        if total_batches > 1:
            feedback.info(f"Processing batch {batch_idx + 1}/{total_batches} ({len(batch_files)} files)...")

        # Process files in batch with limited concurrency and progress updates
        # Create tasks for the batch, but semaphore limits concurrent execution
        batch_tasks = [process_single_file(f) for f in batch_files]

        # Add progress tracking for long operations
        async def process_with_progress():
            """Process batch with periodic progress updates."""
            nonlocal processed_count, last_progress_update

            # Create a wrapper that updates progress
            async def process_and_track(task):
                nonlocal processed_count, last_progress_update
                result = await task
                processed_count += 1

                # Update progress every 5 seconds for operations >10 seconds
                current_time = asyncio.get_event_loop().time()
                elapsed = current_time - start_time

                if elapsed > 10.0:  # Only show progress for operations >10 seconds
                    if current_time - last_progress_update >= PROGRESS_UPDATE_INTERVAL:
                        percent = (processed_count / total_files * 100) if total_files > 0 else 0
                        feedback.info(
                            f"Reviewing files: {processed_count}/{total_files} ({percent:.1f}%) "
                            f"- {elapsed:.1f}s elapsed"
                        )
                        last_progress_update = current_time

                return result

            # Process all tasks with progress tracking
            tracked_tasks = [process_and_track(task) for task in batch_tasks]
            return await asyncio.gather(*tracked_tasks, return_exceptions=True)

        batch_results = await process_with_progress()
        all_results.extend(batch_results)

        # Delay between batches to avoid overwhelming connections
        if batch_idx < total_batches - 1:  # Don't delay after last batch
            await asyncio.sleep(BATCH_DELAY)

    results = all_results

    # Aggregate results
    aggregated: dict[str, Any] = {
        "files": [],
        "successful": 0,
        "failed": 0,
        "errors": [],
    }

    for result in results:
        if isinstance(result, Exception):
            aggregated["failed"] += 1
            aggregated["errors"].append({"error": str(result)})
            continue

        file_path, file_result = result

        # Defensive check: ensure file_result is a dict
        if not isinstance(file_result, dict):
            aggregated["failed"] += 1
            aggregated["errors"].append({
                "file": str(file_path),
                "error": f"Unexpected result type: {type(file_result).__name__}. Result: {str(file_result)[:200]}"
            })
            continue

        if "error" in file_result:
            aggregated["failed"] += 1
            aggregated["errors"].append({
                "file": str(file_path),
                "error": file_result.get("error", "Unknown error")
            })
        else:
            aggregated["successful"] += 1

        file_entry: dict[str, Any] = {
            "file": str(file_path),
        }
        file_entry.update(file_result)
        aggregated["files"].append(file_entry)

    aggregated["total"] = len(files)

    # Add cache statistics to help users understand performance gains
    aggregated["_cache_stats"] = {
        "hits": cache_hits,
        "misses": cache_misses,
        "hit_rate": f"{(cache_hits / len(files) * 100):.1f}%" if files else "0.0%"
    }

    # Log cache statistics if verbose
    if feedback.verbosity.value == "verbose" and cache_hits > 0:
        feedback.info(
            f"Cache stats: {cache_hits} hits, {cache_misses} misses "
            f"({cache_hits / len(files) * 100:.1f}% hit rate)"
        )

    return aggregated

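The aggregate produced above is what the batch branches of the commands below consume. A sketch of its shape, with made-up values, assuming a ten-file run:

# Illustrative sketch only - not part of the package source; values are invented.
# result = await _process_file_batch(reviewer, resolved_files, "review", max_workers=4)
# result == {
#     "files": [{"file": "src/app.py", ...per-file result or error...}, ...],
#     "successful": 8,
#     "failed": 2,
#     "errors": [{"file": "src/bad.py", "error": "..."}],
#     "total": 10,
#     "_cache_stats": {"hits": 5, "misses": 5, "hit_rate": "50.0%"},
# }
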
async def score_command(
    file_path: str | None = None,
    files: list[str] | None = None,
    pattern: str | None = None,
    output_format: str = "json",
    max_workers: int = 4,
    output_file: str | None = None,
    fail_under: float | None = None,
    verbose_output: bool = False,
    explain: bool = False,
):
    """
    Score code file(s) (supports both *score and score commands).

    Supports single file (backward compatible) or batch processing.
    """
    from ..command_classifier import CommandClassifier, CommandNetworkRequirement

    feedback = get_feedback()
    output_format = _infer_output_format(output_format, output_file)
    feedback.format_type = output_format

    # Check network requirement - score is offline
    requirement = CommandClassifier.get_network_requirement("reviewer", "score")
    offline_mode = (requirement == CommandNetworkRequirement.OFFLINE)

    # Handle backward compatibility: single file argument
    if file_path and not files and not pattern:
        files = [file_path]

    # Normalize file paths before processing (fixes Windows absolute path issues)
    files = _normalize_file_paths(files)

    # Resolve file list
    try:
        resolved_files = _resolve_file_list(files, pattern)
    except ValueError as e:
        feedback.error(
            str(e),
            error_code="no_files_found",
            context={"files": files, "pattern": pattern},
            remediation="Specify files as arguments or use --pattern with a glob pattern",
            exit_code=1,
        )
        return

    # Validate files exist
    missing_files = [f for f in resolved_files if not f.exists()]
    if missing_files:
        feedback.error(
            f"Files not found: {', '.join(str(f) for f in missing_files)}",
            error_code="file_not_found",
            context={"missing_files": [str(f) for f in missing_files]},
            remediation="Check that the files exist and paths are correct",
            exit_code=1,
        )
        return

    feedback.start_operation("Score")
    if len(resolved_files) == 1:
        feedback.info(f"Scoring {resolved_files[0]}...")
    else:
        feedback.info(f"Scoring {len(resolved_files)} files (max {max_workers} concurrent)...")

    reviewer = ReviewerAgent()
    cache = get_reviewer_cache()

    try:
        await reviewer.activate(offline_mode=offline_mode)

        # Single file - use existing flow for backward compatibility
        if len(resolved_files) == 1:
            file_path_obj = resolved_files[0]

            # Check cache first
            cached_result = await cache.get_cached_result(
                file_path_obj, "score", REVIEWER_CACHE_VERSION
            )
            if cached_result is not None:
                result = cached_result
                feedback.info("Using cached result (file unchanged)")
            else:
                result = await reviewer.run("score", file=str(file_path_obj), explain=explain)
                check_result_error(result)
                # Cache the result
                await cache.save_result(
                    file_path_obj, "score", REVIEWER_CACHE_VERSION, result
                )
            feedback.clear_progress()

            # Format and output result
            if output_format == "json":
                output_content = format_json(result)
            elif output_format == "markdown":
                output_content = format_markdown(result)
            elif output_format == "html":
                output_content = format_html(result, title="Code Quality Scores")
            else:  # text
                output_content = _format_text_review_result(result)

            # Write to file or stdout
            if output_file:
                _write_output(output_file, output_content)
                feedback.success(f"Results written to {output_file}")
            else:
                if output_format == "json":
                    feedback.output_result(result, message="Scoring completed", warnings=None, compact=not verbose_output)
                elif output_format in ("markdown", "html"):
                    print(output_content)
                else:
                    feedback.success("Scoring completed")
                    print(output_content)

            if fail_under is not None:
                scoring = result.get("scoring") or {}
                overall = float(scoring.get("overall_score", 0.0))
                if overall < fail_under:
                    sys.exit(1)
        else:
            # Batch processing
            result = await _process_file_batch(reviewer, resolved_files, "score", max_workers)
            feedback.clear_progress()

            # Format and output result
            if output_format == "json":
                output_content = format_json(result)
            elif output_format == "markdown":
                output_content = format_markdown(result['files'])
            elif output_format == "html":
                output_content = format_html(result['files'], title="Batch Code Quality Scores")
            else:  # text
                output_content = _format_text_batch_summary(result, title="Batch Score")

            # Write to file or stdout
            if output_file:
                _write_output(output_file, output_content)
                feedback.success(f"Results written to {output_file}")
            else:
                if output_format == "json":
                    feedback.output_result(result, message=f"Scoring completed: {result['successful']}/{result['total']} files successful", compact=not verbose_output)
                elif output_format in ("markdown", "html"):
                    print(output_content)
                else:
                    feedback.success(f"Scoring completed: {result['successful']}/{result['total']} files successful")
                    print(output_content)

            if fail_under is not None:
                for file_result in result.get("files", []):
                    scoring = file_result.get("scoring") or {}
                    overall = float(scoring.get("overall_score", 0.0))
                    if overall < fail_under:
                        sys.exit(1)
            if int(result.get("failed", 0)) > 0:
                sys.exit(1)
    finally:
        await reviewer.close()

async def lint_command(
    file_path: str | None = None,
    files: list[str] | None = None,
    pattern: str | None = None,
    output_format: str = "json",
    max_workers: int = 4,
    output_file: str | None = None,
    fail_on_issues: bool = False,
    verbose_output: bool = False,
    isolated: bool = False,
) -> None:
    """Run linting on file(s) with consistent async execution and output handling."""
    from ..command_classifier import CommandClassifier, CommandNetworkRequirement

    feedback = get_feedback()
    output_format = _infer_output_format(output_format, output_file)
    feedback.format_type = output_format

    # Check network requirement - lint is offline
    requirement = CommandClassifier.get_network_requirement("reviewer", "lint")
    offline_mode = (requirement == CommandNetworkRequirement.OFFLINE)

    if file_path and not files and not pattern:
        files = [file_path]

    # Normalize file paths before processing (fixes Windows absolute path issues)
    files = _normalize_file_paths(files)

    try:
        resolved_files = _resolve_file_list(files, pattern)
    except ValueError as e:
        feedback.error(
            str(e),
            error_code="no_files_found",
            context={"files": files, "pattern": pattern},
            remediation="Specify files as arguments or use --pattern with a glob pattern",
            exit_code=1,
        )
        return

    missing_files = [f for f in resolved_files if not f.exists()]
    if missing_files:
        feedback.error(
            f"Files not found: {', '.join(str(f) for f in missing_files)}",
            error_code="file_not_found",
            context={"missing_files": [str(f) for f in missing_files]},
            remediation="Check that the files exist and paths are correct",
            exit_code=1,
        )
        return

    feedback.start_operation("Lint")
    feedback.info(
        f"Linting {resolved_files[0]}..." if len(resolved_files) == 1 else f"Linting {len(resolved_files)} files (max {max_workers} concurrent)..."
    )

    reviewer = ReviewerAgent()
    cache = get_reviewer_cache()

    try:
        await reviewer.activate(offline_mode=offline_mode)

        if len(resolved_files) == 1:
            file_path_obj = resolved_files[0]

            # Check cache first
            cached_result = await cache.get_cached_result(
                file_path_obj, "lint", REVIEWER_CACHE_VERSION
            )
            if cached_result is not None:
                result = cached_result
                feedback.info("Using cached result (file unchanged)")
            else:
                result = await reviewer.run("lint", file=str(file_path_obj), isolated=isolated)
                check_result_error(result)
                # Cache the result (only if not isolated, as isolated results may differ)
                if not isolated:
                    await cache.save_result(
                        file_path_obj, "lint", REVIEWER_CACHE_VERSION, result
                    )
            feedback.clear_progress()

            if output_file:
                if output_format == "json":
                    output_content = format_json(result)
                elif output_format == "markdown":
                    output_content = format_markdown(result)
                elif output_format == "html":
                    output_content = format_html(result, title="Linting Results")
                else:
                    output_content = _format_text_review_result(result)
                _write_output(output_file, output_content)
                feedback.success(f"Results written to {output_file}")
            else:
                if output_format == "json":
                    feedback.output_result(result, message="Linting completed", compact=not verbose_output)
                elif output_format in ("markdown", "html"):
                    print(
                        format_markdown(result)
                        if output_format == "markdown"
                        else format_html(result, title="Linting Results")
                    )
                else:
                    feedback.success("Linting completed")
                    print(_format_text_review_result(result))

            if fail_on_issues and int(result.get("issue_count", 0)) > 0:
                sys.exit(1)
        else:
            result = await _process_file_batch(reviewer, resolved_files, "lint", max_workers)
            feedback.clear_progress()

            # Defensive check: ensure result is a dict
            if not isinstance(result, dict):
                feedback.error(
                    f"Unexpected result type from batch processing: {type(result).__name__}",
                    error_code="invalid_result_type",
                    context={"result_type": type(result).__name__, "result_preview": str(result)[:200]},
                    exit_code=1,
                )
                return

            if output_file:
                if output_format == "json":
                    output_content = format_json(result)
                elif output_format == "markdown":
                    output_content = format_markdown(result.get("files", []))
                elif output_format == "html":
                    output_content = format_html(result.get("files", []), title="Batch Linting Results")
                else:
                    output_content = _format_text_batch_summary(result, title="Batch Lint")
                _write_output(output_file, output_content)
                feedback.success(f"Results written to {output_file}")
            else:
                if output_format == "json":
                    feedback.output_result(
                        result,
                        message=f"Linting completed: {result.get('successful', 0)}/{result.get('total', 0)} files successful",
                        compact=not verbose_output,
                    )
                elif output_format in ("markdown", "html"):
                    print(
                        format_markdown(result.get("files", []))
                        if output_format == "markdown"
                        else format_html(result.get("files", []), title="Batch Linting Results")
                    )
                else:
                    feedback.success(f"Linting completed: {result.get('successful', 0)}/{result.get('total', 0)} files successful")
                    print(_format_text_batch_summary(result, title="Batch Lint"))

            if fail_on_issues:
                for file_result in result.get("files", []):
                    if int(file_result.get("issue_count", 0)) > 0:
                        sys.exit(1)
            if int(result.get("failed", 0)) > 0:
                sys.exit(1)
    finally:
        await reviewer.close()

async def type_check_command(
    file_path: str | None = None,
    files: list[str] | None = None,
    pattern: str | None = None,
    output_format: str = "json",
    max_workers: int = 4,
    output_file: str | None = None,
    fail_on_issues: bool = False,
    verbose_output: bool = False,
) -> None:
    """Run type-checking on file(s) with consistent async execution and output handling."""
    from ..command_classifier import CommandClassifier, CommandNetworkRequirement

    feedback = get_feedback()
    output_format = _infer_output_format(output_format, output_file)
    feedback.format_type = output_format

    # Check network requirement - type-check is offline
    requirement = CommandClassifier.get_network_requirement("reviewer", "type-check")
    offline_mode = (requirement == CommandNetworkRequirement.OFFLINE)

    if file_path and not files and not pattern:
        files = [file_path]

    # Normalize file paths before processing (fixes Windows absolute path issues)
    files = _normalize_file_paths(files)

    try:
        resolved_files = _resolve_file_list(files, pattern)
    except ValueError as e:
        feedback.error(
            str(e),
            error_code="no_files_found",
            context={"files": files, "pattern": pattern},
            remediation="Specify files as arguments or use --pattern with a glob pattern",
            exit_code=1,
        )
        return

    missing_files = [f for f in resolved_files if not f.exists()]
    if missing_files:
        feedback.error(
            f"Files not found: {', '.join(str(f) for f in missing_files)}",
            error_code="file_not_found",
            context={"missing_files": [str(f) for f in missing_files]},
            remediation="Check that the files exist and paths are correct",
            exit_code=1,
        )
        return

    feedback.start_operation("Type Check")
    feedback.info(
        f"Type checking {resolved_files[0]}..." if len(resolved_files) == 1 else f"Type checking {len(resolved_files)} files (max {max_workers} concurrent)..."
    )

    reviewer = ReviewerAgent()
    cache = get_reviewer_cache()

    try:
        await reviewer.activate(offline_mode=offline_mode)

        if len(resolved_files) == 1:
            file_path_obj = resolved_files[0]

            # Check cache first
            cached_result = await cache.get_cached_result(
                file_path_obj, "type-check", REVIEWER_CACHE_VERSION
            )
            if cached_result is not None:
                result = cached_result
                feedback.info("Using cached result (file unchanged)")
            else:
                result = await reviewer.run("type-check", file=str(file_path_obj))
                check_result_error(result)
                # Cache the result
                await cache.save_result(
                    file_path_obj, "type-check", REVIEWER_CACHE_VERSION, result
                )
            feedback.clear_progress()

            if output_file:
                if output_format == "json":
                    output_content = format_json(result)
                elif output_format == "markdown":
                    output_content = format_markdown(result)
                elif output_format == "html":
                    output_content = format_html(result, title="Type Check Results")
                else:
                    output_content = _format_text_review_result(result)
                _write_output(output_file, output_content)
                feedback.success(f"Results written to {output_file}")
            else:
                if output_format == "json":
                    feedback.output_result(result, message="Type checking completed", compact=not verbose_output)
                elif output_format in ("markdown", "html"):
                    print(
                        format_markdown(result)
                        if output_format == "markdown"
                        else format_html(result, title="Type Check Results")
                    )
                else:
                    feedback.success("Type checking completed")
                    print(_format_text_review_result(result))

            if fail_on_issues and int(result.get("error_count", 0)) > 0:
                sys.exit(1)
        else:
            result = await _process_file_batch(reviewer, resolved_files, "type-check", max_workers)
            feedback.clear_progress()

            if output_file:
                if output_format == "json":
                    output_content = format_json(result)
                elif output_format == "markdown":
                    output_content = format_markdown(result.get("files", []))
                elif output_format == "html":
                    output_content = format_html(result.get("files", []), title="Batch Type Check Results")
                else:
                    output_content = _format_text_batch_summary(result, title="Batch Type Check")
                _write_output(output_file, output_content)
                feedback.success(f"Results written to {output_file}")
            else:
                if output_format == "json":
                    feedback.output_result(
                        result,
                        message=f"Type checking completed: {result['successful']}/{result['total']} files successful",
                        compact=not verbose_output,
                    )
                elif output_format in ("markdown", "html"):
                    print(
                        format_markdown(result.get("files", []))
                        if output_format == "markdown"
                        else format_html(result.get("files", []), title="Batch Type Check Results")
                    )
                else:
                    feedback.success(f"Type checking completed: {result['successful']}/{result['total']} files successful")
                    print(_format_text_batch_summary(result, title="Batch Type Check"))

            if fail_on_issues:
                for file_result in result.get("files", []):
                    if int(file_result.get("error_count", 0)) > 0 or len(file_result.get("errors", []) or []) > 0:
                        sys.exit(1)
            if int(result.get("failed", 0)) > 0:
                sys.exit(1)
    finally:
        await reviewer.close()

async def docs_command(
    library: str,
    topic: str | None = None,
    mode: str = "code",
    page: int = 1,
    output_format: str = "json",
    no_cache: bool = False,
) -> None:
    """
    Get library documentation from Context7 (supports both *docs and docs commands).

    Uses KB-first lookup with automatic fallback to Context7 API.
    """
    from ..command_classifier import CommandClassifier, CommandNetworkRequirement
    from ..network_detection import NetworkDetector

    feedback = get_feedback()
    feedback.format_type = output_format
    feedback.start_operation("Get Documentation")

    # Check network requirement - docs is optional (can use cache)
    requirement = CommandClassifier.get_network_requirement("reviewer", "docs")
    offline_mode = False

    if requirement == CommandNetworkRequirement.OPTIONAL:
        # Try offline first if network unavailable
        if not NetworkDetector.is_network_available():
            offline_mode = True
            feedback.info("Network unavailable, using cached documentation if available")

    query_desc = f"{library}"
    if topic:
        query_desc += f" ({topic})"
    feedback.info(f"Fetching documentation for {query_desc}...")

    reviewer = ReviewerAgent()
    try:
        await reviewer.activate(offline_mode=offline_mode)

        result = await reviewer.run(
            "docs",
            library=library,
            topic=topic,
            mode=mode,
            page=page,
            no_cache=no_cache,
        )

        check_result_error(result)
        feedback.clear_progress()

        # Format output based on format type
        if output_format == "json":
            feedback.output_result(result, message="Documentation retrieved successfully")
        elif output_format == "markdown":
            content = result.get("content", "")
            if content:
                print(content)
            else:
                feedback.warning("No documentation content found")
        else:  # text
            content = result.get("content", "")
            if content:
                print(content)
            else:
                feedback.warning("No documentation content found")

    finally:
        await reviewer.close()

async def help_command():
    """Show help (supports both *help and help commands) - uses static help, no activation needed"""
    from ..help.static_help import get_static_help
    help_text = get_static_help("reviewer")
    feedback = get_feedback()
    feedback.output_result(help_text)

def handle_reviewer_command(args: object) -> None:
    """Handle reviewer agent commands"""
    from ..feedback import get_feedback
    from ..help.static_help import get_static_help

    feedback = get_feedback()
    command = normalize_command(getattr(args, "command", None))
    output_format = getattr(args, "format", "json")
    feedback.format_type = output_format

    # Help commands first - no activation needed
    if command == "help" or command is None:
        help_text = get_static_help("reviewer")
        feedback.output_result(help_text)
        return

    # Get batch operation parameters
    files = getattr(args, "files", None)
    pattern = getattr(args, "pattern", None)
    max_workers = getattr(args, "max_workers", 4)
    output_file = getattr(args, "output", None)

    # Backward compatibility: support 'file' attribute for single file
    single_file = getattr(args, "file", None)
    if single_file and not files:
        files = [single_file]

    try:
        if command == "review":
            fail_under = getattr(args, "fail_under", None)
            verbose_output = bool(getattr(args, "verbose_output", False))
            run_async_command(
                review_command(
                    file_path=single_file,
                    files=files,
                    pattern=pattern,
                    output_format=output_format,
                    max_workers=max_workers,
                    output_file=output_file,
                    fail_under=fail_under,
                    verbose_output=verbose_output,
                )
            )
        elif command == "score":
            fail_under = getattr(args, "fail_under", None)
            verbose_output = bool(getattr(args, "verbose_output", False))
            explain = bool(getattr(args, "explain", False))
            run_async_command(
                score_command(
                    file_path=single_file,
                    files=files,
                    pattern=pattern,
                    output_format=output_format,
                    max_workers=max_workers,
                    output_file=output_file,
                    fail_under=fail_under,
                    verbose_output=verbose_output,
                    explain=explain,
                )
            )
        elif command == "lint":
            fail_on_issues = bool(getattr(args, "fail_on_issues", False))
            verbose_output = bool(getattr(args, "verbose_output", False))
            isolated = bool(getattr(args, "isolated", False))
            run_async_command(
                lint_command(
                    file_path=single_file,
                    files=files,
                    pattern=pattern,
                    output_format=output_format,
                    max_workers=max_workers,
                    output_file=output_file,
                    fail_on_issues=fail_on_issues,
                    verbose_output=verbose_output,
                    isolated=isolated,
                )
            )
        elif command == "type-check":
            fail_on_issues = bool(getattr(args, "fail_on_issues", False))
            verbose_output = bool(getattr(args, "verbose_output", False))
            run_async_command(
                type_check_command(
                    file_path=single_file,
                    files=files,
                    pattern=pattern,
                    output_format=output_format,
                    max_workers=max_workers,
                    output_file=output_file,
                    fail_on_issues=fail_on_issues,
                    verbose_output=verbose_output,
                )
            )
        elif command == "report":
            from ..command_classifier import CommandClassifier, CommandNetworkRequirement
            from ..network_detection import NetworkDetector
            from ...core.network_errors import NetworkRequiredError
            from ..base import handle_network_error

            requirement = CommandClassifier.get_network_requirement("reviewer", "report")
            if requirement == CommandNetworkRequirement.REQUIRED and not NetworkDetector.is_network_available():
                try:
                    raise NetworkRequiredError(
                        operation_name="reviewer report",
                        message="Network is required for this command"
                    )
                except NetworkRequiredError as e:
                    handle_network_error(e, format_type=output_format)
                    return

            feedback.start_operation("Report Generation", "Analyzing project quality...")
            formats = getattr(args, "formats", ["all"])
            if "all" in formats:
                format_type = "all"
            else:
                format_type = ",".join(formats)

            # Show initial progress
            feedback.running("Discovering files...", step=1, total_steps=4)

            reviewer = ReviewerAgent()
            result = run_async_command(
                run_report(reviewer, args.target, format_type, getattr(args, "output_dir", None))
            )
            check_result_error(result)
            feedback.clear_progress()

            # Extract report paths from result for better feedback
            report_paths = []
            if isinstance(result, dict):
                if "reports" in result and isinstance(result["reports"], dict):
                    # Reports is a dict like {"json": "path", "markdown": "path", ...}
                    report_paths = list(result["reports"].values())
                elif "reports" in result and isinstance(result["reports"], list):
                    report_paths = result["reports"]
                elif "data" in result and isinstance(result["data"], dict):
                    if "reports" in result["data"]:
                        if isinstance(result["data"]["reports"], dict):
                            report_paths = list(result["data"]["reports"].values())
                        elif isinstance(result["data"]["reports"], list):
                            report_paths = result["data"]["reports"]

            summary = {}
            if report_paths:
                summary["reports_generated"] = len(report_paths)
                if len(report_paths) <= 5:  # Only show paths if not too many
                    summary["report_files"] = report_paths

            verbose_output = bool(getattr(args, "verbose_output", False))
            feedback.output_result(result, message="Report generated successfully", warnings=None, compact=not verbose_output)
        elif command == "duplication":
            # Duplication check is offline - no network check needed
            feedback.start_operation("Duplication Check")
            feedback.info(f"Checking for code duplication in {args.target}...")
            reviewer = ReviewerAgent()
            result = run_async_command(run_duplication(reviewer, args.target))
            check_result_error(result)
            feedback.clear_progress()
            if output_format == "json":
                feedback.output_result(result, message="Duplication check completed")
            else:
                feedback.success("Duplication check completed")
                if "duplicates" in result:
                    print(f"\nCode duplication detected in {args.target}:")
                    print(f"  Total duplicates: {len(result.get('duplicates', []))}")
        elif command == "analyze-project":
            # Project analysis may need network - check if required
            from ..command_classifier import CommandClassifier, CommandNetworkRequirement
            from ..network_detection import NetworkDetector
            from ...core.network_errors import NetworkRequiredError
            from ..base import handle_network_error

            requirement = CommandClassifier.get_network_requirement("reviewer", "analyze-project")
            if requirement == CommandNetworkRequirement.REQUIRED and not NetworkDetector.is_network_available():
                try:
                    raise NetworkRequiredError(
                        operation_name="reviewer analyze-project",
                        message="Network is required for this command"
                    )
                except NetworkRequiredError as e:
                    handle_network_error(e, format_type=output_format)
                    return

            feedback.start_operation("Project Analysis")
            feedback.info("Analyzing project...")
            reviewer = ReviewerAgent()
            result = run_async_command(
                run_analyze_project(
                    reviewer,
                    getattr(args, "project_root", None),
                    include_comparison=not getattr(args, "no_comparison", False),
                )
            )
            check_result_error(result)
            feedback.clear_progress()
            verbose_output = bool(getattr(args, "verbose_output", False))
            feedback.output_result(result, message="Project analysis completed", compact=not verbose_output)
        elif command == "analyze-services":
            # Service analysis may need network - check if required
            from ..command_classifier import CommandClassifier, CommandNetworkRequirement
            from ..network_detection import NetworkDetector
            from ...core.network_errors import NetworkRequiredError
            from ..base import handle_network_error

            requirement = CommandClassifier.get_network_requirement("reviewer", "analyze-services")
            if requirement == CommandNetworkRequirement.REQUIRED and not NetworkDetector.is_network_available():
                try:
                    raise NetworkRequiredError(
                        operation_name="reviewer analyze-services",
                        message="Network is required for this command"
                    )
                except NetworkRequiredError as e:
                    handle_network_error(e, format_type=output_format)
                    return

            feedback.start_operation("Service Analysis")
            feedback.info("Analyzing services...")
            services = getattr(args, "services", None)
            reviewer = ReviewerAgent()
            result = run_async_command(
                run_analyze_services(
                    reviewer,
                    services if services else None,
                    getattr(args, "project_root", None),
                    include_comparison=not getattr(args, "no_comparison", False),
                )
            )
            check_result_error(result)
            feedback.clear_progress()
            verbose_output = bool(getattr(args, "verbose_output", False))
            feedback.output_result(result, message="Service analysis completed", compact=not verbose_output)
        elif command == "docs":
            run_async_command(
                docs_command(
                    library=getattr(args, "library"),
                    topic=getattr(args, "topic", None),
                    mode=getattr(args, "mode", "code"),
                    page=getattr(args, "page", 1),
                    output_format=output_format,
                    no_cache=bool(getattr(args, "no_cache", False)),
                )
            )
        else:
            # Invalid command - show help without activation
|
|
1797
|
-
help_text = get_static_help("reviewer")
|
|
1798
|
-
feedback.output_result(help_text)
|
|
1799
|
-
finally:
|
|
1800
|
-
# Each command manages its own agent lifecycle; nothing to close here.
|
|
1801
|
-
pass
|
|
1802
|
-
|
|
1803
|
-
|
|
1804
|
-
async def run_report(reviewer: ReviewerAgent, target: str, format_type: str, output_dir: str | None) -> dict[str, Any]:
|
|
1805
|
-
from ..command_classifier import CommandClassifier, CommandNetworkRequirement
|
|
1806
|
-
|
|
1807
|
-
# Report generation may need network for some features, but can work offline
|
|
1808
|
-
requirement = CommandClassifier.get_network_requirement("reviewer", "report")
|
|
1809
|
-
offline_mode = (requirement == CommandNetworkRequirement.OFFLINE)
|
|
1810
|
-
|
|
1811
|
-
try:
|
|
1812
|
-
await reviewer.activate(offline_mode=offline_mode)
|
|
1813
|
-
return await reviewer.run("report", target=target, format=format_type, output_dir=output_dir)
|
|
1814
|
-
finally:
|
|
1815
|
-
await reviewer.close()
|
|
1816
|
-
|
|
1817
|
-
|
|
1818
|
-
async def run_duplication(reviewer: ReviewerAgent, target: str) -> dict[str, Any]:
|
|
1819
|
-
from ..command_classifier import CommandClassifier, CommandNetworkRequirement
|
|
1820
|
-
|
|
1821
|
-
# Duplication check is offline (local analysis)
|
|
1822
|
-
requirement = CommandClassifier.get_network_requirement("reviewer", "duplication")
|
|
1823
|
-
offline_mode = (requirement == CommandNetworkRequirement.OFFLINE)
|
|
1824
|
-
|
|
1825
|
-
try:
|
|
1826
|
-
await reviewer.activate(offline_mode=offline_mode)
|
|
1827
|
-
return await reviewer.run("duplication", file=target)
|
|
1828
|
-
finally:
|
|
1829
|
-
await reviewer.close()
|
|
1830
|
-
|
|
1831
|
-
|
|
1832
|
-
async def run_analyze_project(reviewer: ReviewerAgent, project_root: str | None, include_comparison: bool) -> dict[str, Any]:
|
|
1833
|
-
from ..command_classifier import CommandClassifier, CommandNetworkRequirement
|
|
1834
|
-
|
|
1835
|
-
# Project analysis may need network for some features, but can work offline
|
|
1836
|
-
requirement = CommandClassifier.get_network_requirement("reviewer", "analyze-project")
|
|
1837
|
-
offline_mode = (requirement == CommandNetworkRequirement.OFFLINE)
|
|
1838
|
-
|
|
1839
|
-
try:
|
|
1840
|
-
await reviewer.activate(offline_mode=offline_mode)
|
|
1841
|
-
return await reviewer.run(
|
|
1842
|
-
"analyze-project",
|
|
1843
|
-
project_root=project_root,
|
|
1844
|
-
include_comparison=include_comparison,
|
|
1845
|
-
)
|
|
1846
|
-
finally:
|
|
1847
|
-
await reviewer.close()
|
|
1848
|
-
|
|
1849
|
-
|
|
1850
|
-
async def run_analyze_services(
|
|
1851
|
-
reviewer: ReviewerAgent,
|
|
1852
|
-
services: list[str] | None,
|
|
1853
|
-
project_root: str | None,
|
|
1854
|
-
include_comparison: bool,
|
|
1855
|
-
) -> dict[str, Any]:
|
|
1856
|
-
from ..command_classifier import CommandClassifier, CommandNetworkRequirement
|
|
1857
|
-
|
|
1858
|
-
# Service analysis may need network for some features, but can work offline
|
|
1859
|
-
requirement = CommandClassifier.get_network_requirement("reviewer", "analyze-services")
|
|
1860
|
-
offline_mode = (requirement == CommandNetworkRequirement.OFFLINE)
|
|
1861
|
-
|
|
1862
|
-
try:
|
|
1863
|
-
await reviewer.activate(offline_mode=offline_mode)
|
|
1864
|
-
return await reviewer.run(
|
|
1865
|
-
"analyze-services",
|
|
1866
|
-
services=services,
|
|
1867
|
-
project_root=project_root,
|
|
1868
|
-
include_comparison=include_comparison,
|
|
1869
|
-
)
|
|
1870
|
-
finally:
|
|
1871
|
-
await reviewer.close()
|
|
1872
|
-
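The helpers being removed above all share the same lifecycle around ReviewerAgent: activate with an offline flag derived from CommandClassifier, run a single command, then close. A minimal sketch of that pattern, not taken from the package; the helper name and arguments are illustrative only:

async def run_reviewer_step(command: str, **kwargs) -> dict:
    # Illustrative lifecycle sketch; the real helpers derive offline_mode from
    # CommandClassifier.get_network_requirement() instead of hard-coding it.
    reviewer = ReviewerAgent()
    try:
        await reviewer.activate(offline_mode=True)
        return await reviewer.run(command, **kwargs)
    finally:
        await reviewer.close()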
+"""
+Reviewer agent command handlers
+
+Performance-optimized with:
+- Result caching for 90%+ speedup on unchanged files
+- Streaming progress for batch operations
+- Async I/O for better concurrency
+"""
+import asyncio
+import sys
+import time
+from pathlib import Path
+from typing import Any
+
+from ...agents.reviewer.agent import ReviewerAgent
+from ...agents.reviewer.cache import get_reviewer_cache, ReviewerResultCache
+from ..base import normalize_command, run_async_command
+from ..feedback import get_feedback, ProgressTracker
+from .common import check_result_error, format_json_output
+from ..formatters import format_json, format_markdown, format_html
+
+# Use cache version from the cache module for consistency
+REVIEWER_CACHE_VERSION = ReviewerResultCache.CACHE_VERSION
+
+
+def _infer_output_format(output_format: str, output_file: str | None) -> str:
+    """Infer output format from output file extension, otherwise keep explicit format."""
+    if not output_file:
+        return output_format
+
+    suffix = Path(output_file).suffix.lower()
+    if suffix == ".html":
+        return "html"
+    if suffix in {".md", ".markdown"}:
+        return "markdown"
+    if suffix == ".json":
+        return "json"
+    if suffix in {".txt", ".log"}:
+        return "text"
+    return output_format
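For orientation, a few hypothetical calls showing how the extension-based inference above resolves (file names are invented, and the sketch assumes _infer_output_format from the module above is in scope):

assert _infer_output_format("json", None) == "json"            # no output file: keep explicit format
assert _infer_output_format("json", "report.html") == "html"   # extension wins
assert _infer_output_format("text", "notes.markdown") == "markdown"
assert _infer_output_format("json", "scan.log") == "text"
assert _infer_output_format("json", "data.bin") == "json"      # unknown suffix: fall back to explicit format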
+
+
+def _write_output(output_file: str, content: str) -> None:
+    """Write output content to a file (UTF-8), creating parent directories."""
+    output_path = Path(output_file)
+    output_path.parent.mkdir(parents=True, exist_ok=True)
+    output_path.write_text(content, encoding="utf-8")
+
+
+def _format_text_review_result(result: dict[str, Any]) -> str:
+    """Create a human-readable review summary string."""
+    lines: list[str] = []
+    file_path = result.get("file", "unknown")
+    lines.append(f"Results for: {file_path}")
+
+    scoring = result.get("scoring") or {}
+    if scoring:
+        lines.append("")
+        lines.append(f"Score: {scoring.get('overall_score', 0.0):.1f}/100")
+        lines.append(f"  Complexity: {scoring.get('complexity_score', 0.0):.1f}/10")
+        lines.append(f"  Security: {scoring.get('security_score', 0.0):.1f}/10")
+        lines.append(f"  Maintainability: {scoring.get('maintainability_score', 0.0):.1f}/10")
+
+        # P1 Improvement: Show linting and type checking scores with issue counts
+        linting_score = scoring.get('linting_score', 0.0)
+        linting_count = scoring.get('linting_issue_count', 0)
+        type_score = scoring.get('type_checking_score', 0.0)
+        type_count = scoring.get('type_issue_count', 0)
+
+        linting_suffix = f" ({linting_count} issues)" if linting_count > 0 else ""
+        type_suffix = f" ({type_count} issues)" if type_count > 0 else ""
+
+        lines.append(f"  Linting: {linting_score:.1f}/10{linting_suffix}")
+        lines.append(f"  Type Checking: {type_score:.1f}/10{type_suffix}")
+
+    if "threshold" in result:
+        lines.append(f"  Threshold: {result.get('threshold')}")
+    if "passed" in result:
+        lines.append(f"  Status: {'Passed' if result.get('passed') else 'Failed'}")
+
+    # P1 Improvement: Show actual linting issues
+    linting_issues = scoring.get('linting_issues', [])
+    if linting_issues:
+        lines.append("")
+        lines.append(f"Linting Issues ({len(linting_issues)}):")
+        for issue in linting_issues[:10]:  # Show top 10
+            code = issue.get('code', '???')
+            msg = issue.get('message', 'Unknown issue')
+            line = issue.get('line', 0)
+            lines.append(f"  Line {line}: [{code}] {msg}")
+        if len(linting_issues) > 10:
+            lines.append(f"  ... and {len(linting_issues) - 10} more")
+
+    # P1 Improvement: Show actual type checking issues
+    type_issues = scoring.get('type_issues', [])
+    if type_issues:
+        lines.append("")
+        lines.append(f"Type Issues ({len(type_issues)}):")
+        for issue in type_issues[:10]:  # Show top 10
+            msg = issue.get('message', 'Unknown issue')
+            line = issue.get('line', 0)
+            error_code = issue.get('error_code', '')
+            code_suffix = f" [{error_code}]" if error_code else ""
+            lines.append(f"  Line {line}: {msg}{code_suffix}")
+        if len(type_issues) > 10:
+            lines.append(f"  ... and {len(type_issues) - 10} more")
+
+    feedback = result.get("feedback") or {}
+
+    # Handle feedback structure: could be instruction object or parsed feedback
+    feedback_text = None
+    feedback_summary = None
+
+    # Check if feedback is an instruction object (Cursor Skills format)
+    if isinstance(feedback, dict):
+        if "instruction" in feedback:
+            # Extract prompt from instruction as fallback
+            instruction = feedback.get("instruction", {})
+            feedback_text = instruction.get("prompt", "")
+            # Try to get actual feedback if it was executed
+            if "summary" in feedback:
+                feedback_summary = feedback.get("summary")
+            elif "feedback_text" in feedback:
+                feedback_text = feedback.get("feedback_text")
+        elif "summary" in feedback:
+            feedback_summary = feedback.get("summary")
+        elif "feedback_text" in feedback:
+            feedback_text = feedback.get("feedback_text")
+
+    # Parse feedback text if available
+    if feedback_text and not feedback_summary:
+        from ...agents.reviewer.feedback_generator import FeedbackGenerator
+        parsed = FeedbackGenerator.parse_feedback_text(feedback_text)
+        feedback_summary = parsed.get("summary") or feedback_text[:500]
+
+        # Display structured feedback with priorities
+        if parsed.get("security_concerns") or parsed.get("critical_issues") or parsed.get("improvements"):
+            lines.append("")
+            lines.append("Feedback:")
+            if feedback_summary:
+                lines.append(feedback_summary)
+                lines.append("")
+
+            # Security concerns (highest priority)
+            if parsed.get("security_concerns"):
+                lines.append("🔒 Security Concerns:")
+                for concern in parsed["security_concerns"][:5]:  # Top 5
+                    lines.append(f"  • {concern}")
+                lines.append("")
+
+            # Critical issues
+            if parsed.get("critical_issues"):
+                lines.append("⚠️ Critical Issues:")
+                for issue in parsed["critical_issues"][:5]:  # Top 5
+                    lines.append(f"  • {issue}")
+                lines.append("")
+
+            # Improvements
+            if parsed.get("improvements"):
+                lines.append("💡 Improvements:")
+                for improvement in parsed["improvements"][:5]:  # Top 5
+                    lines.append(f"  • {improvement}")
+                lines.append("")
+
+            # Style suggestions (only if no other feedback)
+            if not (parsed.get("security_concerns") or parsed.get("critical_issues") or parsed.get("improvements")):
+                if parsed.get("style_suggestions"):
+                    lines.append("📝 Style Suggestions:")
+                    for suggestion in parsed["style_suggestions"][:5]:
+                        lines.append(f"  • {suggestion}")
+                    lines.append("")
+        else:
+            # Fallback: just show summary
+            if feedback_summary:
+                lines.append("")
+                lines.append("Feedback:")
+                lines.append(feedback_summary)
+    elif feedback_summary:
+        # Direct summary available
+        lines.append("")
+        lines.append("Feedback:")
+        lines.append(str(feedback_summary))
+
+    # Surface quality gate signals if present
+    if result.get("quality_gate_blocked"):
+        lines.append("")
+        lines.append("Quality Gate: BLOCKED")
+
+    return "\n".join(lines) + "\n"
+
+
+def _format_text_batch_summary(result: dict[str, Any], title: str) -> str:
+    """Create a human-readable batch summary string."""
+    lines: list[str] = []
+    lines.append(f"{title} Results")
+    lines.append("")
+    lines.append(f"  Total files: {result.get('total', 0)}")
+    lines.append(f"  Successful: {result.get('successful', 0)}")
+    lines.append(f"  Failed: {result.get('failed', 0)}")
+
+    errors = result.get("errors") or []
+    if errors:
+        lines.append("")
+        lines.append("Errors:")
+        for err in errors[:25]:
+            f = err.get("file", "unknown")
+            msg = err.get("error", "unknown error")
+            lines.append(f"  {f}: {msg}")
+        if len(errors) > 25:
+            lines.append(f"  ... and {len(errors) - 25} more")
+
+    return "\n".join(lines) + "\n"
+
+
+async def review_command(
+    file_path: str | None = None,
+    files: list[str] | None = None,
+    pattern: str | None = None,
+    output_format: str = "json",
+    max_workers: int = 4,
+    output_file: str | None = None,
+    fail_under: float | None = None,
+    verbose_output: bool = False,
+):
+    """
+    Review code file(s) (supports both *review and review commands).
+
+    Supports single file (backward compatible) or batch processing.
+    """
+    from ..command_classifier import CommandClassifier, CommandNetworkRequirement
+    from ..network_detection import NetworkDetector
+    from ...core.network_errors import NetworkRequiredError, NetworkOptionalError
+    from ..base import handle_network_error
+
+    feedback = get_feedback()
+    output_format = _infer_output_format(output_format, output_file)
+    feedback.format_type = output_format
+
+    # Check network requirement
+    requirement = CommandClassifier.get_network_requirement("reviewer", "review")
+    offline_mode = False
+
+    if requirement == CommandNetworkRequirement.OFFLINE:
+        offline_mode = True
+    elif requirement == CommandNetworkRequirement.OPTIONAL:
+        # Try offline first if network unavailable
+        if not NetworkDetector.is_network_available():
+            offline_mode = True
+            feedback.info("Network unavailable, continuing in offline mode with reduced functionality")
+    else:
+        # Network required - check availability
+        if not NetworkDetector.is_network_available():
+            try:
+                raise NetworkRequiredError(
+                    operation_name="reviewer review",
+                    message="Network is required for this command"
+                )
+            except NetworkRequiredError as e:
+                handle_network_error(e, format_type=output_format)
+                return
+
+    # Handle backward compatibility: single file argument
+    if file_path and not files and not pattern:
+        files = [file_path]
+
+    # Normalize file paths before processing (fixes Windows absolute path issues)
+    files = _normalize_file_paths(files)
+
+    # Resolve file list
+    try:
+        resolved_files = _resolve_file_list(files, pattern)
+    except ValueError as e:
+        feedback.error(
+            str(e),
+            error_code="no_files_found",
+            context={"files": files, "pattern": pattern},
+            remediation="Specify files as arguments or use --pattern with a glob pattern",
+            exit_code=1,
+        )
+        return
+
+    # Validate files exist
+    missing_files = [f for f in resolved_files if not f.exists()]
+    if missing_files:
+        feedback.error(
+            f"Files not found: {', '.join(str(f) for f in missing_files)}",
+            error_code="file_not_found",
+            context={"missing_files": [str(f) for f in missing_files]},
+            remediation="Check that the files exist and paths are correct",
+            exit_code=1,
+        )
+        return
+
+    feedback.start_operation("Review")
+    if len(resolved_files) == 1:
+        feedback.info(f"Reviewing {resolved_files[0]}...")
+    else:
+        feedback.info(f"Reviewing {len(resolved_files)} files (max {max_workers} concurrent)...")
+
+    reviewer = ReviewerAgent()
+    cache = get_reviewer_cache()
+
+    try:
+        # Activate agent (load configs, etc.)
+        if feedback.verbosity.value == "verbose":
+            feedback.info("Initializing ReviewerAgent...")
+        await reviewer.activate(offline_mode=offline_mode)
+
+        # Single file - use existing flow for backward compatibility
+        if len(resolved_files) == 1:
+            file_path_obj = resolved_files[0]
+
+            # Execute review command (with caching)
+            if feedback.verbosity.value == "verbose":
+                feedback.info("Running code analysis...")
+
+            # Check cache first
+            cached_result = await cache.get_cached_result(
+                file_path_obj, "review", REVIEWER_CACHE_VERSION
+            )
+            if cached_result is not None:
+                result = cached_result
+                feedback.info("Using cached result (file unchanged)")
+            else:
+                result = await reviewer.run("review", file=str(file_path_obj))
+                check_result_error(result)
+                # Cache the result
+                await cache.save_result(
+                    file_path_obj, "review", REVIEWER_CACHE_VERSION, result
+                )
+
+            feedback.clear_progress()
+
+            # Output handling (stdout vs file)
+            if output_file:
+                if output_format == "json":
+                    output_content = format_json(result)
+                elif output_format == "markdown":
+                    output_content = format_markdown(result)
+                elif output_format == "html":
+                    output_content = format_html(result, title="Code Review")
+                else:
+                    output_content = _format_text_review_result(result)
+                _write_output(output_file, output_content)
+                feedback.success(f"Results written to {output_file}")
+            else:
+                if output_format == "json":
+                    feedback.output_result(result, message="Review completed successfully", warnings=None, compact=not verbose_output)
+                elif output_format in ("markdown", "html"):
+                    # Print raw content to stdout
+                    output_content = (
+                        format_markdown(result)
+                        if output_format == "markdown"
+                        else format_html(result, title="Code Review")
+                    )
+                    print(output_content)
+                else:
+                    feedback.success("Review completed")
+                    print(_format_text_review_result(result))
+
+            # CI-style failure handling
+            if fail_under is not None:
+                scoring = result.get("scoring") or {}
+                overall = float(scoring.get("overall_score", 0.0))
+                if overall < fail_under:
+                    sys.exit(1)
+            elif result.get("passed") is False:
+                # If the agent evaluated a threshold and failed, return non-zero (useful in CI)
+                sys.exit(1)
+        else:
+            # Batch processing
+            result = await _process_file_batch(reviewer, resolved_files, "review", max_workers)
+            feedback.clear_progress()
+
+            if output_file:
+                if output_format == "json":
+                    output_content = format_json(result)
+                elif output_format == "markdown":
+                    output_content = format_markdown(result.get("files", []))
+                elif output_format == "html":
+                    output_content = format_html(result.get("files", []), title="Batch Code Review")
+                else:
+                    output_content = _format_text_batch_summary(result, title="Batch Review")
+                _write_output(output_file, output_content)
+                feedback.success(f"Results written to {output_file}")
+            else:
+                if output_format == "json":
+                    feedback.output_result(
+                        result,
+                        message=f"Review completed: {result['successful']}/{result['total']} files successful",
+                        compact=not verbose_output,
+                    )
+                elif output_format in ("markdown", "html"):
+                    output_content = (
+                        format_markdown(result.get("files", []))
+                        if output_format == "markdown"
+                        else format_html(result.get("files", []), title="Batch Code Review")
+                    )
+                    print(output_content)
+                else:
+                    feedback.success(f"Review completed: {result['successful']}/{result['total']} files successful")
+                    print(_format_text_batch_summary(result, title="Batch Review"))
+
+            # Fail if any file failed, or if fail_under is set and any score < threshold
+            if fail_under is not None:
+                for file_result in result.get("files", []):
+                    scoring = file_result.get("scoring") or {}
+                    overall = float(scoring.get("overall_score", 0.0))
+                    if overall < fail_under:
+                        sys.exit(1)
+            if int(result.get("failed", 0)) > 0:
+                sys.exit(1)
+    finally:
+        await reviewer.close()
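Because review_command is a coroutine, callers drive it from an event loop. A minimal invocation sketch with made-up paths and threshold, assuming the module above is importable:

import asyncio

asyncio.run(
    review_command(
        files=["src/app.py", "src/utils.py"],   # hypothetical targets
        output_format="markdown",
        output_file="reports/review.md",
        fail_under=75.0,                        # exits non-zero below this score
        max_workers=4,
    )
)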
+
+
+def _normalize_file_paths(files: list[str] | None) -> list[str]:
+    """
+    Normalize file paths to handle Windows absolute paths.
+
+    Args:
+        files: List of file paths (can be None or empty)
+
+    Returns:
+        List of normalized file paths
+    """
+    if not files:
+        return []
+
+    from ...core.path_normalizer import normalize_for_cli, normalize_project_root
+    project_root = normalize_project_root(Path.cwd())
+    normalized_files = []
+
+    for f in files:
+        try:
+            # Normalize Windows absolute paths to relative paths
+            normalized = normalize_for_cli(f, project_root)
+            normalized_files.append(normalized)
+        except Exception:
+            # If normalization fails, use original path
+            normalized_files.append(f)
+
+    return normalized_files
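A rough sketch of the intended effect, assuming normalize_for_cli maps absolute paths under the project root to project-relative ones (the exact behaviour lives in core.path_normalizer, which is not shown in this diff):

# Hypothetical input; output shape depends on core.path_normalizer.
paths = _normalize_file_paths([r"C:\work\project\src\app.py", "src/utils.py"])
print(paths)  # e.g. ["src/app.py", "src/utils.py"] when run from the project root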
+
+
+def _resolve_file_list(files: list[str] | None, pattern: str | None) -> list[Path]:
+    """
+    Resolve file list from files and/or pattern.
+
+    Args:
+        files: List of file paths (can be None or empty)
+        pattern: Glob pattern (can be None)
+
+    Returns:
+        List of resolved Path objects
+
+    Raises:
+        ValueError: If no files found
+    """
+    resolved_files: list[Path] = []
+
+    exclude_dir_names = {
+        ".git",
+        "__pycache__",
+        ".pytest_cache",
+        ".mypy_cache",
+        ".ruff_cache",
+        ".venv",
+        "venv",
+        "env",
+        "node_modules",
+        "dist",
+        "build",
+        "htmlcov",
+        "reports",
+        ".tapps-agents",
+        "tapps_agents.egg-info",
+        ".egg-info",
+    }
+    allowed_suffixes = {
+        ".py",
+        ".ts",
+        ".tsx",
+        ".js",
+        ".jsx",
+        ".java",
+        ".go",
+        ".rs",
+        ".yaml",
+        ".yml",
+        ".json",
+        ".md",
+        ".dockerfile",
+    }
+
+    def _is_excluded(path: Path) -> bool:
+        return any(part in exclude_dir_names for part in path.parts)
+
+    def _discover_from_dir(root: Path, max_files: int = 200) -> list[Path]:
+        """
+        Discover code files from a directory.
+
+        Args:
+            root: Directory to search
+            max_files: Maximum number of files to discover (default: 200)
+
+        Returns:
+            List of discovered file paths
+        """
+        discovered: list[Path] = []
+        for pat in ["*.py", "*.ts", "*.tsx", "*.js", "*.jsx", "*.java", "*.go", "*.rs", "*.yaml", "*.yml"]:
+            if len(discovered) >= max_files:
+                break
+            for p in root.rglob(pat):
+                if len(discovered) >= max_files:
+                    break
+                if _is_excluded(p):
+                    continue
+                if p.is_file() and p.suffix.lower() in allowed_suffixes:
+                    discovered.append(p)
+        return discovered
+
+    # Handle glob pattern
+    if pattern:
+        cwd = Path.cwd()
+        matched = []
+        for p in cwd.glob(pattern):
+            if len(matched) >= 200:  # Limit pattern matches to prevent too many files
+                break
+            if p.is_file() and not _is_excluded(p):
+                matched.append(p)
+        resolved_files.extend(matched)
+
+    # Handle explicit file list
+    if files:
+        for file_path in files:
+            # Support passing glob patterns directly as positional args (e.g. "src/**/*.py")
+            if any(ch in file_path for ch in ["*", "?", "["]):
+                matched_count = 0
+                for p in Path.cwd().glob(file_path):
+                    if matched_count >= 200:  # Limit glob matches to prevent too many files
+                        break
+                    if p.is_file() and not _is_excluded(p):
+                        resolved_files.append(p)
+                        matched_count += 1
+                continue
+
+            path = Path(file_path)
+            if not path.is_absolute():
+                # Use resolve() to properly normalize path and eliminate directory duplication
+                path = (Path.cwd() / path).resolve()
+            if path.exists() and path.is_dir():
+                resolved_files.extend(_discover_from_dir(path))
+            elif path.exists():
+                if path.is_file() and (path.suffix.lower() in allowed_suffixes or path.suffix == ""):
+                    resolved_files.append(path)
+            else:
+                # Try relative to cwd (with proper resolution to eliminate duplication)
+                cwd_path = (Path.cwd() / file_path).resolve()
+                if cwd_path.exists() and cwd_path.is_dir():
+                    resolved_files.extend(_discover_from_dir(cwd_path))
+                elif cwd_path.exists():
+                    if cwd_path.is_file() and (cwd_path.suffix.lower() in allowed_suffixes or cwd_path.suffix == ""):
+                        resolved_files.append(cwd_path)
+                else:
+                    # Keep it anyway - let the agent handle the error
+                    resolved_files.append(path)
+
+    # Remove duplicates while preserving order
+    seen = set()
+    unique_files = []
+    for f in resolved_files:
+        if f not in seen:
+            seen.add(f)
+            unique_files.append(f)
+
+    if not unique_files:
+        raise ValueError("No files found. Specify files or use --pattern to match files.")
+
+    # Warn if too many files discovered
+    if len(unique_files) > 200:
+        from ..feedback import get_feedback
+        feedback = get_feedback()
+        feedback.warning(
+            f"Large number of files discovered ({len(unique_files)}). Processing may take a while. "
+            f"Consider using --pattern to target specific files or directories. "
+            f"Only the first 200 files will be processed."
+        )
+        # Limit to 200 files to prevent connection errors
+        unique_files = unique_files[:200]
+
+    return unique_files
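A few hypothetical calls illustrating the resolution rules above (glob patterns, directory discovery, exclusion of vendored and cache directories, deduplication, and the 200-file cap); results depend entirely on the working tree:

# Assumes _resolve_file_list from the module above is in scope.
by_pattern = _resolve_file_list(None, pattern="src/**/*.py")           # glob, capped at 200 matches
mixed = _resolve_file_list(["src", "README.md", "tests/*.py"], None)   # directory + literal file + inline glob
try:
    _resolve_file_list([], None)
except ValueError as exc:
    print(exc)  # "No files found. Specify files or use --pattern to match files."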
+
+
+class CircuitBreaker:
+    """Circuit breaker to prevent cascading failures."""
+
+    def __init__(self, failure_threshold: int = 5, reset_timeout: float = 60.0):
+        self.failure_threshold = failure_threshold
+        self.reset_timeout = reset_timeout
+        self.failure_count = 0
+        self.last_failure_time: float | None = None
+        self.is_open = False
+
+    def record_success(self) -> None:
+        """Record successful operation."""
+        self.failure_count = 0
+        self.is_open = False
+
+    def record_failure(self) -> None:
+        """Record failure and check if circuit should open."""
+        self.failure_count += 1
+        if self.failure_count >= self.failure_threshold:
+            self.is_open = True
+            self.last_failure_time = time.time()
+
+    def should_allow(self) -> bool:
+        """Check if operation should be allowed."""
+        if not self.is_open:
+            return True
+
+        # Check if reset timeout has passed
+        if self.last_failure_time:
+            elapsed = time.time() - self.last_failure_time
+            if elapsed >= self.reset_timeout:
+                self.is_open = False
+                self.failure_count = 0
+                return True
+
+        return False
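The breaker above is purely in-memory and time-based. A small usage sketch, not from the package; do_work is a hypothetical callable:

breaker = CircuitBreaker(failure_threshold=3, reset_timeout=30.0)

def guarded_call(do_work):
    # Illustrative wrapper showing the intended should_allow/record_* protocol.
    if not breaker.should_allow():
        return {"error": "circuit open, skipping"}
    try:
        result = do_work()
        breaker.record_success()
        return result
    except Exception as exc:
        breaker.record_failure()
        return {"error": str(exc)}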
+
+
+def is_retryable_error(error: Exception) -> bool:
+    """
+    Check if error is retryable (connection-related).
+
+    Implements error taxonomy to distinguish between:
+    - Retryable: Transient issues (network timeouts, connection errors)
+    - Non-retryable: Permanent issues (file not found, invalid input)
+
+    Based on best practices for AI agent error handling.
+
+    Args:
+        error: Exception to check
+
+    Returns:
+        True if error is retryable (connection-related)
+    """
+    retryable_types = (
+        ConnectionError,
+        TimeoutError,
+        OSError,
+    )
+
+    # Check for requests library errors
+    try:
+        import requests
+        retryable_types = retryable_types + (
+            requests.exceptions.RequestException,
+            requests.exceptions.ConnectionError,
+            requests.exceptions.Timeout,
+            requests.exceptions.ReadTimeout,
+            requests.exceptions.ConnectTimeout,
+        )
+    except ImportError:
+        pass
+
+    # Check for aiohttp errors (common in async Python)
+    try:
+        import aiohttp
+        retryable_types = retryable_types + (
+            aiohttp.ClientError,
+            aiohttp.ClientConnectionError,
+            aiohttp.ClientConnectorError,
+            aiohttp.ServerTimeoutError,
+        )
+    except ImportError:
+        pass
+
+    error_str = str(error).lower()
+    retryable_keywords = [
+        "connection",
+        "timeout",
+        "network",
+        "unreachable",
+        "refused",
+        "reset",
+        "connection error",
+        "connection failed",
+        "temporary failure",
+        "service unavailable",
+        "rate limit",  # Rate limits are often temporary
+    ]
+
+    # Non-retryable keywords (permanent errors)
+    non_retryable_keywords = [
+        "file not found",
+        "permission denied",
+        "invalid",
+        "malformed",
+        "syntax error",
+    ]
+
+    # Check for non-retryable errors first
+    if any(keyword in error_str for keyword in non_retryable_keywords):
+        return False
+
+    return (
+        isinstance(error, retryable_types) or
+        any(keyword in error_str for keyword in retryable_keywords)
+    )
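A few illustrative classifications under the rules above; note the non-retryable keyword check runs before the type check, so a FileNotFoundError whose message contains "file not found" is rejected even though it is an OSError subclass:

assert is_retryable_error(ConnectionError("connection reset by peer"))
assert is_retryable_error(TimeoutError("read timeout"))
assert is_retryable_error(RuntimeError("503 service unavailable"))        # matched by keyword only
assert not is_retryable_error(ValueError("invalid configuration key"))    # "invalid" is non-retryable
assert not is_retryable_error(FileNotFoundError("file not found: x.py"))  # keyword check wins over the OSError subtype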
+
+
+async def _process_file_batch(
+    reviewer: ReviewerAgent,
+    files: list[Path],
+    command: str,
+    max_workers: int = 4,
+) -> dict[str, Any]:
+    """
+    Process multiple files concurrently in batches with retry logic and circuit breaker.
+
+    Performance optimizations:
+    - Result caching for 90%+ speedup on unchanged files
+    - Circuit breaker to prevent cascading failures
+    - Retry logic with exponential backoff
+
+    Args:
+        reviewer: ReviewerAgent instance
+        files: List of file paths to process
+        command: Command to run ('score', 'review', 'lint', 'type-check')
+        max_workers: Maximum concurrent operations
+
+    Returns:
+        Dictionary with aggregated results
+    """
+    from ..feedback import get_feedback
+    feedback = get_feedback()
+    cache = get_reviewer_cache()
+
+    # Configuration
+    BATCH_SIZE = 10  # Process 10 files per batch
+    MAX_CONCURRENT = max(1, min(max_workers, 2))  # Limit to max 2 concurrent
+    BATCH_DELAY = 1.0  # Delay between batches
+    FILE_DELAY = 0.2  # Small delay between individual files
+    MAX_RETRIES = 3  # Maximum retry attempts for connection errors
+    RETRY_BACKOFF_BASE = 2.0  # Exponential backoff base
+    MAX_RETRY_BACKOFF = 10.0  # Maximum backoff time in seconds
+    RETRY_TIMEOUT = 120.0  # Timeout per retry attempt (2 minutes)
+
+    # Track cache statistics for this batch
+    cache_hits = 0
+    cache_misses = 0
+
+    # Progress tracking for long operations
+    total_files = len(files)
+    processed_count = 0
+    start_time = asyncio.get_event_loop().time()
+    last_progress_update = start_time
+    PROGRESS_UPDATE_INTERVAL = 5.0  # Update progress every 5 seconds for long operations
+
+    # Initialize circuit breaker
+    circuit_breaker = CircuitBreaker(failure_threshold=5, reset_timeout=60.0)
+    semaphore = asyncio.Semaphore(MAX_CONCURRENT)
+
+    async def process_single_file(file_path: Path) -> tuple[Path, dict[str, Any]]:
+        """Process a single file with caching, retry logic, circuit breaker, and semaphore limiting."""
+        nonlocal cache_hits, cache_misses
+
+        # Check cache first (before circuit breaker)
+        cached_result = await cache.get_cached_result(
+            file_path, command, REVIEWER_CACHE_VERSION
+        )
+        if cached_result is not None:
+            cache_hits += 1
+            cached_result["_from_cache"] = True
+            return (file_path, cached_result)
+
+        cache_misses += 1
+
+        # Check circuit breaker before processing
+        if not circuit_breaker.should_allow():
+            return (file_path, {
+                "error": "Circuit breaker open - too many failures",
+                "file": str(file_path),
+                "circuit_breaker": True
+            })
+
+        async with semaphore:
+            await asyncio.sleep(FILE_DELAY)
+
+            # Retry logic for connection errors with per-attempt timeout
+            last_error: Exception | None = None
+            RETRY_TIMEOUT = 120.0  # 2 minutes per retry attempt
+
+            for attempt in range(1, MAX_RETRIES + 1):
+                try:
+                    # Wrap each retry attempt in a timeout to prevent hanging
+                    result = await asyncio.wait_for(
+                        reviewer.run(command, file=str(file_path)),
+                        timeout=RETRY_TIMEOUT
+                    )
+                    # Ensure result is always a dict (defensive check)
+                    if not isinstance(result, dict):
+                        return (file_path, {
+                            "error": f"Unexpected result type: {type(result).__name__}. Result: {str(result)[:200]}",
+                            "file": str(file_path)
+                        })
+
+                    # Success - record in circuit breaker and cache result
+                    circuit_breaker.record_success()
+
+                    # Cache successful results (non-error results only)
+                    if "error" not in result:
+                        await cache.save_result(
+                            file_path, command, REVIEWER_CACHE_VERSION, result
+                        )
+
+                    return (file_path, result)
+
+                except asyncio.TimeoutError:
+                    # Per-attempt timeout - treat as retryable connection issue
+                    last_error = TimeoutError(f"Operation timed out after {RETRY_TIMEOUT}s")
+                    if attempt < MAX_RETRIES:
+                        backoff = min(RETRY_BACKOFF_BASE ** attempt, MAX_RETRY_BACKOFF)
+                        if feedback.verbosity.value == "verbose":
+                            feedback.info(
+                                f"Retrying {file_path.name} after timeout "
+                                f"(attempt {attempt + 1}/{MAX_RETRIES}, backoff {backoff:.1f}s)..."
+                            )
+                        await asyncio.sleep(backoff)
+                        continue
+                    else:
+                        circuit_breaker.record_failure()
+                        return (file_path, {
+                            "error": f"Operation timed out after {RETRY_TIMEOUT}s (attempt {attempt}/{MAX_RETRIES})",
+                            "file": str(file_path),
+                            "retryable": True,
+                            "attempts": attempt,
+                            "timeout": True
+                        })
+
+                except Exception as e:
+                    last_error = e
+
+                    # Check if error is retryable
+                    if is_retryable_error(e) and attempt < MAX_RETRIES:
+                        # Exponential backoff
+                        backoff = min(RETRY_BACKOFF_BASE ** attempt, MAX_RETRY_BACKOFF)
+                        if feedback.verbosity.value == "verbose":
+                            feedback.info(
+                                f"Retrying {file_path.name} after connection error "
+                                f"(attempt {attempt + 1}/{MAX_RETRIES}, backoff {backoff:.1f}s)..."
+                            )
+                        await asyncio.sleep(backoff)
+                        continue
+                    else:
+                        # Non-retryable error or max retries reached
+                        if is_retryable_error(e):
+                            circuit_breaker.record_failure()
+                        return (file_path, {
+                            "error": str(e),
+                            "file": str(file_path),
+                            "retryable": is_retryable_error(e),
+                            "attempts": attempt,
+                            "error_type": type(e).__name__
+                        })
+
+            # All retries exhausted
+            circuit_breaker.record_failure()
+            return (file_path, {
+                "error": f"Failed after {MAX_RETRIES} attempts: {str(last_error)}",
+                "file": str(file_path),
+                "retryable": True,
+                "attempts": MAX_RETRIES,
+                "error_type": type(last_error).__name__ if last_error else "Unknown"
+            })
+
+    # Process files in batches with circuit breaker protection
+    all_results = []
+    total_batches = (len(files) + BATCH_SIZE - 1) // BATCH_SIZE
+
+    for batch_idx in range(total_batches):
+        # Check circuit breaker before processing batch
+        if not circuit_breaker.should_allow():
+            remaining_count = len(files) - batch_idx * BATCH_SIZE
+            feedback.warning(
+                f"Circuit breaker open - skipping remaining {remaining_count} files "
+                f"(too many connection failures)"
+            )
+            # Mark remaining files as failed
+            for remaining_file in files[batch_idx * BATCH_SIZE:]:
+                all_results.append((remaining_file, {
+                    "error": "Circuit breaker open - skipped due to too many failures",
+                    "file": str(remaining_file),
+                    "circuit_breaker": True
+                }))
+            break
+
+        start_idx = batch_idx * BATCH_SIZE
+        end_idx = min(start_idx + BATCH_SIZE, len(files))
+        batch_files = files[start_idx:end_idx]
+
+        if total_batches > 1:
+            feedback.info(f"Processing batch {batch_idx + 1}/{total_batches} ({len(batch_files)} files)...")
+
+        # Process files in batch with limited concurrency and progress updates
+        # Create tasks for the batch, but semaphore limits concurrent execution
+        batch_tasks = [process_single_file(f) for f in batch_files]
+
+        # Add progress tracking for long operations
+        async def process_with_progress():
+            """Process batch with periodic progress updates."""
+            nonlocal processed_count, last_progress_update
+
+            # Create a wrapper that updates progress
+            async def process_and_track(task):
+                nonlocal processed_count, last_progress_update
+                result = await task
+                processed_count += 1
+
+                # Update progress every 5 seconds for operations >10 seconds
+                current_time = asyncio.get_event_loop().time()
+                elapsed = current_time - start_time
+
+                if elapsed > 10.0:  # Only show progress for operations >10 seconds
+                    if current_time - last_progress_update >= PROGRESS_UPDATE_INTERVAL:
+                        percent = (processed_count / total_files * 100) if total_files > 0 else 0
+                        feedback.info(
+                            f"Reviewing files: {processed_count}/{total_files} ({percent:.1f}%) "
+                            f"- {elapsed:.1f}s elapsed"
+                        )
+                        last_progress_update = current_time
+
+                return result
+
+            # Process all tasks with progress tracking
+            tracked_tasks = [process_and_track(task) for task in batch_tasks]
+            return await asyncio.gather(*tracked_tasks, return_exceptions=True)
+
+        batch_results = await process_with_progress()
+        all_results.extend(batch_results)
+
+        # Delay between batches to avoid overwhelming connections
+        if batch_idx < total_batches - 1:  # Don't delay after last batch
+            await asyncio.sleep(BATCH_DELAY)
+
+    results = all_results
+
+    # Aggregate results
+    aggregated: dict[str, Any] = {
+        "files": [],
+        "successful": 0,
+        "failed": 0,
+        "errors": [],
+    }
+
+    for result in results:
+        if isinstance(result, Exception):
+            aggregated["failed"] += 1
+            aggregated["errors"].append({"error": str(result)})
+            continue
+
+        file_path, file_result = result
+
+        # Defensive check: ensure file_result is a dict
+        if not isinstance(file_result, dict):
+            aggregated["failed"] += 1
+            aggregated["errors"].append({
+                "file": str(file_path),
+                "error": f"Unexpected result type: {type(file_result).__name__}. Result: {str(file_result)[:200]}"
+            })
+            continue
+
+        if "error" in file_result:
+            aggregated["failed"] += 1
+            aggregated["errors"].append({
+                "file": str(file_path),
+                "error": file_result.get("error", "Unknown error")
+            })
+        else:
+            aggregated["successful"] += 1
+
+        file_entry: dict[str, Any] = {
+            "file": str(file_path),
+        }
+        file_entry.update(file_result)
+        aggregated["files"].append(file_entry)
+
+    aggregated["total"] = len(files)
+
+    # Add cache statistics to help users understand performance gains
+    aggregated["_cache_stats"] = {
+        "hits": cache_hits,
+        "misses": cache_misses,
+        "hit_rate": f"{(cache_hits / len(files) * 100):.1f}%" if files else "0.0%"
+    }
+
+    # Log cache statistics if verbose
+    if feedback.verbosity.value == "verbose" and cache_hits > 0:
+        feedback.info(
+            f"Cache stats: {cache_hits} hits, {cache_misses} misses "
+            f"({cache_hits / len(files) * 100:.1f}% hit rate)"
+        )
+
+    return aggregated
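With the constants above (backoff base 2.0, cap 10.0, 3 attempts), the waits between attempts work out to 2.0s and 4.0s; a one-liner reproducing that schedule:

# Backoff before the 2nd and 3rd attempts; the 3rd failure is returned as an error entry.
MAX_RETRIES, RETRY_BACKOFF_BASE, MAX_RETRY_BACKOFF = 3, 2.0, 10.0
print([min(RETRY_BACKOFF_BASE ** attempt, MAX_RETRY_BACKOFF) for attempt in range(1, MAX_RETRIES)])  # [2.0, 4.0]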
+
+
+async def score_command(
+    file_path: str | None = None,
+    files: list[str] | None = None,
+    pattern: str | None = None,
+    output_format: str = "json",
+    max_workers: int = 4,
+    output_file: str | None = None,
+    fail_under: float | None = None,
+    verbose_output: bool = False,
+    explain: bool = False,
+):
+    """
+    Score code file(s) (supports both *score and score commands).
+
+    Supports single file (backward compatible) or batch processing.
+    """
+    from ..command_classifier import CommandClassifier, CommandNetworkRequirement
+
+    feedback = get_feedback()
+    output_format = _infer_output_format(output_format, output_file)
+    feedback.format_type = output_format
+
+    # Check network requirement - score is offline
+    requirement = CommandClassifier.get_network_requirement("reviewer", "score")
+    offline_mode = (requirement == CommandNetworkRequirement.OFFLINE)
+
+    # Handle backward compatibility: single file argument
+    if file_path and not files and not pattern:
+        files = [file_path]
+
+    # Normalize file paths before processing (fixes Windows absolute path issues)
+    files = _normalize_file_paths(files)
+
+    # Resolve file list
+    try:
+        resolved_files = _resolve_file_list(files, pattern)
+    except ValueError as e:
+        feedback.error(
+            str(e),
+            error_code="no_files_found",
+            context={"files": files, "pattern": pattern},
+            remediation="Specify files as arguments or use --pattern with a glob pattern",
+            exit_code=1,
+        )
+        return
+
+    # Validate files exist
+    missing_files = [f for f in resolved_files if not f.exists()]
+    if missing_files:
+        feedback.error(
+            f"Files not found: {', '.join(str(f) for f in missing_files)}",
+            error_code="file_not_found",
+            context={"missing_files": [str(f) for f in missing_files]},
+            remediation="Check that the files exist and paths are correct",
+            exit_code=1,
+        )
+        return
+
+    feedback.start_operation("Score")
+    if len(resolved_files) == 1:
+        feedback.info(f"Scoring {resolved_files[0]}...")
+    else:
+        feedback.info(f"Scoring {len(resolved_files)} files (max {max_workers} concurrent)...")
+
+    reviewer = ReviewerAgent()
+    cache = get_reviewer_cache()
+
+    try:
+        await reviewer.activate(offline_mode=offline_mode)
+
+        # Single file - use existing flow for backward compatibility
+        if len(resolved_files) == 1:
+            file_path_obj = resolved_files[0]
+
+            # Check cache first
+            cached_result = await cache.get_cached_result(
+                file_path_obj, "score", REVIEWER_CACHE_VERSION
+            )
+            if cached_result is not None:
+                result = cached_result
+                feedback.info("Using cached result (file unchanged)")
+            else:
+                result = await reviewer.run("score", file=str(file_path_obj), explain=explain)
+                check_result_error(result)
+                # Cache the result
+                await cache.save_result(
+                    file_path_obj, "score", REVIEWER_CACHE_VERSION, result
+                )
+            feedback.clear_progress()
+
+            # Format and output result
+            if output_format == "json":
+                output_content = format_json(result)
+            elif output_format == "markdown":
+                output_content = format_markdown(result)
+            elif output_format == "html":
+                output_content = format_html(result, title="Code Quality Scores")
+            else:  # text
+                output_content = _format_text_review_result(result)
+
+            # Write to file or stdout
+            if output_file:
+                _write_output(output_file, output_content)
+                feedback.success(f"Results written to {output_file}")
+            else:
+                if output_format == "json":
+                    feedback.output_result(result, message="Scoring completed", warnings=None, compact=not verbose_output)
+                elif output_format in ("markdown", "html"):
+                    print(output_content)
+                else:
+                    feedback.success("Scoring completed")
+                    print(output_content)
+
+            if fail_under is not None:
+                scoring = result.get("scoring") or {}
+                overall = float(scoring.get("overall_score", 0.0))
+                if overall < fail_under:
+                    sys.exit(1)
+        else:
+            # Batch processing
+            result = await _process_file_batch(reviewer, resolved_files, "score", max_workers)
+            feedback.clear_progress()
+
+            # Format and output result
+            if output_format == "json":
+                output_content = format_json(result)
+            elif output_format == "markdown":
+                output_content = format_markdown(result['files'])
+            elif output_format == "html":
+                output_content = format_html(result['files'], title="Batch Code Quality Scores")
+            else:  # text
+                output_content = _format_text_batch_summary(result, title="Batch Score")
+
+            # Write to file or stdout
+            if output_file:
+                _write_output(output_file, output_content)
+                feedback.success(f"Results written to {output_file}")
+            else:
+                if output_format == "json":
+                    feedback.output_result(result, message=f"Scoring completed: {result['successful']}/{result['total']} files successful", compact=not verbose_output)
+                elif output_format in ("markdown", "html"):
+                    print(output_content)
+                else:
+                    feedback.success(f"Scoring completed: {result['successful']}/{result['total']} files successful")
+                    print(output_content)
+
+            if fail_under is not None:
+                for file_result in result.get("files", []):
+                    scoring = file_result.get("scoring") or {}
+                    overall = float(scoring.get("overall_score", 0.0))
+                    if overall < fail_under:
+                        sys.exit(1)
+            if int(result.get("failed", 0)) > 0:
+                sys.exit(1)
+    finally:
+        await reviewer.close()
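A hypothetical CI-style use of score_command: scan a glob pattern and exit non-zero when any file scores below the chosen threshold (pattern and threshold are made up; assumes the module above is importable):

import asyncio

asyncio.run(
    score_command(
        pattern="src/**/*.py",
        output_format="json",
        fail_under=80.0,
        explain=False,
    )
)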
|
|
1165
|
+
|
|
1166
|
+
|
|
1167
|
+
+async def lint_command(
+    file_path: str | None = None,
+    files: list[str] | None = None,
+    pattern: str | None = None,
+    output_format: str = "json",
+    max_workers: int = 4,
+    output_file: str | None = None,
+    fail_on_issues: bool = False,
+    verbose_output: bool = False,
+    isolated: bool = False,
+) -> None:
+    """Run linting on file(s) with consistent async execution and output handling."""
+    from ..command_classifier import CommandClassifier, CommandNetworkRequirement
+
+    feedback = get_feedback()
+    output_format = _infer_output_format(output_format, output_file)
+    feedback.format_type = output_format
+
+    # Check network requirement - lint is offline
+    requirement = CommandClassifier.get_network_requirement("reviewer", "lint")
+    offline_mode = (requirement == CommandNetworkRequirement.OFFLINE)
+
+    if file_path and not files and not pattern:
+        files = [file_path]
+
+    # Normalize file paths before processing (fixes Windows absolute path issues)
+    files = _normalize_file_paths(files)
+
+    try:
+        resolved_files = _resolve_file_list(files, pattern)
+    except ValueError as e:
+        feedback.error(
+            str(e),
+            error_code="no_files_found",
+            context={"files": files, "pattern": pattern},
+            remediation="Specify files as arguments or use --pattern with a glob pattern",
+            exit_code=1,
+        )
+        return
+
+    missing_files = [f for f in resolved_files if not f.exists()]
+    if missing_files:
+        feedback.error(
+            f"Files not found: {', '.join(str(f) for f in missing_files)}",
+            error_code="file_not_found",
+            context={"missing_files": [str(f) for f in missing_files]},
+            remediation="Check that the files exist and paths are correct",
+            exit_code=1,
+        )
+        return
+
+    feedback.start_operation("Lint")
+    feedback.info(
+        f"Linting {resolved_files[0]}..." if len(resolved_files) == 1 else f"Linting {len(resolved_files)} files (max {max_workers} concurrent)..."
+    )
+
+    reviewer = ReviewerAgent()
+    cache = get_reviewer_cache()
+
+    try:
+        await reviewer.activate(offline_mode=offline_mode)
+
+        if len(resolved_files) == 1:
+            file_path_obj = resolved_files[0]
+
+            # Check cache first
+            cached_result = await cache.get_cached_result(
+                file_path_obj, "lint", REVIEWER_CACHE_VERSION
+            )
+            if cached_result is not None:
+                result = cached_result
+                feedback.info("Using cached result (file unchanged)")
+            else:
+                result = await reviewer.run("lint", file=str(file_path_obj), isolated=isolated)
+                check_result_error(result)
+                # Cache the result (only if not isolated, as isolated results may differ)
+                if not isolated:
+                    await cache.save_result(
+                        file_path_obj, "lint", REVIEWER_CACHE_VERSION, result
+                    )
+            feedback.clear_progress()
+
+            if output_file:
+                if output_format == "json":
+                    output_content = format_json(result)
+                elif output_format == "markdown":
+                    output_content = format_markdown(result)
+                elif output_format == "html":
+                    output_content = format_html(result, title="Linting Results")
+                else:
+                    output_content = _format_text_review_result(result)
+                _write_output(output_file, output_content)
+                feedback.success(f"Results written to {output_file}")
+            else:
+                if output_format == "json":
+                    feedback.output_result(result, message="Linting completed", compact=not verbose_output)
+                elif output_format in ("markdown", "html"):
+                    print(
+                        format_markdown(result)
+                        if output_format == "markdown"
+                        else format_html(result, title="Linting Results")
+                    )
+                else:
+                    feedback.success("Linting completed")
+                    print(_format_text_review_result(result))
+
+            if fail_on_issues and int(result.get("issue_count", 0)) > 0:
+                sys.exit(1)
+        else:
+            result = await _process_file_batch(reviewer, resolved_files, "lint", max_workers)
+            feedback.clear_progress()
+
+            # Defensive check: ensure result is a dict
+            if not isinstance(result, dict):
+                feedback.error(
+                    f"Unexpected result type from batch processing: {type(result).__name__}",
+                    error_code="invalid_result_type",
+                    context={"result_type": type(result).__name__, "result_preview": str(result)[:200]},
+                    exit_code=1,
+                )
+                return
+
+            if output_file:
+                if output_format == "json":
+                    output_content = format_json(result)
+                elif output_format == "markdown":
+                    output_content = format_markdown(result.get("files", []))
+                elif output_format == "html":
+                    output_content = format_html(result.get("files", []), title="Batch Linting Results")
+                else:
+                    output_content = _format_text_batch_summary(result, title="Batch Lint")
+                _write_output(output_file, output_content)
+                feedback.success(f"Results written to {output_file}")
+            else:
+                if output_format == "json":
+                    feedback.output_result(
+                        result,
+                        message=f"Linting completed: {result.get('successful', 0)}/{result.get('total', 0)} files successful",
+                        compact=not verbose_output,
+                    )
+                elif output_format in ("markdown", "html"):
+                    print(
+                        format_markdown(result.get("files", []))
+                        if output_format == "markdown"
+                        else format_html(result.get("files", []), title="Batch Linting Results")
+                    )
+                else:
+                    feedback.success(f"Linting completed: {result.get('successful', 0)}/{result.get('total', 0)} files successful")
+                    print(_format_text_batch_summary(result, title="Batch Lint"))
+
+            if fail_on_issues:
+                for file_result in result.get("files", []):
+                    if int(file_result.get("issue_count", 0)) > 0:
+                        sys.exit(1)
+            if int(result.get("failed", 0)) > 0:
+                sys.exit(1)
+    finally:
+        await reviewer.close()
+
+
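Because lint_command is an ordinary coroutine, it can also be awaited outside the CLI dispatcher. A minimal usage sketch, assuming the package is installed; the import path below is an assumption (in the CLI this function is normally reached through handle_reviewer_command and run_async_command):

# Hypothetical direct invocation of the async lint entry point.
import asyncio

from tapps_agents.cli.agents.reviewer import lint_command  # module path is an assumption

asyncio.run(
    lint_command(
        pattern="src/**/*.py",      # lint every Python file under src/
        output_format="markdown",   # print a markdown summary to stdout
        max_workers=4,              # cap concurrent file processing
        fail_on_issues=True,        # sys.exit(1) if any file reports issues
    )
)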
+async def type_check_command(
+    file_path: str | None = None,
+    files: list[str] | None = None,
+    pattern: str | None = None,
+    output_format: str = "json",
+    max_workers: int = 4,
+    output_file: str | None = None,
+    fail_on_issues: bool = False,
+    verbose_output: bool = False,
+) -> None:
+    """Run type-checking on file(s) with consistent async execution and output handling."""
+    from ..command_classifier import CommandClassifier, CommandNetworkRequirement
+
+    feedback = get_feedback()
+    output_format = _infer_output_format(output_format, output_file)
+    feedback.format_type = output_format
+
+    # Check network requirement - type-check is offline
+    requirement = CommandClassifier.get_network_requirement("reviewer", "type-check")
+    offline_mode = (requirement == CommandNetworkRequirement.OFFLINE)
+
+    if file_path and not files and not pattern:
+        files = [file_path]
+
+    # Normalize file paths before processing (fixes Windows absolute path issues)
+    files = _normalize_file_paths(files)
+
+    try:
+        resolved_files = _resolve_file_list(files, pattern)
+    except ValueError as e:
+        feedback.error(
+            str(e),
+            error_code="no_files_found",
+            context={"files": files, "pattern": pattern},
+            remediation="Specify files as arguments or use --pattern with a glob pattern",
+            exit_code=1,
+        )
+        return
+
+    missing_files = [f for f in resolved_files if not f.exists()]
+    if missing_files:
+        feedback.error(
+            f"Files not found: {', '.join(str(f) for f in missing_files)}",
+            error_code="file_not_found",
+            context={"missing_files": [str(f) for f in missing_files]},
+            remediation="Check that the files exist and paths are correct",
+            exit_code=1,
+        )
+        return
+
+    feedback.start_operation("Type Check")
+    feedback.info(
+        f"Type checking {resolved_files[0]}..." if len(resolved_files) == 1 else f"Type checking {len(resolved_files)} files (max {max_workers} concurrent)..."
+    )
+
+    reviewer = ReviewerAgent()
+    cache = get_reviewer_cache()
+
+    try:
+        await reviewer.activate(offline_mode=offline_mode)
+
+        if len(resolved_files) == 1:
+            file_path_obj = resolved_files[0]
+
+            # Check cache first
+            cached_result = await cache.get_cached_result(
+                file_path_obj, "type-check", REVIEWER_CACHE_VERSION
+            )
+            if cached_result is not None:
+                result = cached_result
+                feedback.info("Using cached result (file unchanged)")
+            else:
+                result = await reviewer.run("type-check", file=str(file_path_obj))
+                check_result_error(result)
+                # Cache the result
+                await cache.save_result(
+                    file_path_obj, "type-check", REVIEWER_CACHE_VERSION, result
+                )
+            feedback.clear_progress()
+
+            if output_file:
+                if output_format == "json":
+                    output_content = format_json(result)
+                elif output_format == "markdown":
+                    output_content = format_markdown(result)
+                elif output_format == "html":
+                    output_content = format_html(result, title="Type Check Results")
+                else:
+                    output_content = _format_text_review_result(result)
+                _write_output(output_file, output_content)
+                feedback.success(f"Results written to {output_file}")
+            else:
+                if output_format == "json":
+                    feedback.output_result(result, message="Type checking completed", compact=not verbose_output)
+                elif output_format in ("markdown", "html"):
+                    print(
+                        format_markdown(result)
+                        if output_format == "markdown"
+                        else format_html(result, title="Type Check Results")
+                    )
+                else:
+                    feedback.success("Type checking completed")
+                    print(_format_text_review_result(result))
+
+            if fail_on_issues and int(result.get("error_count", 0)) > 0:
+                sys.exit(1)
+        else:
+            result = await _process_file_batch(reviewer, resolved_files, "type-check", max_workers)
+            feedback.clear_progress()
+
+            if output_file:
+                if output_format == "json":
+                    output_content = format_json(result)
+                elif output_format == "markdown":
+                    output_content = format_markdown(result.get("files", []))
+                elif output_format == "html":
+                    output_content = format_html(result.get("files", []), title="Batch Type Check Results")
+                else:
+                    output_content = _format_text_batch_summary(result, title="Batch Type Check")
+                _write_output(output_file, output_content)
+                feedback.success(f"Results written to {output_file}")
+            else:
+                if output_format == "json":
+                    feedback.output_result(
+                        result,
+                        message=f"Type checking completed: {result['successful']}/{result['total']} files successful",
+                        compact=not verbose_output,
+                    )
+                elif output_format in ("markdown", "html"):
+                    print(
+                        format_markdown(result.get("files", []))
+                        if output_format == "markdown"
+                        else format_html(result.get("files", []), title="Batch Type Check Results")
+                    )
+                else:
+                    feedback.success(f"Type checking completed: {result['successful']}/{result['total']} files successful")
+                    print(_format_text_batch_summary(result, title="Batch Type Check"))
+
+            if fail_on_issues:
+                for file_result in result.get("files", []):
+                    if int(file_result.get("error_count", 0)) > 0 or len(file_result.get("errors", []) or []) > 0:
+                        sys.exit(1)
+            if int(result.get("failed", 0)) > 0:
+                sys.exit(1)
+    finally:
+        await reviewer.close()
+
+
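Note that the type-check gate keys off error_count (and, in the batch branch, a non-empty errors list), whereas the lint gate keys off issue_count. A small sketch of the batch fail condition, with invented per-file results:

# Invented file results to illustrate the batch exit condition used above.
clean = {"error_count": 0, "errors": []}
broken = {"error_count": 0, "errors": ["Incompatible return value type"]}

def file_fails_type_check(file_result: dict) -> bool:
    # Same condition as the batch fail_on_issues loop above.
    return int(file_result.get("error_count", 0)) > 0 or len(file_result.get("errors", []) or []) > 0

print(file_fails_type_check(clean))   # False
print(file_fails_type_check(broken))  # True: errors list is non-empty even though the count is 0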
+async def docs_command(
+    library: str,
+    topic: str | None = None,
+    mode: str = "code",
+    page: int = 1,
+    output_format: str = "json",
+    no_cache: bool = False,
+) -> None:
+    """
+    Get library documentation from Context7 (supports both *docs and docs commands).
+
+    Uses KB-first lookup with automatic fallback to Context7 API.
+    """
+    from ..command_classifier import CommandClassifier, CommandNetworkRequirement
+    from ..network_detection import NetworkDetector
+
+    feedback = get_feedback()
+    feedback.format_type = output_format
+    feedback.start_operation("Get Documentation")
+
+    # Check network requirement - docs is optional (can use cache)
+    requirement = CommandClassifier.get_network_requirement("reviewer", "docs")
+    offline_mode = False
+
+    if requirement == CommandNetworkRequirement.OPTIONAL:
+        # Try offline first if network unavailable
+        if not NetworkDetector.is_network_available():
+            offline_mode = True
+            feedback.info("Network unavailable, using cached documentation if available")
+
+    query_desc = f"{library}"
+    if topic:
+        query_desc += f" ({topic})"
+    feedback.info(f"Fetching documentation for {query_desc}...")
+
+    reviewer = ReviewerAgent()
+    try:
+        await reviewer.activate(offline_mode=offline_mode)
+
+        result = await reviewer.run(
+            "docs",
+            library=library,
+            topic=topic,
+            mode=mode,
+            page=page,
+            no_cache=no_cache,
+        )
+
+        check_result_error(result)
+        feedback.clear_progress()
+
+        # Format output based on format type
+        if output_format == "json":
+            feedback.output_result(result, message="Documentation retrieved successfully")
+        elif output_format == "markdown":
+            content = result.get("content", "")
+            if content:
+                print(content)
+            else:
+                feedback.warning("No documentation content found")
+        else:  # text
+            content = result.get("content", "")
+            if content:
+                print(content)
+            else:
+                feedback.warning("No documentation content found")
+
+    finally:
+        await reviewer.close()
+
+
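A sketch of fetching documentation programmatically through this entry point; the import path is an assumption, and the library/topic values are examples only:

# Hypothetical direct call to the async docs entry point.
import asyncio

from tapps_agents.cli.agents.reviewer import docs_command  # module path is an assumption

# Fetch code-focused docs for an example library/topic; per the docstring above,
# lookup is KB-first and falls back to the cache when the network is unavailable.
asyncio.run(docs_command(library="fastapi", topic="routing", mode="code", output_format="markdown"))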
+async def help_command():
+    """Show help (supports both *help and help commands) - uses static help, no activation needed"""
+    from ..help.static_help import get_static_help
+    help_text = get_static_help("reviewer")
+    feedback = get_feedback()
+    feedback.output_result(help_text)
+
+
+def handle_reviewer_command(args: object) -> None:
+    """Handle reviewer agent commands"""
+    from ..feedback import get_feedback
+    from ..help.static_help import get_static_help
+
+    feedback = get_feedback()
+    command = normalize_command(getattr(args, "command", None))
+    output_format = getattr(args, "format", "json")
+    feedback.format_type = output_format
+
+    # Help commands first - no activation needed
+    if command == "help" or command is None:
+        help_text = get_static_help("reviewer")
+        feedback.output_result(help_text)
+        return
+
+    # Get batch operation parameters
+    files = getattr(args, "files", None)
+    pattern = getattr(args, "pattern", None)
+    max_workers = getattr(args, "max_workers", 4)
+    output_file = getattr(args, "output", None)
+
+    # Backward compatibility: support 'file' attribute for single file
+    single_file = getattr(args, "file", None)
+    if single_file and not files:
+        files = [single_file]
+
+    try:
+        if command == "review":
+            fail_under = getattr(args, "fail_under", None)
+            verbose_output = bool(getattr(args, "verbose_output", False))
+            run_async_command(
+                review_command(
+                    file_path=single_file,
+                    files=files,
+                    pattern=pattern,
+                    output_format=output_format,
+                    max_workers=max_workers,
+                    output_file=output_file,
+                    fail_under=fail_under,
+                    verbose_output=verbose_output,
+                )
+            )
+        elif command == "score":
+            fail_under = getattr(args, "fail_under", None)
+            verbose_output = bool(getattr(args, "verbose_output", False))
+            explain = bool(getattr(args, "explain", False))
+            run_async_command(
+                score_command(
+                    file_path=single_file,
+                    files=files,
+                    pattern=pattern,
+                    output_format=output_format,
+                    max_workers=max_workers,
+                    output_file=output_file,
+                    fail_under=fail_under,
+                    verbose_output=verbose_output,
+                    explain=explain,
+                )
+            )
+        elif command == "lint":
+            fail_on_issues = bool(getattr(args, "fail_on_issues", False))
+            verbose_output = bool(getattr(args, "verbose_output", False))
+            isolated = bool(getattr(args, "isolated", False))
+            run_async_command(
+                lint_command(
+                    file_path=single_file,
+                    files=files,
+                    pattern=pattern,
+                    output_format=output_format,
+                    max_workers=max_workers,
+                    output_file=output_file,
+                    fail_on_issues=fail_on_issues,
+                    verbose_output=verbose_output,
+                    isolated=isolated,
+                )
+            )
+        elif command == "type-check":
+            fail_on_issues = bool(getattr(args, "fail_on_issues", False))
+            verbose_output = bool(getattr(args, "verbose_output", False))
+            run_async_command(
+                type_check_command(
+                    file_path=single_file,
+                    files=files,
+                    pattern=pattern,
+                    output_format=output_format,
+                    max_workers=max_workers,
+                    output_file=output_file,
+                    fail_on_issues=fail_on_issues,
+                    verbose_output=verbose_output,
+                )
+            )
+        elif command == "report":
+            from ..command_classifier import CommandClassifier, CommandNetworkRequirement
+            from ..network_detection import NetworkDetector
+            from ...core.network_errors import NetworkRequiredError
+            from ..base import handle_network_error
+
+            requirement = CommandClassifier.get_network_requirement("reviewer", "report")
+            if requirement == CommandNetworkRequirement.REQUIRED and not NetworkDetector.is_network_available():
+                try:
+                    raise NetworkRequiredError(
+                        operation_name="reviewer report",
+                        message="Network is required for this command"
+                    )
+                except NetworkRequiredError as e:
+                    handle_network_error(e, format_type=output_format)
+                    return
+
+            feedback.start_operation("Report Generation", "Analyzing project quality...")
+            formats = getattr(args, "formats", ["all"])
+            if "all" in formats:
+                format_type = "all"
+            else:
+                format_type = ",".join(formats)
+
+            # Show initial progress
+            feedback.running("Discovering files...", step=1, total_steps=4)
+
+            reviewer = ReviewerAgent()
+            result = run_async_command(
+                run_report(reviewer, args.target, format_type, getattr(args, "output_dir", None))
+            )
+            check_result_error(result)
+            feedback.clear_progress()
+
+            # Extract report paths from result for better feedback
+            report_paths = []
+            if isinstance(result, dict):
+                if "reports" in result and isinstance(result["reports"], dict):
+                    # Reports is a dict like {"json": "path", "markdown": "path", ...}
+                    report_paths = list(result["reports"].values())
+                elif "reports" in result and isinstance(result["reports"], list):
+                    report_paths = result["reports"]
+                elif "data" in result and isinstance(result["data"], dict):
+                    if "reports" in result["data"]:
+                        if isinstance(result["data"]["reports"], dict):
+                            report_paths = list(result["data"]["reports"].values())
+                        elif isinstance(result["data"]["reports"], list):
+                            report_paths = result["data"]["reports"]
+
+            summary = {}
+            if report_paths:
+                summary["reports_generated"] = len(report_paths)
+                if len(report_paths) <= 5:  # Only show paths if not too many
+                    summary["report_files"] = report_paths
+
+            verbose_output = bool(getattr(args, "verbose_output", False))
+            feedback.output_result(result, message="Report generated successfully", warnings=None, compact=not verbose_output)
+        elif command == "duplication":
+            # Duplication check is offline - no network check needed
+            feedback.start_operation("Duplication Check")
+            feedback.info(f"Checking for code duplication in {args.target}...")
+            reviewer = ReviewerAgent()
+            result = run_async_command(run_duplication(reviewer, args.target))
+            check_result_error(result)
+            feedback.clear_progress()
+            if output_format == "json":
+                feedback.output_result(result, message="Duplication check completed")
+            else:
+                feedback.success("Duplication check completed")
+                if "duplicates" in result:
+                    print(f"\nCode duplication detected in {args.target}:")
+                    print(f" Total duplicates: {len(result.get('duplicates', []))}")
+        elif command == "analyze-project":
+            # Project analysis may need network - check if required
+            from ..command_classifier import CommandClassifier, CommandNetworkRequirement
+            from ..network_detection import NetworkDetector
+            from ...core.network_errors import NetworkRequiredError
+            from ..base import handle_network_error
+
+            requirement = CommandClassifier.get_network_requirement("reviewer", "analyze-project")
+            if requirement == CommandNetworkRequirement.REQUIRED and not NetworkDetector.is_network_available():
+                try:
+                    raise NetworkRequiredError(
+                        operation_name="reviewer analyze-project",
+                        message="Network is required for this command"
+                    )
+                except NetworkRequiredError as e:
+                    handle_network_error(e, format_type=output_format)
+                    return
+
+            feedback.start_operation("Project Analysis")
+            feedback.info("Analyzing project...")
+            reviewer = ReviewerAgent()
+            result = run_async_command(
+                run_analyze_project(
+                    reviewer,
+                    getattr(args, "project_root", None),
+                    include_comparison=not getattr(args, "no_comparison", False),
+                )
+            )
+            check_result_error(result)
+            feedback.clear_progress()
+            verbose_output = bool(getattr(args, "verbose_output", False))
+            feedback.output_result(result, message="Project analysis completed", compact=not verbose_output)
+        elif command == "analyze-services":
+            # Service analysis may need network - check if required
+            from ..command_classifier import CommandClassifier, CommandNetworkRequirement
+            from ..network_detection import NetworkDetector
+            from ...core.network_errors import NetworkRequiredError
+            from ..base import handle_network_error
+
+            requirement = CommandClassifier.get_network_requirement("reviewer", "analyze-services")
+            if requirement == CommandNetworkRequirement.REQUIRED and not NetworkDetector.is_network_available():
+                try:
+                    raise NetworkRequiredError(
+                        operation_name="reviewer analyze-services",
+                        message="Network is required for this command"
+                    )
+                except NetworkRequiredError as e:
+                    handle_network_error(e, format_type=output_format)
+                    return
+
+            feedback.start_operation("Service Analysis")
+            feedback.info("Analyzing services...")
+            services = getattr(args, "services", None)
+            reviewer = ReviewerAgent()
+            result = run_async_command(
+                run_analyze_services(
+                    reviewer,
+                    services if services else None,
+                    getattr(args, "project_root", None),
+                    include_comparison=not getattr(args, "no_comparison", False),
+                )
+            )
+            check_result_error(result)
+            feedback.clear_progress()
+            verbose_output = bool(getattr(args, "verbose_output", False))
+            feedback.output_result(result, message="Service analysis completed", compact=not verbose_output)
+        elif command == "docs":
+            run_async_command(
+                docs_command(
+                    library=getattr(args, "library"),
+                    topic=getattr(args, "topic", None),
+                    mode=getattr(args, "mode", "code"),
+                    page=getattr(args, "page", 1),
+                    output_format=output_format,
+                    no_cache=bool(getattr(args, "no_cache", False)),
+                )
+            )
+        else:
+            # Invalid command - show help without activation
+            help_text = get_static_help("reviewer")
+            feedback.output_result(help_text)
+    finally:
+        # Each command manages its own agent lifecycle; nothing to close here.
+        pass
+
+
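Because handle_reviewer_command only reads attributes off args via getattr, any namespace-like object works as input. A sketch of programmatic dispatch with argparse.Namespace; the attribute names come from the getattr calls above, while the import path is an assumption:

# Hypothetical programmatic dispatch into the reviewer command handler.
from argparse import Namespace

from tapps_agents.cli.agents.reviewer import handle_reviewer_command  # module path is an assumption

args = Namespace(
    command="lint",            # normalized by normalize_command()
    format="json",             # becomes feedback.format_type
    files=["app/main.py"],     # batch parameter read via getattr
    pattern=None,
    max_workers=4,
    output=None,               # optional output file
    fail_on_issues=True,
    verbose_output=False,
    isolated=False,
)
handle_reviewer_command(args)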
+async def run_report(reviewer: ReviewerAgent, target: str, format_type: str, output_dir: str | None) -> dict[str, Any]:
+    from ..command_classifier import CommandClassifier, CommandNetworkRequirement
+
+    # Report generation may need network for some features, but can work offline
+    requirement = CommandClassifier.get_network_requirement("reviewer", "report")
+    offline_mode = (requirement == CommandNetworkRequirement.OFFLINE)
+
+    try:
+        await reviewer.activate(offline_mode=offline_mode)
+        return await reviewer.run("report", target=target, format=format_type, output_dir=output_dir)
+    finally:
+        await reviewer.close()
+
+
+async def run_duplication(reviewer: ReviewerAgent, target: str) -> dict[str, Any]:
+    from ..command_classifier import CommandClassifier, CommandNetworkRequirement
+
+    # Duplication check is offline (local analysis)
+    requirement = CommandClassifier.get_network_requirement("reviewer", "duplication")
+    offline_mode = (requirement == CommandNetworkRequirement.OFFLINE)
+
+    try:
+        await reviewer.activate(offline_mode=offline_mode)
+        return await reviewer.run("duplication", file=target)
+    finally:
+        await reviewer.close()
+
+
+async def run_analyze_project(reviewer: ReviewerAgent, project_root: str | None, include_comparison: bool) -> dict[str, Any]:
+    from ..command_classifier import CommandClassifier, CommandNetworkRequirement
+
+    # Project analysis may need network for some features, but can work offline
+    requirement = CommandClassifier.get_network_requirement("reviewer", "analyze-project")
+    offline_mode = (requirement == CommandNetworkRequirement.OFFLINE)
+
+    try:
+        await reviewer.activate(offline_mode=offline_mode)
+        return await reviewer.run(
+            "analyze-project",
+            project_root=project_root,
+            include_comparison=include_comparison,
+        )
+    finally:
+        await reviewer.close()
+
+
+async def run_analyze_services(
+    reviewer: ReviewerAgent,
+    services: list[str] | None,
+    project_root: str | None,
+    include_comparison: bool,
+) -> dict[str, Any]:
+    from ..command_classifier import CommandClassifier, CommandNetworkRequirement
+
+    # Service analysis may need network for some features, but can work offline
+    requirement = CommandClassifier.get_network_requirement("reviewer", "analyze-services")
+    offline_mode = (requirement == CommandNetworkRequirement.OFFLINE)
+
+    try:
+        await reviewer.activate(offline_mode=offline_mode)
+        return await reviewer.run(
+            "analyze-services",
+            services=services,
+            project_root=project_root,
+            include_comparison=include_comparison,
+        )
+    finally:
+        await reviewer.close()
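The four run_* helpers above share one lifecycle: resolve the command's network requirement, activate the agent (offline when the classifier allows it), run a single command, and always close the agent. A generic sketch of that shared pattern; the wrapper name and factoring are illustrative only, and the relative imports assume the same module context as the helpers above:

# Illustrative refactoring sketch only; the package defines one helper per
# command rather than this generic wrapper.
from typing import Any

async def run_reviewer_command(reviewer: "ReviewerAgent", command: str, **kwargs: Any) -> dict[str, Any]:
    # Relative imports assume this lives in the same package as the helpers above.
    from ..command_classifier import CommandClassifier, CommandNetworkRequirement

    requirement = CommandClassifier.get_network_requirement("reviewer", command)
    offline_mode = (requirement == CommandNetworkRequirement.OFFLINE)
    try:
        await reviewer.activate(offline_mode=offline_mode)
        return await reviewer.run(command, **kwargs)
    finally:
        await reviewer.close()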