tapps-agents 3.6.0__py3-none-any.whl → 3.6.1__py3-none-any.whl
This diff shows the content changes between package versions publicly released to one of the supported registries. The information is provided for informational purposes only and reflects the differences between the versions as they appear in their respective public registries.
- tapps_agents/__init__.py +2 -2
- tapps_agents/agents/__init__.py +22 -22
- tapps_agents/agents/analyst/__init__.py +5 -5
- tapps_agents/agents/architect/__init__.py +5 -5
- tapps_agents/agents/architect/agent.py +1033 -1033
- tapps_agents/agents/architect/pattern_detector.py +75 -75
- tapps_agents/agents/cleanup/__init__.py +7 -7
- tapps_agents/agents/cleanup/agent.py +445 -445
- tapps_agents/agents/debugger/__init__.py +7 -7
- tapps_agents/agents/debugger/agent.py +310 -310
- tapps_agents/agents/debugger/error_analyzer.py +437 -437
- tapps_agents/agents/designer/__init__.py +5 -5
- tapps_agents/agents/designer/agent.py +786 -786
- tapps_agents/agents/designer/visual_designer.py +638 -638
- tapps_agents/agents/documenter/__init__.py +7 -7
- tapps_agents/agents/documenter/agent.py +531 -531
- tapps_agents/agents/documenter/doc_generator.py +472 -472
- tapps_agents/agents/documenter/doc_validator.py +393 -393
- tapps_agents/agents/documenter/framework_doc_updater.py +493 -493
- tapps_agents/agents/enhancer/__init__.py +7 -7
- tapps_agents/agents/evaluator/__init__.py +7 -7
- tapps_agents/agents/evaluator/agent.py +443 -443
- tapps_agents/agents/evaluator/priority_evaluator.py +641 -641
- tapps_agents/agents/evaluator/quality_analyzer.py +147 -147
- tapps_agents/agents/evaluator/report_generator.py +344 -344
- tapps_agents/agents/evaluator/usage_analyzer.py +192 -192
- tapps_agents/agents/evaluator/workflow_analyzer.py +189 -189
- tapps_agents/agents/implementer/__init__.py +7 -7
- tapps_agents/agents/implementer/agent.py +798 -798
- tapps_agents/agents/implementer/auto_fix.py +1119 -1119
- tapps_agents/agents/implementer/code_generator.py +73 -73
- tapps_agents/agents/improver/__init__.py +1 -1
- tapps_agents/agents/improver/agent.py +753 -753
- tapps_agents/agents/ops/__init__.py +1 -1
- tapps_agents/agents/ops/agent.py +619 -619
- tapps_agents/agents/ops/dependency_analyzer.py +600 -600
- tapps_agents/agents/orchestrator/__init__.py +5 -5
- tapps_agents/agents/orchestrator/agent.py +522 -522
- tapps_agents/agents/planner/__init__.py +7 -7
- tapps_agents/agents/planner/agent.py +1127 -1127
- tapps_agents/agents/reviewer/__init__.py +24 -24
- tapps_agents/agents/reviewer/agent.py +3513 -3513
- tapps_agents/agents/reviewer/aggregator.py +213 -213
- tapps_agents/agents/reviewer/batch_review.py +448 -448
- tapps_agents/agents/reviewer/cache.py +443 -443
- tapps_agents/agents/reviewer/context7_enhancer.py +630 -630
- tapps_agents/agents/reviewer/context_detector.py +203 -203
- tapps_agents/agents/reviewer/docker_compose_validator.py +158 -158
- tapps_agents/agents/reviewer/dockerfile_validator.py +176 -176
- tapps_agents/agents/reviewer/error_handling.py +126 -126
- tapps_agents/agents/reviewer/feedback_generator.py +490 -490
- tapps_agents/agents/reviewer/influxdb_validator.py +316 -316
- tapps_agents/agents/reviewer/issue_tracking.py +169 -169
- tapps_agents/agents/reviewer/library_detector.py +295 -295
- tapps_agents/agents/reviewer/library_patterns.py +268 -268
- tapps_agents/agents/reviewer/maintainability_scorer.py +593 -593
- tapps_agents/agents/reviewer/metric_strategies.py +276 -276
- tapps_agents/agents/reviewer/mqtt_validator.py +160 -160
- tapps_agents/agents/reviewer/output_enhancer.py +105 -105
- tapps_agents/agents/reviewer/pattern_detector.py +241 -241
- tapps_agents/agents/reviewer/performance_scorer.py +357 -357
- tapps_agents/agents/reviewer/phased_review.py +516 -516
- tapps_agents/agents/reviewer/progressive_review.py +435 -435
- tapps_agents/agents/reviewer/react_scorer.py +331 -331
- tapps_agents/agents/reviewer/score_constants.py +228 -228
- tapps_agents/agents/reviewer/score_validator.py +507 -507
- tapps_agents/agents/reviewer/scorer_registry.py +373 -373
- tapps_agents/agents/reviewer/service_discovery.py +534 -534
- tapps_agents/agents/reviewer/tools/parallel_executor.py +581 -581
- tapps_agents/agents/reviewer/tools/ruff_grouping.py +250 -250
- tapps_agents/agents/reviewer/tools/scoped_mypy.py +284 -284
- tapps_agents/agents/reviewer/typescript_scorer.py +1142 -1142
- tapps_agents/agents/reviewer/validation.py +208 -208
- tapps_agents/agents/reviewer/websocket_validator.py +132 -132
- tapps_agents/agents/tester/__init__.py +7 -7
- tapps_agents/agents/tester/accessibility_auditor.py +309 -309
- tapps_agents/agents/tester/agent.py +1080 -1080
- tapps_agents/agents/tester/batch_generator.py +54 -54
- tapps_agents/agents/tester/context_learner.py +51 -51
- tapps_agents/agents/tester/coverage_analyzer.py +386 -386
- tapps_agents/agents/tester/coverage_test_generator.py +290 -290
- tapps_agents/agents/tester/debug_enhancer.py +238 -238
- tapps_agents/agents/tester/device_emulator.py +241 -241
- tapps_agents/agents/tester/integration_generator.py +62 -62
- tapps_agents/agents/tester/network_recorder.py +300 -300
- tapps_agents/agents/tester/performance_monitor.py +320 -320
- tapps_agents/agents/tester/test_fixer.py +316 -316
- tapps_agents/agents/tester/test_generator.py +632 -632
- tapps_agents/agents/tester/trace_manager.py +234 -234
- tapps_agents/agents/tester/visual_regression.py +291 -291
- tapps_agents/analysis/pattern_detector.py +36 -36
- tapps_agents/beads/hydration.py +213 -213
- tapps_agents/beads/parse.py +32 -32
- tapps_agents/beads/specs.py +206 -206
- tapps_agents/cli/__init__.py +9 -9
- tapps_agents/cli/__main__.py +8 -8
- tapps_agents/cli/base.py +478 -478
- tapps_agents/cli/command_classifier.py +72 -72
- tapps_agents/cli/commands/__init__.py +2 -2
- tapps_agents/cli/commands/analyst.py +173 -173
- tapps_agents/cli/commands/architect.py +109 -109
- tapps_agents/cli/commands/cleanup_agent.py +92 -92
- tapps_agents/cli/commands/common.py +126 -126
- tapps_agents/cli/commands/debugger.py +90 -90
- tapps_agents/cli/commands/designer.py +112 -112
- tapps_agents/cli/commands/documenter.py +136 -136
- tapps_agents/cli/commands/enhancer.py +110 -110
- tapps_agents/cli/commands/evaluator.py +255 -255
- tapps_agents/cli/commands/implementer.py +301 -301
- tapps_agents/cli/commands/improver.py +91 -91
- tapps_agents/cli/commands/knowledge.py +111 -111
- tapps_agents/cli/commands/learning.py +172 -172
- tapps_agents/cli/commands/observability.py +283 -283
- tapps_agents/cli/commands/ops.py +135 -135
- tapps_agents/cli/commands/orchestrator.py +116 -116
- tapps_agents/cli/commands/planner.py +237 -237
- tapps_agents/cli/commands/reviewer.py +1872 -1872
- tapps_agents/cli/commands/status.py +285 -285
- tapps_agents/cli/commands/task.py +227 -227
- tapps_agents/cli/commands/tester.py +191 -191
- tapps_agents/cli/feedback.py +936 -936
- tapps_agents/cli/formatters.py +608 -608
- tapps_agents/cli/help/__init__.py +7 -7
- tapps_agents/cli/help/static_help.py +425 -425
- tapps_agents/cli/network_detection.py +110 -110
- tapps_agents/cli/output_compactor.py +274 -274
- tapps_agents/cli/parsers/__init__.py +2 -2
- tapps_agents/cli/parsers/analyst.py +186 -186
- tapps_agents/cli/parsers/architect.py +167 -167
- tapps_agents/cli/parsers/cleanup_agent.py +228 -228
- tapps_agents/cli/parsers/debugger.py +116 -116
- tapps_agents/cli/parsers/designer.py +182 -182
- tapps_agents/cli/parsers/documenter.py +134 -134
- tapps_agents/cli/parsers/enhancer.py +113 -113
- tapps_agents/cli/parsers/evaluator.py +213 -213
- tapps_agents/cli/parsers/implementer.py +168 -168
- tapps_agents/cli/parsers/improver.py +132 -132
- tapps_agents/cli/parsers/ops.py +159 -159
- tapps_agents/cli/parsers/orchestrator.py +98 -98
- tapps_agents/cli/parsers/planner.py +145 -145
- tapps_agents/cli/parsers/reviewer.py +462 -462
- tapps_agents/cli/parsers/tester.py +124 -124
- tapps_agents/cli/progress_heartbeat.py +254 -254
- tapps_agents/cli/streaming_progress.py +336 -336
- tapps_agents/cli/utils/__init__.py +6 -6
- tapps_agents/cli/utils/agent_lifecycle.py +48 -48
- tapps_agents/cli/utils/error_formatter.py +82 -82
- tapps_agents/cli/utils/error_recovery.py +188 -188
- tapps_agents/cli/utils/output_handler.py +59 -59
- tapps_agents/cli/utils/prompt_enhancer.py +319 -319
- tapps_agents/cli/validators/__init__.py +9 -9
- tapps_agents/cli/validators/command_validator.py +81 -81
- tapps_agents/context7/__init__.py +112 -112
- tapps_agents/context7/agent_integration.py +869 -869
- tapps_agents/context7/analytics.py +382 -382
- tapps_agents/context7/analytics_dashboard.py +299 -299
- tapps_agents/context7/async_cache.py +681 -681
- tapps_agents/context7/backup_client.py +958 -958
- tapps_agents/context7/cache_locking.py +194 -194
- tapps_agents/context7/cache_metadata.py +214 -214
- tapps_agents/context7/cache_prewarm.py +488 -488
- tapps_agents/context7/cache_structure.py +168 -168
- tapps_agents/context7/cache_warming.py +604 -604
- tapps_agents/context7/circuit_breaker.py +376 -376
- tapps_agents/context7/cleanup.py +461 -461
- tapps_agents/context7/commands.py +858 -858
- tapps_agents/context7/credential_validation.py +276 -276
- tapps_agents/context7/cross_reference_resolver.py +168 -168
- tapps_agents/context7/cross_references.py +424 -424
- tapps_agents/context7/doc_manager.py +225 -225
- tapps_agents/context7/fuzzy_matcher.py +369 -369
- tapps_agents/context7/kb_cache.py +404 -404
- tapps_agents/context7/language_detector.py +219 -219
- tapps_agents/context7/library_detector.py +725 -725
- tapps_agents/context7/lookup.py +738 -738
- tapps_agents/context7/metadata.py +258 -258
- tapps_agents/context7/refresh_queue.py +300 -300
- tapps_agents/context7/security.py +373 -373
- tapps_agents/context7/staleness_policies.py +278 -278
- tapps_agents/context7/tiles_integration.py +47 -47
- tapps_agents/continuous_bug_fix/__init__.py +20 -20
- tapps_agents/continuous_bug_fix/bug_finder.py +306 -306
- tapps_agents/continuous_bug_fix/bug_fix_coordinator.py +177 -177
- tapps_agents/continuous_bug_fix/commit_manager.py +178 -178
- tapps_agents/continuous_bug_fix/continuous_bug_fixer.py +322 -322
- tapps_agents/continuous_bug_fix/proactive_bug_finder.py +285 -285
- tapps_agents/core/__init__.py +298 -298
- tapps_agents/core/adaptive_cache_config.py +432 -432
- tapps_agents/core/agent_base.py +647 -647
- tapps_agents/core/agent_cache.py +466 -466
- tapps_agents/core/agent_learning.py +1865 -1865
- tapps_agents/core/analytics_dashboard.py +563 -563
- tapps_agents/core/analytics_enhancements.py +597 -597
- tapps_agents/core/anonymization.py +274 -274
- tapps_agents/core/ast_parser.py +228 -228
- tapps_agents/core/async_file_ops.py +402 -402
- tapps_agents/core/best_practice_consultant.py +299 -299
- tapps_agents/core/brownfield_analyzer.py +299 -299
- tapps_agents/core/brownfield_review.py +541 -541
- tapps_agents/core/browser_controller.py +513 -513
- tapps_agents/core/capability_registry.py +418 -418
- tapps_agents/core/change_impact_analyzer.py +190 -190
- tapps_agents/core/checkpoint_manager.py +377 -377
- tapps_agents/core/code_generator.py +329 -329
- tapps_agents/core/code_validator.py +276 -276
- tapps_agents/core/command_registry.py +327 -327
- tapps_agents/core/context_gathering/__init__.py +2 -2
- tapps_agents/core/context_gathering/repository_explorer.py +28 -28
- tapps_agents/core/context_intelligence/__init__.py +2 -2
- tapps_agents/core/context_intelligence/relevance_scorer.py +24 -24
- tapps_agents/core/context_intelligence/token_budget_manager.py +27 -27
- tapps_agents/core/context_manager.py +240 -240
- tapps_agents/core/cursor_feedback_monitor.py +146 -146
- tapps_agents/core/cursor_verification.py +290 -290
- tapps_agents/core/customization_loader.py +280 -280
- tapps_agents/core/customization_schema.py +260 -260
- tapps_agents/core/customization_template.py +238 -238
- tapps_agents/core/debug_logger.py +124 -124
- tapps_agents/core/design_validator.py +298 -298
- tapps_agents/core/diagram_generator.py +226 -226
- tapps_agents/core/docker_utils.py +232 -232
- tapps_agents/core/document_generator.py +617 -617
- tapps_agents/core/domain_detector.py +30 -30
- tapps_agents/core/error_envelope.py +454 -454
- tapps_agents/core/error_handler.py +270 -270
- tapps_agents/core/estimation_tracker.py +189 -189
- tapps_agents/core/eval_prompt_engine.py +116 -116
- tapps_agents/core/evaluation_base.py +119 -119
- tapps_agents/core/evaluation_models.py +320 -320
- tapps_agents/core/evaluation_orchestrator.py +225 -225
- tapps_agents/core/evaluators/__init__.py +7 -7
- tapps_agents/core/evaluators/architectural_evaluator.py +205 -205
- tapps_agents/core/evaluators/behavioral_evaluator.py +160 -160
- tapps_agents/core/evaluators/performance_profile_evaluator.py +160 -160
- tapps_agents/core/evaluators/security_posture_evaluator.py +148 -148
- tapps_agents/core/evaluators/spec_compliance_evaluator.py +181 -181
- tapps_agents/core/exceptions.py +107 -107
- tapps_agents/core/expert_config_generator.py +293 -293
- tapps_agents/core/export_schema.py +202 -202
- tapps_agents/core/external_feedback_models.py +102 -102
- tapps_agents/core/external_feedback_storage.py +213 -213
- tapps_agents/core/fallback_strategy.py +314 -314
- tapps_agents/core/feedback_analyzer.py +162 -162
- tapps_agents/core/feedback_collector.py +178 -178
- tapps_agents/core/git_operations.py +445 -445
- tapps_agents/core/hardware_profiler.py +151 -151
- tapps_agents/core/instructions.py +324 -324
- tapps_agents/core/io_guardrails.py +69 -69
- tapps_agents/core/issue_manifest.py +249 -249
- tapps_agents/core/issue_schema.py +139 -139
- tapps_agents/core/json_utils.py +128 -128
- tapps_agents/core/knowledge_graph.py +446 -446
- tapps_agents/core/language_detector.py +296 -296
- tapps_agents/core/learning_confidence.py +242 -242
- tapps_agents/core/learning_dashboard.py +246 -246
- tapps_agents/core/learning_decision.py +384 -384
- tapps_agents/core/learning_explainability.py +578 -578
- tapps_agents/core/learning_export.py +287 -287
- tapps_agents/core/learning_integration.py +228 -228
- tapps_agents/core/llm_behavior.py +232 -232
- tapps_agents/core/long_duration_support.py +786 -786
- tapps_agents/core/mcp_setup.py +106 -106
- tapps_agents/core/memory_integration.py +396 -396
- tapps_agents/core/meta_learning.py +666 -666
- tapps_agents/core/module_path_sanitizer.py +199 -199
- tapps_agents/core/multi_agent_orchestrator.py +382 -382
- tapps_agents/core/network_errors.py +125 -125
- tapps_agents/core/nfr_validator.py +336 -336
- tapps_agents/core/offline_mode.py +158 -158
- tapps_agents/core/output_contracts.py +300 -300
- tapps_agents/core/output_formatter.py +300 -300
- tapps_agents/core/path_normalizer.py +174 -174
- tapps_agents/core/path_validator.py +322 -322
- tapps_agents/core/pattern_library.py +250 -250
- tapps_agents/core/performance_benchmark.py +301 -301
- tapps_agents/core/performance_monitor.py +184 -184
- tapps_agents/core/playwright_mcp_controller.py +771 -771
- tapps_agents/core/policy_loader.py +135 -135
- tapps_agents/core/progress.py +166 -166
- tapps_agents/core/project_profile.py +354 -354
- tapps_agents/core/project_type_detector.py +454 -454
- tapps_agents/core/prompt_base.py +223 -223
- tapps_agents/core/prompt_learning/__init__.py +2 -2
- tapps_agents/core/prompt_learning/learning_loop.py +24 -24
- tapps_agents/core/prompt_learning/project_prompt_store.py +25 -25
- tapps_agents/core/prompt_learning/skills_prompt_analyzer.py +35 -35
- tapps_agents/core/prompt_optimization/__init__.py +6 -6
- tapps_agents/core/prompt_optimization/ab_tester.py +114 -114
- tapps_agents/core/prompt_optimization/correlation_analyzer.py +160 -160
- tapps_agents/core/prompt_optimization/progressive_refiner.py +129 -129
- tapps_agents/core/prompt_optimization/prompt_library.py +37 -37
- tapps_agents/core/requirements_evaluator.py +431 -431
- tapps_agents/core/resource_aware_executor.py +449 -449
- tapps_agents/core/resource_monitor.py +343 -343
- tapps_agents/core/resume_handler.py +298 -298
- tapps_agents/core/retry_handler.py +197 -197
- tapps_agents/core/review_checklists.py +479 -479
- tapps_agents/core/role_loader.py +201 -201
- tapps_agents/core/role_template_loader.py +201 -201
- tapps_agents/core/runtime_mode.py +60 -60
- tapps_agents/core/security_scanner.py +342 -342
- tapps_agents/core/skill_agent_registry.py +194 -194
- tapps_agents/core/skill_integration.py +208 -208
- tapps_agents/core/skill_loader.py +492 -492
- tapps_agents/core/skill_template.py +341 -341
- tapps_agents/core/skill_validator.py +478 -478
- tapps_agents/core/stack_analyzer.py +35 -35
- tapps_agents/core/startup.py +174 -174
- tapps_agents/core/storage_manager.py +397 -397
- tapps_agents/core/storage_models.py +166 -166
- tapps_agents/core/story_evaluator.py +410 -410
- tapps_agents/core/subprocess_utils.py +170 -170
- tapps_agents/core/task_duration.py +296 -296
- tapps_agents/core/task_memory.py +582 -582
- tapps_agents/core/task_state.py +226 -226
- tapps_agents/core/tech_stack_priorities.py +208 -208
- tapps_agents/core/temp_directory.py +194 -194
- tapps_agents/core/template_merger.py +600 -600
- tapps_agents/core/template_selector.py +280 -280
- tapps_agents/core/test_generator.py +286 -286
- tapps_agents/core/tiered_context.py +253 -253
- tapps_agents/core/token_monitor.py +345 -345
- tapps_agents/core/traceability.py +254 -254
- tapps_agents/core/trajectory_tracker.py +50 -50
- tapps_agents/core/unicode_safe.py +143 -143
- tapps_agents/core/unified_cache_config.py +170 -170
- tapps_agents/core/unified_state.py +324 -324
- tapps_agents/core/validate_cursor_setup.py +237 -237
- tapps_agents/core/validation_registry.py +136 -136
- tapps_agents/core/validators/__init__.py +4 -4
- tapps_agents/core/validators/python_validator.py +87 -87
- tapps_agents/core/verification_agent.py +90 -90
- tapps_agents/core/visual_feedback.py +644 -644
- tapps_agents/core/workflow_validator.py +197 -197
- tapps_agents/core/worktree.py +367 -367
- tapps_agents/docker/__init__.py +10 -10
- tapps_agents/docker/analyzer.py +186 -186
- tapps_agents/docker/debugger.py +229 -229
- tapps_agents/docker/error_patterns.py +216 -216
- tapps_agents/epic/__init__.py +22 -22
- tapps_agents/epic/beads_sync.py +115 -115
- tapps_agents/epic/markdown_sync.py +105 -105
- tapps_agents/epic/models.py +96 -96
- tapps_agents/experts/__init__.py +163 -163
- tapps_agents/experts/agent_integration.py +243 -243
- tapps_agents/experts/auto_generator.py +331 -331
- tapps_agents/experts/base_expert.py +536 -536
- tapps_agents/experts/builtin_registry.py +261 -261
- tapps_agents/experts/business_metrics.py +565 -565
- tapps_agents/experts/cache.py +266 -266
- tapps_agents/experts/confidence_breakdown.py +306 -306
- tapps_agents/experts/confidence_calculator.py +336 -336
- tapps_agents/experts/confidence_metrics.py +236 -236
- tapps_agents/experts/domain_config.py +311 -311
- tapps_agents/experts/domain_detector.py +550 -550
- tapps_agents/experts/domain_utils.py +84 -84
- tapps_agents/experts/expert_config.py +113 -113
- tapps_agents/experts/expert_engine.py +465 -465
- tapps_agents/experts/expert_registry.py +744 -744
- tapps_agents/experts/expert_synthesizer.py +70 -70
- tapps_agents/experts/governance.py +197 -197
- tapps_agents/experts/history_logger.py +312 -312
- tapps_agents/experts/knowledge/README.md +180 -180
- tapps_agents/experts/knowledge/accessibility/accessible-forms.md +331 -331
- tapps_agents/experts/knowledge/accessibility/aria-patterns.md +344 -344
- tapps_agents/experts/knowledge/accessibility/color-contrast.md +285 -285
- tapps_agents/experts/knowledge/accessibility/keyboard-navigation.md +332 -332
- tapps_agents/experts/knowledge/accessibility/screen-readers.md +282 -282
- tapps_agents/experts/knowledge/accessibility/semantic-html.md +355 -355
- tapps_agents/experts/knowledge/accessibility/testing-accessibility.md +369 -369
- tapps_agents/experts/knowledge/accessibility/wcag-2.1.md +296 -296
- tapps_agents/experts/knowledge/accessibility/wcag-2.2.md +211 -211
- tapps_agents/experts/knowledge/agent-learning/best-practices.md +715 -715
- tapps_agents/experts/knowledge/agent-learning/pattern-extraction.md +282 -282
- tapps_agents/experts/knowledge/agent-learning/prompt-optimization.md +320 -320
- tapps_agents/experts/knowledge/ai-frameworks/model-optimization.md +90 -90
- tapps_agents/experts/knowledge/ai-frameworks/openvino-patterns.md +260 -260
- tapps_agents/experts/knowledge/api-design-integration/api-gateway-patterns.md +309 -309
- tapps_agents/experts/knowledge/api-design-integration/api-security-patterns.md +521 -521
- tapps_agents/experts/knowledge/api-design-integration/api-versioning.md +421 -421
- tapps_agents/experts/knowledge/api-design-integration/async-protocol-patterns.md +61 -61
- tapps_agents/experts/knowledge/api-design-integration/contract-testing.md +221 -221
- tapps_agents/experts/knowledge/api-design-integration/external-api-integration.md +489 -489
- tapps_agents/experts/knowledge/api-design-integration/fastapi-patterns.md +360 -360
- tapps_agents/experts/knowledge/api-design-integration/fastapi-testing.md +262 -262
- tapps_agents/experts/knowledge/api-design-integration/graphql-patterns.md +582 -582
- tapps_agents/experts/knowledge/api-design-integration/grpc-best-practices.md +499 -499
- tapps_agents/experts/knowledge/api-design-integration/mqtt-patterns.md +455 -455
- tapps_agents/experts/knowledge/api-design-integration/rate-limiting.md +507 -507
- tapps_agents/experts/knowledge/api-design-integration/restful-api-design.md +618 -618
- tapps_agents/experts/knowledge/api-design-integration/websocket-patterns.md +480 -480
- tapps_agents/experts/knowledge/cloud-infrastructure/cloud-native-patterns.md +175 -175
- tapps_agents/experts/knowledge/cloud-infrastructure/container-health-checks.md +261 -261
- tapps_agents/experts/knowledge/cloud-infrastructure/containerization.md +222 -222
- tapps_agents/experts/knowledge/cloud-infrastructure/cost-optimization.md +122 -122
- tapps_agents/experts/knowledge/cloud-infrastructure/disaster-recovery.md +153 -153
- tapps_agents/experts/knowledge/cloud-infrastructure/dockerfile-patterns.md +285 -285
- tapps_agents/experts/knowledge/cloud-infrastructure/infrastructure-as-code.md +187 -187
- tapps_agents/experts/knowledge/cloud-infrastructure/kubernetes-patterns.md +253 -253
- tapps_agents/experts/knowledge/cloud-infrastructure/multi-cloud-strategies.md +155 -155
- tapps_agents/experts/knowledge/cloud-infrastructure/serverless-architecture.md +200 -200
- tapps_agents/experts/knowledge/code-quality-analysis/README.md +16 -16
- tapps_agents/experts/knowledge/code-quality-analysis/code-metrics.md +137 -137
- tapps_agents/experts/knowledge/code-quality-analysis/complexity-analysis.md +181 -181
- tapps_agents/experts/knowledge/code-quality-analysis/technical-debt-patterns.md +191 -191
- tapps_agents/experts/knowledge/data-privacy-compliance/anonymization.md +313 -313
- tapps_agents/experts/knowledge/data-privacy-compliance/ccpa.md +255 -255
- tapps_agents/experts/knowledge/data-privacy-compliance/consent-management.md +282 -282
- tapps_agents/experts/knowledge/data-privacy-compliance/data-minimization.md +275 -275
- tapps_agents/experts/knowledge/data-privacy-compliance/data-retention.md +297 -297
- tapps_agents/experts/knowledge/data-privacy-compliance/data-subject-rights.md +383 -383
- tapps_agents/experts/knowledge/data-privacy-compliance/encryption-privacy.md +285 -285
- tapps_agents/experts/knowledge/data-privacy-compliance/gdpr.md +344 -344
- tapps_agents/experts/knowledge/data-privacy-compliance/hipaa.md +385 -385
- tapps_agents/experts/knowledge/data-privacy-compliance/privacy-by-design.md +280 -280
- tapps_agents/experts/knowledge/database-data-management/acid-vs-cap.md +164 -164
- tapps_agents/experts/knowledge/database-data-management/backup-and-recovery.md +182 -182
- tapps_agents/experts/knowledge/database-data-management/data-modeling.md +172 -172
- tapps_agents/experts/knowledge/database-data-management/database-design.md +187 -187
- tapps_agents/experts/knowledge/database-data-management/flux-query-optimization.md +342 -342
- tapps_agents/experts/knowledge/database-data-management/influxdb-connection-patterns.md +432 -432
- tapps_agents/experts/knowledge/database-data-management/influxdb-patterns.md +442 -442
- tapps_agents/experts/knowledge/database-data-management/migration-strategies.md +216 -216
- tapps_agents/experts/knowledge/database-data-management/nosql-patterns.md +259 -259
- tapps_agents/experts/knowledge/database-data-management/scalability-patterns.md +184 -184
- tapps_agents/experts/knowledge/database-data-management/sql-optimization.md +175 -175
- tapps_agents/experts/knowledge/database-data-management/time-series-modeling.md +444 -444
- tapps_agents/experts/knowledge/development-workflow/README.md +16 -16
- tapps_agents/experts/knowledge/development-workflow/automation-best-practices.md +216 -216
- tapps_agents/experts/knowledge/development-workflow/build-strategies.md +198 -198
- tapps_agents/experts/knowledge/development-workflow/deployment-patterns.md +205 -205
- tapps_agents/experts/knowledge/development-workflow/git-workflows.md +205 -205
- tapps_agents/experts/knowledge/documentation-knowledge-management/README.md +16 -16
- tapps_agents/experts/knowledge/documentation-knowledge-management/api-documentation-patterns.md +231 -231
- tapps_agents/experts/knowledge/documentation-knowledge-management/documentation-standards.md +191 -191
- tapps_agents/experts/knowledge/documentation-knowledge-management/knowledge-management.md +171 -171
- tapps_agents/experts/knowledge/documentation-knowledge-management/technical-writing-guide.md +192 -192
- tapps_agents/experts/knowledge/observability-monitoring/alerting-patterns.md +461 -461
- tapps_agents/experts/knowledge/observability-monitoring/apm-tools.md +459 -459
- tapps_agents/experts/knowledge/observability-monitoring/distributed-tracing.md +367 -367
- tapps_agents/experts/knowledge/observability-monitoring/logging-strategies.md +478 -478
- tapps_agents/experts/knowledge/observability-monitoring/metrics-and-monitoring.md +510 -510
- tapps_agents/experts/knowledge/observability-monitoring/observability-best-practices.md +492 -492
- tapps_agents/experts/knowledge/observability-monitoring/open-telemetry.md +573 -573
- tapps_agents/experts/knowledge/observability-monitoring/slo-sli-sla.md +419 -419
- tapps_agents/experts/knowledge/performance/anti-patterns.md +284 -284
- tapps_agents/experts/knowledge/performance/api-performance.md +256 -256
- tapps_agents/experts/knowledge/performance/caching.md +327 -327
- tapps_agents/experts/knowledge/performance/database-performance.md +252 -252
- tapps_agents/experts/knowledge/performance/optimization-patterns.md +327 -327
- tapps_agents/experts/knowledge/performance/profiling.md +297 -297
- tapps_agents/experts/knowledge/performance/resource-management.md +293 -293
- tapps_agents/experts/knowledge/performance/scalability.md +306 -306
- tapps_agents/experts/knowledge/security/owasp-top10.md +209 -209
- tapps_agents/experts/knowledge/security/secure-coding-practices.md +207 -207
- tapps_agents/experts/knowledge/security/threat-modeling.md +220 -220
- tapps_agents/experts/knowledge/security/vulnerability-patterns.md +342 -342
- tapps_agents/experts/knowledge/software-architecture/docker-compose-patterns.md +314 -314
- tapps_agents/experts/knowledge/software-architecture/microservices-patterns.md +379 -379
- tapps_agents/experts/knowledge/software-architecture/service-communication.md +316 -316
- tapps_agents/experts/knowledge/testing/best-practices.md +310 -310
- tapps_agents/experts/knowledge/testing/coverage-analysis.md +293 -293
- tapps_agents/experts/knowledge/testing/mocking.md +256 -256
- tapps_agents/experts/knowledge/testing/test-automation.md +276 -276
- tapps_agents/experts/knowledge/testing/test-data.md +271 -271
- tapps_agents/experts/knowledge/testing/test-design-patterns.md +280 -280
- tapps_agents/experts/knowledge/testing/test-maintenance.md +236 -236
- tapps_agents/experts/knowledge/testing/test-strategies.md +311 -311
- tapps_agents/experts/knowledge/user-experience/information-architecture.md +325 -325
- tapps_agents/experts/knowledge/user-experience/interaction-design.md +363 -363
- tapps_agents/experts/knowledge/user-experience/prototyping.md +293 -293
- tapps_agents/experts/knowledge/user-experience/usability-heuristics.md +337 -337
- tapps_agents/experts/knowledge/user-experience/usability-testing.md +311 -311
- tapps_agents/experts/knowledge/user-experience/user-journeys.md +296 -296
- tapps_agents/experts/knowledge/user-experience/user-research.md +373 -373
- tapps_agents/experts/knowledge/user-experience/ux-principles.md +340 -340
- tapps_agents/experts/knowledge_freshness.py +321 -321
- tapps_agents/experts/knowledge_ingestion.py +438 -438
- tapps_agents/experts/knowledge_need_detector.py +93 -93
- tapps_agents/experts/knowledge_validator.py +382 -382
- tapps_agents/experts/observability.py +440 -440
- tapps_agents/experts/passive_notifier.py +238 -238
- tapps_agents/experts/proactive_orchestrator.py +32 -32
- tapps_agents/experts/rag_chunker.py +205 -205
- tapps_agents/experts/rag_embedder.py +152 -152
- tapps_agents/experts/rag_evaluation.py +299 -299
- tapps_agents/experts/rag_index.py +303 -303
- tapps_agents/experts/rag_metrics.py +293 -293
- tapps_agents/experts/rag_safety.py +263 -263
- tapps_agents/experts/report_generator.py +296 -296
- tapps_agents/experts/setup_wizard.py +441 -441
- tapps_agents/experts/simple_rag.py +431 -431
- tapps_agents/experts/vector_rag.py +354 -354
- tapps_agents/experts/weight_distributor.py +304 -304
- tapps_agents/health/__init__.py +24 -24
- tapps_agents/health/base.py +75 -75
- tapps_agents/health/checks/__init__.py +22 -22
- tapps_agents/health/checks/automation.py +127 -127
- tapps_agents/health/checks/context7_cache.py +210 -210
- tapps_agents/health/checks/environment.py +116 -116
- tapps_agents/health/checks/execution.py +170 -170
- tapps_agents/health/checks/knowledge_base.py +187 -187
- tapps_agents/health/checks/outcomes.backup_20260204_064058.py +324 -0
- tapps_agents/health/checks/outcomes.backup_20260204_064256.py +324 -0
- tapps_agents/health/checks/outcomes.backup_20260204_064600.py +324 -0
- tapps_agents/health/checks/outcomes.py +324 -324
- tapps_agents/health/collector.py +280 -280
- tapps_agents/health/dashboard.py +137 -137
- tapps_agents/health/metrics.py +151 -151
- tapps_agents/health/registry.py +166 -166
- tapps_agents/hooks/__init__.py +33 -33
- tapps_agents/hooks/config.py +140 -140
- tapps_agents/hooks/events.py +135 -135
- tapps_agents/hooks/executor.py +128 -128
- tapps_agents/hooks/manager.py +143 -143
- tapps_agents/integration/__init__.py +8 -8
- tapps_agents/integration/service_integrator.py +121 -121
- tapps_agents/integrations/__init__.py +10 -10
- tapps_agents/integrations/clawdbot.py +525 -525
- tapps_agents/integrations/memory_bridge.py +356 -356
- tapps_agents/mcp/__init__.py +18 -18
- tapps_agents/mcp/gateway.py +112 -112
- tapps_agents/mcp/servers/__init__.py +13 -13
- tapps_agents/mcp/servers/analysis.py +204 -204
- tapps_agents/mcp/servers/context7.py +198 -198
- tapps_agents/mcp/servers/filesystem.py +218 -218
- tapps_agents/mcp/servers/git.py +201 -201
- tapps_agents/mcp/tool_registry.py +115 -115
- tapps_agents/quality/__init__.py +54 -54
- tapps_agents/quality/coverage_analyzer.py +379 -379
- tapps_agents/quality/enforcement.py +82 -82
- tapps_agents/quality/gates/__init__.py +37 -37
- tapps_agents/quality/gates/approval_gate.py +255 -255
- tapps_agents/quality/gates/base.py +84 -84
- tapps_agents/quality/gates/exceptions.py +43 -43
- tapps_agents/quality/gates/policy_gate.py +195 -195
- tapps_agents/quality/gates/registry.py +239 -239
- tapps_agents/quality/gates/security_gate.py +156 -156
- tapps_agents/quality/quality_gates.py +369 -369
- tapps_agents/quality/secret_scanner.py +335 -335
- tapps_agents/resources/__init__.py +5 -0
- tapps_agents/resources/claude/__init__.py +1 -0
- tapps_agents/resources/claude/commands/README.md +156 -0
- tapps_agents/resources/claude/commands/__init__.py +1 -0
- tapps_agents/resources/claude/commands/build-fix.md +22 -0
- tapps_agents/resources/claude/commands/build.md +77 -0
- tapps_agents/resources/claude/commands/debug.md +53 -0
- tapps_agents/resources/claude/commands/design.md +68 -0
- tapps_agents/resources/claude/commands/docs.md +53 -0
- tapps_agents/resources/claude/commands/e2e.md +22 -0
- tapps_agents/resources/claude/commands/fix.md +54 -0
- tapps_agents/resources/claude/commands/implement.md +53 -0
- tapps_agents/resources/claude/commands/improve.md +53 -0
- tapps_agents/resources/claude/commands/library-docs.md +64 -0
- tapps_agents/resources/claude/commands/lint.md +52 -0
- tapps_agents/resources/claude/commands/plan.md +65 -0
- tapps_agents/resources/claude/commands/refactor-clean.md +21 -0
- tapps_agents/resources/claude/commands/refactor.md +55 -0
- tapps_agents/resources/claude/commands/review.md +67 -0
- tapps_agents/resources/claude/commands/score.md +60 -0
- tapps_agents/resources/claude/commands/security-review.md +22 -0
- tapps_agents/resources/claude/commands/security-scan.md +54 -0
- tapps_agents/resources/claude/commands/tdd.md +24 -0
- tapps_agents/resources/claude/commands/test-coverage.md +21 -0
- tapps_agents/resources/claude/commands/test.md +54 -0
- tapps_agents/resources/claude/commands/update-codemaps.md +20 -0
- tapps_agents/resources/claude/commands/update-docs.md +21 -0
- tapps_agents/resources/claude/skills/__init__.py +1 -0
- tapps_agents/resources/claude/skills/analyst/SKILL.md +272 -0
- tapps_agents/resources/claude/skills/analyst/__init__.py +1 -0
- tapps_agents/resources/claude/skills/architect/SKILL.md +282 -0
- tapps_agents/resources/claude/skills/architect/__init__.py +1 -0
- tapps_agents/resources/claude/skills/backend-patterns/SKILL.md +30 -0
- tapps_agents/resources/claude/skills/backend-patterns/__init__.py +1 -0
- tapps_agents/resources/claude/skills/coding-standards/SKILL.md +29 -0
- tapps_agents/resources/claude/skills/coding-standards/__init__.py +1 -0
- tapps_agents/resources/claude/skills/debugger/SKILL.md +203 -0
- tapps_agents/resources/claude/skills/debugger/__init__.py +1 -0
- tapps_agents/resources/claude/skills/designer/SKILL.md +243 -0
- tapps_agents/resources/claude/skills/designer/__init__.py +1 -0
- tapps_agents/resources/claude/skills/documenter/SKILL.md +252 -0
- tapps_agents/resources/claude/skills/documenter/__init__.py +1 -0
- tapps_agents/resources/claude/skills/enhancer/SKILL.md +307 -0
- tapps_agents/resources/claude/skills/enhancer/__init__.py +1 -0
- tapps_agents/resources/claude/skills/evaluator/SKILL.md +204 -0
- tapps_agents/resources/claude/skills/evaluator/__init__.py +1 -0
- tapps_agents/resources/claude/skills/frontend-patterns/SKILL.md +29 -0
- tapps_agents/resources/claude/skills/frontend-patterns/__init__.py +1 -0
- tapps_agents/resources/claude/skills/implementer/SKILL.md +188 -0
- tapps_agents/resources/claude/skills/implementer/__init__.py +1 -0
- tapps_agents/resources/claude/skills/improver/SKILL.md +218 -0
- tapps_agents/resources/claude/skills/improver/__init__.py +1 -0
- tapps_agents/resources/claude/skills/ops/SKILL.md +281 -0
- tapps_agents/resources/claude/skills/ops/__init__.py +1 -0
- tapps_agents/resources/claude/skills/orchestrator/SKILL.md +390 -0
- tapps_agents/resources/claude/skills/orchestrator/__init__.py +1 -0
- tapps_agents/resources/claude/skills/planner/SKILL.md +254 -0
- tapps_agents/resources/claude/skills/planner/__init__.py +1 -0
- tapps_agents/resources/claude/skills/reviewer/SKILL.md +434 -0
- tapps_agents/resources/claude/skills/reviewer/__init__.py +1 -0
- tapps_agents/resources/claude/skills/security-review/SKILL.md +31 -0
- tapps_agents/resources/claude/skills/security-review/__init__.py +1 -0
- tapps_agents/resources/claude/skills/simple-mode/SKILL.md +695 -0
- tapps_agents/resources/claude/skills/simple-mode/__init__.py +1 -0
- tapps_agents/resources/claude/skills/tester/SKILL.md +219 -0
- tapps_agents/resources/claude/skills/tester/__init__.py +1 -0
- tapps_agents/resources/cursor/.cursorignore +35 -0
- tapps_agents/resources/cursor/__init__.py +1 -0
- tapps_agents/resources/cursor/commands/__init__.py +1 -0
- tapps_agents/resources/cursor/commands/build-fix.md +11 -0
- tapps_agents/resources/cursor/commands/build.md +11 -0
- tapps_agents/resources/cursor/commands/e2e.md +11 -0
- tapps_agents/resources/cursor/commands/fix.md +11 -0
- tapps_agents/resources/cursor/commands/refactor-clean.md +11 -0
- tapps_agents/resources/cursor/commands/review.md +11 -0
- tapps_agents/resources/cursor/commands/security-review.md +11 -0
- tapps_agents/resources/cursor/commands/tdd.md +11 -0
- tapps_agents/resources/cursor/commands/test-coverage.md +11 -0
- tapps_agents/resources/cursor/commands/test.md +11 -0
- tapps_agents/resources/cursor/commands/update-codemaps.md +10 -0
- tapps_agents/resources/cursor/commands/update-docs.md +11 -0
- tapps_agents/resources/cursor/rules/__init__.py +1 -0
- tapps_agents/resources/cursor/rules/agent-capabilities.mdc +687 -0
- tapps_agents/resources/cursor/rules/coding-style.mdc +31 -0
- tapps_agents/resources/cursor/rules/command-reference.mdc +2081 -0
- tapps_agents/resources/cursor/rules/cursor-mode-usage.mdc +125 -0
- tapps_agents/resources/cursor/rules/git-workflow.mdc +29 -0
- tapps_agents/resources/cursor/rules/performance.mdc +29 -0
- tapps_agents/resources/cursor/rules/project-context.mdc +163 -0
- tapps_agents/resources/cursor/rules/project-profiling.mdc +197 -0
- tapps_agents/resources/cursor/rules/quick-reference.mdc +630 -0
- tapps_agents/resources/cursor/rules/security.mdc +32 -0
- tapps_agents/resources/cursor/rules/simple-mode.mdc +500 -0
- tapps_agents/resources/cursor/rules/testing.mdc +31 -0
- tapps_agents/resources/cursor/rules/when-to-use.mdc +156 -0
- tapps_agents/resources/cursor/rules/workflow-presets.mdc +179 -0
- tapps_agents/resources/customizations/__init__.py +1 -0
- tapps_agents/resources/customizations/example-custom.yaml +83 -0
- tapps_agents/resources/hooks/__init__.py +1 -0
- tapps_agents/resources/hooks/templates/README.md +5 -0
- tapps_agents/resources/hooks/templates/__init__.py +1 -0
- tapps_agents/resources/hooks/templates/add-project-context.yaml +8 -0
- tapps_agents/resources/hooks/templates/auto-format-js.yaml +10 -0
- tapps_agents/resources/hooks/templates/auto-format-python.yaml +10 -0
- tapps_agents/resources/hooks/templates/git-commit-check.yaml +7 -0
- tapps_agents/resources/hooks/templates/notify-on-complete.yaml +8 -0
- tapps_agents/resources/hooks/templates/quality-gate.yaml +8 -0
- tapps_agents/resources/hooks/templates/security-scan-on-edit.yaml +10 -0
- tapps_agents/resources/hooks/templates/session-end-log.yaml +7 -0
- tapps_agents/resources/hooks/templates/show-beads-ready.yaml +8 -0
- tapps_agents/resources/hooks/templates/test-on-edit.yaml +10 -0
- tapps_agents/resources/hooks/templates/update-docs-on-complete.yaml +8 -0
- tapps_agents/resources/hooks/templates/user-prompt-log.yaml +7 -0
- tapps_agents/resources/scripts/__init__.py +1 -0
- tapps_agents/resources/scripts/set_bd_path.ps1 +51 -0
- tapps_agents/resources/workflows/__init__.py +1 -0
- tapps_agents/resources/workflows/presets/__init__.py +1 -0
- tapps_agents/resources/workflows/presets/brownfield-analysis.yaml +235 -0
- tapps_agents/resources/workflows/presets/fix.yaml +78 -0
- tapps_agents/resources/workflows/presets/full-sdlc.yaml +122 -0
- tapps_agents/resources/workflows/presets/quality.yaml +82 -0
- tapps_agents/resources/workflows/presets/rapid-dev.yaml +84 -0
- tapps_agents/session/__init__.py +19 -19
- tapps_agents/session/manager.py +256 -256
- tapps_agents/simple_mode/__init__.py +66 -66
- tapps_agents/simple_mode/agent_contracts.py +357 -357
- tapps_agents/simple_mode/beads_hooks.py +151 -151
- tapps_agents/simple_mode/code_snippet_handler.py +382 -382
- tapps_agents/simple_mode/documentation_manager.py +395 -395
- tapps_agents/simple_mode/documentation_reader.py +187 -187
- tapps_agents/simple_mode/file_inference.py +292 -292
- tapps_agents/simple_mode/framework_change_detector.py +268 -268
- tapps_agents/simple_mode/intent_parser.py +510 -510
- tapps_agents/simple_mode/learning_progression.py +358 -358
- tapps_agents/simple_mode/nl_handler.py +700 -700
- tapps_agents/simple_mode/onboarding.py +253 -253
- tapps_agents/simple_mode/orchestrators/__init__.py +38 -38
- tapps_agents/simple_mode/orchestrators/breakdown_orchestrator.py +49 -49
- tapps_agents/simple_mode/orchestrators/brownfield_orchestrator.py +135 -135
- tapps_agents/simple_mode/orchestrators/deliverable_checklist.py +349 -349
- tapps_agents/simple_mode/orchestrators/enhance_orchestrator.py +53 -53
- tapps_agents/simple_mode/orchestrators/epic_orchestrator.py +122 -122
- tapps_agents/simple_mode/orchestrators/explore_orchestrator.py +184 -184
- tapps_agents/simple_mode/orchestrators/plan_analysis_orchestrator.py +206 -206
- tapps_agents/simple_mode/orchestrators/pr_orchestrator.py +237 -237
- tapps_agents/simple_mode/orchestrators/refactor_orchestrator.py +222 -222
- tapps_agents/simple_mode/orchestrators/requirements_tracer.py +262 -262
- tapps_agents/simple_mode/orchestrators/resume_orchestrator.py +210 -210
- tapps_agents/simple_mode/orchestrators/review_orchestrator.py +161 -161
- tapps_agents/simple_mode/orchestrators/test_orchestrator.py +82 -82
- tapps_agents/simple_mode/output_aggregator.py +340 -340
- tapps_agents/simple_mode/result_formatters.py +598 -598
- tapps_agents/simple_mode/step_dependencies.py +382 -382
- tapps_agents/simple_mode/step_results.py +276 -276
- tapps_agents/simple_mode/streaming.py +388 -388
- tapps_agents/simple_mode/variations.py +129 -129
- tapps_agents/simple_mode/visual_feedback.py +238 -238
- tapps_agents/simple_mode/zero_config.py +274 -274
- tapps_agents/suggestions/__init__.py +8 -8
- tapps_agents/suggestions/inline_suggester.py +52 -52
- tapps_agents/templates/__init__.py +8 -8
- tapps_agents/templates/microservice_generator.py +274 -274
- tapps_agents/utils/env_validator.py +291 -291
- tapps_agents/workflow/__init__.py +171 -171
- tapps_agents/workflow/acceptance_verifier.py +132 -132
- tapps_agents/workflow/agent_handlers/__init__.py +41 -41
- tapps_agents/workflow/agent_handlers/analyst_handler.py +75 -75
- tapps_agents/workflow/agent_handlers/architect_handler.py +107 -107
- tapps_agents/workflow/agent_handlers/base.py +84 -84
- tapps_agents/workflow/agent_handlers/debugger_handler.py +100 -100
- tapps_agents/workflow/agent_handlers/designer_handler.py +110 -110
- tapps_agents/workflow/agent_handlers/documenter_handler.py +94 -94
- tapps_agents/workflow/agent_handlers/implementer_handler.py +235 -235
- tapps_agents/workflow/agent_handlers/ops_handler.py +62 -62
- tapps_agents/workflow/agent_handlers/orchestrator_handler.py +43 -43
- tapps_agents/workflow/agent_handlers/planner_handler.py +98 -98
- tapps_agents/workflow/agent_handlers/registry.py +119 -119
- tapps_agents/workflow/agent_handlers/reviewer_handler.py +119 -119
- tapps_agents/workflow/agent_handlers/tester_handler.py +69 -69
- tapps_agents/workflow/analytics_accessor.py +337 -337
- tapps_agents/workflow/analytics_alerts.py +416 -416
- tapps_agents/workflow/analytics_dashboard_cursor.py +281 -281
- tapps_agents/workflow/analytics_dual_write.py +103 -103
- tapps_agents/workflow/analytics_integration.py +119 -119
- tapps_agents/workflow/analytics_query_parser.py +278 -278
- tapps_agents/workflow/analytics_visualizer.py +259 -259
- tapps_agents/workflow/artifact_helper.py +204 -204
- tapps_agents/workflow/audit_logger.py +263 -263
- tapps_agents/workflow/auto_execution_config.py +340 -340
- tapps_agents/workflow/auto_progression.py +586 -586
- tapps_agents/workflow/branch_cleanup.py +349 -349
- tapps_agents/workflow/checkpoint.py +256 -256
- tapps_agents/workflow/checkpoint_manager.py +178 -178
- tapps_agents/workflow/code_artifact.py +179 -179
- tapps_agents/workflow/common_enums.py +96 -96
- tapps_agents/workflow/confirmation_handler.py +130 -130
- tapps_agents/workflow/context_analyzer.py +222 -222
- tapps_agents/workflow/context_artifact.py +230 -230
- tapps_agents/workflow/cursor_chat.py +94 -94
- tapps_agents/workflow/cursor_skill_helper.py +516 -516
- tapps_agents/workflow/dependency_resolver.py +244 -244
- tapps_agents/workflow/design_artifact.py +156 -156
- tapps_agents/workflow/detector.py +751 -751
- tapps_agents/workflow/direct_execution_fallback.py +301 -301
- tapps_agents/workflow/docs_artifact.py +168 -168
- tapps_agents/workflow/enforcer.py +389 -389
- tapps_agents/workflow/enhancement_artifact.py +142 -142
- tapps_agents/workflow/error_recovery.py +806 -806
- tapps_agents/workflow/event_bus.py +183 -183
- tapps_agents/workflow/event_log.py +612 -612
- tapps_agents/workflow/events.py +63 -63
- tapps_agents/workflow/exceptions.py +43 -43
- tapps_agents/workflow/execution_graph.py +498 -498
- tapps_agents/workflow/execution_plan.py +126 -126
- tapps_agents/workflow/file_utils.py +186 -186
- tapps_agents/workflow/gate_evaluator.py +182 -182
- tapps_agents/workflow/gate_integration.py +200 -200
- tapps_agents/workflow/graph_visualizer.py +130 -130
- tapps_agents/workflow/health_checker.py +206 -206
- tapps_agents/workflow/logging_helper.py +243 -243
- tapps_agents/workflow/manifest.py +582 -582
- tapps_agents/workflow/marker_writer.py +250 -250
- tapps_agents/workflow/messaging.py +325 -325
- tapps_agents/workflow/metadata_models.py +91 -91
- tapps_agents/workflow/metrics_integration.py +226 -226
- tapps_agents/workflow/migration_utils.py +116 -116
- tapps_agents/workflow/models.py +148 -148
- tapps_agents/workflow/nlp_config.py +198 -198
- tapps_agents/workflow/nlp_error_handler.py +207 -207
- tapps_agents/workflow/nlp_executor.py +163 -163
- tapps_agents/workflow/nlp_parser.py +528 -528
- tapps_agents/workflow/observability_dashboard.py +451 -451
- tapps_agents/workflow/observer.py +170 -170
- tapps_agents/workflow/ops_artifact.py +257 -257
- tapps_agents/workflow/output_passing.py +214 -214
- tapps_agents/workflow/parallel_executor.py +463 -463
- tapps_agents/workflow/planning_artifact.py +179 -179
- tapps_agents/workflow/preset_loader.py +285 -285
- tapps_agents/workflow/preset_recommender.py +270 -270
- tapps_agents/workflow/progress_logger.py +145 -145
- tapps_agents/workflow/progress_manager.py +303 -303
- tapps_agents/workflow/progress_monitor.py +186 -186
- tapps_agents/workflow/progress_updates.py +423 -423
- tapps_agents/workflow/quality_artifact.py +158 -158
- tapps_agents/workflow/quality_loopback.py +101 -101
- tapps_agents/workflow/recommender.py +387 -387
- tapps_agents/workflow/remediation_loop.py +166 -166
- tapps_agents/workflow/result_aggregator.py +300 -300
- tapps_agents/workflow/review_artifact.py +185 -185
- tapps_agents/workflow/schema_validator.py +522 -522
- tapps_agents/workflow/session_handoff.py +178 -178
- tapps_agents/workflow/skill_invoker.py +648 -648
- tapps_agents/workflow/state_manager.py +756 -756
- tapps_agents/workflow/state_persistence_config.py +331 -331
- tapps_agents/workflow/status_monitor.py +449 -449
- tapps_agents/workflow/step_checkpoint.py +314 -314
- tapps_agents/workflow/step_details.py +201 -201
- tapps_agents/workflow/story_models.py +147 -147
- tapps_agents/workflow/streaming.py +416 -416
- tapps_agents/workflow/suggestion_engine.py +552 -552
- tapps_agents/workflow/testing_artifact.py +186 -186
- tapps_agents/workflow/timeline.py +158 -158
- tapps_agents/workflow/token_integration.py +209 -209
- tapps_agents/workflow/validation.py +217 -217
- tapps_agents/workflow/visual_feedback.py +391 -391
- tapps_agents/workflow/workflow_chain.py +95 -95
- tapps_agents/workflow/workflow_summary.py +219 -219
- tapps_agents/workflow/worktree_manager.py +724 -724
- {tapps_agents-3.6.0.dist-info → tapps_agents-3.6.1.dist-info}/METADATA +672 -672
- tapps_agents-3.6.1.dist-info/RECORD +883 -0
- {tapps_agents-3.6.0.dist-info → tapps_agents-3.6.1.dist-info}/licenses/LICENSE +22 -22
- tapps_agents-3.6.0.dist-info/RECORD +0 -758
- {tapps_agents-3.6.0.dist-info → tapps_agents-3.6.1.dist-info}/WHEEL +0 -0
- {tapps_agents-3.6.0.dist-info → tapps_agents-3.6.1.dist-info}/entry_points.txt +0 -0
- {tapps_agents-3.6.0.dist-info → tapps_agents-3.6.1.dist-info}/top_level.txt +0 -0
|
@@ -1,1865 +1,1865 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Agent Learning System
|
|
3
|
-
|
|
4
|
-
Enables agents to learn from past tasks and improve over time.
|
|
5
|
-
"""
|
|
6
|
-
|
|
7
|
-
# @ai-prime-directive: This file implements the Agent Learning System for pattern extraction, prompt optimization,
|
|
8
|
-
# and feedback analysis. The system enables agents to learn from past tasks and improve over time through
|
|
9
|
-
# meta-learning, pattern recognition, and adaptive learning strategies.
|
|
10
|
-
|
|
11
|
-
# @ai-constraints:
|
|
12
|
-
# - Learning intensity must adapt to hardware profile (NUC, Desktop, Server)
|
|
13
|
-
# - Pattern extraction must include security scanning before pattern acceptance
|
|
14
|
-
# - Anti-pattern extraction must learn from negative feedback and failures
|
|
15
|
-
# - Prompt optimization must respect hardware constraints and token budgets
|
|
16
|
-
# - Meta-learning must track learning effectiveness and adjust strategies accordingly
|
|
17
|
-
# - Performance: Learning operations should not significantly impact agent response times
|
|
18
|
-
|
|
19
|
-
# @note[2025-01-15]: Agent learning is an advanced feature that improves agent performance over time.
|
|
20
|
-
# The system uses meta-learning to adapt learning strategies based on effectiveness tracking.
|
|
21
|
-
# See docs/architecture/decisions/ for related architectural decisions.
|
|
22
|
-
|
|
23
|
-
import logging
|
|
24
|
-
import re
|
|
25
|
-
from dataclasses import dataclass, field
|
|
26
|
-
from datetime import UTC, datetime
|
|
27
|
-
from typing import Any
|
|
28
|
-
|
|
29
|
-
from .best_practice_consultant import BestPracticeConsultant
|
|
30
|
-
from .capability_registry import CapabilityRegistry, LearningIntensity
|
|
31
|
-
from .hardware_profiler import HardwareProfile, HardwareProfiler
|
|
32
|
-
from .learning_confidence import LearningConfidenceCalculator
|
|
33
|
-
from .learning_dashboard import LearningDashboard
|
|
34
|
-
from .learning_decision import LearningDecisionEngine
|
|
35
|
-
from .learning_explainability import (
|
|
36
|
-
DecisionReasoningLogger,
|
|
37
|
-
LearningImpactReporter,
|
|
38
|
-
PatternSelectionExplainer,
|
|
39
|
-
)
|
|
40
|
-
from .meta_learning import (
|
|
41
|
-
AdaptiveLearningRate,
|
|
42
|
-
LearningEffectivenessTracker,
|
|
43
|
-
LearningSelfAssessor,
|
|
44
|
-
LearningStrategy,
|
|
45
|
-
LearningStrategySelector,
|
|
46
|
-
)
|
|
47
|
-
from .security_scanner import SecurityScanner
|
|
48
|
-
from .task_memory import TaskMemorySystem, TaskOutcome
|
|
49
|
-
|
|
50
|
-
logger = logging.getLogger(__name__)
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
@dataclass
|
|
54
|
-
class CodePattern:
|
|
55
|
-
"""Represents a learned code pattern."""
|
|
56
|
-
|
|
57
|
-
pattern_id: str
|
|
58
|
-
pattern_type: str # "function", "class", "import", "structure"
|
|
59
|
-
code_snippet: str
|
|
60
|
-
context: str
|
|
61
|
-
quality_score: float
|
|
62
|
-
usage_count: int
|
|
63
|
-
success_rate: float
|
|
64
|
-
learned_from: list[str] # Task IDs where this pattern was successful
|
|
65
|
-
metadata: dict[str, Any] = field(default_factory=dict)
|
|
66
|
-
security_score: float = 0.0 # Security score (0-10, higher is better)
|
|
67
|
-
is_anti_pattern: bool = False # True if this is an anti-pattern to avoid
|
|
68
|
-
failure_reasons: list[str] = field(default_factory=list) # Reasons for failure
|
|
69
|
-
rejection_count: int = 0 # Number of times this pattern was rejected
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
@dataclass
|
|
73
|
-
class PromptVariant:
|
|
74
|
-
"""Represents a prompt variation for A/B testing."""
|
|
75
|
-
|
|
76
|
-
variant_id: str
|
|
77
|
-
prompt_template: str
|
|
78
|
-
modifications: list[str] # List of modifications made
|
|
79
|
-
test_count: int
|
|
80
|
-
success_count: int
|
|
81
|
-
average_quality: float
|
|
82
|
-
created_at: datetime
|
|
83
|
-
last_tested: datetime | None = None
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
class PatternExtractor:
|
|
87
|
-
"""Extracts patterns from successful code outputs."""
|
|
88
|
-
|
|
89
|
-
def __init__(
|
|
90
|
-
self,
|
|
91
|
-
min_quality_threshold: float = 0.7,
|
|
92
|
-
security_scanner: SecurityScanner | None = None,
|
|
93
|
-
security_threshold: float = 7.0,
|
|
94
|
-
):
|
|
95
|
-
"""
|
|
96
|
-
Initialize pattern extractor.
|
|
97
|
-
|
|
98
|
-
Args:
|
|
99
|
-
min_quality_threshold: Minimum quality score to extract patterns
|
|
100
|
-
security_scanner: Optional security scanner instance
|
|
101
|
-
security_threshold: Minimum security score to extract patterns (default: 7.0)
|
|
102
|
-
"""
|
|
103
|
-
self.min_quality_threshold = min_quality_threshold
|
|
104
|
-
self.security_scanner = security_scanner or SecurityScanner()
|
|
105
|
-
self.security_threshold = security_threshold
|
|
106
|
-
self.patterns: dict[str, CodePattern] = {}
|
|
107
|
-
|
|
108
|
-
def extract_patterns(
|
|
109
|
-
self,
|
|
110
|
-
code: str,
|
|
111
|
-
quality_score: float,
|
|
112
|
-
task_id: str,
|
|
113
|
-
pattern_types: list[str] | None = None,
|
|
114
|
-
) -> list[CodePattern]:
|
|
115
|
-
"""
|
|
116
|
-
Extract patterns from code.
|
|
117
|
-
|
|
118
|
-
Args:
|
|
119
|
-
code: Source code
|
|
120
|
-
quality_score: Quality score of the code
|
|
121
|
-
task_id: Task identifier
|
|
122
|
-
pattern_types: Optional list of pattern types to extract
|
|
123
|
-
|
|
124
|
-
Returns:
|
|
125
|
-
List of extracted patterns
|
|
126
|
-
"""
|
|
127
|
-
if quality_score < self.min_quality_threshold:
|
|
128
|
-
return []
|
|
129
|
-
|
|
130
|
-
# Security check before extraction
|
|
131
|
-
security_result = self.security_scanner.scan_code(code=code)
|
|
132
|
-
security_score = security_result["security_score"]
|
|
133
|
-
vulnerabilities = security_result["vulnerabilities"]
|
|
134
|
-
is_safe = security_result.get("is_safe", True)
|
|
135
|
-
|
|
136
|
-
# Only extract if security score meets threshold
|
|
137
|
-
# Check both score >= threshold AND is_safe flag
|
|
138
|
-
if security_score < self.security_threshold or not is_safe:
|
|
139
|
-
logger.debug(
|
|
140
|
-
f"Skipping pattern extraction: security score {security_score:.2f} "
|
|
141
|
-
f"below threshold {self.security_threshold:.2f} or is_safe={is_safe}"
|
|
142
|
-
)
|
|
143
|
-
return []
|
|
144
|
-
|
|
145
|
-
patterns = []
|
|
146
|
-
|
|
147
|
-
# Extract function patterns
|
|
148
|
-
if not pattern_types or "function" in pattern_types:
|
|
149
|
-
func_patterns = self._extract_function_patterns(
|
|
150
|
-
code, quality_score, task_id
|
|
151
|
-
)
|
|
152
|
-
patterns.extend(func_patterns)
|
|
153
|
-
|
|
154
|
-
# Extract class patterns
|
|
155
|
-
if not pattern_types or "class" in pattern_types:
|
|
156
|
-
class_patterns = self._extract_class_patterns(code, quality_score, task_id)
|
|
157
|
-
patterns.extend(class_patterns)
|
|
158
|
-
|
|
159
|
-
# Extract import patterns
|
|
160
|
-
if not pattern_types or "import" in pattern_types:
|
|
161
|
-
import_patterns = self._extract_import_patterns(
|
|
162
|
-
code, quality_score, task_id
|
|
163
|
-
)
|
|
164
|
-
patterns.extend(import_patterns)
|
|
165
|
-
|
|
166
|
-
# Extract structural patterns
|
|
167
|
-
if not pattern_types or "structure" in pattern_types:
|
|
168
|
-
struct_patterns = self._extract_structural_patterns(
|
|
169
|
-
code, quality_score, task_id
|
|
170
|
-
)
|
|
171
|
-
patterns.extend(struct_patterns)
|
|
172
|
-
|
|
173
|
-
return patterns
|
|
174
|
-
|
|
175
|
-
def _extract_function_patterns(
|
|
176
|
-
self, code: str, quality_score: float, task_id: str
|
|
177
|
-
) -> list[CodePattern]:
|
|
178
|
-
"""Extract function patterns."""
|
|
179
|
-
patterns = []
|
|
180
|
-
|
|
181
|
-
# Match function definitions - more flexible pattern that handles docstrings and type hints
|
|
182
|
-
# Find all function definitions, then extract their bodies
|
|
183
|
-
# Pattern matches: def function_name(...) with optional type hints, handling multiline signatures
|
|
184
|
-
func_def_pattern = r"def\s+(\w+)\s*\([^)]*\)\s*(?:->\s*[^:]+)?:"
|
|
185
|
-
func_matches = list(re.finditer(func_def_pattern, code, re.MULTILINE | re.DOTALL))
|
|
186
|
-
|
|
187
|
-
for i, match in enumerate(func_matches):
|
|
188
|
-
func_name = match.group(1)
|
|
189
|
-
start_pos = match.end()
|
|
190
|
-
|
|
191
|
-
# Find the end of this function (next function def or end of code)
|
|
192
|
-
if i + 1 < len(func_matches):
|
|
193
|
-
end_pos = func_matches[i + 1].start()
|
|
194
|
-
else:
|
|
195
|
-
end_pos = len(code)
|
|
196
|
-
|
|
197
|
-
func_body = code[start_pos:end_pos].strip()
|
|
198
|
-
|
|
199
|
-
# Skip if function body is empty
|
|
200
|
-
if not func_body:
|
|
201
|
-
continue
|
|
202
|
-
|
|
203
|
-
# Get security score for this pattern
|
|
204
|
-
pattern_code = f"def {func_name}(...):\n{func_body[:200]}"
|
|
205
|
-
pattern_security = self.security_scanner.scan_code(code=pattern_code)
|
|
206
|
-
pattern_security_score = pattern_security["security_score"]
|
|
207
|
-
pattern_vulnerabilities = pattern_security["vulnerabilities"]
|
|
208
|
-
|
|
209
|
-
pattern = CodePattern(
|
|
210
|
-
pattern_id=f"func_{func_name}_{hash(func_body) % 10000}",
|
|
211
|
-
pattern_type="function",
|
|
212
|
-
code_snippet=pattern_code,
|
|
213
|
-
context=f"Function: {func_name}",
|
|
214
|
-
quality_score=quality_score,
|
|
215
|
-
usage_count=1,
|
|
216
|
-
success_rate=1.0,
|
|
217
|
-
learned_from=[task_id],
|
|
218
|
-
security_score=pattern_security_score,
|
|
219
|
-
metadata={"vulnerabilities": pattern_vulnerabilities},
|
|
220
|
-
)
|
|
221
|
-
patterns.append(pattern)
|
|
222
|
-
|
|
223
|
-
return patterns
|
|
224
|
-
|
|
225
|
-
def _extract_class_patterns(
|
|
226
|
-
self, code: str, quality_score: float, task_id: str
|
|
227
|
-
) -> list[CodePattern]:
|
|
228
|
-
"""Extract class patterns."""
|
|
229
|
-
patterns = []
|
|
230
|
-
|
|
231
|
-
# Match class definitions
|
|
232
|
-
class_pattern = r"class\s+(\w+)(?:\([^)]+\))?:\s*\n((?:\s{4}.*\n?)*)"
|
|
233
|
-
matches = re.finditer(class_pattern, code, re.MULTILINE)
|
|
234
|
-
|
|
235
|
-
for match in matches:
|
|
236
|
-
class_name = match.group(1)
|
|
237
|
-
class_body = match.group(2)
|
|
238
|
-
|
|
239
|
-
# Get security score for this pattern
|
|
240
|
-
pattern_code = f"class {class_name}:\n{class_body[:200]}"
|
|
241
|
-
pattern_security = self.security_scanner.scan_code(code=pattern_code)
|
|
242
|
-
pattern_security_score = pattern_security["security_score"]
|
|
243
|
-
pattern_vulnerabilities = pattern_security["vulnerabilities"]
|
|
244
|
-
|
|
245
|
-
pattern = CodePattern(
|
|
246
|
-
pattern_id=f"class_{class_name}_{hash(class_body) % 10000}",
|
|
247
|
-
pattern_type="class",
|
|
248
|
-
code_snippet=pattern_code,
|
|
249
|
-
context=f"Class: {class_name}",
|
|
250
|
-
quality_score=quality_score,
|
|
251
|
-
usage_count=1,
|
|
252
|
-
success_rate=1.0,
|
|
253
|
-
learned_from=[task_id],
|
|
254
|
-
security_score=pattern_security_score,
|
|
255
|
-
metadata={"vulnerabilities": pattern_vulnerabilities},
|
|
256
|
-
)
|
|
257
|
-
patterns.append(pattern)
|
|
258
|
-
|
|
259
|
-
return patterns
|
|
260
|
-
|
|
261
|
-
def _extract_import_patterns(
|
|
262
|
-
self, code: str, quality_score: float, task_id: str
|
|
263
|
-
) -> list[CodePattern]:
|
|
264
|
-
"""Extract import patterns."""
|
|
265
|
-
patterns = []
|
|
266
|
-
|
|
267
|
-
# Match import statements
|
|
268
|
-
import_pattern = r"^(?:from\s+[\w.]+|import\s+[\w.,\s]+)"
|
|
269
|
-
matches = re.finditer(import_pattern, code, re.MULTILINE)
|
|
270
|
-
|
|
271
|
-
imports = []
|
|
272
|
-
for match in matches:
|
|
273
|
-
imports.append(match.group(0).strip())
|
|
274
|
-
|
|
275
|
-
if imports:
|
|
276
|
-
# Get security score for this pattern
|
|
277
|
-
pattern_code = "\n".join(imports[:10])
|
|
278
|
-
pattern_security = self.security_scanner.scan_code(code=pattern_code)
|
|
279
|
-
pattern_security_score = pattern_security["security_score"]
|
|
280
|
-
pattern_vulnerabilities = pattern_security["vulnerabilities"]
|
|
281
|
-
|
|
282
|
-
pattern = CodePattern(
|
|
283
|
-
pattern_id=f"imports_{hash(''.join(imports)) % 10000}",
|
|
284
|
-
pattern_type="import",
|
|
285
|
-
code_snippet=pattern_code,
|
|
286
|
-
context="Import statements",
|
|
287
|
-
quality_score=quality_score,
|
|
288
|
-
usage_count=1,
|
|
289
|
-
success_rate=1.0,
|
|
290
|
-
learned_from=[task_id],
|
|
291
|
-
security_score=pattern_security_score,
|
|
292
|
-
metadata={"vulnerabilities": pattern_vulnerabilities},
|
|
293
|
-
)
|
|
294
|
-
patterns.append(pattern)
|
|
295
|
-
|
|
296
|
-
return patterns
|
|
297
|
-
|
|
298
|
-
def _extract_structural_patterns(
|
|
299
|
-
self, code: str, quality_score: float, task_id: str
|
|
300
|
-
) -> list[CodePattern]:
|
|
301
|
-
"""Extract structural patterns (decorators, context managers, etc.)."""
|
|
302
|
-
patterns = []
|
|
303
|
-
|
|
304
|
-
# Match decorators
|
|
305
|
-
decorator_pattern = r"@\w+(?:\([^)]*\))?"
|
|
306
|
-
decorators = re.findall(decorator_pattern, code)
|
|
307
|
-
|
|
308
|
-
if decorators:
|
|
309
|
-
# Get security score for this pattern
|
|
310
|
-
pattern_code = "\n".join(decorators[:5])
|
|
311
|
-
pattern_security = self.security_scanner.scan_code(code=pattern_code)
|
|
312
|
-
pattern_security_score = pattern_security["security_score"]
|
|
313
|
-
pattern_vulnerabilities = pattern_security["vulnerabilities"]
|
|
314
|
-
|
|
315
|
-
pattern = CodePattern(
|
|
316
|
-
pattern_id=f"decorators_{hash(''.join(decorators)) % 10000}",
|
|
317
|
-
pattern_type="structure",
|
|
318
|
-
code_snippet=pattern_code,
|
|
319
|
-
context="Decorators",
|
|
320
|
-
quality_score=quality_score,
|
|
321
|
-
usage_count=1,
|
|
322
|
-
success_rate=1.0,
|
|
323
|
-
learned_from=[task_id],
|
|
324
|
-
security_score=pattern_security_score,
|
|
325
|
-
metadata={"vulnerabilities": pattern_vulnerabilities},
|
|
326
|
-
)
|
|
327
|
-
patterns.append(pattern)
|
|
328
|
-
|
|
329
|
-
return patterns
|
|
330
|
-
|
|
331
|
-
def get_patterns_for_context(
|
|
332
|
-
self,
|
|
333
|
-
context: str,
|
|
334
|
-
pattern_type: str | None = None,
|
|
335
|
-
limit: int = 5,
|
|
336
|
-
exclude_anti_patterns: bool = True,
|
|
337
|
-
) -> list[CodePattern]:
|
|
338
|
-
"""
|
|
339
|
-
Get relevant patterns for a context.
|
|
340
|
-
|
|
341
|
-
Args:
|
|
342
|
-
context: Context string
|
|
343
|
-
pattern_type: Optional pattern type filter
|
|
344
|
-
limit: Maximum results
|
|
345
|
-
exclude_anti_patterns: If True, exclude anti-patterns (default: True)
|
|
346
|
-
|
|
347
|
-
Returns:
|
|
348
|
-
List of relevant patterns
|
|
349
|
-
"""
|
|
350
|
-
candidates = list(self.patterns.values())
|
|
351
|
-
|
|
352
|
-
# Filter out anti-patterns if requested
|
|
353
|
-
if exclude_anti_patterns:
|
|
354
|
-
candidates = [p for p in candidates if not p.is_anti_pattern]
|
|
355
|
-
|
|
356
|
-
if pattern_type:
|
|
357
|
-
candidates = [p for p in candidates if p.pattern_type == pattern_type]
|
|
358
|
-
|
|
359
|
-
# Sort by security score, quality, and usage
|
|
360
|
-
candidates.sort(
|
|
361
|
-
key=lambda p: (p.security_score, p.quality_score, p.usage_count),
|
|
362
|
-
reverse=True,
|
|
363
|
-
)
|
|
364
|
-
|
|
365
|
-
return candidates[:limit]
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
class AntiPatternExtractor:
    """Extracts anti-patterns from failed tasks and low-quality code."""

    def __init__(
        self,
        max_quality_threshold: float = 0.7,
        security_scanner: SecurityScanner | None = None,
    ):
        """
        Initialize anti-pattern extractor.

        Args:
            max_quality_threshold: Only code scoring strictly below this
                threshold yields anti-patterns.
            security_scanner: Optional security scanner instance; a default
                ``SecurityScanner`` is created when omitted.
        """
        self.max_quality_threshold = max_quality_threshold
        self.security_scanner = security_scanner or SecurityScanner()
        # Registry of known anti-patterns, keyed by pattern_id.
        self.anti_patterns: dict[str, CodePattern] = {}

    def extract_anti_patterns(
        self,
        code: str,
        quality_score: float,
        task_id: str,
        failure_reasons: list[str] | None = None,
        pattern_types: list[str] | None = None,
    ) -> list[CodePattern]:
        """
        Extract anti-patterns from failed or low-quality code.

        Args:
            code: Source code
            quality_score: Quality score of the code
            task_id: Task identifier
            failure_reasons: Optional list of failure reasons
            pattern_types: Optional list of pattern types to extract

        Returns:
            List of extracted anti-patterns; empty when ``quality_score``
            is at or above ``max_quality_threshold``.
        """
        # Only extract if quality is below threshold.
        if quality_score >= self.max_quality_threshold:
            return []

        reasons = failure_reasons or []

        # Run every extractor whose kind survives the pattern_types filter.
        extractors = (
            ("function", self._extract_function_anti_patterns),
            ("class", self._extract_class_anti_patterns),
            ("import", self._extract_import_anti_patterns),
            ("structure", self._extract_structural_anti_patterns),
        )
        collected: list[CodePattern] = []
        for kind, extractor in extractors:
            if pattern_types and kind not in pattern_types:
                continue
            collected.extend(extractor(code, quality_score, task_id, reasons))
        return collected

    def _scan_snippet(self, snippet: str) -> tuple[float, Any]:
        """Scan a snippet; return its (security_score, vulnerabilities)."""
        report = self.security_scanner.scan_code(code=snippet)
        return report["security_score"], report["vulnerabilities"]

    def _extract_function_anti_patterns(
        self,
        code: str,
        quality_score: float,
        task_id: str,
        failure_reasons: list[str],
    ) -> list[CodePattern]:
        """Extract one anti-pattern per function definition found in code."""
        results: list[CodePattern] = []

        # Matches 4-space-indented bodies; return annotations and multi-line
        # signatures are not captured by this regex.
        func_re = r"def\s+(\w+)\s*\([^)]*\):\s*\n((?:\s{4}.*\n?)*)"
        for match in re.finditer(func_re, code, re.MULTILINE):
            name = match.group(1)
            body = match.group(2)

            # Snippet is truncated so stored patterns stay small.
            snippet = f"def {name}(...):\n{body[:200]}"
            score, vulns = self._scan_snippet(snippet)

            # NOTE(review): hash() on str is salted per process
            # (PYTHONHASHSEED), so these ids are only stable within one
            # run — confirm they are never persisted across runs.
            results.append(
                CodePattern(
                    pattern_id=f"anti_func_{name}_{hash(body) % 10000}",
                    pattern_type="function",
                    code_snippet=snippet,
                    context=f"Anti-pattern Function: {name}",
                    quality_score=quality_score,
                    usage_count=1,
                    success_rate=0.0,  # Anti-patterns have 0 success rate
                    learned_from=[task_id],
                    security_score=score,
                    is_anti_pattern=True,
                    failure_reasons=failure_reasons.copy(),
                    rejection_count=0,
                    metadata={"vulnerabilities": vulns},
                )
            )
        return results

    def _extract_class_anti_patterns(
        self,
        code: str,
        quality_score: float,
        task_id: str,
        failure_reasons: list[str],
    ) -> list[CodePattern]:
        """Extract one anti-pattern per class definition found in code."""
        results: list[CodePattern] = []

        class_re = r"class\s+(\w+)(?:\([^)]+\))?:\s*\n((?:\s{4}.*\n?)*)"
        for match in re.finditer(class_re, code, re.MULTILINE):
            name = match.group(1)
            body = match.group(2)

            snippet = f"class {name}:\n{body[:200]}"
            score, vulns = self._scan_snippet(snippet)

            results.append(
                CodePattern(
                    pattern_id=f"anti_class_{name}_{hash(body) % 10000}",
                    pattern_type="class",
                    code_snippet=snippet,
                    context=f"Anti-pattern Class: {name}",
                    quality_score=quality_score,
                    usage_count=1,
                    success_rate=0.0,
                    learned_from=[task_id],
                    security_score=score,
                    is_anti_pattern=True,
                    failure_reasons=failure_reasons.copy(),
                    rejection_count=0,
                    metadata={"vulnerabilities": vulns},
                )
            )
        return results

    def _extract_import_anti_patterns(
        self,
        code: str,
        quality_score: float,
        task_id: str,
        failure_reasons: list[str],
    ) -> list[CodePattern]:
        """Extract a single anti-pattern covering the import statements."""
        import_re = r"^(?:from\s+[\w.]+|import\s+[\w.,\s]+)"
        found = [m.group(0).strip() for m in re.finditer(import_re, code, re.MULTILINE)]
        if not found:
            return []

        # All imports collapse into one pattern; only the first 10 are kept
        # in the snippet, but the id hashes the full set.
        snippet = "\n".join(found[:10])
        score, vulns = self._scan_snippet(snippet)

        return [
            CodePattern(
                pattern_id=f"anti_imports_{hash(''.join(found)) % 10000}",
                pattern_type="import",
                code_snippet=snippet,
                context="Anti-pattern Import statements",
                quality_score=quality_score,
                usage_count=1,
                success_rate=0.0,
                learned_from=[task_id],
                security_score=score,
                is_anti_pattern=True,
                failure_reasons=failure_reasons.copy(),
                rejection_count=0,
                metadata={"vulnerabilities": vulns},
            )
        ]

    def _extract_structural_anti_patterns(
        self,
        code: str,
        quality_score: float,
        task_id: str,
        failure_reasons: list[str],
    ) -> list[CodePattern]:
        """Extract a single anti-pattern covering decorator usage."""
        decorators = re.findall(r"@\w+(?:\([^)]*\))?", code)
        if not decorators:
            return []

        # First 5 decorators in the snippet; the id hashes all of them.
        snippet = "\n".join(decorators[:5])
        score, vulns = self._scan_snippet(snippet)

        return [
            CodePattern(
                pattern_id=f"anti_decorators_{hash(''.join(decorators)) % 10000}",
                pattern_type="structure",
                code_snippet=snippet,
                context="Anti-pattern Decorators",
                quality_score=quality_score,
                usage_count=1,
                success_rate=0.0,
                learned_from=[task_id],
                security_score=score,
                is_anti_pattern=True,
                failure_reasons=failure_reasons.copy(),
                rejection_count=0,
                metadata={"vulnerabilities": vulns},
            )
        ]

    def extract_from_failure(
        self,
        code: str,
        task_id: str,
        failure_reasons: list[str],
        quality_score: float = 0.0,
    ) -> list[CodePattern]:
        """
        Extract anti-patterns from a failed task.

        Args:
            code: Source code from failed task
            task_id: Task identifier
            failure_reasons: List of failure reasons
            quality_score: Quality score (default: 0.0 for failures)

        Returns:
            List of extracted anti-patterns
        """
        return self.extract_anti_patterns(
            code=code,
            quality_score=quality_score,
            task_id=task_id,
            failure_reasons=failure_reasons,
        )

    def extract_from_rejection(
        self,
        code: str,
        task_id: str,
        rejection_reason: str,
        quality_score: float = 0.5,
    ) -> list[CodePattern]:
        """
        Extract anti-patterns from user rejection.

        Args:
            code: Rejected code
            task_id: Task identifier
            rejection_reason: Reason for rejection
            quality_score: Quality score

        Returns:
            List of extracted anti-patterns
        """
        return self.extract_anti_patterns(
            code=code,
            quality_score=quality_score,
            task_id=task_id,
            failure_reasons=[f"User rejection: {rejection_reason}"],
        )

    def get_anti_patterns_for_context(
        self, context: str, pattern_type: str | None = None, limit: int = 5
    ) -> list[CodePattern]:
        """
        Get anti-patterns to avoid for a context.

        NOTE(review): ``context`` is not used for filtering — confirm intended.

        Args:
            context: Context string
            pattern_type: Optional pattern type filter
            limit: Maximum results

        Returns:
            List of anti-patterns to avoid, most-rejected / lowest-quality first
        """
        pool = [p for p in self.anti_patterns.values() if p.is_anti_pattern]
        if pattern_type:
            pool = [p for p in pool if p.pattern_type == pattern_type]

        # High rejection count first; among ties, lower quality first
        # (low quality = more important to avoid).
        ranked = sorted(
            pool, key=lambda p: (p.rejection_count, -p.quality_score), reverse=True
        )
        return ranked[:limit]
|
|
692
|
-
|
|
693
|
-
|
|
694
|
-
class PromptOptimizer:
|
|
695
|
-
"""Optimizes agent prompts based on outcomes."""
|
|
696
|
-
|
|
697
|
-
def __init__(self, hardware_profile: HardwareProfile):
|
|
698
|
-
"""
|
|
699
|
-
Initialize prompt optimizer.
|
|
700
|
-
|
|
701
|
-
Args:
|
|
702
|
-
hardware_profile: Hardware profile for optimization
|
|
703
|
-
"""
|
|
704
|
-
self.hardware_profile = hardware_profile
|
|
705
|
-
self.variants: dict[str, PromptVariant] = {}
|
|
706
|
-
self.base_prompt: str | None = None
|
|
707
|
-
|
|
708
|
-
def create_variant(
|
|
709
|
-
self,
|
|
710
|
-
base_prompt: str,
|
|
711
|
-
modifications: list[str],
|
|
712
|
-
variant_id: str | None = None,
|
|
713
|
-
) -> PromptVariant:
|
|
714
|
-
"""
|
|
715
|
-
Create a prompt variant for A/B testing.
|
|
716
|
-
|
|
717
|
-
Args:
|
|
718
|
-
base_prompt: Base prompt template
|
|
719
|
-
modifications: List of modifications to apply
|
|
720
|
-
variant_id: Optional variant identifier
|
|
721
|
-
|
|
722
|
-
Returns:
|
|
723
|
-
PromptVariant instance
|
|
724
|
-
"""
|
|
725
|
-
if variant_id is None:
|
|
726
|
-
variant_id = f"variant_{hash(''.join(modifications)) % 10000}"
|
|
727
|
-
|
|
728
|
-
# Apply modifications (simplified - in production would be more sophisticated)
|
|
729
|
-
modified_prompt = base_prompt
|
|
730
|
-
for mod in modifications:
|
|
731
|
-
if mod.startswith("add:"):
|
|
732
|
-
modified_prompt += f"\n{mod[4:]}"
|
|
733
|
-
elif mod.startswith("prepend:"):
|
|
734
|
-
modified_prompt = f"{mod[8:]}\n{modified_prompt}"
|
|
735
|
-
|
|
736
|
-
variant = PromptVariant(
|
|
737
|
-
variant_id=variant_id,
|
|
738
|
-
prompt_template=modified_prompt,
|
|
739
|
-
modifications=modifications,
|
|
740
|
-
test_count=0,
|
|
741
|
-
success_count=0,
|
|
742
|
-
average_quality=0.0,
|
|
743
|
-
created_at=datetime.now(UTC),
|
|
744
|
-
)
|
|
745
|
-
|
|
746
|
-
self.variants[variant_id] = variant
|
|
747
|
-
return variant
|
|
748
|
-
|
|
749
|
-
def record_test_result(self, variant_id: str, success: bool, quality_score: float):
|
|
750
|
-
"""
|
|
751
|
-
Record A/B test result for a variant.
|
|
752
|
-
|
|
753
|
-
Args:
|
|
754
|
-
variant_id: Variant identifier
|
|
755
|
-
success: Whether test succeeded
|
|
756
|
-
quality_score: Quality score
|
|
757
|
-
"""
|
|
758
|
-
if variant_id not in self.variants:
|
|
759
|
-
logger.warning(f"Variant {variant_id} not found")
|
|
760
|
-
return
|
|
761
|
-
|
|
762
|
-
variant = self.variants[variant_id]
|
|
763
|
-
variant.test_count += 1
|
|
764
|
-
if success:
|
|
765
|
-
variant.success_count += 1
|
|
766
|
-
|
|
767
|
-
# Update average quality (exponential moving average)
|
|
768
|
-
alpha = 0.1
|
|
769
|
-
variant.average_quality = (
|
|
770
|
-
alpha * quality_score + (1 - alpha) * variant.average_quality
|
|
771
|
-
)
|
|
772
|
-
variant.last_tested = datetime.now(UTC)
|
|
773
|
-
|
|
774
|
-
def get_best_variant(self, min_tests: int = 5) -> PromptVariant | None:
|
|
775
|
-
"""
|
|
776
|
-
Get the best performing variant.
|
|
777
|
-
|
|
778
|
-
Args:
|
|
779
|
-
min_tests: Minimum test count to consider
|
|
780
|
-
|
|
781
|
-
Returns:
|
|
782
|
-
Best PromptVariant if found, None otherwise
|
|
783
|
-
"""
|
|
784
|
-
candidates = [v for v in self.variants.values() if v.test_count >= min_tests]
|
|
785
|
-
|
|
786
|
-
if not candidates:
|
|
787
|
-
return None
|
|
788
|
-
|
|
789
|
-
# Sort by success rate and average quality
|
|
790
|
-
candidates.sort(
|
|
791
|
-
key=lambda v: (v.success_count / v.test_count, v.average_quality),
|
|
792
|
-
reverse=True,
|
|
793
|
-
)
|
|
794
|
-
|
|
795
|
-
return candidates[0]
|
|
796
|
-
|
|
797
|
-
def optimize_for_hardware(self, prompt: str) -> str:
|
|
798
|
-
"""
|
|
799
|
-
Optimize prompt for hardware profile.
|
|
800
|
-
|
|
801
|
-
Args:
|
|
802
|
-
prompt: Original prompt
|
|
803
|
-
|
|
804
|
-
Returns:
|
|
805
|
-
Optimized prompt
|
|
806
|
-
"""
|
|
807
|
-
# Workstation-like: return full prompt (hardware taxonomy removed)
|
|
808
|
-
return prompt
|
|
809
|
-
|
|
810
|
-
|
|
811
|
-
class NegativeFeedbackHandler:
|
|
812
|
-
"""Handles negative feedback, rejections, and corrections."""
|
|
813
|
-
|
|
814
|
-
def __init__(self, anti_pattern_extractor: AntiPatternExtractor | None = None):
|
|
815
|
-
"""
|
|
816
|
-
Initialize negative feedback handler.
|
|
817
|
-
|
|
818
|
-
Args:
|
|
819
|
-
anti_pattern_extractor: Optional anti-pattern extractor instance
|
|
820
|
-
"""
|
|
821
|
-
self.anti_pattern_extractor = anti_pattern_extractor or AntiPatternExtractor()
|
|
822
|
-
self.rejections: list[dict[str, Any]] = []
|
|
823
|
-
self.corrections: list[dict[str, Any]] = []
|
|
824
|
-
|
|
825
|
-
def record_rejection(
|
|
826
|
-
self,
|
|
827
|
-
code: str,
|
|
828
|
-
task_id: str,
|
|
829
|
-
reason: str,
|
|
830
|
-
quality_score: float = 0.5,
|
|
831
|
-
) -> list[CodePattern]:
|
|
832
|
-
"""
|
|
833
|
-
Record user rejection and extract anti-patterns.
|
|
834
|
-
|
|
835
|
-
Args:
|
|
836
|
-
code: Rejected code
|
|
837
|
-
task_id: Task identifier
|
|
838
|
-
reason: Reason for rejection
|
|
839
|
-
quality_score: Quality score
|
|
840
|
-
|
|
841
|
-
Returns:
|
|
842
|
-
List of extracted anti-patterns
|
|
843
|
-
"""
|
|
844
|
-
rejection = {
|
|
845
|
-
"task_id": task_id,
|
|
846
|
-
"reason": reason,
|
|
847
|
-
"quality_score": quality_score,
|
|
848
|
-
"timestamp": datetime.now(UTC).isoformat(),
|
|
849
|
-
}
|
|
850
|
-
self.rejections.append(rejection)
|
|
851
|
-
|
|
852
|
-
# Extract anti-patterns from rejected code
|
|
853
|
-
anti_patterns = self.anti_pattern_extractor.extract_from_rejection(
|
|
854
|
-
code=code,
|
|
855
|
-
task_id=task_id,
|
|
856
|
-
rejection_reason=reason,
|
|
857
|
-
quality_score=quality_score,
|
|
858
|
-
)
|
|
859
|
-
|
|
860
|
-
# Update rejection counts for existing anti-patterns
|
|
861
|
-
for anti_pattern in anti_patterns:
|
|
862
|
-
if anti_pattern.pattern_id in self.anti_pattern_extractor.anti_patterns:
|
|
863
|
-
existing = self.anti_pattern_extractor.anti_patterns[
|
|
864
|
-
anti_pattern.pattern_id
|
|
865
|
-
]
|
|
866
|
-
existing.rejection_count += 1
|
|
867
|
-
if reason not in existing.failure_reasons:
|
|
868
|
-
existing.failure_reasons.append(reason)
|
|
869
|
-
else:
|
|
870
|
-
# New anti-pattern from rejection should have rejection_count=1
|
|
871
|
-
anti_pattern.rejection_count = 1
|
|
872
|
-
self.anti_pattern_extractor.anti_patterns[
|
|
873
|
-
anti_pattern.pattern_id
|
|
874
|
-
] = anti_pattern
|
|
875
|
-
|
|
876
|
-
return anti_patterns
|
|
877
|
-
|
|
878
|
-
def record_correction(
|
|
879
|
-
self,
|
|
880
|
-
original_code: str,
|
|
881
|
-
corrected_code: str,
|
|
882
|
-
task_id: str,
|
|
883
|
-
correction_reason: str,
|
|
884
|
-
) -> tuple[list[CodePattern], list[CodePattern]]:
|
|
885
|
-
"""
|
|
886
|
-
Record user correction and extract both anti-patterns and patterns.
|
|
887
|
-
|
|
888
|
-
Args:
|
|
889
|
-
original_code: Original (incorrect) code
|
|
890
|
-
corrected_code: Corrected code
|
|
891
|
-
task_id: Task identifier
|
|
892
|
-
correction_reason: Reason for correction
|
|
893
|
-
|
|
894
|
-
Returns:
|
|
895
|
-
Tuple of (anti-patterns from original, patterns from corrected)
|
|
896
|
-
"""
|
|
897
|
-
correction = {
|
|
898
|
-
"task_id": task_id,
|
|
899
|
-
"reason": correction_reason,
|
|
900
|
-
"timestamp": datetime.now(UTC).isoformat(),
|
|
901
|
-
}
|
|
902
|
-
self.corrections.append(correction)
|
|
903
|
-
|
|
904
|
-
# Extract anti-patterns from original code
|
|
905
|
-
anti_patterns = self.anti_pattern_extractor.extract_from_rejection(
|
|
906
|
-
code=original_code,
|
|
907
|
-
task_id=task_id,
|
|
908
|
-
rejection_reason=f"Correction: {correction_reason}",
|
|
909
|
-
quality_score=0.3, # Low quality for incorrect code
|
|
910
|
-
)
|
|
911
|
-
|
|
912
|
-
# Extract patterns from corrected code (using PatternExtractor logic)
|
|
913
|
-
# Note: This would require access to PatternExtractor, but for now
|
|
914
|
-
# we'll just return the anti-patterns
|
|
915
|
-
return anti_patterns, []
|
|
916
|
-
|
|
917
|
-
def extract_anti_patterns_from_feedback(
|
|
918
|
-
self, code: str, task_id: str, feedback: str
|
|
919
|
-
) -> list[CodePattern]:
|
|
920
|
-
"""
|
|
921
|
-
Extract anti-patterns from feedback text.
|
|
922
|
-
|
|
923
|
-
Args:
|
|
924
|
-
code: Code that received feedback
|
|
925
|
-
task_id: Task identifier
|
|
926
|
-
feedback: Feedback text
|
|
927
|
-
|
|
928
|
-
Returns:
|
|
929
|
-
List of extracted anti-patterns
|
|
930
|
-
"""
|
|
931
|
-
return self.anti_pattern_extractor.extract_from_rejection(
|
|
932
|
-
code=code,
|
|
933
|
-
task_id=task_id,
|
|
934
|
-
rejection_reason=feedback,
|
|
935
|
-
quality_score=0.4,
|
|
936
|
-
)
|
|
937
|
-
|
|
938
|
-
def get_anti_patterns_for_context(
|
|
939
|
-
self, context: str, pattern_type: str | None = None, limit: int = 5
|
|
940
|
-
) -> list[CodePattern]:
|
|
941
|
-
"""
|
|
942
|
-
Get anti-patterns to avoid for a context.
|
|
943
|
-
|
|
944
|
-
Args:
|
|
945
|
-
context: Context string
|
|
946
|
-
pattern_type: Optional pattern type filter
|
|
947
|
-
limit: Maximum results
|
|
948
|
-
|
|
949
|
-
Returns:
|
|
950
|
-
List of anti-patterns to avoid
|
|
951
|
-
"""
|
|
952
|
-
return self.anti_pattern_extractor.get_anti_patterns_for_context(
|
|
953
|
-
context=context, pattern_type=pattern_type, limit=limit
|
|
954
|
-
)
|
|
955
|
-
|
|
956
|
-
|
|
957
|
-
class FailureModeAnalyzer:
    """Analyzes failure patterns and categorizes failures."""

    # Keyword tables checked in order; the first matching category wins.
    _KEYWORD_MODES = (
        ("syntax_error", ("syntax", "parse", "indentation", "syntaxerror")),
        ("security_issue", ("security", "vulnerability", "insecure", "bandit")),
        ("performance_issue", ("timeout", "slow", "performance", "efficiency")),
        ("logic_error", ("logic", "incorrect", "wrong", "bug", "error")),
    )

    # Canned prevention advice per failure mode; exact strings preserved.
    _PREVENTION_TIPS = {
        "syntax_error": [
            "Use syntax checking tools (e.g., Ruff, pylint) before code execution",
            "Review Python syntax rules and indentation",
        ],
        "security_issue": [
            "Run security scanning (Bandit) before learning patterns",
            "Review security best practices for the language",
            "Avoid insecure patterns (eval, exec, shell=True)",
        ],
        "performance_issue": [
            "Profile code to identify bottlenecks",
            "Review algorithm complexity and optimization opportunities",
            "Consider caching or lazy evaluation",
        ],
        "logic_error": [
            "Add unit tests to catch logic errors early",
            "Use type checking (mypy) to catch type-related issues",
            "Review code logic and edge cases",
        ],
        "complexity_issue": [
            "Refactor complex code into smaller functions",
            "Reduce nesting depth and cyclomatic complexity",
            "Use design patterns to simplify code structure",
        ],
        "maintainability_issue": [
            "Improve code documentation and naming",
            "Follow consistent coding style",
            "Reduce code duplication",
        ],
    }

    def __init__(self):
        """Initialize failure mode analyzer."""
        # Per-mode stats: {"count": int, "reasons": list[str], "task_ids": list[str]}
        self.failure_modes: dict[str, dict[str, Any]] = {}

    def analyze_failure(
        self,
        code: str,
        task_id: str,
        failure_reasons: list[str],
        quality_scores: dict[str, float] | None = None,
    ) -> dict[str, Any]:
        """
        Analyze a single failure.

        NOTE(review): ``code`` is currently unused; categorization relies
        only on ``failure_reasons`` and ``quality_scores``.

        Args:
            code: Failed code
            task_id: Task identifier
            failure_reasons: List of failure reasons
            quality_scores: Optional quality scores

        Returns:
            Analysis result with failure mode and suggestions
        """
        failure_mode = self.identify_failure_mode(failure_reasons, quality_scores)

        # Track failure mode statistics.
        stats = self.failure_modes.setdefault(
            failure_mode, {"count": 0, "reasons": [], "task_ids": []}
        )
        stats["count"] += 1
        stats["reasons"].extend(failure_reasons)
        stats["task_ids"].append(task_id)

        suggestions = self.suggest_prevention(failure_mode, failure_reasons)

        return {
            "failure_mode": failure_mode,
            "failure_reasons": failure_reasons,
            "suggestions": suggestions,
            "task_id": task_id,
        }

    def identify_failure_mode(
        self,
        failure_reasons: list[str],
        quality_scores: dict[str, float] | None = None,
    ) -> str:
        """
        Categorize failure into a failure mode.

        Keywords in the failure reasons take precedence; quality scores are
        only consulted when no keyword matches.

        Args:
            failure_reasons: List of failure reasons
            quality_scores: Optional quality scores

        Returns:
            Failure mode category
        """
        reasons_str = " ".join(failure_reasons).lower()

        for mode, keywords in self._KEYWORD_MODES:
            if any(keyword in reasons_str for keyword in keywords):
                return mode

        if quality_scores:
            # Fall back to score thresholds when no keyword matched.
            if quality_scores.get("security_score", 10.0) < 5.0:
                return "security_issue"
            if quality_scores.get("complexity_score", 0.0) > 8.0:
                return "complexity_issue"
            if quality_scores.get("maintainability_score", 10.0) < 5.0:
                return "maintainability_issue"
            return "quality_issue"
        return "unknown_failure"

    def get_common_failure_modes(self, limit: int = 5) -> list[dict[str, Any]]:
        """
        Get most common failure modes.

        Args:
            limit: Maximum results

        Returns:
            List of failure mode statistics, most frequent first
        """
        ranked = sorted(
            self.failure_modes.items(),
            key=lambda item: item[1]["count"],
            reverse=True,
        )
        return [
            {
                "mode": mode,
                "count": stats["count"],
                # Unique reasons in first-seen order, limit 5. Fixed: was
                # list(set(...)), whose order is nondeterministic under
                # per-process hash randomization.
                "reasons": list(dict.fromkeys(stats["reasons"]))[:5],
            }
            for mode, stats in ranked[:limit]
        ]

    def suggest_prevention(
        self, failure_mode: str, failure_reasons: list[str]
    ) -> list[str]:
        """
        Suggest how to prevent this type of failure.

        NOTE(review): ``failure_reasons`` is currently unused; suggestions
        depend only on the mode.

        Args:
            failure_mode: Failure mode category
            failure_reasons: List of failure reasons

        Returns:
            List of prevention suggestions (fresh list each call)
        """
        default = [
            "Review failure reasons and improve code quality",
            "Add more comprehensive testing",
        ]
        return list(self._PREVENTION_TIPS.get(failure_mode, default))
|
|
1127
|
-
|
|
1128
|
-
|
|
1129
|
-
class FeedbackAnalyzer:
|
|
1130
|
-
"""Analyzes feedback from code scoring system and user input."""
|
|
1131
|
-
|
|
1132
|
-
def __init__(self):
|
|
1133
|
-
"""Initialize feedback analyzer."""
|
|
1134
|
-
self.feedback_history: list[dict[str, Any]] = []
|
|
1135
|
-
|
|
1136
|
-
def analyze_code_scores(
|
|
1137
|
-
self, scores: dict[str, float], threshold: float = 0.7
|
|
1138
|
-
) -> dict[str, Any]:
|
|
1139
|
-
"""
|
|
1140
|
-
Analyze code scoring results.
|
|
1141
|
-
|
|
1142
|
-
Args:
|
|
1143
|
-
scores: Dictionary of metric scores
|
|
1144
|
-
threshold: Quality threshold
|
|
1145
|
-
|
|
1146
|
-
Returns:
|
|
1147
|
-
Analysis results
|
|
1148
|
-
"""
|
|
1149
|
-
overall_score = scores.get("overall_score", 0.0)
|
|
1150
|
-
metrics_obj: object = scores.get("metrics", {})
|
|
1151
|
-
metrics: dict[str, float] = {}
|
|
1152
|
-
if isinstance(metrics_obj, dict):
|
|
1153
|
-
for metric, score in metrics_obj.items():
|
|
1154
|
-
if isinstance(score, (int, float)):
|
|
1155
|
-
metrics[str(metric)] = float(score)
|
|
1156
|
-
|
|
1157
|
-
# Identify weak areas
|
|
1158
|
-
weak_areas = [metric for metric, score in metrics.items() if score < threshold]
|
|
1159
|
-
|
|
1160
|
-
# Calculate improvement potential
|
|
1161
|
-
improvement_potential = {}
|
|
1162
|
-
for metric, score in metrics.items():
|
|
1163
|
-
if score < threshold:
|
|
1164
|
-
improvement_potential[metric] = threshold - score
|
|
1165
|
-
|
|
1166
|
-
analysis = {
|
|
1167
|
-
"overall_score": overall_score,
|
|
1168
|
-
"weak_areas": weak_areas,
|
|
1169
|
-
"improvement_potential": improvement_potential,
|
|
1170
|
-
"meets_threshold": overall_score >= threshold,
|
|
1171
|
-
"timestamp": datetime.now(UTC).isoformat(),
|
|
1172
|
-
}
|
|
1173
|
-
|
|
1174
|
-
self.feedback_history.append(analysis)
|
|
1175
|
-
return analysis
|
|
1176
|
-
|
|
1177
|
-
def correlate_prompt_changes(
|
|
1178
|
-
self, prompt_variants: list[str], quality_scores: list[float]
|
|
1179
|
-
) -> dict[str, float]:
|
|
1180
|
-
"""
|
|
1181
|
-
Correlate prompt changes with quality improvements.
|
|
1182
|
-
|
|
1183
|
-
Args:
|
|
1184
|
-
prompt_variants: List of prompt variant identifiers
|
|
1185
|
-
quality_scores: Corresponding quality scores
|
|
1186
|
-
|
|
1187
|
-
Returns:
|
|
1188
|
-
Dictionary mapping variants to quality scores
|
|
1189
|
-
"""
|
|
1190
|
-
correlations = {}
|
|
1191
|
-
for variant, score in zip(prompt_variants, quality_scores, strict=False):
|
|
1192
|
-
correlations[variant] = score
|
|
1193
|
-
|
|
1194
|
-
return correlations
|
|
1195
|
-
|
|
1196
|
-
def get_improvement_suggestions(self, analysis: dict[str, Any]) -> list[str]:
|
|
1197
|
-
"""
|
|
1198
|
-
Get improvement suggestions based on analysis.
|
|
1199
|
-
|
|
1200
|
-
Args:
|
|
1201
|
-
analysis: Analysis results
|
|
1202
|
-
|
|
1203
|
-
Returns:
|
|
1204
|
-
List of improvement suggestions
|
|
1205
|
-
"""
|
|
1206
|
-
suggestions = []
|
|
1207
|
-
|
|
1208
|
-
weak_areas = analysis.get("weak_areas", [])
|
|
1209
|
-
improvement_potential = analysis.get("improvement_potential", {})
|
|
1210
|
-
|
|
1211
|
-
for area in weak_areas:
|
|
1212
|
-
potential = improvement_potential.get(area, 0.0)
|
|
1213
|
-
if potential > 0.1:
|
|
1214
|
-
suggestions.append(
|
|
1215
|
-
f"Focus on improving {area} (potential: {potential:.2f})"
|
|
1216
|
-
)
|
|
1217
|
-
|
|
1218
|
-
if not suggestions:
|
|
1219
|
-
suggestions.append("Code quality is good, maintain current patterns")
|
|
1220
|
-
|
|
1221
|
-
return suggestions
|
|
1222
|
-
|
|
1223
|
-
|
|
1224
|
-
class AgentLearner:
    """
    Core learning engine for agents.

    Integrates pattern extraction, prompt optimization, and feedback analysis.
    """

    def __init__(
        self,
        capability_registry: CapabilityRegistry,
        expert_registry: Any,  # ExpertRegistry (required)
        memory_system: TaskMemorySystem | None = None,
        hardware_profile: HardwareProfile | None = None,
    ):
        """
        Initialize agent learner.

        Args:
            capability_registry: Capability registry
            expert_registry: Expert registry for best practices consultation (required)
            memory_system: Optional task memory system
            hardware_profile: Hardware profile (auto-detected if None)
        """
        # Auto-detect the hardware profile when the caller did not supply one.
        if hardware_profile is None:
            profiler = HardwareProfiler()
            hardware_profile = profiler.detect_profile()

        self.capability_registry = capability_registry
        self.memory_system = memory_system
        self.hardware_profile = hardware_profile
        # Intensity is derived from the registry; must run after the
        # capability_registry assignment above.
        self.learning_intensity = self._get_learning_intensity()

        # One scanner instance is shared by both extractors below.
        security_scanner = SecurityScanner()

        self.pattern_extractor = PatternExtractor(security_scanner=security_scanner)
        self.anti_pattern_extractor = AntiPatternExtractor(
            security_scanner=security_scanner
        )
        self.negative_feedback_handler = NegativeFeedbackHandler(
            anti_pattern_extractor=self.anti_pattern_extractor
        )
        self.failure_mode_analyzer = FailureModeAnalyzer()
        self.prompt_optimizer = PromptOptimizer(hardware_profile)
        self.feedback_analyzer = FeedbackAnalyzer()

        # Explainability components: decision logging, pattern-selection
        # explanations, and before/after impact reporting.
        self.decision_logger = DecisionReasoningLogger()
        self.pattern_explainer = PatternSelectionExplainer()
        self.impact_reporter = LearningImpactReporter()

        # Meta-learning components: effectiveness tracking, self-assessment,
        # adaptive rate control, and strategy selection.
        self.effectiveness_tracker = LearningEffectivenessTracker()
        self.self_assessor = LearningSelfAssessor()
        self.adaptive_rate = AdaptiveLearningRate()
        self.strategy_selector = LearningStrategySelector()
        self.current_strategy = LearningStrategy.BALANCED

        # Decision engine (always required) wired with explainability so every
        # adaptive decision is logged through self.decision_logger.
        best_practice_consultant = BestPracticeConsultant(expert_registry)
        confidence_calculator = LearningConfidenceCalculator()
        self.decision_engine = LearningDecisionEngine(
            capability_registry=self.capability_registry,
            best_practice_consultant=best_practice_consultant,
            confidence_calculator=confidence_calculator,
            decision_logger=self.decision_logger,
        )

        # Dashboard aggregates state from the components created above.
        self.dashboard = LearningDashboard(
            capability_registry=self.capability_registry,
            pattern_extractor=self.pattern_extractor,
            anti_pattern_extractor=self.anti_pattern_extractor,
            decision_logger=self.decision_logger,
            impact_reporter=self.impact_reporter,
        )
|
|
1300
|
-
|
|
1301
|
-
    def _get_learning_intensity(self) -> LearningIntensity:
        """Get learning intensity based on hardware."""
        # The registry owns the hardware-to-intensity mapping; delegate to it.
        return self.capability_registry.get_learning_intensity()
|
|
1304
|
-
|
|
1305
|
-
async def learn_from_task(
|
|
1306
|
-
self,
|
|
1307
|
-
capability_id: str,
|
|
1308
|
-
task_id: str,
|
|
1309
|
-
code: str | None = None,
|
|
1310
|
-
quality_scores: dict[str, float] | None = None,
|
|
1311
|
-
success: bool = True,
|
|
1312
|
-
duration: float = 0.0,
|
|
1313
|
-
) -> dict[str, Any]:
|
|
1314
|
-
"""
|
|
1315
|
-
Learn from a completed task.
|
|
1316
|
-
|
|
1317
|
-
Args:
|
|
1318
|
-
capability_id: Capability identifier
|
|
1319
|
-
task_id: Task identifier
|
|
1320
|
-
code: Optional source code
|
|
1321
|
-
quality_scores: Optional code scoring results
|
|
1322
|
-
success: Whether task succeeded
|
|
1323
|
-
duration: Task duration
|
|
1324
|
-
|
|
1325
|
-
Returns:
|
|
1326
|
-
Learning results
|
|
1327
|
-
"""
|
|
1328
|
-
results: dict[str, Any] = {
|
|
1329
|
-
"patterns_extracted": 0,
|
|
1330
|
-
"anti_patterns_extracted": 0,
|
|
1331
|
-
"prompt_optimized": False,
|
|
1332
|
-
"feedback_analyzed": False,
|
|
1333
|
-
"security_checked": False,
|
|
1334
|
-
"security_score": 0.0,
|
|
1335
|
-
"security_vulnerabilities": [],
|
|
1336
|
-
"failure_analyzed": False,
|
|
1337
|
-
}
|
|
1338
|
-
patterns: list[CodePattern] = []
|
|
1339
|
-
anti_patterns: list[CodePattern] = []
|
|
1340
|
-
|
|
1341
|
-
# Get before metrics for tracking
|
|
1342
|
-
metric = self.capability_registry.get_capability(capability_id)
|
|
1343
|
-
before_metrics = {
|
|
1344
|
-
"quality_score": metric.quality_score if metric else 0.0,
|
|
1345
|
-
"success_rate": metric.success_rate if metric else 0.0,
|
|
1346
|
-
"usage_count": metric.usage_count if metric else 0,
|
|
1347
|
-
}
|
|
1348
|
-
|
|
1349
|
-
# Extract quality score
|
|
1350
|
-
quality_score = 0.5
|
|
1351
|
-
if quality_scores:
|
|
1352
|
-
quality_score = (
|
|
1353
|
-
quality_scores.get("overall_score", 50.0) / 100.0
|
|
1354
|
-
) # Normalize to 0-1 (assumes 0-100 scale)
|
|
1355
|
-
|
|
1356
|
-
# Security check before learning
|
|
1357
|
-
security_scanner = SecurityScanner()
|
|
1358
|
-
security_result = None
|
|
1359
|
-
if code:
|
|
1360
|
-
security_result = security_scanner.scan_code(code=code)
|
|
1361
|
-
results["security_checked"] = True
|
|
1362
|
-
results["security_score"] = security_result["security_score"]
|
|
1363
|
-
results["security_vulnerabilities"] = security_result["vulnerabilities"]
|
|
1364
|
-
|
|
1365
|
-
# Skip pattern extraction if security score is too low
|
|
1366
|
-
# But continue to extract anti-patterns from vulnerable code
|
|
1367
|
-
if not security_scanner.is_safe_for_learning(
|
|
1368
|
-
code=code, threshold=self.pattern_extractor.security_threshold
|
|
1369
|
-
):
|
|
1370
|
-
logger.debug(
|
|
1371
|
-
f"Skipping pattern extraction for task {task_id}: "
|
|
1372
|
-
f"security score {security_result['security_score']:.2f} "
|
|
1373
|
-
f"below threshold {self.pattern_extractor.security_threshold:.2f}"
|
|
1374
|
-
)
|
|
1375
|
-
# Still update capability metrics even if we skip pattern extraction
|
|
1376
|
-
self.capability_registry.update_capability_metrics(
|
|
1377
|
-
capability_id=capability_id,
|
|
1378
|
-
success=success,
|
|
1379
|
-
duration=duration,
|
|
1380
|
-
quality_score=quality_score,
|
|
1381
|
-
)
|
|
1382
|
-
# Don't return early - continue to extract anti-patterns from vulnerable code
|
|
1383
|
-
# The anti-pattern extraction code below will handle low-quality/vulnerable code
|
|
1384
|
-
|
|
1385
|
-
# Update capability metrics
|
|
1386
|
-
self.capability_registry.update_capability_metrics(
|
|
1387
|
-
capability_id=capability_id,
|
|
1388
|
-
success=success,
|
|
1389
|
-
duration=duration,
|
|
1390
|
-
quality_score=quality_score,
|
|
1391
|
-
)
|
|
1392
|
-
|
|
1393
|
-
# Extract patterns if code provided and quality is good
|
|
1394
|
-
# Use decision engine for adaptive threshold, but ensure high-quality code is always extracted
|
|
1395
|
-
should_extract_patterns = False
|
|
1396
|
-
if code and self.learning_intensity != LearningIntensity.LOW:
|
|
1397
|
-
# Base threshold: extract if quality >= 0.7 (normalized 0-1 scale)
|
|
1398
|
-
base_threshold = 0.7
|
|
1399
|
-
|
|
1400
|
-
# Use decision engine to get adaptive threshold (if available)
|
|
1401
|
-
metric = self.capability_registry.get_capability(capability_id)
|
|
1402
|
-
learned_data = {
|
|
1403
|
-
"usage_count": metric.usage_count if metric else 0,
|
|
1404
|
-
"success_rate": metric.success_rate if metric else 0.0,
|
|
1405
|
-
"quality_score": quality_score,
|
|
1406
|
-
"value": quality_score, # The threshold value we're considering
|
|
1407
|
-
"context_relevance": 1.0,
|
|
1408
|
-
}
|
|
1409
|
-
context = {
|
|
1410
|
-
"hardware_profile": (
|
|
1411
|
-
self.hardware_profile.profile_type.value
|
|
1412
|
-
if hasattr(self.hardware_profile, "profile_type")
|
|
1413
|
-
else "unknown"
|
|
1414
|
-
),
|
|
1415
|
-
"learning_intensity": self.learning_intensity.value,
|
|
1416
|
-
"task_id": task_id,
|
|
1417
|
-
"capability_id": capability_id,
|
|
1418
|
-
}
|
|
1419
|
-
|
|
1420
|
-
# Use decision engine (always available)
|
|
1421
|
-
decision = await self.decision_engine.make_decision(
|
|
1422
|
-
decision_type="pattern_extraction_threshold",
|
|
1423
|
-
learned_data=learned_data,
|
|
1424
|
-
context=context,
|
|
1425
|
-
default_value=base_threshold,
|
|
1426
|
-
)
|
|
1427
|
-
|
|
1428
|
-
# Determine threshold value from decision result
|
|
1429
|
-
threshold_value = base_threshold # Default
|
|
1430
|
-
if decision.result.value is not None:
|
|
1431
|
-
if isinstance(decision.result.value, (int, float)):
|
|
1432
|
-
threshold_value = float(decision.result.value)
|
|
1433
|
-
elif isinstance(decision.result.value, str):
|
|
1434
|
-
# Try to extract numeric value from string
|
|
1435
|
-
import re
|
|
1436
|
-
match = re.search(r'(\d+\.?\d*)', str(decision.result.value))
|
|
1437
|
-
if match:
|
|
1438
|
-
threshold_value = float(match.group(1))
|
|
1439
|
-
|
|
1440
|
-
# Extract patterns if quality_score meets threshold
|
|
1441
|
-
# For new capabilities (low learned_confidence), still extract if quality is high
|
|
1442
|
-
should_extract_patterns = quality_score >= threshold_value
|
|
1443
|
-
|
|
1444
|
-
# Fallback: if quality is very high (>= 0.8), always extract regardless of decision
|
|
1445
|
-
if quality_score >= 0.8:
|
|
1446
|
-
should_extract_patterns = True
|
|
1447
|
-
|
|
1448
|
-
logger.debug(
|
|
1449
|
-
f"Pattern extraction decision: quality_score={quality_score:.3f}, "
|
|
1450
|
-
f"threshold={threshold_value:.3f}, should_extract={should_extract_patterns}"
|
|
1451
|
-
)
|
|
1452
|
-
|
|
1453
|
-
if should_extract_patterns and code is not None:
|
|
1454
|
-
logger.debug(f"Attempting to extract patterns from code (length={len(code)})")
|
|
1455
|
-
patterns = self.pattern_extractor.extract_patterns(
|
|
1456
|
-
code=code, quality_score=quality_score, task_id=task_id
|
|
1457
|
-
)
|
|
1458
|
-
logger.debug(f"Extracted {len(patterns)} patterns from code")
|
|
1459
|
-
|
|
1460
|
-
# Store patterns
|
|
1461
|
-
for pattern in patterns:
|
|
1462
|
-
if pattern.pattern_id not in self.pattern_extractor.patterns:
|
|
1463
|
-
self.pattern_extractor.patterns[pattern.pattern_id] = pattern
|
|
1464
|
-
else:
|
|
1465
|
-
# Update existing pattern
|
|
1466
|
-
existing = self.pattern_extractor.patterns[pattern.pattern_id]
|
|
1467
|
-
existing.usage_count += 1
|
|
1468
|
-
existing.learned_from.append(task_id)
|
|
1469
|
-
|
|
1470
|
-
results["patterns_extracted"] = len(patterns)
|
|
1471
|
-
|
|
1472
|
-
# Handle failures: extract anti-patterns
|
|
1473
|
-
if not success and code:
|
|
1474
|
-
failure_reasons = [f"Task {task_id} failed"]
|
|
1475
|
-
if quality_scores:
|
|
1476
|
-
# Add quality-based failure reasons
|
|
1477
|
-
if quality_scores.get("security_score", 10.0) < 5.0:
|
|
1478
|
-
failure_reasons.append("Low security score")
|
|
1479
|
-
if quality_scores.get("overall_score", 100.0) < 50.0:
|
|
1480
|
-
failure_reasons.append("Low overall quality score")
|
|
1481
|
-
|
|
1482
|
-
# Extract anti-patterns from failed code
|
|
1483
|
-
anti_patterns = self.anti_pattern_extractor.extract_from_failure(
|
|
1484
|
-
code=code,
|
|
1485
|
-
task_id=task_id,
|
|
1486
|
-
failure_reasons=failure_reasons,
|
|
1487
|
-
quality_score=quality_score,
|
|
1488
|
-
)
|
|
1489
|
-
|
|
1490
|
-
# Store anti-patterns
|
|
1491
|
-
for anti_pattern in anti_patterns:
|
|
1492
|
-
if anti_pattern.pattern_id not in self.anti_pattern_extractor.anti_patterns:
|
|
1493
|
-
self.anti_pattern_extractor.anti_patterns[
|
|
1494
|
-
anti_pattern.pattern_id
|
|
1495
|
-
] = anti_pattern
|
|
1496
|
-
else:
|
|
1497
|
-
# Update existing anti-pattern
|
|
1498
|
-
existing = self.anti_pattern_extractor.anti_patterns[
|
|
1499
|
-
anti_pattern.pattern_id
|
|
1500
|
-
]
|
|
1501
|
-
existing.usage_count += 1
|
|
1502
|
-
existing.learned_from.append(task_id)
|
|
1503
|
-
|
|
1504
|
-
results["anti_patterns_extracted"] = len(anti_patterns)
|
|
1505
|
-
|
|
1506
|
-
# Analyze failure mode
|
|
1507
|
-
failure_analysis = self.failure_mode_analyzer.analyze_failure(
|
|
1508
|
-
code=code,
|
|
1509
|
-
task_id=task_id,
|
|
1510
|
-
failure_reasons=failure_reasons,
|
|
1511
|
-
quality_scores=quality_scores,
|
|
1512
|
-
)
|
|
1513
|
-
results["failure_analyzed"] = True
|
|
1514
|
-
results["failure_analysis"] = failure_analysis
|
|
1515
|
-
|
|
1516
|
-
# Also extract anti-patterns from low-quality code (even if success=True)
|
|
1517
|
-
logger.debug(f"Anti-pattern extraction check: code={code is not None}, quality_score={quality_score}, threshold={self.anti_pattern_extractor.max_quality_threshold}, success={success}")
|
|
1518
|
-
if code and quality_score < self.anti_pattern_extractor.max_quality_threshold:
|
|
1519
|
-
failure_reasons = [f"Low quality score: {quality_score:.2f}"]
|
|
1520
|
-
anti_patterns = self.anti_pattern_extractor.extract_anti_patterns(
|
|
1521
|
-
code=code,
|
|
1522
|
-
quality_score=quality_score,
|
|
1523
|
-
task_id=task_id,
|
|
1524
|
-
failure_reasons=failure_reasons,
|
|
1525
|
-
)
|
|
1526
|
-
|
|
1527
|
-
# Store anti-patterns
|
|
1528
|
-
for anti_pattern in anti_patterns:
|
|
1529
|
-
if anti_pattern.pattern_id not in self.anti_pattern_extractor.anti_patterns:
|
|
1530
|
-
self.anti_pattern_extractor.anti_patterns[
|
|
1531
|
-
anti_pattern.pattern_id
|
|
1532
|
-
] = anti_pattern
|
|
1533
|
-
|
|
1534
|
-
results["anti_patterns_extracted"] = len(anti_patterns)
|
|
1535
|
-
|
|
1536
|
-
# Analyze feedback if scores provided
|
|
1537
|
-
if quality_scores:
|
|
1538
|
-
analysis = self.feedback_analyzer.analyze_code_scores(quality_scores)
|
|
1539
|
-
results["feedback_analyzed"] = True
|
|
1540
|
-
results["feedback_analysis"] = analysis
|
|
1541
|
-
|
|
1542
|
-
# Check if improvement is needed
|
|
1543
|
-
if not analysis.get("meets_threshold", False):
|
|
1544
|
-
suggestions = self.feedback_analyzer.get_improvement_suggestions(
|
|
1545
|
-
analysis
|
|
1546
|
-
)
|
|
1547
|
-
results["improvement_suggestions"] = suggestions
|
|
1548
|
-
|
|
1549
|
-
# Store in memory system if available
|
|
1550
|
-
if self.memory_system and success:
|
|
1551
|
-
outcome = TaskOutcome.SUCCESS if success else TaskOutcome.FAILURE
|
|
1552
|
-
self.memory_system.store_memory(
|
|
1553
|
-
task_id=task_id,
|
|
1554
|
-
agent_id="unknown", # Would be provided by agent
|
|
1555
|
-
command=capability_id,
|
|
1556
|
-
outcome=outcome,
|
|
1557
|
-
quality_score=quality_score,
|
|
1558
|
-
patterns_used=[p.pattern_id for p in patterns] if code else [],
|
|
1559
|
-
)
|
|
1560
|
-
|
|
1561
|
-
# Track learning effectiveness
|
|
1562
|
-
metric_after = self.capability_registry.get_capability(capability_id)
|
|
1563
|
-
after_metrics = {
|
|
1564
|
-
"quality_score": metric_after.quality_score if metric_after else 0.0,
|
|
1565
|
-
"success_rate": metric_after.success_rate if metric_after else 0.0,
|
|
1566
|
-
"usage_count": metric_after.usage_count if metric_after else 0,
|
|
1567
|
-
}
|
|
1568
|
-
|
|
1569
|
-
# Track effectiveness
|
|
1570
|
-
session = self.effectiveness_tracker.track_effectiveness(
|
|
1571
|
-
capability_id=capability_id,
|
|
1572
|
-
before_metrics=before_metrics,
|
|
1573
|
-
after_metrics=after_metrics,
|
|
1574
|
-
strategies_used=[self.current_strategy.value],
|
|
1575
|
-
)
|
|
1576
|
-
|
|
1577
|
-
# Adjust learning rate based on effectiveness
|
|
1578
|
-
rate_adjustment = self.adaptive_rate.adjust_learning_intensity(
|
|
1579
|
-
session.effectiveness_score
|
|
1580
|
-
)
|
|
1581
|
-
results["learning_rate_adjustment"] = rate_adjustment
|
|
1582
|
-
|
|
1583
|
-
# Generate impact report
|
|
1584
|
-
impact_report = self.impact_reporter.generate_impact_report(
|
|
1585
|
-
capability_id=capability_id,
|
|
1586
|
-
before_metrics=before_metrics,
|
|
1587
|
-
after_metrics=after_metrics,
|
|
1588
|
-
learning_session_id=session.session_id,
|
|
1589
|
-
)
|
|
1590
|
-
results["learning_impact"] = impact_report
|
|
1591
|
-
|
|
1592
|
-
return results
|
|
1593
|
-
|
|
1594
|
-
async def learn_from_rejection(
|
|
1595
|
-
self,
|
|
1596
|
-
capability_id: str,
|
|
1597
|
-
task_id: str,
|
|
1598
|
-
code: str,
|
|
1599
|
-
rejection_reason: str,
|
|
1600
|
-
quality_score: float = 0.5,
|
|
1601
|
-
) -> dict[str, Any]:
|
|
1602
|
-
"""
|
|
1603
|
-
Learn from user rejection.
|
|
1604
|
-
|
|
1605
|
-
Args:
|
|
1606
|
-
capability_id: Capability identifier
|
|
1607
|
-
task_id: Task identifier
|
|
1608
|
-
code: Rejected code
|
|
1609
|
-
rejection_reason: Reason for rejection
|
|
1610
|
-
quality_score: Quality score
|
|
1611
|
-
|
|
1612
|
-
Returns:
|
|
1613
|
-
Learning results
|
|
1614
|
-
"""
|
|
1615
|
-
results: dict[str, Any] = {
|
|
1616
|
-
"anti_patterns_extracted": 0,
|
|
1617
|
-
"rejection_recorded": False,
|
|
1618
|
-
}
|
|
1619
|
-
|
|
1620
|
-
# Record rejection and extract anti-patterns
|
|
1621
|
-
anti_patterns = self.negative_feedback_handler.record_rejection(
|
|
1622
|
-
code=code,
|
|
1623
|
-
task_id=task_id,
|
|
1624
|
-
reason=rejection_reason,
|
|
1625
|
-
quality_score=quality_score,
|
|
1626
|
-
)
|
|
1627
|
-
|
|
1628
|
-
results["anti_patterns_extracted"] = len(anti_patterns)
|
|
1629
|
-
results["rejection_recorded"] = True
|
|
1630
|
-
|
|
1631
|
-
# Update capability metrics (rejection counts as failure)
|
|
1632
|
-
self.capability_registry.update_capability_metrics(
|
|
1633
|
-
capability_id=capability_id,
|
|
1634
|
-
success=False,
|
|
1635
|
-
duration=0.0,
|
|
1636
|
-
quality_score=quality_score,
|
|
1637
|
-
)
|
|
1638
|
-
|
|
1639
|
-
return results
|
|
1640
|
-
|
|
1641
|
-
def get_learned_patterns(
|
|
1642
|
-
self,
|
|
1643
|
-
context: str,
|
|
1644
|
-
pattern_type: str | None = None,
|
|
1645
|
-
limit: int = 5,
|
|
1646
|
-
exclude_anti_patterns: bool = True,
|
|
1647
|
-
) -> list[CodePattern]:
|
|
1648
|
-
"""
|
|
1649
|
-
Get learned patterns for a context.
|
|
1650
|
-
|
|
1651
|
-
Args:
|
|
1652
|
-
context: Context string
|
|
1653
|
-
pattern_type: Optional pattern type filter
|
|
1654
|
-
limit: Maximum results
|
|
1655
|
-
exclude_anti_patterns: If True, exclude anti-patterns (default: True)
|
|
1656
|
-
|
|
1657
|
-
Returns:
|
|
1658
|
-
List of relevant patterns
|
|
1659
|
-
"""
|
|
1660
|
-
return self.pattern_extractor.get_patterns_for_context(
|
|
1661
|
-
context=context,
|
|
1662
|
-
pattern_type=pattern_type,
|
|
1663
|
-
limit=limit,
|
|
1664
|
-
exclude_anti_patterns=exclude_anti_patterns,
|
|
1665
|
-
)
|
|
1666
|
-
|
|
1667
|
-
def optimize_prompt(self, base_prompt: str, context: str | None = None) -> str:
|
|
1668
|
-
"""
|
|
1669
|
-
Get optimized prompt.
|
|
1670
|
-
|
|
1671
|
-
Args:
|
|
1672
|
-
base_prompt: Base prompt
|
|
1673
|
-
context: Optional context
|
|
1674
|
-
|
|
1675
|
-
Returns:
|
|
1676
|
-
Optimized prompt
|
|
1677
|
-
"""
|
|
1678
|
-
# Get best variant if available
|
|
1679
|
-
best_variant = self.prompt_optimizer.get_best_variant()
|
|
1680
|
-
if best_variant and best_variant.average_quality > 0.7:
|
|
1681
|
-
return best_variant.prompt_template
|
|
1682
|
-
|
|
1683
|
-
# Otherwise, optimize for hardware
|
|
1684
|
-
return self.prompt_optimizer.optimize_for_hardware(base_prompt)
|
|
1685
|
-
|
|
1686
|
-
def explain_learning(
|
|
1687
|
-
self,
|
|
1688
|
-
capability_id: str,
|
|
1689
|
-
task_id: str | None = None,
|
|
1690
|
-
decision_id: str | None = None,
|
|
1691
|
-
) -> dict[str, Any]:
|
|
1692
|
-
"""
|
|
1693
|
-
Generate explanation for learning process.
|
|
1694
|
-
|
|
1695
|
-
Args:
|
|
1696
|
-
capability_id: Capability identifier
|
|
1697
|
-
task_id: Optional task identifier
|
|
1698
|
-
decision_id: Optional decision identifier
|
|
1699
|
-
|
|
1700
|
-
Returns:
|
|
1701
|
-
Explanation dictionary
|
|
1702
|
-
"""
|
|
1703
|
-
explanation: dict[str, Any] = {
|
|
1704
|
-
"capability_id": capability_id,
|
|
1705
|
-
"task_id": task_id,
|
|
1706
|
-
}
|
|
1707
|
-
|
|
1708
|
-
# Get decision explanation if decision_id provided
|
|
1709
|
-
if decision_id:
|
|
1710
|
-
decision_explanation = self.decision_logger.explain_decision(decision_id)
|
|
1711
|
-
if decision_explanation:
|
|
1712
|
-
explanation["decision"] = decision_explanation
|
|
1713
|
-
|
|
1714
|
-
# Get pattern selection explanation
|
|
1715
|
-
patterns = self.get_learned_patterns(
|
|
1716
|
-
context=capability_id, exclude_anti_patterns=True
|
|
1717
|
-
)
|
|
1718
|
-
if patterns:
|
|
1719
|
-
pattern_explanation = self.pattern_explainer.explain_pattern_selection(
|
|
1720
|
-
selected_patterns=patterns, context=capability_id
|
|
1721
|
-
)
|
|
1722
|
-
explanation["pattern_selection"] = pattern_explanation
|
|
1723
|
-
|
|
1724
|
-
# Get decision statistics
|
|
1725
|
-
explanation["decision_statistics"] = self.decision_logger.get_decision_statistics()
|
|
1726
|
-
|
|
1727
|
-
return explanation
|
|
1728
|
-
|
|
1729
|
-
    async def optimize_learning(
        self, capability_id: str | None = None
    ) -> dict[str, Any]:
        """
        Run meta-learning optimization.

        Assesses learning quality, identifies gaps, selects the optimal
        learning strategy (switching to it when the selector approves),
        and tunes the pattern-extraction threshold.

        Args:
            capability_id: Optional filter by capability

        Returns:
            Optimization report
        """
        optimization: dict[str, Any] = {
            "timestamp": datetime.now(UTC).isoformat(),
            "capability_id": capability_id,
        }

        # Assess learning quality from current pattern inventories.
        pattern_count = len(self.pattern_extractor.patterns)
        anti_pattern_count = len(self.anti_pattern_extractor.anti_patterns)

        # Calculate average quality and security over stored patterns.
        avg_quality = 0.0
        avg_security = 0.0
        if self.pattern_extractor.patterns:
            total_quality = sum(p.quality_score for p in self.pattern_extractor.patterns.values())
            # security_score defaults to 0.0 when a pattern lacks the attribute.
            total_security = sum(
                getattr(p, "security_score", 0.0)
                for p in self.pattern_extractor.patterns.values()
            )
            avg_quality = total_quality / len(self.pattern_extractor.patterns)
            avg_security = total_security / len(self.pattern_extractor.patterns)

        quality_assessment = self.self_assessor.assess_learning_quality(
            pattern_count=pattern_count,
            anti_pattern_count=anti_pattern_count,
            average_quality=avg_quality,
            average_security=avg_security,
        )
        optimization["quality_assessment"] = quality_assessment

        # Identify learning gaps; capability metrics only when filtered.
        capability_metrics = {}
        if capability_id:
            metric = self.capability_registry.get_capability(capability_id)
            if metric:
                capability_metrics = {
                    "success_rate": metric.success_rate,
                    "quality_score": metric.quality_score,
                    "usage_count": metric.usage_count,
                }
        pattern_stats = {
            "total_patterns": pattern_count,
            "average_quality": avg_quality,
            "average_security": avg_security,
        }
        gaps = self.self_assessor.identify_learning_gaps(
            capability_metrics=capability_metrics,
            pattern_statistics=pattern_stats,
        )
        optimization["learning_gaps"] = gaps

        # Get improvement suggestions from the quality assessment.
        suggestions = self.self_assessor.suggest_improvements(quality_assessment)
        optimization["improvement_suggestions"] = suggestions

        # Select optimal strategy: per-capability success rate when filtered,
        # otherwise the tracker's average effectiveness (ROI).
        if capability_id:
            metric = self.capability_registry.get_capability(capability_id)
            current_effectiveness = metric.success_rate if metric else 0.5
        else:
            # Use average effectiveness across all tracked sessions.
            roi = self.effectiveness_tracker.get_learning_roi(capability_id=capability_id)
            current_effectiveness = roi.get("average_effectiveness", 0.5)

        hardware_profile_str = (
            self.hardware_profile.profile_type.value
            if hasattr(self.hardware_profile, "profile_type")
            else None
        )
        optimal_strategy = self.strategy_selector.select_strategy(
            capability_id=capability_id or "global",
            current_effectiveness=current_effectiveness,
            hardware_profile=hardware_profile_str,
        )
        optimization["optimal_strategy"] = optimal_strategy.value
        optimization["current_strategy"] = self.current_strategy.value

        # Optimize the pattern-extraction quality threshold.
        current_threshold = self.pattern_extractor.min_quality_threshold
        metric = self.capability_registry.get_capability(capability_id) if capability_id else None
        success_rate = metric.success_rate if metric else 0.5
        optimized_threshold = self.adaptive_rate.optimize_thresholds(
            current_threshold=current_threshold,
            success_rate=success_rate,
            quality_score=avg_quality,
        )
        optimization["optimized_threshold"] = optimized_threshold
        optimization["current_threshold"] = current_threshold

        # Update strategy if a better one was found and the selector approves.
        # NOTE(review): when the selector declines the switch
        # (switched=False on this branch), "strategy_switched" is left unset
        # in the report — confirm whether that is intentional.
        if optimal_strategy != self.current_strategy:
            switch_result = self.strategy_selector.switch_strategy(
                current_strategy=self.current_strategy,
                new_strategy=optimal_strategy,
            )
            if switch_result["switched"]:
                self.current_strategy = optimal_strategy
                optimization["strategy_switched"] = True
                optimization["switch_result"] = switch_result
        else:
            optimization["strategy_switched"] = False

        # Attach overall effectiveness metrics (ROI) for the report.
        effectiveness_metrics = self.effectiveness_tracker.get_learning_roi(
            capability_id=capability_id
        )
        optimization["effectiveness_metrics"] = effectiveness_metrics

        return optimization
|
|
1849
|
-
|
|
1850
|
-
def should_refine_capability(self, capability_id: str) -> bool:
|
|
1851
|
-
"""
|
|
1852
|
-
Determine if a capability should be refined.
|
|
1853
|
-
|
|
1854
|
-
Args:
|
|
1855
|
-
capability_id: Capability identifier
|
|
1856
|
-
|
|
1857
|
-
Returns:
|
|
1858
|
-
True if refinement is recommended
|
|
1859
|
-
"""
|
|
1860
|
-
metric = self.capability_registry.get_capability(capability_id)
|
|
1861
|
-
if not metric:
|
|
1862
|
-
return False
|
|
1863
|
-
|
|
1864
|
-
# Refine if quality is below threshold and has enough usage
|
|
1865
|
-
return metric.quality_score < 0.7 and metric.usage_count >= 10
|
|
1
|
+
"""
|
|
2
|
+
Agent Learning System
|
|
3
|
+
|
|
4
|
+
Enables agents to learn from past tasks and improve over time.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
# @ai-prime-directive: This file implements the Agent Learning System for pattern extraction, prompt optimization,
|
|
8
|
+
# and feedback analysis. The system enables agents to learn from past tasks and improve over time through
|
|
9
|
+
# meta-learning, pattern recognition, and adaptive learning strategies.
|
|
10
|
+
|
|
11
|
+
# @ai-constraints:
|
|
12
|
+
# - Learning intensity must adapt to hardware profile (NUC, Desktop, Server)
|
|
13
|
+
# - Pattern extraction must include security scanning before pattern acceptance
|
|
14
|
+
# - Anti-pattern extraction must learn from negative feedback and failures
|
|
15
|
+
# - Prompt optimization must respect hardware constraints and token budgets
|
|
16
|
+
# - Meta-learning must track learning effectiveness and adjust strategies accordingly
|
|
17
|
+
# - Performance: Learning operations should not significantly impact agent response times
|
|
18
|
+
|
|
19
|
+
# @note[2025-01-15]: Agent learning is an advanced feature that improves agent performance over time.
|
|
20
|
+
# The system uses meta-learning to adapt learning strategies based on effectiveness tracking.
|
|
21
|
+
# See docs/architecture/decisions/ for related architectural decisions.
|
|
22
|
+
|
|
23
|
+
import logging
|
|
24
|
+
import re
|
|
25
|
+
from dataclasses import dataclass, field
|
|
26
|
+
from datetime import UTC, datetime
|
|
27
|
+
from typing import Any
|
|
28
|
+
|
|
29
|
+
from .best_practice_consultant import BestPracticeConsultant
|
|
30
|
+
from .capability_registry import CapabilityRegistry, LearningIntensity
|
|
31
|
+
from .hardware_profiler import HardwareProfile, HardwareProfiler
|
|
32
|
+
from .learning_confidence import LearningConfidenceCalculator
|
|
33
|
+
from .learning_dashboard import LearningDashboard
|
|
34
|
+
from .learning_decision import LearningDecisionEngine
|
|
35
|
+
from .learning_explainability import (
|
|
36
|
+
DecisionReasoningLogger,
|
|
37
|
+
LearningImpactReporter,
|
|
38
|
+
PatternSelectionExplainer,
|
|
39
|
+
)
|
|
40
|
+
from .meta_learning import (
|
|
41
|
+
AdaptiveLearningRate,
|
|
42
|
+
LearningEffectivenessTracker,
|
|
43
|
+
LearningSelfAssessor,
|
|
44
|
+
LearningStrategy,
|
|
45
|
+
LearningStrategySelector,
|
|
46
|
+
)
|
|
47
|
+
from .security_scanner import SecurityScanner
|
|
48
|
+
from .task_memory import TaskMemorySystem, TaskOutcome
|
|
49
|
+
|
|
50
|
+
logger = logging.getLogger(__name__)
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
@dataclass
class CodePattern:
    """Represents a learned code pattern.

    Also used for anti-patterns: when ``is_anti_pattern`` is True the record
    describes code to avoid, with ``failure_reasons`` and ``rejection_count``
    carrying the negative evidence.
    """

    pattern_id: str
    pattern_type: str  # "function", "class", "import", "structure"
    code_snippet: str
    context: str  # context string used for relevance matching
    quality_score: float  # normalized 0-1 quality of the originating code
    usage_count: int  # how many times this pattern has been (re)observed
    success_rate: float
    learned_from: list[str]  # Task IDs where this pattern was successful
    metadata: dict[str, Any] = field(default_factory=dict)
    security_score: float = 0.0  # Security score (0-10, higher is better)
    is_anti_pattern: bool = False  # True if this is an anti-pattern to avoid
    failure_reasons: list[str] = field(default_factory=list)  # Reasons for failure
    rejection_count: int = 0  # Number of times this pattern was rejected
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
@dataclass
class PromptVariant:
    """Represents a prompt variation for A/B testing."""

    variant_id: str
    prompt_template: str
    modifications: list[str]  # List of modifications made
    test_count: int  # total number of trials for this variant
    success_count: int  # trials judged successful
    average_quality: float  # running average quality across trials
    created_at: datetime
    last_tested: datetime | None = None  # None until the variant is first tried
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
class PatternExtractor:
    """Extracts patterns from successful code outputs."""

    def __init__(
        self,
        min_quality_threshold: float = 0.7,
        security_scanner: SecurityScanner | None = None,
        security_threshold: float = 7.0,
    ):
        """
        Initialize pattern extractor.

        Args:
            min_quality_threshold: Minimum quality score to extract patterns
            security_scanner: Optional security scanner instance
            security_threshold: Minimum security score to extract patterns (default: 7.0)
        """
        self.min_quality_threshold = min_quality_threshold
        # Fall back to a fresh scanner so extraction always has one available.
        self.security_scanner = security_scanner or SecurityScanner()
        self.security_threshold = security_threshold
        # pattern_id -> CodePattern; populated as patterns are extracted.
        self.patterns: dict[str, CodePattern] = {}
|
|
107
|
+
|
|
108
|
+
def extract_patterns(
|
|
109
|
+
self,
|
|
110
|
+
code: str,
|
|
111
|
+
quality_score: float,
|
|
112
|
+
task_id: str,
|
|
113
|
+
pattern_types: list[str] | None = None,
|
|
114
|
+
) -> list[CodePattern]:
|
|
115
|
+
"""
|
|
116
|
+
Extract patterns from code.
|
|
117
|
+
|
|
118
|
+
Args:
|
|
119
|
+
code: Source code
|
|
120
|
+
quality_score: Quality score of the code
|
|
121
|
+
task_id: Task identifier
|
|
122
|
+
pattern_types: Optional list of pattern types to extract
|
|
123
|
+
|
|
124
|
+
Returns:
|
|
125
|
+
List of extracted patterns
|
|
126
|
+
"""
|
|
127
|
+
if quality_score < self.min_quality_threshold:
|
|
128
|
+
return []
|
|
129
|
+
|
|
130
|
+
# Security check before extraction
|
|
131
|
+
security_result = self.security_scanner.scan_code(code=code)
|
|
132
|
+
security_score = security_result["security_score"]
|
|
133
|
+
vulnerabilities = security_result["vulnerabilities"]
|
|
134
|
+
is_safe = security_result.get("is_safe", True)
|
|
135
|
+
|
|
136
|
+
# Only extract if security score meets threshold
|
|
137
|
+
# Check both score >= threshold AND is_safe flag
|
|
138
|
+
if security_score < self.security_threshold or not is_safe:
|
|
139
|
+
logger.debug(
|
|
140
|
+
f"Skipping pattern extraction: security score {security_score:.2f} "
|
|
141
|
+
f"below threshold {self.security_threshold:.2f} or is_safe={is_safe}"
|
|
142
|
+
)
|
|
143
|
+
return []
|
|
144
|
+
|
|
145
|
+
patterns = []
|
|
146
|
+
|
|
147
|
+
# Extract function patterns
|
|
148
|
+
if not pattern_types or "function" in pattern_types:
|
|
149
|
+
func_patterns = self._extract_function_patterns(
|
|
150
|
+
code, quality_score, task_id
|
|
151
|
+
)
|
|
152
|
+
patterns.extend(func_patterns)
|
|
153
|
+
|
|
154
|
+
# Extract class patterns
|
|
155
|
+
if not pattern_types or "class" in pattern_types:
|
|
156
|
+
class_patterns = self._extract_class_patterns(code, quality_score, task_id)
|
|
157
|
+
patterns.extend(class_patterns)
|
|
158
|
+
|
|
159
|
+
# Extract import patterns
|
|
160
|
+
if not pattern_types or "import" in pattern_types:
|
|
161
|
+
import_patterns = self._extract_import_patterns(
|
|
162
|
+
code, quality_score, task_id
|
|
163
|
+
)
|
|
164
|
+
patterns.extend(import_patterns)
|
|
165
|
+
|
|
166
|
+
# Extract structural patterns
|
|
167
|
+
if not pattern_types or "structure" in pattern_types:
|
|
168
|
+
struct_patterns = self._extract_structural_patterns(
|
|
169
|
+
code, quality_score, task_id
|
|
170
|
+
)
|
|
171
|
+
patterns.extend(struct_patterns)
|
|
172
|
+
|
|
173
|
+
return patterns
|
|
174
|
+
|
|
175
|
+
def _extract_function_patterns(
|
|
176
|
+
self, code: str, quality_score: float, task_id: str
|
|
177
|
+
) -> list[CodePattern]:
|
|
178
|
+
"""Extract function patterns."""
|
|
179
|
+
patterns = []
|
|
180
|
+
|
|
181
|
+
# Match function definitions - more flexible pattern that handles docstrings and type hints
|
|
182
|
+
# Find all function definitions, then extract their bodies
|
|
183
|
+
# Pattern matches: def function_name(...) with optional type hints, handling multiline signatures
|
|
184
|
+
func_def_pattern = r"def\s+(\w+)\s*\([^)]*\)\s*(?:->\s*[^:]+)?:"
|
|
185
|
+
func_matches = list(re.finditer(func_def_pattern, code, re.MULTILINE | re.DOTALL))
|
|
186
|
+
|
|
187
|
+
for i, match in enumerate(func_matches):
|
|
188
|
+
func_name = match.group(1)
|
|
189
|
+
start_pos = match.end()
|
|
190
|
+
|
|
191
|
+
# Find the end of this function (next function def or end of code)
|
|
192
|
+
if i + 1 < len(func_matches):
|
|
193
|
+
end_pos = func_matches[i + 1].start()
|
|
194
|
+
else:
|
|
195
|
+
end_pos = len(code)
|
|
196
|
+
|
|
197
|
+
func_body = code[start_pos:end_pos].strip()
|
|
198
|
+
|
|
199
|
+
# Skip if function body is empty
|
|
200
|
+
if not func_body:
|
|
201
|
+
continue
|
|
202
|
+
|
|
203
|
+
# Get security score for this pattern
|
|
204
|
+
pattern_code = f"def {func_name}(...):\n{func_body[:200]}"
|
|
205
|
+
pattern_security = self.security_scanner.scan_code(code=pattern_code)
|
|
206
|
+
pattern_security_score = pattern_security["security_score"]
|
|
207
|
+
pattern_vulnerabilities = pattern_security["vulnerabilities"]
|
|
208
|
+
|
|
209
|
+
pattern = CodePattern(
|
|
210
|
+
pattern_id=f"func_{func_name}_{hash(func_body) % 10000}",
|
|
211
|
+
pattern_type="function",
|
|
212
|
+
code_snippet=pattern_code,
|
|
213
|
+
context=f"Function: {func_name}",
|
|
214
|
+
quality_score=quality_score,
|
|
215
|
+
usage_count=1,
|
|
216
|
+
success_rate=1.0,
|
|
217
|
+
learned_from=[task_id],
|
|
218
|
+
security_score=pattern_security_score,
|
|
219
|
+
metadata={"vulnerabilities": pattern_vulnerabilities},
|
|
220
|
+
)
|
|
221
|
+
patterns.append(pattern)
|
|
222
|
+
|
|
223
|
+
return patterns
|
|
224
|
+
|
|
225
|
+
def _extract_class_patterns(
|
|
226
|
+
self, code: str, quality_score: float, task_id: str
|
|
227
|
+
) -> list[CodePattern]:
|
|
228
|
+
"""Extract class patterns."""
|
|
229
|
+
patterns = []
|
|
230
|
+
|
|
231
|
+
# Match class definitions
|
|
232
|
+
class_pattern = r"class\s+(\w+)(?:\([^)]+\))?:\s*\n((?:\s{4}.*\n?)*)"
|
|
233
|
+
matches = re.finditer(class_pattern, code, re.MULTILINE)
|
|
234
|
+
|
|
235
|
+
for match in matches:
|
|
236
|
+
class_name = match.group(1)
|
|
237
|
+
class_body = match.group(2)
|
|
238
|
+
|
|
239
|
+
# Get security score for this pattern
|
|
240
|
+
pattern_code = f"class {class_name}:\n{class_body[:200]}"
|
|
241
|
+
pattern_security = self.security_scanner.scan_code(code=pattern_code)
|
|
242
|
+
pattern_security_score = pattern_security["security_score"]
|
|
243
|
+
pattern_vulnerabilities = pattern_security["vulnerabilities"]
|
|
244
|
+
|
|
245
|
+
pattern = CodePattern(
|
|
246
|
+
pattern_id=f"class_{class_name}_{hash(class_body) % 10000}",
|
|
247
|
+
pattern_type="class",
|
|
248
|
+
code_snippet=pattern_code,
|
|
249
|
+
context=f"Class: {class_name}",
|
|
250
|
+
quality_score=quality_score,
|
|
251
|
+
usage_count=1,
|
|
252
|
+
success_rate=1.0,
|
|
253
|
+
learned_from=[task_id],
|
|
254
|
+
security_score=pattern_security_score,
|
|
255
|
+
metadata={"vulnerabilities": pattern_vulnerabilities},
|
|
256
|
+
)
|
|
257
|
+
patterns.append(pattern)
|
|
258
|
+
|
|
259
|
+
return patterns
|
|
260
|
+
|
|
261
|
+
def _extract_import_patterns(
|
|
262
|
+
self, code: str, quality_score: float, task_id: str
|
|
263
|
+
) -> list[CodePattern]:
|
|
264
|
+
"""Extract import patterns."""
|
|
265
|
+
patterns = []
|
|
266
|
+
|
|
267
|
+
# Match import statements
|
|
268
|
+
import_pattern = r"^(?:from\s+[\w.]+|import\s+[\w.,\s]+)"
|
|
269
|
+
matches = re.finditer(import_pattern, code, re.MULTILINE)
|
|
270
|
+
|
|
271
|
+
imports = []
|
|
272
|
+
for match in matches:
|
|
273
|
+
imports.append(match.group(0).strip())
|
|
274
|
+
|
|
275
|
+
if imports:
|
|
276
|
+
# Get security score for this pattern
|
|
277
|
+
pattern_code = "\n".join(imports[:10])
|
|
278
|
+
pattern_security = self.security_scanner.scan_code(code=pattern_code)
|
|
279
|
+
pattern_security_score = pattern_security["security_score"]
|
|
280
|
+
pattern_vulnerabilities = pattern_security["vulnerabilities"]
|
|
281
|
+
|
|
282
|
+
pattern = CodePattern(
|
|
283
|
+
pattern_id=f"imports_{hash(''.join(imports)) % 10000}",
|
|
284
|
+
pattern_type="import",
|
|
285
|
+
code_snippet=pattern_code,
|
|
286
|
+
context="Import statements",
|
|
287
|
+
quality_score=quality_score,
|
|
288
|
+
usage_count=1,
|
|
289
|
+
success_rate=1.0,
|
|
290
|
+
learned_from=[task_id],
|
|
291
|
+
security_score=pattern_security_score,
|
|
292
|
+
metadata={"vulnerabilities": pattern_vulnerabilities},
|
|
293
|
+
)
|
|
294
|
+
patterns.append(pattern)
|
|
295
|
+
|
|
296
|
+
return patterns
|
|
297
|
+
|
|
298
|
+
def _extract_structural_patterns(
|
|
299
|
+
self, code: str, quality_score: float, task_id: str
|
|
300
|
+
) -> list[CodePattern]:
|
|
301
|
+
"""Extract structural patterns (decorators, context managers, etc.)."""
|
|
302
|
+
patterns = []
|
|
303
|
+
|
|
304
|
+
# Match decorators
|
|
305
|
+
decorator_pattern = r"@\w+(?:\([^)]*\))?"
|
|
306
|
+
decorators = re.findall(decorator_pattern, code)
|
|
307
|
+
|
|
308
|
+
if decorators:
|
|
309
|
+
# Get security score for this pattern
|
|
310
|
+
pattern_code = "\n".join(decorators[:5])
|
|
311
|
+
pattern_security = self.security_scanner.scan_code(code=pattern_code)
|
|
312
|
+
pattern_security_score = pattern_security["security_score"]
|
|
313
|
+
pattern_vulnerabilities = pattern_security["vulnerabilities"]
|
|
314
|
+
|
|
315
|
+
pattern = CodePattern(
|
|
316
|
+
pattern_id=f"decorators_{hash(''.join(decorators)) % 10000}",
|
|
317
|
+
pattern_type="structure",
|
|
318
|
+
code_snippet=pattern_code,
|
|
319
|
+
context="Decorators",
|
|
320
|
+
quality_score=quality_score,
|
|
321
|
+
usage_count=1,
|
|
322
|
+
success_rate=1.0,
|
|
323
|
+
learned_from=[task_id],
|
|
324
|
+
security_score=pattern_security_score,
|
|
325
|
+
metadata={"vulnerabilities": pattern_vulnerabilities},
|
|
326
|
+
)
|
|
327
|
+
patterns.append(pattern)
|
|
328
|
+
|
|
329
|
+
return patterns
|
|
330
|
+
|
|
331
|
+
def get_patterns_for_context(
|
|
332
|
+
self,
|
|
333
|
+
context: str,
|
|
334
|
+
pattern_type: str | None = None,
|
|
335
|
+
limit: int = 5,
|
|
336
|
+
exclude_anti_patterns: bool = True,
|
|
337
|
+
) -> list[CodePattern]:
|
|
338
|
+
"""
|
|
339
|
+
Get relevant patterns for a context.
|
|
340
|
+
|
|
341
|
+
Args:
|
|
342
|
+
context: Context string
|
|
343
|
+
pattern_type: Optional pattern type filter
|
|
344
|
+
limit: Maximum results
|
|
345
|
+
exclude_anti_patterns: If True, exclude anti-patterns (default: True)
|
|
346
|
+
|
|
347
|
+
Returns:
|
|
348
|
+
List of relevant patterns
|
|
349
|
+
"""
|
|
350
|
+
candidates = list(self.patterns.values())
|
|
351
|
+
|
|
352
|
+
# Filter out anti-patterns if requested
|
|
353
|
+
if exclude_anti_patterns:
|
|
354
|
+
candidates = [p for p in candidates if not p.is_anti_pattern]
|
|
355
|
+
|
|
356
|
+
if pattern_type:
|
|
357
|
+
candidates = [p for p in candidates if p.pattern_type == pattern_type]
|
|
358
|
+
|
|
359
|
+
# Sort by security score, quality, and usage
|
|
360
|
+
candidates.sort(
|
|
361
|
+
key=lambda p: (p.security_score, p.quality_score, p.usage_count),
|
|
362
|
+
reverse=True,
|
|
363
|
+
)
|
|
364
|
+
|
|
365
|
+
return candidates[:limit]
|
|
366
|
+
|
|
367
|
+
|
|
368
|
+
class AntiPatternExtractor:
    """Extracts anti-patterns from failed tasks and low-quality code.

    Anti-patterns record what NOT to generate: each carries a zero success
    rate, the failure reasons that produced it, and a rejection counter that
    callers may increment when the same pattern is rejected again.
    """

    def __init__(
        self,
        max_quality_threshold: float = 0.7,
        security_scanner: SecurityScanner | None = None,
    ):
        """
        Initialize anti-pattern extractor.

        Args:
            max_quality_threshold: Maximum quality score to extract anti-patterns
            security_scanner: Optional security scanner instance
        """
        self.max_quality_threshold = max_quality_threshold
        self.security_scanner = security_scanner or SecurityScanner()
        # Registry of known anti-patterns keyed by pattern_id; populated by
        # callers (e.g. NegativeFeedbackHandler.record_rejection), not by
        # extract_anti_patterns() itself.
        self.anti_patterns: dict[str, CodePattern] = {}

    @staticmethod
    def _stable_hash(text: str) -> int:
        """Return a deterministic 4-digit hash for *text*.

        The built-in ``hash()`` is salted per process (PYTHONHASHSEED), which
        would give the same snippet different anti-pattern IDs across runs.
        """
        import hashlib

        return int(hashlib.sha1(text.encode("utf-8")).hexdigest(), 16) % 10000

    def _build_anti_pattern(
        self,
        pattern_id: str,
        pattern_type: str,
        code_snippet: str,
        context: str,
        quality_score: float,
        task_id: str,
        failure_reasons: list[str],
    ) -> CodePattern:
        """Security-scan *code_snippet* and wrap it in an anti-pattern record.

        Centralizes the scan-then-construct sequence previously duplicated
        across all four extraction helpers.
        """
        security = self.security_scanner.scan_code(code=code_snippet)
        return CodePattern(
            pattern_id=pattern_id,
            pattern_type=pattern_type,
            code_snippet=code_snippet,
            context=context,
            quality_score=quality_score,
            usage_count=1,
            success_rate=0.0,  # Anti-patterns have 0 success rate
            learned_from=[task_id],
            security_score=security["security_score"],
            is_anti_pattern=True,
            failure_reasons=failure_reasons.copy(),  # copy: don't share caller's list
            rejection_count=0,
            metadata={"vulnerabilities": security["vulnerabilities"]},
        )

    def extract_anti_patterns(
        self,
        code: str,
        quality_score: float,
        task_id: str,
        failure_reasons: list[str] | None = None,
        pattern_types: list[str] | None = None,
    ) -> list[CodePattern]:
        """
        Extract anti-patterns from failed or low-quality code.

        Args:
            code: Source code
            quality_score: Quality score of the code
            task_id: Task identifier
            failure_reasons: Optional list of failure reasons
            pattern_types: Optional list of pattern types to extract (subset
                of {"function", "class", "import", "structure"}); None or an
                empty list extracts all types

        Returns:
            List of extracted anti-patterns; empty when quality_score is at
            or above max_quality_threshold
        """
        # Only extract if quality is below threshold
        if quality_score >= self.max_quality_threshold:
            return []

        failure_reasons = failure_reasons or []
        anti_patterns: list[CodePattern] = []

        if not pattern_types or "function" in pattern_types:
            anti_patterns.extend(
                self._extract_function_anti_patterns(
                    code, quality_score, task_id, failure_reasons
                )
            )
        if not pattern_types or "class" in pattern_types:
            anti_patterns.extend(
                self._extract_class_anti_patterns(
                    code, quality_score, task_id, failure_reasons
                )
            )
        if not pattern_types or "import" in pattern_types:
            anti_patterns.extend(
                self._extract_import_anti_patterns(
                    code, quality_score, task_id, failure_reasons
                )
            )
        if not pattern_types or "structure" in pattern_types:
            anti_patterns.extend(
                self._extract_structural_anti_patterns(
                    code, quality_score, task_id, failure_reasons
                )
            )

        return anti_patterns

    def _extract_function_anti_patterns(
        self,
        code: str,
        quality_score: float,
        task_id: str,
        failure_reasons: list[str],
    ) -> list[CodePattern]:
        """Extract one anti-pattern per ``def`` whose body is 4-space indented.

        NOTE(review): unlike PatternExtractor, this regex does not accept a
        ``->`` return annotation — confirm whether annotated signatures
        should also be captured here.
        """
        anti_patterns = []

        func_pattern = r"def\s+(\w+)\s*\([^)]*\):\s*\n((?:\s{4}.*\n?)*)"
        for match in re.finditer(func_pattern, code, re.MULTILINE):
            func_name = match.group(1)
            func_body = match.group(2)

            anti_patterns.append(
                self._build_anti_pattern(
                    pattern_id=f"anti_func_{func_name}_{self._stable_hash(func_body)}",
                    pattern_type="function",
                    code_snippet=f"def {func_name}(...):\n{func_body[:200]}",
                    context=f"Anti-pattern Function: {func_name}",
                    quality_score=quality_score,
                    task_id=task_id,
                    failure_reasons=failure_reasons,
                )
            )

        return anti_patterns

    def _extract_class_anti_patterns(
        self,
        code: str,
        quality_score: float,
        task_id: str,
        failure_reasons: list[str],
    ) -> list[CodePattern]:
        """Extract one anti-pattern per ``class`` whose body is 4-space indented."""
        anti_patterns = []

        class_pattern = r"class\s+(\w+)(?:\([^)]+\))?:\s*\n((?:\s{4}.*\n?)*)"
        for match in re.finditer(class_pattern, code, re.MULTILINE):
            class_name = match.group(1)
            class_body = match.group(2)

            anti_patterns.append(
                self._build_anti_pattern(
                    pattern_id=f"anti_class_{class_name}_{self._stable_hash(class_body)}",
                    pattern_type="class",
                    code_snippet=f"class {class_name}:\n{class_body[:200]}",
                    context=f"Anti-pattern Class: {class_name}",
                    quality_score=quality_score,
                    task_id=task_id,
                    failure_reasons=failure_reasons,
                )
            )

        return anti_patterns

    def _extract_import_anti_patterns(
        self,
        code: str,
        quality_score: float,
        task_id: str,
        failure_reasons: list[str],
    ) -> list[CodePattern]:
        """Extract a single anti-pattern summarizing the module's import lines."""
        import_pattern = r"^(?:from\s+[\w.]+|import\s+[\w.,\s]+)"
        imports = [
            m.group(0).strip() for m in re.finditer(import_pattern, code, re.MULTILINE)
        ]
        if not imports:
            return []

        return [
            self._build_anti_pattern(
                pattern_id=f"anti_imports_{self._stable_hash(''.join(imports))}",
                pattern_type="import",
                code_snippet="\n".join(imports[:10]),  # snippet capped at 10 imports
                context="Anti-pattern Import statements",
                quality_score=quality_score,
                task_id=task_id,
                failure_reasons=failure_reasons,
            )
        ]

    def _extract_structural_anti_patterns(
        self,
        code: str,
        quality_score: float,
        task_id: str,
        failure_reasons: list[str],
    ) -> list[CodePattern]:
        """Extract structural anti-patterns (currently: decorators only)."""
        decorators = re.findall(r"@\w+(?:\([^)]*\))?", code)
        if not decorators:
            return []

        return [
            self._build_anti_pattern(
                pattern_id=f"anti_decorators_{self._stable_hash(''.join(decorators))}",
                pattern_type="structure",
                code_snippet="\n".join(decorators[:5]),  # snippet capped at 5 decorators
                context="Anti-pattern Decorators",
                quality_score=quality_score,
                task_id=task_id,
                failure_reasons=failure_reasons,
            )
        ]

    def extract_from_failure(
        self,
        code: str,
        task_id: str,
        failure_reasons: list[str],
        quality_score: float = 0.0,
    ) -> list[CodePattern]:
        """
        Extract anti-patterns from a failed task.

        Args:
            code: Source code from failed task
            task_id: Task identifier
            failure_reasons: List of failure reasons
            quality_score: Quality score (default: 0.0 for failures)

        Returns:
            List of extracted anti-patterns
        """
        return self.extract_anti_patterns(
            code=code,
            quality_score=quality_score,
            task_id=task_id,
            failure_reasons=failure_reasons,
        )

    def extract_from_rejection(
        self,
        code: str,
        task_id: str,
        rejection_reason: str,
        quality_score: float = 0.5,
    ) -> list[CodePattern]:
        """
        Extract anti-patterns from user rejection.

        Args:
            code: Rejected code
            task_id: Task identifier
            rejection_reason: Reason for rejection
            quality_score: Quality score

        Returns:
            List of extracted anti-patterns
        """
        return self.extract_anti_patterns(
            code=code,
            quality_score=quality_score,
            task_id=task_id,
            failure_reasons=[f"User rejection: {rejection_reason}"],
        )

    def get_anti_patterns_for_context(
        self, context: str, pattern_type: str | None = None, limit: int = 5
    ) -> list[CodePattern]:
        """
        Get anti-patterns to avoid for a context.

        Args:
            context: Context string (not used in ranking yet; reserved)
            pattern_type: Optional pattern type filter
            limit: Maximum results

        Returns:
            List of anti-patterns to avoid, most important first
        """
        candidates = [p for p in self.anti_patterns.values() if p.is_anti_pattern]

        if pattern_type:
            candidates = [p for p in candidates if p.pattern_type == pattern_type]

        # Sort by rejection count and quality (low quality = more important to avoid)
        candidates.sort(
            key=lambda p: (p.rejection_count, -p.quality_score), reverse=True
        )

        return candidates[:limit]
|
|
692
|
+
|
|
693
|
+
|
|
694
|
+
class PromptOptimizer:
    """Optimizes agent prompts based on outcomes.

    Maintains a registry of prompt variants for A/B testing, records
    per-variant test results, and selects the best-performing variant.
    """

    def __init__(self, hardware_profile: HardwareProfile):
        """
        Initialize prompt optimizer.

        Args:
            hardware_profile: Hardware profile for optimization
        """
        self.hardware_profile = hardware_profile
        self.variants: dict[str, PromptVariant] = {}  # variant_id -> variant
        # NOTE(review): base_prompt is never written by this class — confirm
        # whether a caller sets it or it is dead state.
        self.base_prompt: str | None = None

    @staticmethod
    def _stable_hash(text: str) -> int:
        """Deterministic 4-digit hash; the built-in ``hash()`` is salted per
        process (PYTHONHASHSEED) and would change variant IDs between runs."""
        import hashlib

        return int(hashlib.sha1(text.encode("utf-8")).hexdigest(), 16) % 10000

    def create_variant(
        self,
        base_prompt: str,
        modifications: list[str],
        variant_id: str | None = None,
    ) -> PromptVariant:
        """
        Create a prompt variant for A/B testing.

        Args:
            base_prompt: Base prompt template
            modifications: List of modifications to apply; supported forms
                are "add:<text>" (appended) and "prepend:<text>" (prefixed).
                Unrecognized entries are recorded but not applied.
            variant_id: Optional variant identifier; derived deterministically
                from the modifications when omitted

        Returns:
            PromptVariant instance (also registered in ``self.variants``)
        """
        if variant_id is None:
            variant_id = f"variant_{self._stable_hash(''.join(modifications))}"

        # Apply modifications (simplified - in production would be more sophisticated)
        modified_prompt = base_prompt
        for mod in modifications:
            if mod.startswith("add:"):
                modified_prompt += f"\n{mod[4:]}"
            elif mod.startswith("prepend:"):
                modified_prompt = f"{mod[8:]}\n{modified_prompt}"

        variant = PromptVariant(
            variant_id=variant_id,
            prompt_template=modified_prompt,
            modifications=modifications,
            test_count=0,
            success_count=0,
            average_quality=0.0,
            created_at=datetime.now(UTC),
        )

        self.variants[variant_id] = variant
        return variant

    def record_test_result(self, variant_id: str, success: bool, quality_score: float):
        """
        Record A/B test result for a variant.

        Unknown variant IDs are logged and ignored.

        Args:
            variant_id: Variant identifier
            success: Whether test succeeded
            quality_score: Quality score
        """
        # Single lookup instead of `in` followed by indexing.
        variant = self.variants.get(variant_id)
        if variant is None:
            logger.warning(f"Variant {variant_id} not found")
            return

        variant.test_count += 1
        if success:
            variant.success_count += 1

        # Update average quality (exponential moving average).
        # NOTE(review): the EMA starts from 0.0, so early averages are biased
        # toward zero until several results accumulate.
        alpha = 0.1
        variant.average_quality = (
            alpha * quality_score + (1 - alpha) * variant.average_quality
        )
        variant.last_tested = datetime.now(UTC)

    def get_best_variant(self, min_tests: int = 5) -> PromptVariant | None:
        """
        Get the best performing variant.

        Ranking is by success rate, with average quality as the tiebreaker.

        Args:
            min_tests: Minimum test count to consider

        Returns:
            Best PromptVariant if found, None otherwise
        """
        candidates = [v for v in self.variants.values() if v.test_count >= min_tests]

        if not candidates:
            return None

        # max() with the same key is O(n) and, like a stable descending sort,
        # returns the first-seen candidate among ties.
        return max(
            candidates,
            key=lambda v: (v.success_count / v.test_count, v.average_quality),
        )

    def optimize_for_hardware(self, prompt: str) -> str:
        """
        Optimize prompt for hardware profile.

        Args:
            prompt: Original prompt

        Returns:
            Optimized prompt
        """
        # Workstation-like: return full prompt (hardware taxonomy removed)
        return prompt
|
|
809
|
+
|
|
810
|
+
|
|
811
|
+
class NegativeFeedbackHandler:
    """Handles negative feedback, rejections, and corrections.

    Wraps an AntiPatternExtractor: every rejection or correction is logged
    locally and mined for anti-patterns, which are merged into the
    extractor's registry.
    """

    def __init__(self, anti_pattern_extractor: AntiPatternExtractor | None = None):
        """
        Initialize negative feedback handler.

        Args:
            anti_pattern_extractor: Optional anti-pattern extractor instance
        """
        self.anti_pattern_extractor = anti_pattern_extractor or AntiPatternExtractor()
        self.rejections: list[dict[str, Any]] = []
        self.corrections: list[dict[str, Any]] = []

    def record_rejection(
        self,
        code: str,
        task_id: str,
        reason: str,
        quality_score: float = 0.5,
    ) -> list[CodePattern]:
        """
        Record user rejection and extract anti-patterns.

        Args:
            code: Rejected code
            task_id: Task identifier
            reason: Reason for rejection
            quality_score: Quality score

        Returns:
            List of extracted anti-patterns
        """
        self.rejections.append(
            {
                "task_id": task_id,
                "reason": reason,
                "quality_score": quality_score,
                "timestamp": datetime.now(UTC).isoformat(),
            }
        )

        # Mine the rejected code for anti-patterns.
        extracted = self.anti_pattern_extractor.extract_from_rejection(
            code=code,
            task_id=task_id,
            rejection_reason=reason,
            quality_score=quality_score,
        )

        # Merge into the extractor's registry: bump the rejection count on
        # already-known anti-patterns, register new ones with a count of 1.
        registry = self.anti_pattern_extractor.anti_patterns
        for candidate in extracted:
            known = registry.get(candidate.pattern_id)
            if known is not None:
                known.rejection_count += 1
                if reason not in known.failure_reasons:
                    known.failure_reasons.append(reason)
            else:
                candidate.rejection_count = 1
                registry[candidate.pattern_id] = candidate

        return extracted

    def record_correction(
        self,
        original_code: str,
        corrected_code: str,
        task_id: str,
        correction_reason: str,
    ) -> tuple[list[CodePattern], list[CodePattern]]:
        """
        Record user correction and extract both anti-patterns and patterns.

        Args:
            original_code: Original (incorrect) code
            corrected_code: Corrected code
            task_id: Task identifier
            correction_reason: Reason for correction

        Returns:
            Tuple of (anti-patterns from original, patterns from corrected)
        """
        self.corrections.append(
            {
                "task_id": task_id,
                "reason": correction_reason,
                "timestamp": datetime.now(UTC).isoformat(),
            }
        )

        # Mine the incorrect version for anti-patterns.
        mined = self.anti_pattern_extractor.extract_from_rejection(
            code=original_code,
            task_id=task_id,
            rejection_reason=f"Correction: {correction_reason}",
            quality_score=0.3,  # Low quality for incorrect code
        )

        # Extracting positive patterns from corrected_code would need a
        # PatternExtractor, which this handler does not hold, so the second
        # element is empty for now.
        return mined, []

    def extract_anti_patterns_from_feedback(
        self, code: str, task_id: str, feedback: str
    ) -> list[CodePattern]:
        """
        Extract anti-patterns from feedback text.

        Args:
            code: Code that received feedback
            task_id: Task identifier
            feedback: Feedback text

        Returns:
            List of extracted anti-patterns
        """
        return self.anti_pattern_extractor.extract_from_rejection(
            code=code, task_id=task_id, rejection_reason=feedback, quality_score=0.4
        )

    def get_anti_patterns_for_context(
        self, context: str, pattern_type: str | None = None, limit: int = 5
    ) -> list[CodePattern]:
        """
        Get anti-patterns to avoid for a context.

        Args:
            context: Context string
            pattern_type: Optional pattern type filter
            limit: Maximum results

        Returns:
            List of anti-patterns to avoid
        """
        return self.anti_pattern_extractor.get_anti_patterns_for_context(
            context=context, pattern_type=pattern_type, limit=limit
        )
|
|
955
|
+
|
|
956
|
+
|
|
957
|
+
class FailureModeAnalyzer:
|
|
958
|
+
"""Analyzes failure patterns and categorizes failures."""
|
|
959
|
+
|
|
960
|
+
def __init__(self):
|
|
961
|
+
"""Initialize failure mode analyzer."""
|
|
962
|
+
self.failure_modes: dict[str, dict[str, Any]] = {}
|
|
963
|
+
|
|
964
|
+
def analyze_failure(
|
|
965
|
+
self,
|
|
966
|
+
code: str,
|
|
967
|
+
task_id: str,
|
|
968
|
+
failure_reasons: list[str],
|
|
969
|
+
quality_scores: dict[str, float] | None = None,
|
|
970
|
+
) -> dict[str, Any]:
|
|
971
|
+
"""
|
|
972
|
+
Analyze a single failure.
|
|
973
|
+
|
|
974
|
+
Args:
|
|
975
|
+
code: Failed code
|
|
976
|
+
task_id: Task identifier
|
|
977
|
+
failure_reasons: List of failure reasons
|
|
978
|
+
quality_scores: Optional quality scores
|
|
979
|
+
|
|
980
|
+
Returns:
|
|
981
|
+
Analysis result with failure mode and suggestions
|
|
982
|
+
"""
|
|
983
|
+
failure_mode = self.identify_failure_mode(failure_reasons, quality_scores)
|
|
984
|
+
|
|
985
|
+
# Track failure mode statistics
|
|
986
|
+
if failure_mode not in self.failure_modes:
|
|
987
|
+
self.failure_modes[failure_mode] = {
|
|
988
|
+
"count": 0,
|
|
989
|
+
"reasons": [],
|
|
990
|
+
"task_ids": [],
|
|
991
|
+
}
|
|
992
|
+
|
|
993
|
+
self.failure_modes[failure_mode]["count"] += 1
|
|
994
|
+
self.failure_modes[failure_mode]["reasons"].extend(failure_reasons)
|
|
995
|
+
self.failure_modes[failure_mode]["task_ids"].append(task_id)
|
|
996
|
+
|
|
997
|
+
# Generate prevention suggestions
|
|
998
|
+
suggestions = self.suggest_prevention(failure_mode, failure_reasons)
|
|
999
|
+
|
|
1000
|
+
return {
|
|
1001
|
+
"failure_mode": failure_mode,
|
|
1002
|
+
"failure_reasons": failure_reasons,
|
|
1003
|
+
"suggestions": suggestions,
|
|
1004
|
+
"task_id": task_id,
|
|
1005
|
+
}
|
|
1006
|
+
|
|
1007
|
+
def identify_failure_mode(
|
|
1008
|
+
self,
|
|
1009
|
+
failure_reasons: list[str],
|
|
1010
|
+
quality_scores: dict[str, float] | None = None,
|
|
1011
|
+
) -> str:
|
|
1012
|
+
"""
|
|
1013
|
+
Categorize failure into a failure mode.
|
|
1014
|
+
|
|
1015
|
+
Args:
|
|
1016
|
+
failure_reasons: List of failure reasons
|
|
1017
|
+
quality_scores: Optional quality scores
|
|
1018
|
+
|
|
1019
|
+
Returns:
|
|
1020
|
+
Failure mode category
|
|
1021
|
+
"""
|
|
1022
|
+
reasons_str = " ".join(failure_reasons).lower()
|
|
1023
|
+
|
|
1024
|
+
# Categorize based on keywords and quality scores
|
|
1025
|
+
if any(
|
|
1026
|
+
keyword in reasons_str
|
|
1027
|
+
for keyword in ["syntax", "parse", "indentation", "syntaxerror"]
|
|
1028
|
+
):
|
|
1029
|
+
return "syntax_error"
|
|
1030
|
+
elif any(
|
|
1031
|
+
keyword in reasons_str
|
|
1032
|
+
for keyword in ["security", "vulnerability", "insecure", "bandit"]
|
|
1033
|
+
):
|
|
1034
|
+
return "security_issue"
|
|
1035
|
+
elif any(
|
|
1036
|
+
keyword in reasons_str
|
|
1037
|
+
for keyword in ["timeout", "slow", "performance", "efficiency"]
|
|
1038
|
+
):
|
|
1039
|
+
return "performance_issue"
|
|
1040
|
+
elif any(
|
|
1041
|
+
keyword in reasons_str
|
|
1042
|
+
for keyword in ["logic", "incorrect", "wrong", "bug", "error"]
|
|
1043
|
+
):
|
|
1044
|
+
return "logic_error"
|
|
1045
|
+
elif quality_scores:
|
|
1046
|
+
# Check quality scores
|
|
1047
|
+
if quality_scores.get("security_score", 10.0) < 5.0:
|
|
1048
|
+
return "security_issue"
|
|
1049
|
+
elif quality_scores.get("complexity_score", 0.0) > 8.0:
|
|
1050
|
+
return "complexity_issue"
|
|
1051
|
+
elif quality_scores.get("maintainability_score", 10.0) < 5.0:
|
|
1052
|
+
return "maintainability_issue"
|
|
1053
|
+
else:
|
|
1054
|
+
return "quality_issue"
|
|
1055
|
+
else:
|
|
1056
|
+
return "unknown_failure"
|
|
1057
|
+
|
|
1058
|
+
def get_common_failure_modes(self, limit: int = 5) -> list[dict[str, Any]]:
|
|
1059
|
+
"""
|
|
1060
|
+
Get most common failure modes.
|
|
1061
|
+
|
|
1062
|
+
Args:
|
|
1063
|
+
limit: Maximum results
|
|
1064
|
+
|
|
1065
|
+
Returns:
|
|
1066
|
+
List of failure mode statistics
|
|
1067
|
+
"""
|
|
1068
|
+
modes = sorted(
|
|
1069
|
+
self.failure_modes.items(),
|
|
1070
|
+
key=lambda x: x[1]["count"],
|
|
1071
|
+
reverse=True,
|
|
1072
|
+
)
|
|
1073
|
+
return [
|
|
1074
|
+
{
|
|
1075
|
+
"mode": mode,
|
|
1076
|
+
"count": stats["count"],
|
|
1077
|
+
"reasons": list(set(stats["reasons"]))[:5], # Unique reasons, limit 5
|
|
1078
|
+
}
|
|
1079
|
+
for mode, stats in modes[:limit]
|
|
1080
|
+
]
|
|
1081
|
+
|
|
1082
|
+
def suggest_prevention(
|
|
1083
|
+
self, failure_mode: str, failure_reasons: list[str]
|
|
1084
|
+
) -> list[str]:
|
|
1085
|
+
"""
|
|
1086
|
+
Suggest how to prevent this type of failure.
|
|
1087
|
+
|
|
1088
|
+
Args:
|
|
1089
|
+
failure_mode: Failure mode category
|
|
1090
|
+
failure_reasons: List of failure reasons
|
|
1091
|
+
|
|
1092
|
+
Returns:
|
|
1093
|
+
List of prevention suggestions
|
|
1094
|
+
"""
|
|
1095
|
+
suggestions = []
|
|
1096
|
+
|
|
1097
|
+
if failure_mode == "syntax_error":
|
|
1098
|
+
suggestions.append(
|
|
1099
|
+
"Use syntax checking tools (e.g., Ruff, pylint) before code execution"
|
|
1100
|
+
)
|
|
1101
|
+
suggestions.append("Review Python syntax rules and indentation")
|
|
1102
|
+
elif failure_mode == "security_issue":
|
|
1103
|
+
suggestions.append("Run security scanning (Bandit) before learning patterns")
|
|
1104
|
+
suggestions.append("Review security best practices for the language")
|
|
1105
|
+
suggestions.append("Avoid insecure patterns (eval, exec, shell=True)")
|
|
1106
|
+
elif failure_mode == "performance_issue":
|
|
1107
|
+
suggestions.append("Profile code to identify bottlenecks")
|
|
1108
|
+
suggestions.append("Review algorithm complexity and optimization opportunities")
|
|
1109
|
+
suggestions.append("Consider caching or lazy evaluation")
|
|
1110
|
+
elif failure_mode == "logic_error":
|
|
1111
|
+
suggestions.append("Add unit tests to catch logic errors early")
|
|
1112
|
+
suggestions.append("Use type checking (mypy) to catch type-related issues")
|
|
1113
|
+
suggestions.append("Review code logic and edge cases")
|
|
1114
|
+
elif failure_mode == "complexity_issue":
|
|
1115
|
+
suggestions.append("Refactor complex code into smaller functions")
|
|
1116
|
+
suggestions.append("Reduce nesting depth and cyclomatic complexity")
|
|
1117
|
+
suggestions.append("Use design patterns to simplify code structure")
|
|
1118
|
+
elif failure_mode == "maintainability_issue":
|
|
1119
|
+
suggestions.append("Improve code documentation and naming")
|
|
1120
|
+
suggestions.append("Follow consistent coding style")
|
|
1121
|
+
suggestions.append("Reduce code duplication")
|
|
1122
|
+
else:
|
|
1123
|
+
suggestions.append("Review failure reasons and improve code quality")
|
|
1124
|
+
suggestions.append("Add more comprehensive testing")
|
|
1125
|
+
|
|
1126
|
+
return suggestions
|
|
1127
|
+
|
|
1128
|
+
|
|
1129
|
+
class FeedbackAnalyzer:
|
|
1130
|
+
"""Analyzes feedback from code scoring system and user input."""
|
|
1131
|
+
|
|
1132
|
+
def __init__(self):
|
|
1133
|
+
"""Initialize feedback analyzer."""
|
|
1134
|
+
self.feedback_history: list[dict[str, Any]] = []
|
|
1135
|
+
|
|
1136
|
+
def analyze_code_scores(
|
|
1137
|
+
self, scores: dict[str, float], threshold: float = 0.7
|
|
1138
|
+
) -> dict[str, Any]:
|
|
1139
|
+
"""
|
|
1140
|
+
Analyze code scoring results.
|
|
1141
|
+
|
|
1142
|
+
Args:
|
|
1143
|
+
scores: Dictionary of metric scores
|
|
1144
|
+
threshold: Quality threshold
|
|
1145
|
+
|
|
1146
|
+
Returns:
|
|
1147
|
+
Analysis results
|
|
1148
|
+
"""
|
|
1149
|
+
overall_score = scores.get("overall_score", 0.0)
|
|
1150
|
+
metrics_obj: object = scores.get("metrics", {})
|
|
1151
|
+
metrics: dict[str, float] = {}
|
|
1152
|
+
if isinstance(metrics_obj, dict):
|
|
1153
|
+
for metric, score in metrics_obj.items():
|
|
1154
|
+
if isinstance(score, (int, float)):
|
|
1155
|
+
metrics[str(metric)] = float(score)
|
|
1156
|
+
|
|
1157
|
+
# Identify weak areas
|
|
1158
|
+
weak_areas = [metric for metric, score in metrics.items() if score < threshold]
|
|
1159
|
+
|
|
1160
|
+
# Calculate improvement potential
|
|
1161
|
+
improvement_potential = {}
|
|
1162
|
+
for metric, score in metrics.items():
|
|
1163
|
+
if score < threshold:
|
|
1164
|
+
improvement_potential[metric] = threshold - score
|
|
1165
|
+
|
|
1166
|
+
analysis = {
|
|
1167
|
+
"overall_score": overall_score,
|
|
1168
|
+
"weak_areas": weak_areas,
|
|
1169
|
+
"improvement_potential": improvement_potential,
|
|
1170
|
+
"meets_threshold": overall_score >= threshold,
|
|
1171
|
+
"timestamp": datetime.now(UTC).isoformat(),
|
|
1172
|
+
}
|
|
1173
|
+
|
|
1174
|
+
self.feedback_history.append(analysis)
|
|
1175
|
+
return analysis
|
|
1176
|
+
|
|
1177
|
+
def correlate_prompt_changes(
|
|
1178
|
+
self, prompt_variants: list[str], quality_scores: list[float]
|
|
1179
|
+
) -> dict[str, float]:
|
|
1180
|
+
"""
|
|
1181
|
+
Correlate prompt changes with quality improvements.
|
|
1182
|
+
|
|
1183
|
+
Args:
|
|
1184
|
+
prompt_variants: List of prompt variant identifiers
|
|
1185
|
+
quality_scores: Corresponding quality scores
|
|
1186
|
+
|
|
1187
|
+
Returns:
|
|
1188
|
+
Dictionary mapping variants to quality scores
|
|
1189
|
+
"""
|
|
1190
|
+
correlations = {}
|
|
1191
|
+
for variant, score in zip(prompt_variants, quality_scores, strict=False):
|
|
1192
|
+
correlations[variant] = score
|
|
1193
|
+
|
|
1194
|
+
return correlations
|
|
1195
|
+
|
|
1196
|
+
def get_improvement_suggestions(self, analysis: dict[str, Any]) -> list[str]:
|
|
1197
|
+
"""
|
|
1198
|
+
Get improvement suggestions based on analysis.
|
|
1199
|
+
|
|
1200
|
+
Args:
|
|
1201
|
+
analysis: Analysis results
|
|
1202
|
+
|
|
1203
|
+
Returns:
|
|
1204
|
+
List of improvement suggestions
|
|
1205
|
+
"""
|
|
1206
|
+
suggestions = []
|
|
1207
|
+
|
|
1208
|
+
weak_areas = analysis.get("weak_areas", [])
|
|
1209
|
+
improvement_potential = analysis.get("improvement_potential", {})
|
|
1210
|
+
|
|
1211
|
+
for area in weak_areas:
|
|
1212
|
+
potential = improvement_potential.get(area, 0.0)
|
|
1213
|
+
if potential > 0.1:
|
|
1214
|
+
suggestions.append(
|
|
1215
|
+
f"Focus on improving {area} (potential: {potential:.2f})"
|
|
1216
|
+
)
|
|
1217
|
+
|
|
1218
|
+
if not suggestions:
|
|
1219
|
+
suggestions.append("Code quality is good, maintain current patterns")
|
|
1220
|
+
|
|
1221
|
+
return suggestions
|
|
1222
|
+
|
|
1223
|
+
|
|
1224
|
+
class AgentLearner:
|
|
1225
|
+
"""
|
|
1226
|
+
Core learning engine for agents.
|
|
1227
|
+
|
|
1228
|
+
Integrates pattern extraction, prompt optimization, and feedback analysis.
|
|
1229
|
+
"""
|
|
1230
|
+
|
|
1231
|
+
def __init__(
    self,
    capability_registry: CapabilityRegistry,
    expert_registry: Any,  # ExpertRegistry (required)
    memory_system: TaskMemorySystem | None = None,
    hardware_profile: HardwareProfile | None = None,
):
    """
    Initialize agent learner.

    Args:
        capability_registry: Capability registry
        expert_registry: Expert registry for best practices consultation (required)
        memory_system: Optional task memory system
        hardware_profile: Hardware profile (auto-detected if None)
    """
    # Auto-detect the hardware profile when the caller did not supply one.
    if hardware_profile is None:
        hardware_profile = HardwareProfiler().detect_profile()

    self.capability_registry = capability_registry
    self.memory_system = memory_system
    self.hardware_profile = hardware_profile
    # Must run after capability_registry is assigned — the helper reads it.
    self.learning_intensity = self._get_learning_intensity()

    # One scanner instance is shared by both extractors.
    shared_scanner = SecurityScanner()
    self.pattern_extractor = PatternExtractor(security_scanner=shared_scanner)
    self.anti_pattern_extractor = AntiPatternExtractor(
        security_scanner=shared_scanner
    )
    self.negative_feedback_handler = NegativeFeedbackHandler(
        anti_pattern_extractor=self.anti_pattern_extractor
    )
    self.failure_mode_analyzer = FailureModeAnalyzer()
    self.prompt_optimizer = PromptOptimizer(hardware_profile)
    self.feedback_analyzer = FeedbackAnalyzer()

    # Explainability components.
    self.decision_logger = DecisionReasoningLogger()
    self.pattern_explainer = PatternSelectionExplainer()
    self.impact_reporter = LearningImpactReporter()

    # Meta-learning components.
    self.effectiveness_tracker = LearningEffectivenessTracker()
    self.self_assessor = LearningSelfAssessor()
    self.adaptive_rate = AdaptiveLearningRate()
    self.strategy_selector = LearningStrategySelector()
    self.current_strategy = LearningStrategy.BALANCED

    # Decision engine (always required), wired to the shared decision logger
    # so its choices show up in explanations and the dashboard.
    consultant = BestPracticeConsultant(expert_registry)
    self.decision_engine = LearningDecisionEngine(
        capability_registry=self.capability_registry,
        best_practice_consultant=consultant,
        confidence_calculator=LearningConfidenceCalculator(),
        decision_logger=self.decision_logger,
    )

    # Aggregated monitoring dashboard over all learning components.
    self.dashboard = LearningDashboard(
        capability_registry=self.capability_registry,
        pattern_extractor=self.pattern_extractor,
        anti_pattern_extractor=self.anti_pattern_extractor,
        decision_logger=self.decision_logger,
        impact_reporter=self.impact_reporter,
    )
|
|
1300
|
+
|
|
1301
|
+
def _get_learning_intensity(self) -> LearningIntensity:
    """Resolve the learning intensity from the capability registry.

    The registry derives this from the detected hardware profile.
    """
    registry = self.capability_registry
    return registry.get_learning_intensity()
|
|
1304
|
+
|
|
1305
|
+
async def learn_from_task(
    self,
    capability_id: str,
    task_id: str,
    code: str | None = None,
    quality_scores: dict[str, float] | None = None,
    success: bool = True,
    duration: float = 0.0,
) -> dict[str, Any]:
    """
    Learn from a completed task.

    Pipeline: snapshot before-metrics -> security-scan the code ->
    update capability metrics -> (maybe) extract positive patterns ->
    (maybe) extract anti-patterns from failures / low-quality code ->
    analyze feedback scores -> store memory -> track effectiveness and
    report learning impact.

    Args:
        capability_id: Capability identifier
        task_id: Task identifier
        code: Optional source code
        quality_scores: Optional code scoring results
        success: Whether task succeeded
        duration: Task duration

    Returns:
        Learning results
    """
    results: dict[str, Any] = {
        "patterns_extracted": 0,
        "anti_patterns_extracted": 0,
        "prompt_optimized": False,
        "feedback_analyzed": False,
        "security_checked": False,
        "security_score": 0.0,
        "security_vulnerabilities": [],
        "failure_analyzed": False,
    }
    patterns: list[CodePattern] = []
    anti_patterns: list[CodePattern] = []

    # Snapshot capability metrics BEFORE learning, for effectiveness tracking.
    metric = self.capability_registry.get_capability(capability_id)
    before_metrics = {
        "quality_score": metric.quality_score if metric else 0.0,
        "success_rate": metric.success_rate if metric else 0.0,
        "usage_count": metric.usage_count if metric else 0,
    }

    # Extract quality score; defaults to a neutral 0.5 when no scores given.
    quality_score = 0.5
    if quality_scores:
        quality_score = (
            quality_scores.get("overall_score", 50.0) / 100.0
        )  # Normalize to 0-1 (assumes 0-100 scale)

    # Security check before learning.
    security_scanner = SecurityScanner()
    security_result = None
    if code:
        security_result = security_scanner.scan_code(code=code)
        results["security_checked"] = True
        results["security_score"] = security_result["security_score"]
        results["security_vulnerabilities"] = security_result["vulnerabilities"]

        # Skip pattern extraction if security score is too low,
        # but continue to extract anti-patterns from vulnerable code.
        if not security_scanner.is_safe_for_learning(
            code=code, threshold=self.pattern_extractor.security_threshold
        ):
            logger.debug(
                f"Skipping pattern extraction for task {task_id}: "
                f"security score {security_result['security_score']:.2f} "
                f"below threshold {self.pattern_extractor.security_threshold:.2f}"
            )
            # Still update capability metrics even if we skip pattern extraction.
            # NOTE(review): the unconditional update below runs as well, so on
            # this insecure-code path the metrics appear to be updated twice
            # (double-counting usage) — confirm this is intended.
            self.capability_registry.update_capability_metrics(
                capability_id=capability_id,
                success=success,
                duration=duration,
                quality_score=quality_score,
            )
            # Don't return early - continue to extract anti-patterns from vulnerable code.
            # The anti-pattern extraction code below will handle low-quality/vulnerable code.

    # Update capability metrics.
    self.capability_registry.update_capability_metrics(
        capability_id=capability_id,
        success=success,
        duration=duration,
        quality_score=quality_score,
    )

    # Extract patterns if code provided and quality is good.
    # Use decision engine for adaptive threshold, but ensure high-quality
    # code is always extracted.
    should_extract_patterns = False
    if code and self.learning_intensity != LearningIntensity.LOW:
        # Base threshold: extract if quality >= 0.7 (normalized 0-1 scale).
        base_threshold = 0.7

        # Use decision engine to get adaptive threshold (if available).
        metric = self.capability_registry.get_capability(capability_id)
        learned_data = {
            "usage_count": metric.usage_count if metric else 0,
            "success_rate": metric.success_rate if metric else 0.0,
            "quality_score": quality_score,
            "value": quality_score,  # The threshold value we're considering
            "context_relevance": 1.0,
        }
        context = {
            "hardware_profile": (
                self.hardware_profile.profile_type.value
                if hasattr(self.hardware_profile, "profile_type")
                else "unknown"
            ),
            "learning_intensity": self.learning_intensity.value,
            "task_id": task_id,
            "capability_id": capability_id,
        }

        # Use decision engine (always available).
        decision = await self.decision_engine.make_decision(
            decision_type="pattern_extraction_threshold",
            learned_data=learned_data,
            context=context,
            default_value=base_threshold,
        )

        # Determine threshold value from decision result.
        threshold_value = base_threshold  # Default
        if decision.result.value is not None:
            if isinstance(decision.result.value, (int, float)):
                threshold_value = float(decision.result.value)
            elif isinstance(decision.result.value, str):
                # Try to extract the first numeric value from the string.
                import re
                match = re.search(r'(\d+\.?\d*)', str(decision.result.value))
                if match:
                    threshold_value = float(match.group(1))

        # Extract patterns if quality_score meets threshold.
        # For new capabilities (low learned_confidence), still extract if quality is high.
        should_extract_patterns = quality_score >= threshold_value

        # Fallback: if quality is very high (>= 0.8), always extract regardless of decision.
        if quality_score >= 0.8:
            should_extract_patterns = True

        logger.debug(
            f"Pattern extraction decision: quality_score={quality_score:.3f}, "
            f"threshold={threshold_value:.3f}, should_extract={should_extract_patterns}"
        )

    if should_extract_patterns and code is not None:
        logger.debug(f"Attempting to extract patterns from code (length={len(code)})")
        patterns = self.pattern_extractor.extract_patterns(
            code=code, quality_score=quality_score, task_id=task_id
        )
        logger.debug(f"Extracted {len(patterns)} patterns from code")

        # Store patterns; re-seen patterns get their usage stats bumped.
        for pattern in patterns:
            if pattern.pattern_id not in self.pattern_extractor.patterns:
                self.pattern_extractor.patterns[pattern.pattern_id] = pattern
            else:
                # Update existing pattern.
                existing = self.pattern_extractor.patterns[pattern.pattern_id]
                existing.usage_count += 1
                existing.learned_from.append(task_id)

        results["patterns_extracted"] = len(patterns)

    # Handle failures: extract anti-patterns.
    if not success and code:
        failure_reasons = [f"Task {task_id} failed"]
        if quality_scores:
            # Add quality-based failure reasons.
            if quality_scores.get("security_score", 10.0) < 5.0:
                failure_reasons.append("Low security score")
            if quality_scores.get("overall_score", 100.0) < 50.0:
                failure_reasons.append("Low overall quality score")

        # Extract anti-patterns from failed code.
        anti_patterns = self.anti_pattern_extractor.extract_from_failure(
            code=code,
            task_id=task_id,
            failure_reasons=failure_reasons,
            quality_score=quality_score,
        )

        # Store anti-patterns.
        for anti_pattern in anti_patterns:
            if anti_pattern.pattern_id not in self.anti_pattern_extractor.anti_patterns:
                self.anti_pattern_extractor.anti_patterns[
                    anti_pattern.pattern_id
                ] = anti_pattern
            else:
                # Update existing anti-pattern.
                existing = self.anti_pattern_extractor.anti_patterns[
                    anti_pattern.pattern_id
                ]
                existing.usage_count += 1
                existing.learned_from.append(task_id)

        results["anti_patterns_extracted"] = len(anti_patterns)

        # Analyze failure mode.
        failure_analysis = self.failure_mode_analyzer.analyze_failure(
            code=code,
            task_id=task_id,
            failure_reasons=failure_reasons,
            quality_scores=quality_scores,
        )
        results["failure_analyzed"] = True
        results["failure_analysis"] = failure_analysis

    # Also extract anti-patterns from low-quality code (even if success=True).
    # NOTE(review): this can overwrite the "anti_patterns_extracted" count set
    # by the failure branch above — confirm the overwrite is intended.
    logger.debug(f"Anti-pattern extraction check: code={code is not None}, quality_score={quality_score}, threshold={self.anti_pattern_extractor.max_quality_threshold}, success={success}")
    if code and quality_score < self.anti_pattern_extractor.max_quality_threshold:
        failure_reasons = [f"Low quality score: {quality_score:.2f}"]
        anti_patterns = self.anti_pattern_extractor.extract_anti_patterns(
            code=code,
            quality_score=quality_score,
            task_id=task_id,
            failure_reasons=failure_reasons,
        )

        # Store anti-patterns.
        for anti_pattern in anti_patterns:
            if anti_pattern.pattern_id not in self.anti_pattern_extractor.anti_patterns:
                self.anti_pattern_extractor.anti_patterns[
                    anti_pattern.pattern_id
                ] = anti_pattern

        results["anti_patterns_extracted"] = len(anti_patterns)

    # Analyze feedback if scores provided.
    if quality_scores:
        analysis = self.feedback_analyzer.analyze_code_scores(quality_scores)
        results["feedback_analyzed"] = True
        results["feedback_analysis"] = analysis

        # Check if improvement is needed.
        if not analysis.get("meets_threshold", False):
            suggestions = self.feedback_analyzer.get_improvement_suggestions(
                analysis
            )
            results["improvement_suggestions"] = suggestions

    # Store in memory system if available.
    # NOTE(review): the guard requires success=True, so the FAILURE arm of the
    # ternary below is unreachable — confirm failures should not be stored.
    if self.memory_system and success:
        outcome = TaskOutcome.SUCCESS if success else TaskOutcome.FAILURE
        self.memory_system.store_memory(
            task_id=task_id,
            agent_id="unknown",  # Would be provided by agent
            command=capability_id,
            outcome=outcome,
            quality_score=quality_score,
            patterns_used=[p.pattern_id for p in patterns] if code else [],
        )

    # Track learning effectiveness: snapshot capability metrics AFTER learning.
    metric_after = self.capability_registry.get_capability(capability_id)
    after_metrics = {
        "quality_score": metric_after.quality_score if metric_after else 0.0,
        "success_rate": metric_after.success_rate if metric_after else 0.0,
        "usage_count": metric_after.usage_count if metric_after else 0,
    }

    # Track effectiveness of this learning session.
    session = self.effectiveness_tracker.track_effectiveness(
        capability_id=capability_id,
        before_metrics=before_metrics,
        after_metrics=after_metrics,
        strategies_used=[self.current_strategy.value],
    )

    # Adjust learning rate based on effectiveness.
    rate_adjustment = self.adaptive_rate.adjust_learning_intensity(
        session.effectiveness_score
    )
    results["learning_rate_adjustment"] = rate_adjustment

    # Generate impact report.
    impact_report = self.impact_reporter.generate_impact_report(
        capability_id=capability_id,
        before_metrics=before_metrics,
        after_metrics=after_metrics,
        learning_session_id=session.session_id,
    )
    results["learning_impact"] = impact_report

    return results
|
|
1593
|
+
|
|
1594
|
+
async def learn_from_rejection(
    self,
    capability_id: str,
    task_id: str,
    code: str,
    rejection_reason: str,
    quality_score: float = 0.5,
) -> dict[str, Any]:
    """
    Learn from user rejection.

    Records the rejection (mining anti-patterns from it) and then books
    the rejection against the capability as a failed use.

    Args:
        capability_id: Capability identifier
        task_id: Task identifier
        code: Rejected code
        rejection_reason: Reason for rejection
        quality_score: Quality score

    Returns:
        Learning results
    """
    # Record rejection and extract anti-patterns from the rejected code.
    extracted = self.negative_feedback_handler.record_rejection(
        code=code,
        task_id=task_id,
        reason=rejection_reason,
        quality_score=quality_score,
    )

    # A rejection counts as a failure for the capability's metrics.
    self.capability_registry.update_capability_metrics(
        capability_id=capability_id,
        success=False,
        duration=0.0,
        quality_score=quality_score,
    )

    return {
        "anti_patterns_extracted": len(extracted),
        "rejection_recorded": True,
    }
|
|
1640
|
+
|
|
1641
|
+
def get_learned_patterns(
    self,
    context: str,
    pattern_type: str | None = None,
    limit: int = 5,
    exclude_anti_patterns: bool = True,
) -> list[CodePattern]:
    """
    Get learned patterns for a context.

    Pure delegation to the pattern extractor's context lookup.

    Args:
        context: Context string
        pattern_type: Optional pattern type filter
        limit: Maximum results
        exclude_anti_patterns: If True, exclude anti-patterns (default: True)

    Returns:
        List of relevant patterns
    """
    extractor = self.pattern_extractor
    return extractor.get_patterns_for_context(
        context=context,
        pattern_type=pattern_type,
        limit=limit,
        exclude_anti_patterns=exclude_anti_patterns,
    )
|
|
1666
|
+
|
|
1667
|
+
def optimize_prompt(self, base_prompt: str, context: str | None = None) -> str:
    """
    Get optimized prompt.

    Prefers a proven prompt variant (average quality above 0.7) and
    otherwise falls back to hardware-aware optimization of the base prompt.

    Args:
        base_prompt: Base prompt
        context: Optional context (currently unused)

    Returns:
        Optimized prompt
    """
    champion = self.prompt_optimizer.get_best_variant()
    if champion and champion.average_quality > 0.7:
        return champion.prompt_template
    # No strong variant yet: optimize the base prompt for this hardware.
    return self.prompt_optimizer.optimize_for_hardware(base_prompt)
|
|
1685
|
+
|
|
1686
|
+
def explain_learning(
    self,
    capability_id: str,
    task_id: str | None = None,
    decision_id: str | None = None,
) -> dict[str, Any]:
    """
    Generate explanation for learning process.

    Args:
        capability_id: Capability identifier
        task_id: Optional task identifier
        decision_id: Optional decision identifier

    Returns:
        Explanation dictionary
    """
    report: dict[str, Any] = {
        "capability_id": capability_id,
        "task_id": task_id,
    }

    # Attach the reasoning behind one specific decision when requested.
    if decision_id:
        decision_details = self.decision_logger.explain_decision(decision_id)
        if decision_details:
            report["decision"] = decision_details

    # Explain which (non-anti) patterns would be selected for this capability.
    selected = self.get_learned_patterns(
        context=capability_id, exclude_anti_patterns=True
    )
    if selected:
        report["pattern_selection"] = self.pattern_explainer.explain_pattern_selection(
            selected_patterns=selected, context=capability_id
        )

    # Aggregate decision statistics are always included.
    report["decision_statistics"] = self.decision_logger.get_decision_statistics()

    return report
|
|
1728
|
+
|
|
1729
|
+
async def optimize_learning(
|
|
1730
|
+
self, capability_id: str | None = None
|
|
1731
|
+
) -> dict[str, Any]:
|
|
1732
|
+
"""
|
|
1733
|
+
Run meta-learning optimization.
|
|
1734
|
+
|
|
1735
|
+
Args:
|
|
1736
|
+
capability_id: Optional filter by capability
|
|
1737
|
+
|
|
1738
|
+
Returns:
|
|
1739
|
+
Optimization report
|
|
1740
|
+
"""
|
|
1741
|
+
optimization: dict[str, Any] = {
|
|
1742
|
+
"timestamp": datetime.now(UTC).isoformat(),
|
|
1743
|
+
"capability_id": capability_id,
|
|
1744
|
+
}
|
|
1745
|
+
|
|
1746
|
+
# Assess learning quality
|
|
1747
|
+
pattern_count = len(self.pattern_extractor.patterns)
|
|
1748
|
+
anti_pattern_count = len(self.anti_pattern_extractor.anti_patterns)
|
|
1749
|
+
|
|
1750
|
+
# Calculate average quality and security
|
|
1751
|
+
avg_quality = 0.0
|
|
1752
|
+
avg_security = 0.0
|
|
1753
|
+
if self.pattern_extractor.patterns:
|
|
1754
|
+
total_quality = sum(p.quality_score for p in self.pattern_extractor.patterns.values())
|
|
1755
|
+
total_security = sum(
|
|
1756
|
+
getattr(p, "security_score", 0.0)
|
|
1757
|
+
for p in self.pattern_extractor.patterns.values()
|
|
1758
|
+
)
|
|
1759
|
+
avg_quality = total_quality / len(self.pattern_extractor.patterns)
|
|
1760
|
+
avg_security = total_security / len(self.pattern_extractor.patterns)
|
|
1761
|
+
|
|
1762
|
+
quality_assessment = self.self_assessor.assess_learning_quality(
|
|
1763
|
+
pattern_count=pattern_count,
|
|
1764
|
+
anti_pattern_count=anti_pattern_count,
|
|
1765
|
+
average_quality=avg_quality,
|
|
1766
|
+
average_security=avg_security,
|
|
1767
|
+
)
|
|
1768
|
+
optimization["quality_assessment"] = quality_assessment
|
|
1769
|
+
|
|
1770
|
+
# Identify learning gaps
|
|
1771
|
+
capability_metrics = {}
|
|
1772
|
+
if capability_id:
|
|
1773
|
+
metric = self.capability_registry.get_capability(capability_id)
|
|
1774
|
+
if metric:
|
|
1775
|
+
capability_metrics = {
|
|
1776
|
+
"success_rate": metric.success_rate,
|
|
1777
|
+
"quality_score": metric.quality_score,
|
|
1778
|
+
"usage_count": metric.usage_count,
|
|
1779
|
+
}
|
|
1780
|
+
pattern_stats = {
|
|
1781
|
+
"total_patterns": pattern_count,
|
|
1782
|
+
"average_quality": avg_quality,
|
|
1783
|
+
"average_security": avg_security,
|
|
1784
|
+
}
|
|
1785
|
+
gaps = self.self_assessor.identify_learning_gaps(
|
|
1786
|
+
capability_metrics=capability_metrics,
|
|
1787
|
+
pattern_statistics=pattern_stats,
|
|
1788
|
+
)
|
|
1789
|
+
optimization["learning_gaps"] = gaps
|
|
1790
|
+
|
|
1791
|
+
# Get improvement suggestions
|
|
1792
|
+
suggestions = self.self_assessor.suggest_improvements(quality_assessment)
|
|
1793
|
+
optimization["improvement_suggestions"] = suggestions
|
|
1794
|
+
|
|
1795
|
+
# Select optimal strategy
|
|
1796
|
+
if capability_id:
|
|
1797
|
+
metric = self.capability_registry.get_capability(capability_id)
|
|
1798
|
+
current_effectiveness = metric.success_rate if metric else 0.5
|
|
1799
|
+
else:
|
|
1800
|
+
# Use average effectiveness
|
|
1801
|
+
roi = self.effectiveness_tracker.get_learning_roi(capability_id=capability_id)
|
|
1802
|
+
current_effectiveness = roi.get("average_effectiveness", 0.5)
|
|
1803
|
+
|
|
1804
|
+
hardware_profile_str = (
|
|
1805
|
+
self.hardware_profile.profile_type.value
|
|
1806
|
+
if hasattr(self.hardware_profile, "profile_type")
|
|
1807
|
+
else None
|
|
1808
|
+
)
|
|
1809
|
+
optimal_strategy = self.strategy_selector.select_strategy(
|
|
1810
|
+
capability_id=capability_id or "global",
|
|
1811
|
+
current_effectiveness=current_effectiveness,
|
|
1812
|
+
hardware_profile=hardware_profile_str,
|
|
1813
|
+
)
|
|
1814
|
+
optimization["optimal_strategy"] = optimal_strategy.value
|
|
1815
|
+
optimization["current_strategy"] = self.current_strategy.value
|
|
1816
|
+
|
|
1817
|
+
# Optimize thresholds
|
|
1818
|
+
current_threshold = self.pattern_extractor.min_quality_threshold
|
|
1819
|
+
metric = self.capability_registry.get_capability(capability_id) if capability_id else None
|
|
1820
|
+
success_rate = metric.success_rate if metric else 0.5
|
|
1821
|
+
optimized_threshold = self.adaptive_rate.optimize_thresholds(
|
|
1822
|
+
current_threshold=current_threshold,
|
|
1823
|
+
success_rate=success_rate,
|
|
1824
|
+
quality_score=avg_quality,
|
|
1825
|
+
)
|
|
1826
|
+
optimization["optimized_threshold"] = optimized_threshold
|
|
1827
|
+
optimization["current_threshold"] = current_threshold
|
|
1828
|
+
|
|
1829
|
+
# Update strategy if better one found
|
|
1830
|
+
if optimal_strategy != self.current_strategy:
|
|
1831
|
+
switch_result = self.strategy_selector.switch_strategy(
|
|
1832
|
+
current_strategy=self.current_strategy,
|
|
1833
|
+
new_strategy=optimal_strategy,
|
|
1834
|
+
)
|
|
1835
|
+
if switch_result["switched"]:
|
|
1836
|
+
self.current_strategy = optimal_strategy
|
|
1837
|
+
optimization["strategy_switched"] = True
|
|
1838
|
+
optimization["switch_result"] = switch_result
|
|
1839
|
+
else:
|
|
1840
|
+
optimization["strategy_switched"] = False
|
|
1841
|
+
|
|
1842
|
+
# Get effectiveness metrics
|
|
1843
|
+
effectiveness_metrics = self.effectiveness_tracker.get_learning_roi(
|
|
1844
|
+
capability_id=capability_id
|
|
1845
|
+
)
|
|
1846
|
+
optimization["effectiveness_metrics"] = effectiveness_metrics
|
|
1847
|
+
|
|
1848
|
+
return optimization
|
|
1849
|
+
|
|
1850
|
+
def should_refine_capability(self, capability_id: str) -> bool:
    """
    Determine if a capability should be refined.

    A capability is a refinement candidate when its quality score is
    below 0.7 while it has accumulated at least 10 uses.

    Args:
        capability_id: Capability identifier

    Returns:
        True if refinement is recommended
    """
    metric = self.capability_registry.get_capability(capability_id)
    # Unknown capabilities are never flagged for refinement.
    if not metric:
        return False

    low_quality = metric.quality_score < 0.7
    well_used = metric.usage_count >= 10
    return low_quality and well_used
|