attune_ai-2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
@@ -0,0 +1,567 @@
+"""Agent-to-LLM Feedback Loop for Quality-Based Learning.
+
+Pattern 6 from Agent Coordination Architecture - Collect quality ratings
+on LLM responses and use feedback to inform routing decisions.
+
+Usage:
+    # Record feedback after LLM response
+    feedback = FeedbackLoop()
+    feedback.record_feedback(
+        workflow_name="code-review",
+        stage_name="analysis",
+        tier=ModelTier.CHEAP,
+        quality_score=0.8,
+        metadata={
+            "response_length": 500,
+            "tokens": 150,
+            "latency_ms": 1200
+        }
+    )
+
+    # Get tier recommendation based on historical performance
+    recommendation = feedback.recommend_tier(
+        workflow_name="code-review",
+        stage_name="analysis"
+    )
+    if recommendation.recommended_tier == ModelTier.CAPABLE:
+        print(f"Upgrade to CAPABLE tier (confidence: {recommendation.confidence})")
+
+    # Get quality stats for analysis
+    stats = feedback.get_quality_stats(
+        workflow_name="code-review",
+        stage_name="analysis"
+    )
+    print(f"Average quality: {stats.avg_quality}")
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+from __future__ import annotations
+
+import logging
+from dataclasses import dataclass, field
+from datetime import datetime
+from enum import Enum
+from typing import Any
+from uuid import uuid4
+
+logger = logging.getLogger(__name__)
+
+
+class ModelTier(str, Enum):
+    """Model tier enum matching workflows.base.ModelTier."""
+
+    CHEAP = "cheap"
+    CAPABLE = "capable"
+    PREMIUM = "premium"
+
+
+@dataclass
+class FeedbackEntry:
+    """Quality feedback for an LLM response.
+
+    Represents a single quality rating for a workflow stage execution.
+    """
+
+    feedback_id: str
+    workflow_name: str
+    stage_name: str
+    tier: str  # ModelTier value
+    quality_score: float  # 0.0 (bad) to 1.0 (excellent)
+    timestamp: datetime
+    metadata: dict[str, Any] = field(default_factory=dict)
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert to dictionary for serialization."""
+        return {
+            "feedback_id": self.feedback_id,
+            "workflow_name": self.workflow_name,
+            "stage_name": self.stage_name,
+            "tier": self.tier,
+            "quality_score": self.quality_score,
+            "timestamp": self.timestamp.isoformat() if isinstance(self.timestamp, datetime) else self.timestamp,
+            "metadata": self.metadata,
+        }
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any]) -> FeedbackEntry:
+        """Create from dictionary."""
+        timestamp = data.get("timestamp")
+        if isinstance(timestamp, str):
+            timestamp = datetime.fromisoformat(timestamp)
+        elif not isinstance(timestamp, datetime):
+            timestamp = datetime.utcnow()
+
+        # Handle missing feedback_id (legacy entries)
+        feedback_id = data.get("feedback_id")
+        if not feedback_id:
+            feedback_id = f"fb-{int(timestamp.timestamp()*1000)}"
+
+        return cls(
+            feedback_id=feedback_id,
+            workflow_name=data["workflow_name"],
+            stage_name=data["stage_name"],
+            tier=data["tier"],
+            quality_score=data["quality_score"],
+            timestamp=timestamp,
+            metadata=data.get("metadata", {}),
+        )
+
+
+@dataclass
+class QualityStats:
+    """Quality statistics for a workflow stage."""
+
+    workflow_name: str
+    stage_name: str
+    tier: str
+    avg_quality: float
+    min_quality: float
+    max_quality: float
+    sample_count: int
+    recent_trend: float  # -1.0 (declining) to 1.0 (improving)
+
+
+@dataclass
+class TierRecommendation:
+    """Tier recommendation based on quality feedback."""
+
+    current_tier: str
+    recommended_tier: str
+    confidence: float  # 0.0 (low) to 1.0 (high)
+    reason: str
+    stats: dict[str, QualityStats]  # Stats by tier
+
+
+class FeedbackLoop:
+    """Agent-to-LLM feedback loop for quality-based learning.
+
+    Collects quality ratings on LLM responses and uses feedback to:
+    - Recommend tier upgrades/downgrades
+    - Track quality trends over time
+    - Identify underperforming stages
+    - Optimize routing based on historical performance
+
+    Attributes:
+        FEEDBACK_TTL: Feedback entry TTL (7 days)
+        MIN_SAMPLES: Minimum samples for recommendation (10)
+        QUALITY_THRESHOLD: Quality threshold for tier upgrade (0.7)
+    """
+
+    FEEDBACK_TTL = 604800  # 7 days (60*60*24*7)
+    MIN_SAMPLES = 10  # Minimum samples for recommendation
+    QUALITY_THRESHOLD = 0.7  # Quality below this triggers upgrade recommendation
+
+    def __init__(self, memory=None):
+        """Initialize feedback loop.
+
+        Args:
+            memory: Memory instance for storing feedback
+        """
+        self.memory = memory
+
+        if self.memory is None:
+            try:
+                from attune.telemetry import UsageTracker
+
+                tracker = UsageTracker.get_instance()
+                if hasattr(tracker, "_memory"):
+                    self.memory = tracker._memory
+            except (ImportError, AttributeError):
+                pass
+
+        if self.memory is None:
+            logger.warning("No memory backend available for feedback loop")
+
+    def record_feedback(
+        self,
+        workflow_name: str,
+        stage_name: str,
+        tier: str | ModelTier,
+        quality_score: float,
+        metadata: dict[str, Any] | None = None,
+    ) -> str:
+        """Record quality feedback for a workflow stage execution.
+
+        Args:
+            workflow_name: Name of workflow
+            stage_name: Name of stage within workflow
+            tier: Model tier used (CHEAP, CAPABLE, PREMIUM)
+            quality_score: Quality rating 0.0-1.0 (0=bad, 1=excellent)
+            metadata: Optional metadata (tokens, latency, etc.)
+
+        Returns:
+            Feedback ID if stored, empty string otherwise
+
+        Example:
+            >>> feedback = FeedbackLoop()
+            >>> feedback.record_feedback(
+            ...     workflow_name="code-review",
+            ...     stage_name="analysis",
+            ...     tier=ModelTier.CHEAP,
+            ...     quality_score=0.85,
+            ...     metadata={"tokens": 150, "latency_ms": 1200}
+            ... )
+        """
+        if not self.memory:
+            logger.debug("Cannot record feedback: no memory backend")
+            return ""
+
+        # Validate quality score
+        if not 0.0 <= quality_score <= 1.0:
+            logger.warning(f"Invalid quality score: {quality_score} (must be 0.0-1.0)")
+            return ""
+
+        # Convert tier to string if ModelTier enum
+        if isinstance(tier, ModelTier):
+            tier = tier.value
+
+        feedback_id = f"feedback_{uuid4().hex[:8]}"
+
+        entry = FeedbackEntry(
+            feedback_id=feedback_id,
+            workflow_name=workflow_name,
+            stage_name=stage_name,
+            tier=tier,
+            quality_score=quality_score,
+            timestamp=datetime.utcnow(),
+            metadata=metadata or {},
+        )
+
+        # Store feedback
+        # Key format: feedback:{workflow}:{stage}:{tier}:{id}
+        key = f"feedback:{workflow_name}:{stage_name}:{tier}:{feedback_id}"
+
+        try:
+            # Use direct Redis access for custom TTL
+            if hasattr(self.memory, "_client") and self.memory._client:
+                import json
+
+                self.memory._client.setex(key, self.FEEDBACK_TTL, json.dumps(entry.to_dict()))
+            else:
+                logger.warning("Cannot store feedback: no Redis backend available")
+                return ""
+        except Exception as e:
+            logger.error(f"Failed to store feedback: {e}")
+            return ""
+
+        logger.debug(
+            f"Recorded feedback: {workflow_name}/{stage_name} tier={tier} quality={quality_score:.2f}"
+        )
+        return feedback_id
+
+    def get_feedback_history(
+        self, workflow_name: str, stage_name: str, tier: str | ModelTier | None = None, limit: int = 100
+    ) -> list[FeedbackEntry]:
+        """Get feedback history for a workflow stage.
+
+        Args:
+            workflow_name: Name of workflow
+            stage_name: Name of stage
+            tier: Optional filter by tier
+            limit: Maximum number of entries to return
+
+        Returns:
+            List of feedback entries (newest first)
+        """
+        if not self.memory or not hasattr(self.memory, "_client"):
+            return []
+
+        # Convert tier to string if ModelTier enum
+        if isinstance(tier, ModelTier):
+            tier = tier.value
+
+        try:
+            # Build search pattern
+            if tier:
+                pattern = f"feedback:{workflow_name}:{stage_name}:{tier}:*"
+            else:
+                pattern = f"feedback:{workflow_name}:{stage_name}:*"
+
+            keys = self.memory._client.keys(pattern)
+
+            entries = []
+            for key in keys:
+                if isinstance(key, bytes):
+                    key = key.decode("utf-8")
+
+                # Retrieve entry
+                data = self._retrieve_feedback(key)
+                if data:
+                    try:
+                        entries.append(FeedbackEntry.from_dict(data))
+                    except Exception as e:
+                        logger.error(f"Failed to parse feedback entry {key}: {e}, data={data}")
+                        continue
+
+                if len(entries) >= limit:
+                    break
+
+            # Sort by timestamp (newest first)
+            entries.sort(key=lambda e: e.timestamp, reverse=True)
+
+            return entries[:limit]
+        except Exception as e:
+            logger.error(f"Failed to get feedback history: {e}")
+            return []
+
+    def _retrieve_feedback(self, key: str) -> dict[str, Any] | None:
+        """Retrieve feedback entry from memory."""
+        if not self.memory:
+            return None
+
+        try:
+            # Use direct Redis access (feedback keys are stored without prefix)
+            if hasattr(self.memory, "_client"):
+                import json
+
+                data = self.memory._client.get(key)
+                if data:
+                    if isinstance(data, bytes):
+                        data = data.decode("utf-8")
+                    return json.loads(data)
+            return None
+        except Exception as e:
+            logger.debug(f"Failed to retrieve feedback: {e}")
+            return None
+
+    def get_quality_stats(
+        self, workflow_name: str, stage_name: str, tier: str | ModelTier | None = None
+    ) -> QualityStats | None:
+        """Get quality statistics for a workflow stage.
+
+        Args:
+            workflow_name: Name of workflow
+            stage_name: Name of stage
+            tier: Optional filter by tier
+
+        Returns:
+            Quality statistics or None if insufficient data
+        """
+        history = self.get_feedback_history(workflow_name, stage_name, tier=tier)
+
+        if not history:
+            return None
+
+        # Calculate statistics
+        quality_scores = [entry.quality_score for entry in history]
+
+        avg_quality = sum(quality_scores) / len(quality_scores)
+        min_quality = min(quality_scores)
+        max_quality = max(quality_scores)
+
+        # Calculate trend (recent vs older feedback)
+        if len(history) >= 4:
+            recent = quality_scores[: len(quality_scores) // 2]
+            older = quality_scores[len(quality_scores) // 2 :]
+            recent_avg = sum(recent) / len(recent)
+            older_avg = sum(older) / len(older)
+            recent_trend = (recent_avg - older_avg) / max(older_avg, 0.1)  # Normalized difference
+        else:
+            recent_trend = 0.0
+
+        tier_str = tier.value if isinstance(tier, ModelTier) else (tier or "all")
+
+        return QualityStats(
+            workflow_name=workflow_name,
+            stage_name=stage_name,
+            tier=tier_str,
+            avg_quality=avg_quality,
+            min_quality=min_quality,
+            max_quality=max_quality,
+            sample_count=len(history),
+            recent_trend=recent_trend,
+        )
+
+    def recommend_tier(
+        self, workflow_name: str, stage_name: str, current_tier: str | ModelTier | None = None
+    ) -> TierRecommendation:
+        """Recommend optimal tier based on quality feedback.
+
+        Analyzes historical quality data and recommends:
+        - Downgrade if current tier consistently delivers high quality (cost optimization)
+        - Upgrade if current tier delivers poor quality (quality optimization)
+        - Keep current if quality is acceptable
+
+        Args:
+            workflow_name: Name of workflow
+            stage_name: Name of stage
+            current_tier: Current tier in use (if known)
+
+        Returns:
+            Tier recommendation with confidence and reasoning
+        """
+        # Convert tier to string if ModelTier enum
+        if isinstance(current_tier, ModelTier):
+            current_tier = current_tier.value
+
+        # Get stats for all tiers
+        stats_by_tier = {}
+        for tier in ["cheap", "capable", "premium"]:
+            stats = self.get_quality_stats(workflow_name, stage_name, tier=tier)
+            if stats:
+                stats_by_tier[tier] = stats
+
+        # No data - default recommendation
+        if not stats_by_tier:
+            return TierRecommendation(
+                current_tier=current_tier or "unknown",
+                recommended_tier=current_tier or "cheap",
+                confidence=0.0,
+                reason="No feedback data available",
+                stats={},
+            )
+
+        # Determine current tier if not provided
+        if not current_tier:
+            # Use tier with most recent feedback
+            all_history = self.get_feedback_history(workflow_name, stage_name, tier=None, limit=1)
+            if all_history:
+                current_tier = all_history[0].tier
+            else:
+                current_tier = "cheap"
+
+        current_stats = stats_by_tier.get(current_tier)
+
+        # Insufficient data for current tier
+        if not current_stats or current_stats.sample_count < self.MIN_SAMPLES:
+            return TierRecommendation(
+                current_tier=current_tier,
+                recommended_tier=current_tier,
+                confidence=0.0,
+                reason=f"Insufficient data (need {self.MIN_SAMPLES} samples, have {current_stats.sample_count if current_stats else 0})",
+                stats=stats_by_tier,
+            )
+
+        # Analyze quality
+        avg_quality = current_stats.avg_quality
+        confidence = min(current_stats.sample_count / (self.MIN_SAMPLES * 2), 1.0)
+
+        # Decision logic
+        if avg_quality < self.QUALITY_THRESHOLD:
+            # Poor quality - recommend upgrade
+            if current_tier == "cheap":
+                recommended = "capable"
+                reason = f"Low quality ({avg_quality:.2f}) - upgrade for better results"
+            elif current_tier == "capable":
+                recommended = "premium"
+                reason = f"Low quality ({avg_quality:.2f}) - upgrade to premium tier"
+            else:  # premium
+                recommended = "premium"
+                reason = f"Already using premium tier (quality: {avg_quality:.2f})"
+                confidence = 1.0
+        elif avg_quality > 0.9 and current_tier != "cheap":
+            # Excellent quality - consider downgrade for cost optimization
+            if current_tier == "premium":
+                # Check if capable tier also has good quality
+                capable_stats = stats_by_tier.get("capable")
+                if capable_stats and capable_stats.avg_quality > 0.85:
+                    recommended = "capable"
+                    reason = f"Excellent quality ({avg_quality:.2f}) - downgrade to save cost"
+                else:
+                    recommended = "premium"
+                    reason = f"Excellent quality ({avg_quality:.2f}) - keep premium for consistency"
+            elif current_tier == "capable":
+                # Check if cheap tier also has good quality
+                cheap_stats = stats_by_tier.get("cheap")
+                if cheap_stats and cheap_stats.avg_quality > 0.85:
+                    recommended = "cheap"
+                    reason = f"Excellent quality ({avg_quality:.2f}) - downgrade to save cost"
+                else:
+                    recommended = "capable"
+                    reason = f"Excellent quality ({avg_quality:.2f}) - keep capable tier"
+            else:
+                recommended = current_tier
+                reason = f"Excellent quality ({avg_quality:.2f}) - maintain current tier"
+        else:
+            # Acceptable quality - keep current tier
+            recommended = current_tier
+            reason = f"Acceptable quality ({avg_quality:.2f}) - maintain current tier"
+
+        return TierRecommendation(
+            current_tier=current_tier,
+            recommended_tier=recommended,
+            confidence=confidence,
+            reason=reason,
+            stats=stats_by_tier,
+        )
+
+    def get_underperforming_stages(
+        self, workflow_name: str, quality_threshold: float = 0.7
+    ) -> list[tuple[str, QualityStats]]:
+        """Get workflow stages/tiers with poor quality scores.
+
+        Args:
+            workflow_name: Name of workflow
+            quality_threshold: Threshold below which stage/tier is considered underperforming
+
+        Returns:
+            List of (stage_name, stats) tuples for underperforming stage/tier combinations
+            The stage_name includes the tier for clarity (e.g., "analysis/cheap")
+        """
+        if not self.memory or not hasattr(self.memory, "_client"):
+            return []
+
+        try:
+            # Find all feedback keys for this workflow
+            pattern = f"feedback:{workflow_name}:*"
+            keys = self.memory._client.keys(pattern)
+
+            # Extract unique stage/tier combinations
+            stage_tier_combos = set()
+            for key in keys:
+                if isinstance(key, bytes):
+                    key = key.decode("utf-8")
+                # Parse key: feedback:{workflow}:{stage}:{tier}:{id}
+                parts = key.split(":")
+                if len(parts) >= 4:
+                    stage_name = parts[2]
+                    tier = parts[3]
+                    stage_tier_combos.add((stage_name, tier))
+
+            # Get stats for each stage/tier combination
+            underperforming = []
+            for stage_name, tier in stage_tier_combos:
+                stats = self.get_quality_stats(workflow_name, stage_name, tier=tier)
+                if stats and stats.avg_quality < quality_threshold:
+                    # Include tier in the stage name for clarity
+                    stage_label = f"{stage_name}/{tier}"
+                    underperforming.append((stage_label, stats))
+
+            # Sort by quality (worst first)
+            underperforming.sort(key=lambda x: x[1].avg_quality)
+
+            return underperforming
+        except Exception as e:
+            logger.error(f"Failed to get underperforming stages: {e}")
+            return []
+
+    def clear_feedback(self, workflow_name: str, stage_name: str | None = None) -> int:
+        """Clear feedback history for a workflow or stage.
+
+        Args:
+            workflow_name: Name of workflow
+            stage_name: Optional stage name (clears all stages if None)
+
+        Returns:
+            Number of feedback entries cleared
+        """
+        if not self.memory or not hasattr(self.memory, "_client"):
+            return 0
+
+        try:
+            if stage_name:
+                pattern = f"feedback:{workflow_name}:{stage_name}:*"
+            else:
+                pattern = f"feedback:{workflow_name}:*"
+
+            keys = self.memory._client.keys(pattern)
+            if not keys:
+                return 0
+
+            deleted = self.memory._client.delete(*keys)
+            return deleted
+        except Exception as e:
+            logger.error(f"Failed to clear feedback: {e}")
+            return 0
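
For orientation, a minimal integration sketch follows. It is not part of the package: it assumes the module above ships as attune/telemetry/feedback_loop.py (consistent with the file listing) and that a Redis server is reachable; the _RedisMemory shim and score_response heuristic are invented purely for illustration, while FeedbackLoop, ModelTier, record_feedback, and recommend_tier are the APIs shown in the diff.

# Hypothetical wiring sketch -- not shipped in attune-ai 2.0.0.
# Only FeedbackLoop / ModelTier and their methods come from the diff above;
# the Redis shim and the scoring heuristic are placeholders.
import redis

from attune.telemetry.feedback_loop import FeedbackLoop, ModelTier


class _RedisMemory:
    """Minimal stand-in exposing the `_client` attribute FeedbackLoop reads."""

    def __init__(self, url: str = "redis://localhost:6379/0") -> None:
        self._client = redis.Redis.from_url(url)


def score_response(text: str) -> float:
    """Placeholder quality heuristic; a real scorer would be rubric- or model-based."""
    return 0.9 if len(text) > 200 else 0.5


feedback = FeedbackLoop(memory=_RedisMemory())

# After a stage runs, record how well the chosen tier performed.
feedback.record_feedback(
    workflow_name="code-review",
    stage_name="analysis",
    tier=ModelTier.CHEAP,
    quality_score=score_response("...LLM output..."),
    metadata={"tokens": 150, "latency_ms": 1200},
)

# Before the next run, check whether enough evidence supports a tier change.
rec = feedback.recommend_tier("code-review", "analysis", current_tier=ModelTier.CHEAP)
if rec.recommended_tier != rec.current_tier and rec.confidence >= 0.5:
    print(f"Switch tier: {rec.current_tier} -> {rec.recommended_tier} ({rec.reason})")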