attune-ai 2.0.0 (attune_ai-2.0.0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
attune/socratic/ab_testing.py
@@ -0,0 +1,958 @@
"""A/B Testing for Workflow Optimization

Enables controlled experiments to compare different workflow configurations
and determine which performs better for specific goals or domains.

Key Features:
- Experiment definition with control and variant groups
- Statistical significance testing
- Automatic traffic allocation
- Multi-armed bandit for adaptive optimization
- Integration with feedback loop

Copyright 2026 Smart-AI-Memory
Licensed under Fair Source License 0.9
"""

from __future__ import annotations

import hashlib
import json
import logging
import math
import random  # Security Note: For A/B test simulation data, not cryptographic use
import time
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import Any

logger = logging.getLogger(__name__)

# =============================================================================
# DATA STRUCTURES
# =============================================================================


class ExperimentStatus(Enum):
    """Status of an A/B experiment."""

    DRAFT = "draft"
    RUNNING = "running"
    PAUSED = "paused"
    COMPLETED = "completed"
    STOPPED = "stopped"


class AllocationStrategy(Enum):
    """Strategy for allocating traffic to variants."""

    FIXED = "fixed"  # Fixed percentage split
    EPSILON_GREEDY = "epsilon_greedy"  # Explore vs exploit
    THOMPSON_SAMPLING = "thompson_sampling"  # Bayesian bandits
    UCB = "ucb"  # Upper confidence bound


@dataclass
class Variant:
    """A variant in an A/B experiment."""

    variant_id: str
    name: str
    description: str
    config: dict[str, Any]
    is_control: bool = False
    traffic_percentage: float = 50.0

    # Statistics
    impressions: int = 0
    conversions: int = 0
    total_success_score: float = 0.0

    @property
    def conversion_rate(self) -> float:
        """Calculate conversion rate."""
        if self.impressions == 0:
            return 0.0
        return self.conversions / self.impressions

    @property
    def avg_success_score(self) -> float:
        """Calculate average success score."""
        if self.impressions == 0:
            return 0.0
        return self.total_success_score / self.impressions

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary."""
        return {
            "variant_id": self.variant_id,
            "name": self.name,
            "description": self.description,
            "config": self.config,
            "is_control": self.is_control,
            "traffic_percentage": self.traffic_percentage,
            "impressions": self.impressions,
            "conversions": self.conversions,
            "total_success_score": self.total_success_score,
        }

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> Variant:
        """Create from dictionary."""
        return cls(
            variant_id=data["variant_id"],
            name=data["name"],
            description=data["description"],
            config=data["config"],
            is_control=data.get("is_control", False),
            traffic_percentage=data.get("traffic_percentage", 50.0),
            impressions=data.get("impressions", 0),
            conversions=data.get("conversions", 0),
            total_success_score=data.get("total_success_score", 0.0),
        )


@dataclass
class Experiment:
    """An A/B experiment definition."""

    experiment_id: str
    name: str
    description: str
    hypothesis: str
    variants: list[Variant]
    domain_filter: str | None = None
    goal_filter: str | None = None
    allocation_strategy: AllocationStrategy = AllocationStrategy.FIXED
    min_sample_size: int = 100
    max_duration_days: int = 30
    confidence_level: float = 0.95
    status: ExperimentStatus = ExperimentStatus.DRAFT
    created_at: datetime = field(default_factory=datetime.now)
    started_at: datetime | None = None
    ended_at: datetime | None = None

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary."""
        return {
            "experiment_id": self.experiment_id,
            "name": self.name,
            "description": self.description,
            "hypothesis": self.hypothesis,
            "variants": [v.to_dict() for v in self.variants],
            "domain_filter": self.domain_filter,
            "goal_filter": self.goal_filter,
            "allocation_strategy": self.allocation_strategy.value,
            "min_sample_size": self.min_sample_size,
            "max_duration_days": self.max_duration_days,
            "confidence_level": self.confidence_level,
            "status": self.status.value,
            "created_at": self.created_at.isoformat(),
            "started_at": self.started_at.isoformat() if self.started_at else None,
            "ended_at": self.ended_at.isoformat() if self.ended_at else None,
        }

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> Experiment:
        """Create from dictionary."""
        return cls(
            experiment_id=data["experiment_id"],
            name=data["name"],
            description=data["description"],
            hypothesis=data["hypothesis"],
            variants=[Variant.from_dict(v) for v in data["variants"]],
            domain_filter=data.get("domain_filter"),
            goal_filter=data.get("goal_filter"),
            allocation_strategy=AllocationStrategy(data.get("allocation_strategy", "fixed")),
            min_sample_size=data.get("min_sample_size", 100),
            max_duration_days=data.get("max_duration_days", 30),
            confidence_level=data.get("confidence_level", 0.95),
            status=ExperimentStatus(data.get("status", "draft")),
            created_at=datetime.fromisoformat(data["created_at"]),
            started_at=(
                datetime.fromisoformat(data["started_at"]) if data.get("started_at") else None
            ),
            ended_at=(datetime.fromisoformat(data["ended_at"]) if data.get("ended_at") else None),
        )

    @property
    def total_impressions(self) -> int:
        """Total impressions across all variants."""
        return sum(v.impressions for v in self.variants)

    @property
    def control(self) -> Variant | None:
        """Get control variant."""
        for v in self.variants:
            if v.is_control:
                return v
        return None

    @property
    def treatments(self) -> list[Variant]:
        """Get treatment variants (non-control)."""
        return [v for v in self.variants if not v.is_control]


@dataclass
class ExperimentResult:
    """Results and analysis of an experiment."""

    experiment: Experiment
    winner: Variant | None
    is_significant: bool
    p_value: float
    confidence_interval: tuple[float, float]
    lift: float  # Percentage improvement over control
    recommendation: str

# =============================================================================
# STATISTICAL ANALYSIS
# =============================================================================


class StatisticalAnalyzer:
    """Statistical analysis for A/B tests."""

    @staticmethod
    def z_test_proportions(
        n1: int,
        c1: int,
        n2: int,
        c2: int,
    ) -> tuple[float, float]:
        """Two-proportion z-test.

        Args:
            n1: Sample size for group 1
            c1: Conversions for group 1
            n2: Sample size for group 2
            c2: Conversions for group 2

        Returns:
            (z_score, p_value)
        """
        if n1 == 0 or n2 == 0:
            return 0.0, 1.0

        p1 = c1 / n1
        p2 = c2 / n2
        p_pooled = (c1 + c2) / (n1 + n2)

        if p_pooled == 0 or p_pooled == 1:
            return 0.0, 1.0

        se = math.sqrt(p_pooled * (1 - p_pooled) * (1 / n1 + 1 / n2))
        if se == 0:
            return 0.0, 1.0

        z = (p1 - p2) / se

        # Approximate p-value using normal CDF
        p_value = 2 * (1 - StatisticalAnalyzer._normal_cdf(abs(z)))

        return z, p_value

    @staticmethod
    def t_test_means(
        n1: int,
        mean1: float,
        var1: float,
        n2: int,
        mean2: float,
        var2: float,
    ) -> tuple[float, float]:
        """Welch's t-test for means.

        Args:
            n1, mean1, var1: Stats for group 1
            n2, mean2, var2: Stats for group 2

        Returns:
            (t_score, p_value)
        """
        if n1 < 2 or n2 < 2:
            return 0.0, 1.0

        se = math.sqrt(var1 / n1 + var2 / n2)
        if se == 0:
            return 0.0, 1.0

        t = (mean1 - mean2) / se

        # Welch-Satterthwaite degrees of freedom
        num = (var1 / n1 + var2 / n2) ** 2
        denom = (var1 / n1) ** 2 / (n1 - 1) + (var2 / n2) ** 2 / (n2 - 1)
        df = num / denom if denom > 0 else 1

        # Approximate p-value using t-distribution
        p_value = 2 * StatisticalAnalyzer._t_cdf(-abs(t), df)

        return t, p_value

    @staticmethod
    def confidence_interval(
        n: int,
        successes: int,
        confidence: float = 0.95,
    ) -> tuple[float, float]:
        """Wilson score interval for proportions.

        Args:
            n: Sample size
            successes: Number of successes
            confidence: Confidence level

        Returns:
            (lower, upper) bounds
        """
        if n == 0:
            return 0.0, 1.0

        z = StatisticalAnalyzer._z_score(confidence)
        p = successes / n

        denominator = 1 + z * z / n
        centre = p + z * z / (2 * n)
        adjustment = z * math.sqrt((p * (1 - p) + z * z / (4 * n)) / n)

        lower = max(0, (centre - adjustment) / denominator)
        upper = min(1, (centre + adjustment) / denominator)

        return lower, upper

    @staticmethod
    def _normal_cdf(x: float) -> float:
        """Approximate standard normal CDF."""
        return 0.5 * (1 + math.erf(x / math.sqrt(2)))

    @staticmethod
    def _t_cdf(t: float, df: float) -> float:
        """Approximate t-distribution CDF."""
        # Use normal approximation for large df
        if df > 30:
            return StatisticalAnalyzer._normal_cdf(t)

        # Beta function approximation
        x = df / (df + t * t)
        return 0.5 * StatisticalAnalyzer._incomplete_beta(df / 2, 0.5, x)

    @staticmethod
    def _incomplete_beta(a: float, b: float, x: float) -> float:
        """Approximate incomplete beta function."""
        if x == 0:
            return 0
        if x == 1:
            return 1

        # Continued fraction approximation (simplified)
        result = 0.0
        for k in range(100):
            term = (x**k) * math.gamma(a + k) / (math.gamma(k + 1) * math.gamma(a))
            result += term * ((1 - x) ** b) / (a + k)
            if abs(term) < 1e-10:
                break

        return result * math.gamma(a + b) / (math.gamma(a) * math.gamma(b))

    @staticmethod
    def _z_score(confidence: float) -> float:
        """Get z-score for confidence level."""
        # Common values
        z_scores = {
            0.90: 1.645,
            0.95: 1.96,
            0.99: 2.576,
        }
        return z_scores.get(confidence, 1.96)

# =============================================================================
# TRAFFIC ALLOCATOR
# =============================================================================


class TrafficAllocator:
    """Allocates traffic to experiment variants."""

    def __init__(self, experiment: Experiment):
        """Initialize allocator.

        Args:
            experiment: The experiment to allocate for
        """
        self.experiment = experiment
        self._random = random.Random()

    def allocate(self, user_id: str) -> Variant:
        """Allocate a user to a variant.

        Args:
            user_id: Unique user/session identifier

        Returns:
            Allocated variant
        """
        strategy = self.experiment.allocation_strategy

        if strategy == AllocationStrategy.FIXED:
            return self._fixed_allocation(user_id)
        elif strategy == AllocationStrategy.EPSILON_GREEDY:
            return self._epsilon_greedy(epsilon=0.1)
        elif strategy == AllocationStrategy.THOMPSON_SAMPLING:
            return self._thompson_sampling()
        elif strategy == AllocationStrategy.UCB:
            return self._ucb_allocation()
        else:
            return self._fixed_allocation(user_id)

    def _fixed_allocation(self, user_id: str) -> Variant:
        """Deterministic allocation based on user ID hash."""
        # Hash user ID for consistent assignment (not for security)
        hash_val = int(
            hashlib.md5(
                f"{self.experiment.experiment_id}:{user_id}".encode(), usedforsecurity=False
            ).hexdigest(),
            16,
        )
        bucket = hash_val % 100

        cumulative = 0.0
        for variant in self.experiment.variants:
            cumulative += variant.traffic_percentage
            if bucket < cumulative:
                return variant

        return self.experiment.variants[-1]

    def _epsilon_greedy(self, epsilon: float = 0.1) -> Variant:
        """Epsilon-greedy: explore with probability epsilon."""
        if self._random.random() < epsilon:
            # Explore: random variant
            return self._random.choice(self.experiment.variants)
        else:
            # Exploit: best performing variant
            return max(
                self.experiment.variants,
                key=lambda v: v.avg_success_score,
            )

    def _thompson_sampling(self) -> Variant:
        """Thompson sampling: Bayesian multi-armed bandit."""
        samples = []

        for variant in self.experiment.variants:
            # Beta distribution parameters
            alpha = variant.conversions + 1
            beta = (variant.impressions - variant.conversions) + 1

            # Sample from beta distribution
            sample = self._random.betavariate(alpha, beta)
            samples.append((sample, variant))

        # Select variant with highest sample
        return max(samples, key=lambda x: x[0])[1]

    def _ucb_allocation(self) -> Variant:
        """Upper Confidence Bound selection."""
        total_impressions = self.experiment.total_impressions or 1

        ucb_scores = []
        for variant in self.experiment.variants:
            if variant.impressions == 0:
                # Give unvisited variants high priority
                ucb_scores.append((float("inf"), variant))
            else:
                mean = variant.avg_success_score
                exploration = math.sqrt(2 * math.log(total_impressions) / variant.impressions)
                ucb = mean + exploration
                ucb_scores.append((ucb, variant))

        return max(ucb_scores, key=lambda x: x[0])[1]

# =============================================================================
# EXPERIMENT MANAGER
# =============================================================================


class ExperimentManager:
    """Manages A/B experiments lifecycle."""

    def __init__(self, storage_path: Path | str | None = None):
        """Initialize experiment manager.

        Args:
            storage_path: Path to persist experiments
        """
        if storage_path is None:
            storage_path = Path.home() / ".empathy" / "socratic" / "experiments.json"
        self.storage_path = Path(storage_path)
        self._experiments: dict[str, Experiment] = {}
        self._allocators: dict[str, TrafficAllocator] = {}

        # Load existing experiments
        self._load()

    def create_experiment(
        self,
        name: str,
        description: str,
        hypothesis: str,
        control_config: dict[str, Any],
        treatment_configs: list[dict[str, Any]],
        domain_filter: str | None = None,
        allocation_strategy: AllocationStrategy = AllocationStrategy.FIXED,
        min_sample_size: int = 100,
    ) -> Experiment:
        """Create a new experiment.

        Args:
            name: Experiment name
            description: Description
            hypothesis: What we're testing
            control_config: Configuration for control group
            treatment_configs: Configurations for treatment groups
            domain_filter: Optional domain to filter
            allocation_strategy: How to allocate traffic
            min_sample_size: Minimum samples before analysis

        Returns:
            Created experiment
        """
        experiment_id = hashlib.sha256(f"{name}:{time.time()}".encode()).hexdigest()[:12]

        # Create variants
        num_variants = 1 + len(treatment_configs)
        traffic_each = 100.0 / num_variants

        variants = [
            Variant(
                variant_id=f"{experiment_id}_control",
                name="Control",
                description="Control group with existing configuration",
                config=control_config,
                is_control=True,
                traffic_percentage=traffic_each,
            )
        ]

        for i, config in enumerate(treatment_configs):
            variants.append(
                Variant(
                    variant_id=f"{experiment_id}_treatment_{i}",
                    name=config.get("name", f"Treatment {i + 1}"),
                    description=config.get("description", ""),
                    config=config.get("config", config),
                    is_control=False,
                    traffic_percentage=traffic_each,
                )
            )

        experiment = Experiment(
            experiment_id=experiment_id,
            name=name,
            description=description,
            hypothesis=hypothesis,
            variants=variants,
            domain_filter=domain_filter,
            allocation_strategy=allocation_strategy,
            min_sample_size=min_sample_size,
        )

        self._experiments[experiment_id] = experiment
        self._save()

        return experiment

    def start_experiment(self, experiment_id: str) -> bool:
        """Start an experiment.

        Args:
            experiment_id: ID of experiment to start

        Returns:
            True if started successfully
        """
        experiment = self._experiments.get(experiment_id)
        if not experiment:
            return False

        if experiment.status != ExperimentStatus.DRAFT:
            return False

        experiment.status = ExperimentStatus.RUNNING
        experiment.started_at = datetime.now()
        self._allocators[experiment_id] = TrafficAllocator(experiment)
        self._save()

        return True

    def stop_experiment(self, experiment_id: str) -> ExperimentResult | None:
        """Stop an experiment and analyze results.

        Args:
            experiment_id: ID of experiment to stop

        Returns:
            Experiment results with analysis
        """
        experiment = self._experiments.get(experiment_id)
        if not experiment:
            return None

        experiment.status = ExperimentStatus.COMPLETED
        experiment.ended_at = datetime.now()
        self._save()

        return self.analyze_experiment(experiment_id)

    def allocate_variant(
        self,
        experiment_id: str,
        user_id: str,
    ) -> Variant | None:
        """Allocate a user to a variant.

        Args:
            experiment_id: Experiment ID
            user_id: User/session ID

        Returns:
            Allocated variant or None
        """
        experiment = self._experiments.get(experiment_id)
        if not experiment or experiment.status != ExperimentStatus.RUNNING:
            return None

        allocator = self._allocators.get(experiment_id)
        if not allocator:
            allocator = TrafficAllocator(experiment)
            self._allocators[experiment_id] = allocator

        return allocator.allocate(user_id)

    def record_impression(self, experiment_id: str, variant_id: str):
        """Record an impression for a variant.

        Args:
            experiment_id: Experiment ID
            variant_id: Variant ID
        """
        experiment = self._experiments.get(experiment_id)
        if not experiment:
            return

        for variant in experiment.variants:
            if variant.variant_id == variant_id:
                variant.impressions += 1
                break

        self._save()

    def record_conversion(
        self,
        experiment_id: str,
        variant_id: str,
        success_score: float = 1.0,
    ):
        """Record a conversion for a variant.

        Args:
            experiment_id: Experiment ID
            variant_id: Variant ID
            success_score: Score from 0-1
        """
        experiment = self._experiments.get(experiment_id)
        if not experiment:
            return

        for variant in experiment.variants:
            if variant.variant_id == variant_id:
                variant.conversions += 1
                variant.total_success_score += success_score
                break

        self._save()

    def analyze_experiment(self, experiment_id: str) -> ExperimentResult | None:
        """Analyze experiment results.

        Args:
            experiment_id: Experiment ID

        Returns:
            Analysis results
        """
        experiment = self._experiments.get(experiment_id)
        if not experiment:
            return None

        control = experiment.control
        if not control:
            return None

        treatments = experiment.treatments
        if not treatments:
            return None

        # Find best treatment
        best_treatment = max(treatments, key=lambda v: v.conversion_rate)

        # Statistical test
        z_score, p_value = StatisticalAnalyzer.z_test_proportions(
            control.impressions,
            control.conversions,
            best_treatment.impressions,
            best_treatment.conversions,
        )

        is_significant = p_value < (1 - experiment.confidence_level)

        # Calculate lift
        if control.conversion_rate > 0:
            lift = (
                (best_treatment.conversion_rate - control.conversion_rate) / control.conversion_rate
            ) * 100
        else:
            lift = 0.0

        # Confidence interval for treatment
        ci = StatisticalAnalyzer.confidence_interval(
            best_treatment.impressions,
            best_treatment.conversions,
            experiment.confidence_level,
        )

        # Determine winner
        winner = None
        recommendation = ""

        if is_significant:
            if best_treatment.conversion_rate > control.conversion_rate:
                winner = best_treatment
                recommendation = (
                    f"Adopt {best_treatment.name}. It shows {lift:.1f}% improvement "
                    f"over control with p-value {p_value:.4f}."
                )
            else:
                winner = control
                recommendation = "Keep control. Treatment did not show improvement."
        else:
            recommendation = (
                f"No significant difference detected (p={p_value:.4f}). "
                f"Consider running longer or increasing sample size."
            )

        return ExperimentResult(
            experiment=experiment,
            winner=winner,
            is_significant=is_significant,
            p_value=p_value,
            confidence_interval=ci,
            lift=lift,
            recommendation=recommendation,
        )

    def get_running_experiments(
        self,
        domain: str | None = None,
    ) -> list[Experiment]:
        """Get all running experiments.

        Args:
            domain: Optional domain filter

        Returns:
            List of running experiments
        """
        running = []
        for exp in self._experiments.values():
            if exp.status != ExperimentStatus.RUNNING:
                continue
            if domain and exp.domain_filter and exp.domain_filter != domain:
                continue
            running.append(exp)
        return running

    def get_experiment(self, experiment_id: str) -> Experiment | None:
        """Get experiment by ID."""
        return self._experiments.get(experiment_id)

    def list_experiments(self) -> list[Experiment]:
        """List all experiments."""
        return list(self._experiments.values())

    def _save(self):
        """Save experiments to storage."""
        self.storage_path.parent.mkdir(parents=True, exist_ok=True)

        data = {
            "version": 1,
            "experiments": [e.to_dict() for e in self._experiments.values()],
        }

        with self.storage_path.open("w") as f:
            json.dump(data, f, indent=2)

    def _load(self):
        """Load experiments from storage."""
        if not self.storage_path.exists():
            return

        try:
            with self.storage_path.open("r") as f:
                data = json.load(f)

            for exp_data in data.get("experiments", []):
                exp = Experiment.from_dict(exp_data)
                self._experiments[exp.experiment_id] = exp

                # Restore allocators for running experiments
                if exp.status == ExperimentStatus.RUNNING:
                    self._allocators[exp.experiment_id] = TrafficAllocator(exp)

        except Exception as e:
            logger.warning(f"Failed to load experiments: {e}")

# =============================================================================
# WORKFLOW A/B TESTING INTEGRATION
# =============================================================================


class WorkflowABTester:
    """High-level API for A/B testing workflow configurations.

    Integrates with the Socratic workflow builder to test different
    configurations and optimize over time.
    """

    def __init__(self, storage_path: Path | str | None = None):
        """Initialize the tester.

        Args:
            storage_path: Path to persist data
        """
        self.manager = ExperimentManager(storage_path)

    def create_workflow_experiment(
        self,
        name: str,
        hypothesis: str,
        control_agents: list[str],
        treatment_agents_list: list[list[str]],
        domain: str | None = None,
    ) -> str:
        """Create an experiment comparing workflow agent configurations.

        Args:
            name: Experiment name
            hypothesis: What we're testing
            control_agents: Agent list for control
            treatment_agents_list: Agent lists for treatments
            domain: Domain filter

        Returns:
            Experiment ID
        """
        control_config = {"agents": control_agents}
        treatment_configs = [
            {
                "name": f"Treatment {i + 1}",
                "config": {"agents": agents},
            }
            for i, agents in enumerate(treatment_agents_list)
        ]

        experiment = self.manager.create_experiment(
            name=name,
            description=f"Testing different agent configurations for {domain or 'general'} workflows",
            hypothesis=hypothesis,
            control_config=control_config,
            treatment_configs=treatment_configs,
            domain_filter=domain,
            allocation_strategy=AllocationStrategy.THOMPSON_SAMPLING,
        )

        return experiment.experiment_id

    def get_workflow_config(
        self,
        session_id: str,
        domain: str | None = None,
    ) -> tuple[dict[str, Any], str | None, str | None]:
        """Get workflow configuration for a session.

        Returns control config or allocates to an experiment.

        Args:
            session_id: Session ID for allocation
            domain: Optional domain filter

        Returns:
            (config, experiment_id, variant_id) or (default_config, None, None)
        """
        # Check for running experiments
        experiments = self.manager.get_running_experiments(domain)

        for exp in experiments:
            variant = self.manager.allocate_variant(exp.experiment_id, session_id)
            if variant:
                self.manager.record_impression(exp.experiment_id, variant.variant_id)
                return (variant.config, exp.experiment_id, variant.variant_id)

        # No experiment, return default
        return ({}, None, None)

    def record_workflow_result(
        self,
        experiment_id: str,
        variant_id: str,
        success: bool,
        success_score: float = 0.0,
    ):
        """Record the result of a workflow execution.

        Args:
            experiment_id: Experiment ID
            variant_id: Variant ID
            success: Whether workflow succeeded
            success_score: Success score (0-1)
        """
        if success:
            self.manager.record_conversion(
                experiment_id,
                variant_id,
                success_score,
            )

    def get_best_config(self, domain: str | None = None) -> dict[str, Any]:
        """Get the best known configuration for a domain.

        Args:
            domain: Domain filter

        Returns:
            Best configuration based on completed experiments
        """
        best_config: dict[str, Any] = {}
        best_score = 0.0

        for exp in self.manager.list_experiments():
            if exp.status != ExperimentStatus.COMPLETED:
                continue
            if domain and exp.domain_filter != domain:
                continue

            result = self.manager.analyze_experiment(exp.experiment_id)
            if result and result.winner:
                if result.winner.avg_success_score > best_score:
                    best_score = result.winner.avg_success_score
                    best_config = result.winner.config

        return best_config
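
Usage sketch (not part of the wheel contents listed above): a minimal example of driving the ExperimentManager API shown in this diff. The import path assumes the module is attune/socratic/ab_testing.py, as inferred from the file list and docstring; the storage path, experiment name, agent lists, and simulated outcome counts are illustrative assumptions, not data from the package.

from pathlib import Path

from attune.socratic.ab_testing import AllocationStrategy, ExperimentManager

# Illustrative temporary storage path; the default would be ~/.empathy/socratic/experiments.json.
manager = ExperimentManager(storage_path=Path("/tmp/attune_ab_demo.json"))

experiment = manager.create_experiment(
    name="review-agent-count",
    description="Compare two-agent vs three-agent review crews",
    hypothesis="Adding a security reviewer raises the workflow success rate",
    control_config={"agents": ["architect", "reviewer"]},
    treatment_configs=[
        {"name": "With security reviewer", "config": {"agents": ["architect", "reviewer", "security"]}},
    ],
    allocation_strategy=AllocationStrategy.FIXED,
    min_sample_size=100,
)
manager.start_experiment(experiment.experiment_id)

# Each session ID is hashed to a variant; impressions and conversions feed the two-proportion z-test.
for i in range(200):
    variant = manager.allocate_variant(experiment.experiment_id, user_id=f"session-{i}")
    if variant is None:
        continue
    manager.record_impression(experiment.experiment_id, variant.variant_id)
    succeeded = (i % 3 != 0) if variant.is_control else (i % 4 != 0)  # made-up outcomes for the demo
    if succeeded:
        manager.record_conversion(experiment.experiment_id, variant.variant_id, success_score=1.0)

result = manager.stop_experiment(experiment.experiment_id)
if result is not None:
    print(f"significant={result.is_significant} lift={result.lift:.1f}% -> {result.recommendation}")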