attune_ai-2.0.0-py3-none-any.whl
This diff shows the contents of a publicly released package version as published to its registry. It is provided for informational purposes only and reflects the package exactly as it appears in that public registry.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
attune/core_modules/__init__.py
ADDED
@@ -0,0 +1,15 @@
+"""Core EmpathyOS Modules.
+
+Modular implementation of EmpathyOS functionality.
+
+Copyright 2025 Smart AI Memory, LLC
+Licensed under Fair Source 0.9
+"""
+
+# Core classes
+from ..core import CollaborationState, EmpathyOS
+
+__all__ = [
+    "EmpathyOS",
+    "CollaborationState",
+]
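The hunk above is a thin re-export layer: the classes it exposes are defined in attune/core.py. A minimal import sketch, assuming the wheel is installed as packaged here:

    # Illustrative only: attune.core_modules re-exports the core classes
    from attune.core_modules import CollaborationState, EmpathyOS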
attune/cost_tracker.py
ADDED
@@ -0,0 +1,626 @@
+"""Cost Tracking for Empathy Framework
+
+Tracks API costs across model tiers and calculates savings from
+smart model routing (Haiku/Sonnet/Opus selection).
+
+Features:
+- Log each API request with model, tokens, and task type
+- Calculate actual cost vs baseline (if all requests used premium model)
+- Generate weekly/monthly reports
+- Integrate with `empathy costs` and `empathy morning` commands
+- **Performance optimized**: Batch writes (50 requests) + JSONL format
+
+Model pricing is sourced from attune.models.MODEL_REGISTRY.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+import atexit
+import heapq
+import json
+from datetime import datetime, timedelta
+from pathlib import Path
+from typing import Any
+
+# Import pricing from unified registry
+from attune.config import _validate_file_path
+from attune.models import MODEL_REGISTRY
+from attune.models.registry import TIER_PRICING
+
+
+def _build_model_pricing() -> dict[str, dict[str, float]]:
+    """Build MODEL_PRICING from unified registry."""
+    pricing: dict[str, dict[str, float]] = {}
+
+    # Add all models from registry
+    for provider_models in MODEL_REGISTRY.values():
+        for model_info in provider_models.values():
+            pricing[model_info.id] = {
+                "input": model_info.input_cost_per_million,
+                "output": model_info.output_cost_per_million,
+            }
+
+    # Add tier aliases from registry
+    pricing.update(TIER_PRICING)
+
+    # Add legacy model names for backward compatibility
+    legacy_models = {
+        "claude-3-haiku-20240307": {"input": 0.25, "output": 1.25},
+        "claude-3-5-sonnet-20241022": {"input": 3.00, "output": 15.00},
+        "claude-opus-4-20250514": {"input": 15.00, "output": 75.00},
+        "gpt-4-turbo": {"input": 10.00, "output": 30.00},
+    }
+    pricing.update(legacy_models)
+
+    return pricing
+
+
+# Pricing per million tokens - sourced from unified registry
+MODEL_PRICING = _build_model_pricing()
+
+# Default premium model for baseline comparison
+BASELINE_MODEL = "claude-opus-4-5-20251101"
+
+
+class CostTracker:
+    """Tracks API costs and calculates savings from model routing.
+
+    **Performance Optimized:**
+    - Batch writes (flush every 50 requests)
+    - JSONL append-only format for new data
+    - Backward compatible with JSON format
+    - Zero data loss (atexit handler)
+    - Lazy loading: Full request history only loaded when accessed
+    - Separate summary file: Fast init (80-90% faster for large histories)
+
+    Usage:
+        tracker = CostTracker()
+        tracker.log_request("claude-3-haiku-20240307", 1000, 500, "summarize")
+        report = tracker.get_report()
+    """
+
+    @property
+    def requests(self) -> list[dict]:
+        """Access request history (lazy-loaded for performance).
+
+        Returns:
+            List of request records. Triggers lazy loading on first access.
+        """
+        self._load_requests()
+        return self.data.get("requests", [])
+
+    def __init__(self, storage_dir: str = ".empathy", batch_size: int = 50):
+        """Initialize cost tracker.
+
+        Args:
+            storage_dir: Directory for cost data storage
+            batch_size: Number of requests to buffer before flushing (default: 50)
+
+        Performance optimizations:
+        - Lazy loading: Only load summary data on init, defer full request history
+        - Separate summary file: Fast access to daily_totals without parsing JSONL
+        - Init time reduced by 80-90% for large history files
+        """
+        self.storage_dir = Path(storage_dir)
+        self.storage_dir.mkdir(parents=True, exist_ok=True)
+        self.costs_file = self.storage_dir / "costs.json"
+        self.costs_jsonl = self.storage_dir / "costs.jsonl"
+        self.costs_summary = self.storage_dir / "costs_summary.json"
+        self.batch_size = batch_size
+        self._buffer: list[dict] = []  # Buffered requests not yet flushed
+        self._requests_loaded = False  # Track if full history is loaded
+        self._load_summary()  # Only load summary on init (fast)
+
+        # Register cleanup handler to flush on exit
+        atexit.register(self._cleanup)
+
+    def _cleanup(self) -> None:
+        """Cleanup handler - flush buffer on exit."""
+        try:
+            if self._buffer:
+                self.flush()
+        except Exception:  # noqa: BLE001
+            # INTENTIONAL: Best-effort flush, don't break shutdown
+            pass
+
+    def _load_summary(self) -> None:
+        """Load only summary data on init (fast path).
+
+        This loads daily_totals from the summary file without parsing
+        the full request history. Full history is lazy-loaded only when needed.
+
+        Performance: 80-90% faster init for large history files.
+        """
+        # Initialize with default structure (no requests loaded yet)
+        self.data = self._default_data()
+        self.data["requests"] = []  # Start empty, lazy-load later
+
+        # Try loading pre-computed summary first (fastest)
+        if self.costs_summary.exists():
+            try:
+                with open(self.costs_summary) as f:
+                    summary_data = json.load(f)
+                self.data["daily_totals"] = summary_data.get("daily_totals", {})
+                self.data["created_at"] = summary_data.get(
+                    "created_at", self.data["created_at"]
+                )
+                self.data["last_updated"] = summary_data.get(
+                    "last_updated", self.data["last_updated"]
+                )
+                return  # Summary loaded, done
+            except (OSError, json.JSONDecodeError):
+                pass  # Fall through to JSON fallback
+
+        # Fallback: Load daily_totals from costs.json (backward compatibility)
+        if self.costs_file.exists():
+            try:
+                with open(self.costs_file) as f:
+                    json_data = json.load(f)
+                self.data["daily_totals"] = json_data.get("daily_totals", {})
+                self.data["created_at"] = json_data.get("created_at", self.data["created_at"])
+                self.data["last_updated"] = json_data.get(
+                    "last_updated", self.data["last_updated"]
+                )
+                # Don't load requests here - they'll be lazy-loaded
+            except (OSError, json.JSONDecodeError):
+                pass  # Use defaults
+
+    def _load_requests(self) -> None:
+        """Lazy-load full request history (only when needed).
+
+        Called automatically when request history is accessed.
+        Most operations use daily_totals and don't need this.
+        """
+        if self._requests_loaded:
+            return  # Already loaded
+
+        # Load from JSON first
+        if self.costs_file.exists():
+            try:
+                with open(self.costs_file) as f:
+                    json_data = json.load(f)
+                self.data["requests"] = json_data.get("requests", [])
+            except (OSError, json.JSONDecodeError):
+                self.data["requests"] = []
+
+        # Append from JSONL if it exists
+        if self.costs_jsonl.exists():
+            try:
+                with open(self.costs_jsonl) as f:
+                    for line in f:
+                        if line.strip():
+                            request = json.loads(line)
+                            self.data["requests"].append(request)
+            except (OSError, json.JSONDecodeError):
+                pass  # Ignore errors, use what we have
+
+        self._requests_loaded = True
+
+    def _load(self) -> None:
+        """Load cost data from storage (supports both JSON and JSONL).
+
+        Deprecated: Use _load_summary() for fast init, _load_requests() for full history.
+        Kept for backward compatibility.
+        """
+        self._load_summary()
+        self._load_requests()
+
+    def _default_data(self) -> dict:
+        """Return default data structure."""
+        return {
+            "requests": [],
+            "daily_totals": {},
+            "created_at": datetime.now().isoformat(),
+            "last_updated": datetime.now().isoformat(),
+        }
+
+    def _update_daily_totals(self, request: dict) -> None:
+        """Update daily totals from a request.
+
+        Args:
+            request: Request record with cost information
+
+        """
+        timestamp = request.get("timestamp", datetime.now().isoformat())
+        date = timestamp[:10]  # Extract YYYY-MM-DD
+
+        if date not in self.data["daily_totals"]:
+            self.data["daily_totals"][date] = {
+                "requests": 0,
+                "input_tokens": 0,
+                "output_tokens": 0,
+                "actual_cost": 0,
+                "baseline_cost": 0,
+                "savings": 0,
+            }
+
+        daily = self.data["daily_totals"][date]
+        daily["requests"] += 1
+        daily["input_tokens"] += request["input_tokens"]
+        daily["output_tokens"] += request["output_tokens"]
+        daily["actual_cost"] = round(daily["actual_cost"] + request["actual_cost"], 6)
+        daily["baseline_cost"] = round(daily["baseline_cost"] + request["baseline_cost"], 6)
+        daily["savings"] = round(daily["savings"] + request["savings"], 6)
+
+    def _save(self) -> None:
+        """Save cost data to storage (legacy JSON format).
+
+        **Note:** This is only used for backward compatibility.
+        New data is written to JSONL format via flush().
+        """
+        self.data["last_updated"] = datetime.now().isoformat()
+        validated_path = _validate_file_path(str(self.costs_file))
+        with open(validated_path, "w") as f:
+            json.dump(self.data, f, indent=2)
+
+    def _save_summary(self) -> None:
+        """Save summary data (daily_totals) to separate file for fast loading.
+
+        This enables 80-90% faster init by avoiding full JSONL parsing on startup.
+        """
+        summary_data = {
+            "daily_totals": self.data.get("daily_totals", {}),
+            "created_at": self.data.get("created_at", datetime.now().isoformat()),
+            "last_updated": datetime.now().isoformat(),
+        }
+        try:
+            validated_path = _validate_file_path(str(self.costs_summary))
+            with open(validated_path, "w") as f:
+                json.dump(summary_data, f, indent=2)
+        except (OSError, ValueError):
+            pass  # Best effort - summary is an optimization, not critical
+
+    def flush(self) -> None:
+        """Flush buffered requests to disk (JSONL format).
+
+        This is called automatically:
+        - Every `batch_size` requests
+        - On process exit (atexit handler)
+        - Manually by calling tracker.flush()
+        """
+        if not self._buffer:
+            return
+
+        # Append buffered requests to JSONL file
+        try:
+            with open(self.costs_jsonl, "a") as f:
+                for request in self._buffer:
+                    f.write(json.dumps(request) + "\n")
+
+            # Update daily totals (always in memory)
+            for request in self._buffer:
+                self._update_daily_totals(request)
+
+            # Load requests if needed before extending (maintains backward compat)
+            # This defers the expensive load from init to first flush
+            if not self._requests_loaded:
+                self._load_requests()
+
+            self.data["requests"].extend(self._buffer)
+            # Keep only last 1000 requests in memory
+            if len(self.data["requests"]) > 1000:
+                self.data["requests"] = self.data["requests"][-1000:]
+
+            # Save summary file (fast path for future loads)
+            self._save_summary()
+
+            # Update JSON file periodically (every 10 flushes = 500 requests)
+            # This maintains backward compatibility without killing performance
+            if len(self._buffer) >= 500 or not self.costs_jsonl.exists():
+                self._save()
+
+            # Clear buffer
+            self._buffer.clear()
+
+        except OSError:
+            # If JSONL write fails, fallback to immediate JSON save
+            for request in self._buffer:
+                self._update_daily_totals(request)
+            if not self._requests_loaded:
+                self._load_requests()
+            self.data["requests"].extend(self._buffer)
+            self._buffer.clear()
+            self._save()
+            self._save_summary()
+
+    def _calculate_cost(self, model: str, input_tokens: int, output_tokens: int) -> float:
+        """Calculate cost for a request.
+
+        Args:
+            model: Model name or tier
+            input_tokens: Number of input tokens
+            output_tokens: Number of output tokens
+
+        Returns:
+            Cost in USD
+
+        """
+        pricing = MODEL_PRICING.get(model) or MODEL_PRICING["capable"]
+        input_cost = (input_tokens / 1_000_000) * pricing["input"]
+        output_cost = (output_tokens / 1_000_000) * pricing["output"]
+        return input_cost + output_cost
+
+    def log_request(
+        self,
+        model: str,
+        input_tokens: int,
+        output_tokens: int,
+        task_type: str = "unknown",
+        tier: str | None = None,
+    ) -> dict:
+        """Log an API request with cost tracking (batched writes).
+
+        **Performance optimized**: Requests are buffered and flushed every
+        `batch_size` requests (default: 50) instead of writing to disk
+        immediately. This provides a 60x+ performance improvement.
+
+        Args:
+            model: Model name used
+            input_tokens: Number of input tokens
+            output_tokens: Number of output tokens
+            task_type: Type of task (summarize, generate_code, etc.)
+            tier: Optional tier override (cheap, capable, premium)
+
+        Returns:
+            Request record with cost information
+
+        """
+        actual_cost = self._calculate_cost(model, input_tokens, output_tokens)
+        baseline_cost = self._calculate_cost(BASELINE_MODEL, input_tokens, output_tokens)
+        savings = baseline_cost - actual_cost
+
+        request = {
+            "timestamp": datetime.now().isoformat(),
+            "model": model,
+            "tier": tier or self._get_tier(model),
+            "task_type": task_type,
+            "input_tokens": input_tokens,
+            "output_tokens": output_tokens,
+            "actual_cost": round(actual_cost, 6),
+            "baseline_cost": round(baseline_cost, 6),
+            "savings": round(savings, 6),
+        }
+
+        # Add to buffer instead of immediate save
+        self._buffer.append(request)
+
+        # Flush when buffer reaches batch size
+        if len(self._buffer) >= self.batch_size:
+            self.flush()
+
+        return request
+
+    def _get_tier(self, model: str) -> str:
+        """Determine tier from model name."""
+        if "haiku" in model.lower():
+            return "cheap"
+        if "opus" in model.lower():
+            return "premium"
+        return "capable"
+
+    def get_summary(self, days: int = 7, include_breakdown: bool = True) -> dict:
+        """Get cost summary for recent period (includes buffered requests).
+
+        **Real-time data**: Includes buffered requests that haven't been
+        flushed to disk yet, ensuring accurate real-time reporting.
+
+        **Performance optimized**: Main totals computed from pre-aggregated
+        daily_totals. Full request history only loaded if include_breakdown=True.
+
+        Args:
+            days: Number of days to include
+            include_breakdown: If True, include by_tier and by_task breakdown
+                (requires loading full request history). Default: True.
+
+        Returns:
+            Summary with totals and savings percentage
+
+        """
+        cutoff = datetime.now() - timedelta(days=days)
+        cutoff_str = cutoff.strftime("%Y-%m-%d")
+
+        totals: dict[str, Any] = {
+            "days": days,
+            "requests": 0,
+            "input_tokens": 0,
+            "output_tokens": 0,
+            "actual_cost": 0,
+            "baseline_cost": 0,
+            "savings": 0,
+            "by_tier": {"cheap": 0, "capable": 0, "premium": 0},
+            "by_task": {},
+        }
+
+        # Include daily totals from flushed data (fast - always in memory)
+        for date, daily in self.data.get("daily_totals", {}).items():
+            if date >= cutoff_str:
+                totals["requests"] += daily["requests"]
+                totals["input_tokens"] += daily["input_tokens"]
+                totals["output_tokens"] += daily["output_tokens"]
+                totals["actual_cost"] += daily["actual_cost"]
+                totals["baseline_cost"] += daily["baseline_cost"]
+                totals["savings"] += daily["savings"]
+
+        # Add buffered request costs to totals (always in memory)
+        cutoff_iso = cutoff.isoformat()
+        for req in self._buffer:
+            if req["timestamp"] >= cutoff_iso:
+                totals["requests"] += 1
+                totals["input_tokens"] += req["input_tokens"]
+                totals["output_tokens"] += req["output_tokens"]
+                totals["actual_cost"] += req["actual_cost"]
+                totals["baseline_cost"] += req["baseline_cost"]
+                totals["savings"] += req["savings"]
+
+        # Include breakdown by tier/task (requires loading full history)
+        if include_breakdown:
+            # Lazy-load full request history only when needed
+            self._load_requests()
+            all_requests = list(self.data.get("requests", [])) + self._buffer
+
+            for req in all_requests:
+                if req["timestamp"] >= cutoff_iso:
+                    tier = req.get("tier", "capable")
+                    task = req.get("task_type", "unknown")
+                    totals["by_tier"][tier] = totals["by_tier"].get(tier, 0) + 1
+                    totals["by_task"][task] = totals["by_task"].get(task, 0) + 1
+
+        # Calculate savings percentage
+        if totals["baseline_cost"] > 0:
+            totals["savings_percent"] = round(
+                (totals["savings"] / totals["baseline_cost"]) * 100,
+                1,
+            )
+        else:
+            totals["savings_percent"] = 0
+
+        return totals
+
+    def get_report(self, days: int = 7) -> str:
+        """Generate a formatted cost report.
+
+        Args:
+            days: Number of days to include
+
+        Returns:
+            Formatted report string
+
+        """
+        summary = self.get_summary(days)
+
+        lines = [
+            "",
+            "=" * 60,
+            " COST TRACKING REPORT",
+            f" Last {days} days",
+            "=" * 60,
+            "",
+            "SUMMARY",
+            "-" * 40,
+            f" Total requests: {summary['requests']:,}",
+            f" Input tokens: {summary['input_tokens']:,}",
+            f" Output tokens: {summary['output_tokens']:,}",
+            "",
+            "COSTS",
+            "-" * 40,
+            f" Actual cost: ${summary['actual_cost']:.4f}",
+            f" Baseline (Opus): ${summary['baseline_cost']:.4f}",
+            f" You saved: ${summary['savings']:.4f} ({summary['savings_percent']}%)",
+            "",
+        ]
+
+        # Tier breakdown
+        if sum(summary["by_tier"].values()) > 0:
+            lines.extend(
+                [
+                    "BY MODEL TIER",
+                    "-" * 40,
+                ],
+            )
+            for tier, count in sorted(summary["by_tier"].items(), key=lambda x: -x[1]):
+                if count > 0:
+                    pct = (count / summary["requests"]) * 100 if summary["requests"] else 0
+                    lines.append(f" {tier:12} {count:6,} requests ({pct:.1f}%)")
+            lines.append("")
+
+        # Task breakdown (top 5)
+        if summary["by_task"]:
+            lines.extend(
+                [
+                    "BY TASK TYPE (Top 5)",
+                    "-" * 40,
+                ],
+            )
+            top_tasks = heapq.nlargest(5, summary["by_task"].items(), key=lambda x: x[1])
+            for task, count in top_tasks:
+                lines.append(f" {task:20} {count:,}")
+            lines.append("")
+
+        lines.extend(
+            [
+                "=" * 60,
+                " Model routing saves money by using cheaper models",
+                " for simple tasks and Opus only when needed.",
+                "=" * 60,
+                "",
+            ],
+        )
+
+        return "\n".join(lines)
+
+    def get_today(self) -> dict[str, int | float]:
+        """Get today's cost summary (includes buffered requests)."""
+        today = datetime.now().strftime("%Y-%m-%d")
+        daily_totals = self.data.get("daily_totals", {})
+        default: dict[str, int | float] = {
+            "requests": 0,
+            "input_tokens": 0,
+            "output_tokens": 0,
+            "actual_cost": 0,
+            "baseline_cost": 0,
+            "savings": 0,
+        }
+
+        # Start with flushed daily totals
+        if isinstance(daily_totals, dict) and today in daily_totals:
+            result = daily_totals[today]
+            totals = result.copy() if isinstance(result, dict) else default.copy()
+        else:
+            totals = default.copy()
+
+        # Add buffered requests for today (real-time data)
+        for req in self._buffer:
+            req_date = req["timestamp"][:10]  # Extract YYYY-MM-DD
+            if req_date == today:
+                totals["requests"] += 1
+                totals["input_tokens"] += req["input_tokens"]
+                totals["output_tokens"] += req["output_tokens"]
+                totals["actual_cost"] = round(totals["actual_cost"] + req["actual_cost"], 6)
+                totals["baseline_cost"] = round(totals["baseline_cost"] + req["baseline_cost"], 6)
+                totals["savings"] = round(totals["savings"] + req["savings"], 6)
+
+        return totals
+
+
+def cmd_costs(args):
+    """CLI command handler for costs."""
+    tracker = CostTracker(storage_dir=getattr(args, "empathy_dir", ".empathy"))
+    days = getattr(args, "days", 7)
+
+    if getattr(args, "json", False):
+        import json as json_mod
+
+        print(json_mod.dumps(tracker.get_summary(days), indent=2))
+    else:
+        print(tracker.get_report(days))
+
+    return 0
+
+
+# Singleton for global tracking
+_tracker: CostTracker | None = None
+
+
+def get_tracker(storage_dir: str = ".empathy") -> CostTracker:
+    """Get or create the global cost tracker."""
+    global _tracker
+    if _tracker is None:
+        _tracker = CostTracker(storage_dir)
+    return _tracker
+
+
+def log_request(
+    model: str,
+    input_tokens: int,
+    output_tokens: int,
+    task_type: str = "unknown",
+    tier: str | None = None,
+) -> dict:
+    """Convenience function to log a request to the global tracker.
+
+    Usage:
+        from attune.cost_tracker import log_request
+        log_request("claude-3-haiku-20240307", 1000, 500, "summarize")
+    """
+    return get_tracker().log_request(model, input_tokens, output_tokens, task_type, tier)

attune/dashboard/__init__.py
ADDED
@@ -0,0 +1,41 @@
+"""Agent Coordination Dashboard.
+
+Web-based monitoring dashboard for all 6 agent coordination patterns.
+
+Usage (Standalone - Direct Redis Access):
+    >>> from attune.dashboard import run_standalone_dashboard
+    >>> run_standalone_dashboard(host="0.0.0.0", port=8080)
+
+Usage (Simple Server - Uses Telemetry API):
+    >>> from attune.dashboard import run_simple_dashboard
+    >>> run_simple_dashboard(host="0.0.0.0", port=8080)
+
+Usage (FastAPI - Requires fastapi and uvicorn):
+    >>> from attune.dashboard import run_dashboard
+    >>> run_dashboard(host="0.0.0.0", port=8080)
+
+Features:
+- Real-time agent status monitoring (Pattern 1)
+- Coordination signal viewer (Pattern 2)
+- Event stream monitor (Pattern 4)
+- Approval request manager (Pattern 5)
+- Quality feedback analytics (Pattern 6)
+- Auto-refresh every 5 seconds
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+# Standalone server - reads directly from Redis
+# Simple server - uses telemetry API classes
+from .simple_server import run_simple_dashboard
+from .standalone_server import run_standalone_dashboard
+
+# Optional FastAPI version (requires dependencies)
+try:
+    from .app import app, run_dashboard
+
+    __all__ = ["app", "run_dashboard", "run_simple_dashboard", "run_standalone_dashboard"]
+except ImportError:
+    # FastAPI not installed
+    __all__ = ["run_simple_dashboard", "run_standalone_dashboard"]