attune-ai 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
|
@@ -0,0 +1,591 @@
|
|
|
1
|
+
"""Usage Tracker for Empathy Framework Telemetry.
|
|
2
|
+
|
|
3
|
+
Privacy-first, local-only tracking of LLM calls to measure actual cost savings.
|
|
4
|
+
All data stored locally in ~/.attune/telemetry/ as JSON Lines format.
|
|
5
|
+
|
|
6
|
+
Copyright 2025 Smart-AI-Memory
|
|
7
|
+
Licensed under Fair Source License 0.9
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import hashlib
import json
import logging
import threading
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Any
|
|
17
|
+
|
|
18
|
+
logger = logging.getLogger(__name__)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class UsageTracker:
|
|
22
|
+
"""Privacy-first local telemetry tracker.
|
|
23
|
+
|
|
24
|
+
Tracks LLM calls to JSON Lines format with automatic rotation
|
|
25
|
+
and 90-day retention. Thread-safe with atomic writes.
|
|
26
|
+
|
|
27
|
+
All user identifiers are SHA256 hashed for privacy.
|
|
28
|
+
No prompts, responses, file paths, or PII are ever tracked.
|
|
29
|
+
"""
|
|
30
|
+
|
|
31
|
+
# Class-level lock for thread safety across all instances
|
|
32
|
+
_lock = threading.Lock()
|
|
33
|
+
# Singleton instance
|
|
34
|
+
_instance: "UsageTracker | None" = None
|
|
35
|
+
|
|
36
|
+
def __init__(
|
|
37
|
+
self,
|
|
38
|
+
telemetry_dir: Path | None = None,
|
|
39
|
+
retention_days: int = 90,
|
|
40
|
+
max_file_size_mb: int = 10,
|
|
41
|
+
):
|
|
42
|
+
"""Initialize UsageTracker.
|
|
43
|
+
|
|
44
|
+
Args:
|
|
45
|
+
telemetry_dir: Directory for telemetry files.
|
|
46
|
+
Defaults to ~/.attune/telemetry/
|
|
47
|
+
retention_days: Days to retain telemetry data (default: 90)
|
|
48
|
+
max_file_size_mb: Max size in MB before rotation (default: 10)
|
|
49
|
+
|
|
50
|
+
"""
|
|
51
|
+
self.telemetry_dir = telemetry_dir or Path.home() / ".empathy" / "telemetry"
|
|
52
|
+
self.retention_days = retention_days
|
|
53
|
+
self.max_file_size_mb = max_file_size_mb
|
|
54
|
+
self.usage_file = self.telemetry_dir / "usage.jsonl"
|
|
55
|
+
|
|
56
|
+
# Create directory if needed (gracefully handle permission errors)
|
|
57
|
+
try:
|
|
58
|
+
self.telemetry_dir.mkdir(parents=True, exist_ok=True)
|
|
59
|
+
except (OSError, PermissionError):
|
|
60
|
+
# Can't create directory - telemetry will be disabled
|
|
61
|
+
logger.debug(f"Failed to create telemetry directory: {self.telemetry_dir}")
|
|
62
|
+
|
|
63
|
+
@classmethod
|
|
64
|
+
def get_instance(cls, **kwargs: Any) -> "UsageTracker":
|
|
65
|
+
"""Get singleton instance of UsageTracker.
|
|
66
|
+
|
|
67
|
+
Args:
|
|
68
|
+
**kwargs: Arguments passed to __init__ if creating new instance
|
|
69
|
+
|
|
70
|
+
Returns:
|
|
71
|
+
Singleton UsageTracker instance
|
|
72
|
+
|
|
73
|
+
"""
|
|
74
|
+
if cls._instance is None:
|
|
75
|
+
cls._instance = cls(**kwargs)
|
|
76
|
+
return cls._instance
|
|
77
|
+
|
|
78
|
+
def track_llm_call(
|
|
79
|
+
self,
|
|
80
|
+
workflow: str,
|
|
81
|
+
stage: str | None,
|
|
82
|
+
tier: str,
|
|
83
|
+
model: str,
|
|
84
|
+
provider: str,
|
|
85
|
+
cost: float,
|
|
86
|
+
tokens: dict[str, int],
|
|
87
|
+
cache_hit: bool,
|
|
88
|
+
cache_type: str | None,
|
|
89
|
+
duration_ms: int,
|
|
90
|
+
user_id: str | None = None,
|
|
91
|
+
prompt_cache_hit: bool = False,
|
|
92
|
+
prompt_cache_creation_tokens: int = 0,
|
|
93
|
+
prompt_cache_read_tokens: int = 0,
|
|
94
|
+
) -> None:
|
|
95
|
+
"""Track a single LLM call with prompt caching metrics.
|
|
96
|
+
|
|
97
|
+
Args:
|
|
98
|
+
workflow: Workflow name (e.g., "code-review")
|
|
99
|
+
stage: Stage name (e.g., "analysis"), optional
|
|
100
|
+
tier: Model tier (CHEAP, CAPABLE, PREMIUM)
|
|
101
|
+
model: Model ID (e.g., "claude-sonnet-4.5")
|
|
102
|
+
provider: Provider name (anthropic, openai, etc.)
|
|
103
|
+
cost: Cost in USD
|
|
104
|
+
tokens: Dict with "input" and "output" keys
|
|
105
|
+
cache_hit: Whether this was a local cache hit
|
|
106
|
+
cache_type: Cache type if hit ("hash", "hybrid", etc.)
|
|
107
|
+
duration_ms: Call duration in milliseconds
|
|
108
|
+
user_id: Optional user identifier (will be hashed)
|
|
109
|
+
prompt_cache_hit: Whether Anthropic prompt cache was used
|
|
110
|
+
prompt_cache_creation_tokens: Tokens written to Anthropic cache
|
|
111
|
+
prompt_cache_read_tokens: Tokens read from Anthropic cache
|
|
112
|
+
|
|
113
|
+
"""
|
|
114
|
+
# Build entry
|
|
115
|
+
entry: dict[str, Any] = {
|
|
116
|
+
"v": "1.0",
|
|
117
|
+
"ts": datetime.utcnow().isoformat() + "Z",
|
|
118
|
+
"workflow": workflow,
|
|
119
|
+
"tier": tier,
|
|
120
|
+
"model": model,
|
|
121
|
+
"provider": provider,
|
|
122
|
+
"cost": round(cost, 6),
|
|
123
|
+
"tokens": tokens,
|
|
124
|
+
"cache": {"hit": cache_hit},
|
|
125
|
+
"duration_ms": duration_ms,
|
|
126
|
+
"user_id": self._hash_user_id(user_id or "default"),
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
# Add optional fields
|
|
130
|
+
if stage:
|
|
131
|
+
entry["stage"] = stage
|
|
132
|
+
if cache_hit and cache_type:
|
|
133
|
+
entry["cache"]["type"] = cache_type
|
|
134
|
+
|
|
135
|
+
# Add prompt cache metrics (Anthropic-specific)
|
|
136
|
+
if prompt_cache_hit or prompt_cache_creation_tokens > 0 or prompt_cache_read_tokens > 0:
|
|
137
|
+
entry["prompt_cache"] = {
|
|
138
|
+
"hit": prompt_cache_hit,
|
|
139
|
+
"creation_tokens": prompt_cache_creation_tokens,
|
|
140
|
+
"read_tokens": prompt_cache_read_tokens,
|
|
141
|
+
}
|
|
142
|
+
|
|
143
|
+
# Write entry (thread-safe, atomic)
|
|
144
|
+
try:
|
|
145
|
+
self._write_entry(entry)
|
|
146
|
+
# Check if rotation needed
|
|
147
|
+
self._rotate_if_needed()
|
|
148
|
+
except OSError as e:
|
|
149
|
+
# File system errors - log but don't crash
|
|
150
|
+
logger.debug(f"Failed to write telemetry entry: {e}")
|
|
151
|
+
except Exception as ex:
|
|
152
|
+
# INTENTIONAL: Telemetry failures should never crash the workflow
|
|
153
|
+
logger.debug(f"Unexpected error writing telemetry entry: {ex}")
|
|
154
|
+
|
|
155
|
+
def _hash_user_id(self, user_id: str) -> str:
|
|
156
|
+
"""Hash user ID with SHA256 for privacy.
|
|
157
|
+
|
|
158
|
+
Args:
|
|
159
|
+
user_id: User identifier to hash
|
|
160
|
+
|
|
161
|
+
Returns:
|
|
162
|
+
First 16 characters of SHA256 hash
|
|
163
|
+
|
|
164
|
+
"""
|
|
165
|
+
return hashlib.sha256(user_id.encode()).hexdigest()[:16]
|
|
166
|
+
|
|
167
|
+
def _write_entry(self, entry: dict[str, Any]) -> None:
|
|
168
|
+
"""Write entry to JSON Lines file atomically.
|
|
169
|
+
|
|
170
|
+
Uses atomic write pattern: write to temp file, then rename.
|
|
171
|
+
This ensures no partial writes even with concurrent access.
|
|
172
|
+
|
|
173
|
+
Args:
|
|
174
|
+
entry: Dictionary entry to write
|
|
175
|
+
|
|
176
|
+
"""
|
|
177
|
+
with self._lock:
|
|
178
|
+
# Write to temp file
|
|
179
|
+
temp_file = self.usage_file.with_suffix(".tmp")
|
|
180
|
+
try:
|
|
181
|
+
# Append to temp file
|
|
182
|
+
with open(temp_file, "a", encoding="utf-8") as f:
|
|
183
|
+
json.dump(entry, f, separators=(",", ":"))
|
|
184
|
+
f.write("\n")
|
|
185
|
+
|
|
186
|
+
# Atomic rename: temp -> usage.jsonl
|
|
187
|
+
# If usage.jsonl exists, we need to append
|
|
188
|
+
if self.usage_file.exists():
|
|
189
|
+
# Read temp file content
|
|
190
|
+
with open(temp_file, encoding="utf-8") as f:
|
|
191
|
+
new_line = f.read()
|
|
192
|
+
# Append to main file
|
|
193
|
+
with open(self.usage_file, "a", encoding="utf-8") as f:
|
|
194
|
+
f.write(new_line)
|
|
195
|
+
# Clean up temp file
|
|
196
|
+
temp_file.unlink()
|
|
197
|
+
else:
|
|
198
|
+
# Just rename temp to main
|
|
199
|
+
temp_file.replace(self.usage_file)
|
|
200
|
+
except OSError:
|
|
201
|
+
# Clean up temp file if it exists
|
|
202
|
+
if temp_file.exists():
|
|
203
|
+
try:
|
|
204
|
+
temp_file.unlink()
|
|
205
|
+
except OSError:
|
|
206
|
+
pass
|
|
207
|
+
raise
|
|
208
|
+
|
|
209
|
+
def _rotate_if_needed(self) -> None:
|
|
210
|
+
"""Rotate log file if size exceeds max_file_size_mb.
|
|
211
|
+
|
|
212
|
+
Rotates usage.jsonl -> usage.YYYY-MM-DD.jsonl
|
|
213
|
+
Also cleans up files older than retention_days.
|
|
214
|
+
"""
|
|
215
|
+
if not self.usage_file.exists():
|
|
216
|
+
return
|
|
217
|
+
|
|
218
|
+
# Check file size
|
|
219
|
+
size_mb = self.usage_file.stat().st_size / (1024 * 1024)
|
|
220
|
+
if size_mb < self.max_file_size_mb:
|
|
221
|
+
return
|
|
222
|
+
|
|
223
|
+
with self._lock:
|
|
224
|
+
# Rotate: usage.jsonl -> usage.YYYY-MM-DD.jsonl
|
|
225
|
+
timestamp = datetime.now().strftime("%Y-%m-%d")
|
|
226
|
+
rotated_file = self.telemetry_dir / f"usage.{timestamp}.jsonl"
|
|
227
|
+
|
|
228
|
+
# If rotated file already exists, append a counter
|
|
229
|
+
counter = 1
|
|
230
|
+
while rotated_file.exists():
|
|
231
|
+
rotated_file = self.telemetry_dir / f"usage.{timestamp}.{counter}.jsonl"
|
|
232
|
+
counter += 1
|
|
233
|
+
|
|
234
|
+
# Rename current file
|
|
235
|
+
self.usage_file.rename(rotated_file)
|
|
236
|
+
|
|
237
|
+
# Clean up old files
|
|
238
|
+
self._cleanup_old_files()
|
|
239
|
+
|
|
240
|
+
def _cleanup_old_files(self) -> None:
|
|
241
|
+
"""Remove files older than retention_days."""
|
|
242
|
+
cutoff = datetime.now() - timedelta(days=self.retention_days)
|
|
243
|
+
|
|
244
|
+
for file in self.telemetry_dir.glob("usage.*.jsonl"):
|
|
245
|
+
try:
|
|
246
|
+
# Get file modification time
|
|
247
|
+
mtime = datetime.fromtimestamp(file.stat().st_mtime)
|
|
248
|
+
if mtime < cutoff:
|
|
249
|
+
file.unlink()
|
|
250
|
+
logger.debug(f"Deleted old telemetry file: {file.name}")
|
|
251
|
+
except (OSError, ValueError):
|
|
252
|
+
# File system errors - log but continue
|
|
253
|
+
logger.debug(f"Failed to clean up telemetry file: {file.name}")
|
|
254
|
+
|
|
255
|
+
def get_recent_entries(
|
|
256
|
+
self,
|
|
257
|
+
limit: int = 20,
|
|
258
|
+
days: int | None = None,
|
|
259
|
+
) -> list[dict[str, Any]]:
|
|
260
|
+
"""Read recent telemetry entries.
|
|
261
|
+
|
|
262
|
+
Args:
|
|
263
|
+
limit: Maximum number of entries to return (default: 20)
|
|
264
|
+
days: Only return entries from last N days (optional)
|
|
265
|
+
|
|
266
|
+
Returns:
|
|
267
|
+
List of telemetry entries (most recent first)
|
|
268
|
+
|
|
269
|
+
"""
|
|
270
|
+
entries: list[dict[str, Any]] = []
|
|
271
|
+
cutoff_time = datetime.utcnow() - timedelta(days=days) if days else None
|
|
272
|
+
|
|
273
|
+
# Read all relevant files
|
|
274
|
+
files = sorted(self.telemetry_dir.glob("usage*.jsonl"), reverse=True)
|
|
275
|
+
|
|
276
|
+
for file in files:
|
|
277
|
+
if not file.exists():
|
|
278
|
+
continue
|
|
279
|
+
|
|
280
|
+
try:
|
|
281
|
+
with open(file, encoding="utf-8") as f:
|
|
282
|
+
for line in f:
|
|
283
|
+
if not line.strip():
|
|
284
|
+
continue
|
|
285
|
+
try:
|
|
286
|
+
entry = json.loads(line)
|
|
287
|
+
# Check timestamp if filtering by days
|
|
288
|
+
if cutoff_time:
|
|
289
|
+
ts = datetime.fromisoformat(entry["ts"].rstrip("Z"))
|
|
290
|
+
if ts < cutoff_time:
|
|
291
|
+
continue
|
|
292
|
+
entries.append(entry)
|
|
293
|
+
except (json.JSONDecodeError, KeyError, ValueError):
|
|
294
|
+
# Skip invalid entries
|
|
295
|
+
continue
|
|
296
|
+
except OSError:
|
|
297
|
+
# File read errors - log but continue
|
|
298
|
+
logger.debug(f"Failed to read telemetry file: {file.name}")
|
|
299
|
+
continue
|
|
300
|
+
|
|
301
|
+
# Sort by timestamp (most recent first) and limit
|
|
302
|
+
entries.sort(key=lambda e: e.get("ts", ""), reverse=True)
|
|
303
|
+
return entries[:limit]
|
|
304
|
+
|
|
305
|
+
def get_stats(self, days: int = 30) -> dict[str, Any]:
    """Calculate telemetry statistics.

    Args:
        days: Number of days to analyze (default: 30)

    Returns:
        Dictionary with: total_calls, total_cost (USD), total_tokens_input,
        total_tokens_output, cache_hits, cache_misses, cache_hit_rate
        (percentage), and cost breakdowns by_tier, by_workflow, by_provider.

    """
    records = self.get_recent_entries(limit=100000, days=days)

    if not records:
        # Nothing recorded in the window: return an all-zero summary.
        return {
            "total_calls": 0,
            "total_cost": 0.0,
            "total_tokens_input": 0,
            "total_tokens_output": 0,
            "cache_hits": 0,
            "cache_misses": 0,
            "cache_hit_rate": 0.0,
            "by_tier": {},
            "by_workflow": {},
            "by_provider": {},
        }

    # Running totals and per-dimension cost buckets.
    cost_total = 0.0
    tokens_in = 0
    tokens_out = 0
    hits = 0
    misses = 0
    tier_costs: dict[str, float] = {}
    workflow_costs: dict[str, float] = {}
    provider_costs: dict[str, float] = {}

    for record in records:
        amount = record.get("cost", 0.0)
        token_info = record.get("tokens", {})

        cost_total += amount
        tokens_in += token_info.get("input", 0)
        tokens_out += token_info.get("output", 0)

        if record.get("cache", {}).get("hit"):
            hits += 1
        else:
            misses += 1

        # Accumulate the entry's cost into each breakdown dimension.
        for bucket, field in (
            (tier_costs, "tier"),
            (workflow_costs, "workflow"),
            (provider_costs, "provider"),
        ):
            key = record.get(field, "unknown")
            bucket[key] = bucket.get(key, 0.0) + amount

    call_count = len(records)
    hit_rate = hits / call_count * 100 if call_count > 0 else 0.0

    return {
        "total_calls": call_count,
        "total_cost": round(cost_total, 2),
        "total_tokens_input": tokens_in,
        "total_tokens_output": tokens_out,
        "cache_hits": hits,
        "cache_misses": misses,
        "cache_hit_rate": round(hit_rate, 1),
        "by_tier": tier_costs,
        "by_workflow": workflow_costs,
        "by_provider": provider_costs,
    }
|
|
387
|
+
|
|
388
|
+
def calculate_savings(self, days: int = 30) -> dict[str, Any]:
    """Calculate actual savings vs all-PREMIUM baseline.

    Args:
        days: Number of days to analyze (default: 30)

    Returns:
        Dictionary with: actual_cost (with tier routing), baseline_cost
        (if every call had used PREMIUM), savings (USD), savings_percent,
        tier_distribution (percentage of calls by tier), cache_savings
        (estimated extra savings from cache hits), total_calls.

    """
    records = self.get_recent_entries(limit=100000, days=days)

    if not records:
        return {
            "actual_cost": 0.0,
            "baseline_cost": 0.0,
            "savings": 0.0,
            "savings_percent": 0.0,
            "tier_distribution": {},
            "cache_savings": 0.0,
            "total_calls": 0,
        }

    call_count = len(records)

    # Actual spend with tier routing in place.
    actual_cost = sum(r.get("cost", 0.0) for r in records)

    # Baseline: the same call volume priced as PREMIUM. Use the observed
    # average PREMIUM cost; fall back to a standard $0.05/call rate when
    # no PREMIUM calls exist in the window.
    premium_spend = [r.get("cost", 0.0) for r in records if r.get("tier") == "PREMIUM"]
    per_call_premium = (sum(premium_spend) / len(premium_spend)) if premium_spend else 0.05
    baseline_cost = call_count * per_call_premium

    # Share of calls per tier, expressed as percentages.
    counts: dict[str, int] = {}
    for record in records:
        label = record.get("tier", "unknown")
        counts[label] = counts.get(label, 0) + 1
    tier_distribution = {
        label: round(n / call_count * 100, 1) for label, n in counts.items()
    }

    # Rough cache-savings estimate: each hit valued at the mean call cost.
    hit_total = sum(1 for r in records if r.get("cache", {}).get("hit"))
    mean_cost = actual_cost / call_count if call_count > 0 else 0.0
    cache_savings = hit_total * mean_cost

    saved = baseline_cost - actual_cost
    saved_pct = (saved / baseline_cost * 100) if baseline_cost > 0 else 0.0

    return {
        "actual_cost": round(actual_cost, 2),
        "baseline_cost": round(baseline_cost, 2),
        "savings": round(saved, 2),
        "savings_percent": round(saved_pct, 1),
        "tier_distribution": tier_distribution,
        "cache_savings": round(cache_savings, 2),
        "total_calls": call_count,
    }
|
|
454
|
+
|
|
455
|
+
def reset(self) -> int:
    """Clear all telemetry data.

    Counts the non-blank lines of every ``usage*.jsonl`` file before
    unlinking it, so the caller learns how many entries were discarded.

    Returns:
        Number of entries deleted

    """
    removed = 0
    with self._lock:
        for path in self.telemetry_dir.glob("usage*.jsonl"):
            try:
                # Tally entries first, then remove the file itself.
                with open(path, encoding="utf-8") as fh:
                    removed += sum(1 for row in fh if row.strip())
                path.unlink()
            except OSError:
                # File system errors - log but continue
                logger.debug(f"Failed to delete telemetry file: {path.name}")

    return removed
|
|
475
|
+
|
|
476
|
+
def get_cache_stats(self, days: int = 7) -> dict[str, Any]:
    """Get prompt caching statistics.

    Summarizes Anthropic prompt-cache usage over the window: hit rate,
    cache read/write token totals, estimated USD savings from cached
    reads, and a per-workflow breakdown.

    Args:
        days: Number of days to analyze (default: 7)

    Returns:
        Dictionary with caching statistics:
        - hit_rate: Fraction of requests that used cache (rounded, 4 dp)
        - total_reads: Total tokens read from cache
        - total_writes: Total tokens written to cache
        - savings: Estimated USD saved by caching
        - hit_count: Number of requests with cache hits
        - total_requests: Total requests analyzed
        - by_workflow: Per-workflow hits/reads/writes/requests/hit_rate

    Example:
        >>> tracker = UsageTracker.get_instance()
        >>> stats = tracker.get_cache_stats(days=7)
        >>> print(f"Cache hit rate: {stats['hit_rate']:.1%}")
        Cache hit rate: 65.3%
        >>> print(f"Savings: ${stats['savings']:.2f}")
        Savings: $12.45

    """
    records = self.get_recent_entries(limit=100000, days=days)

    if not records:
        return {
            "hit_rate": 0.0,
            "total_reads": 0,
            "total_writes": 0,
            "savings": 0.0,
            "hit_count": 0,
            "total_requests": 0,
            "by_workflow": {},
        }

    request_total = len(records)
    hit_total = 0
    read_tokens = 0
    write_tokens = 0
    per_workflow: dict[str, dict[str, Any]] = {}

    for record in records:
        cache_info = record.get("prompt_cache", {})
        was_hit = bool(cache_info.get("hit"))
        reads = cache_info.get("read_tokens", 0)
        writes = cache_info.get("creation_tokens", 0)

        if was_hit:
            hit_total += 1
        read_tokens += reads
        write_tokens += writes

        # Per-workflow stats (lazy-initialized bucket per workflow name).
        name = record.get("workflow", "unknown")
        wf = per_workflow.setdefault(
            name, {"hits": 0, "reads": 0, "writes": 0, "requests": 0}
        )
        wf["requests"] += 1
        if was_hit:
            wf["hits"] += 1
        wf["reads"] += reads
        wf["writes"] += writes

    overall_rate = hit_total / request_total if request_total > 0 else 0.0

    # Cache reads are billed at a 90% discount against Sonnet 4.5 input
    # pricing ($3.00/M full vs $0.30/M cached), i.e. $2.70 saved per
    # million tokens read from cache.
    savings = read_tokens * 0.0000027  # $2.70 / 1M tokens

    # Attach a hit-rate figure to each workflow bucket.
    for wf in per_workflow.values():
        n = wf["requests"]
        wf["hit_rate"] = wf["hits"] / n if n > 0 else 0.0

    return {
        "hit_rate": round(overall_rate, 4),
        "total_reads": read_tokens,
        "total_writes": write_tokens,
        "savings": round(savings, 2),
        "hit_count": hit_total,
        "total_requests": request_total,
        "by_workflow": per_workflow,
    }
|
|
580
|
+
|
|
581
|
+
def export_to_dict(self, days: int | None = None) -> list[dict[str, Any]]:
|
|
582
|
+
"""Export all entries as list of dicts.
|
|
583
|
+
|
|
584
|
+
Args:
|
|
585
|
+
days: Only export entries from last N days (optional)
|
|
586
|
+
|
|
587
|
+
Returns:
|
|
588
|
+
List of telemetry entries
|
|
589
|
+
|
|
590
|
+
"""
|
|
591
|
+
return self.get_recent_entries(limit=1000000, days=days)
|