attune_ai-2.0.0-py3-none-any.whl
This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
attune/monitoring/multi_backend.py
@@ -0,0 +1,271 @@
"""Multi-Backend Telemetry Support

Enables simultaneous logging to multiple backends (JSONL + OTEL).

**Features:**
- Composite pattern for multiple backends
- Parallel writes to all configured backends
- Graceful handling of backend failures
- Automatic backend selection based on configuration

**Example:**
```python
from attune.monitoring import TelemetryStore
from attune.monitoring.otel_backend import OTELBackend
from attune.monitoring.multi_backend import MultiBackend

# Create composite backend
backends = [
    TelemetryStore(),  # JSONL (always enabled)
    OTELBackend(),  # OTEL (if configured)
]
multi = MultiBackend(backends)

# Logs to both backends
multi.log_call(record)
```

Copyright 2025 Smart-AI-Memory
Licensed under Fair Source License 0.9
"""

from typing import Protocol, runtime_checkable

from attune.models.telemetry import LLMCallRecord, TelemetryStore, WorkflowRunRecord


@runtime_checkable
class TelemetryBackend(Protocol):
    """Protocol for telemetry storage backends.

    All backends must implement log_call() and log_workflow().
    """

    def log_call(self, record: LLMCallRecord) -> None:
        """Log an LLM call record."""
        ...

    def log_workflow(self, record: WorkflowRunRecord) -> None:
        """Log a workflow run record."""
        ...


class MultiBackend:
    """Composite backend for simultaneous logging to multiple backends.

    Implements the TelemetryBackend protocol and forwards calls to all
    configured backends. Handles failures gracefully - if one backend
    fails, others continue to work.

    **Auto-Configuration:**
    - JSONL backend is always enabled (default)
    - OTEL backend is enabled if EMPATHY_OTEL_ENDPOINT is set

    Example:
        >>> backend = MultiBackend.from_config()
        >>> backend.log_call(call_record)  # Logs to JSONL + OTEL
        >>> backend.log_workflow(workflow_record)
    """

    def __init__(self, backends: list[TelemetryBackend] | None = None):
        """Initialize multi-backend.

        Args:
            backends: List of backend instances (default: auto-detect)
        """
        self.backends = backends or []
        self._failed_backends: set[int] = set()

    @classmethod
    def from_config(cls, storage_dir: str = ".empathy") -> "MultiBackend":
        """Create multi-backend from configuration.

        Auto-detects available backends:
        1. JSONL backend (always enabled)
        2. OTEL backend (if EMPATHY_OTEL_ENDPOINT is set or collector detected)

        Args:
            storage_dir: Directory for JSONL storage (default: .empathy)

        Returns:
            MultiBackend instance with all available backends
        """
        backends: list[TelemetryBackend] = []

        # Always add JSONL backend
        try:
            jsonl_backend = TelemetryStore(storage_dir)
            backends.append(jsonl_backend)
        except Exception as e:
            print(f"⚠️ Failed to initialize JSONL backend: {e}")

        # Add OTEL backend if configured
        try:
            from attune.monitoring.otel_backend import OTELBackend

            otel_backend = OTELBackend()
            if otel_backend.is_available():
                backends.append(otel_backend)
        except ImportError:
            # OTEL dependencies not installed
            pass
        except Exception as e:
            print(f"⚠️ Failed to initialize OTEL backend: {e}")

        return cls(backends)

    def add_backend(self, backend: TelemetryBackend) -> None:
        """Add a backend to the multi-backend.

        Args:
            backend: Backend instance to add
        """
        if isinstance(backend, TelemetryBackend):
            self.backends.append(backend)
        else:
            raise TypeError(
                f"Backend must implement TelemetryBackend protocol, got {type(backend)}"
            )

    def remove_backend(self, backend: TelemetryBackend) -> None:
        """Remove a backend from the multi-backend.

        Args:
            backend: Backend instance to remove
        """
        if backend in self.backends:
            self.backends.remove(backend)

    def log_call(self, record: LLMCallRecord) -> None:
        """Log an LLM call record to all backends.

        Failures in individual backends are logged but don't affect other backends.

        Args:
            record: LLM call record to log
        """
        for i, backend in enumerate(self.backends):
            if i in self._failed_backends:
                # Skip backends that have failed before
                continue

            try:
                backend.log_call(record)
            except Exception as e:
                backend_name = type(backend).__name__
                print(f"⚠️ Failed to log call to {backend_name}: {e}")
                # Mark backend as failed to reduce log spam
                self._failed_backends.add(i)

    def log_workflow(self, record: WorkflowRunRecord) -> None:
        """Log a workflow run record to all backends.

        Failures in individual backends are logged but don't affect other backends.

        Args:
            record: Workflow run record to log
        """
        for i, backend in enumerate(self.backends):
            if i in self._failed_backends:
                # Skip backends that have failed before
                continue

            try:
                backend.log_workflow(record)
            except Exception as e:
                backend_name = type(backend).__name__
                print(f"⚠️ Failed to log workflow to {backend_name}: {e}")
                # Mark backend as failed to reduce log spam
                self._failed_backends.add(i)

    def get_active_backends(self) -> list[str]:
        """Get list of active backend names.

        Returns:
            List of backend class names that are active (not failed)
        """
        return [
            type(backend).__name__
            for i, backend in enumerate(self.backends)
            if i not in self._failed_backends
        ]

    def get_failed_backends(self) -> list[str]:
        """Get list of failed backend names.

        Returns:
            List of backend class names that have failed
        """
        return [
            type(self.backends[i]).__name__ for i in self._failed_backends if i < len(self.backends)
        ]

    def reset_failures(self) -> None:
        """Reset failed backend tracking.

        Allows retry of previously failed backends.
        """
        self._failed_backends.clear()

    def flush(self) -> None:
        """Flush all backends.

        Calls flush() on backends that support it (e.g., OTEL backend).
        """
        for backend in self.backends:
            if hasattr(backend, "flush"):
                try:
                    backend.flush()
                except Exception as e:
                    backend_name = type(backend).__name__
                    print(f"⚠️ Failed to flush {backend_name}: {e}")

    def __len__(self) -> int:
        """Return number of active backends."""
        return len(self.backends) - len(self._failed_backends)

    def __repr__(self) -> str:
        """String representation."""
        active = self.get_active_backends()
        failed = self.get_failed_backends()
        status = f"active={active}"
        if failed:
            status += f", failed={failed}"
        return f"MultiBackend({status})"


# Singleton instance for global access
_global_backend: MultiBackend | None = None


def get_multi_backend(storage_dir: str = ".empathy") -> MultiBackend:
    """Get or create the global multi-backend instance.

    This is the recommended way to get a multi-backend instance.
    It ensures a single instance is shared across the application.

    Args:
        storage_dir: Directory for JSONL storage (default: .empathy)

    Returns:
        Global MultiBackend instance

    Example:
        >>> backend = get_multi_backend()
        >>> backend.log_call(record)
    """
    global _global_backend
    if _global_backend is None:
        _global_backend = MultiBackend.from_config(storage_dir)
    return _global_backend


def reset_multi_backend() -> None:
    """Reset the global multi-backend instance.

    Useful for testing or reconfiguration.
    """
    global _global_backend
    if _global_backend is not None:
        _global_backend.flush()
    _global_backend = None
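The failure-isolation behaviour described in `MultiBackend` can be exercised with plain stand-in backends. The sketch below is illustrative only: `ListBackend`, `FlakyBackend`, and the dict passed as a record are assumptions for the demo, not part of the package (real callers pass an `LLMCallRecord`).

```python
from attune.monitoring.multi_backend import MultiBackend


class ListBackend:
    """Minimal stand-in that satisfies the TelemetryBackend protocol."""

    def __init__(self) -> None:
        self.calls: list = []
        self.workflows: list = []

    def log_call(self, record) -> None:
        self.calls.append(record)

    def log_workflow(self, record) -> None:
        self.workflows.append(record)


class FlakyBackend(ListBackend):
    """Always fails, to show that one broken backend does not block the rest."""

    def log_call(self, record) -> None:
        raise RuntimeError("collector unreachable")


good, flaky = ListBackend(), FlakyBackend()
multi = MultiBackend([good, flaky])

multi.log_call({"model": "demo"})   # hypothetical record; real code passes an LLMCallRecord
print(multi.get_active_backends())  # ['ListBackend'] - FlakyBackend is skipped from now on
print(multi.get_failed_backends())  # ['FlakyBackend']
multi.reset_failures()              # opt back in to retrying the flaky backend
```

This is the same property `from_config()` relies on: a misconfigured OTEL collector degrades to JSONL-only logging instead of raising.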
attune/monitoring/otel_backend.py
@@ -0,0 +1,362 @@
"""OpenTelemetry Backend for LLM Telemetry

Exports telemetry data to OTEL-compatible collectors (SigNoz, Datadog, New Relic).

**Features:**
- Auto-detection of OTEL collector (localhost:4317)
- Environment variable configuration (EMPATHY_OTEL_ENDPOINT)
- Semantic conventions for LLM traces
- Batch export with retry logic
- Graceful fallback if collector unavailable

**Setup:**
```bash
export EMPATHY_OTEL_ENDPOINT=http://localhost:4317
pip install empathy-framework[otel]
```

Copyright 2025 Smart-AI-Memory
Licensed under Fair Source License 0.9
"""

import os
import socket

from attune.models.telemetry import LLMCallRecord, WorkflowRunRecord


class OTELBackend:
    """OpenTelemetry backend for exporting telemetry to OTEL collectors.

    Implements the TelemetryBackend protocol for OTEL export.

    **Auto-detection:**
    - Checks for OTEL collector on localhost:4317
    - Falls back to EMPATHY_OTEL_ENDPOINT environment variable

    **Semantic Conventions:**
    - LLM calls → OTEL spans with llm.* attributes
    - Workflows → OTEL traces with workflow.* attributes

    **Batch Export:**
    - Buffers records and exports in batches
    - Retries on transient failures
    - Logs errors but doesn't crash

    Example:
        >>> backend = OTELBackend()
        >>> if backend.is_available():
        ...     backend.log_call(call_record)
        ...     backend.log_workflow(workflow_record)
    """

    def __init__(
        self,
        endpoint: str | None = None,
        batch_size: int = 10,
        retry_count: int = 3,
    ):
        """Initialize OTEL backend.

        Args:
            endpoint: OTEL collector endpoint (default: auto-detect)
            batch_size: Number of records to buffer before export
            retry_count: Number of retries on transient failures
        """
        self.endpoint = endpoint or self._detect_endpoint()
        self.batch_size = batch_size
        self.retry_count = retry_count
        self.call_buffer: list[LLMCallRecord] = []
        self.workflow_buffer: list[WorkflowRunRecord] = []
        self._available = self._check_availability()

        # Try importing OTEL dependencies
        self._otel_available = self._check_otel_installed()

        if self._otel_available and self._available:
            self._init_otel()

    def _detect_endpoint(self) -> str:
        """Detect OTEL collector endpoint.

        Checks (in order):
        1. EMPATHY_OTEL_ENDPOINT environment variable
        2. localhost:4317 (default OTEL gRPC port)

        Returns:
            OTEL collector endpoint URL
        """
        # Check environment variable
        endpoint = os.getenv("EMPATHY_OTEL_ENDPOINT")
        if endpoint:
            return endpoint

        # Check localhost:4317
        if self._is_port_open("localhost", 4317):
            return "http://localhost:4317"

        # Default (will fail availability check)
        return "http://localhost:4317"

    def _is_port_open(self, host: str, port: int, timeout: float = 1.0) -> bool:
        """Check if a port is open on a host.

        Args:
            host: Hostname or IP address
            port: Port number
            timeout: Connection timeout in seconds

        Returns:
            True if port is open, False otherwise
        """
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(timeout)
            sock.connect((host, port))
            sock.close()
            return True
        except (TimeoutError, OSError):
            return False

    def _check_availability(self) -> bool:
        """Check if OTEL collector is available.

        Returns:
            True if collector is reachable, False otherwise
        """
        if not self.endpoint:
            return False

        # Parse endpoint to extract host and port
        try:
            # Remove http:// or https://
            endpoint = self.endpoint.replace("http://", "").replace("https://", "")
            if ":" in endpoint:
                host, port_str = endpoint.split(":")
                port = int(port_str.split("/")[0])  # Remove any path
            else:
                host = endpoint
                port = 4317  # Default OTEL gRPC port

            return self._is_port_open(host, port)
        except (ValueError, IndexError):
            return False

    def _check_otel_installed(self) -> bool:
        """Check if OTEL dependencies are installed.

        Returns:
            True if opentelemetry-api and opentelemetry-sdk are installed
        """
        import importlib.util

        required_packages = [
            "opentelemetry.trace",
            "opentelemetry.exporter.otlp.proto.grpc.trace_exporter",
            "opentelemetry.sdk.trace",
            "opentelemetry.sdk.trace.export",
        ]

        return all(importlib.util.find_spec(pkg) is not None for pkg in required_packages)

    def _init_otel(self) -> None:
        """Initialize OTEL tracer and exporter."""
        try:
            from opentelemetry import trace
            from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
            from opentelemetry.sdk.resources import Resource
            from opentelemetry.sdk.trace import TracerProvider
            from opentelemetry.sdk.trace.export import BatchSpanProcessor

            # Create resource with service name
            resource = Resource.create(
                {
                    "service.name": "empathy-framework",
                    "service.version": "3.8.0-alpha",
                }
            )

            # Create tracer provider
            provider = TracerProvider(resource=resource)

            # Create OTLP exporter
            exporter = OTLPSpanExporter(endpoint=self.endpoint, insecure=True)

            # Add batch processor
            processor = BatchSpanProcessor(exporter)
            provider.add_span_processor(processor)

            # Set global tracer provider
            trace.set_tracer_provider(provider)

            # Get tracer
            self.tracer = trace.get_tracer("empathy.llm.telemetry", "3.8.0-alpha")

        except Exception as e:
            print(f"⚠️ Failed to initialize OTEL: {e}")
            self._available = False

    def is_available(self) -> bool:
        """Check if OTEL backend is available.

        Returns:
            True if OTEL collector is reachable and dependencies installed
        """
        return self._available and self._otel_available

    def log_call(self, record: LLMCallRecord) -> None:
        """Log an LLM call record to OTEL.

        Creates an OTEL span with LLM semantic conventions.

        Args:
            record: LLM call record to log
        """
        if not self.is_available():
            return

        try:
            # Create span with LLM semantic conventions
            with self.tracer.start_as_current_span(
                f"llm.{record.provider}.{record.model_id}"
            ) as span:
                # Set standard LLM attributes
                span.set_attribute("llm.provider", record.provider)
                span.set_attribute("llm.model", record.model_id)
                span.set_attribute("llm.tier", record.tier)
                span.set_attribute("llm.task_type", record.task_type)

                # Set token usage
                span.set_attribute("llm.usage.input_tokens", record.input_tokens)
                span.set_attribute("llm.usage.output_tokens", record.output_tokens)
                span.set_attribute(
                    "llm.usage.total_tokens", record.input_tokens + record.output_tokens
                )

                # Set cost and latency
                span.set_attribute("llm.cost.estimated", record.estimated_cost)
                if record.actual_cost:
                    span.set_attribute("llm.cost.actual", record.actual_cost)
                span.set_attribute("llm.latency_ms", record.latency_ms)

                # Set workflow context
                if record.workflow_name:
                    span.set_attribute("workflow.name", record.workflow_name)
                if record.step_name:
                    span.set_attribute("workflow.step", record.step_name)
                if record.session_id:
                    span.set_attribute("session.id", record.session_id)

                # Set fallback info
                if record.fallback_used:
                    span.set_attribute("llm.fallback.used", True)
                    if record.original_provider:
                        span.set_attribute(
                            "llm.fallback.original_provider", record.original_provider
                        )
                    if record.original_model:
                        span.set_attribute("llm.fallback.original_model", record.original_model)

                # Set error info
                if not record.success:
                    span.set_attribute("llm.error", True)
                    if record.error_type:
                        span.set_attribute("llm.error.type", record.error_type)
                    if record.error_message:
                        span.set_attribute("llm.error.message", record.error_message)

        except Exception as e:
            print(f"⚠️ Failed to export LLM call to OTEL: {e}")

    def log_workflow(self, record: WorkflowRunRecord) -> None:
        """Log a workflow run record to OTEL.

        Creates an OTEL trace with workflow semantic conventions.

        Args:
            record: Workflow run record to log
        """
        if not self.is_available():
            return

        try:
            # Create trace for workflow
            with self.tracer.start_as_current_span(f"workflow.{record.workflow_name}") as span:
                # Set workflow attributes
                span.set_attribute("workflow.name", record.workflow_name)
                span.set_attribute("workflow.run_id", record.run_id)
                if record.session_id:
                    span.set_attribute("session.id", record.session_id)

                # Set token usage
                span.set_attribute("workflow.usage.input_tokens", record.total_input_tokens)
                span.set_attribute("workflow.usage.output_tokens", record.total_output_tokens)
                span.set_attribute(
                    "workflow.usage.total_tokens",
                    record.total_input_tokens + record.total_output_tokens,
                )

                # Set cost and savings
                span.set_attribute("workflow.cost.total", record.total_cost)
                span.set_attribute("workflow.cost.baseline", record.baseline_cost)
                span.set_attribute("workflow.cost.savings", record.savings)
                span.set_attribute("workflow.cost.savings_percent", record.savings_percent)

                # Set duration
                span.set_attribute("workflow.duration_ms", record.total_duration_ms)

                # Set providers and tiers used
                span.set_attribute("workflow.providers_used", ",".join(record.providers_used))
                span.set_attribute("workflow.tiers_used", ",".join(record.tiers_used))

                # Set success status
                span.set_attribute("workflow.success", record.success)
                if not record.success and record.error:
                    span.set_attribute("workflow.error", record.error)

                # Create child spans for each stage
                for stage in record.stages:
                    with self.tracer.start_as_current_span(
                        f"stage.{stage.stage_name}"
                    ) as stage_span:
                        stage_span.set_attribute("stage.name", stage.stage_name)
                        stage_span.set_attribute("llm.tier", stage.tier)
                        stage_span.set_attribute("llm.model", stage.model_id)
                        stage_span.set_attribute("llm.usage.input_tokens", stage.input_tokens)
                        stage_span.set_attribute("llm.usage.output_tokens", stage.output_tokens)
                        stage_span.set_attribute("llm.cost", stage.cost)
                        stage_span.set_attribute("llm.latency_ms", stage.latency_ms)
                        stage_span.set_attribute("stage.success", stage.success)

                        if stage.skipped:
                            stage_span.set_attribute("stage.skipped", True)
                            if stage.skip_reason:
                                stage_span.set_attribute("stage.skip_reason", stage.skip_reason)

                        if stage.error:
                            stage_span.set_attribute("stage.error", stage.error)

        except Exception as e:
            print(f"⚠️ Failed to export workflow to OTEL: {e}")

    def flush(self) -> None:
        """Flush any buffered records to OTEL collector.

        Called automatically on shutdown or can be called manually.
        """
        if not self.is_available():
            return

        try:
            from opentelemetry import trace

            # Get tracer provider and force flush
            provider = trace.get_tracer_provider()
            if hasattr(provider, "force_flush"):
                provider.force_flush()
        except Exception as e:
            print(f"⚠️ Failed to flush OTEL data: {e}")

    def __del__(self) -> None:
        """Cleanup on deletion."""
        self.flush()
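For completeness, here is one way these pieces could be wired together. This is a sketch only: it assumes an OTLP/gRPC collector is actually listening on the configured endpoint and that the `[otel]` extra from the module docstring is installed; only names that appear in this diff are used.

```python
import os

# Make the endpoint explicit before the backend reads the environment.
os.environ["EMPATHY_OTEL_ENDPOINT"] = "http://localhost:4317"

from attune.monitoring.multi_backend import MultiBackend
from attune.monitoring.otel_backend import OTELBackend

otel = OTELBackend()        # picks up EMPATHY_OTEL_ENDPOINT, else probes localhost:4317
print(otel.is_available())  # False unless the collector is reachable and the OTEL SDK is installed

# from_config() performs the same detection: JSONL is always added,
# the OTEL backend only when is_available() returns True.
multi = MultiBackend.from_config(storage_dir=".empathy")
print(multi.get_active_backends())

multi.flush()               # force-flush batched spans before the process exits
```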
attune/optimization/__init__.py
@@ -0,0 +1,19 @@
"""Context window optimization for XML-enhanced prompts.

Provides compression and optimization to reduce token usage by 20-30%.

Copyright 2026 Smart-AI-Memory
Licensed under Fair Source License 0.9
"""

from attune.optimization.context_optimizer import (
    CompressionLevel,
    ContextOptimizer,
    optimize_xml_prompt,
)

__all__ = [
    "CompressionLevel",
    "ContextOptimizer",
    "optimize_xml_prompt",
]