attune-ai 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
attune_llm/state.py
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
1
|
+
"""Collaboration State Management
|
|
2
|
+
|
|
3
|
+
Tracks AI-human collaboration over time to enable Level 3+ empathy.
|
|
4
|
+
|
|
5
|
+
Copyright 2025 Smart AI Memory, LLC
|
|
6
|
+
Licensed under Fair Source 0.9
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from dataclasses import dataclass, field
|
|
10
|
+
from datetime import datetime
|
|
11
|
+
from enum import Enum
|
|
12
|
+
from typing import Any
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class PatternType(Enum):
    """Categories of detectable user-behavior patterns."""

    # "User always does X then Y"
    SEQUENTIAL = "sequential"
    # "User does X at a specific time"
    TEMPORAL = "temporal"
    # "When Z happens, user does X"
    CONDITIONAL = "conditional"
    # "User prefers format/style X"
    PREFERENCE = "preference"
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
@dataclass
class UserPattern:
    """A pattern detected in user behavior.

    Enables Level 3 (Proactive) empathy.
    """

    pattern_type: PatternType
    trigger: str  # what sets this pattern off
    action: str  # what the user typically does next
    confidence: float  # detection confidence, 0.0 to 1.0
    occurrences: int  # how many times this pattern was observed
    last_seen: datetime
    context: dict[str, Any] = field(default_factory=dict)

    def should_act(self, trust_level: float) -> bool:
        """Return True when acting proactively on this pattern is warranted.

        Requires both high detection confidence (> 0.7) and sufficient
        accumulated trust (> 0.6).
        """
        if self.confidence <= 0.7:
            return False
        return trust_level > 0.6
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
@dataclass
class Interaction:
    """One turn of the conversation history."""

    timestamp: datetime
    role: str  # either "user" or "assistant"
    content: str
    empathy_level: int  # empathy level in effect when this turn happened
    metadata: dict[str, Any] = field(default_factory=dict)
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
@dataclass
class CollaborationState:
    """Tracks AI-human collaboration state over time.

    This is the foundation for Level 2+ empathy:
    - Level 2 draws on conversation history for context
    - Level 3 detects patterns and builds trust
    - Level 4 analyzes trajectory
    - Level 5 contributes to the shared pattern library
    """

    user_id: str
    session_start: datetime = field(default_factory=datetime.now)

    # Conversation tracking (Level 2)
    interactions: list[Interaction] = field(default_factory=list)

    # Pattern detection (Level 3)
    detected_patterns: list[UserPattern] = field(default_factory=list)

    # Trust building: 0.0 to 1.0, starting from a neutral 0.5
    trust_level: float = 0.5
    successful_actions: int = 0
    failed_actions: int = 0
    trust_trajectory: list[float] = field(default_factory=list)

    # Empathy level progression (starts at Level 1)
    current_level: int = 1
    level_history: list[int] = field(default_factory=list)

    # User preferences learned over time
    preferences: dict[str, Any] = field(default_factory=dict)

    # Context that persists across interactions
    shared_context: dict[str, Any] = field(default_factory=dict)

    @property
    def success_rate(self) -> float:
        """Fraction of actions that succeeded (1.0 when nothing attempted yet)."""
        attempts = self.successful_actions + self.failed_actions
        return self.successful_actions / attempts if attempts else 1.0

    def add_interaction(
        self,
        role: str,
        content: str,
        empathy_level: int,
        metadata: dict | None = None,
    ):
        """Record one conversation turn; assistant turns also log the level used."""
        turn = Interaction(
            timestamp=datetime.now(),
            role=role,
            content=content,
            empathy_level=empathy_level,
            metadata=metadata or {},
        )
        self.interactions.append(turn)

        if role == "assistant":
            self.level_history.append(empathy_level)

    def update_trust(self, outcome: str, magnitude: float = 1.0):
        """Adjust trust level after an action outcome.

        Args:
            outcome: "success" or "failure"
            magnitude: scaling factor for the adjustment (0.0 to 1.0)

        """
        if outcome == "success":
            # Trust grows slowly on success...
            self.trust_level = min(1.0, self.trust_level + 0.05 * magnitude)
            self.successful_actions += 1
        elif outcome == "failure":
            # ...and erodes twice as fast on failure.
            self.trust_level = max(0.0, self.trust_level - 0.10 * magnitude)
            self.failed_actions += 1

        # Record the post-adjustment value for trajectory analysis (Level 4).
        self.trust_trajectory.append(self.trust_level)

    def add_pattern(self, pattern: UserPattern):
        """Insert a new detected pattern, or refresh the matching existing one."""
        existing = next(
            (
                p
                for p in self.detected_patterns
                if p.pattern_type == pattern.pattern_type and p.trigger == pattern.trigger
            ),
            None,
        )
        if existing is not None:
            # Same (type, trigger) already tracked: update it in place.
            existing.occurrences = pattern.occurrences
            existing.confidence = pattern.confidence
            existing.last_seen = pattern.last_seen
        else:
            self.detected_patterns.append(pattern)

    def find_matching_pattern(self, trigger_text: str) -> UserPattern | None:
        """Return the highest-confidence actionable pattern matching the input.

        A pattern matches when its trigger appears (case-insensitively) in the
        text AND it clears the confidence/trust thresholds of `should_act`.
        """
        haystack = trigger_text.lower()
        candidates = [
            p
            for p in self.detected_patterns
            if p.trigger.lower() in haystack and p.should_act(self.trust_level)
        ]
        if not candidates:
            return None
        return max(candidates, key=lambda p: p.confidence)

    def get_conversation_history(
        self,
        max_turns: int = 10,
        include_metadata: bool = False,
    ) -> list[dict[str, Any]]:
        """Get recent conversation history in LLM format.

        Args:
            max_turns: Maximum number of turns to include (falsy means all)
            include_metadata: Whether to include interaction metadata

        Returns:
            List of {"role": "user/assistant", "content": "..."}

        """
        recent = self.interactions[-max_turns:] if max_turns else self.interactions

        if include_metadata:
            return [
                {"role": turn.role, "content": turn.content, "metadata": turn.metadata}
                for turn in recent
            ]
        return [{"role": turn.role, "content": turn.content} for turn in recent]

    def should_progress_to_level(self, level: int) -> bool:
        """Decide whether a higher empathy level is warranted.

        Progression criteria:
        - Level 2: Immediate (guided questions always helpful)
        - Level 3: Trust > 0.6, patterns detected
        - Level 4: Trust > 0.7, sufficient history
        - Level 5: Trust > 0.8, cross-domain patterns available
        """
        if level <= 2:
            # Level 2 is always appropriate.
            return True
        if level == 3:
            return self.trust_level > 0.6 and bool(self.detected_patterns)
        if level == 4:
            return (
                self.trust_level > 0.7
                and len(self.interactions) > 10
                and len(self.detected_patterns) > 2
            )
        if level == 5:
            return self.trust_level > 0.8
        return False

    def get_statistics(self) -> dict[str, Any]:
        """Summarize the collaboration session as a plain dict."""
        attempts = self.successful_actions + self.failed_actions
        # NOTE(review): unlike the `success_rate` property (which defaults to
        # 1.0), this reports 0.0 when no actions were attempted yet — kept
        # as-is to preserve existing behavior, but worth unifying.
        rate = self.successful_actions / attempts if attempts else 0.0

        return {
            "user_id": self.user_id,
            "session_duration": (datetime.now() - self.session_start).total_seconds(),
            "total_interactions": len(self.interactions),
            "trust_level": self.trust_level,
            "success_rate": rate,
            "patterns_detected": len(self.detected_patterns),
            "current_level": self.current_level,
            "average_level": (
                sum(self.level_history) / len(self.level_history) if self.level_history else 1
            ),
        }
|
|
@@ -0,0 +1,349 @@
|
|
|
1
|
+
"""Token counting utilities using Anthropic's tokenizer.
|
|
2
|
+
|
|
3
|
+
Provides accurate token counting for billing-accurate cost tracking.
|
|
4
|
+
Replaces rough estimates (4 chars per token) with Anthropic's official counter.
|
|
5
|
+
|
|
6
|
+
Copyright 2025 Smart-AI-Memory
|
|
7
|
+
Licensed under Fair Source License 0.9
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import functools
|
|
11
|
+
import logging
|
|
12
|
+
import os
|
|
13
|
+
from dataclasses import dataclass
|
|
14
|
+
from typing import Any
|
|
15
|
+
|
|
16
|
+
# Module-level logger for this utility module.
logger = logging.getLogger(__name__)

# Lazily-created singletons, so importing this module does not require the
# optional `anthropic`/`tiktoken` dependencies unless they are actually used.
_client = None  # anthropic.Anthropic client, created on first API-based count
# NOTE(review): appears unused in this module's visible code — encodings are
# cached via functools.lru_cache instead; confirm before removing.
_tiktoken_encoding = None

# Probe for tiktoken (fast local token estimation); fall back gracefully.
try:
    import tiktoken

    TIKTOKEN_AVAILABLE = True
except ImportError:
    TIKTOKEN_AVAILABLE = False
    logger.debug("tiktoken not available - will use API or heuristic fallback")
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
@dataclass
|
|
33
|
+
class TokenCount:
|
|
34
|
+
"""Token count result with metadata."""
|
|
35
|
+
|
|
36
|
+
tokens: int
|
|
37
|
+
method: str # "anthropic_api", "tiktoken", "heuristic"
|
|
38
|
+
model: str | None = None
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def _get_client():
    """Return the cached Anthropic client, creating it on first use.

    Raises:
        ImportError: the `anthropic` package is not installed.
        ValueError: ANTHROPIC_API_KEY is not set in the environment.
    """
    global _client
    if _client is not None:
        return _client

    try:
        from anthropic import Anthropic
    except ImportError as e:
        raise ImportError(
            "anthropic package required for token counting. Install with: pip install anthropic"
        ) from e

    api_key = os.getenv("ANTHROPIC_API_KEY")
    if not api_key:
        raise ValueError(
            "ANTHROPIC_API_KEY environment variable required for API token counting"
        )

    _client = Anthropic(api_key=api_key)
    return _client
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
@functools.lru_cache(maxsize=4)
def _get_tiktoken_encoding(model: str) -> Any:
    """Return the tiktoken encoding used for Claude models, or None.

    Cached per model name. NOTE(review): `model` currently only serves as the
    cache key — every model maps to cl100k_base (similar to GPT-4's encoding).
    Returns None when tiktoken is unavailable or lookup fails.
    """
    if not TIKTOKEN_AVAILABLE:
        return None

    try:
        return tiktoken.get_encoding("cl100k_base")
    except Exception as e:
        # Best-effort: log and degrade to the heuristic path.
        logger.warning(f"Failed to get tiktoken encoding: {e}")
        return None
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def _count_tokens_tiktoken(text: str, model: str) -> int:
|
|
75
|
+
"""Count tokens using tiktoken (fast local estimation)."""
|
|
76
|
+
if not text:
|
|
77
|
+
return 0
|
|
78
|
+
|
|
79
|
+
encoding = _get_tiktoken_encoding(model)
|
|
80
|
+
if not encoding:
|
|
81
|
+
return 0
|
|
82
|
+
|
|
83
|
+
try:
|
|
84
|
+
return len(encoding.encode(text))
|
|
85
|
+
except Exception as e:
|
|
86
|
+
logger.warning(f"tiktoken encoding failed: {e}")
|
|
87
|
+
return 0
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
def _count_tokens_heuristic(text: str) -> int:
|
|
91
|
+
"""Fallback heuristic token counting (~4 chars per token)."""
|
|
92
|
+
if not text:
|
|
93
|
+
return 0
|
|
94
|
+
return max(1, len(text) // 4)
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def count_tokens(text: str, model: str = "claude-sonnet-4-5-20250929", use_api: bool = False) -> int:
    """Count tokens in *text* using the best available method.

    The default path is tiktoken-based local estimation (fast, ~98% accurate
    per module docs). With ``use_api=True`` an exact count is requested from
    the Anthropic API; any API failure (missing package, missing key, network
    error) is logged and the function falls back to local estimation rather
    than raising.

    Args:
        text: Text to tokenize.
        model: Model ID (tokenizers may differ between models).
        use_api: Request an exact count via the Anthropic API (requires the
            anthropic package and ANTHROPIC_API_KEY).

    Returns:
        Token count; 0 for empty text.
    """
    if not text:
        return 0

    if use_api:
        try:
            response = _get_client().messages.count_tokens(
                model=model,
                messages=[{"role": "user", "content": text}],
            )
            return int(response.input_tokens)
        except Exception as e:
            logger.warning(f"API token counting failed, using fallback: {e}")
            # Fall through to local estimation below.

    # Prefer tiktoken when installed; a 0 result means it was unusable.
    if TIKTOKEN_AVAILABLE:
        estimated = _count_tokens_tiktoken(text, model)
        if estimated > 0:
            return estimated

    # Last resort: ~4 characters per token.
    return _count_tokens_heuristic(text)
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
def count_message_tokens(
|
|
150
|
+
messages: list[dict[str, str]],
|
|
151
|
+
system_prompt: str | None = None,
|
|
152
|
+
model: str = "claude-sonnet-4-5-20250929",
|
|
153
|
+
use_api: bool = False,
|
|
154
|
+
) -> dict[str, int]:
|
|
155
|
+
"""Count tokens in a conversation.
|
|
156
|
+
|
|
157
|
+
By default uses tiktoken for fast estimation. Set use_api=True for exact count.
|
|
158
|
+
|
|
159
|
+
Args:
|
|
160
|
+
messages: List of message dicts with "role" and "content"
|
|
161
|
+
system_prompt: Optional system prompt
|
|
162
|
+
model: Model ID
|
|
163
|
+
use_api: Whether to use Anthropic API for exact count
|
|
164
|
+
|
|
165
|
+
Returns:
|
|
166
|
+
Dict with token counts by component:
|
|
167
|
+
- "system": System prompt tokens
|
|
168
|
+
- "messages": Message tokens
|
|
169
|
+
- "total": Sum of all tokens
|
|
170
|
+
|
|
171
|
+
Example:
|
|
172
|
+
>>> messages = [{"role": "user", "content": "Hello!"}]
|
|
173
|
+
>>> count_message_tokens(messages, system_prompt="You are helpful")
|
|
174
|
+
{"system": 4, "messages": 6, "total": 10}
|
|
175
|
+
|
|
176
|
+
"""
|
|
177
|
+
if not messages:
|
|
178
|
+
if system_prompt:
|
|
179
|
+
tokens = count_tokens(system_prompt, model, use_api)
|
|
180
|
+
return {"system": tokens, "messages": 0, "total": tokens}
|
|
181
|
+
return {"system": 0, "messages": 0, "total": 0}
|
|
182
|
+
|
|
183
|
+
# Use Anthropic API for exact count if requested
|
|
184
|
+
if use_api:
|
|
185
|
+
try:
|
|
186
|
+
client = _get_client()
|
|
187
|
+
kwargs: dict[str, Any] = {"model": model, "messages": messages}
|
|
188
|
+
if system_prompt:
|
|
189
|
+
kwargs["system"] = system_prompt
|
|
190
|
+
|
|
191
|
+
result = client.messages.count_tokens(**kwargs)
|
|
192
|
+
# API returns total input tokens, estimate breakdown
|
|
193
|
+
total_tokens = result.input_tokens
|
|
194
|
+
|
|
195
|
+
# Estimate system vs message breakdown
|
|
196
|
+
if system_prompt:
|
|
197
|
+
system_tokens = count_tokens(system_prompt, model, use_api=False)
|
|
198
|
+
message_tokens = max(0, total_tokens - system_tokens)
|
|
199
|
+
else:
|
|
200
|
+
system_tokens = 0
|
|
201
|
+
message_tokens = total_tokens
|
|
202
|
+
|
|
203
|
+
return {
|
|
204
|
+
"system": system_tokens,
|
|
205
|
+
"messages": message_tokens,
|
|
206
|
+
"total": total_tokens,
|
|
207
|
+
}
|
|
208
|
+
except Exception as e:
|
|
209
|
+
logger.warning(f"API token counting failed, using fallback: {e}")
|
|
210
|
+
# Continue to fallback method
|
|
211
|
+
|
|
212
|
+
# Fallback: count each component separately
|
|
213
|
+
counts: dict[str, int] = {}
|
|
214
|
+
|
|
215
|
+
# Count system prompt
|
|
216
|
+
if system_prompt:
|
|
217
|
+
counts["system"] = count_tokens(system_prompt, model, use_api=False)
|
|
218
|
+
else:
|
|
219
|
+
counts["system"] = 0
|
|
220
|
+
|
|
221
|
+
# Count messages with overhead
|
|
222
|
+
message_tokens = 0
|
|
223
|
+
for message in messages:
|
|
224
|
+
content = message.get("content", "")
|
|
225
|
+
message_tokens += count_tokens(content, model, use_api=False)
|
|
226
|
+
message_tokens += 4 # Overhead for role markers
|
|
227
|
+
|
|
228
|
+
counts["messages"] = message_tokens
|
|
229
|
+
counts["total"] = counts["system"] + message_tokens
|
|
230
|
+
|
|
231
|
+
return counts
|
|
232
|
+
|
|
233
|
+
|
|
234
|
+
def estimate_cost(input_tokens: int, output_tokens: int, model: str = "claude-sonnet-4-5") -> float:
    """Estimate cost in USD from input/output token counts.

    Looks up per-million-token pricing in the model registry; when the
    registry cannot be imported, defaults to Sonnet 4.5 pricing
    ($3/M input, $15/M output).

    Args:
        input_tokens: Input token count.
        output_tokens: Output token count.
        model: Model ID used for the pricing lookup.

    Returns:
        Estimated cost in USD.

    Raises:
        ValueError: If the registry is available but the model is unknown.
    """
    try:
        # Imported lazily to avoid a circular dependency with the registry.
        from attune.models.registry import get_pricing_for_model

        pricing = get_pricing_for_model(model)
        if not pricing:
            raise ValueError(f"Unknown model: {model}")

        return (
            (input_tokens / 1_000_000) * pricing["input"]
            + (output_tokens / 1_000_000) * pricing["output"]
        )
    except ImportError:
        # Registry unavailable: default Sonnet 4.5 pricing.
        return (input_tokens / 1_000_000) * 3.00 + (output_tokens / 1_000_000) * 15.00
|
|
271
|
+
|
|
272
|
+
|
|
273
|
+
def calculate_cost_with_cache(
    input_tokens: int,
    output_tokens: int,
    cache_creation_tokens: int,
    cache_read_tokens: int,
    model: str = "claude-sonnet-4-5",
) -> dict[str, Any]:
    """Calculate USD cost including Anthropic prompt caching.

    Prompt-caching pricing: cache writes carry a 25% markup over standard
    input pricing; cache reads get a 90% discount.

    Args:
        input_tokens: Regular (non-cached) input tokens.
        output_tokens: Output tokens.
        cache_creation_tokens: Tokens written to the cache.
        cache_read_tokens: Tokens read from the cache.
        model: Model ID used for the pricing lookup.

    Returns:
        Dict with "base_cost", "cache_write_cost", "cache_read_cost",
        "total_cost", and "savings" (vs. paying full input price for the
        cache-read tokens), each rounded to 6 decimal places.
    """
    # Resolve per-million pricing; unknown models and a missing registry both
    # fall back to default Sonnet 4.5 pricing.
    try:
        from attune.models.registry import get_pricing_for_model

        pricing = get_pricing_for_model(model)
        if not pricing:
            raise ValueError(f"Unknown model: {model}")

        input_price = pricing["input"]
        output_price = pricing["output"]
    except (ImportError, ValueError):
        input_price = 3.00
        output_price = 15.00

    # Cost of the non-cached traffic.
    base_cost = (input_tokens / 1_000_000) * input_price
    base_cost += (output_tokens / 1_000_000) * output_price

    # Cache writes: 25% markup over input pricing.
    write_price = input_price * 1.25
    cache_write_cost = (cache_creation_tokens / 1_000_000) * write_price

    # Cache reads: 90% discount, i.e. 10% of input pricing.
    read_price = input_price * 0.1
    cache_read_cost = (cache_read_tokens / 1_000_000) * read_price

    # Savings relative to paying full input price for the cached reads.
    undiscounted = (cache_read_tokens / 1_000_000) * input_price
    savings = undiscounted - cache_read_cost

    return {
        "base_cost": round(base_cost, 6),
        "cache_write_cost": round(cache_write_cost, 6),
        "cache_read_cost": round(cache_read_cost, 6),
        "total_cost": round(base_cost + cache_write_cost + cache_read_cost, 6),
        "savings": round(savings, 6),
    }
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
# Software Development Plugin
|
|
2
|
+
|
|
3
|
+
> **DEPRECATION NOTICE (January 2026):** The `empathy_software_plugin.wizards` module has been removed. Please use CLI workflows instead.
|
|
4
|
+
|
|
5
|
+
Production-ready analysis tools for software development.
|
|
6
|
+
|
|
7
|
+
**Copyright 2025-2026 Smart AI Memory, LLC**
|
|
8
|
+
**Licensed under Fair Source 0.9**
|
|
9
|
+
|
|
10
|
+
## Overview
|
|
11
|
+
|
|
12
|
+
The Software Development Plugin provides analysis capabilities through CLI workflows.
|
|
13
|
+
|
|
14
|
+
## Recommended Approach
|
|
15
|
+
|
|
16
|
+
```bash
|
|
17
|
+
# Security analysis
|
|
18
|
+
empathy workflow run security-audit --path ./src
|
|
19
|
+
|
|
20
|
+
# Bug prediction
|
|
21
|
+
empathy workflow run bug-predict --path ./src
|
|
22
|
+
|
|
23
|
+
# Test coverage analysis
|
|
24
|
+
empathy workflow run test-coverage --path ./src
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
Or use the Python workflow API:
|
|
28
|
+
|
|
29
|
+
```python
|
|
30
|
+
from attune.workflows import BugPredictWorkflow
|
|
31
|
+
|
|
32
|
+
workflow = BugPredictWorkflow()
|
|
33
|
+
result = await workflow.execute(target_path="./src")
|
|
34
|
+
```
|
|
35
|
+
|
|
36
|
+
## Migration Guide
|
|
37
|
+
|
|
38
|
+
| Old Wizard | New Approach |
|
|
39
|
+
|------------|--------------|
|
|
40
|
+
| `EnhancedTestingWizard` | `empathy workflow run test-coverage` |
|
|
41
|
+
| `PerformanceProfilingWizard` | `empathy workflow run profile` |
|
|
42
|
+
| `SecurityAnalysisWizard` | `empathy workflow run security-audit` |
|
|
43
|
+
|
|
44
|
+
## Installation
|
|
45
|
+
|
|
46
|
+
```bash
|
|
47
|
+
pip install attune-ai
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
## Support
|
|
51
|
+
|
|
52
|
+
- **Documentation:** [docs/](../docs/)
|
|
53
|
+
- **Issues:** [GitHub Issues](https://github.com/deepstudyai/empathy/issues)
|
|
54
|
+
|
|
55
|
+
## License
|
|
56
|
+
|
|
57
|
+
Copyright 2025-2026 Smart AI Memory, LLC - Licensed under Fair Source 0.9
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
"""Empathy Framework - Software Development Plugin
|
|
2
|
+
|
|
3
|
+
Primary domain plugin demonstrating Level 4 Anticipatory Empathy
|
|
4
|
+
for software development teams.
|
|
5
|
+
|
|
6
|
+
Copyright 2025 Smart AI Memory, LLC
|
|
7
|
+
Licensed under Fair Source 0.9
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from .plugin import SoftwarePlugin
|
|
11
|
+
|
|
12
|
+
__version__ = "1.0.0"
|
|
13
|
+
__all__ = ["SoftwarePlugin"]
|