attune-ai 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (457)
  1. attune/__init__.py +358 -0
  2. attune/adaptive/__init__.py +13 -0
  3. attune/adaptive/task_complexity.py +127 -0
  4. attune/agent_monitoring.py +414 -0
  5. attune/cache/__init__.py +117 -0
  6. attune/cache/base.py +166 -0
  7. attune/cache/dependency_manager.py +256 -0
  8. attune/cache/hash_only.py +251 -0
  9. attune/cache/hybrid.py +457 -0
  10. attune/cache/storage.py +285 -0
  11. attune/cache_monitor.py +356 -0
  12. attune/cache_stats.py +298 -0
  13. attune/cli/__init__.py +152 -0
  14. attune/cli/__main__.py +12 -0
  15. attune/cli/commands/__init__.py +1 -0
  16. attune/cli/commands/batch.py +264 -0
  17. attune/cli/commands/cache.py +248 -0
  18. attune/cli/commands/help.py +331 -0
  19. attune/cli/commands/info.py +140 -0
  20. attune/cli/commands/inspect.py +436 -0
  21. attune/cli/commands/inspection.py +57 -0
  22. attune/cli/commands/memory.py +48 -0
  23. attune/cli/commands/metrics.py +92 -0
  24. attune/cli/commands/orchestrate.py +184 -0
  25. attune/cli/commands/patterns.py +207 -0
  26. attune/cli/commands/profiling.py +202 -0
  27. attune/cli/commands/provider.py +98 -0
  28. attune/cli/commands/routing.py +285 -0
  29. attune/cli/commands/setup.py +96 -0
  30. attune/cli/commands/status.py +235 -0
  31. attune/cli/commands/sync.py +166 -0
  32. attune/cli/commands/tier.py +121 -0
  33. attune/cli/commands/utilities.py +114 -0
  34. attune/cli/commands/workflow.py +579 -0
  35. attune/cli/core.py +32 -0
  36. attune/cli/parsers/__init__.py +68 -0
  37. attune/cli/parsers/batch.py +118 -0
  38. attune/cli/parsers/cache.py +65 -0
  39. attune/cli/parsers/help.py +41 -0
  40. attune/cli/parsers/info.py +26 -0
  41. attune/cli/parsers/inspect.py +66 -0
  42. attune/cli/parsers/metrics.py +42 -0
  43. attune/cli/parsers/orchestrate.py +61 -0
  44. attune/cli/parsers/patterns.py +54 -0
  45. attune/cli/parsers/provider.py +40 -0
  46. attune/cli/parsers/routing.py +110 -0
  47. attune/cli/parsers/setup.py +42 -0
  48. attune/cli/parsers/status.py +47 -0
  49. attune/cli/parsers/sync.py +31 -0
  50. attune/cli/parsers/tier.py +33 -0
  51. attune/cli/parsers/workflow.py +77 -0
  52. attune/cli/utils/__init__.py +1 -0
  53. attune/cli/utils/data.py +242 -0
  54. attune/cli/utils/helpers.py +68 -0
  55. attune/cli_legacy.py +3957 -0
  56. attune/cli_minimal.py +1159 -0
  57. attune/cli_router.py +437 -0
  58. attune/cli_unified.py +814 -0
  59. attune/config/__init__.py +66 -0
  60. attune/config/xml_config.py +286 -0
  61. attune/config.py +545 -0
  62. attune/coordination.py +870 -0
  63. attune/core.py +1511 -0
  64. attune/core_modules/__init__.py +15 -0
  65. attune/cost_tracker.py +626 -0
  66. attune/dashboard/__init__.py +41 -0
  67. attune/dashboard/app.py +512 -0
  68. attune/dashboard/simple_server.py +435 -0
  69. attune/dashboard/standalone_server.py +547 -0
  70. attune/discovery.py +306 -0
  71. attune/emergence.py +306 -0
  72. attune/exceptions.py +123 -0
  73. attune/feedback_loops.py +373 -0
  74. attune/hot_reload/README.md +473 -0
  75. attune/hot_reload/__init__.py +62 -0
  76. attune/hot_reload/config.py +83 -0
  77. attune/hot_reload/integration.py +229 -0
  78. attune/hot_reload/reloader.py +298 -0
  79. attune/hot_reload/watcher.py +183 -0
  80. attune/hot_reload/websocket.py +177 -0
  81. attune/levels.py +577 -0
  82. attune/leverage_points.py +441 -0
  83. attune/logging_config.py +261 -0
  84. attune/mcp/__init__.py +10 -0
  85. attune/mcp/server.py +506 -0
  86. attune/memory/__init__.py +237 -0
  87. attune/memory/claude_memory.py +469 -0
  88. attune/memory/config.py +224 -0
  89. attune/memory/control_panel.py +1290 -0
  90. attune/memory/control_panel_support.py +145 -0
  91. attune/memory/cross_session.py +845 -0
  92. attune/memory/edges.py +179 -0
  93. attune/memory/encryption.py +159 -0
  94. attune/memory/file_session.py +770 -0
  95. attune/memory/graph.py +570 -0
  96. attune/memory/long_term.py +913 -0
  97. attune/memory/long_term_types.py +99 -0
  98. attune/memory/mixins/__init__.py +25 -0
  99. attune/memory/mixins/backend_init_mixin.py +249 -0
  100. attune/memory/mixins/capabilities_mixin.py +208 -0
  101. attune/memory/mixins/handoff_mixin.py +208 -0
  102. attune/memory/mixins/lifecycle_mixin.py +49 -0
  103. attune/memory/mixins/long_term_mixin.py +352 -0
  104. attune/memory/mixins/promotion_mixin.py +109 -0
  105. attune/memory/mixins/short_term_mixin.py +182 -0
  106. attune/memory/nodes.py +179 -0
  107. attune/memory/redis_bootstrap.py +540 -0
  108. attune/memory/security/__init__.py +31 -0
  109. attune/memory/security/audit_logger.py +932 -0
  110. attune/memory/security/pii_scrubber.py +640 -0
  111. attune/memory/security/secrets_detector.py +678 -0
  112. attune/memory/short_term.py +2192 -0
  113. attune/memory/simple_storage.py +302 -0
  114. attune/memory/storage/__init__.py +15 -0
  115. attune/memory/storage_backend.py +167 -0
  116. attune/memory/summary_index.py +583 -0
  117. attune/memory/types.py +446 -0
  118. attune/memory/unified.py +182 -0
  119. attune/meta_workflows/__init__.py +74 -0
  120. attune/meta_workflows/agent_creator.py +248 -0
  121. attune/meta_workflows/builtin_templates.py +567 -0
  122. attune/meta_workflows/cli_commands/__init__.py +56 -0
  123. attune/meta_workflows/cli_commands/agent_commands.py +321 -0
  124. attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
  125. attune/meta_workflows/cli_commands/config_commands.py +232 -0
  126. attune/meta_workflows/cli_commands/memory_commands.py +182 -0
  127. attune/meta_workflows/cli_commands/template_commands.py +354 -0
  128. attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
  129. attune/meta_workflows/cli_meta_workflows.py +59 -0
  130. attune/meta_workflows/form_engine.py +292 -0
  131. attune/meta_workflows/intent_detector.py +409 -0
  132. attune/meta_workflows/models.py +569 -0
  133. attune/meta_workflows/pattern_learner.py +738 -0
  134. attune/meta_workflows/plan_generator.py +384 -0
  135. attune/meta_workflows/session_context.py +397 -0
  136. attune/meta_workflows/template_registry.py +229 -0
  137. attune/meta_workflows/workflow.py +984 -0
  138. attune/metrics/__init__.py +12 -0
  139. attune/metrics/collector.py +31 -0
  140. attune/metrics/prompt_metrics.py +194 -0
  141. attune/models/__init__.py +172 -0
  142. attune/models/__main__.py +13 -0
  143. attune/models/adaptive_routing.py +437 -0
  144. attune/models/auth_cli.py +444 -0
  145. attune/models/auth_strategy.py +450 -0
  146. attune/models/cli.py +655 -0
  147. attune/models/empathy_executor.py +354 -0
  148. attune/models/executor.py +257 -0
  149. attune/models/fallback.py +762 -0
  150. attune/models/provider_config.py +282 -0
  151. attune/models/registry.py +472 -0
  152. attune/models/tasks.py +359 -0
  153. attune/models/telemetry/__init__.py +71 -0
  154. attune/models/telemetry/analytics.py +594 -0
  155. attune/models/telemetry/backend.py +196 -0
  156. attune/models/telemetry/data_models.py +431 -0
  157. attune/models/telemetry/storage.py +489 -0
  158. attune/models/token_estimator.py +420 -0
  159. attune/models/validation.py +280 -0
  160. attune/monitoring/__init__.py +52 -0
  161. attune/monitoring/alerts.py +946 -0
  162. attune/monitoring/alerts_cli.py +448 -0
  163. attune/monitoring/multi_backend.py +271 -0
  164. attune/monitoring/otel_backend.py +362 -0
  165. attune/optimization/__init__.py +19 -0
  166. attune/optimization/context_optimizer.py +272 -0
  167. attune/orchestration/__init__.py +67 -0
  168. attune/orchestration/agent_templates.py +707 -0
  169. attune/orchestration/config_store.py +499 -0
  170. attune/orchestration/execution_strategies.py +2111 -0
  171. attune/orchestration/meta_orchestrator.py +1168 -0
  172. attune/orchestration/pattern_learner.py +696 -0
  173. attune/orchestration/real_tools.py +931 -0
  174. attune/pattern_cache.py +187 -0
  175. attune/pattern_library.py +542 -0
  176. attune/patterns/debugging/all_patterns.json +81 -0
  177. attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
  178. attune/patterns/refactoring_memory.json +89 -0
  179. attune/persistence.py +564 -0
  180. attune/platform_utils.py +265 -0
  181. attune/plugins/__init__.py +28 -0
  182. attune/plugins/base.py +361 -0
  183. attune/plugins/registry.py +268 -0
  184. attune/project_index/__init__.py +32 -0
  185. attune/project_index/cli.py +335 -0
  186. attune/project_index/index.py +667 -0
  187. attune/project_index/models.py +504 -0
  188. attune/project_index/reports.py +474 -0
  189. attune/project_index/scanner.py +777 -0
  190. attune/project_index/scanner_parallel.py +291 -0
  191. attune/prompts/__init__.py +61 -0
  192. attune/prompts/config.py +77 -0
  193. attune/prompts/context.py +177 -0
  194. attune/prompts/parser.py +285 -0
  195. attune/prompts/registry.py +313 -0
  196. attune/prompts/templates.py +208 -0
  197. attune/redis_config.py +302 -0
  198. attune/redis_memory.py +799 -0
  199. attune/resilience/__init__.py +56 -0
  200. attune/resilience/circuit_breaker.py +256 -0
  201. attune/resilience/fallback.py +179 -0
  202. attune/resilience/health.py +300 -0
  203. attune/resilience/retry.py +209 -0
  204. attune/resilience/timeout.py +135 -0
  205. attune/routing/__init__.py +43 -0
  206. attune/routing/chain_executor.py +433 -0
  207. attune/routing/classifier.py +217 -0
  208. attune/routing/smart_router.py +234 -0
  209. attune/routing/workflow_registry.py +343 -0
  210. attune/scaffolding/README.md +589 -0
  211. attune/scaffolding/__init__.py +35 -0
  212. attune/scaffolding/__main__.py +14 -0
  213. attune/scaffolding/cli.py +240 -0
  214. attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
  215. attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
  216. attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
  217. attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
  218. attune/socratic/__init__.py +256 -0
  219. attune/socratic/ab_testing.py +958 -0
  220. attune/socratic/blueprint.py +533 -0
  221. attune/socratic/cli.py +703 -0
  222. attune/socratic/collaboration.py +1114 -0
  223. attune/socratic/domain_templates.py +924 -0
  224. attune/socratic/embeddings.py +738 -0
  225. attune/socratic/engine.py +794 -0
  226. attune/socratic/explainer.py +682 -0
  227. attune/socratic/feedback.py +772 -0
  228. attune/socratic/forms.py +629 -0
  229. attune/socratic/generator.py +732 -0
  230. attune/socratic/llm_analyzer.py +637 -0
  231. attune/socratic/mcp_server.py +702 -0
  232. attune/socratic/session.py +312 -0
  233. attune/socratic/storage.py +667 -0
  234. attune/socratic/success.py +730 -0
  235. attune/socratic/visual_editor.py +860 -0
  236. attune/socratic/web_ui.py +958 -0
  237. attune/telemetry/__init__.py +39 -0
  238. attune/telemetry/agent_coordination.py +475 -0
  239. attune/telemetry/agent_tracking.py +367 -0
  240. attune/telemetry/approval_gates.py +545 -0
  241. attune/telemetry/cli.py +1231 -0
  242. attune/telemetry/commands/__init__.py +14 -0
  243. attune/telemetry/commands/dashboard_commands.py +696 -0
  244. attune/telemetry/event_streaming.py +409 -0
  245. attune/telemetry/feedback_loop.py +567 -0
  246. attune/telemetry/usage_tracker.py +591 -0
  247. attune/templates.py +754 -0
  248. attune/test_generator/__init__.py +38 -0
  249. attune/test_generator/__main__.py +14 -0
  250. attune/test_generator/cli.py +234 -0
  251. attune/test_generator/generator.py +355 -0
  252. attune/test_generator/risk_analyzer.py +216 -0
  253. attune/test_generator/templates/unit_test.py.jinja2 +272 -0
  254. attune/tier_recommender.py +384 -0
  255. attune/tools.py +183 -0
  256. attune/trust/__init__.py +28 -0
  257. attune/trust/circuit_breaker.py +579 -0
  258. attune/trust_building.py +527 -0
  259. attune/validation/__init__.py +19 -0
  260. attune/validation/xml_validator.py +281 -0
  261. attune/vscode_bridge.py +173 -0
  262. attune/workflow_commands.py +780 -0
  263. attune/workflow_patterns/__init__.py +33 -0
  264. attune/workflow_patterns/behavior.py +249 -0
  265. attune/workflow_patterns/core.py +76 -0
  266. attune/workflow_patterns/output.py +99 -0
  267. attune/workflow_patterns/registry.py +255 -0
  268. attune/workflow_patterns/structural.py +288 -0
  269. attune/workflows/__init__.py +539 -0
  270. attune/workflows/autonomous_test_gen.py +1268 -0
  271. attune/workflows/base.py +2667 -0
  272. attune/workflows/batch_processing.py +342 -0
  273. attune/workflows/bug_predict.py +1084 -0
  274. attune/workflows/builder.py +273 -0
  275. attune/workflows/caching.py +253 -0
  276. attune/workflows/code_review.py +1048 -0
  277. attune/workflows/code_review_adapters.py +312 -0
  278. attune/workflows/code_review_pipeline.py +722 -0
  279. attune/workflows/config.py +645 -0
  280. attune/workflows/dependency_check.py +644 -0
  281. attune/workflows/document_gen/__init__.py +25 -0
  282. attune/workflows/document_gen/config.py +30 -0
  283. attune/workflows/document_gen/report_formatter.py +162 -0
  284. attune/workflows/document_gen/workflow.py +1426 -0
  285. attune/workflows/document_manager.py +216 -0
  286. attune/workflows/document_manager_README.md +134 -0
  287. attune/workflows/documentation_orchestrator.py +1205 -0
  288. attune/workflows/history.py +510 -0
  289. attune/workflows/keyboard_shortcuts/__init__.py +39 -0
  290. attune/workflows/keyboard_shortcuts/generators.py +391 -0
  291. attune/workflows/keyboard_shortcuts/parsers.py +416 -0
  292. attune/workflows/keyboard_shortcuts/prompts.py +295 -0
  293. attune/workflows/keyboard_shortcuts/schema.py +193 -0
  294. attune/workflows/keyboard_shortcuts/workflow.py +509 -0
  295. attune/workflows/llm_base.py +363 -0
  296. attune/workflows/manage_docs.py +87 -0
  297. attune/workflows/manage_docs_README.md +134 -0
  298. attune/workflows/manage_documentation.py +821 -0
  299. attune/workflows/new_sample_workflow1.py +149 -0
  300. attune/workflows/new_sample_workflow1_README.md +150 -0
  301. attune/workflows/orchestrated_health_check.py +849 -0
  302. attune/workflows/orchestrated_release_prep.py +600 -0
  303. attune/workflows/output.py +413 -0
  304. attune/workflows/perf_audit.py +863 -0
  305. attune/workflows/pr_review.py +762 -0
  306. attune/workflows/progress.py +785 -0
  307. attune/workflows/progress_server.py +322 -0
  308. attune/workflows/progressive/README 2.md +454 -0
  309. attune/workflows/progressive/README.md +454 -0
  310. attune/workflows/progressive/__init__.py +82 -0
  311. attune/workflows/progressive/cli.py +219 -0
  312. attune/workflows/progressive/core.py +488 -0
  313. attune/workflows/progressive/orchestrator.py +723 -0
  314. attune/workflows/progressive/reports.py +520 -0
  315. attune/workflows/progressive/telemetry.py +274 -0
  316. attune/workflows/progressive/test_gen.py +495 -0
  317. attune/workflows/progressive/workflow.py +589 -0
  318. attune/workflows/refactor_plan.py +694 -0
  319. attune/workflows/release_prep.py +895 -0
  320. attune/workflows/release_prep_crew.py +969 -0
  321. attune/workflows/research_synthesis.py +404 -0
  322. attune/workflows/routing.py +168 -0
  323. attune/workflows/secure_release.py +593 -0
  324. attune/workflows/security_adapters.py +297 -0
  325. attune/workflows/security_audit.py +1329 -0
  326. attune/workflows/security_audit_phase3.py +355 -0
  327. attune/workflows/seo_optimization.py +633 -0
  328. attune/workflows/step_config.py +234 -0
  329. attune/workflows/telemetry_mixin.py +269 -0
  330. attune/workflows/test5.py +125 -0
  331. attune/workflows/test5_README.md +158 -0
  332. attune/workflows/test_coverage_boost_crew.py +849 -0
  333. attune/workflows/test_gen/__init__.py +52 -0
  334. attune/workflows/test_gen/ast_analyzer.py +249 -0
  335. attune/workflows/test_gen/config.py +88 -0
  336. attune/workflows/test_gen/data_models.py +38 -0
  337. attune/workflows/test_gen/report_formatter.py +289 -0
  338. attune/workflows/test_gen/test_templates.py +381 -0
  339. attune/workflows/test_gen/workflow.py +655 -0
  340. attune/workflows/test_gen.py +54 -0
  341. attune/workflows/test_gen_behavioral.py +477 -0
  342. attune/workflows/test_gen_parallel.py +341 -0
  343. attune/workflows/test_lifecycle.py +526 -0
  344. attune/workflows/test_maintenance.py +627 -0
  345. attune/workflows/test_maintenance_cli.py +590 -0
  346. attune/workflows/test_maintenance_crew.py +840 -0
  347. attune/workflows/test_runner.py +622 -0
  348. attune/workflows/tier_tracking.py +531 -0
  349. attune/workflows/xml_enhanced_crew.py +285 -0
  350. attune_ai-2.0.0.dist-info/METADATA +1026 -0
  351. attune_ai-2.0.0.dist-info/RECORD +457 -0
  352. attune_ai-2.0.0.dist-info/WHEEL +5 -0
  353. attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
  354. attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
  355. attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
  356. attune_ai-2.0.0.dist-info/top_level.txt +5 -0
  357. attune_healthcare/__init__.py +13 -0
  358. attune_healthcare/monitors/__init__.py +9 -0
  359. attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
  360. attune_healthcare/monitors/monitoring/__init__.py +44 -0
  361. attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
  362. attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
  363. attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
  364. attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
  365. attune_llm/README.md +553 -0
  366. attune_llm/__init__.py +28 -0
  367. attune_llm/agent_factory/__init__.py +53 -0
  368. attune_llm/agent_factory/adapters/__init__.py +85 -0
  369. attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
  370. attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
  371. attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
  372. attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
  373. attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
  374. attune_llm/agent_factory/adapters/native.py +228 -0
  375. attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
  376. attune_llm/agent_factory/base.py +305 -0
  377. attune_llm/agent_factory/crews/__init__.py +67 -0
  378. attune_llm/agent_factory/crews/code_review.py +1113 -0
  379. attune_llm/agent_factory/crews/health_check.py +1262 -0
  380. attune_llm/agent_factory/crews/refactoring.py +1128 -0
  381. attune_llm/agent_factory/crews/security_audit.py +1018 -0
  382. attune_llm/agent_factory/decorators.py +287 -0
  383. attune_llm/agent_factory/factory.py +558 -0
  384. attune_llm/agent_factory/framework.py +193 -0
  385. attune_llm/agent_factory/memory_integration.py +328 -0
  386. attune_llm/agent_factory/resilient.py +320 -0
  387. attune_llm/agents_md/__init__.py +22 -0
  388. attune_llm/agents_md/loader.py +218 -0
  389. attune_llm/agents_md/parser.py +271 -0
  390. attune_llm/agents_md/registry.py +307 -0
  391. attune_llm/claude_memory.py +466 -0
  392. attune_llm/cli/__init__.py +8 -0
  393. attune_llm/cli/sync_claude.py +487 -0
  394. attune_llm/code_health.py +1313 -0
  395. attune_llm/commands/__init__.py +51 -0
  396. attune_llm/commands/context.py +375 -0
  397. attune_llm/commands/loader.py +301 -0
  398. attune_llm/commands/models.py +231 -0
  399. attune_llm/commands/parser.py +371 -0
  400. attune_llm/commands/registry.py +429 -0
  401. attune_llm/config/__init__.py +29 -0
  402. attune_llm/config/unified.py +291 -0
  403. attune_llm/context/__init__.py +22 -0
  404. attune_llm/context/compaction.py +455 -0
  405. attune_llm/context/manager.py +434 -0
  406. attune_llm/contextual_patterns.py +361 -0
  407. attune_llm/core.py +907 -0
  408. attune_llm/git_pattern_extractor.py +435 -0
  409. attune_llm/hooks/__init__.py +24 -0
  410. attune_llm/hooks/config.py +306 -0
  411. attune_llm/hooks/executor.py +289 -0
  412. attune_llm/hooks/registry.py +302 -0
  413. attune_llm/hooks/scripts/__init__.py +39 -0
  414. attune_llm/hooks/scripts/evaluate_session.py +201 -0
  415. attune_llm/hooks/scripts/first_time_init.py +285 -0
  416. attune_llm/hooks/scripts/pre_compact.py +207 -0
  417. attune_llm/hooks/scripts/session_end.py +183 -0
  418. attune_llm/hooks/scripts/session_start.py +163 -0
  419. attune_llm/hooks/scripts/suggest_compact.py +225 -0
  420. attune_llm/learning/__init__.py +30 -0
  421. attune_llm/learning/evaluator.py +438 -0
  422. attune_llm/learning/extractor.py +514 -0
  423. attune_llm/learning/storage.py +560 -0
  424. attune_llm/levels.py +227 -0
  425. attune_llm/pattern_confidence.py +414 -0
  426. attune_llm/pattern_resolver.py +272 -0
  427. attune_llm/pattern_summary.py +350 -0
  428. attune_llm/providers.py +967 -0
  429. attune_llm/routing/__init__.py +32 -0
  430. attune_llm/routing/model_router.py +362 -0
  431. attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
  432. attune_llm/security/PHASE2_COMPLETE.md +384 -0
  433. attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
  434. attune_llm/security/QUICK_REFERENCE.md +316 -0
  435. attune_llm/security/README.md +262 -0
  436. attune_llm/security/__init__.py +62 -0
  437. attune_llm/security/audit_logger.py +929 -0
  438. attune_llm/security/audit_logger_example.py +152 -0
  439. attune_llm/security/pii_scrubber.py +640 -0
  440. attune_llm/security/secrets_detector.py +678 -0
  441. attune_llm/security/secrets_detector_example.py +304 -0
  442. attune_llm/security/secure_memdocs.py +1192 -0
  443. attune_llm/security/secure_memdocs_example.py +278 -0
  444. attune_llm/session_status.py +745 -0
  445. attune_llm/state.py +246 -0
  446. attune_llm/utils/__init__.py +5 -0
  447. attune_llm/utils/tokens.py +349 -0
  448. attune_software/SOFTWARE_PLUGIN_README.md +57 -0
  449. attune_software/__init__.py +13 -0
  450. attune_software/cli/__init__.py +120 -0
  451. attune_software/cli/inspect.py +362 -0
  452. attune_software/cli.py +574 -0
  453. attune_software/plugin.py +188 -0
  454. workflow_scaffolding/__init__.py +11 -0
  455. workflow_scaffolding/__main__.py +12 -0
  456. workflow_scaffolding/cli.py +206 -0
  457. workflow_scaffolding/generator.py +265 -0
@@ -0,0 +1,354 @@
1
+ """EmpathyLLM Executor Implementation
2
+
3
+ Default LLMExecutor implementation that wraps EmpathyLLM for use
4
+ in workflows with automatic model routing and cost tracking.
5
+
6
+ Copyright 2025 Smart-AI-Memory
7
+ Licensed under Fair Source License 0.9
8
+ """
9
+
10
+ import logging
11
+ import time
12
+ import uuid
13
+ from datetime import datetime
14
+ from typing import Any
15
+
16
+ from .executor import ExecutionContext, LLMResponse
17
+ from .registry import get_model
18
+ from .tasks import get_tier_for_task
19
+ from .telemetry import LLMCallRecord, TelemetryBackend, TelemetryStore
20
+
21
+ logger = logging.getLogger(__name__)
22
+
23
+
24
class EmpathyLLMExecutor:
    """Default executor wrapping EmpathyLLM with routing.

    This executor provides a unified interface for workflows to call LLMs
    with automatic tier-based model routing and cost tracking.

    Supports hybrid mode where different tiers use different providers.

    Example:
        >>> executor = EmpathyLLMExecutor(provider="anthropic")
        >>> response = await executor.run(
        ...     task_type="summarize",
        ...     prompt="Summarize this document...",
        ... )
        >>> print(f"Model used: {response.model_used}")
        >>> print(f"Cost: ${response.cost:.4f}")

    """

    def __init__(
        self,
        empathy_llm: Any | None = None,
        provider: str = "anthropic",
        api_key: str | None = None,
        telemetry_store: TelemetryBackend | TelemetryStore | None = None,
        **llm_kwargs: Any,
    ) -> None:
        """Initialize the EmpathyLLM executor.

        Args:
            empathy_llm: Optional pre-configured EmpathyLLM instance.
            provider: LLM provider (anthropic, openai, google, ollama, hybrid).
            api_key: Optional API key for the provider.
            telemetry_store: Optional telemetry store for recording calls.
            **llm_kwargs: Additional arguments for EmpathyLLM.

        """
        self._provider = provider
        self._api_key = api_key
        self._llm_kwargs = llm_kwargs
        # May be None; the real LLM is then created lazily in _get_llm().
        self._llm = empathy_llm
        self._telemetry = telemetry_store
        self._hybrid_llms: dict[str, Any] = {}  # Cache per-provider LLMs for hybrid mode
        self._hybrid_config: dict[str, str] | None = None  # tier -> model_id mapping

        # Load hybrid config if provider is hybrid
        if provider == "hybrid":
            self._load_hybrid_config()

    def _load_hybrid_config(self) -> None:
        """Load hybrid tier->model mapping from workflows.yaml.

        Best-effort: any failure (missing config module, bad file) is logged
        and leaves ``self._hybrid_config`` as None, so the executor falls back
        to single-provider behavior instead of crashing at construction time.
        """
        try:
            # Imported here (not at module level) to avoid a hard dependency
            # on the workflows package for non-hybrid usage.
            from attune.workflows.config import WorkflowConfig

            config = WorkflowConfig.load()
            if config.custom_models and "hybrid" in config.custom_models:
                self._hybrid_config = config.custom_models["hybrid"]
                logger.info(f"Loaded hybrid config: {self._hybrid_config}")
        except Exception as e:
            logger.warning(f"Failed to load hybrid config: {e}")

    def _get_provider_for_model(self, model_id: str) -> str:
        """Determine which provider a model belongs to based on its ID.

        Heuristic substring matching on the model name; the checks are ordered
        so Anthropic names win first and anything unrecognized defaults to
        "anthropic".
        """
        model_lower = model_id.lower()
        if (
            "claude" in model_lower
            or "haiku" in model_lower
            or "sonnet" in model_lower
            or "opus" in model_lower
        ):
            return "anthropic"
        if "gpt" in model_lower or "o1" in model_lower:
            return "openai"
        if "gemini" in model_lower:
            return "google"
        # ":" presumably matches Ollama-style tags like "llama3:8b" — TODO confirm
        if "llama" in model_lower or "mixtral" in model_lower or ":" in model_id:
            return "ollama"
        # Default to anthropic
        return "anthropic"

    def _get_llm_for_tier(self, tier: str) -> tuple[Any, str, str]:
        """Get the appropriate LLM for a tier (supports hybrid mode).

        Args:
            tier: Tier name (string form, e.g. "cheap"/"capable"/"premium").

        Returns:
            Tuple of (llm_instance, actual_provider, model_id)

        Note:
            In non-hybrid mode (or when the tier has no hybrid mapping) the
            returned model_id is "" and the caller resolves the model itself.
        """
        if self._provider != "hybrid" or not self._hybrid_config:
            # Non-hybrid mode: use single provider
            return self._get_llm(), self._provider, ""

        # Hybrid mode: determine provider based on tier's model
        model_id = self._hybrid_config.get(tier, "")
        if not model_id:
            # Fall back to non-hybrid
            return self._get_llm(), self._provider, ""

        actual_provider = self._get_provider_for_model(model_id)

        # Get or create LLM for this provider (one cached instance per provider)
        if actual_provider not in self._hybrid_llms:
            try:
                import os

                from attune_llm import EmpathyLLM

                # Get API key for this provider from environment
                api_key_map = {
                    "anthropic": os.getenv("ANTHROPIC_API_KEY"),
                    "openai": os.getenv("OPENAI_API_KEY"),
                    "google": os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY"),
                    "ollama": None,  # Ollama doesn't need API key
                }
                api_key = api_key_map.get(actual_provider)

                kwargs = {
                    "provider": actual_provider,
                    "model": model_id,
                    "enable_model_routing": False,  # Use explicit model
                    **self._llm_kwargs,
                }
                if api_key:
                    kwargs["api_key"] = api_key

                self._hybrid_llms[actual_provider] = EmpathyLLM(**kwargs)
                logger.info(f"Created hybrid LLM for {actual_provider} with model {model_id}")
            except ImportError as e:
                raise ImportError("empathy_llm_toolkit is required for EmpathyLLMExecutor.") from e

        return self._hybrid_llms[actual_provider], actual_provider, model_id

    def _get_llm(self) -> Any:
        """Lazy initialization of EmpathyLLM.

        Raises:
            ImportError: If the attune_llm package is not installed.
        """
        if self._llm is None:
            try:
                from attune_llm import EmpathyLLM

                kwargs = {
                    "provider": self._provider,
                    # Unlike hybrid instances, the single-provider LLM routes
                    # models itself.
                    "enable_model_routing": True,
                    **self._llm_kwargs,
                }
                if self._api_key:
                    kwargs["api_key"] = self._api_key

                self._llm = EmpathyLLM(**kwargs)
            except ImportError as e:
                raise ImportError(
                    "empathy_llm_toolkit is required for EmpathyLLMExecutor. "
                    "Install it or use MockLLMExecutor for testing.",
                ) from e
        return self._llm

    async def run(
        self,
        task_type: str,
        prompt: str,
        system: str | None = None,
        context: ExecutionContext | None = None,
        **kwargs: Any,
    ) -> LLMResponse:
        """Execute an LLM call with routing and cost tracking.

        Args:
            task_type: Type of task for routing (e.g., "summarize", "fix_bug").
            prompt: The user prompt to send.
            system: Optional system prompt (passed as context).
            context: Optional execution context for tracking.
            **kwargs: Additional arguments for EmpathyLLM.interact().

        Returns:
            LLMResponse with content, tokens, cost, and metadata.

        """
        start_time = time.time()
        call_id = str(uuid.uuid4())

        # Use task_type from context if provided
        effective_task_type = task_type
        if context and context.task_type:
            effective_task_type = context.task_type

        # Determine tier for this task
        tier = get_tier_for_task(effective_task_type)
        # Accepts either an enum (with .value) or a plain string tier.
        tier_str = tier.value if hasattr(tier, "value") else str(tier)

        # Get appropriate LLM (supports hybrid mode)
        llm, actual_provider, hybrid_model_id = self._get_llm_for_tier(tier_str)

        # Build context dict; pop "existing_context" so it isn't also
        # forwarded to interact() via **kwargs.
        full_context: dict[str, Any] = kwargs.pop("existing_context", {})
        if system:
            full_context["system_prompt"] = system
        if context:
            if context.workflow_name:
                full_context["workflow_name"] = context.workflow_name
            if context.step_name:
                full_context["step_name"] = context.step_name
            if context.session_id:
                full_context["session_id"] = context.session_id
            if context.metadata:
                # Caller metadata may overwrite the keys set above.
                full_context.update(context.metadata)

        # Determine user_id
        user_id = "workflow"
        if context and context.user_id:
            user_id = context.user_id

        # Use actual provider (resolved for hybrid mode)
        provider = actual_provider

        # Call EmpathyLLM with task_type routing
        result = await llm.interact(
            user_id=user_id,
            user_input=prompt,
            context=full_context if full_context else None,
            task_type=effective_task_type,
            **kwargs,
        )

        # Calculate latency
        latency_ms = int((time.time() - start_time) * 1000)

        # Extract routing metadata
        metadata = result.get("metadata", {})

        # Get token counts
        # NOTE(review): "tokens_used" is treated as the *input* token count
        # here — confirm it is not a total; a mismatch would skew cost_estimate.
        tokens_input = metadata.get("tokens_used", 0)
        tokens_output = metadata.get("output_tokens", 0)

        # Get model info - use hybrid_model_id if set, otherwise look up
        model_info = get_model(provider, tier_str)
        model_id = hybrid_model_id or metadata.get("routed_model", metadata.get("model", ""))
        if not model_id and model_info:
            model_id = model_info.id

        # Calculate cost (per-million-token pricing from the model registry)
        cost_estimate = 0.0
        if model_info:
            cost_estimate = (tokens_input / 1_000_000) * model_info.input_cost_per_million + (
                tokens_output / 1_000_000
            ) * model_info.output_cost_per_million

        # Build response
        response = LLMResponse(
            content=result.get("content", ""),
            model_id=model_id,
            provider=provider,
            tier=tier_str,
            tokens_input=tokens_input,
            tokens_output=tokens_output,
            cost_estimate=cost_estimate,
            latency_ms=latency_ms,
            metadata={
                "call_id": call_id,
                "level_used": result.get("level_used"),
                "level_description": result.get("level_description"),
                "proactive": result.get("proactive"),
                "task_type": effective_task_type,
                "model_routing_enabled": metadata.get("model_routing_enabled", False),
                "routed_tier": metadata.get("routed_tier"),
                # Spread last on purpose: provider metadata keys override the
                # explicit entries above when they collide.
                **metadata,
            },
        )

        # Record telemetry (silent failure)
        if self._telemetry:
            try:
                record = LLMCallRecord(
                    call_id=call_id,
                    # NOTE(review): naive local timestamp — confirm whether
                    # telemetry consumers expect UTC.
                    timestamp=datetime.now().isoformat(),
                    workflow_name=context.workflow_name if context else None,
                    step_name=context.step_name if context else None,
                    user_id=user_id,
                    session_id=context.session_id if context else None,
                    task_type=effective_task_type,
                    provider=provider,
                    tier=tier_str,
                    model_id=model_id,
                    input_tokens=tokens_input,
                    output_tokens=tokens_output,
                    estimated_cost=cost_estimate,
                    latency_ms=latency_ms,
                    success=True,
                )
                self._telemetry.log_call(record)
            except Exception as e:
                # Telemetry must never break the actual LLM call.
                logger.warning("Failed to record telemetry: %s", e)

        return response

    def get_model_for_task(self, task_type: str) -> str:
        """Get the model that would be used for a task type.

        Args:
            task_type: Type of task to route

        Returns:
            Model identifier string

        Note:
            NOTE(review): looks up ``self._provider`` directly, so in "hybrid"
            mode this may not match what run() actually uses — confirm.
        """
        tier = get_tier_for_task(task_type)
        model_info = get_model(self._provider, tier.value)
        return model_info.id if model_info else ""

    def estimate_cost(
        self,
        task_type: str,
        input_tokens: int,
        output_tokens: int,
    ) -> float:
        """Estimate cost for a task before execution.

        Args:
            task_type: Type of task
            input_tokens: Estimated input tokens
            output_tokens: Estimated output tokens

        Returns:
            Estimated cost in dollars

        Note:
            Returns 0.0 when the (provider, tier) pair is unknown to the
            model registry. NOTE(review): like get_model_for_task, this is
            not hybrid-aware — confirm.
        """
        tier = get_tier_for_task(task_type)
        model_info = get_model(self._provider, tier.value)

        if not model_info:
            return 0.0

        return (input_tokens / 1_000_000) * model_info.input_cost_per_million + (
            output_tokens / 1_000_000
        ) * model_info.output_cost_per_million
@@ -0,0 +1,257 @@
1
+ """LLM Executor Protocol for Empathy Framework
2
+
3
+ Provides a unified interface for LLM execution that can be used by:
4
+ - src/attune/workflows.BaseWorkflow
5
+ - Custom workflow implementations
6
+ - Testing and mocking
7
+
8
+ This protocol enables:
9
+ - Consistent model routing across workflows
10
+ - Unified cost tracking
11
+ - Easy swapping of LLM implementations
12
+
13
+ Copyright 2025 Smart-AI-Memory
14
+ Licensed under Fair Source License 0.9
15
+ """
16
+
17
+ from dataclasses import dataclass, field
18
+ from typing import Any, Protocol, runtime_checkable
19
+
20
+
21
@dataclass
class LLMResponse:
    """Standardized result of a single LLM execution.

    Bundles the generated text with token accounting, cost, latency, and
    provider/model metadata. A set of read-only property aliases is kept
    so older call sites keep working.

    Attributes:
        content: The LLM response text
        model_id: Model identifier (e.g., "claude-sonnet-4-5-20250514")
        provider: Provider name (e.g., "anthropic", "openai")
        tier: Model tier ("cheap", "capable", "premium")
        tokens_input: Number of input tokens used
        tokens_output: Number of output tokens generated
        cost_estimate: Estimated cost in USD
        latency_ms: Response time in milliseconds
        metadata: Additional response metadata

    """

    content: str
    model_id: str
    provider: str
    tier: str
    tokens_input: int = 0
    tokens_output: int = 0
    cost_estimate: float = 0.0
    latency_ms: int = 0
    metadata: dict[str, Any] = field(default_factory=dict)

    @property
    def total_tokens(self) -> int:
        """Combined input and output token count."""
        return self.tokens_input + self.tokens_output

    @property
    def success(self) -> bool:
        """True when the response carries non-empty content."""
        return bool(self.content)

    # --- Backwards-compatible aliases for older callers ---

    @property
    def input_tokens(self) -> int:
        """Alias for tokens_input (backwards compatibility)."""
        return self.tokens_input

    @property
    def output_tokens(self) -> int:
        """Alias for tokens_output (backwards compatibility)."""
        return self.tokens_output

    @property
    def model_used(self) -> str:
        """Alias for model_id (backwards compatibility)."""
        return self.model_id

    @property
    def cost(self) -> float:
        """Alias for cost_estimate (backwards compatibility)."""
        return self.cost_estimate
81
+
82
+
83
+ @dataclass
84
+ class ExecutionContext:
85
+ """Context for an LLM execution.
86
+
87
+ Provides additional information that may be used for routing,
88
+ logging, or cost tracking.
89
+
90
+ Attributes:
91
+ user_id: User identifier for tracking
92
+ workflow_name: Name of the workflow (e.g., "security-audit")
93
+ step_name: Name of the current step (e.g., "scan")
94
+ task_type: Task type for routing (e.g., "summarize", "fix_bug")
95
+ provider_hint: Override default provider selection
96
+ tier_hint: Override tier selection (cheap/capable/premium)
97
+ timeout_seconds: Timeout for this execution
98
+ session_id: Session identifier
99
+ metadata: Additional context (can include retry_policy, fallback_policy)
100
+
101
+ """
102
+
103
+ user_id: str | None = None
104
+ workflow_name: str | None = None
105
+ step_name: str | None = None
106
+ task_type: str | None = None
107
+ provider_hint: str | None = None
108
+ tier_hint: str | None = None
109
+ timeout_seconds: int | None = None
110
+ session_id: str | None = None
111
+ metadata: dict[str, Any] = field(default_factory=dict)
112
+
113
+
114
@runtime_checkable
class LLMExecutor(Protocol):
    """Structural interface for LLM execution with routing and cost tracking.

    Any object exposing these three members satisfies the protocol
    (checkable at runtime via isinstance thanks to @runtime_checkable),
    letting workflows swap real, cached, or mock executors freely.

    Example:
        >>> executor = EmpathyLLMExecutor(provider="anthropic")
        >>> response = await executor.run(
        ...     task_type="summarize",
        ...     prompt="Summarize this document...",
        ...     context=ExecutionContext(workflow_name="doc-gen"),
        ... )
        >>> print(f"Cost: ${response.cost:.4f}")

    """

    async def run(
        self,
        task_type: str,
        prompt: str,
        system: str | None = None,
        context: ExecutionContext | None = None,
        **kwargs: Any,
    ) -> LLMResponse:
        """Execute an LLM call with routing and cost tracking.

        Args:
            task_type: Type of task (e.g., "summarize", "fix_bug", "coordinate")
                Used for model tier routing.
            prompt: The user prompt to send to the LLM.
            system: Optional system prompt.
            context: Optional execution context for tracking.
            **kwargs: Additional provider-specific arguments.

        Returns:
            LLMResponse with content, tokens, cost, and metadata.

        """
        ...

    def get_model_for_task(self, task_type: str) -> str:
        """Get the model that would be used for a task type.

        Args:
            task_type: Type of task to route

        Returns:
            Model identifier string

        """
        ...

    def estimate_cost(
        self,
        task_type: str,
        input_tokens: int,
        output_tokens: int,
    ) -> float:
        """Estimate cost for a task before execution.

        Args:
            task_type: Type of task
            input_tokens: Estimated input tokens
            output_tokens: Estimated output tokens

        Returns:
            Estimated cost in dollars

        """
        ...
186
+
187
+
188
class MockLLMExecutor:
    """Test double satisfying the LLMExecutor protocol.

    Records every call in ``call_history`` and returns a canned
    response without contacting any real provider.
    """

    def __init__(
        self,
        default_response: str = "Mock response",
        default_model: str = "mock-model",
    ):
        """Initialize mock executor.

        Args:
            default_response: Default content to return
            default_model: Default model name to report

        """
        self.default_response = default_response
        self.default_model = default_model
        # One dict per run() invocation, capturing its arguments.
        self.call_history: list[dict[str, Any]] = []

    async def run(
        self,
        task_type: str,
        prompt: str,
        system: str | None = None,
        context: ExecutionContext | None = None,
        **kwargs: Any,
    ) -> LLMResponse:
        """Record the call and return a canned LLMResponse."""
        from .tasks import get_tier_for_task

        routed_tier = get_tier_for_task(task_type)
        tier_label = routed_tier.value if hasattr(routed_tier, "value") else str(routed_tier)

        # Keep a full record of the invocation for test assertions.
        self.call_history.append(
            {
                "task_type": task_type,
                "prompt": prompt,
                "system": system,
                "context": context,
                "kwargs": kwargs,
            },
        )

        # Token counts are crude word-based estimates, not tokenizer output.
        approx_input = len(prompt.split()) * 4
        approx_output = len(self.default_response.split()) * 4

        return LLMResponse(
            content=self.default_response,
            model_id=self.default_model,
            provider="mock",
            tier=tier_label,
            tokens_input=approx_input,
            tokens_output=approx_output,
            cost_estimate=0.0,
            latency_ms=10,
            metadata={"mock": True, "task_type": task_type},
        )

    def get_model_for_task(self, task_type: str) -> str:
        """Return the configured mock model id."""
        return self.default_model

    def estimate_cost(
        self,
        task_type: str,
        input_tokens: int,
        output_tokens: int,
    ) -> float:
        """Mock calls are free."""
        return 0.0