attune-ai 2.1.4__py3-none-any.whl → 2.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. attune/cli/__init__.py +3 -55
  2. attune/cli/commands/batch.py +4 -12
  3. attune/cli/commands/cache.py +7 -15
  4. attune/cli/commands/provider.py +17 -0
  5. attune/cli/commands/routing.py +3 -1
  6. attune/cli/commands/setup.py +122 -0
  7. attune/cli/commands/tier.py +1 -3
  8. attune/cli/commands/workflow.py +31 -0
  9. attune/cli/parsers/cache.py +1 -0
  10. attune/cli/parsers/help.py +1 -3
  11. attune/cli/parsers/provider.py +7 -0
  12. attune/cli/parsers/routing.py +1 -3
  13. attune/cli/parsers/setup.py +7 -0
  14. attune/cli/parsers/status.py +1 -3
  15. attune/cli/parsers/tier.py +1 -3
  16. attune/cli_minimal.py +34 -28
  17. attune/cli_router.py +9 -7
  18. attune/cli_unified.py +3 -0
  19. attune/core.py +190 -0
  20. attune/dashboard/app.py +4 -2
  21. attune/dashboard/simple_server.py +3 -1
  22. attune/dashboard/standalone_server.py +7 -3
  23. attune/mcp/server.py +54 -102
  24. attune/memory/long_term.py +0 -2
  25. attune/memory/short_term/__init__.py +84 -0
  26. attune/memory/short_term/base.py +467 -0
  27. attune/memory/short_term/batch.py +219 -0
  28. attune/memory/short_term/caching.py +227 -0
  29. attune/memory/short_term/conflicts.py +265 -0
  30. attune/memory/short_term/cross_session.py +122 -0
  31. attune/memory/short_term/facade.py +655 -0
  32. attune/memory/short_term/pagination.py +215 -0
  33. attune/memory/short_term/patterns.py +271 -0
  34. attune/memory/short_term/pubsub.py +286 -0
  35. attune/memory/short_term/queues.py +244 -0
  36. attune/memory/short_term/security.py +300 -0
  37. attune/memory/short_term/sessions.py +250 -0
  38. attune/memory/short_term/streams.py +249 -0
  39. attune/memory/short_term/timelines.py +234 -0
  40. attune/memory/short_term/transactions.py +186 -0
  41. attune/memory/short_term/working.py +252 -0
  42. attune/meta_workflows/cli_commands/__init__.py +3 -0
  43. attune/meta_workflows/cli_commands/agent_commands.py +0 -4
  44. attune/meta_workflows/cli_commands/analytics_commands.py +0 -6
  45. attune/meta_workflows/cli_commands/config_commands.py +0 -5
  46. attune/meta_workflows/cli_commands/memory_commands.py +0 -5
  47. attune/meta_workflows/cli_commands/template_commands.py +0 -5
  48. attune/meta_workflows/cli_commands/workflow_commands.py +0 -6
  49. attune/meta_workflows/workflow.py +1 -1
  50. attune/models/adaptive_routing.py +4 -8
  51. attune/models/auth_cli.py +3 -9
  52. attune/models/auth_strategy.py +2 -4
  53. attune/models/provider_config.py +20 -1
  54. attune/models/telemetry/analytics.py +0 -2
  55. attune/models/telemetry/backend.py +0 -3
  56. attune/models/telemetry/storage.py +0 -2
  57. attune/orchestration/_strategies/__init__.py +156 -0
  58. attune/orchestration/_strategies/base.py +231 -0
  59. attune/orchestration/_strategies/conditional_strategies.py +373 -0
  60. attune/orchestration/_strategies/conditions.py +369 -0
  61. attune/orchestration/_strategies/core_strategies.py +491 -0
  62. attune/orchestration/_strategies/data_classes.py +64 -0
  63. attune/orchestration/_strategies/nesting.py +233 -0
  64. attune/orchestration/execution_strategies.py +58 -1567
  65. attune/orchestration/meta_orchestrator.py +1 -3
  66. attune/project_index/scanner.py +1 -3
  67. attune/project_index/scanner_parallel.py +7 -5
  68. attune/socratic_router.py +1 -3
  69. attune/telemetry/agent_coordination.py +9 -3
  70. attune/telemetry/agent_tracking.py +16 -3
  71. attune/telemetry/approval_gates.py +22 -5
  72. attune/telemetry/cli.py +3 -3
  73. attune/telemetry/commands/dashboard_commands.py +24 -8
  74. attune/telemetry/event_streaming.py +8 -2
  75. attune/telemetry/feedback_loop.py +10 -2
  76. attune/tools.py +1 -0
  77. attune/workflow_commands.py +1 -3
  78. attune/workflows/__init__.py +53 -10
  79. attune/workflows/autonomous_test_gen.py +160 -104
  80. attune/workflows/base.py +48 -664
  81. attune/workflows/batch_processing.py +2 -4
  82. attune/workflows/compat.py +156 -0
  83. attune/workflows/cost_mixin.py +141 -0
  84. attune/workflows/data_classes.py +92 -0
  85. attune/workflows/document_gen/workflow.py +11 -14
  86. attune/workflows/history.py +62 -37
  87. attune/workflows/llm_base.py +2 -4
  88. attune/workflows/migration.py +422 -0
  89. attune/workflows/output.py +3 -9
  90. attune/workflows/parsing_mixin.py +427 -0
  91. attune/workflows/perf_audit.py +3 -1
  92. attune/workflows/progress.py +10 -13
  93. attune/workflows/release_prep.py +5 -1
  94. attune/workflows/routing.py +0 -2
  95. attune/workflows/secure_release.py +2 -1
  96. attune/workflows/security_audit.py +19 -14
  97. attune/workflows/security_audit_phase3.py +28 -22
  98. attune/workflows/seo_optimization.py +29 -29
  99. attune/workflows/test_gen/test_templates.py +1 -4
  100. attune/workflows/test_gen/workflow.py +0 -2
  101. attune/workflows/test_gen_behavioral.py +7 -20
  102. attune/workflows/test_gen_parallel.py +6 -4
  103. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/METADATA +4 -3
  104. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/RECORD +119 -94
  105. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/entry_points.txt +0 -2
  106. attune_healthcare/monitors/monitoring/__init__.py +9 -9
  107. attune_llm/agent_factory/__init__.py +6 -6
  108. attune_llm/commands/__init__.py +10 -10
  109. attune_llm/commands/models.py +3 -3
  110. attune_llm/config/__init__.py +8 -8
  111. attune_llm/learning/__init__.py +3 -3
  112. attune_llm/learning/extractor.py +5 -3
  113. attune_llm/learning/storage.py +5 -3
  114. attune_llm/security/__init__.py +17 -17
  115. attune_llm/utils/tokens.py +3 -1
  116. attune/cli_legacy.py +0 -3957
  117. attune/memory/short_term.py +0 -2192
  118. attune/workflows/manage_docs.py +0 -87
  119. attune/workflows/test5.py +0 -125
  120. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/WHEEL +0 -0
  121. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE +0 -0
  122. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
  123. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/top_level.txt +0 -0
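Two of the largest changes are structural: the 2,192-line `attune/memory/short_term.py` monolith is replaced by an `attune/memory/short_term/` package (a facade plus per-feature modules such as caching, pubsub, queues, and streams), and the 3,957-line `attune/cli_legacy.py` is removed outright. A hedged sketch of how such a split typically keeps the old import path working through the new package's `__init__.py`; the re-exported name below is an assumption for illustration, not taken from the wheel:

```python
# attune/memory/short_term/__init__.py  (illustrative sketch, not the real file)
# Re-export the facade so `from attune.memory.short_term import ShortTermMemory`
# keeps working after the monolith -> package split. The class name is assumed.
from .facade import ShortTermMemory

__all__ = ["ShortTermMemory"]
```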
attune/orchestration/meta_orchestrator.py CHANGED
@@ -700,9 +700,7 @@ class MetaOrchestrator:
         except ImportError:
             logger.warning("AskUserQuestion not available, using defaults")
             suggested_pattern = self._choose_composition_pattern(requirements, suggested_agents)
-            return self.create_execution_plan(
-                requirements, suggested_agents, suggested_pattern
-            )
+            return self.create_execution_plan(requirements, suggested_agents, suggested_pattern)

         # Present all patterns with descriptions
         pattern_response = AskUserQuestion(
attune/project_index/scanner.py CHANGED
@@ -483,9 +483,7 @@ class ProjectScanner:
         import re

         test_func_pattern = re.compile(r"^\s*def\s+test_\w+\(")
-        metrics["test_count"] = sum(
-            1 for line in lines if test_func_pattern.match(line)
-        )
+        metrics["test_count"] = sum(1 for line in lines if test_func_pattern.match(line))
         # Mark as having test functions (for test file records)
         if metrics["test_count"] > 0:
             metrics["lines_of_test"] = metrics["lines_of_code"]
attune/project_index/scanner_parallel.py CHANGED
@@ -253,9 +253,11 @@ def compare_sequential_vs_parallel(project_root: str = ".", workers: int = 4) ->
         "sequential_time": sequential_time,
         "parallel_time": parallel_time,
         "speedup": speedup,
-        "improvement_pct": ((sequential_time - parallel_time) / sequential_time * 100)
-        if sequential_time > 0
-        else 0,
+        "improvement_pct": (
+            ((sequential_time - parallel_time) / sequential_time * 100)
+            if sequential_time > 0
+            else 0
+        ),
         "files_scanned": summary_seq.total_files,
         "workers": workers,
     }
@@ -281,9 +283,9 @@ if __name__ == "__main__":
     print(f"\nSpeedup: {results['speedup']:.2f}x")
     print(f"Improvement: {results['improvement_pct']:.1f}%")

-    if results['speedup'] >= 2.0:
+    if results["speedup"] >= 2.0:
         print("\n✅ Parallel processing is highly effective!")
-    elif results['speedup'] >= 1.5:
+    elif results["speedup"] >= 1.5:
         print("\n✅ Parallel processing provides moderate benefit")
     else:
         print("\n⚠️ Parallel processing may not be worth the overhead")
attune/socratic_router.py CHANGED
@@ -720,9 +720,7 @@ class AttuneRouter:
             + (f", args='{args}'" if args else ""),
         }

-    def continue_session(
-        self, session_id: str, answers: dict[str, Any]
-    ) -> dict[str, Any]:
+    def continue_session(self, session_id: str, answers: dict[str, Any]) -> dict[str, Any]:
         """Continue a deep discovery session.

         Args:
attune/telemetry/agent_coordination.py CHANGED
@@ -71,7 +71,11 @@ class CoordinationSignal:
             "source_agent": self.source_agent,
             "target_agent": self.target_agent,
             "payload": self.payload,
-            "timestamp": self.timestamp.isoformat() if isinstance(self.timestamp, datetime) else self.timestamp,
+            "timestamp": (
+                self.timestamp.isoformat()
+                if isinstance(self.timestamp, datetime)
+                else self.timestamp
+            ),
             "ttl_seconds": self.ttl_seconds,
         }

@@ -303,7 +307,9 @@ class CoordinationSignals:

         while time.time() - start_time < timeout:
             # Check for signal
-            signal = self.check_signal(signal_type=signal_type, source_agent=source_agent, consume=True)
+            signal = self.check_signal(
+                signal_type=signal_type, source_agent=source_agent, consume=True
+            )

             if signal:
                 return signal
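The hunk above is a poll-until-timeout loop around `check_signal`. A self-contained sketch of the same pattern, with the checked callable and the 0.5-second sleep interval as assumptions for illustration (the real coordinator's polling cadence is not shown in this diff):

```python
import time
from typing import Any, Callable


def wait_for(check: Callable[[], Any | None], timeout: float, interval: float = 0.5) -> Any | None:
    """Poll `check` until it returns a truthy result or `timeout` seconds elapse."""
    start_time = time.time()
    while time.time() - start_time < timeout:
        result = check()  # stands in for self.check_signal(...) in the diff
        if result:
            return result
        time.sleep(interval)
    return None


# Usage sketch: gives up after roughly 2 seconds because the check never succeeds.
print(wait_for(lambda: None, timeout=2.0))  # None
```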
@@ -335,7 +341,7 @@ class CoordinationSignals:
         # Check broadcast signals: empathy:signal:*:{type}:*
         patterns = [
             f"{self.KEY_PREFIX}{self.agent_id}:{signal_type}:*",
-            f"{self.KEY_PREFIX}{self.BROADCAST_TARGET}:{signal_type}:*"
+            f"{self.KEY_PREFIX}{self.BROADCAST_TARGET}:{signal_type}:*",
         ]

         for pattern in patterns:
attune/telemetry/agent_tracking.py CHANGED
@@ -62,7 +62,11 @@ class AgentHeartbeat:
             "status": self.status,
             "progress": self.progress,
             "current_task": self.current_task,
-            "last_beat": self.last_beat.isoformat() if isinstance(self.last_beat, datetime) else self.last_beat,
+            "last_beat": (
+                self.last_beat.isoformat()
+                if isinstance(self.last_beat, datetime)
+                else self.last_beat
+            ),
             "metadata": self.metadata,
             "display_name": self.display_name,
         }
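Most of the telemetry `to_dict` hunks in this release (CoordinationSignal, AgentHeartbeat, and the approval, stream, and feedback records below) reflow the same serialization guard: `datetime` values become ISO-8601 strings, while timestamps that are already strings pass through unchanged. A standalone sketch of that guard on a hypothetical record type:

```python
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any


@dataclass
class ExampleRecord:  # hypothetical stand-in for the telemetry dataclasses
    payload: dict[str, Any]
    timestamp: datetime | str = field(default_factory=datetime.utcnow)

    def to_dict(self) -> dict[str, Any]:
        return {
            "payload": self.payload,
            # Same guard as the diffs: datetime -> ISO string, str passes through.
            "timestamp": (
                self.timestamp.isoformat()
                if isinstance(self.timestamp, datetime)
                else self.timestamp
            ),
        }


print(ExampleRecord(payload={"ok": True}).to_dict()["timestamp"])  # e.g. '2026-01-29T12:00:00.000000'
```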
@@ -210,7 +214,12 @@ class HeartbeatCoordinator:
         self.agent_id = None

     def _publish_heartbeat(
-        self, status: str, progress: float, current_task: str, metadata: dict[str, Any], display_name: str | None = None
+        self,
+        status: str,
+        progress: float,
+        current_task: str,
+        metadata: dict[str, Any],
+        display_name: str | None = None,
     ) -> None:
         """Publish heartbeat to Redis with TTL and optionally to event stream."""
         if not self.memory or not self.agent_id:
@@ -361,7 +370,11 @@ class HeartbeatCoordinator:

         for agent in active:
             time_since_beat = (now - agent.last_beat).total_seconds()
-            if time_since_beat > threshold_seconds and agent.status not in ("completed", "failed", "cancelled"):
+            if time_since_beat > threshold_seconds and agent.status not in (
+                "completed",
+                "failed",
+                "cancelled",
+            ):
                 stale.append(agent)

         return stale
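The reflowed condition above is the whole staleness rule: an agent is stale when its last heartbeat is older than the threshold and its status is not already terminal. A minimal sketch with a hypothetical heartbeat record:

```python
from dataclasses import dataclass
from datetime import datetime, timedelta

TERMINAL_STATUSES = ("completed", "failed", "cancelled")


@dataclass
class Beat:  # hypothetical stand-in for AgentHeartbeat
    agent_id: str
    status: str
    last_beat: datetime


def find_stale(active: list[Beat], threshold_seconds: float, now: datetime) -> list[Beat]:
    """Return agents whose heartbeat is older than the threshold and not terminal."""
    stale = []
    for agent in active:
        time_since_beat = (now - agent.last_beat).total_seconds()
        if time_since_beat > threshold_seconds and agent.status not in TERMINAL_STATUSES:
            stale.append(agent)
    return stale


now = datetime.utcnow()
beats = [
    Beat("a1", "running", now - timedelta(seconds=120)),
    Beat("a2", "completed", now - timedelta(seconds=900)),
]
print([b.agent_id for b in find_stale(beats, threshold_seconds=60, now=now)])  # ['a1']
```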
attune/telemetry/approval_gates.py CHANGED
@@ -71,7 +71,11 @@ class ApprovalRequest:
             "approval_type": self.approval_type,
             "agent_id": self.agent_id,
             "context": self.context,
-            "timestamp": self.timestamp.isoformat() if isinstance(self.timestamp, datetime) else self.timestamp,
+            "timestamp": (
+                self.timestamp.isoformat()
+                if isinstance(self.timestamp, datetime)
+                else self.timestamp
+            ),
             "timeout_seconds": self.timeout_seconds,
             "status": self.status,
         }
@@ -116,7 +120,11 @@ class ApprovalResponse:
             "approved": self.approved,
             "responder": self.responder,
             "reason": self.reason,
-            "timestamp": self.timestamp.isoformat() if isinstance(self.timestamp, datetime) else self.timestamp,
+            "timestamp": (
+                self.timestamp.isoformat()
+                if isinstance(self.timestamp, datetime)
+                else self.timestamp
+            ),
         }

     @classmethod
@@ -240,13 +248,18 @@ class ApprovalGate:
             if hasattr(self.memory, "_client") and self.memory._client:
                 import json

-                self.memory._client.setex(request_key, int(timeout) + 60, json.dumps(request.to_dict()))
+                self.memory._client.setex(
+                    request_key, int(timeout) + 60, json.dumps(request.to_dict())
+                )
             else:
                 logger.warning("Cannot store approval request: no Redis backend available")
         except Exception as e:
             logger.error(f"Failed to store approval request: {e}")
             return ApprovalResponse(
-                request_id=request_id, approved=False, responder="system", reason=f"Storage error: {e}"
+                request_id=request_id,
+                approved=False,
+                responder="system",
+                reason=f"Storage error: {e}",
             )

         # Send approval_request signal (for notifications)
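The approval request is stored with Redis `SETEX`, using the approval timeout plus a 60-second buffer as the key's TTL so the stored record outlives the gate itself slightly. A minimal sketch against a plain `redis-py` client; the key name and payload here are illustrative, not the package's actual key schema:

```python
import json

import redis  # assumes the redis-py package is installed

client = redis.Redis(host="localhost", port=6379, db=0)

request_key = "attune:approval:request:example-id"  # illustrative key name
timeout = 300.0  # approval timeout in seconds
payload = {"request_id": "example-id", "status": "pending"}

# Same idea as the diff: TTL = timeout + 60s buffer, value = JSON-encoded request.
client.setex(request_key, int(timeout) + 60, json.dumps(payload))
```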
@@ -363,7 +376,11 @@ class ApprovalGate:
             return False

         response = ApprovalResponse(
-            request_id=request_id, approved=approved, responder=responder, reason=reason, timestamp=datetime.utcnow()
+            request_id=request_id,
+            approved=approved,
+            responder=responder,
+            reason=reason,
+            timestamp=datetime.utcnow(),
         )

         # Store approval response (for workflow to retrieve)
attune/telemetry/cli.py CHANGED
@@ -25,6 +25,8 @@ except ImportError:

 from attune.config import _validate_file_path

+# Import dashboard commands for backward compatibility (re-exported)
+from .commands import cmd_file_test_dashboard, cmd_telemetry_dashboard  # noqa: F401
 from .usage_tracker import UsageTracker

 # _validate_file_path is now imported from attune.config
@@ -597,7 +599,6 @@ def cmd_telemetry_export(args: Any) -> int:
 # ==============================================================================


-
 # ==============================================================================
 # Dashboard Commands (Extracted to Separate Module)
 # ==============================================================================
@@ -606,6 +607,7 @@ def cmd_telemetry_export(args: Any) -> int:
 # Imported at top of file for backward compatibility.
 # ==============================================================================

+
 def cmd_tier1_status(args: Any) -> int:
     """Show comprehensive Tier 1 automation status.

@@ -1227,5 +1229,3 @@ def cmd_file_test_status(args: Any) -> int:
             print(f" - {test.get('name', 'unknown')}: {test.get('error', '')[:40]}")

     return 0
-
-
attune/telemetry/commands/dashboard_commands.py CHANGED
@@ -359,7 +359,8 @@ def cmd_file_test_dashboard(args: Any) -> int:
         </tr>
     """

-    return """<!DOCTYPE html>
+    return (
+        """<!DOCTYPE html>
 <html lang="en">
 <head>
     <meta charset="UTF-8">
@@ -515,23 +516,33 @@ def cmd_file_test_dashboard(args: Any) -> int:

     <div class="stats">
         <div class="stat-card total">
-            <div class="stat-value">""" + str(total) + """</div>
+            <div class="stat-value">"""
+        + str(total)
+        + """</div>
             <div class="stat-label">Total Files</div>
         </div>
         <div class="stat-card passed">
-            <div class="stat-value">""" + str(passed) + """</div>
+            <div class="stat-value">"""
+        + str(passed)
+        + """</div>
             <div class="stat-label">Passed</div>
         </div>
         <div class="stat-card failed">
-            <div class="stat-value">""" + str(failed) + """</div>
+            <div class="stat-value">"""
+        + str(failed)
+        + """</div>
             <div class="stat-label">Failed</div>
         </div>
         <div class="stat-card no-tests">
-            <div class="stat-value">""" + str(no_tests) + """</div>
+            <div class="stat-value">"""
+        + str(no_tests)
+        + """</div>
             <div class="stat-label">No Tests</div>
         </div>
         <div class="stat-card stale">
-            <div class="stat-value">""" + str(stale) + """</div>
+            <div class="stat-value">"""
+        + str(stale)
+        + """</div>
             <div class="stat-label">Stale</div>
         </div>
     </div>
@@ -558,12 +569,16 @@ def cmd_file_test_dashboard(args: Any) -> int:
             </tr>
         </thead>
         <tbody>
-            """ + rows_html + """
+            """
+        + rows_html
+        + """
         </tbody>
     </table>

     <div class="last-updated">
-        Last updated: """ + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + """
+        Last updated: """
+        + datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+        + """
     </div>
 </div>

@@ -618,6 +633,7 @@ def cmd_file_test_dashboard(args: Any) -> int:
 </script>
 </body>
 </html>"""
+    )

 def _generate_empty_dashboard() -> str:
     """Generate dashboard HTML when no data available."""
attune/telemetry/event_streaming.py CHANGED
@@ -55,7 +55,11 @@ class StreamEvent:
         return {
             "event_id": self.event_id,
             "event_type": self.event_type,
-            "timestamp": self.timestamp.isoformat() if isinstance(self.timestamp, datetime) else self.timestamp,
+            "timestamp": (
+                self.timestamp.isoformat()
+                if isinstance(self.timestamp, datetime)
+                else self.timestamp
+            ),
             "data": self.data,
             "source": self.source,
         }
@@ -235,7 +239,9 @@ class EventStreamer:
         else:
             # Subscribe to all event streams (expensive - requires KEYS scan)
             all_streams = self.memory._client.keys(f"{self.STREAM_PREFIX}*")
-            streams = {s.decode("utf-8") if isinstance(s, bytes) else s: start_id for s in all_streams}
+            streams = {
+                s.decode("utf-8") if isinstance(s, bytes) else s: start_id for s in all_streams
+            }

         if not streams:
             logger.debug("No streams to consume")
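`redis-py` returns key names as `bytes` unless the client was constructed with `decode_responses=True`, which is why the comprehension above normalizes each key before using it. A standalone sketch of the same normalization; the stream prefix and start id are illustrative values, not the package's real key schema:

```python
import redis  # assumes the redis-py package is installed

client = redis.Redis(host="localhost", port=6379, db=0)

STREAM_PREFIX = "attune:stream:"  # illustrative prefix
start_id = "0-0"                  # read each stream from the beginning

# KEYS scans the whole keyspace, which is why the diff marks this path as expensive.
all_streams = client.keys(f"{STREAM_PREFIX}*")

# Normalize bytes -> str so the stream mapping has uniform string keys.
streams = {
    s.decode("utf-8") if isinstance(s, bytes) else s: start_id for s in all_streams
}
print(streams)
```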
attune/telemetry/feedback_loop.py CHANGED
@@ -80,7 +80,11 @@ class FeedbackEntry:
             "stage_name": self.stage_name,
             "tier": self.tier,
             "quality_score": self.quality_score,
-            "timestamp": self.timestamp.isoformat() if isinstance(self.timestamp, datetime) else self.timestamp,
+            "timestamp": (
+                self.timestamp.isoformat()
+                if isinstance(self.timestamp, datetime)
+                else self.timestamp
+            ),
             "metadata": self.metadata,
         }

@@ -252,7 +256,11 @@ class FeedbackLoop:
         return feedback_id

     def get_feedback_history(
-        self, workflow_name: str, stage_name: str, tier: str | ModelTier | None = None, limit: int = 100
+        self,
+        workflow_name: str,
+        stage_name: str,
+        tier: str | ModelTier | None = None,
+        limit: int = 100,
     ) -> list[FeedbackEntry]:
         """Get feedback history for a workflow stage.

attune/tools.py CHANGED
@@ -11,6 +11,7 @@ Integration with Claude Code:

 Created: 2026-01-29
 """
+
 import json
 import logging
 import os
attune/workflow_commands.py CHANGED
@@ -427,9 +427,7 @@ def ship_workflow(
     print("4. Checking git status...")
     success, output = _run_command(["git", "status", "--porcelain"])
     if success:
-        staged = sum(
-            1 for line in output.split("\n") if line.startswith(("A ", "M ", "D ", "R "))
-        )
+        staged = sum(1 for line in output.split("\n") if line.startswith(("A ", "M ", "D ", "R ")))
         unstaged = sum(1 for line in output.split("\n") if line.startswith((" M", " D", "??")))
         if staged > 0:
             print(f" INFO - {staged} staged, {unstaged} unstaged")
attune/workflows/__init__.py CHANGED
@@ -64,7 +64,6 @@ if TYPE_CHECKING:
     from .security_audit import SecurityAuditWorkflow
     from .seo_optimization import SEOOptimizationWorkflow
     from .step_config import WorkflowStepConfig
-    from .test5 import Test5Workflow
     from .test_coverage_boost_crew import TestCoverageBoostCrew, TestCoverageBoostCrewResult
     from .test_gen import TestGenerationWorkflow
     from .test_gen_behavioral import BehavioralTestGenerationWorkflow
@@ -89,6 +88,10 @@ from .builder import WorkflowBuilder, workflow_builder
 # Config is small and frequently needed
 from .config import DEFAULT_MODELS, ModelConfig, WorkflowConfig, create_example_config, get_model

+# Mixins for workflow composition (extracted for maintainability)
+from .cost_mixin import CostTrackingMixin
+from .parsing_mixin import ResponseParsingMixin
+
 # Routing strategies (small, frequently needed for builder pattern)
 from .routing import (
     BalancedRouting,
@@ -136,12 +139,19 @@ _LAZY_WORKFLOW_IMPORTS: dict[str, tuple[str, str]] = {
     "SecureReleaseResult": (".secure_release", "SecureReleaseResult"),
     "SecurityAuditWorkflow": (".security_audit", "SecurityAuditWorkflow"),
     "SEOOptimizationWorkflow": (".seo_optimization", "SEOOptimizationWorkflow"),
-    "Test5Workflow": (".test5", "Test5Workflow"),
     "TestCoverageBoostCrew": (".test_coverage_boost_crew", "TestCoverageBoostCrew"),
     "TestCoverageBoostCrewResult": (".test_coverage_boost_crew", "TestCoverageBoostCrewResult"),
     "TestGenerationWorkflow": (".test_gen", "TestGenerationWorkflow"),
-    "BehavioralTestGenerationWorkflow": (".test_gen_behavioral", "BehavioralTestGenerationWorkflow"),
+    "BehavioralTestGenerationWorkflow": (
+        ".test_gen_behavioral",
+        "BehavioralTestGenerationWorkflow",
+    ),
     "ParallelTestGenerationWorkflow": (".test_gen_parallel", "ParallelTestGenerationWorkflow"),
+    # Additional workflows (restored to registry)
+    "TestMaintenanceWorkflow": (".test_maintenance", "TestMaintenanceWorkflow"),
+    "BatchProcessingWorkflow": (".batch_processing", "BatchProcessingWorkflow"),
+    "ProgressiveTestGenWorkflow": (".progressive.test_gen", "ProgressiveTestGenWorkflow"),
+    "AutonomousTestGenerator": (".autonomous_test_gen", "AutonomousTestGenerator"),
     "XMLAgent": (".xml_enhanced_crew", "XMLAgent"),
     "XMLTask": (".xml_enhanced_crew", "XMLTask"),
     "parse_xml_response": (".xml_enhanced_crew", "parse_xml_response"),
@@ -226,7 +236,7 @@ _DEFAULT_WORKFLOW_NAMES: dict[str, str] = {
     "refactor-plan": "RefactorPlanWorkflow",
     # Operational workflows
     "dependency-check": "DependencyCheckWorkflow",
-    "release-prep-legacy": "ReleasePreparationWorkflow",
+    # release-prep-legacy removed - handled by migration system (use release-prep instead)
     # Composite security pipeline (v3.0)
     "secure-release": "SecureReleasePipeline",
     # Code review crew integration (v3.1)
@@ -234,19 +244,26 @@ _DEFAULT_WORKFLOW_NAMES: dict[str, str] = {
     "pr-review": "PRReviewWorkflow",
     # Documentation management (v3.5)
     "doc-orchestrator": "DocumentationOrchestrator",
-    "manage-docs": "DocumentationOrchestrator",  # Points to orchestrator (crew deprecated)
+    # manage-docs removed - handled by migration system (was deprecated)
     # Keyboard Conductor (v3.6)
     "keyboard-shortcuts": "KeyboardShortcutWorkflow",
-    # User-generated workflows
+    # User-generated workflows (document-manager kept for backward compat, handled by migration)
     "document-manager": "DocumentManagerWorkflow",
-    "test5": "Test5Workflow",
     # Meta-orchestration workflows (v4.0.0 - CANONICAL)
     "orchestrated-health-check": "OrchestratedHealthCheckWorkflow",
     "orchestrated-release-prep": "OrchestratedReleasePrepWorkflow",
     # Backward compatibility aliases (point to orchestrated versions)
     "release-prep": "OrchestratedReleasePrepWorkflow",
-    "orchestrated-health-check-experimental": "OrchestratedHealthCheckWorkflow",
-    "orchestrated-release-prep-experimental": "OrchestratedReleasePrepWorkflow",
+    # Experimental aliases removed (use production versions instead)
+    # Research and synthesis workflows
+    "research-synthesis": "ResearchSynthesisWorkflow",
+    # Test management workflows
+    "test-coverage-boost": "TestCoverageBoostCrew",
+    "test-maintenance": "TestMaintenanceWorkflow",
+    "autonomous-test-gen": "AutonomousTestGenerator",
+    # Batch and progressive workflows
+    "batch-processing": "BatchProcessingWorkflow",
+    "progressive-test-gen": "ProgressiveTestGenWorkflow",
 }

 # Opt-in workflows - class names for lazy loading
@@ -440,6 +457,18 @@ def __getattr__(name: str) -> object:
         _load_cli_commands()
         return globals().get(name)

+    # Handle migration module exports
+    _MIGRATION_EXPORTS = {
+        "resolve_workflow_migration",
+        "MigrationConfig",
+        "WORKFLOW_ALIASES",
+        "show_migration_tip",
+        "list_migrations",
+    }
+    if name in _MIGRATION_EXPORTS:
+        from attune.workflows import migration
+        return getattr(migration, name)
+
     raise AttributeError(f"module 'attune.workflows' has no attribute '{name}'")

@@ -448,8 +477,10 @@ __all__ = [
     "PROVIDER_MODELS",
     # Registry and discovery
     "WORKFLOW_REGISTRY",
-    # Base classes
+    # Base classes and mixins
     "BaseWorkflow",
+    "CostTrackingMixin",
+    "ResponseParsingMixin",
     # Routing strategies
     "TierRoutingStrategy",
     "RoutingContext",
@@ -532,8 +563,20 @@ __all__ = [
     "OrchestratedReleasePrepWorkflow",
     "HealthCheckReport",
     "ReleaseReadinessReport",
+    # Additional workflows (added for completeness)
+    # Test5Workflow removed - test artifact
+    "TestMaintenanceWorkflow",
+    "BatchProcessingWorkflow",
+    "ProgressiveTestGenWorkflow",
+    "AutonomousTestGenerator",
     # XML-enhanced prompting
     "XMLAgent",
     "XMLTask",
     "parse_xml_response",
+    # Workflow migration system
+    "resolve_workflow_migration",
+    "MigrationConfig",
+    "WORKFLOW_ALIASES",
+    "show_migration_tip",
+    "list_migrations",
 ]
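`attune/workflows/__init__.py` resolves most of these names lazily through a module-level `__getattr__` (PEP 562): the `_LAZY_WORKFLOW_IMPORTS` table maps an exported name to its submodule, and the `__getattr__` hunk above extends the same hook to forward the migration helpers to `attune.workflows.migration`. A minimal, self-contained sketch of the general pattern; the table entry and module names here are hypothetical, not the package's actual registry:

```python
# Sketch of a PEP 562 lazy-import __getattr__ like the one in attune.workflows.
# The registry entry below is hypothetical.
import importlib

_LAZY_IMPORTS: dict[str, tuple[str, str]] = {
    "ExampleWorkflow": (".example", "ExampleWorkflow"),  # hypothetical entry
}


def __getattr__(name: str) -> object:
    if name in _LAZY_IMPORTS:
        module_path, attr = _LAZY_IMPORTS[name]
        module = importlib.import_module(module_path, package=__name__)
        value = getattr(module, attr)
        globals()[name] = value  # cache so later lookups skip __getattr__
        return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```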