attune-ai 2.1.5__py3-none-any.whl → 2.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120) hide show
  1. attune/cli/__init__.py +3 -59
  2. attune/cli/commands/batch.py +4 -12
  3. attune/cli/commands/cache.py +7 -15
  4. attune/cli/commands/provider.py +17 -0
  5. attune/cli/commands/routing.py +3 -1
  6. attune/cli/commands/setup.py +122 -0
  7. attune/cli/commands/tier.py +1 -3
  8. attune/cli/commands/workflow.py +31 -0
  9. attune/cli/parsers/cache.py +1 -0
  10. attune/cli/parsers/help.py +1 -3
  11. attune/cli/parsers/provider.py +7 -0
  12. attune/cli/parsers/routing.py +1 -3
  13. attune/cli/parsers/setup.py +7 -0
  14. attune/cli/parsers/status.py +1 -3
  15. attune/cli/parsers/tier.py +1 -3
  16. attune/cli_minimal.py +9 -3
  17. attune/cli_router.py +9 -7
  18. attune/cli_unified.py +3 -0
  19. attune/dashboard/app.py +3 -1
  20. attune/dashboard/simple_server.py +3 -1
  21. attune/dashboard/standalone_server.py +7 -3
  22. attune/mcp/server.py +54 -102
  23. attune/memory/long_term.py +0 -2
  24. attune/memory/short_term/__init__.py +84 -0
  25. attune/memory/short_term/base.py +467 -0
  26. attune/memory/short_term/batch.py +219 -0
  27. attune/memory/short_term/caching.py +227 -0
  28. attune/memory/short_term/conflicts.py +265 -0
  29. attune/memory/short_term/cross_session.py +122 -0
  30. attune/memory/short_term/facade.py +655 -0
  31. attune/memory/short_term/pagination.py +215 -0
  32. attune/memory/short_term/patterns.py +271 -0
  33. attune/memory/short_term/pubsub.py +286 -0
  34. attune/memory/short_term/queues.py +244 -0
  35. attune/memory/short_term/security.py +300 -0
  36. attune/memory/short_term/sessions.py +250 -0
  37. attune/memory/short_term/streams.py +249 -0
  38. attune/memory/short_term/timelines.py +234 -0
  39. attune/memory/short_term/transactions.py +186 -0
  40. attune/memory/short_term/working.py +252 -0
  41. attune/meta_workflows/cli_commands/__init__.py +3 -0
  42. attune/meta_workflows/cli_commands/agent_commands.py +0 -4
  43. attune/meta_workflows/cli_commands/analytics_commands.py +0 -6
  44. attune/meta_workflows/cli_commands/config_commands.py +0 -5
  45. attune/meta_workflows/cli_commands/memory_commands.py +0 -5
  46. attune/meta_workflows/cli_commands/template_commands.py +0 -5
  47. attune/meta_workflows/cli_commands/workflow_commands.py +0 -6
  48. attune/models/adaptive_routing.py +4 -8
  49. attune/models/auth_cli.py +3 -9
  50. attune/models/auth_strategy.py +2 -4
  51. attune/models/telemetry/analytics.py +0 -2
  52. attune/models/telemetry/backend.py +0 -3
  53. attune/models/telemetry/storage.py +0 -2
  54. attune/orchestration/_strategies/__init__.py +156 -0
  55. attune/orchestration/_strategies/base.py +231 -0
  56. attune/orchestration/_strategies/conditional_strategies.py +373 -0
  57. attune/orchestration/_strategies/conditions.py +369 -0
  58. attune/orchestration/_strategies/core_strategies.py +491 -0
  59. attune/orchestration/_strategies/data_classes.py +64 -0
  60. attune/orchestration/_strategies/nesting.py +233 -0
  61. attune/orchestration/execution_strategies.py +58 -1567
  62. attune/orchestration/meta_orchestrator.py +1 -3
  63. attune/project_index/scanner.py +1 -3
  64. attune/project_index/scanner_parallel.py +7 -5
  65. attune/socratic_router.py +1 -3
  66. attune/telemetry/agent_coordination.py +9 -3
  67. attune/telemetry/agent_tracking.py +16 -3
  68. attune/telemetry/approval_gates.py +22 -5
  69. attune/telemetry/cli.py +1 -3
  70. attune/telemetry/commands/dashboard_commands.py +24 -8
  71. attune/telemetry/event_streaming.py +8 -2
  72. attune/telemetry/feedback_loop.py +10 -2
  73. attune/tools.py +1 -0
  74. attune/workflow_commands.py +1 -3
  75. attune/workflows/__init__.py +53 -10
  76. attune/workflows/autonomous_test_gen.py +158 -102
  77. attune/workflows/base.py +48 -672
  78. attune/workflows/batch_processing.py +1 -3
  79. attune/workflows/compat.py +156 -0
  80. attune/workflows/cost_mixin.py +141 -0
  81. attune/workflows/data_classes.py +92 -0
  82. attune/workflows/document_gen/workflow.py +11 -14
  83. attune/workflows/history.py +62 -37
  84. attune/workflows/llm_base.py +1 -3
  85. attune/workflows/migration.py +422 -0
  86. attune/workflows/output.py +2 -7
  87. attune/workflows/parsing_mixin.py +427 -0
  88. attune/workflows/perf_audit.py +3 -1
  89. attune/workflows/progress.py +9 -11
  90. attune/workflows/release_prep.py +5 -1
  91. attune/workflows/routing.py +0 -2
  92. attune/workflows/secure_release.py +2 -1
  93. attune/workflows/security_audit.py +19 -14
  94. attune/workflows/security_audit_phase3.py +28 -22
  95. attune/workflows/seo_optimization.py +27 -27
  96. attune/workflows/test_gen/test_templates.py +1 -4
  97. attune/workflows/test_gen/workflow.py +0 -2
  98. attune/workflows/test_gen_behavioral.py +6 -19
  99. attune/workflows/test_gen_parallel.py +6 -4
  100. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/METADATA +4 -3
  101. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/RECORD +116 -91
  102. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/entry_points.txt +0 -2
  103. attune_healthcare/monitors/monitoring/__init__.py +9 -9
  104. attune_llm/agent_factory/__init__.py +6 -6
  105. attune_llm/commands/__init__.py +10 -10
  106. attune_llm/commands/models.py +3 -3
  107. attune_llm/config/__init__.py +8 -8
  108. attune_llm/learning/__init__.py +3 -3
  109. attune_llm/learning/extractor.py +5 -3
  110. attune_llm/learning/storage.py +5 -3
  111. attune_llm/security/__init__.py +17 -17
  112. attune_llm/utils/tokens.py +3 -1
  113. attune/cli_legacy.py +0 -3978
  114. attune/memory/short_term.py +0 -2192
  115. attune/workflows/manage_docs.py +0 -87
  116. attune/workflows/test5.py +0 -125
  117. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/WHEEL +0 -0
  118. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE +0 -0
  119. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
  120. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,427 @@
1
+ """Response parsing mixin for workflow classes.
2
+
3
+ This module provides methods for parsing and extracting structured data
4
+ from LLM responses, including XML parsing, regex-based extraction, and
5
+ finding inference.
6
+
7
+ Extracted from base.py for improved maintainability and import performance.
8
+
9
+ Copyright 2025 Smart-AI-Memory
10
+ Licensed under Fair Source License 0.9
11
+ """
12
+
13
+ from __future__ import annotations
14
+
15
+ import re
16
+ import uuid
17
+ from typing import TYPE_CHECKING, Any
18
+
19
+ if TYPE_CHECKING:
20
+ pass
21
+
22
+
23
class ResponseParsingMixin:
    """Mixin providing response parsing capabilities for workflows.

    Adds methods for extracting structured findings from LLM responses,
    using XML parsing when finding tags are present and regex-based
    extraction of ``file:line`` style references otherwise.

    Methods:
        _parse_xml_response: Parse XML-formatted LLM responses
        _extract_findings_from_response: Extract findings using multiple strategies
        _enrich_finding_with_location: Add location details to findings
        _parse_location_string: Parse location strings to file/line/column
        _infer_severity: Infer severity level from text
        _infer_category: Infer category from text

    Note:
        This mixin expects the host class to have a _get_xml_config()
        method that returns XML configuration settings as a dict.
    """

    # Single source of truth for the recognized source-file extensions.
    # (Previously this alternation was copy-pasted into six regexes.)
    _EXT = r"(?:py|ts|tsx|js|jsx|java|go|rb|php)"

    # Finding-extraction patterns, pre-compiled once and tagged with a style
    # key so every match is interpreted explicitly. The old implementation
    # guessed the pattern from the tuple length, which misread a match like
    # "file.py:10: 500" (all-digit message) as a column with empty message.
    _FINDING_PATTERNS = (
        # Pattern 1: file.py:line:column: message
        (
            re.compile(rf"([^\s:]+\.{_EXT}):(\d+):(\d+):\s*(.+)", re.IGNORECASE),
            "file_line_col_msg",
        ),
        # Pattern 2: file.py:line: message
        (
            re.compile(rf"([^\s:]+\.{_EXT}):(\d+):\s*(.+)", re.IGNORECASE),
            "file_line_msg",
        ),
        # Pattern 3: in file X line Y
        (
            re.compile(rf"(?:in file|file)\s+([^\s]+\.{_EXT})\s+line\s+(\d+)", re.IGNORECASE),
            "file_line",
        ),
        # Pattern 4: file.py (line X[, col Y])
        (
            re.compile(
                rf"([^\s]+\.{_EXT})\s*\(line\s+(\d+)(?:,\s*col(?:umn)?\s+(\d+))?\)",
                re.IGNORECASE,
            ),
            "file_paren",
        ),
    )

    # Location-string patterns for _parse_location_string. The colon form is
    # case-sensitive, matching the original behavior of that method.
    _LOC_COLON = re.compile(rf"([^\s:]+\.{_EXT}):(\d+)(?::(\d+))?")
    _LOC_LINE_IN_FILE = re.compile(
        rf"line\s+(\d+)\s+(?:in|of)\s+([^\s]+\.{_EXT})", re.IGNORECASE
    )
    _LOC_FILE_LINE = re.compile(rf"([^\s]+\.{_EXT})\s+line\s+(\d+)", re.IGNORECASE)
    _LOC_LINE_ONLY = re.compile(r"line\s+(\d+)", re.IGNORECASE)

    # Ordered keyword ladders; the first rule whose keyword substring-matches
    # wins, so order is significant and preserved from the original code.
    _SEVERITY_RULES = (
        (
            "critical",
            (
                "critical",
                "severe",
                "exploit",
                "vulnerability",
                "injection",
                "remote code execution",
                "rce",
            ),
        ),
        (
            "high",
            (
                "high",
                "security",
                "unsafe",
                "dangerous",
                "xss",
                "csrf",
                "auth",
                "password",
                "secret",
            ),
        ),
        (
            "medium",
            ("warning", "issue", "problem", "bug", "error", "deprecated", "leak"),
        ),
        ("low", ("low", "minor", "style", "format", "typo")),
    )

    _CATEGORY_RULES = (
        (
            "security",
            (
                "security",
                "vulnerability",
                "injection",
                "xss",
                "csrf",
                "auth",
                "encrypt",
                "password",
                "secret",
                "unsafe",
            ),
        ),
        (
            "performance",
            ("performance", "slow", "memory", "leak", "inefficient", "optimization", "cache"),
        ),
        (
            "maintainability",
            (
                "complex",
                "refactor",
                "duplicate",
                "maintainability",
                "readability",
                "documentation",
            ),
        ),
        ("style", ("style", "format", "lint", "convention", "whitespace")),
    )

    def _parse_xml_response(self, response: str) -> dict[str, Any]:
        """Parse an XML response if XML enforcement is enabled.

        Args:
            response: The LLM response text.

        Returns:
            Dictionary with parsed fields, or only the raw response data
            when XML enforcement is disabled in _get_xml_config().
        """
        from attune.prompts import XmlResponseParser

        config = self._get_xml_config()

        if not config.get("enforce_response_xml", False):
            # No parsing needed, return as-is
            return {
                "_parsed_response": None,
                "_raw": response,
            }

        fallback = config.get("fallback_on_parse_error", True)
        parsed = XmlResponseParser(fallback_on_error=fallback).parse(response)

        return {
            "_parsed_response": parsed,
            "_raw": response,
            "summary": parsed.summary,
            "findings": [f.to_dict() for f in parsed.findings],
            "checklist": parsed.checklist,
            "xml_parsed": parsed.success,
            "parse_errors": parsed.errors,
        }

    def _extract_findings_from_response(
        self,
        response: str,
        files_changed: list[str],
        code_context: str = "",
    ) -> list[dict[str, Any]]:
        """Extract structured findings from an LLM response.

        Tries multiple strategies in order:
            1. XML parsing (if XML finding tags are present)
            2. Regex-based extraction for file:line patterns
            3. Returns empty list if no findings are extractable

        Args:
            response: Raw LLM response text.
            files_changed: List of files being analyzed (for context).
            code_context: Original code being reviewed (currently unused;
                kept for interface compatibility).

        Returns:
            List of findings matching the WorkflowFinding schema, each with
            id, file, line, column, severity, category, message, details and
            recommendation keys. Regex results are deduplicated by
            (file, line), first occurrence winning.
        """
        # Strategy 1: Try XML parsing first when finding tags are present.
        response_lower = response.lower()
        if any(tag in response_lower for tag in ("<finding>", "<issue>", "<findings>")):
            # Parse XML directly (bypass config checks).
            from attune.prompts import XmlResponseParser

            parsed = XmlResponseParser(fallback_on_error=True).parse(response)
            if parsed.success and parsed.findings:
                return [
                    self._enrich_finding_with_location(raw.to_dict(), files_changed)
                    for raw in parsed.findings
                ]
            # XML tags present but nothing parsed: fall through to regex.

        # Strategy 2: Regex-based extraction for common patterns such as
        #   "src/auth.py:42: SQL injection found"
        #   "In file src/auth.py line 42"
        #   "auth.py (line 42, column 10)"
        findings: list[dict[str, Any]] = []
        for pattern, style in self._FINDING_PATTERNS:
            for match in pattern.findall(response):
                file_path = match[0]
                line = int(match[1])
                if style == "file_line_col_msg":
                    column, message = int(match[2]), match[3]
                elif style == "file_line_msg":
                    column, message = 1, match[2]
                elif style == "file_paren":
                    # findall yields "" for the unmatched optional column group.
                    column, message = (int(match[2]) if match[2] else 1), ""
                else:  # "file_line": neither column nor message captured
                    column, message = 1, ""

                findings.append(
                    {
                        "id": str(uuid.uuid4())[:8],
                        "file": file_path,
                        "line": line,
                        "column": column,
                        # Severity/category are inferred from message keywords.
                        "severity": self._infer_severity(message),
                        "category": self._infer_category(message),
                        "message": message.strip(),
                        "details": "",
                        "recommendation": "",
                    },
                )

        # Deduplicate by (file, line); the earliest pattern's match wins.
        seen: set[tuple[str, int]] = set()
        unique_findings = []
        for finding in findings:
            key = (finding["file"], finding["line"])
            if key not in seen:
                seen.add(key)
                unique_findings.append(finding)

        return unique_findings

    def _enrich_finding_with_location(
        self,
        raw_finding: dict[str, Any],
        files_changed: list[str],
    ) -> dict[str, Any]:
        """Enrich a finding from the XML parser with file/line/column fields.

        Args:
            raw_finding: Finding dict from the XML parser (has a free-form
                'location' string field).
            files_changed: List of files being analyzed (fallback source).

        Returns:
            Enriched finding dict with file, line and column populated and
            the category inferred from the title/details text.
        """
        file_path, line, column = self._parse_location_string(
            raw_finding.get("location", ""),
            files_changed,
        )

        # Map category from keywords in the combined title + details text.
        category = self._infer_category(
            raw_finding.get("title", "") + " " + raw_finding.get("details", ""),
        )

        return {
            "id": str(uuid.uuid4())[:8],
            "file": file_path,
            "line": line,
            "column": column,
            "severity": raw_finding.get("severity", "medium"),
            "category": category,
            "message": raw_finding.get("title", ""),
            "details": raw_finding.get("details", ""),
            "recommendation": raw_finding.get("fix", ""),
        }

    def _parse_location_string(
        self,
        location: str,
        files_changed: list[str],
    ) -> tuple[str, int, int]:
        """Parse a location string to extract file, line, column.

        Handles formats like:
            - "src/auth.py:42:10"
            - "src/auth.py:42"
            - "line 42 in auth.py"
            - "auth.py line 42"
            - a bare "line 42" (file falls back to files_changed[0])

        Args:
            location: Location string from a finding.
            files_changed: List of files being analyzed (for fallback).

        Returns:
            Tuple of (file_path, line_number, column_number). Defaults to
            (first changed file or "", 1, 1) if parsing fails.
        """
        default_file = files_changed[0] if files_changed else ""
        if not location:
            return (default_file, 1, 1)

        # Colon-separated format: file.py:line[:col]
        m = self._LOC_COLON.search(location)
        if m:
            return (m.group(1), int(m.group(2)), int(m.group(3)) if m.group(3) else 1)

        # "line X in/of file.py" format
        m = self._LOC_LINE_IN_FILE.search(location)
        if m:
            return (m.group(2), int(m.group(1)), 1)

        # "file.py line X" format
        m = self._LOC_FILE_LINE.search(location)
        if m:
            return (m.group(1), int(m.group(2)), 1)

        # Bare line number: fall back to the first changed file.
        m = self._LOC_LINE_ONLY.search(location)
        if m:
            return (default_file, int(m.group(1)), 1)

        # Couldn't parse - return defaults.
        return (default_file, 1, 1)

    def _infer_severity(self, text: str) -> str:
        """Infer severity from keywords in text.

        Args:
            text: Message or title text.

        Returns:
            Severity level: critical, high, medium, low, or info.
        """
        text_lower = text.lower()
        for level, keywords in self._SEVERITY_RULES:
            if any(word in text_lower for word in keywords):
                return level
        return "info"

    def _infer_category(self, text: str) -> str:
        """Infer finding category from keywords.

        Args:
            text: Message or title text.

        Returns:
            Category: security, performance, maintainability, style,
            or correctness.
        """
        text_lower = text.lower()
        for category, keywords in self._CATEGORY_RULES:
            if any(word in text_lower for word in keywords):
                return category
        return "correctness"
@@ -649,7 +649,9 @@ def create_perf_audit_workflow_report(result: dict, input_data: dict) -> Workflo
649
649
  top_issues = result.get("top_issues", [])
650
650
  if top_issues:
651
651
  issues_content = {
652
- issue.get("type", "unknown").replace("_", " ").title(): f"{issue.get('count', 0)} occurrences"
652
+ issue.get("type", "unknown")
653
+ .replace("_", " ")
654
+ .title(): f"{issue.get('count', 0)} occurrences"
653
655
  for issue in top_issues
654
656
  }
655
657
  report.add_section("Top Performance Issues", issues_content)
@@ -483,7 +483,9 @@ class ConsoleProgressReporter:
483
483
  tokens_str = f" | {update.tokens_so_far:,} tokens"
484
484
 
485
485
  # Format: [100%] ✓ Completed optimize [PREMIUM] ($0.0279) [12.3s]
486
- output = f"[{percent}] {status_icon} {update.message}{tier_info} ({cost}{tokens_str}){elapsed}"
486
+ output = (
487
+ f"[{percent}] {status_icon} {update.message}{tier_info} ({cost}{tokens_str}){elapsed}"
488
+ )
487
489
  print(output)
488
490
 
489
491
  # Verbose output
@@ -508,7 +510,9 @@ class ConsoleProgressReporter:
508
510
  for stage in update.stages:
509
511
  if stage.status == ProgressStatus.COMPLETED:
510
512
  duration_ms = stage.duration_ms or self._stage_times.get(stage.name, 0)
511
- duration_str = f"{duration_ms}ms" if duration_ms < 1000 else f"{duration_ms/1000:.1f}s"
513
+ duration_str = (
514
+ f"{duration_ms}ms" if duration_ms < 1000 else f"{duration_ms/1000:.1f}s"
515
+ )
512
516
  cost_str = f"${stage.cost:.4f}" if stage.cost > 0 else "—"
513
517
  print(f" {stage.name}: {duration_str} | {cost_str}")
514
518
  elif stage.status == ProgressStatus.SKIPPED:
@@ -590,8 +594,7 @@ class RichProgressReporter:
590
594
  """
591
595
  if not RICH_AVAILABLE:
592
596
  raise RuntimeError(
593
- "Rich library required for RichProgressReporter. "
594
- "Install with: pip install rich"
597
+ "Rich library required for RichProgressReporter. " "Install with: pip install rich"
595
598
  )
596
599
 
597
600
  self.workflow_name = workflow_name
@@ -652,9 +655,7 @@ class RichProgressReporter:
652
655
 
653
656
  # Update progress bar
654
657
  if self._progress is not None and self._task_id is not None:
655
- completed = sum(
656
- 1 for s in update.stages if s.status == ProgressStatus.COMPLETED
657
- )
658
+ completed = sum(1 for s in update.stages if s.status == ProgressStatus.COMPLETED)
658
659
  self._progress.update(
659
660
  self._task_id,
660
661
  completed=completed,
@@ -676,10 +677,7 @@ class RichProgressReporter:
676
677
  Rich Panel containing progress information
677
678
  """
678
679
  if not RICH_AVAILABLE or Panel is None or Table is None:
679
- raise RuntimeError(
680
- "Rich library not available. "
681
- "Install with: pip install rich"
682
- )
680
+ raise RuntimeError("Rich library not available. " "Install with: pip install rich")
683
681
 
684
682
  # Build metrics table
685
683
  metrics = Table(show_header=False, box=None, padding=(0, 2))
@@ -153,6 +153,7 @@ class ReleasePreparationWorkflow(BaseWorkflow):
153
153
  get_auth_strategy,
154
154
  get_module_size_category,
155
155
  )
156
+
156
157
  logger = logging.getLogger(__name__)
157
158
 
158
159
  # Calculate total LOC for project/directory
@@ -173,7 +174,9 @@ class ReleasePreparationWorkflow(BaseWorkflow):
173
174
  self._auth_mode_used = recommended_mode.value
174
175
 
175
176
  size_category = get_module_size_category(total_lines)
176
- logger.info(f"Release prep target: {target_path} ({total_lines:,} LOC, {size_category})")
177
+ logger.info(
178
+ f"Release prep target: {target_path} ({total_lines:,} LOC, {size_category})"
179
+ )
177
180
  logger.info(f"Recommended auth mode: {recommended_mode.value}")
178
181
 
179
182
  cost_estimate = strategy.estimate_cost(total_lines, recommended_mode)
@@ -184,6 +187,7 @@ class ReleasePreparationWorkflow(BaseWorkflow):
184
187
 
185
188
  except Exception as e:
186
189
  import logging
190
+
187
191
  logger = logging.getLogger(__name__)
188
192
  logger.warning(f"Auth strategy detection failed: {e}")
189
193
 
@@ -164,5 +164,3 @@ class BalancedRouting(TierRoutingStrategy):
164
164
  def can_fallback(self, tier: ModelTier) -> bool:
165
165
  """Allow fallback when budget-constrained."""
166
166
  return True
167
-
168
-
@@ -170,7 +170,8 @@ class SecureReleasePipeline:
170
170
  adapters_available = True
171
171
  except ImportError:
172
172
  adapters_available = False
173
- _check_crew_available = lambda: False
173
+ def _check_crew_available():
174
+ return False
174
175
  _get_crew_audit = None
175
176
  crew_report_to_workflow_format = None
176
177
 
@@ -344,9 +344,7 @@ class SecurityAuditWorkflow(BaseWorkflow):
344
344
  for match in matches:
345
345
  # Find line number and get the line content
346
346
  line_num = content[: match.start()].count("\n") + 1
347
- line_content = (
348
- lines[line_num - 1] if line_num <= len(lines) else ""
349
- )
347
+ line_content = lines[line_num - 1] if line_num <= len(lines) else ""
350
348
 
351
349
  # Skip if file is a security example/test file
352
350
  file_name = str(file_path)
@@ -471,9 +469,7 @@ class SecurityAuditWorkflow(BaseWorkflow):
471
469
  size_category = get_module_size_category(codebase_lines)
472
470
 
473
471
  # Log recommendation
474
- logger.info(
475
- f"Codebase: {target} ({codebase_lines} LOC, {size_category})"
476
- )
472
+ logger.info(f"Codebase: {target} ({codebase_lines} LOC, {size_category})")
477
473
  logger.info(f"Recommended auth mode: {recommended_mode.value}")
478
474
 
479
475
  # Get cost estimate
@@ -486,8 +482,7 @@ class SecurityAuditWorkflow(BaseWorkflow):
486
482
  )
487
483
  else: # API
488
484
  logger.info(
489
- f"Cost: ~${cost_estimate['monetary_cost']:.4f} "
490
- f"(1M context window)"
485
+ f"Cost: ~${cost_estimate['monetary_cost']:.4f} " f"(1M context window)"
491
486
  )
492
487
 
493
488
  except Exception as e:
@@ -619,7 +614,12 @@ class SecurityAuditWorkflow(BaseWorkflow):
619
614
  line = line_content.strip()
620
615
 
621
616
  # Check if line is a comment or documentation
622
- if line.startswith("#") or line.startswith("//") or line.startswith("*") or line.startswith("-"):
617
+ if (
618
+ line.startswith("#")
619
+ or line.startswith("//")
620
+ or line.startswith("*")
621
+ or line.startswith("-")
622
+ ):
623
623
  return True
624
624
 
625
625
  # Check if inside a docstring (triple quotes)
@@ -655,7 +655,9 @@ class SecurityAuditWorkflow(BaseWorkflow):
655
655
 
656
656
  return False
657
657
 
658
- def _is_safe_sql_parameterization(self, line_content: str, match_text: str, file_content: str) -> bool:
658
+ def _is_safe_sql_parameterization(
659
+ self, line_content: str, match_text: str, file_content: str
660
+ ) -> bool:
659
661
  """Check if SQL query uses safe parameterization despite f-string usage.
660
662
 
661
663
  Phase 2 Enhancement: Detects safe patterns like:
@@ -682,7 +684,7 @@ class SecurityAuditWorkflow(BaseWorkflow):
682
684
  return False
683
685
 
684
686
  # Extract a larger context (next 200 chars after match)
685
- context = file_content[match_pos:match_pos + 200]
687
+ context = file_content[match_pos : match_pos + 200]
686
688
 
687
689
  # Also get lines before the match for placeholder detection
688
690
  lines_before = file_content[:match_pos].split("\n")
@@ -701,12 +703,14 @@ class SecurityAuditWorkflow(BaseWorkflow):
701
703
  if "placeholders" in prev_line and '"?"' in prev_line and "join" in prev_line:
702
704
  # Found placeholder construction
703
705
  # Now check if the execute has separate parameters
704
- if "," in context and any(param in context for param in ["run_ids", "ids", "params", "values", ")"]):
706
+ if "," in context and any(
707
+ param in context for param in ["run_ids", "ids", "params", "values", ")"]
708
+ ):
705
709
  return True
706
710
 
707
711
  # Pattern 2: Check if f-string only builds SQL structure with constants
708
712
  # Example: f"SELECT * FROM {TABLE_NAME}" where TABLE_NAME is a constant
709
- f_string_vars = re.findall(r'\{(\w+)\}', context)
713
+ f_string_vars = re.findall(r"\{(\w+)\}", context)
710
714
  if f_string_vars:
711
715
  # Check if all variables are constants (UPPERCASE or table/column names)
712
716
  all_constants = all(
@@ -940,7 +944,8 @@ class SecurityAuditWorkflow(BaseWorkflow):
940
944
  adapters_available = True
941
945
  except ImportError:
942
946
  adapters_available = False
943
- _check_crew_available = lambda: False
947
+ def _check_crew_available():
948
+ return False
944
949
 
945
950
  assessment = input_data.get("assessment", {})
946
951
  critical = assessment.get("critical_findings", [])