empathy-framework 5.1.1-py3-none-any.whl → 5.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (106)
  1. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/METADATA +79 -6
  2. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/RECORD +83 -64
  3. empathy_os/__init__.py +1 -1
  4. empathy_os/cache/hybrid.py +5 -1
  5. empathy_os/cli/commands/batch.py +8 -0
  6. empathy_os/cli/commands/profiling.py +4 -0
  7. empathy_os/cli/commands/workflow.py +8 -4
  8. empathy_os/cli_router.py +9 -0
  9. empathy_os/config.py +15 -2
  10. empathy_os/core_modules/__init__.py +15 -0
  11. empathy_os/dashboard/simple_server.py +62 -30
  12. empathy_os/mcp/__init__.py +10 -0
  13. empathy_os/mcp/server.py +506 -0
  14. empathy_os/memory/control_panel.py +1 -131
  15. empathy_os/memory/control_panel_support.py +145 -0
  16. empathy_os/memory/encryption.py +159 -0
  17. empathy_os/memory/long_term.py +46 -631
  18. empathy_os/memory/long_term_types.py +99 -0
  19. empathy_os/memory/mixins/__init__.py +25 -0
  20. empathy_os/memory/mixins/backend_init_mixin.py +249 -0
  21. empathy_os/memory/mixins/capabilities_mixin.py +208 -0
  22. empathy_os/memory/mixins/handoff_mixin.py +208 -0
  23. empathy_os/memory/mixins/lifecycle_mixin.py +49 -0
  24. empathy_os/memory/mixins/long_term_mixin.py +352 -0
  25. empathy_os/memory/mixins/promotion_mixin.py +109 -0
  26. empathy_os/memory/mixins/short_term_mixin.py +182 -0
  27. empathy_os/memory/short_term.py +61 -12
  28. empathy_os/memory/simple_storage.py +302 -0
  29. empathy_os/memory/storage_backend.py +167 -0
  30. empathy_os/memory/types.py +8 -3
  31. empathy_os/memory/unified.py +21 -1120
  32. empathy_os/meta_workflows/cli_commands/__init__.py +56 -0
  33. empathy_os/meta_workflows/cli_commands/agent_commands.py +321 -0
  34. empathy_os/meta_workflows/cli_commands/analytics_commands.py +442 -0
  35. empathy_os/meta_workflows/cli_commands/config_commands.py +232 -0
  36. empathy_os/meta_workflows/cli_commands/memory_commands.py +182 -0
  37. empathy_os/meta_workflows/cli_commands/template_commands.py +354 -0
  38. empathy_os/meta_workflows/cli_commands/workflow_commands.py +382 -0
  39. empathy_os/meta_workflows/cli_meta_workflows.py +52 -1802
  40. empathy_os/models/telemetry/__init__.py +71 -0
  41. empathy_os/models/telemetry/analytics.py +594 -0
  42. empathy_os/models/telemetry/backend.py +196 -0
  43. empathy_os/models/telemetry/data_models.py +431 -0
  44. empathy_os/models/telemetry/storage.py +489 -0
  45. empathy_os/orchestration/__init__.py +35 -0
  46. empathy_os/orchestration/execution_strategies.py +481 -0
  47. empathy_os/orchestration/meta_orchestrator.py +488 -1
  48. empathy_os/routing/workflow_registry.py +36 -0
  49. empathy_os/telemetry/agent_coordination.py +2 -3
  50. empathy_os/telemetry/agent_tracking.py +26 -7
  51. empathy_os/telemetry/approval_gates.py +18 -24
  52. empathy_os/telemetry/cli.py +19 -724
  53. empathy_os/telemetry/commands/__init__.py +14 -0
  54. empathy_os/telemetry/commands/dashboard_commands.py +696 -0
  55. empathy_os/telemetry/event_streaming.py +7 -3
  56. empathy_os/telemetry/feedback_loop.py +28 -15
  57. empathy_os/tools.py +183 -0
  58. empathy_os/workflows/__init__.py +5 -0
  59. empathy_os/workflows/autonomous_test_gen.py +860 -161
  60. empathy_os/workflows/base.py +6 -2
  61. empathy_os/workflows/code_review.py +4 -1
  62. empathy_os/workflows/document_gen/__init__.py +25 -0
  63. empathy_os/workflows/document_gen/config.py +30 -0
  64. empathy_os/workflows/document_gen/report_formatter.py +162 -0
  65. empathy_os/workflows/{document_gen.py → document_gen/workflow.py} +5 -184
  66. empathy_os/workflows/output.py +4 -1
  67. empathy_os/workflows/progress.py +8 -2
  68. empathy_os/workflows/security_audit.py +2 -2
  69. empathy_os/workflows/security_audit_phase3.py +7 -4
  70. empathy_os/workflows/seo_optimization.py +633 -0
  71. empathy_os/workflows/test_gen/__init__.py +52 -0
  72. empathy_os/workflows/test_gen/ast_analyzer.py +249 -0
  73. empathy_os/workflows/test_gen/config.py +88 -0
  74. empathy_os/workflows/test_gen/data_models.py +38 -0
  75. empathy_os/workflows/test_gen/report_formatter.py +289 -0
  76. empathy_os/workflows/test_gen/test_templates.py +381 -0
  77. empathy_os/workflows/test_gen/workflow.py +655 -0
  78. empathy_os/workflows/test_gen.py +42 -1905
  79. empathy_os/cli/parsers/cache 2.py +0 -65
  80. empathy_os/cli_router 2.py +0 -416
  81. empathy_os/dashboard/app 2.py +0 -512
  82. empathy_os/dashboard/simple_server 2.py +0 -403
  83. empathy_os/dashboard/standalone_server 2.py +0 -536
  84. empathy_os/memory/types 2.py +0 -441
  85. empathy_os/models/adaptive_routing 2.py +0 -437
  86. empathy_os/models/telemetry.py +0 -1660
  87. empathy_os/project_index/scanner_parallel 2.py +0 -291
  88. empathy_os/telemetry/agent_coordination 2.py +0 -478
  89. empathy_os/telemetry/agent_tracking 2.py +0 -350
  90. empathy_os/telemetry/approval_gates 2.py +0 -563
  91. empathy_os/telemetry/event_streaming 2.py +0 -405
  92. empathy_os/telemetry/feedback_loop 2.py +0 -557
  93. empathy_os/vscode_bridge 2.py +0 -173
  94. empathy_os/workflows/progressive/__init__ 2.py +0 -92
  95. empathy_os/workflows/progressive/cli 2.py +0 -242
  96. empathy_os/workflows/progressive/core 2.py +0 -488
  97. empathy_os/workflows/progressive/orchestrator 2.py +0 -701
  98. empathy_os/workflows/progressive/reports 2.py +0 -528
  99. empathy_os/workflows/progressive/telemetry 2.py +0 -280
  100. empathy_os/workflows/progressive/test_gen 2.py +0 -514
  101. empathy_os/workflows/progressive/workflow 2.py +0 -628
  102. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/WHEEL +0 -0
  103. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/entry_points.txt +0 -0
  104. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/licenses/LICENSE +0 -0
  105. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
  106. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/top_level.txt +0 -0
@@ -142,8 +142,12 @@ class ModelProvider(Enum):
     CUSTOM = "custom"  # User-defined custom models
 
     def to_unified(self) -> UnifiedModelProvider:
-        """Convert to unified ModelProvider from empathy_os.models."""
-        return UnifiedModelProvider(self.value)
+        """Convert to unified ModelProvider from empathy_os.models.
+
+        As of v5.0.0, framework is Claude-native. All providers map to ANTHROPIC.
+        """
+        # v5.0.0: Framework is Claude-native, only ANTHROPIC supported
+        return UnifiedModelProvider.ANTHROPIC
 
 
 # Import unified MODEL_REGISTRY as single source of truth
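
For illustration, a minimal sketch of the new behavior (CUSTOM is taken from the context above; the assertion reflects the mapping described in the added docstring):

    # Sketch: after this change, every ModelProvider member converts to ANTHROPIC
    provider = ModelProvider.CUSTOM
    assert provider.to_unified() is UnifiedModelProvider.ANTHROPIC
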
@@ -645,6 +645,9 @@ Code to review:
             "Code will proceed to architectural review."
         )
 
+        # Calculate security score
+        security_score = 70 if has_critical else 90
+
         # Determine preliminary verdict based on scan
         if has_critical:
             preliminary_verdict = "request_changes"
@@ -661,7 +664,7 @@ Code to review:
             "bug_patterns": [],
             "quality_issues": [],
             "has_critical_issues": has_critical,
-            "security_score": 70 if has_critical else 90,
+            "security_score": security_score,
             "verdict": preliminary_verdict,  # Add verdict for when architect_review is skipped
             "needs_architect_review": input_data.get("needs_architect_review", False)
             or has_critical,
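
The extraction means the preliminary verdict and the reported score now derive from the same has_critical flag in one place. A minimal sketch of the shared derivation (the non-critical verdict value is assumed, not shown in this diff):

    # Hypothetical illustration of the refactor above
    has_critical = True
    security_score = 70 if has_critical else 90  # shared value
    preliminary_verdict = "request_changes" if has_critical else "approved"  # else-branch assumed
    scan_result = {"security_score": security_score, "verdict": preliminary_verdict}
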
@@ -0,0 +1,25 @@
+"""Document Generation Workflow Package.
+
+Cost-optimized documentation generation pipeline.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+# Core workflow
+# Configuration
+from .config import DOC_GEN_STEPS, TOKEN_COSTS
+
+# Report formatter
+from .report_formatter import format_doc_gen_report
+from .workflow import DocumentGenerationWorkflow
+
+__all__ = [
+    # Workflow
+    "DocumentGenerationWorkflow",
+    # Configuration
+    "DOC_GEN_STEPS",
+    "TOKEN_COSTS",
+    # Report formatter
+    "format_doc_gen_report",
+]
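
Callers can now import everything from the package root, mirroring the old flat module. A minimal usage sketch based on the __all__ list above (constructor arguments omitted; see workflow.py):

    from empathy_os.workflows.document_gen import (
        DOC_GEN_STEPS,
        TOKEN_COSTS,
        DocumentGenerationWorkflow,
        format_doc_gen_report,
    )

    workflow = DocumentGenerationWorkflow()  # arguments assumed
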
@@ -0,0 +1,30 @@
+"""Document Generation Configuration.
+
+Token costs and step configurations for documentation generation workflow.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+from ..base import ModelTier
+from ..step_config import WorkflowStepConfig
+
+# Approximate cost per 1K tokens (USD) - used for cost estimation
+# These are estimates and should be updated as pricing changes
+TOKEN_COSTS = {
+    ModelTier.CHEAP: {"input": 0.00025, "output": 0.00125},  # Haiku
+    ModelTier.CAPABLE: {"input": 0.003, "output": 0.015},  # Sonnet
+    ModelTier.PREMIUM: {"input": 0.015, "output": 0.075},  # Opus
+}
+
+# Define step configurations for executor-based execution
+# Note: max_tokens for polish is dynamically set based on input size
+DOC_GEN_STEPS = {
+    "polish": WorkflowStepConfig(
+        name="polish",
+        task_type="final_review",  # Premium tier task
+        tier_hint="premium",
+        description="Polish and improve documentation for consistency and quality",
+        max_tokens=20000,  # Increased to handle large chunked documents
+    ),
+}
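
As a worked example of how TOKEN_COSTS feeds cost estimation (a sketch; the workflow's actual accounting code is not shown in this diff): a CAPABLE-tier step with 10,000 input and 4,000 output tokens is estimated at (10 × 0.003) + (4 × 0.015) = $0.09.

    # Sketch: per-step cost estimate from the table above
    costs = TOKEN_COSTS[ModelTier.CAPABLE]  # Sonnet pricing
    input_tokens, output_tokens = 10_000, 4_000
    estimate = (input_tokens / 1000) * costs["input"] + (output_tokens / 1000) * costs["output"]
    print(f"${estimate:.2f}")  # $0.09
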
@@ -0,0 +1,162 @@
+"""Document Generation Report Formatter.
+
+Format documentation generation output as human-readable reports.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+
+def format_doc_gen_report(result: dict, input_data: dict) -> str:
+    """Format document generation output as a human-readable report.
+
+    Args:
+        result: The polish stage result
+        input_data: Input data from previous stages
+
+    Returns:
+        Formatted report string
+
+    """
+    lines = []
+
+    # Header
+    doc_type = result.get("doc_type", "general").replace("_", " ").title()
+    audience = result.get("audience", "developers").replace("_", " ").title()
+
+    lines.append("=" * 60)
+    lines.append("DOCUMENTATION GENERATION REPORT")
+    lines.append("=" * 60)
+    lines.append("")
+    lines.append(f"Document Type: {doc_type}")
+    lines.append(f"Target Audience: {audience}")
+    lines.append("")
+
+    # Outline summary
+    outline = input_data.get("outline", "")
+    if outline:
+        lines.append("-" * 60)
+        lines.append("DOCUMENT OUTLINE")
+        lines.append("-" * 60)
+        # Show just a preview of the outline
+        outline_lines = outline.split("\n")[:10]
+        lines.extend(outline_lines)
+        if len(outline.split("\n")) > 10:
+            lines.append("...")
+        lines.append("")
+
+    # Generated document
+    document = result.get("document", "")
+    if document:
+        lines.append("-" * 60)
+        lines.append("GENERATED DOCUMENTATION")
+        lines.append("-" * 60)
+        lines.append("")
+        lines.append(document)
+        lines.append("")
+
+    # Statistics
+    word_count = len(document.split()) if document else 0
+    section_count = document.count("##") if document else 0  # Count markdown headers
+    was_chunked = input_data.get("chunked", False)
+    chunk_count = input_data.get("chunk_count", 0)
+    chunks_completed = input_data.get("chunks_completed", chunk_count)
+    stopped_early = input_data.get("stopped_early", False)
+    accumulated_cost = result.get("accumulated_cost", 0)
+    polish_chunked = result.get("polish_chunked", False)
+
+    lines.append("-" * 60)
+    lines.append("STATISTICS")
+    lines.append("-" * 60)
+    lines.append(f"Word Count: {word_count}")
+    lines.append(f"Section Count: ~{section_count}")
+    if was_chunked:
+        if stopped_early:
+            lines.append(
+                f"Generation Mode: Chunked ({chunks_completed}/{chunk_count} chunks completed)",
+            )
+        else:
+            lines.append(f"Generation Mode: Chunked ({chunk_count} chunks)")
+    if polish_chunked:
+        polish_chunks = result.get("polish_chunks", 0)
+        lines.append(f"Polish Mode: Chunked ({polish_chunks} sections)")
+    if accumulated_cost > 0:
+        lines.append(f"Estimated Cost: ${accumulated_cost:.2f}")
+    lines.append("")
+
+    # Export info
+    export_path = result.get("export_path")
+    if export_path:
+        lines.append("-" * 60)
+        lines.append("FILE EXPORT")
+        lines.append("-" * 60)
+        lines.append(f"Documentation saved to: {export_path}")
+        report_path = result.get("report_path")
+        if report_path:
+            lines.append(f"Report saved to: {report_path}")
+        lines.append("")
+        lines.append("Full documentation is available in the exported file.")
+        lines.append("")
+
+    # Warning notice (cost limit, errors, etc.)
+    warning = input_data.get("warning") or result.get("warning")
+    if warning or stopped_early:
+        lines.append("-" * 60)
+        lines.append("⚠️ WARNING")
+        lines.append("-" * 60)
+        if warning:
+            lines.append(warning)
+        if stopped_early and not warning:
+            lines.append("Generation stopped early due to cost or error limits.")
+        lines.append("")
+
+    # Truncation detection and scope notice
+    truncation_indicators = []
+    if document:  # Handle None or empty document
+        truncation_indicators = [
+            document.rstrip().endswith("..."),
+            document.rstrip().endswith("-"),
+            "```" in document and document.count("```") % 2 != 0,  # Unclosed code block
+            any(
+                phrase in document.lower()
+                for phrase in ["continued in", "see next section", "to be continued"]
+            ),
+        ]
+
+    # Count planned sections from outline (top-level only)
+    import re
+
+    planned_sections = 0
+    top_level_pattern = re.compile(r"^(\d+)\.\s+([A-Za-z].*)")
+    if outline:
+        for line in outline.split("\n"):
+            stripped = line.strip()
+            if top_level_pattern.match(stripped):
+                planned_sections += 1
+
+    is_truncated = any(truncation_indicators) or (
+        planned_sections > 0 and section_count < planned_sections - 1
+    )
+
+    if is_truncated or planned_sections > section_count + 1:
+        lines.append("-" * 60)
+        lines.append("SCOPE NOTICE")
+        lines.append("-" * 60)
+        lines.append("⚠️ DOCUMENTATION MAY BE INCOMPLETE")
+        if planned_sections > 0:
+            lines.append(f"  Planned sections: {planned_sections}")
+            lines.append(f"  Generated sections: {section_count}")
+        lines.append("")
+        lines.append("To generate missing sections, re-run with section_focus:")
+        lines.append("  workflow = DocumentGenerationWorkflow(")
+        lines.append('      section_focus=["Testing Guide", "API Reference"]')
+        lines.append("  )")
+        lines.append("")
+
+    # Footer
+    lines.append("=" * 60)
+    model_tier = result.get("model_tier_used", "unknown")
+    lines.append(f"Generated using {model_tier} tier model")
+    lines.append("=" * 60)
+
+    return "\n".join(lines)
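
A minimal invocation sketch, with dictionary keys taken from the .get() calls above (the values are illustrative only):

    report = format_doc_gen_report(
        result={
            "doc_type": "api_reference",
            "audience": "developers",
            "document": "## Overview\nExample body text.",
            "model_tier_used": "premium",
        },
        input_data={"outline": "1. Overview\n2. Endpoints", "chunked": False},
    )
    print(report)
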
@@ -1,15 +1,6 @@
-"""Document Generation Workflow
+"""Document Generation Workflow.
 
-A cost-optimized, enterprise-safe documentation pipeline:
-1. Haiku: Generate outline from code/specs (cheap, fast)
-2. Sonnet: Write each section (capable, chunked for large projects)
-3. Opus: Final review + consistency polish (premium, chunked if needed)
-
-Enterprise Features:
-- Auto-scaling tokens based on project complexity
-- Chunked polish for large documents
-- Cost guardrails with configurable max_cost
-- Graceful degradation with partial results on errors
+Main workflow orchestration for documentation generation.
 
 Copyright 2025 Smart-AI-Memory
 Licensed under Fair Source License 0.9
@@ -22,31 +13,12 @@ from typing import Any
 
 from empathy_os.config import _validate_file_path
 
-from .base import BaseWorkflow, ModelTier
-from .step_config import WorkflowStepConfig
+from ..base import BaseWorkflow, ModelTier
+from .config import DOC_GEN_STEPS, TOKEN_COSTS
+from .report_formatter import format_doc_gen_report
 
 logger = logging.getLogger(__name__)
 
-# Approximate cost per 1K tokens (USD) - used for cost estimation
-# These are estimates and should be updated as pricing changes
-TOKEN_COSTS = {
-    ModelTier.CHEAP: {"input": 0.00025, "output": 0.00125},  # Haiku
-    ModelTier.CAPABLE: {"input": 0.003, "output": 0.015},  # Sonnet
-    ModelTier.PREMIUM: {"input": 0.015, "output": 0.075},  # Opus
-}
-
-# Define step configurations for executor-based execution
-# Note: max_tokens for polish is dynamically set based on input size
-DOC_GEN_STEPS = {
-    "polish": WorkflowStepConfig(
-        name="polish",
-        task_type="final_review",  # Premium tier task
-        tier_hint="premium",
-        description="Polish and improve documentation for consistency and quality",
-        max_tokens=20000,  # Increased to handle large chunked documents
-    ),
-}
-
 
 class DocumentGenerationWorkflow(BaseWorkflow):
     """Multi-tier document generation workflow.
@@ -1452,154 +1424,3 @@ None
         return full_doc
 
 
-def format_doc_gen_report(result: dict, input_data: dict) -> str:
-    """Format document generation output as a human-readable report.
-
-    Args:
-        result: The polish stage result
-        input_data: Input data from previous stages
-
-    Returns:
-        Formatted report string
-
-    """
-    lines = []
-
-    # Header
-    doc_type = result.get("doc_type", "general").replace("_", " ").title()
-    audience = result.get("audience", "developers").title()
-
-    lines.append("=" * 60)
-    lines.append("DOCUMENTATION GENERATION REPORT")
-    lines.append("=" * 60)
-    lines.append("")
-    lines.append(f"Document Type: {doc_type}")
-    lines.append(f"Target Audience: {audience}")
-    lines.append("")
-
-    # Outline summary
-    outline = input_data.get("outline", "")
-    if outline:
-        lines.append("-" * 60)
-        lines.append("DOCUMENT OUTLINE")
-        lines.append("-" * 60)
-        # Show just a preview of the outline
-        outline_lines = outline.split("\n")[:10]
-        lines.extend(outline_lines)
-        if len(outline.split("\n")) > 10:
-            lines.append("...")
-        lines.append("")
-
-    # Generated document
-    document = result.get("document", "")
-    if document:
-        lines.append("-" * 60)
-        lines.append("GENERATED DOCUMENTATION")
-        lines.append("-" * 60)
-        lines.append("")
-        lines.append(document)
-        lines.append("")
-
-    # Statistics
-    word_count = len(document.split()) if document else 0
-    section_count = document.count("##") if document else 0  # Count markdown headers
-    was_chunked = input_data.get("chunked", False)
-    chunk_count = input_data.get("chunk_count", 0)
-    chunks_completed = input_data.get("chunks_completed", chunk_count)
-    stopped_early = input_data.get("stopped_early", False)
-    accumulated_cost = result.get("accumulated_cost", 0)
-    polish_chunked = result.get("polish_chunked", False)
-
-    lines.append("-" * 60)
-    lines.append("STATISTICS")
-    lines.append("-" * 60)
-    lines.append(f"Word Count: {word_count}")
-    lines.append(f"Section Count: ~{section_count}")
-    if was_chunked:
-        if stopped_early:
-            lines.append(
-                f"Generation Mode: Chunked ({chunks_completed}/{chunk_count} chunks completed)",
-            )
-        else:
-            lines.append(f"Generation Mode: Chunked ({chunk_count} chunks)")
-    if polish_chunked:
-        polish_chunks = result.get("polish_chunks", 0)
-        lines.append(f"Polish Mode: Chunked ({polish_chunks} sections)")
-    if accumulated_cost > 0:
-        lines.append(f"Estimated Cost: ${accumulated_cost:.2f}")
-    lines.append("")
-
-    # Export info
-    export_path = result.get("export_path")
-    if export_path:
-        lines.append("-" * 60)
-        lines.append("FILE EXPORT")
-        lines.append("-" * 60)
-        lines.append(f"Documentation saved to: {export_path}")
-        report_path = result.get("report_path")
-        if report_path:
-            lines.append(f"Report saved to: {report_path}")
-        lines.append("")
-        lines.append("Full documentation is available in the exported file.")
-        lines.append("")
-
-    # Warning notice (cost limit, errors, etc.)
-    warning = input_data.get("warning") or result.get("warning")
-    if warning or stopped_early:
-        lines.append("-" * 60)
-        lines.append("⚠️ WARNING")
-        lines.append("-" * 60)
-        if warning:
-            lines.append(warning)
-        if stopped_early and not warning:
-            lines.append("Generation stopped early due to cost or error limits.")
-        lines.append("")
-
-    # Truncation detection and scope notice
-    truncation_indicators = [
-        document.rstrip().endswith("..."),
-        document.rstrip().endswith("-"),
-        "```" in document and document.count("```") % 2 != 0,  # Unclosed code block
-        any(
-            phrase in document.lower()
-            for phrase in ["continued in", "see next section", "to be continued"]
-        ),
-    ]
-
-    # Count planned sections from outline (top-level only)
-    import re
-
-    planned_sections = 0
-    top_level_pattern = re.compile(r"^(\d+)\.\s+([A-Za-z].*)")
-    if outline:
-        for line in outline.split("\n"):
-            stripped = line.strip()
-            if top_level_pattern.match(stripped):
-                planned_sections += 1
-
-    is_truncated = any(truncation_indicators) or (
-        planned_sections > 0 and section_count < planned_sections - 1
-    )
-
-    if is_truncated or planned_sections > section_count + 1:
-        lines.append("-" * 60)
-        lines.append("SCOPE NOTICE")
-        lines.append("-" * 60)
-        lines.append("⚠️ DOCUMENTATION MAY BE INCOMPLETE")
-        if planned_sections > 0:
-            lines.append(f"  Planned sections: {planned_sections}")
-            lines.append(f"  Generated sections: {section_count}")
-        lines.append("")
-        lines.append("To generate missing sections, re-run with section_focus:")
-        lines.append("  workflow = DocumentGenerationWorkflow(")
-        lines.append('      section_focus=["Testing Guide", "API Reference"]')
-        lines.append("  )")
-        lines.append("")
-
-    # Footer
-    lines.append("=" * 60)
-    model_tier = result.get("model_tier_used", "unknown")
-    lines.append(f"Generated using {model_tier} tier model")
-    lines.append("=" * 60)
-
-    return "\n".join(lines)
@@ -326,7 +326,10 @@ class MetricsPanel:
             Rich Panel with formatted score
         """
         if not RICH_AVAILABLE or Panel is None:
-            raise RuntimeError("Rich is not available")
+            raise RuntimeError(
+                "Rich library not available. "
+                "Install with: pip install rich"
+            )
 
         style = cls.get_style(score)
         icon = cls.get_icon(score)
@@ -590,7 +590,10 @@ class RichProgressReporter:
             stage_names: List of stage names for progress tracking
         """
         if not RICH_AVAILABLE:
-            raise RuntimeError("Rich library required for RichProgressReporter")
+            raise RuntimeError(
+                "Rich library required for RichProgressReporter. "
+                "Install with: pip install rich"
+            )
 
         self.workflow_name = workflow_name
         self.stage_names = stage_names
@@ -674,7 +677,10 @@ class RichProgressReporter:
             Rich Panel containing progress information
         """
         if not RICH_AVAILABLE or Panel is None or Table is None:
-            raise RuntimeError("Rich not available")
+            raise RuntimeError(
+                "Rich library not available. "
+                "Install with: pip install rich"
+            )
 
         # Build metrics table
         metrics = Table(show_header=False, box=None, padding=(0, 2))
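
All three guards assume the usual optional-dependency pattern for Rich. A minimal sketch of how RICH_AVAILABLE, Panel, and Table are presumably set up at import time (the module's actual import block is not shown in this diff):

    try:
        from rich.panel import Panel
        from rich.table import Table
        RICH_AVAILABLE = True
    except ImportError:  # rich not installed
        Panel = None
        Table = None
        RICH_AVAILABLE = False
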
@@ -618,8 +618,8 @@ class SecurityAuditWorkflow(BaseWorkflow):
         """
         line = line_content.strip()
 
-        # Check if line is a comment
-        if line.startswith("#") or line.startswith("//") or line.startswith("*"):
+        # Check if line is a comment or documentation
+        if line.startswith("#") or line.startswith("//") or line.startswith("*") or line.startswith("-"):
             return True
 
         # Check if inside a docstring (triple quotes)
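
The added "-" prefix means lines such as markdown bullets in embedded documentation are now skipped as well. A quick illustration of what the updated check accepts (the tuple form is equivalent to the chained startswith calls above):

    # All of these now count as comment/documentation lines
    for sample in ["# python comment", "// js comment", "* block comment", "- markdown bullet"]:
        assert sample.strip().startswith(("#", "//", "*", "-"))
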
@@ -9,6 +9,7 @@ Related: docs/SECURITY_PHASE2_COMPLETE.md
 
 import ast
 import logging
+import re
 from pathlib import Path
 from typing import Any
 
@@ -167,12 +168,14 @@ def is_in_docstring_or_comment(line_content: str, file_content: str, line_num: i
     try:
         tree = ast.parse(file_content)
 
-        # Get all docstrings
+        # Get all docstrings - only from nodes that can have docstrings
         docstrings = []
         for node in ast.walk(tree):
-            docstring = ast.get_docstring(node)
-            if docstring:
-                docstrings.append(docstring)
+            # Only these node types can have docstrings
+            if isinstance(node, (ast.FunctionDef, ast.ClassDef, ast.AsyncFunctionDef, ast.Module)):
+                docstring = ast.get_docstring(node)
+                if docstring:
+                    docstrings.append(docstring)
 
         # Check if any docstring contains this line content
         for docstring in docstrings:
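
The isinstance guard fixes a real pitfall: ast.get_docstring raises TypeError when handed a node type that cannot carry a docstring (anything other than Module, ClassDef, FunctionDef, or AsyncFunctionDef). A standalone illustration:

    import ast

    tree = ast.parse('"""Module docstring."""\nx = 1\n')
    for node in ast.walk(tree):
        if isinstance(node, (ast.Module, ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)):
            print(ast.get_docstring(node))  # safe: prints the module docstring
        # ast.get_docstring(node) on e.g. the ast.Assign node would raise TypeError
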