empathy-framework 5.1.1-py3-none-any.whl → 5.2.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. {empathy_framework-5.1.1.dist-info → empathy_framework-5.2.1.dist-info}/METADATA +52 -3
  2. {empathy_framework-5.1.1.dist-info → empathy_framework-5.2.1.dist-info}/RECORD +69 -28
  3. empathy_os/cli_router.py +9 -0
  4. empathy_os/core_modules/__init__.py +15 -0
  5. empathy_os/mcp/__init__.py +10 -0
  6. empathy_os/mcp/server.py +506 -0
  7. empathy_os/memory/control_panel.py +1 -131
  8. empathy_os/memory/control_panel_support.py +145 -0
  9. empathy_os/memory/encryption.py +159 -0
  10. empathy_os/memory/long_term.py +41 -626
  11. empathy_os/memory/long_term_types.py +99 -0
  12. empathy_os/memory/mixins/__init__.py +25 -0
  13. empathy_os/memory/mixins/backend_init_mixin.py +244 -0
  14. empathy_os/memory/mixins/capabilities_mixin.py +199 -0
  15. empathy_os/memory/mixins/handoff_mixin.py +208 -0
  16. empathy_os/memory/mixins/lifecycle_mixin.py +49 -0
  17. empathy_os/memory/mixins/long_term_mixin.py +352 -0
  18. empathy_os/memory/mixins/promotion_mixin.py +109 -0
  19. empathy_os/memory/mixins/short_term_mixin.py +182 -0
  20. empathy_os/memory/short_term.py +7 -0
  21. empathy_os/memory/simple_storage.py +302 -0
  22. empathy_os/memory/storage_backend.py +167 -0
  23. empathy_os/memory/unified.py +21 -1120
  24. empathy_os/meta_workflows/cli_commands/__init__.py +56 -0
  25. empathy_os/meta_workflows/cli_commands/agent_commands.py +321 -0
  26. empathy_os/meta_workflows/cli_commands/analytics_commands.py +442 -0
  27. empathy_os/meta_workflows/cli_commands/config_commands.py +232 -0
  28. empathy_os/meta_workflows/cli_commands/memory_commands.py +182 -0
  29. empathy_os/meta_workflows/cli_commands/template_commands.py +354 -0
  30. empathy_os/meta_workflows/cli_commands/workflow_commands.py +382 -0
  31. empathy_os/meta_workflows/cli_meta_workflows.py +52 -1802
  32. empathy_os/models/telemetry/__init__.py +71 -0
  33. empathy_os/models/telemetry/analytics.py +594 -0
  34. empathy_os/models/telemetry/backend.py +196 -0
  35. empathy_os/models/telemetry/data_models.py +431 -0
  36. empathy_os/models/telemetry/storage.py +489 -0
  37. empathy_os/orchestration/__init__.py +35 -0
  38. empathy_os/orchestration/execution_strategies.py +481 -0
  39. empathy_os/orchestration/meta_orchestrator.py +488 -1
  40. empathy_os/routing/workflow_registry.py +36 -0
  41. empathy_os/telemetry/cli.py +19 -724
  42. empathy_os/telemetry/commands/__init__.py +14 -0
  43. empathy_os/telemetry/commands/dashboard_commands.py +696 -0
  44. empathy_os/tools.py +183 -0
  45. empathy_os/workflows/__init__.py +5 -0
  46. empathy_os/workflows/autonomous_test_gen.py +860 -161
  47. empathy_os/workflows/base.py +6 -2
  48. empathy_os/workflows/code_review.py +4 -1
  49. empathy_os/workflows/document_gen/__init__.py +25 -0
  50. empathy_os/workflows/document_gen/config.py +30 -0
  51. empathy_os/workflows/document_gen/report_formatter.py +162 -0
  52. empathy_os/workflows/document_gen/workflow.py +1426 -0
  53. empathy_os/workflows/document_gen.py +22 -1598
  54. empathy_os/workflows/security_audit.py +2 -2
  55. empathy_os/workflows/security_audit_phase3.py +7 -4
  56. empathy_os/workflows/seo_optimization.py +633 -0
  57. empathy_os/workflows/test_gen/__init__.py +52 -0
  58. empathy_os/workflows/test_gen/ast_analyzer.py +249 -0
  59. empathy_os/workflows/test_gen/config.py +88 -0
  60. empathy_os/workflows/test_gen/data_models.py +38 -0
  61. empathy_os/workflows/test_gen/report_formatter.py +289 -0
  62. empathy_os/workflows/test_gen/test_templates.py +381 -0
  63. empathy_os/workflows/test_gen/workflow.py +655 -0
  64. empathy_os/workflows/test_gen.py +42 -1905
  65. empathy_os/memory/types 2.py +0 -441
  66. empathy_os/models/telemetry.py +0 -1660
  67. {empathy_framework-5.1.1.dist-info → empathy_framework-5.2.1.dist-info}/WHEEL +0 -0
  68. {empathy_framework-5.1.1.dist-info → empathy_framework-5.2.1.dist-info}/entry_points.txt +0 -0
  69. {empathy_framework-5.1.1.dist-info → empathy_framework-5.2.1.dist-info}/licenses/LICENSE +0 -0
  70. {empathy_framework-5.1.1.dist-info → empathy_framework-5.2.1.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
  71. {empathy_framework-5.1.1.dist-info → empathy_framework-5.2.1.dist-info}/top_level.txt +0 -0
@@ -142,8 +142,12 @@ class ModelProvider(Enum):
     CUSTOM = "custom"  # User-defined custom models

     def to_unified(self) -> UnifiedModelProvider:
-        """Convert to unified ModelProvider from empathy_os.models."""
-        return UnifiedModelProvider(self.value)
+        """Convert to unified ModelProvider from empathy_os.models.
+
+        As of v5.0.0, framework is Claude-native. All providers map to ANTHROPIC.
+        """
+        # v5.0.0: Framework is Claude-native, only ANTHROPIC supported
+        return UnifiedModelProvider.ANTHROPIC


 # Import unified MODEL_REGISTRY as single source of truth
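For illustration, the effect of this hunk: to_unified() no longer looks up the unified enum by string value, so every member (including CUSTOM) now maps to ANTHROPIC. A self-contained sketch with toy enums, not the package's real classes, since the diff viewer does not show which module this hunk edits:

    from enum import Enum

    class UnifiedModelProvider(Enum):
        ANTHROPIC = "anthropic"

    class ModelProvider(Enum):
        ANTHROPIC = "anthropic"
        CUSTOM = "custom"  # User-defined custom models

        def to_unified(self) -> "UnifiedModelProvider":
            # v5.0.0+: framework is Claude-native, only ANTHROPIC supported
            return UnifiedModelProvider.ANTHROPIC

    # Before this change, UnifiedModelProvider(self.value) would fail for any
    # member whose value is not also defined on the unified enum.
    assert ModelProvider.CUSTOM.to_unified() is UnifiedModelProvider.ANTHROPIC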
@@ -645,6 +645,9 @@ Code to review:
         "Code will proceed to architectural review."
     )

+    # Calculate security score
+    security_score = 70 if has_critical else 90
+
     # Determine preliminary verdict based on scan
     if has_critical:
         preliminary_verdict = "request_changes"
@@ -661,7 +664,7 @@ Code to review:
         "bug_patterns": [],
         "quality_issues": [],
         "has_critical_issues": has_critical,
-        "security_score": 70 if has_critical else 90,
+        "security_score": security_score,
         "verdict": preliminary_verdict,  # Add verdict for when architect_review is skipped
         "needs_architect_review": input_data.get("needs_architect_review", False)
         or has_critical,
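The two hunks above are a small refactor: the security score is computed once and reused in the result payload instead of being duplicated inline. A standalone sketch of the same pattern (preliminary_scan_result is a hypothetical name, not a function in the package):

    def preliminary_scan_result(has_critical: bool) -> dict:
        # Compute the security score once, then reuse it (same 70/90 thresholds as the diff)
        security_score = 70 if has_critical else 90
        return {
            "has_critical_issues": has_critical,
            "security_score": security_score,
        }

    assert preliminary_scan_result(True)["security_score"] == 70
    assert preliminary_scan_result(False)["security_score"] == 90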
@@ -0,0 +1,25 @@
+"""Document Generation Workflow Package.
+
+Cost-optimized documentation generation pipeline.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+# Core workflow
+# Configuration
+from .config import DOC_GEN_STEPS, TOKEN_COSTS
+
+# Report formatter
+from .report_formatter import format_doc_gen_report
+from .workflow import DocumentGenerationWorkflow
+
+__all__ = [
+    # Workflow
+    "DocumentGenerationWorkflow",
+    # Configuration
+    "DOC_GEN_STEPS",
+    "TOKEN_COSTS",
+    # Report formatter
+    "format_doc_gen_report",
+]
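A usage sketch for the new package, based only on the exports above; the section_focus argument comes from the re-run hint in report_formatter.py later in this diff, and any other constructor arguments are not shown here:

    from empathy_os.workflows.document_gen import (
        DOC_GEN_STEPS,
        TOKEN_COSTS,
        DocumentGenerationWorkflow,
        format_doc_gen_report,
    )

    # Assumed constructor usage; only section_focus is evidenced by this diff.
    workflow = DocumentGenerationWorkflow(
        section_focus=["Testing Guide", "API Reference"],
    )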
@@ -0,0 +1,30 @@
+"""Document Generation Configuration.
+
+Token costs and step configurations for documentation generation workflow.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+from ..base import ModelTier
+from ..step_config import WorkflowStepConfig
+
+# Approximate cost per 1K tokens (USD) - used for cost estimation
+# These are estimates and should be updated as pricing changes
+TOKEN_COSTS = {
+    ModelTier.CHEAP: {"input": 0.00025, "output": 0.00125},  # Haiku
+    ModelTier.CAPABLE: {"input": 0.003, "output": 0.015},  # Sonnet
+    ModelTier.PREMIUM: {"input": 0.015, "output": 0.075},  # Opus
+}
+
+# Define step configurations for executor-based execution
+# Note: max_tokens for polish is dynamically set based on input size
+DOC_GEN_STEPS = {
+    "polish": WorkflowStepConfig(
+        name="polish",
+        task_type="final_review",  # Premium tier task
+        tier_hint="premium",
+        description="Polish and improve documentation for consistency and quality",
+        max_tokens=20000,  # Increased to handle large chunked documents
+    ),
+}
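TOKEN_COSTS stores per-1K-token input/output rates keyed by ModelTier, so a cost estimate is a lookup and multiply. An illustrative sketch of that arithmetic (estimate_cost is not a package function; the rates are copied from the CAPABLE/Sonnet row above):

    CAPABLE_RATES = {"input": 0.003, "output": 0.015}  # USD per 1K tokens

    def estimate_cost(input_tokens: int, output_tokens: int, rates: dict) -> float:
        # (tokens / 1000) * rate, summed over input and output
        return (input_tokens / 1000) * rates["input"] + (output_tokens / 1000) * rates["output"]

    # e.g. a 12K-token prompt that produces a 4K-token draft:
    print(f"${estimate_cost(12_000, 4_000, CAPABLE_RATES):.4f}")  # $0.0960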
@@ -0,0 +1,162 @@
+"""Document Generation Report Formatter.
+
+Format documentation generation output as human-readable reports.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+
+def format_doc_gen_report(result: dict, input_data: dict) -> str:
+    """Format document generation output as a human-readable report.
+
+    Args:
+        result: The polish stage result
+        input_data: Input data from previous stages
+
+    Returns:
+        Formatted report string
+
+    """
+    lines = []
+
+    # Header
+    doc_type = result.get("doc_type", "general").replace("_", " ").title()
+    audience = result.get("audience", "developers").replace("_", " ").title()
+
+    lines.append("=" * 60)
+    lines.append("DOCUMENTATION GENERATION REPORT")
+    lines.append("=" * 60)
+    lines.append("")
+    lines.append(f"Document Type: {doc_type}")
+    lines.append(f"Target Audience: {audience}")
+    lines.append("")
+
+    # Outline summary
+    outline = input_data.get("outline", "")
+    if outline:
+        lines.append("-" * 60)
+        lines.append("DOCUMENT OUTLINE")
+        lines.append("-" * 60)
+        # Show just a preview of the outline
+        outline_lines = outline.split("\n")[:10]
+        lines.extend(outline_lines)
+        if len(outline.split("\n")) > 10:
+            lines.append("...")
+        lines.append("")
+
+    # Generated document
+    document = result.get("document", "")
+    if document:
+        lines.append("-" * 60)
+        lines.append("GENERATED DOCUMENTATION")
+        lines.append("-" * 60)
+        lines.append("")
+        lines.append(document)
+        lines.append("")
+
+    # Statistics
+    word_count = len(document.split()) if document else 0
+    section_count = document.count("##") if document else 0  # Count markdown headers
+    was_chunked = input_data.get("chunked", False)
+    chunk_count = input_data.get("chunk_count", 0)
+    chunks_completed = input_data.get("chunks_completed", chunk_count)
+    stopped_early = input_data.get("stopped_early", False)
+    accumulated_cost = result.get("accumulated_cost", 0)
+    polish_chunked = result.get("polish_chunked", False)
+
+    lines.append("-" * 60)
+    lines.append("STATISTICS")
+    lines.append("-" * 60)
+    lines.append(f"Word Count: {word_count}")
+    lines.append(f"Section Count: ~{section_count}")
+    if was_chunked:
+        if stopped_early:
+            lines.append(
+                f"Generation Mode: Chunked ({chunks_completed}/{chunk_count} chunks completed)",
+            )
+        else:
+            lines.append(f"Generation Mode: Chunked ({chunk_count} chunks)")
+    if polish_chunked:
+        polish_chunks = result.get("polish_chunks", 0)
+        lines.append(f"Polish Mode: Chunked ({polish_chunks} sections)")
+    if accumulated_cost > 0:
+        lines.append(f"Estimated Cost: ${accumulated_cost:.2f}")
+    lines.append("")
+
+    # Export info
+    export_path = result.get("export_path")
+    if export_path:
+        lines.append("-" * 60)
+        lines.append("FILE EXPORT")
+        lines.append("-" * 60)
+        lines.append(f"Documentation saved to: {export_path}")
+        report_path = result.get("report_path")
+        if report_path:
+            lines.append(f"Report saved to: {report_path}")
+        lines.append("")
+        lines.append("Full documentation is available in the exported file.")
+        lines.append("")
+
+    # Warning notice (cost limit, errors, etc.)
+    warning = input_data.get("warning") or result.get("warning")
+    if warning or stopped_early:
+        lines.append("-" * 60)
+        lines.append("⚠️ WARNING")
+        lines.append("-" * 60)
+        if warning:
+            lines.append(warning)
+        if stopped_early and not warning:
+            lines.append("Generation stopped early due to cost or error limits.")
+        lines.append("")
+
+    # Truncation detection and scope notice
+    truncation_indicators = []
+    if document:  # Handle None or empty document
+        truncation_indicators = [
+            document.rstrip().endswith("..."),
+            document.rstrip().endswith("-"),
+            "```" in document and document.count("```") % 2 != 0,  # Unclosed code block
+            any(
+                phrase in document.lower()
+                for phrase in ["continued in", "see next section", "to be continued"]
+            ),
+        ]
+
+    # Count planned sections from outline (top-level only)
+    import re
+
+    planned_sections = 0
+    top_level_pattern = re.compile(r"^(\d+)\.\s+([A-Za-z].*)")
+    if outline:
+        for line in outline.split("\n"):
+            stripped = line.strip()
+            if top_level_pattern.match(stripped):
+                planned_sections += 1
+
+    is_truncated = any(truncation_indicators) or (
+        planned_sections > 0 and section_count < planned_sections - 1
+    )
+
+    if is_truncated or planned_sections > section_count + 1:
+        lines.append("-" * 60)
+        lines.append("SCOPE NOTICE")
+        lines.append("-" * 60)
+        lines.append("⚠️ DOCUMENTATION MAY BE INCOMPLETE")
+        if planned_sections > 0:
+            lines.append(f" Planned sections: {planned_sections}")
+            lines.append(f" Generated sections: {section_count}")
+        lines.append("")
+        lines.append("To generate missing sections, re-run with section_focus:")
+        lines.append(" workflow = DocumentGenerationWorkflow(")
+        lines.append(' section_focus=["Testing Guide", "API Reference"]')
+        lines.append(" )")
+        lines.append("")
+
+    # Footer
+    lines.append("=" * 60)
+    model_tier = result.get("model_tier_used", "unknown")
+    lines.append(f"Generated using {model_tier} tier model")
+    lines.append("=" * 60)
+
+    return "\n".join(lines)
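A usage sketch for format_doc_gen_report; the dict keys mirror the ones the function reads above, while the values are made-up examples:

    from empathy_os.workflows.document_gen import format_doc_gen_report

    result = {
        "doc_type": "api_reference",
        "audience": "developers",
        "document": "## Overview\nExample body text.\n\n## Endpoints\nGET /things returns a list.",
        "accumulated_cost": 0.42,
        "model_tier_used": "premium",
    }
    input_data = {"outline": "1. Overview\n2. Endpoints", "chunked": False}

    print(format_doc_gen_report(result, input_data))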