empathy-framework 5.1.1-py3-none-any.whl → 5.3.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/METADATA +79 -6
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/RECORD +83 -64
- empathy_os/__init__.py +1 -1
- empathy_os/cache/hybrid.py +5 -1
- empathy_os/cli/commands/batch.py +8 -0
- empathy_os/cli/commands/profiling.py +4 -0
- empathy_os/cli/commands/workflow.py +8 -4
- empathy_os/cli_router.py +9 -0
- empathy_os/config.py +15 -2
- empathy_os/core_modules/__init__.py +15 -0
- empathy_os/dashboard/simple_server.py +62 -30
- empathy_os/mcp/__init__.py +10 -0
- empathy_os/mcp/server.py +506 -0
- empathy_os/memory/control_panel.py +1 -131
- empathy_os/memory/control_panel_support.py +145 -0
- empathy_os/memory/encryption.py +159 -0
- empathy_os/memory/long_term.py +46 -631
- empathy_os/memory/long_term_types.py +99 -0
- empathy_os/memory/mixins/__init__.py +25 -0
- empathy_os/memory/mixins/backend_init_mixin.py +249 -0
- empathy_os/memory/mixins/capabilities_mixin.py +208 -0
- empathy_os/memory/mixins/handoff_mixin.py +208 -0
- empathy_os/memory/mixins/lifecycle_mixin.py +49 -0
- empathy_os/memory/mixins/long_term_mixin.py +352 -0
- empathy_os/memory/mixins/promotion_mixin.py +109 -0
- empathy_os/memory/mixins/short_term_mixin.py +182 -0
- empathy_os/memory/short_term.py +61 -12
- empathy_os/memory/simple_storage.py +302 -0
- empathy_os/memory/storage_backend.py +167 -0
- empathy_os/memory/types.py +8 -3
- empathy_os/memory/unified.py +21 -1120
- empathy_os/meta_workflows/cli_commands/__init__.py +56 -0
- empathy_os/meta_workflows/cli_commands/agent_commands.py +321 -0
- empathy_os/meta_workflows/cli_commands/analytics_commands.py +442 -0
- empathy_os/meta_workflows/cli_commands/config_commands.py +232 -0
- empathy_os/meta_workflows/cli_commands/memory_commands.py +182 -0
- empathy_os/meta_workflows/cli_commands/template_commands.py +354 -0
- empathy_os/meta_workflows/cli_commands/workflow_commands.py +382 -0
- empathy_os/meta_workflows/cli_meta_workflows.py +52 -1802
- empathy_os/models/telemetry/__init__.py +71 -0
- empathy_os/models/telemetry/analytics.py +594 -0
- empathy_os/models/telemetry/backend.py +196 -0
- empathy_os/models/telemetry/data_models.py +431 -0
- empathy_os/models/telemetry/storage.py +489 -0
- empathy_os/orchestration/__init__.py +35 -0
- empathy_os/orchestration/execution_strategies.py +481 -0
- empathy_os/orchestration/meta_orchestrator.py +488 -1
- empathy_os/routing/workflow_registry.py +36 -0
- empathy_os/telemetry/agent_coordination.py +2 -3
- empathy_os/telemetry/agent_tracking.py +26 -7
- empathy_os/telemetry/approval_gates.py +18 -24
- empathy_os/telemetry/cli.py +19 -724
- empathy_os/telemetry/commands/__init__.py +14 -0
- empathy_os/telemetry/commands/dashboard_commands.py +696 -0
- empathy_os/telemetry/event_streaming.py +7 -3
- empathy_os/telemetry/feedback_loop.py +28 -15
- empathy_os/tools.py +183 -0
- empathy_os/workflows/__init__.py +5 -0
- empathy_os/workflows/autonomous_test_gen.py +860 -161
- empathy_os/workflows/base.py +6 -2
- empathy_os/workflows/code_review.py +4 -1
- empathy_os/workflows/document_gen/__init__.py +25 -0
- empathy_os/workflows/document_gen/config.py +30 -0
- empathy_os/workflows/document_gen/report_formatter.py +162 -0
- empathy_os/workflows/{document_gen.py → document_gen/workflow.py} +5 -184
- empathy_os/workflows/output.py +4 -1
- empathy_os/workflows/progress.py +8 -2
- empathy_os/workflows/security_audit.py +2 -2
- empathy_os/workflows/security_audit_phase3.py +7 -4
- empathy_os/workflows/seo_optimization.py +633 -0
- empathy_os/workflows/test_gen/__init__.py +52 -0
- empathy_os/workflows/test_gen/ast_analyzer.py +249 -0
- empathy_os/workflows/test_gen/config.py +88 -0
- empathy_os/workflows/test_gen/data_models.py +38 -0
- empathy_os/workflows/test_gen/report_formatter.py +289 -0
- empathy_os/workflows/test_gen/test_templates.py +381 -0
- empathy_os/workflows/test_gen/workflow.py +655 -0
- empathy_os/workflows/test_gen.py +42 -1905
- empathy_os/cli/parsers/cache 2.py +0 -65
- empathy_os/cli_router 2.py +0 -416
- empathy_os/dashboard/app 2.py +0 -512
- empathy_os/dashboard/simple_server 2.py +0 -403
- empathy_os/dashboard/standalone_server 2.py +0 -536
- empathy_os/memory/types 2.py +0 -441
- empathy_os/models/adaptive_routing 2.py +0 -437
- empathy_os/models/telemetry.py +0 -1660
- empathy_os/project_index/scanner_parallel 2.py +0 -291
- empathy_os/telemetry/agent_coordination 2.py +0 -478
- empathy_os/telemetry/agent_tracking 2.py +0 -350
- empathy_os/telemetry/approval_gates 2.py +0 -563
- empathy_os/telemetry/event_streaming 2.py +0 -405
- empathy_os/telemetry/feedback_loop 2.py +0 -557
- empathy_os/vscode_bridge 2.py +0 -173
- empathy_os/workflows/progressive/__init__ 2.py +0 -92
- empathy_os/workflows/progressive/cli 2.py +0 -242
- empathy_os/workflows/progressive/core 2.py +0 -488
- empathy_os/workflows/progressive/orchestrator 2.py +0 -701
- empathy_os/workflows/progressive/reports 2.py +0 -528
- empathy_os/workflows/progressive/telemetry 2.py +0 -280
- empathy_os/workflows/progressive/test_gen 2.py +0 -514
- empathy_os/workflows/progressive/workflow 2.py +0 -628
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/WHEEL +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/licenses/LICENSE +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/top_level.txt +0 -0
empathy_os/telemetry/event_streaming.py CHANGED

@@ -66,13 +66,17 @@ class StreamEvent:

         Args:
             event_id: Redis stream entry ID
-            entry_data: Raw entry data from Redis (bytes dict)
+            entry_data: Raw entry data from Redis (bytes dict or str dict)

         Returns:
             StreamEvent instance
         """
-        # Decode bytes to strings
-        decoded = {
+        # Decode bytes to strings (handle both bytes and str)
+        decoded = {}
+        for k, v in entry_data.items():
+            key = k.decode("utf-8") if isinstance(k, bytes) else k
+            value = v.decode("utf-8") if isinstance(v, bytes) else v
+            decoded[key] = value

         # Parse timestamp
         timestamp_str = decoded.get("timestamp", "")
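This decode change matters because redis-py returns bytes by default but plain str when the client is created with decode_responses=True, so the parser must accept both. A standalone sketch of the same pattern, with an illustrative helper name not taken from the package:

```python
def decode_entry(entry_data: dict) -> dict[str, str]:
    """Normalize a Redis stream entry to str keys and values."""
    decoded = {}
    for k, v in entry_data.items():
        key = k.decode("utf-8") if isinstance(k, bytes) else k
        value = v.decode("utf-8") if isinstance(v, bytes) else v
        decoded[key] = value
    return decoded

# Default redis-py client (bytes) and decode_responses=True client (str)
# both normalize to the same dict:
print(decode_entry({b"timestamp": b"2026-01-29T12:00:00"}))
print(decode_entry({"timestamp": "2026-01-29T12:00:00"}))
```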
empathy_os/telemetry/feedback_loop.py CHANGED

@@ -93,8 +93,13 @@ class FeedbackEntry:
         elif not isinstance(timestamp, datetime):
             timestamp = datetime.utcnow()

+        # Handle missing feedback_id (legacy entries)
+        feedback_id = data.get("feedback_id")
+        if not feedback_id:
+            feedback_id = f"fb-{int(timestamp.timestamp()*1000)}"
+
         return cls(
-            feedback_id=
+            feedback_id=feedback_id,
             workflow_name=data["workflow_name"],
             stage_name=data["stage_name"],
             tier=data["tier"],
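The new fallback keeps entries written before feedback_id existed loadable. The same pattern in isolation, as a hypothetical helper that is not part of the package:

```python
from datetime import datetime, timezone

def ensure_feedback_id(data: dict, timestamp: datetime) -> str:
    """Return the stored feedback_id, or synthesize one from the timestamp."""
    feedback_id = data.get("feedback_id")
    if not feedback_id:
        # Millisecond resolution matches the "fb-{ms}" format used above.
        feedback_id = f"fb-{int(timestamp.timestamp() * 1000)}"
    return feedback_id

legacy = {"workflow_name": "code-review", "stage_name": "analysis"}  # no id
print(ensure_feedback_id(legacy, datetime.now(timezone.utc)))  # e.g. "fb-1769..."
```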
@@ -284,7 +289,11 @@ class FeedbackLoop:
             # Retrieve entry
             data = self._retrieve_feedback(key)
             if data:
-
+                try:
+                    entries.append(FeedbackEntry.from_dict(data))
+                except Exception as e:
+                    logger.error(f"Failed to parse feedback entry {key}: {e}, data={data}")
+                    continue

             if len(entries) >= limit:
                 break
@@ -303,9 +312,8 @@ class FeedbackLoop:
             return None

         try:
-
-
-            elif hasattr(self.memory, "_client"):
+            # Use direct Redis access (feedback keys are stored without prefix)
+            if hasattr(self.memory, "_client"):
                 import json

                 data = self.memory._client.get(key)
@@ -482,14 +490,15 @@ class FeedbackLoop:
     def get_underperforming_stages(
         self, workflow_name: str, quality_threshold: float = 0.7
     ) -> list[tuple[str, QualityStats]]:
-        """Get workflow stages with poor quality scores.
+        """Get workflow stages/tiers with poor quality scores.

         Args:
             workflow_name: Name of workflow
-            quality_threshold: Threshold below which stage is considered underperforming
+            quality_threshold: Threshold below which stage/tier is considered underperforming

         Returns:
-            List of (stage_name, stats) tuples for underperforming
+            List of (stage_name, stats) tuples for underperforming stage/tier combinations
+            The stage_name includes the tier for clarity (e.g., "analysis/cheap")
         """
         if not self.memory or not hasattr(self.memory, "_client"):
             return []
@@ -499,22 +508,26 @@ class FeedbackLoop:
         pattern = f"feedback:{workflow_name}:*"
         keys = self.memory._client.keys(pattern)

-        # Extract unique
-
+        # Extract unique stage/tier combinations
+        stage_tier_combos = set()
         for key in keys:
             if isinstance(key, bytes):
                 key = key.decode("utf-8")
             # Parse key: feedback:{workflow}:{stage}:{tier}:{id}
             parts = key.split(":")
             if len(parts) >= 4:
-
+                stage_name = parts[2]
+                tier = parts[3]
+                stage_tier_combos.add((stage_name, tier))

-        # Get stats for each stage
+        # Get stats for each stage/tier combination
         underperforming = []
-        for stage_name in
-            stats = self.get_quality_stats(workflow_name, stage_name)
+        for stage_name, tier in stage_tier_combos:
+            stats = self.get_quality_stats(workflow_name, stage_name, tier=tier)
             if stats and stats.avg_quality < quality_threshold:
-
+                # Include tier in the stage name for clarity
+                stage_label = f"{stage_name}/{tier}"
+                underperforming.append((stage_label, stats))

         # Sort by quality (worst first)
         underperforming.sort(key=lambda x: x[1].avg_quality)
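Together these hunks make get_underperforming_stages aggregate per (stage, tier) pair instead of per stage, parsing both fields out of the feedback:{workflow}:{stage}:{tier}:{id} key. A hedged usage sketch, assuming a configured FeedbackLoop instance named loop:

```python
# Each result is ("stage/tier", QualityStats), sorted worst quality first.
for stage_label, stats in loop.get_underperforming_stages(
    "code-review", quality_threshold=0.7
):
    print(f"{stage_label}: avg_quality={stats.avg_quality:.2f}")
# e.g. analysis/cheap: avg_quality=0.55
```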
empathy_os/tools.py ADDED

@@ -0,0 +1,183 @@
+"""Interactive user prompting tools.
+
+Provides tools for asking users questions and getting structured responses.
+This module implements the AskUserQuestion functionality used by the
+meta-orchestrator for interactive agent team creation.
+
+Integration with Claude Code:
+    When running Python code that calls this function, Claude Code will
+    detect the call and use its AskUserQuestion tool to prompt the user
+    in the IDE. This is implemented via a request/response IPC mechanism.
+
+Created: 2026-01-29
+"""
+import json
+import logging
+import os
+import tempfile
+from collections.abc import Callable
+from pathlib import Path
+from typing import Any
+
+logger = logging.getLogger(__name__)
+
+# Global callback for custom AskUserQuestion implementations
+_custom_ask_function: Callable | None = None
+
+
+def set_ask_user_question_handler(handler: Callable) -> None:
+    """Set a custom handler for AskUserQuestion.
+
+    This allows integration with different UI systems (CLI, web, IDE).
+
+    Args:
+        handler: Callable that takes questions list and returns response dict
+
+    Example:
+        >>> def my_handler(questions):
+        ...     # Custom UI logic
+        ...     return {"Pattern": "sequential"}
+        >>> set_ask_user_question_handler(my_handler)
+    """
+    global _custom_ask_function
+    _custom_ask_function = handler
+    logger.info("Custom AskUserQuestion handler registered")
+
+
+def AskUserQuestion(questions: list[dict[str, Any]]) -> dict[str, Any]:
+    """Ask user questions and get structured responses.
+
+    This function supports multiple integration modes:
+    1. Custom handler (via set_ask_user_question_handler)
+    2. Claude Code IPC (when running in Claude Code environment)
+    3. Fallback to NotImplementedError (prompts caller to use automatic mode)
+
+    Args:
+        questions: List of question dictionaries, each with:
+            - header: Short label for the question (str, max 12 chars)
+            - question: Full question text (str)
+            - multiSelect: Allow multiple selections (bool)
+            - options: List of option dicts with label and description
+
+    Returns:
+        Dictionary mapping question headers to selected answers
+
+    Raises:
+        NotImplementedError: If no handler available and not in Claude Code
+        RuntimeError: If user cancels or interaction fails
+
+    Example:
+        >>> response = AskUserQuestion(
+        ...     questions=[{
+        ...         "header": "Pattern",
+        ...         "question": "Which pattern to use?",
+        ...         "multiSelect": False,
+        ...         "options": [
+        ...             {"label": "sequential", "description": "One after another"},
+        ...             {"label": "parallel", "description": "All at once"}
+        ...         ]
+        ...     }]
+        ... )
+        >>> response
+        {"Pattern": "sequential"}
+    """
+    # Mode 1: Custom handler
+    if _custom_ask_function is not None:
+        logger.info("Using custom AskUserQuestion handler")
+        return _custom_ask_function(questions)
+
+    # Mode 2: Claude Code IPC
+    # When running inside Claude Code, we can use a file-based IPC mechanism
+    if _is_running_in_claude_code():
+        logger.info("Using Claude Code IPC for AskUserQuestion")
+        return _ask_via_claude_code_ipc(questions)
+
+    # Mode 3: Fallback - raise error with helpful message
+    logger.warning("No AskUserQuestion handler available")
+    raise NotImplementedError(
+        "AskUserQuestion requires either:\n"
+        "1. Custom handler via set_ask_user_question_handler()\n"
+        "2. Running in Claude Code environment\n"
+        "3. Using interactive=False for automatic mode\n\n"
+        "Use: orchestrator.analyze_and_compose(task, interactive=False)"
+    )
+
+
+def _is_running_in_claude_code() -> bool:
+    """Check if code is running inside Claude Code environment.
+
+    Returns:
+        True if running in Claude Code, False otherwise
+    """
+    # Check for Claude Code environment markers
+    return (
+        os.getenv("CLAUDE_CODE_SESSION") is not None
+        or os.getenv("CLAUDE_AGENT_MODE") is not None
+        or Path("/tmp/.claude-code").exists()
+    )
+
+
+def _ask_via_claude_code_ipc(questions: list[dict[str, Any]]) -> dict[str, Any]:
+    """Ask user questions via Claude Code IPC mechanism.
+
+    This creates a request file that Claude Code monitors, then waits for
+    the response file to be created with the user's answers.
+
+    Args:
+        questions: List of question dictionaries
+
+    Returns:
+        User's responses as a dictionary
+
+    Raises:
+        RuntimeError: If communication fails or times out
+    """
+    import time
+    import uuid
+
+    request_id = str(uuid.uuid4())
+    ipc_dir = Path(tempfile.gettempdir()) / ".claude-code-ipc"
+    ipc_dir.mkdir(exist_ok=True)
+
+    request_file = ipc_dir / f"ask-request-{request_id}.json"
+    response_file = ipc_dir / f"ask-response-{request_id}.json"
+
+    try:
+        # Write request
+        request_data = {
+            "request_id": request_id,
+            "questions": questions,
+            "timestamp": time.time(),
+        }
+
+        request_file.write_text(json.dumps(request_data, indent=2))
+        logger.info(f"Wrote IPC request: {request_file}")
+
+        # Wait for response (max 60 seconds)
+        timeout = 60
+        start_time = time.time()
+
+        while time.time() - start_time < timeout:
+            if response_file.exists():
+                # Read response
+                response_data = json.loads(response_file.read_text())
+                logger.info(f"Received IPC response: {response_data}")
+
+                # Cleanup
+                request_file.unlink(missing_ok=True)
+                response_file.unlink(missing_ok=True)
+
+                return response_data.get("answers", {})
+
+            time.sleep(0.1)  # Poll every 100ms
+
+        raise RuntimeError(
+            f"Timeout waiting for user response (waited {timeout}s). "
+            "User may have cancelled or Claude Code IPC is not active."
+        )
+
+    except Exception as e:
+        # Cleanup on error
+        request_file.unlink(missing_ok=True)
+        response_file.unlink(missing_ok=True)
+        raise RuntimeError(f"Claude Code IPC failed: {e}") from e
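The handler contract in this new module (a list of question dicts in, a {header: answer} dict out) makes the hook easy to back with other UIs. A minimal terminal-based sketch using only the API added above; the input()-driven UI and single-select-only handling are illustrative assumptions:

```python
from empathy_os.tools import AskUserQuestion, set_ask_user_question_handler

def cli_handler(questions: list[dict]) -> dict:
    """Prompt on stdin/stdout; multiSelect handling omitted for brevity."""
    answers = {}
    for q in questions:
        print(q["question"])
        for i, opt in enumerate(q["options"], start=1):
            print(f"  {i}. {opt['label']} - {opt['description']}")
        choice = int(input("Select: ")) - 1
        answers[q["header"]] = q["options"][choice]["label"]
    return answers

set_ask_user_question_handler(cli_handler)
response = AskUserQuestion([{
    "header": "Pattern",
    "question": "Which pattern to use?",
    "multiSelect": False,
    "options": [
        {"label": "sequential", "description": "One after another"},
        {"label": "parallel", "description": "All at once"},
    ],
}])
print(response)  # {"Pattern": "sequential"}
```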
empathy_os/workflows/__init__.py CHANGED

@@ -62,6 +62,7 @@ if TYPE_CHECKING:
     from .research_synthesis import ResearchSynthesisWorkflow
     from .secure_release import SecureReleasePipeline, SecureReleaseResult
     from .security_audit import SecurityAuditWorkflow
+    from .seo_optimization import SEOOptimizationWorkflow
     from .step_config import WorkflowStepConfig
     from .test5 import Test5Workflow
     from .test_coverage_boost_crew import TestCoverageBoostCrew, TestCoverageBoostCrewResult

@@ -134,6 +135,7 @@ _LAZY_WORKFLOW_IMPORTS: dict[str, tuple[str, str]] = {
     "SecureReleasePipeline": (".secure_release", "SecureReleasePipeline"),
     "SecureReleaseResult": (".secure_release", "SecureReleaseResult"),
     "SecurityAuditWorkflow": (".security_audit", "SecurityAuditWorkflow"),
+    "SEOOptimizationWorkflow": (".seo_optimization", "SEOOptimizationWorkflow"),
     "Test5Workflow": (".test5", "Test5Workflow"),
     "TestCoverageBoostCrew": (".test_coverage_boost_crew", "TestCoverageBoostCrew"),
     "TestCoverageBoostCrewResult": (".test_coverage_boost_crew", "TestCoverageBoostCrewResult"),

@@ -210,7 +212,9 @@ def _load_cli_commands() -> None:
 _DEFAULT_WORKFLOW_NAMES: dict[str, str] = {
     # Core workflows
     "code-review": "CodeReviewWorkflow",
+    # Documentation workflows
     "doc-gen": "DocumentGenerationWorkflow",
+    "seo-optimization": "SEOOptimizationWorkflow",
     # Analysis workflows
     "bug-predict": "BugPredictionWorkflow",
     "security-audit": "SecurityAuditWorkflow",

@@ -489,6 +493,7 @@ __all__ = [
     "SecureReleasePipeline",
     "SecureReleaseResult",
     "SecurityAuditWorkflow",
+    "SEOOptimizationWorkflow",
     "TestGenerationWorkflow",
     "BehavioralTestGenerationWorkflow",
     "ParallelTestGenerationWorkflow",
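_LAZY_WORKFLOW_IMPORTS maps each exported name to a (module, attribute) pair, which registers the new SEOOptimizationWorkflow without importing it eagerly. Tables like this are typically resolved through a PEP 562 module-level __getattr__; the package's actual hook is not shown in this diff, so the following is only a sketch of the mechanism:

```python
import importlib

def __getattr__(name: str):
    if name in _LAZY_WORKFLOW_IMPORTS:
        module_path, attr = _LAZY_WORKFLOW_IMPORTS[name]
        # Relative import resolves against this package (e.g. empathy_os.workflows)
        obj = getattr(importlib.import_module(module_path, package=__name__), attr)
        globals()[name] = obj  # cache so __getattr__ is not hit again
        return obj
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# With the hook in place, this line triggers the import of .seo_optimization:
# from empathy_os.workflows import SEOOptimizationWorkflow
```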