deepwork-0.5.1-py3-none-any.whl → deepwork-0.7.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepwork/__init__.py +1 -1
- deepwork/cli/hook.py +3 -4
- deepwork/cli/install.py +70 -117
- deepwork/cli/main.py +2 -2
- deepwork/cli/serve.py +133 -0
- deepwork/cli/sync.py +93 -58
- deepwork/core/adapters.py +91 -102
- deepwork/core/generator.py +19 -386
- deepwork/core/hooks_syncer.py +1 -1
- deepwork/core/parser.py +270 -1
- deepwork/hooks/README.md +0 -44
- deepwork/hooks/__init__.py +3 -6
- deepwork/hooks/check_version.sh +54 -21
- deepwork/mcp/__init__.py +23 -0
- deepwork/mcp/quality_gate.py +347 -0
- deepwork/mcp/schemas.py +263 -0
- deepwork/mcp/server.py +253 -0
- deepwork/mcp/state.py +422 -0
- deepwork/mcp/tools.py +394 -0
- deepwork/schemas/job.schema.json +347 -0
- deepwork/schemas/job_schema.py +27 -239
- deepwork/standard_jobs/deepwork_jobs/doc_specs/job_spec.md +9 -15
- deepwork/standard_jobs/deepwork_jobs/job.yml +146 -46
- deepwork/standard_jobs/deepwork_jobs/steps/define.md +100 -33
- deepwork/standard_jobs/deepwork_jobs/steps/errata.md +154 -0
- deepwork/standard_jobs/deepwork_jobs/steps/fix_jobs.md +207 -0
- deepwork/standard_jobs/deepwork_jobs/steps/fix_settings.md +177 -0
- deepwork/standard_jobs/deepwork_jobs/steps/implement.md +22 -138
- deepwork/standard_jobs/deepwork_jobs/steps/iterate.md +221 -0
- deepwork/standard_jobs/deepwork_jobs/steps/learn.md +2 -26
- deepwork/standard_jobs/deepwork_jobs/steps/test.md +154 -0
- deepwork/standard_jobs/deepwork_jobs/templates/job.yml.template +2 -0
- deepwork/templates/claude/settings.json +16 -0
- deepwork/templates/claude/skill-deepwork.md.jinja +37 -0
- deepwork/templates/gemini/skill-deepwork.md.jinja +37 -0
- deepwork-0.7.0.dist-info/METADATA +317 -0
- deepwork-0.7.0.dist-info/RECORD +64 -0
- deepwork/cli/rules.py +0 -32
- deepwork/core/command_executor.py +0 -190
- deepwork/core/pattern_matcher.py +0 -271
- deepwork/core/rules_parser.py +0 -559
- deepwork/core/rules_queue.py +0 -321
- deepwork/hooks/rules_check.py +0 -759
- deepwork/schemas/rules_schema.py +0 -135
- deepwork/standard_jobs/deepwork_jobs/steps/review_job_spec.md +0 -208
- deepwork/standard_jobs/deepwork_jobs/templates/doc_spec.md.example +0 -86
- deepwork/standard_jobs/deepwork_rules/hooks/capture_prompt_work_tree.sh +0 -38
- deepwork/standard_jobs/deepwork_rules/hooks/global_hooks.yml +0 -8
- deepwork/standard_jobs/deepwork_rules/hooks/user_prompt_submit.sh +0 -16
- deepwork/standard_jobs/deepwork_rules/job.yml +0 -49
- deepwork/standard_jobs/deepwork_rules/rules/.gitkeep +0 -13
- deepwork/standard_jobs/deepwork_rules/rules/api-documentation-sync.md.example +0 -10
- deepwork/standard_jobs/deepwork_rules/rules/readme-documentation.md.example +0 -10
- deepwork/standard_jobs/deepwork_rules/rules/security-review.md.example +0 -11
- deepwork/standard_jobs/deepwork_rules/rules/skill-md-validation.md +0 -46
- deepwork/standard_jobs/deepwork_rules/rules/source-test-pairing.md.example +0 -13
- deepwork/standard_jobs/deepwork_rules/steps/define.md +0 -249
- deepwork/templates/claude/skill-job-meta.md.jinja +0 -77
- deepwork/templates/claude/skill-job-step.md.jinja +0 -235
- deepwork/templates/gemini/skill-job-meta.toml.jinja +0 -76
- deepwork/templates/gemini/skill-job-step.toml.jinja +0 -162
- deepwork-0.5.1.dist-info/METADATA +0 -381
- deepwork-0.5.1.dist-info/RECORD +0 -72
- {deepwork-0.5.1.dist-info → deepwork-0.7.0.dist-info}/WHEEL +0 -0
- {deepwork-0.5.1.dist-info → deepwork-0.7.0.dist-info}/entry_points.txt +0 -0
- {deepwork-0.5.1.dist-info → deepwork-0.7.0.dist-info}/licenses/LICENSE.md +0 -0
deepwork/mcp/server.py
ADDED
@@ -0,0 +1,253 @@

```python
"""FastMCP server for DeepWork workflows.

This module creates and configures the MCP server that exposes workflow
management tools to AI agents.

Usage:
    deepwork serve --path /path/to/project

IMPORTANT: If you modify any tool signatures, parameters, or return types in this
file, you MUST also update the documentation in doc/mcp_interface.md to keep it
in sync with the implementation.
"""

from __future__ import annotations

import logging
from pathlib import Path
from typing import Any

from fastmcp import FastMCP

from deepwork.mcp.quality_gate import QualityGate
from deepwork.mcp.schemas import (
    AbortWorkflowInput,
    FinishedStepInput,
    StartWorkflowInput,
)
from deepwork.mcp.state import StateManager
from deepwork.mcp.tools import WorkflowTools

# Configure logging
logger = logging.getLogger("deepwork.mcp")


def create_server(
    project_root: Path | str,
    enable_quality_gate: bool = True,
    quality_gate_timeout: int = 120,
    quality_gate_max_attempts: int = 3,
) -> FastMCP:
    """Create and configure the MCP server.

    Args:
        project_root: Path to the project root
        enable_quality_gate: Whether to enable quality gate evaluation (default: True)
        quality_gate_timeout: Timeout in seconds for quality gate (default: 120)
        quality_gate_max_attempts: Max attempts before failing quality gate (default: 3)

    Returns:
        Configured FastMCP server instance
    """
    project_path = Path(project_root).resolve()

    # Initialize components
    state_manager = StateManager(project_path)

    quality_gate: QualityGate | None = None
    if enable_quality_gate:
        quality_gate = QualityGate(
            timeout=quality_gate_timeout,
        )

    tools = WorkflowTools(
        project_root=project_path,
        state_manager=state_manager,
        quality_gate=quality_gate,
        max_quality_attempts=quality_gate_max_attempts,
    )

    # Create MCP server
    mcp = FastMCP(
        name="deepwork",
        instructions=_get_server_instructions(),
    )

    # =========================================================================
    # MCP Tool Registrations
    # =========================================================================
    # IMPORTANT: When modifying these tool signatures (parameters, return types,
    # descriptions), update doc/mcp_interface.md to keep documentation in sync.
    # =========================================================================

    def _log_tool_call(tool_name: str, params: dict[str, Any] | None = None) -> None:
        """Log a tool call with stack information."""
        stack = [entry.model_dump() for entry in state_manager.get_stack()]
        log_data = {
            "tool": tool_name,
            "stack": stack,
            "stack_depth": len(stack),
        }
        if params:
            log_data["params"] = params
        logger.info("MCP tool call: %s", log_data)

    @mcp.tool(
        description=(
            "List all available DeepWork workflows. "
            "Returns job names, workflow definitions, and step information. "
            "Call this first to discover available workflows."
        )
    )
    def get_workflows() -> dict[str, Any]:
        """Get all available workflows."""
        _log_tool_call("get_workflows")
        response = tools.get_workflows()
        return response.model_dump()

    @mcp.tool(
        description=(
            "Start a new workflow session. "
            "Creates a git branch, initializes state tracking, and returns "
            "the first step's instructions. "
            "Required parameters: goal (what user wants), job_name, workflow_name. "
            "Optional: instance_id for naming (e.g., 'acme', 'q1-2026'). "
            "Supports nested workflows - starting a workflow while one is active "
            "pushes onto the stack. Use abort_workflow to cancel and return to parent."
        )
    )
    async def start_workflow(
        goal: str,
        job_name: str,
        workflow_name: str,
        instance_id: str | None = None,
    ) -> dict[str, Any]:
        """Start a workflow and get first step instructions."""
        _log_tool_call(
            "start_workflow",
            {
                "goal": goal,
                "job_name": job_name,
                "workflow_name": workflow_name,
                "instance_id": instance_id,
            },
        )
        input_data = StartWorkflowInput(
            goal=goal,
            job_name=job_name,
            workflow_name=workflow_name,
            instance_id=instance_id,
        )
        response = await tools.start_workflow(input_data)
        return response.model_dump()

    @mcp.tool(
        description=(
            "Report that you've finished a workflow step. "
            "Validates outputs against quality criteria (if configured), "
            "then returns either: "
            "'needs_work' with feedback to fix issues, "
            "'next_step' with instructions for the next step, or "
            "'workflow_complete' when finished (pops from stack if nested). "
            "Required: outputs (list of file paths created). "
            "Optional: notes about work done. "
            "Optional: quality_review_override_reason to skip quality review (must explain why)."
        )
    )
    async def finished_step(
        outputs: list[str],
        notes: str | None = None,
        quality_review_override_reason: str | None = None,
    ) -> dict[str, Any]:
        """Report step completion and get next instructions."""
        _log_tool_call(
            "finished_step",
            {
                "outputs": outputs,
                "notes": notes,
                "quality_review_override_reason": quality_review_override_reason,
            },
        )
        input_data = FinishedStepInput(
            outputs=outputs,
            notes=notes,
            quality_review_override_reason=quality_review_override_reason,
        )
        response = await tools.finished_step(input_data)
        return response.model_dump()

    @mcp.tool(
        description=(
            "Abort the current workflow and return to the parent workflow (if nested). "
            "Use this when a workflow cannot be completed and needs to be abandoned. "
            "Required: explanation (why the workflow is being aborted). "
            "Returns the aborted workflow info and the resumed parent workflow (if any)."
        )
    )
    async def abort_workflow(
        explanation: str,
    ) -> dict[str, Any]:
        """Abort the current workflow and return to parent."""
        _log_tool_call("abort_workflow", {"explanation": explanation})
        input_data = AbortWorkflowInput(explanation=explanation)
        response = await tools.abort_workflow(input_data)
        return response.model_dump()

    return mcp


def _get_server_instructions() -> str:
    """Get the server instructions for agents.

    Returns:
        Instructions string describing how to use the DeepWork MCP server.
    """
    return """# DeepWork Workflow Server

This MCP server guides you through multi-step workflows with quality gates.

## Workflow

1. **Discover**: Call `get_workflows` to see available workflows
2. **Start**: Call `start_workflow` with your goal, job_name, and workflow_name
3. **Execute**: Follow the step instructions returned
4. **Checkpoint**: Call `finished_step` with your outputs when done with each step
5. **Iterate**: If `needs_work`, fix issues and call `finished_step` again
6. **Continue**: If `next_step`, execute new instructions and repeat
7. **Complete**: When `workflow_complete`, the workflow is done

## Quality Gates

Steps may have quality criteria. When you call `finished_step`:
- Your outputs are evaluated against the criteria
- If any fail, you'll get `needs_work` status with feedback
- Fix the issues and call `finished_step` again
- After passing, you'll get the next step or completion

## Nested Workflows

Workflows can be nested - starting a new workflow while one is active pushes
onto a stack. This is useful when a step requires running another workflow.

- All tool responses include a `stack` field showing the current workflow stack
- Each stack entry shows `{workflow: "job/workflow", step: "current_step"}`
- When a workflow completes, it pops from the stack and resumes the parent
- Use `abort_workflow` to cancel the current workflow and return to parent

## Aborting Workflows

If a workflow cannot be completed, use `abort_workflow` with an explanation:
- The current workflow is marked as aborted and popped from the stack
- If there was a parent workflow, it becomes active again
- The explanation is saved for debugging and audit purposes

## Best Practices

- Always call `get_workflows` first to understand available options
- Provide clear goals when starting - they're used for context
- Create all expected outputs before calling `finished_step`
- Use instance_id for meaningful names (e.g., client name, quarter)
- Read quality gate feedback carefully before retrying
- Check the `stack` field in responses to understand nesting depth
- Use `abort_workflow` rather than leaving workflows in a broken state
"""
```
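For orientation, here is a minimal sketch of driving this module directly, outside the `deepwork serve` CLI. It assumes only what the file above shows (`create_server` and its defaults) plus the fastmcp library's standard `run()` entry point; the actual wiring lives in `deepwork/cli/serve.py` and may differ.

```python
# A minimal sketch, not the deepwork CLI itself. Assumes the fastmcp
# package is installed; FastMCP.run() is fastmcp's standard entry point
# and serves over stdio by default.
from deepwork.mcp.server import create_server

mcp = create_server(
    project_root=".",           # resolved to an absolute path internally
    enable_quality_gate=True,   # defaults match the signature above
    quality_gate_timeout=120,
    quality_gate_max_attempts=3,
)
mcp.run()
```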
deepwork/mcp/state.py
ADDED
@@ -0,0 +1,422 @@

```python
"""Workflow state management for MCP server.

State is persisted to `.deepwork/tmp/session_[id].json` for transparency
and recovery.

Supports nested workflows via a session stack - when a step starts a new
workflow, it's pushed onto the stack. When a workflow completes or is
aborted, it's popped from the stack.
"""

from __future__ import annotations

import asyncio
import json
import uuid
from datetime import UTC, datetime
from pathlib import Path

import aiofiles

from deepwork.mcp.schemas import StackEntry, StepProgress, WorkflowSession


class StateError(Exception):
    """Exception raised for state management errors."""

    pass


class StateManager:
    """Manages workflow session state with stack-based nesting support.

    Sessions are persisted to `.deepwork/tmp/` as JSON files for:
    - Transparency: Users can inspect session state
    - Recovery: Sessions survive server restarts
    - Debugging: State history is preserved

    This implementation is async-safe and uses a lock to prevent
    concurrent access issues.

    Supports nested workflows via a session stack - starting a new workflow
    while one is active pushes onto the stack. Completing or aborting pops
    from the stack.
    """

    def __init__(self, project_root: Path):
        """Initialize state manager.

        Args:
            project_root: Path to the project root
        """
        self.project_root = project_root
        self.sessions_dir = project_root / ".deepwork" / "tmp"
        self._session_stack: list[WorkflowSession] = []
        self._lock = asyncio.Lock()

    def _ensure_sessions_dir(self) -> None:
        """Ensure the sessions directory exists."""
        self.sessions_dir.mkdir(parents=True, exist_ok=True)

    def _session_file(self, session_id: str) -> Path:
        """Get the path to a session file."""
        return self.sessions_dir / f"session_{session_id}.json"

    def _generate_session_id(self) -> str:
        """Generate a unique session ID."""
        return str(uuid.uuid4())[:8]

    def _generate_branch_name(
        self, job_name: str, workflow_name: str, instance_id: str | None
    ) -> str:
        """Generate a git branch name for the workflow.

        Format: deepwork/[job_name]-[workflow_name]-[instance_id or date]
        """
        date_str = datetime.now(UTC).strftime("%Y%m%d")
        instance = instance_id or date_str
        return f"deepwork/{job_name}-{workflow_name}-{instance}"

    async def create_session(
        self,
        job_name: str,
        workflow_name: str,
        goal: str,
        first_step_id: str,
        instance_id: str | None = None,
    ) -> WorkflowSession:
        """Create a new workflow session.

        Args:
            job_name: Name of the job
            workflow_name: Name of the workflow
            goal: User's goal for this workflow
            first_step_id: ID of the first step
            instance_id: Optional instance identifier

        Returns:
            New WorkflowSession
        """
        async with self._lock:
            self._ensure_sessions_dir()

            session_id = self._generate_session_id()
            branch_name = self._generate_branch_name(job_name, workflow_name, instance_id)
            now = datetime.now(UTC).isoformat()

            session = WorkflowSession(
                session_id=session_id,
                job_name=job_name,
                workflow_name=workflow_name,
                instance_id=instance_id,
                goal=goal,
                branch_name=branch_name,
                current_step_id=first_step_id,
                current_entry_index=0,
                step_progress={},
                started_at=now,
                status="active",
            )

            await self._save_session_unlocked(session)
            self._session_stack.append(session)
            return session

    async def _save_session_unlocked(self, session: WorkflowSession) -> None:
        """Save session to file (must be called with lock held)."""
        self._ensure_sessions_dir()
        session_file = self._session_file(session.session_id)
        content = json.dumps(session.to_dict(), indent=2)
        async with aiofiles.open(session_file, "w", encoding="utf-8") as f:
            await f.write(content)

    async def _save_session(self, session: WorkflowSession) -> None:
        """Save session to file with lock."""
        async with self._lock:
            await self._save_session_unlocked(session)

    async def load_session(self, session_id: str) -> WorkflowSession:
        """Load a session from file.

        Args:
            session_id: Session ID to load

        Returns:
            WorkflowSession

        Raises:
            StateError: If session not found
        """
        async with self._lock:
            session_file = self._session_file(session_id)
            if not session_file.exists():
                raise StateError(f"Session not found: {session_id}")

            async with aiofiles.open(session_file, encoding="utf-8") as f:
                content = await f.read()
            data = json.loads(content)

            session = WorkflowSession.from_dict(data)
            # Replace top of stack or push if empty
            if self._session_stack:
                self._session_stack[-1] = session
            else:
                self._session_stack.append(session)
            return session

    def get_active_session(self) -> WorkflowSession | None:
        """Get the currently active session (top of stack).

        Returns:
            Active session or None if no session active
        """
        return self._session_stack[-1] if self._session_stack else None

    def require_active_session(self) -> WorkflowSession:
        """Get active session (top of stack) or raise error.

        Returns:
            Active session

        Raises:
            StateError: If no active session
        """
        if not self._session_stack:
            raise StateError("No active workflow session. Use start_workflow to begin a workflow.")
        return self._session_stack[-1]

    async def start_step(self, step_id: str) -> None:
        """Mark a step as started.

        Args:
            step_id: Step ID to start

        Raises:
            StateError: If no active session
        """
        async with self._lock:
            session = self.require_active_session()
            now = datetime.now(UTC).isoformat()

            if step_id not in session.step_progress:
                session.step_progress[step_id] = StepProgress(
                    step_id=step_id,
                    started_at=now,
                )
            else:
                session.step_progress[step_id].started_at = now

            session.current_step_id = step_id
            await self._save_session_unlocked(session)

    async def complete_step(
        self, step_id: str, outputs: list[str], notes: str | None = None
    ) -> None:
        """Mark a step as completed.

        Args:
            step_id: Step ID to complete
            outputs: Output files created
            notes: Optional notes

        Raises:
            StateError: If no active session
        """
        async with self._lock:
            session = self.require_active_session()
            now = datetime.now(UTC).isoformat()

            if step_id not in session.step_progress:
                session.step_progress[step_id] = StepProgress(
                    step_id=step_id,
                    started_at=now,
                )

            progress = session.step_progress[step_id]
            progress.completed_at = now
            progress.outputs = outputs
            progress.notes = notes

            await self._save_session_unlocked(session)

    async def record_quality_attempt(self, step_id: str) -> int:
        """Record a quality gate attempt for a step.

        Args:
            step_id: Step ID

        Returns:
            Total number of attempts for this step

        Raises:
            StateError: If no active session
        """
        async with self._lock:
            session = self.require_active_session()

            if step_id not in session.step_progress:
                session.step_progress[step_id] = StepProgress(step_id=step_id)

            session.step_progress[step_id].quality_attempts += 1
            await self._save_session_unlocked(session)

            return session.step_progress[step_id].quality_attempts

    async def advance_to_step(self, step_id: str, entry_index: int) -> None:
        """Advance the session to a new step.

        Args:
            step_id: New current step ID
            entry_index: Index in workflow step_entries

        Raises:
            StateError: If no active session
        """
        async with self._lock:
            session = self.require_active_session()
            session.current_step_id = step_id
            session.current_entry_index = entry_index
            await self._save_session_unlocked(session)

    async def complete_workflow(self) -> WorkflowSession | None:
        """Mark the workflow as complete and pop from stack.

        Returns:
            The new active session after popping, or None if stack is empty

        Raises:
            StateError: If no active session
        """
        async with self._lock:
            session = self.require_active_session()
            now = datetime.now(UTC).isoformat()
            session.completed_at = now
            session.status = "completed"
            await self._save_session_unlocked(session)

            # Pop completed session from stack
            self._session_stack.pop()

            # Return new active session (if any)
            return self._session_stack[-1] if self._session_stack else None

    async def abort_workflow(
        self, explanation: str
    ) -> tuple[WorkflowSession, WorkflowSession | None]:
        """Abort the current workflow and pop from stack.

        Args:
            explanation: Reason for aborting the workflow

        Returns:
            Tuple of (aborted session, new active session or None)

        Raises:
            StateError: If no active session
        """
        async with self._lock:
            session = self.require_active_session()
            now = datetime.now(UTC).isoformat()
            session.completed_at = now
            session.status = "aborted"
            session.abort_reason = explanation
            await self._save_session_unlocked(session)

            # Pop aborted session from stack
            self._session_stack.pop()

            # Return aborted session and new active session (if any)
            new_active = self._session_stack[-1] if self._session_stack else None
            return session, new_active

    def get_all_outputs(self) -> list[str]:
        """Get all outputs from all completed steps.

        Returns:
            List of all output file paths

        Raises:
            StateError: If no active session
        """
        session = self.require_active_session()
        outputs: list[str] = []
        for progress in session.step_progress.values():
            outputs.extend(progress.outputs)
        return outputs

    def get_stack(self) -> list[StackEntry]:
        """Get the current workflow stack as StackEntry objects.

        Returns:
            List of StackEntry with workflow and step info, bottom to top
        """
        return [
            StackEntry(
                workflow=f"{s.job_name}/{s.workflow_name}",
                step=s.current_step_id,
            )
            for s in self._session_stack
        ]

    def get_stack_depth(self) -> int:
        """Get the current stack depth.

        Returns:
            Number of active workflow sessions on the stack
        """
        return len(self._session_stack)

    async def list_sessions(self) -> list[WorkflowSession]:
        """List all saved sessions.

        Returns:
            List of WorkflowSession objects
        """
        if not self.sessions_dir.exists():
            return []

        sessions = []
        for session_file in self.sessions_dir.glob("session_*.json"):
            try:
                async with aiofiles.open(session_file, encoding="utf-8") as f:
                    content = await f.read()
                data = json.loads(content)
                sessions.append(WorkflowSession.from_dict(data))
            except (json.JSONDecodeError, ValueError):
                # Skip corrupted files
                continue

        return sorted(sessions, key=lambda s: s.started_at, reverse=True)

    async def find_active_sessions_for_workflow(
        self, job_name: str, workflow_name: str
    ) -> list[WorkflowSession]:
        """Find active sessions for a specific workflow.

        Args:
            job_name: Job name
            workflow_name: Workflow name

        Returns:
            List of active sessions matching the criteria
        """
        all_sessions = await self.list_sessions()
        return [
            s
            for s in all_sessions
            if s.job_name == job_name and s.workflow_name == workflow_name and s.status == "active"
        ]

    async def delete_session(self, session_id: str) -> None:
        """Delete a session file.

        Args:
            session_id: Session ID to delete
        """
        async with self._lock:
            session_file = self._session_file(session_id)
            if session_file.exists():
                session_file.unlink()

            # Remove from stack if present
            self._session_stack = [s for s in self._session_stack if s.session_id != session_id]
```
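To make the stack semantics concrete, here is a hedged sketch of a nested-session lifecycle using only the `StateManager` methods defined above. The job, workflow, step, and path names are hypothetical placeholders, and it assumes `WorkflowSession` accepts the fields that `create_session()` passes.

```python
# Sketch of nested sessions, assuming the schemas behave as state.py shows.
# "my_job", "main", "research", and the paths are hypothetical placeholders.
import asyncio
from pathlib import Path

from deepwork.mcp.state import StateManager

async def demo() -> None:
    mgr = StateManager(Path("/path/to/project"))

    # Outer workflow becomes the bottom of the stack.
    await mgr.create_session(
        job_name="my_job", workflow_name="main",
        goal="Ship the report", first_step_id="draft",
    )
    # Creating another session while one is active nests it on the stack.
    await mgr.create_session(
        job_name="my_job", workflow_name="research",
        goal="Gather sources", first_step_id="collect",
    )
    assert mgr.get_stack_depth() == 2

    await mgr.complete_step("collect", outputs=["notes/sources.md"])
    parent = await mgr.complete_workflow()  # pops "research", resumes "main"
    assert parent is not None and parent.workflow_name == "main"

asyncio.run(demo())
```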