deepwork 0.5.1-py3-none-any.whl → 0.7.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. deepwork/__init__.py +1 -1
  2. deepwork/cli/hook.py +3 -4
  3. deepwork/cli/install.py +70 -117
  4. deepwork/cli/main.py +2 -2
  5. deepwork/cli/serve.py +133 -0
  6. deepwork/cli/sync.py +93 -58
  7. deepwork/core/adapters.py +91 -102
  8. deepwork/core/generator.py +19 -386
  9. deepwork/core/hooks_syncer.py +1 -1
  10. deepwork/core/parser.py +270 -1
  11. deepwork/hooks/README.md +0 -44
  12. deepwork/hooks/__init__.py +3 -6
  13. deepwork/hooks/check_version.sh +54 -21
  14. deepwork/mcp/__init__.py +23 -0
  15. deepwork/mcp/quality_gate.py +347 -0
  16. deepwork/mcp/schemas.py +263 -0
  17. deepwork/mcp/server.py +253 -0
  18. deepwork/mcp/state.py +422 -0
  19. deepwork/mcp/tools.py +394 -0
  20. deepwork/schemas/job.schema.json +347 -0
  21. deepwork/schemas/job_schema.py +27 -239
  22. deepwork/standard_jobs/deepwork_jobs/doc_specs/job_spec.md +9 -15
  23. deepwork/standard_jobs/deepwork_jobs/job.yml +146 -46
  24. deepwork/standard_jobs/deepwork_jobs/steps/define.md +100 -33
  25. deepwork/standard_jobs/deepwork_jobs/steps/errata.md +154 -0
  26. deepwork/standard_jobs/deepwork_jobs/steps/fix_jobs.md +207 -0
  27. deepwork/standard_jobs/deepwork_jobs/steps/fix_settings.md +177 -0
  28. deepwork/standard_jobs/deepwork_jobs/steps/implement.md +22 -138
  29. deepwork/standard_jobs/deepwork_jobs/steps/iterate.md +221 -0
  30. deepwork/standard_jobs/deepwork_jobs/steps/learn.md +2 -26
  31. deepwork/standard_jobs/deepwork_jobs/steps/test.md +154 -0
  32. deepwork/standard_jobs/deepwork_jobs/templates/job.yml.template +2 -0
  33. deepwork/templates/claude/settings.json +16 -0
  34. deepwork/templates/claude/skill-deepwork.md.jinja +37 -0
  35. deepwork/templates/gemini/skill-deepwork.md.jinja +37 -0
  36. deepwork-0.7.0.dist-info/METADATA +317 -0
  37. deepwork-0.7.0.dist-info/RECORD +64 -0
  38. deepwork/cli/rules.py +0 -32
  39. deepwork/core/command_executor.py +0 -190
  40. deepwork/core/pattern_matcher.py +0 -271
  41. deepwork/core/rules_parser.py +0 -559
  42. deepwork/core/rules_queue.py +0 -321
  43. deepwork/hooks/rules_check.py +0 -759
  44. deepwork/schemas/rules_schema.py +0 -135
  45. deepwork/standard_jobs/deepwork_jobs/steps/review_job_spec.md +0 -208
  46. deepwork/standard_jobs/deepwork_jobs/templates/doc_spec.md.example +0 -86
  47. deepwork/standard_jobs/deepwork_rules/hooks/capture_prompt_work_tree.sh +0 -38
  48. deepwork/standard_jobs/deepwork_rules/hooks/global_hooks.yml +0 -8
  49. deepwork/standard_jobs/deepwork_rules/hooks/user_prompt_submit.sh +0 -16
  50. deepwork/standard_jobs/deepwork_rules/job.yml +0 -49
  51. deepwork/standard_jobs/deepwork_rules/rules/.gitkeep +0 -13
  52. deepwork/standard_jobs/deepwork_rules/rules/api-documentation-sync.md.example +0 -10
  53. deepwork/standard_jobs/deepwork_rules/rules/readme-documentation.md.example +0 -10
  54. deepwork/standard_jobs/deepwork_rules/rules/security-review.md.example +0 -11
  55. deepwork/standard_jobs/deepwork_rules/rules/skill-md-validation.md +0 -46
  56. deepwork/standard_jobs/deepwork_rules/rules/source-test-pairing.md.example +0 -13
  57. deepwork/standard_jobs/deepwork_rules/steps/define.md +0 -249
  58. deepwork/templates/claude/skill-job-meta.md.jinja +0 -77
  59. deepwork/templates/claude/skill-job-step.md.jinja +0 -235
  60. deepwork/templates/gemini/skill-job-meta.toml.jinja +0 -76
  61. deepwork/templates/gemini/skill-job-step.toml.jinja +0 -162
  62. deepwork-0.5.1.dist-info/METADATA +0 -381
  63. deepwork-0.5.1.dist-info/RECORD +0 -72
  64. {deepwork-0.5.1.dist-info → deepwork-0.7.0.dist-info}/WHEEL +0 -0
  65. {deepwork-0.5.1.dist-info → deepwork-0.7.0.dist-info}/entry_points.txt +0 -0
  66. {deepwork-0.5.1.dist-info → deepwork-0.7.0.dist-info}/licenses/LICENSE.md +0 -0
deepwork/mcp/tools.py ADDED
@@ -0,0 +1,394 @@
+"""MCP tool implementations for DeepWork workflows.
+
+This module provides the core tools for guiding agents through workflows:
+- get_workflows: List all available workflows
+- start_workflow: Initialize a workflow session
+- finished_step: Report step completion and get next instructions
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+from deepwork.core.parser import JobDefinition, ParseError, Workflow, parse_job_definition
+from deepwork.mcp.schemas import (
+    AbortWorkflowInput,
+    AbortWorkflowResponse,
+    ActiveStepInfo,
+    FinishedStepInput,
+    FinishedStepResponse,
+    GetWorkflowsResponse,
+    JobInfo,
+    StartWorkflowInput,
+    StartWorkflowResponse,
+    StepStatus,
+    WorkflowInfo,
+)
+from deepwork.mcp.state import StateManager
+
+if TYPE_CHECKING:
+    from deepwork.mcp.quality_gate import QualityGate
+
+
+class ToolError(Exception):
+    """Exception raised for tool execution errors."""
+
+    pass
+
+
+class WorkflowTools:
+    """Implements the MCP tools for workflow management."""
+
+    def __init__(
+        self,
+        project_root: Path,
+        state_manager: StateManager,
+        quality_gate: QualityGate | None = None,
+        max_quality_attempts: int = 3,
+    ):
+        """Initialize workflow tools.
+
+        Args:
+            project_root: Path to project root
+            state_manager: State manager instance
+            quality_gate: Optional quality gate for step validation
+            max_quality_attempts: Maximum attempts before failing quality gate
+        """
+        self.project_root = project_root
+        self.jobs_dir = project_root / ".deepwork" / "jobs"
+        self.state_manager = state_manager
+        self.quality_gate = quality_gate
+        self.max_quality_attempts = max_quality_attempts
+
+    def _load_all_jobs(self) -> list[JobDefinition]:
+        """Load all job definitions from the jobs directory.
+
+        Returns:
+            List of parsed JobDefinition objects
+        """
+        jobs: list[JobDefinition] = []
+
+        if not self.jobs_dir.exists():
+            return jobs
+
+        for job_dir in self.jobs_dir.iterdir():
+            if job_dir.is_dir() and (job_dir / "job.yml").exists():
+                try:
+                    job = parse_job_definition(job_dir)
+                    jobs.append(job)
+                except ParseError:
+                    # Skip invalid job definitions
+                    continue
+
+        return jobs
+
+    def _job_to_info(self, job: JobDefinition) -> JobInfo:
+        """Convert a JobDefinition to JobInfo for response.
+
+        Args:
+            job: Parsed job definition
+
+        Returns:
+            JobInfo with workflow details
+        """
+        # Convert workflows
+        workflows = [
+            WorkflowInfo(
+                name=wf.name,
+                summary=wf.summary,
+            )
+            for wf in job.workflows
+        ]
+
+        return JobInfo(
+            name=job.name,
+            summary=job.summary,
+            description=job.description,
+            workflows=workflows,
+        )
+
+    def _get_job(self, job_name: str) -> JobDefinition:
+        """Get a specific job by name.
+
+        Args:
+            job_name: Job name to find
+
+        Returns:
+            JobDefinition
+
+        Raises:
+            ToolError: If job not found
+        """
+        job_dir = self.jobs_dir / job_name
+        if not job_dir.exists():
+            raise ToolError(f"Job not found: {job_name}")
+
+        try:
+            return parse_job_definition(job_dir)
+        except ParseError as e:
+            raise ToolError(f"Failed to parse job '{job_name}': {e}") from e
+
+    def _get_workflow(self, job: JobDefinition, workflow_name: str) -> Workflow:
+        """Get a specific workflow from a job.
+
+        Args:
+            job: Job definition
+            workflow_name: Workflow name to find
+
+        Returns:
+            Workflow
+
+        Raises:
+            ToolError: If workflow not found
+        """
+        for wf in job.workflows:
+            if wf.name == workflow_name:
+                return wf
+
+        available = [wf.name for wf in job.workflows]
+        raise ToolError(
+            f"Workflow '{workflow_name}' not found in job '{job.name}'. "
+            f"Available workflows: {', '.join(available)}"
+        )
+
+    def _get_step_instructions(self, job: JobDefinition, step_id: str) -> str:
+        """Get the instruction content for a step.
+
+        Args:
+            job: Job definition
+            step_id: Step ID
+
+        Returns:
+            Step instruction content
+
+        Raises:
+            ToolError: If step or instruction file not found
+        """
+        step = job.get_step(step_id)
+        if step is None:
+            raise ToolError(f"Step not found: {step_id}")
+
+        instructions_path = job.job_dir / step.instructions_file
+        if not instructions_path.exists():
+            raise ToolError(f"Instructions file not found: {step.instructions_file}")
+
+        return instructions_path.read_text(encoding="utf-8")
+
+    # =========================================================================
+    # Tool Implementations
+    # =========================================================================
+
+    def get_workflows(self) -> GetWorkflowsResponse:
+        """List all available workflows.
+
+        Returns:
+            GetWorkflowsResponse with all jobs and their workflows
+        """
+        jobs = self._load_all_jobs()
+        job_infos = [self._job_to_info(job) for job in jobs]
+
+        return GetWorkflowsResponse(jobs=job_infos)
+
+    async def start_workflow(self, input_data: StartWorkflowInput) -> StartWorkflowResponse:
+        """Start a new workflow session.
+
+        Args:
+            input_data: StartWorkflowInput with goal, job_name, workflow_name
+
+        Returns:
+            StartWorkflowResponse with session ID, branch, and first step
+
+        Raises:
+            ToolError: If job or workflow not found
+        """
+        # Load job and workflow
+        job = self._get_job(input_data.job_name)
+        workflow = self._get_workflow(job, input_data.workflow_name)
+
+        if not workflow.steps:
+            raise ToolError(f"Workflow '{workflow.name}' has no steps")
+
+        first_step_id = workflow.steps[0]
+        first_step = job.get_step(first_step_id)
+        if first_step is None:
+            raise ToolError(f"First step not found: {first_step_id}")
+
+        # Create session
+        session = await self.state_manager.create_session(
+            job_name=input_data.job_name,
+            workflow_name=input_data.workflow_name,
+            goal=input_data.goal,
+            first_step_id=first_step_id,
+            instance_id=input_data.instance_id,
+        )
+
+        # Mark first step as started
+        await self.state_manager.start_step(first_step_id)
+
+        # Get step instructions
+        instructions = self._get_step_instructions(job, first_step_id)
+
+        # Get expected outputs
+        step_outputs = [out.file for out in first_step.outputs]
+
+        return StartWorkflowResponse(
+            begin_step=ActiveStepInfo(
+                session_id=session.session_id,
+                branch_name=session.branch_name,
+                step_id=first_step_id,
+                step_expected_outputs=step_outputs,
+                step_quality_criteria=first_step.quality_criteria,
+                step_instructions=instructions,
+            ),
+            stack=self.state_manager.get_stack(),
+        )
+
+    async def finished_step(self, input_data: FinishedStepInput) -> FinishedStepResponse:
+        """Report step completion and get next instructions.
+
+        Args:
+            input_data: FinishedStepInput with outputs and optional notes
+
+        Returns:
+            FinishedStepResponse with status and next step or completion
+
+        Raises:
+            StateError: If no active session
+            ToolError: If quality gate fails after max attempts
+        """
+        session = self.state_manager.require_active_session()
+        current_step_id = session.current_step_id
+
+        # Load job and workflow
+        job = self._get_job(session.job_name)
+        workflow = self._get_workflow(job, session.workflow_name)
+        current_step = job.get_step(current_step_id)
+
+        if current_step is None:
+            raise ToolError(f"Current step not found: {current_step_id}")
+
+        # Run quality gate if available and step has criteria (unless overridden)
+        if (
+            self.quality_gate
+            and current_step.quality_criteria
+            and not input_data.quality_review_override_reason
+        ):
+            attempts = await self.state_manager.record_quality_attempt(current_step_id)
+
+            result = await self.quality_gate.evaluate(
+                quality_criteria=current_step.quality_criteria,
+                outputs=input_data.outputs,
+                project_root=self.project_root,
+            )
+
+            if not result.passed:
+                # Check max attempts
+                if attempts >= self.max_quality_attempts:
+                    raise ToolError(
+                        f"Quality gate failed after {self.max_quality_attempts} attempts. "
+                        f"Feedback: {result.feedback}"
+                    )
+
+                # Return needs_work status
+                failed_criteria = [cr for cr in result.criteria_results if not cr.passed]
+                return FinishedStepResponse(
+                    status=StepStatus.NEEDS_WORK,
+                    feedback=result.feedback,
+                    failed_criteria=failed_criteria,
+                    stack=self.state_manager.get_stack(),
+                )
+
+        # Mark step as completed
+        await self.state_manager.complete_step(
+            step_id=current_step_id,
+            outputs=input_data.outputs,
+            notes=input_data.notes,
+        )
+
+        # Find next step
+        current_entry_index = session.current_entry_index
+        next_entry_index = current_entry_index + 1
+
+        if next_entry_index >= len(workflow.step_entries):
+            # Workflow complete - get outputs before completing (which pops from stack)
+            all_outputs = self.state_manager.get_all_outputs()
+            await self.state_manager.complete_workflow()
+
+            return FinishedStepResponse(
+                status=StepStatus.WORKFLOW_COMPLETE,
+                summary=f"Workflow '{workflow.name}' completed successfully!",
+                all_outputs=all_outputs,
+                stack=self.state_manager.get_stack(),
+            )
+
+        # Get next step
+        next_entry = workflow.step_entries[next_entry_index]
+
+        # For concurrent entries, we use the first step as the "current"
+        # The agent will handle running them in parallel via Task tool
+        next_step_id = next_entry.step_ids[0]
+        next_step = job.get_step(next_step_id)
+
+        if next_step is None:
+            raise ToolError(f"Next step not found: {next_step_id}")
+
+        # Advance session
+        await self.state_manager.advance_to_step(next_step_id, next_entry_index)
+        await self.state_manager.start_step(next_step_id)
+
+        # Get instructions
+        instructions = self._get_step_instructions(job, next_step_id)
+        step_outputs = [out.file for out in next_step.outputs]
+
+        # Add info about concurrent steps if this is a concurrent entry
+        if next_entry.is_concurrent and len(next_entry.step_ids) > 1:
+            concurrent_info = (
+                f"\n\n**CONCURRENT STEPS**: This entry has {len(next_entry.step_ids)} "
+                f"steps that can run in parallel: {', '.join(next_entry.step_ids)}\n"
+                f"Use the Task tool to execute them concurrently."
+            )
+            instructions = instructions + concurrent_info
+
+        # Reload session to get current state after advance
+        session = self.state_manager.require_active_session()
+
+        return FinishedStepResponse(
+            status=StepStatus.NEXT_STEP,
+            begin_step=ActiveStepInfo(
+                session_id=session.session_id,
+                branch_name=session.branch_name,
+                step_id=next_step_id,
+                step_expected_outputs=step_outputs,
+                step_quality_criteria=next_step.quality_criteria,
+                step_instructions=instructions,
+            ),
+            stack=self.state_manager.get_stack(),
+        )
+
+    async def abort_workflow(self, input_data: AbortWorkflowInput) -> AbortWorkflowResponse:
+        """Abort the current workflow and return to the previous one.
+
+        Args:
+            input_data: AbortWorkflowInput with explanation
+
+        Returns:
+            AbortWorkflowResponse with abort info and new stack state
+
+        Raises:
+            StateError: If no active session
+        """
+        aborted_session, new_active = await self.state_manager.abort_workflow(
+            input_data.explanation
+        )
+
+        return AbortWorkflowResponse(
+            aborted_workflow=f"{aborted_session.job_name}/{aborted_session.workflow_name}",
+            aborted_step=aborted_session.current_step_id,
+            explanation=input_data.explanation,
+            stack=self.state_manager.get_stack(),
+            resumed_workflow=(
+                f"{new_active.job_name}/{new_active.workflow_name}" if new_active else None
+            ),
+            resumed_step=new_active.current_step_id if new_active else None,
+        )
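
For orientation, the sketch below shows how the new WorkflowTools API might be driven by an MCP server or a test harness. It is a minimal illustration based only on the signatures visible in this diff: the StateManager constructor and the exact required fields of StartWorkflowInput and FinishedStepInput are assumptions, since deepwork/mcp/state.py and deepwork/mcp/schemas.py are not reproduced here.

# Illustrative sketch only; not part of the package. Assumes StateManager can be
# built from a project root and that the schema inputs accept the field names
# used in tools.py (goal, job_name, workflow_name, outputs, notes).
import asyncio
from pathlib import Path

from deepwork.mcp.schemas import FinishedStepInput, StartWorkflowInput
from deepwork.mcp.state import StateManager
from deepwork.mcp.tools import WorkflowTools


async def run_one_workflow(project_root: Path) -> None:
    state = StateManager(project_root)  # assumed constructor signature
    tools = WorkflowTools(project_root=project_root, state_manager=state)  # no quality gate

    # Discover jobs and workflows found under .deepwork/jobs/
    catalog = tools.get_workflows()
    print([job.name for job in catalog.jobs])

    # Start the first workflow of the first job
    first_job = catalog.jobs[0]
    response = await tools.start_workflow(
        StartWorkflowInput(
            goal="Demonstrate the workflow loop",
            job_name=first_job.name,
            workflow_name=first_job.workflows[0].name,
        )
    )
    print("First step:", response.begin_step.step_id)

    # Report the step as finished; with no quality gate configured this either
    # advances to the next step or completes the workflow.
    result = await tools.finished_step(
        FinishedStepInput(outputs=[], notes="stub run")  # outputs shape is an assumption
    )
    print("Status:", result.status)


if __name__ == "__main__":
    asyncio.run(run_one_workflow(Path.cwd()))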