deepwork-0.4.0-py3-none-any.whl → deepwork-0.7.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepwork/__init__.py +1 -1
- deepwork/cli/hook.py +3 -4
- deepwork/cli/install.py +70 -117
- deepwork/cli/main.py +2 -2
- deepwork/cli/serve.py +133 -0
- deepwork/cli/sync.py +93 -58
- deepwork/core/adapters.py +91 -98
- deepwork/core/generator.py +19 -386
- deepwork/core/hooks_syncer.py +1 -1
- deepwork/core/parser.py +270 -1
- deepwork/hooks/README.md +0 -44
- deepwork/hooks/__init__.py +3 -6
- deepwork/hooks/check_version.sh +54 -21
- deepwork/mcp/__init__.py +23 -0
- deepwork/mcp/quality_gate.py +347 -0
- deepwork/mcp/schemas.py +263 -0
- deepwork/mcp/server.py +253 -0
- deepwork/mcp/state.py +422 -0
- deepwork/mcp/tools.py +394 -0
- deepwork/schemas/job.schema.json +347 -0
- deepwork/schemas/job_schema.py +27 -239
- deepwork/standard_jobs/deepwork_jobs/doc_specs/job_spec.md +9 -15
- deepwork/standard_jobs/deepwork_jobs/job.yml +146 -46
- deepwork/standard_jobs/deepwork_jobs/steps/define.md +100 -33
- deepwork/standard_jobs/deepwork_jobs/steps/errata.md +154 -0
- deepwork/standard_jobs/deepwork_jobs/steps/fix_jobs.md +207 -0
- deepwork/standard_jobs/deepwork_jobs/steps/fix_settings.md +177 -0
- deepwork/standard_jobs/deepwork_jobs/steps/implement.md +22 -138
- deepwork/standard_jobs/deepwork_jobs/steps/iterate.md +221 -0
- deepwork/standard_jobs/deepwork_jobs/steps/learn.md +2 -26
- deepwork/standard_jobs/deepwork_jobs/steps/test.md +154 -0
- deepwork/standard_jobs/deepwork_jobs/templates/job.yml.template +2 -0
- deepwork/templates/claude/AGENTS.md +38 -0
- deepwork/templates/claude/settings.json +16 -0
- deepwork/templates/claude/skill-deepwork.md.jinja +37 -0
- deepwork/templates/gemini/skill-deepwork.md.jinja +37 -0
- deepwork-0.7.0.dist-info/METADATA +317 -0
- deepwork-0.7.0.dist-info/RECORD +64 -0
- deepwork/cli/rules.py +0 -32
- deepwork/core/command_executor.py +0 -190
- deepwork/core/pattern_matcher.py +0 -271
- deepwork/core/rules_parser.py +0 -559
- deepwork/core/rules_queue.py +0 -321
- deepwork/hooks/rules_check.py +0 -759
- deepwork/schemas/rules_schema.py +0 -135
- deepwork/standard_jobs/deepwork_jobs/steps/review_job_spec.md +0 -208
- deepwork/standard_jobs/deepwork_jobs/templates/doc_spec.md.example +0 -86
- deepwork/standard_jobs/deepwork_rules/hooks/capture_prompt_work_tree.sh +0 -38
- deepwork/standard_jobs/deepwork_rules/hooks/global_hooks.yml +0 -8
- deepwork/standard_jobs/deepwork_rules/hooks/user_prompt_submit.sh +0 -16
- deepwork/standard_jobs/deepwork_rules/job.yml +0 -49
- deepwork/standard_jobs/deepwork_rules/rules/.gitkeep +0 -13
- deepwork/standard_jobs/deepwork_rules/rules/api-documentation-sync.md.example +0 -10
- deepwork/standard_jobs/deepwork_rules/rules/readme-documentation.md.example +0 -10
- deepwork/standard_jobs/deepwork_rules/rules/security-review.md.example +0 -11
- deepwork/standard_jobs/deepwork_rules/rules/skill-md-validation.md +0 -46
- deepwork/standard_jobs/deepwork_rules/rules/source-test-pairing.md.example +0 -13
- deepwork/standard_jobs/deepwork_rules/steps/define.md +0 -249
- deepwork/templates/claude/skill-job-meta.md.jinja +0 -77
- deepwork/templates/claude/skill-job-step.md.jinja +0 -251
- deepwork/templates/gemini/skill-job-meta.toml.jinja +0 -76
- deepwork/templates/gemini/skill-job-step.toml.jinja +0 -162
- deepwork-0.4.0.dist-info/METADATA +0 -381
- deepwork-0.4.0.dist-info/RECORD +0 -71
- {deepwork-0.4.0.dist-info → deepwork-0.7.0.dist-info}/WHEEL +0 -0
- {deepwork-0.4.0.dist-info → deepwork-0.7.0.dist-info}/entry_points.txt +0 -0
- {deepwork-0.4.0.dist-info → deepwork-0.7.0.dist-info}/licenses/LICENSE.md +0 -0
deepwork/core/parser.py (CHANGED)

```diff
@@ -1,5 +1,6 @@
 """Job definition parser."""
 
+import logging
 from dataclasses import dataclass, field
 from pathlib import Path
 from typing import Any
@@ -8,6 +9,8 @@ from deepwork.schemas.job_schema import JOB_SCHEMA, LIFECYCLE_HOOK_EVENTS
 from deepwork.utils.validation import ValidationError, validate_against_schema
 from deepwork.utils.yaml_utils import YAMLError, load_yaml
 
+logger = logging.getLogger("deepwork.parser")
+
 
 class ParseError(Exception):
     """Exception raised for job parsing errors."""
@@ -142,6 +145,9 @@ class Step:
     # Declarative quality criteria rendered with standard evaluation framing
     quality_criteria: list[str] = field(default_factory=list)
 
+    # Agent type for this step (e.g., "general-purpose"). When set, skill uses context: fork
+    agent: str | None = None
+
     @property
     def stop_hooks(self) -> list[HookAction]:
         """
@@ -180,6 +186,78 @@ class Step:
             hooks=hooks,
             exposed=data.get("exposed", False),
             quality_criteria=data.get("quality_criteria", []),
+            agent=data.get("agent"),
+        )
+
+
+@dataclass
+class WorkflowStepEntry:
+    """Represents a single entry in a workflow's step list.
+
+    Each entry can be either:
+    - A single step (sequential execution)
+    - A list of steps (concurrent execution)
+    """
+
+    step_ids: list[str]  # Single step has one ID, concurrent group has multiple
+    is_concurrent: bool = False
+
+    @property
+    def first_step(self) -> str:
+        """Get the first step ID in this entry."""
+        return self.step_ids[0] if self.step_ids else ""
+
+    def all_step_ids(self) -> list[str]:
+        """Get all step IDs in this entry."""
+        return self.step_ids
+
+    @classmethod
+    def from_data(cls, data: str | list[str]) -> "WorkflowStepEntry":
+        """Create WorkflowStepEntry from YAML data (string or list)."""
+        if isinstance(data, str):
+            return cls(step_ids=[data], is_concurrent=False)
+        else:
+            return cls(step_ids=list(data), is_concurrent=True)
+
+
+@dataclass
+class Workflow:
+    """Represents a named workflow grouping steps into a multi-step sequence."""
+
+    name: str
+    summary: str
+    step_entries: list[WorkflowStepEntry]  # List of step entries (sequential or concurrent)
+
+    @property
+    def steps(self) -> list[str]:
+        """Get flattened list of all step IDs for backward compatibility."""
+        result: list[str] = []
+        for entry in self.step_entries:
+            result.extend(entry.step_ids)
+        return result
+
+    def get_step_entry_for_step(self, step_id: str) -> WorkflowStepEntry | None:
+        """Get the workflow step entry containing the given step ID."""
+        for entry in self.step_entries:
+            if step_id in entry.step_ids:
+                return entry
+        return None
+
+    def get_entry_index_for_step(self, step_id: str) -> int | None:
+        """Get the index of the entry containing the given step ID."""
+        for i, entry in enumerate(self.step_entries):
+            if step_id in entry.step_ids:
+                return i
+        return None
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any]) -> "Workflow":
+        """Create Workflow from dictionary."""
+        step_entries = [WorkflowStepEntry.from_data(s) for s in data["steps"]]
+        return cls(
+            name=data["name"],
+            summary=data["summary"],
+            step_entries=step_entries,
         )
 
 
@@ -193,6 +271,7 @@ class JobDefinition:
     description: str | None
     steps: list[Step]
     job_dir: Path
+    workflows: list[Workflow] = field(default_factory=list)
 
     def get_step(self, step_id: str) -> Step | None:
         """
@@ -308,6 +387,190 @@ class JobDefinition:
                 doc_spec_refs.add(output.doc_spec)
         return list(doc_spec_refs)
 
+    def get_workflow_for_step(self, step_id: str) -> Workflow | None:
+        """
+        Get the workflow containing a step.
+
+        Args:
+            step_id: Step ID to look up
+
+        Returns:
+            Workflow containing the step, or None if step is standalone
+        """
+        for workflow in self.workflows:
+            if step_id in workflow.steps:
+                return workflow
+        return None
+
+    def get_next_step_in_workflow(self, step_id: str) -> str | None:
+        """
+        Get the next step in a workflow after the given step.
+
+        Args:
+            step_id: Current step ID
+
+        Returns:
+            Next step ID, or None if this is the last step or not in a workflow
+        """
+        workflow = self.get_workflow_for_step(step_id)
+        if not workflow:
+            return None
+        try:
+            index = workflow.steps.index(step_id)
+            if index < len(workflow.steps) - 1:
+                return workflow.steps[index + 1]
+        except ValueError:
+            pass
+        return None
+
+    def get_prev_step_in_workflow(self, step_id: str) -> str | None:
+        """
+        Get the previous step in a workflow before the given step.
+
+        Args:
+            step_id: Current step ID
+
+        Returns:
+            Previous step ID, or None if this is the first step or not in a workflow
+        """
+        workflow = self.get_workflow_for_step(step_id)
+        if not workflow:
+            return None
+        try:
+            index = workflow.steps.index(step_id)
+            if index > 0:
+                return workflow.steps[index - 1]
+        except ValueError:
+            pass
+        return None
+
+    def get_step_position_in_workflow(self, step_id: str) -> tuple[int, int] | None:
+        """
+        Get the position of a step within its workflow.
+
+        Args:
+            step_id: Step ID to look up
+
+        Returns:
+            Tuple of (1-based position, total steps in workflow), or None if standalone
+        """
+        workflow = self.get_workflow_for_step(step_id)
+        if not workflow:
+            return None
+        try:
+            index = workflow.steps.index(step_id)
+            return (index + 1, len(workflow.steps))
+        except ValueError:
+            return None
+
+    def get_step_entry_position_in_workflow(
+        self, step_id: str
+    ) -> tuple[int, int, WorkflowStepEntry] | None:
+        """
+        Get the entry-based position of a step within its workflow.
+
+        For concurrent step groups, multiple steps share the same entry position.
+
+        Args:
+            step_id: Step ID to look up
+
+        Returns:
+            Tuple of (1-based entry position, total entries, WorkflowStepEntry),
+            or None if standalone
+        """
+        workflow = self.get_workflow_for_step(step_id)
+        if not workflow:
+            return None
+
+        entry_index = workflow.get_entry_index_for_step(step_id)
+        if entry_index is None:
+            return None
+
+        entry = workflow.step_entries[entry_index]
+        return (entry_index + 1, len(workflow.step_entries), entry)
+
+    def get_concurrent_step_info(self, step_id: str) -> tuple[int, int] | None:
+        """
+        Get information about a step's position within a concurrent group.
+
+        Args:
+            step_id: Step ID to look up
+
+        Returns:
+            Tuple of (1-based position in group, total in group) if step is in
+            a concurrent group, None if step is not in a concurrent group
+        """
+        workflow = self.get_workflow_for_step(step_id)
+        if not workflow:
+            return None
+
+        entry = workflow.get_step_entry_for_step(step_id)
+        if entry is None or not entry.is_concurrent:
+            return None
+
+        try:
+            index = entry.step_ids.index(step_id)
+            return (index + 1, len(entry.step_ids))
+        except ValueError:
+            return None
+
+    def validate_workflows(self) -> None:
+        """
+        Validate workflow definitions.
+
+        Raises:
+            ParseError: If workflow references non-existent steps or has duplicates
+        """
+        step_ids = {step.id for step in self.steps}
+        workflow_names = set()
+
+        for workflow in self.workflows:
+            # Check for duplicate workflow names
+            if workflow.name in workflow_names:
+                raise ParseError(f"Duplicate workflow name: '{workflow.name}'")
+            workflow_names.add(workflow.name)
+
+            # Check all step references exist
+            for step_id in workflow.steps:
+                if step_id not in step_ids:
+                    raise ParseError(
+                        f"Workflow '{workflow.name}' references non-existent step '{step_id}'"
+                    )
+
+            # Check for duplicate steps within a workflow
+            seen_steps = set()
+            for step_id in workflow.steps:
+                if step_id in seen_steps:
+                    raise ParseError(
+                        f"Workflow '{workflow.name}' contains duplicate step '{step_id}'"
+                    )
+                seen_steps.add(step_id)
+
+    def warn_orphaned_steps(self) -> list[str]:
+        """
+        Check for steps not included in any workflow and emit warnings.
+
+        Returns:
+            List of orphaned step IDs
+        """
+        # Collect all step IDs referenced in workflows
+        workflow_step_ids: set[str] = set()
+        for workflow in self.workflows:
+            workflow_step_ids.update(workflow.steps)
+
+        # Find orphaned steps
+        orphaned_steps = [step.id for step in self.steps if step.id not in workflow_step_ids]
+
+        if orphaned_steps:
+            logger.warning(
+                "Job '%s' has steps not included in any workflow: %s. "
+                "These steps are not accessible via the MCP interface.",
+                self.name,
+                ", ".join(orphaned_steps),
+            )
+
+        return orphaned_steps
+
     @classmethod
     def from_dict(cls, data: dict[str, Any], job_dir: Path) -> "JobDefinition":
         """
@@ -320,6 +583,7 @@ class JobDefinition:
         Returns:
             JobDefinition instance
         """
+        workflows = [Workflow.from_dict(wf_data) for wf_data in data.get("workflows", [])]
         return cls(
             name=data["name"],
             version=data["version"],
@@ -327,6 +591,7 @@ class JobDefinition:
             description=data.get("description"),
             steps=[Step.from_dict(step_data) for step_data in data["steps"]],
             job_dir=job_dir,
+            workflows=workflows,
         )
 
 
@@ -373,8 +638,12 @@ def parse_job_definition(job_dir: Path | str) -> JobDefinition:
     # Parse into dataclass
    job_def = JobDefinition.from_dict(job_data, job_dir_path)
 
-    # Validate dependencies
+    # Validate dependencies, file inputs, and workflows
     job_def.validate_dependencies()
     job_def.validate_file_inputs()
+    job_def.validate_workflows()
+
+    # Warn about orphaned steps (not in any workflow)
+    job_def.warn_orphaned_steps()
 
     return job_def
```
deepwork/hooks/README.md (CHANGED)

````diff
@@ -15,51 +15,8 @@ The hook system provides:
 - Output denormalization (decision values, JSON structure)
 - Cross-platform compatibility
 
-3. **Hook implementations**:
-   - `rules_check.py` - Evaluates DeepWork rules on `after_agent` events
-
 ## Usage
 
-### Registering Hooks
-
-#### Claude Code (`.claude/settings.json`)
-
-```json
-{
-  "hooks": {
-    "Stop": [
-      {
-        "hooks": [
-          {
-            "type": "command",
-            "command": "path/to/claude_hook.sh deepwork.hooks.rules_check"
-          }
-        ]
-      }
-    ]
-  }
-}
-```
-
-#### Gemini CLI (`.gemini/settings.json`)
-
-```json
-{
-  "hooks": {
-    "AfterAgent": [
-      {
-        "hooks": [
-          {
-            "type": "command",
-            "command": "path/to/gemini_hook.sh deepwork.hooks.rules_check"
-          }
-        ]
-      }
-    ]
-  }
-}
-```
-
 ### Writing Custom Hooks
 
 1. Create a new Python module in `deepwork/hooks/`:
@@ -178,4 +135,3 @@ pytest tests/shell_script_tests/test_hook_wrappers.py -v
 | `wrapper.py` | Cross-platform input/output normalization |
 | `claude_hook.sh` | Shell wrapper for Claude Code |
 | `gemini_hook.sh` | Shell wrapper for Gemini CLI |
-| `rules_check.py` | Cross-platform rule evaluation hook |
````
deepwork/hooks/__init__.py (CHANGED)

```diff
@@ -1,4 +1,4 @@
-"""DeepWork hooks package for
+"""DeepWork hooks package for lifecycle events.
 
 This package provides:
 
@@ -7,9 +7,6 @@ This package provides:
     - claude_hook.sh: Shell wrapper for Claude Code hooks
     - gemini_hook.sh: Shell wrapper for Gemini CLI hooks
 
-2. Hook implementations:
-    - rules_check.py: Evaluates rules on after_agent events
-
 Usage with wrapper system:
     # Register hook in .claude/settings.json:
     {
@@ -17,7 +14,7 @@ Usage with wrapper system:
       "Stop": [{
         "hooks": [{
           "type": "command",
-          "command": ".deepwork/hooks/claude_hook.sh
+          "command": ".deepwork/hooks/claude_hook.sh my_hook"
         }]
       }]
     }
@@ -29,7 +26,7 @@ Usage with wrapper system:
       "AfterAgent": [{
         "hooks": [{
           "type": "command",
-          "command": ".gemini/hooks/gemini_hook.sh
+          "command": ".gemini/hooks/gemini_hook.sh my_hook"
        }]
       }]
     }
```
deepwork/hooks/check_version.sh (CHANGED)

```diff
@@ -10,17 +10,58 @@
 #
 # Uses hookSpecificOutput.additionalContext to pass messages to Claude's context.
 
+# ============================================================================
+# READ STDIN INPUT
+# ============================================================================
+# SessionStart hooks receive JSON input via stdin with session information.
+# We need to read this to check the session source (startup, resume, clear).
+
+HOOK_INPUT=""
+if [ ! -t 0 ]; then
+    HOOK_INPUT=$(cat)
+fi
+
+# ============================================================================
+# SKIP NON-INITIAL SESSIONS
+# ============================================================================
+# SessionStart hooks can be triggered for different reasons:
+# - "startup": Initial session start (user ran `claude` or similar)
+# - "resume": Session resumed (user ran `claude --resume`)
+# - "clear": Context was cleared/compacted
+#
+# We only want to run the full check on initial startup. For resumed or
+# compacted sessions, return immediately with empty JSON to avoid redundant
+# checks and noise.
+
+get_session_source() {
+    # Extract the "source" field from the JSON input
+    # Returns empty string if not found or not valid JSON
+    if [ -n "$HOOK_INPUT" ]; then
+        # Use grep and sed for simple JSON parsing (avoid jq dependency)
+        echo "$HOOK_INPUT" | grep -o '"source"[[:space:]]*:[[:space:]]*"[^"]*"' | sed 's/.*:.*"\([^"]*\)"/\1/' | head -1
+    fi
+}
+
+SESSION_SOURCE=$(get_session_source)
+
+# If source is anything other than "startup" (or empty/missing for backwards compat),
+# skip this hook entirely. Empty source means older Claude Code version that doesn't
+# send the source field - we treat that as an initial session to maintain backwards compat.
+if [ -n "$SESSION_SOURCE" ] && [ "$SESSION_SOURCE" != "startup" ]; then
+    # Non-initial session (resume, clear, etc.) - skip all checks
+    echo '{}'
+    exit 0
+fi
+
 # ============================================================================
 # DEEPWORK INSTALLATION CHECK (BLOCKING)
 # ============================================================================
-# This check runs on
-#
+# This check runs on initial session start because if deepwork is not installed,
+# nothing else will work.
 
 check_deepwork_installed() {
-    # Run 'deepwork
-
-    # 2. Clears any stale rules from the queue, ensuring a clean slate for the session
-    if ! deepwork rules clear_queue >/dev/null 2>&1; then
+    # Run 'deepwork --version' to verify the command is installed and directly invokable
+    if ! deepwork --version >/dev/null 2>&1; then
         return 1
     fi
     return 0
@@ -36,11 +77,13 @@ print_deepwork_error() {
 ERROR: The 'deepwork' command is not available or cannot be directly invoked.
 
 DeepWork must be installed such that running 'deepwork' directly works.
-For example, running 'deepwork
+For example, running 'deepwork --version' should succeed.
 
 IMPORTANT: Do NOT use 'uv run deepwork' or similar wrappers.
 The command must be directly invokable as just 'deepwork'.
 
+To verify: 'deepwork --version' should succeed.
+
 ------------------------------------------------------------------------
 | |
 | Please fix your deepwork installation before proceeding. |
@@ -70,20 +113,10 @@ if ! check_deepwork_installed; then
     exit 2 # Blocking error - prevent session from continuing
 fi
 
-#
-#
-#
-#
-# clear, etc.). We only want to show the version warning once per session to
-# avoid spamming the user. We use an environment variable to track whether
-# we've already run. Note: This relies on the parent process preserving env
-# vars across hook invocations within the same session.
-if [ -n "$DEEPWORK_VERSION_CHECK_DONE" ]; then
-    # Already checked version this session, exit silently with empty JSON
-    echo '{}'
-    exit 0
-fi
-export DEEPWORK_VERSION_CHECK_DONE=1
+# Note: We previously had a re-entry guard using DEEPWORK_VERSION_CHECK_DONE
+# environment variable, but that was unreliable across session resumptions.
+# Now we use the source field in the hook input JSON to detect initial sessions
+# vs resumed/compacted sessions (see SKIP NON-INITIAL SESSIONS section above).
 
 # ============================================================================
 # MINIMUM VERSION CONFIGURATION
```
deepwork/mcp/__init__.py (ADDED)

```diff
@@ -0,0 +1,23 @@
+"""DeepWork MCP Server module.
+
+This module provides an MCP (Model Context Protocol) server that guides AI agents
+through DeepWork workflows via checkpoint calls with quality gate enforcement.
+
+The server exposes three main tools:
+- get_workflows: List all available workflows
+- start_workflow: Initialize a workflow session
+- finished_step: Report step completion and get next instructions
+
+Example usage:
+    deepwork serve --path /path/to/project
+"""
+
+
+def create_server(*args, **kwargs):  # type: ignore
+    """Lazy import to avoid loading fastmcp at module import time."""
+    from deepwork.mcp.server import create_server as _create_server
+
+    return _create_server(*args, **kwargs)
+
+
+__all__ = ["create_server"]
```