deepwork 0.3.1__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. deepwork/cli/hook.py +70 -0
  2. deepwork/cli/install.py +58 -14
  3. deepwork/cli/main.py +4 -0
  4. deepwork/cli/rules.py +32 -0
  5. deepwork/cli/sync.py +27 -1
  6. deepwork/core/adapters.py +209 -0
  7. deepwork/core/command_executor.py +26 -9
  8. deepwork/core/doc_spec_parser.py +205 -0
  9. deepwork/core/generator.py +79 -4
  10. deepwork/core/hooks_syncer.py +15 -2
  11. deepwork/core/parser.py +64 -2
  12. deepwork/hooks/__init__.py +9 -3
  13. deepwork/hooks/check_version.sh +230 -0
  14. deepwork/hooks/claude_hook.sh +13 -17
  15. deepwork/hooks/gemini_hook.sh +13 -17
  16. deepwork/hooks/rules_check.py +71 -12
  17. deepwork/hooks/wrapper.py +66 -16
  18. deepwork/schemas/doc_spec_schema.py +64 -0
  19. deepwork/schemas/job_schema.py +25 -3
  20. deepwork/standard_jobs/deepwork_jobs/doc_specs/job_spec.md +190 -0
  21. deepwork/standard_jobs/deepwork_jobs/job.yml +41 -8
  22. deepwork/standard_jobs/deepwork_jobs/steps/define.md +68 -2
  23. deepwork/standard_jobs/deepwork_jobs/steps/implement.md +3 -3
  24. deepwork/standard_jobs/deepwork_jobs/steps/learn.md +74 -5
  25. deepwork/standard_jobs/deepwork_jobs/steps/review_job_spec.md +208 -0
  26. deepwork/standard_jobs/deepwork_jobs/templates/doc_spec.md.example +86 -0
  27. deepwork/standard_jobs/deepwork_jobs/templates/doc_spec.md.template +26 -0
  28. deepwork/standard_jobs/deepwork_rules/hooks/capture_prompt_work_tree.sh +8 -0
  29. deepwork/standard_jobs/deepwork_rules/job.yml +5 -3
  30. deepwork/templates/claude/skill-job-meta.md.jinja +7 -0
  31. deepwork/templates/claude/skill-job-step.md.jinja +60 -7
  32. deepwork/templates/gemini/skill-job-step.toml.jinja +18 -3
  33. deepwork/utils/fs.py +36 -0
  34. deepwork/utils/yaml_utils.py +24 -0
  35. {deepwork-0.3.1.dist-info → deepwork-0.4.0.dist-info}/METADATA +39 -2
  36. {deepwork-0.3.1.dist-info → deepwork-0.4.0.dist-info}/RECORD +39 -30
  37. {deepwork-0.3.1.dist-info → deepwork-0.4.0.dist-info}/WHEEL +0 -0
  38. {deepwork-0.3.1.dist-info → deepwork-0.4.0.dist-info}/entry_points.txt +0 -0
  39. {deepwork-0.3.1.dist-info → deepwork-0.4.0.dist-info}/licenses/LICENSE.md +0 -0
@@ -0,0 +1,205 @@
1
+ """Doc spec parser."""
2
+
3
+ import re
4
+ from dataclasses import dataclass, field
5
+ from pathlib import Path
6
+ from typing import Any
7
+
8
+ from deepwork.schemas.doc_spec_schema import DOC_SPEC_FRONTMATTER_SCHEMA
9
+ from deepwork.utils.validation import ValidationError, validate_against_schema
10
+ from deepwork.utils.yaml_utils import YAMLError, load_yaml_from_string
11
+
12
+
13
class DocSpecParseError(Exception):
    """Raised when a doc spec file cannot be parsed or fails validation."""
17
+
18
+
19
@dataclass
class QualityCriterion:
    """A single named quality criterion that a doc spec document must satisfy."""

    # Short identifier of the criterion.
    name: str
    # Human-readable explanation of what the criterion requires.
    description: str

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "QualityCriterion":
        """Build a QualityCriterion from a parsed YAML mapping.

        Raises KeyError if 'name' or 'description' is missing.
        """
        return cls(name=data["name"], description=data["description"])
33
+
34
+
35
@dataclass
class DocSpec:
    """A document specification: describes a produced document and how to judge it."""

    # --- Required fields ---
    name: str
    description: str
    quality_criteria: list[QualityCriterion]

    # --- Optional fields ---
    path_patterns: list[str] = field(default_factory=list)
    target_audience: str | None = None
    frequency: str | None = None

    # Markdown body following the frontmatter: an example conforming document.
    example_document: str = ""

    # Where this spec was loaded from, if it came from a file.
    source_file: Path | None = None

    @classmethod
    def from_dict(
        cls, data: dict[str, Any], example_document: str = "", source_file: Path | None = None
    ) -> "DocSpec":
        """
        Create DocSpec from dictionary.

        Args:
            data: Parsed YAML frontmatter data
            example_document: The markdown body content (example document)
            source_file: Path to the source doc spec file

        Returns:
            DocSpec instance
        """
        criteria = [QualityCriterion.from_dict(item) for item in data["quality_criteria"]]
        return cls(
            name=data["name"],
            description=data["description"],
            quality_criteria=criteria,
            path_patterns=data.get("path_patterns", []),
            target_audience=data.get("target_audience"),
            frequency=data.get("frequency"),
            example_document=example_document,
            source_file=source_file,
        )


# Backward compatibility alias for the former class name.
DocumentTypeDefinition = DocSpec
84
+
85
+
86
def _parse_frontmatter_markdown(content: str) -> tuple[dict[str, Any], str]:
    """
    Parse frontmatter from markdown content.

    Expects format:
        ---
        key: value
        ---
        markdown body

    Args:
        content: Full file content

    Returns:
        Tuple of (frontmatter dict, body content)

    Raises:
        DocSpecParseError: If frontmatter is missing or invalid
    """
    # Frontmatter is delimited by a pair of '---' lines; the (.*?) group
    # (possibly empty) is the YAML, the trailing group is the body.
    pattern = r"^---[ \t]*\n(.*?)^---[ \t]*\n?(.*)"
    match = re.match(pattern, content.strip(), re.DOTALL | re.MULTILINE)
    if match is None:
        raise DocSpecParseError(
            "Doc spec file must have YAML frontmatter (content between --- markers)"
        )

    raw_yaml = match.group(1)
    raw_body = match.group(2)
    body = raw_body.strip() if raw_body else ""

    try:
        frontmatter = load_yaml_from_string(raw_yaml)
    except YAMLError as e:
        raise DocSpecParseError(f"Failed to parse doc spec frontmatter: {e}") from e

    if frontmatter is None:
        raise DocSpecParseError("Doc spec frontmatter is empty")

    return frontmatter, body
127
+
128
+
129
def parse_doc_spec_file(filepath: Path | str) -> DocSpec:
    """
    Parse a doc spec file.

    Args:
        filepath: Path to the doc spec file (markdown with YAML frontmatter)

    Returns:
        Parsed DocSpec

    Raises:
        DocSpecParseError: If parsing fails or validation errors occur
    """
    filepath = Path(filepath)

    # Guard clauses: the path must be an existing regular file.
    if not filepath.exists():
        raise DocSpecParseError(f"Doc spec file does not exist: {filepath}")
    if not filepath.is_file():
        raise DocSpecParseError(f"Doc spec path is not a file: {filepath}")

    try:
        content = filepath.read_text(encoding="utf-8")
    except Exception as e:
        raise DocSpecParseError(f"Failed to read doc spec file: {e}") from e

    # Split the file into YAML frontmatter and the example-document body.
    frontmatter, body = _parse_frontmatter_markdown(content)

    # Schema validation happens before object construction so errors are precise.
    try:
        validate_against_schema(frontmatter, DOC_SPEC_FRONTMATTER_SCHEMA)
    except ValidationError as e:
        raise DocSpecParseError(f"Doc spec validation failed: {e}") from e

    return DocSpec.from_dict(frontmatter, body, filepath)
167
+
168
+
169
def load_doc_specs_from_directory(
    doc_specs_dir: Path | str,
) -> dict[str, DocSpec]:
    """
    Load all doc spec files from a directory.

    Args:
        doc_specs_dir: Path to the doc_specs directory

    Returns:
        Dictionary mapping doc spec filename (without extension) to DocSpec

    Raises:
        DocSpecParseError: If the path is not a directory or any doc spec
            file fails to parse (the error names the offending file)
    """
    doc_specs_dir = Path(doc_specs_dir)

    # A missing directory simply means no doc specs are defined.
    if not doc_specs_dir.exists():
        return {}

    if not doc_specs_dir.is_dir():
        raise DocSpecParseError(f"Doc specs path is not a directory: {doc_specs_dir}")

    doc_specs: dict[str, DocSpec] = {}

    # Sort for a deterministic load order across filesystems.
    for doc_spec_file in sorted(doc_specs_dir.glob("*.md")):
        # Use stem (filename without extension) as key.
        try:
            doc_specs[doc_spec_file.stem] = parse_doc_spec_file(doc_spec_file)
        except DocSpecParseError as e:
            # Re-raise with context about which file failed (the original
            # code re-raised without adding the promised context).
            raise DocSpecParseError(
                f"Failed to load doc spec '{doc_spec_file.name}': {e}"
            ) from e

    return doc_specs
@@ -6,6 +6,11 @@ from typing import Any
6
6
  from jinja2 import Environment, FileSystemLoader, TemplateNotFound
7
7
 
8
8
  from deepwork.core.adapters import AgentAdapter, SkillLifecycleHook
9
+ from deepwork.core.doc_spec_parser import (
10
+ DocSpec,
11
+ DocSpecParseError,
12
+ parse_doc_spec_file,
13
+ )
9
14
  from deepwork.core.parser import JobDefinition, Step
10
15
  from deepwork.schemas.job_schema import LIFECYCLE_HOOK_EVENTS
11
16
  from deepwork.utils.fs import safe_read, safe_write
@@ -37,6 +42,35 @@ class SkillGenerator:
37
42
  if not self.templates_dir.exists():
38
43
  raise GeneratorError(f"Templates directory not found: {self.templates_dir}")
39
44
 
45
+ # Cache for loaded doc specs (keyed by absolute file path)
46
+ self._doc_spec_cache: dict[Path, DocSpec] = {}
47
+
48
+ def _load_doc_spec(self, project_root: Path, doc_spec_path: str) -> DocSpec | None:
49
+ """
50
+ Load a doc spec by file path with caching.
51
+
52
+ Args:
53
+ project_root: Path to project root
54
+ doc_spec_path: Relative path to doc spec file (e.g., ".deepwork/doc_specs/report.md")
55
+
56
+ Returns:
57
+ DocSpec if file exists and parses, None otherwise
58
+ """
59
+ full_path = project_root / doc_spec_path
60
+ if full_path in self._doc_spec_cache:
61
+ return self._doc_spec_cache[full_path]
62
+
63
+ if not full_path.exists():
64
+ return None
65
+
66
+ try:
67
+ doc_spec = parse_doc_spec_file(full_path)
68
+ except DocSpecParseError:
69
+ return None
70
+
71
+ self._doc_spec_cache[full_path] = doc_spec
72
+ return doc_spec
73
+
40
74
  def _get_jinja_env(self, adapter: AgentAdapter) -> Environment:
41
75
  """
42
76
  Get Jinja2 environment for an adapter.
@@ -113,7 +147,12 @@ class SkillGenerator:
113
147
  return hook_ctx
114
148
 
115
149
  def _build_step_context(
116
- self, job: JobDefinition, step: Step, step_index: int, adapter: AgentAdapter
150
+ self,
151
+ job: JobDefinition,
152
+ step: Step,
153
+ step_index: int,
154
+ adapter: AgentAdapter,
155
+ project_root: Path | None = None,
117
156
  ) -> dict[str, Any]:
118
157
  """
119
158
  Build template context for a step.
@@ -123,6 +162,7 @@ class SkillGenerator:
123
162
  step: Step to generate context for
124
163
  step_index: Index of step in job (0-based)
125
164
  adapter: Agent adapter for platform-specific hook name mapping
165
+ project_root: Optional project root for loading doc specs
126
166
 
127
167
  Returns:
128
168
  Template context dictionary
@@ -173,11 +213,40 @@ class SkillGenerator:
173
213
  if hook_contexts:
174
214
  hooks[platform_event_name] = hook_contexts
175
215
 
216
+ # Claude Code has separate Stop and SubagentStop events. When a Stop hook
217
+ # is defined, also register it for SubagentStop so it triggers for both
218
+ # the main agent and subagents.
219
+ if "Stop" in hooks:
220
+ hooks["SubagentStop"] = hooks["Stop"]
221
+
176
222
  # Backward compatibility: stop_hooks is after_agent hooks
177
223
  stop_hooks = hooks.get(
178
224
  adapter.get_platform_hook_name(SkillLifecycleHook.AFTER_AGENT) or "Stop", []
179
225
  )
180
226
 
227
+ # Build rich outputs context with doc spec information
228
+ outputs_context = []
229
+ for output in step.outputs:
230
+ output_ctx: dict[str, Any] = {
231
+ "file": output.file,
232
+ "has_doc_spec": output.has_doc_spec(),
233
+ }
234
+ if output.has_doc_spec() and output.doc_spec and project_root:
235
+ doc_spec = self._load_doc_spec(project_root, output.doc_spec)
236
+ if doc_spec:
237
+ output_ctx["doc_spec"] = {
238
+ "path": output.doc_spec,
239
+ "name": doc_spec.name,
240
+ "description": doc_spec.description,
241
+ "target_audience": doc_spec.target_audience,
242
+ "quality_criteria": [
243
+ {"name": c.name, "description": c.description}
244
+ for c in doc_spec.quality_criteria
245
+ ],
246
+ "example_document": doc_spec.example_document,
247
+ }
248
+ outputs_context.append(output_ctx)
249
+
181
250
  return {
182
251
  "job_name": job.name,
183
252
  "job_version": job.version,
@@ -192,7 +261,7 @@ class SkillGenerator:
192
261
  "instructions_content": instructions_content,
193
262
  "user_inputs": user_inputs,
194
263
  "file_inputs": file_inputs,
195
- "outputs": step.outputs,
264
+ "outputs": outputs_context,
196
265
  "dependencies": step.dependencies,
197
266
  "next_step": next_step,
198
267
  "prev_step": prev_step,
@@ -315,6 +384,7 @@ class SkillGenerator:
315
384
  step: Step,
316
385
  adapter: AgentAdapter,
317
386
  output_dir: Path | str,
387
+ project_root: Path | str | None = None,
318
388
  ) -> Path:
319
389
  """
320
390
  Generate skill file for a single step.
@@ -324,6 +394,7 @@ class SkillGenerator:
324
394
  step: Step to generate skill for
325
395
  adapter: Agent adapter for the target platform
326
396
  output_dir: Directory to write skill file to
397
+ project_root: Optional project root for loading doc specs (defaults to output_dir)
327
398
 
328
399
  Returns:
329
400
  Path to generated skill file
@@ -332,6 +403,7 @@ class SkillGenerator:
332
403
  GeneratorError: If generation fails
333
404
  """
334
405
  output_dir = Path(output_dir)
406
+ project_root_path = Path(project_root) if project_root else output_dir
335
407
 
336
408
  # Create skills subdirectory if needed
337
409
  skills_dir = output_dir / adapter.skills_dir
@@ -344,7 +416,7 @@ class SkillGenerator:
344
416
  raise GeneratorError(f"Step '{step.id}' not found in job '{job.name}'") from e
345
417
 
346
418
  # Build context (include exposed for template user-invocable setting)
347
- context = self._build_step_context(job, step, step_index, adapter)
419
+ context = self._build_step_context(job, step, step_index, adapter, project_root_path)
348
420
  context["exposed"] = step.exposed
349
421
 
350
422
  # Load and render template
@@ -378,6 +450,7 @@ class SkillGenerator:
378
450
  job: JobDefinition,
379
451
  adapter: AgentAdapter,
380
452
  output_dir: Path | str,
453
+ project_root: Path | str | None = None,
381
454
  ) -> list[Path]:
382
455
  """
383
456
  Generate all skill files for a job: meta-skill and step skills.
@@ -386,6 +459,7 @@ class SkillGenerator:
386
459
  job: Job definition
387
460
  adapter: Agent adapter for the target platform
388
461
  output_dir: Directory to write skill files to
462
+ project_root: Optional project root for loading doc specs (defaults to output_dir)
389
463
 
390
464
  Returns:
391
465
  List of paths to generated skill files (meta-skill first, then steps)
@@ -394,6 +468,7 @@ class SkillGenerator:
394
468
  GeneratorError: If generation fails
395
469
  """
396
470
  skill_paths = []
471
+ project_root_path = Path(project_root) if project_root else Path(output_dir)
397
472
 
398
473
  # Generate meta-skill first (job-level entry point)
399
474
  meta_skill_path = self.generate_meta_skill(job, adapter, output_dir)
@@ -401,7 +476,7 @@ class SkillGenerator:
401
476
 
402
477
  # Generate step skills
403
478
  for step in job.steps:
404
- skill_path = self.generate_step_skill(job, step, adapter, output_dir)
479
+ skill_path = self.generate_step_skill(job, step, adapter, output_dir, project_root_path)
405
480
  skill_paths.append(skill_path)
406
481
 
407
482
  return skill_paths
@@ -35,8 +35,10 @@ class HookEntry:
35
35
  Command string to execute
36
36
  """
37
37
  if self.module:
38
- # Python module - run directly with python -m
39
- return f"python -m {self.module}"
38
+ # Python module - use deepwork hook CLI for portability
39
+ # Extract hook name from module path (e.g., "deepwork.hooks.rules_check" -> "rules_check")
40
+ hook_name = self.module.rsplit(".", 1)[-1]
41
+ return f"deepwork hook {hook_name}"
40
42
  elif self.script:
41
43
  # Script path is: .deepwork/jobs/{job_name}/hooks/{script}
42
44
  script_path = self.job_dir / "hooks" / self.script
@@ -187,6 +189,17 @@ def merge_hooks_for_platform(
187
189
  if not _hook_already_present(merged[event], command):
188
190
  merged[event].append(hook_config)
189
191
 
192
+ # Claude Code has separate Stop and SubagentStop events. When a Stop hook
193
+ # is defined, also register it for SubagentStop so it triggers for both
194
+ # the main agent and subagents.
195
+ if "Stop" in merged:
196
+ if "SubagentStop" not in merged:
197
+ merged["SubagentStop"] = []
198
+ for hook_config in merged["Stop"]:
199
+ command = hook_config.get("hooks", [{}])[0].get("command", "")
200
+ if not _hook_already_present(merged["SubagentStop"], command):
201
+ merged["SubagentStop"].append(hook_config)
202
+
190
203
  return merged
191
204
 
192
205
 
deepwork/core/parser.py CHANGED
@@ -46,6 +46,34 @@ class StepInput:
46
46
  )
47
47
 
48
48
 
49
+ @dataclass
50
+ class OutputSpec:
51
+ """Represents a step output specification, optionally with doc spec reference."""
52
+
53
+ file: str
54
+ doc_spec: str | None = None
55
+
56
+ def has_doc_spec(self) -> bool:
57
+ """Check if this output has a doc spec reference."""
58
+ return self.doc_spec is not None
59
+
60
+ @classmethod
61
+ def from_dict(cls, data: dict[str, Any] | str) -> "OutputSpec":
62
+ """
63
+ Create OutputSpec from dictionary or string.
64
+
65
+ Supports both formats:
66
+ - String: "output.md" -> OutputSpec(file="output.md")
67
+ - Dict: {"file": "output.md", "doc_spec": ".deepwork/doc_specs/report.md"}
68
+ """
69
+ if isinstance(data, str):
70
+ return cls(file=data)
71
+ return cls(
72
+ file=data["file"],
73
+ doc_spec=data.get("doc_spec"),
74
+ )
75
+
76
+
49
77
  @dataclass
50
78
  class HookAction:
51
79
  """Represents a hook action configuration.
@@ -101,7 +129,7 @@ class Step:
101
129
  description: str
102
130
  instructions_file: str
103
131
  inputs: list[StepInput] = field(default_factory=list)
104
- outputs: list[str] = field(default_factory=list)
132
+ outputs: list[OutputSpec] = field(default_factory=list)
105
133
  dependencies: list[str] = field(default_factory=list)
106
134
 
107
135
  # New: hooks dict mapping lifecycle event names to HookAction lists
@@ -147,7 +175,7 @@ class Step:
147
175
  description=data["description"],
148
176
  instructions_file=data["instructions_file"],
149
177
  inputs=[StepInput.from_dict(inp) for inp in data.get("inputs", [])],
150
- outputs=data["outputs"],
178
+ outputs=[OutputSpec.from_dict(out) for out in data["outputs"]],
151
179
  dependencies=data.get("dependencies", []),
152
180
  hooks=hooks,
153
181
  exposed=data.get("exposed", False),
@@ -246,6 +274,40 @@ class JobDefinition:
246
274
  f"but '{inp.from_step}' is not in dependencies"
247
275
  )
248
276
 
277
+ def validate_doc_spec_references(self, project_root: Path) -> None:
278
+ """
279
+ Validate that doc spec references in outputs point to existing files.
280
+
281
+ Args:
282
+ project_root: Path to the project root directory
283
+
284
+ Raises:
285
+ ParseError: If doc spec references are invalid
286
+ """
287
+ for step in self.steps:
288
+ for output in step.outputs:
289
+ if output.has_doc_spec():
290
+ doc_spec_file = project_root / output.doc_spec
291
+ if not doc_spec_file.exists():
292
+ raise ParseError(
293
+ f"Step '{step.id}' references non-existent doc spec "
294
+ f"'{output.doc_spec}'. Expected file at {doc_spec_file}"
295
+ )
296
+
297
+ def get_doc_spec_references(self) -> list[str]:
298
+ """
299
+ Get all unique doc spec file paths referenced in this job's outputs.
300
+
301
+ Returns:
302
+ List of doc spec file paths (e.g., ".deepwork/doc_specs/report.md")
303
+ """
304
+ doc_spec_refs = set()
305
+ for step in self.steps:
306
+ for output in step.outputs:
307
+ if output.has_doc_spec() and output.doc_spec:
308
+ doc_spec_refs.add(output.doc_spec)
309
+ return list(doc_spec_refs)
310
+
249
311
  @classmethod
250
312
  def from_dict(cls, data: dict[str, Any], job_dir: Path) -> "JobDefinition":
251
313
  """
@@ -17,7 +17,7 @@ Usage with wrapper system:
17
17
  "Stop": [{
18
18
  "hooks": [{
19
19
  "type": "command",
20
- "command": ".deepwork/hooks/claude_hook.sh deepwork.hooks.rules_check"
20
+ "command": ".deepwork/hooks/claude_hook.sh rules_check"
21
21
  }]
22
22
  }]
23
23
  }
@@ -29,12 +29,15 @@ Usage with wrapper system:
29
29
  "AfterAgent": [{
30
30
  "hooks": [{
31
31
  "type": "command",
32
- "command": ".gemini/hooks/gemini_hook.sh deepwork.hooks.rules_check"
32
+ "command": ".gemini/hooks/gemini_hook.sh rules_check"
33
33
  }]
34
34
  }]
35
35
  }
36
36
  }
37
37
 
38
+ The shell wrappers call `deepwork hook <hook_name>` which works regardless
39
+ of how deepwork was installed (pipx, uv, nix flake, etc.).
40
+
38
41
  Writing custom hooks:
39
42
  from deepwork.hooks.wrapper import (
40
43
  HookInput,
@@ -50,10 +53,13 @@ Writing custom hooks:
50
53
  return HookOutput(decision="block", reason="Complete X first")
51
54
  return HookOutput()
52
55
 
53
- if __name__ == "__main__":
56
+ def main():
54
57
  import os, sys
55
58
  platform = Platform(os.environ.get("DEEPWORK_HOOK_PLATFORM", "claude"))
56
59
  sys.exit(run_hook(my_hook, platform))
60
+
61
+ if __name__ == "__main__":
62
+ main()
57
63
  """
58
64
 
59
65
  from deepwork.hooks.wrapper import (