deepwork 0.3.0-py3-none-any.whl → 0.4.0-py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
- deepwork/cli/hook.py +70 -0
- deepwork/cli/install.py +77 -29
- deepwork/cli/main.py +4 -0
- deepwork/cli/rules.py +32 -0
- deepwork/cli/sync.py +27 -1
- deepwork/core/adapters.py +209 -0
- deepwork/core/command_executor.py +26 -9
- deepwork/core/doc_spec_parser.py +205 -0
- deepwork/core/generator.py +79 -4
- deepwork/core/hooks_syncer.py +15 -2
- deepwork/core/parser.py +64 -2
- deepwork/core/rules_parser.py +58 -10
- deepwork/hooks/__init__.py +9 -3
- deepwork/hooks/check_version.sh +230 -0
- deepwork/hooks/claude_hook.sh +13 -17
- deepwork/hooks/gemini_hook.sh +13 -17
- deepwork/hooks/rules_check.py +269 -24
- deepwork/hooks/wrapper.py +66 -16
- deepwork/schemas/doc_spec_schema.py +64 -0
- deepwork/schemas/job_schema.py +25 -3
- deepwork/schemas/rules_schema.py +38 -6
- deepwork/standard_jobs/deepwork_jobs/doc_specs/job_spec.md +190 -0
- deepwork/standard_jobs/deepwork_jobs/job.yml +41 -8
- deepwork/standard_jobs/deepwork_jobs/steps/define.md +68 -2
- deepwork/standard_jobs/deepwork_jobs/steps/implement.md +3 -3
- deepwork/standard_jobs/deepwork_jobs/steps/learn.md +74 -5
- deepwork/standard_jobs/deepwork_jobs/steps/review_job_spec.md +208 -0
- deepwork/standard_jobs/deepwork_jobs/templates/doc_spec.md.example +86 -0
- deepwork/standard_jobs/deepwork_jobs/templates/doc_spec.md.template +26 -0
- deepwork/standard_jobs/deepwork_rules/hooks/capture_prompt_work_tree.sh +21 -10
- deepwork/standard_jobs/deepwork_rules/job.yml +13 -3
- deepwork/standard_jobs/deepwork_rules/rules/skill-md-validation.md +1 -0
- deepwork/templates/claude/skill-job-meta.md.jinja +7 -0
- deepwork/templates/claude/skill-job-step.md.jinja +60 -7
- deepwork/templates/gemini/skill-job-step.toml.jinja +18 -3
- deepwork/utils/fs.py +36 -0
- deepwork/utils/yaml_utils.py +24 -0
- {deepwork-0.3.0.dist-info → deepwork-0.4.0.dist-info}/METADATA +41 -2
- deepwork-0.4.0.dist-info/RECORD +71 -0
- deepwork-0.3.0.dist-info/RECORD +0 -62
- {deepwork-0.3.0.dist-info → deepwork-0.4.0.dist-info}/WHEEL +0 -0
- {deepwork-0.3.0.dist-info → deepwork-0.4.0.dist-info}/entry_points.txt +0 -0
- {deepwork-0.3.0.dist-info → deepwork-0.4.0.dist-info}/licenses/LICENSE.md +0 -0
deepwork/hooks/rules_check.py
CHANGED
@@ -6,12 +6,15 @@ It uses the wrapper system for cross-platform compatibility.
 
 Rule files are loaded from .deepwork/rules/ directory as frontmatter markdown files.
 
-Usage (via shell wrapper):
-    claude_hook.sh
-    gemini_hook.sh
+Usage (via shell wrapper - recommended):
+    claude_hook.sh rules_check
+    gemini_hook.sh rules_check
 
-Or directly
-
+Or directly via deepwork CLI:
+    deepwork hook rules_check
+
+Or with platform environment variable:
+    DEEPWORK_HOOK_PLATFORM=claude deepwork hook rules_check
 """
 
 from __future__ import annotations
@@ -199,7 +202,176 @@ def get_changed_files_default_tip() -> list[str]:
 
 
 def get_changed_files_prompt() -> list[str]:
-    """Get files changed since prompt was submitted.
+    """Get files changed since prompt was submitted.
+
+    Returns files that changed since the prompt was submitted, including:
+    - Committed changes (compared to captured HEAD ref)
+    - Staged changes (not yet committed)
+    - Untracked files
+
+    This is used by trigger/safety, set, and pair mode rules to detect
+    file modifications during the agent response.
+    """
+    baseline_ref_path = Path(".deepwork/.last_head_ref")
+    changed_files: set[str] = set()
+
+    try:
+        # Stage all changes first
+        subprocess.run(["git", "add", "-A"], capture_output=True, check=False)
+
+        # If we have a captured HEAD ref, compare committed changes against it
+        if baseline_ref_path.exists():
+            baseline_ref = baseline_ref_path.read_text().strip()
+            if baseline_ref:
+                # Get files changed in commits since the baseline
+                result = subprocess.run(
+                    ["git", "diff", "--name-only", baseline_ref, "HEAD"],
+                    capture_output=True,
+                    text=True,
+                    check=False,
+                )
+                if result.returncode == 0 and result.stdout.strip():
+                    committed_files = set(result.stdout.strip().split("\n"))
+                    changed_files.update(f for f in committed_files if f)
+
+        # Also get currently staged changes (in case not everything is committed)
+        result = subprocess.run(
+            ["git", "diff", "--name-only", "--cached"],
+            capture_output=True,
+            text=True,
+            check=False,
+        )
+        if result.stdout.strip():
+            staged_files = set(result.stdout.strip().split("\n"))
+            changed_files.update(f for f in staged_files if f)
+
+        # Include untracked files
+        result = subprocess.run(
+            ["git", "ls-files", "--others", "--exclude-standard"],
+            capture_output=True,
+            text=True,
+            check=False,
+        )
+        if result.stdout.strip():
+            untracked_files = set(result.stdout.strip().split("\n"))
+            changed_files.update(f for f in untracked_files if f)
+
+        return sorted(changed_files)
+
+    except (subprocess.CalledProcessError, OSError):
+        return []
+
+
+def get_changed_files_for_mode(mode: str) -> list[str]:
+    """Get changed files for a specific compare_to mode."""
+    if mode == "base":
+        return get_changed_files_base()
+    elif mode == "default_tip":
+        return get_changed_files_default_tip()
+    elif mode == "prompt":
+        return get_changed_files_prompt()
+    else:
+        return get_changed_files_base()
+
+
+def get_created_files_base() -> list[str]:
+    """Get files created (added) relative to branch base."""
+    default_branch = get_default_branch()
+
+    try:
+        result = subprocess.run(
+            ["git", "merge-base", "HEAD", f"origin/{default_branch}"],
+            capture_output=True,
+            text=True,
+            check=True,
+        )
+        merge_base = result.stdout.strip()
+
+        subprocess.run(["git", "add", "-A"], capture_output=True, check=False)
+
+        # Get only added files (not modified) using --diff-filter=A
+        result = subprocess.run(
+            ["git", "diff", "--name-only", "--diff-filter=A", merge_base, "HEAD"],
+            capture_output=True,
+            text=True,
+            check=True,
+        )
+        committed_added = set(result.stdout.strip().split("\n")) if result.stdout.strip() else set()
+
+        # Staged new files that don't exist in merge_base
+        result = subprocess.run(
+            ["git", "diff", "--name-only", "--diff-filter=A", "--cached", merge_base],
+            capture_output=True,
+            text=True,
+            check=False,
+        )
+        staged_added = set(result.stdout.strip().split("\n")) if result.stdout.strip() else set()
+
+        # Untracked files are by definition "created"
+        result = subprocess.run(
+            ["git", "ls-files", "--others", "--exclude-standard"],
+            capture_output=True,
+            text=True,
+            check=False,
+        )
+        untracked_files = set(result.stdout.strip().split("\n")) if result.stdout.strip() else set()
+
+        all_created = committed_added | staged_added | untracked_files
+        return sorted([f for f in all_created if f])
+
+    except subprocess.CalledProcessError:
+        return []
+
+
+def get_created_files_default_tip() -> list[str]:
+    """Get files created compared to default branch tip."""
+    default_branch = get_default_branch()
+
+    try:
+        subprocess.run(["git", "add", "-A"], capture_output=True, check=False)
+
+        # Get only added files using --diff-filter=A
+        result = subprocess.run(
+            ["git", "diff", "--name-only", "--diff-filter=A", f"origin/{default_branch}..HEAD"],
+            capture_output=True,
+            text=True,
+            check=True,
+        )
+        committed_added = set(result.stdout.strip().split("\n")) if result.stdout.strip() else set()
+
+        result = subprocess.run(
+            [
+                "git",
+                "diff",
+                "--name-only",
+                "--diff-filter=A",
+                "--cached",
+                f"origin/{default_branch}",
+            ],
+            capture_output=True,
+            text=True,
+            check=False,
+        )
+        staged_added = set(result.stdout.strip().split("\n")) if result.stdout.strip() else set()
+
+        # Untracked files are by definition "created"
+        result = subprocess.run(
+            ["git", "ls-files", "--others", "--exclude-standard"],
+            capture_output=True,
+            text=True,
+            check=False,
+        )
+        untracked_files = set(result.stdout.strip().split("\n")) if result.stdout.strip() else set()
+
+        all_created = committed_added | staged_added | untracked_files
+        return sorted([f for f in all_created if f])
+
+    except subprocess.CalledProcessError:
+        return []
+
+
+def get_created_files_prompt() -> list[str]:
+    """Get files created since prompt was submitted."""
     baseline_path = Path(".deepwork/.last_work_tree")
 
     try:
@@ -214,28 +386,42 @@ def get_changed_files_prompt() -> list[str]:
         current_files = set(result.stdout.strip().split("\n")) if result.stdout.strip() else set()
         current_files = {f for f in current_files if f}
 
+        # Untracked files
+        result = subprocess.run(
+            ["git", "ls-files", "--others", "--exclude-standard"],
+            capture_output=True,
+            text=True,
+            check=False,
+        )
+        untracked_files = set(result.stdout.strip().split("\n")) if result.stdout.strip() else set()
+        untracked_files = {f for f in untracked_files if f}
+
+        all_current = current_files | untracked_files
+
         if baseline_path.exists():
             baseline_files = set(baseline_path.read_text().strip().split("\n"))
             baseline_files = {f for f in baseline_files if f}
-
-
+            # Created files are those that didn't exist at baseline
+            created_files = all_current - baseline_files
+            return sorted(created_files)
         else:
-
+            # No baseline means all current files are "new" to this prompt
+            return sorted(all_current)
 
     except (subprocess.CalledProcessError, OSError):
         return []
 
 
-def
-    """Get
+def get_created_files_for_mode(mode: str) -> list[str]:
+    """Get created files for a specific compare_to mode."""
     if mode == "base":
-        return
+        return get_created_files_base()
     elif mode == "default_tip":
-        return
+        return get_created_files_default_tip()
     elif mode == "prompt":
-        return
+        return get_created_files_prompt()
     else:
-        return
+        return get_created_files_base()
 
 
 def extract_promise_tags(text: str) -> set[str]:
@@ -399,13 +585,16 @@ def rules_check_hook(hook_input: HookInput) -> HookOutput:
 
     for mode, mode_rules in rules_by_mode.items():
         changed_files = get_changed_files_for_mode(mode)
-
+        created_files = get_created_files_for_mode(mode)
+
+        # Skip if no changed or created files
+        if not changed_files and not created_files:
             continue
 
         baseline_ref = get_baseline_ref(mode)
 
         # Evaluate which rules fire
-        results = evaluate_rules(mode_rules, changed_files, promised_rules)
+        results = evaluate_rules(mode_rules, changed_files, promised_rules, created_files)
 
         for result in results:
             rule = result.rule
@@ -425,6 +614,26 @@ def rules_check_hook(hook_input: HookInput) -> HookOutput:
             ):
                 continue
 
+            # For PROMPT rules, also skip if already QUEUED (already shown to agent).
+            # This prevents infinite loops when transcript is unavailable or promise
+            # tags haven't been written yet. The agent has already seen this rule.
+            if (
+                existing
+                and existing.status == QueueEntryStatus.QUEUED
+                and rule.action_type == ActionType.PROMPT
+            ):
+                continue
+
+            # For COMMAND rules with FAILED status, don't re-run the command.
+            # The agent has already seen the error. If they provide a promise,
+            # the after-loop logic will update the status to SKIPPED.
+            if (
+                existing
+                and existing.status == QueueEntryStatus.FAILED
+                and rule.action_type == ActionType.COMMAND
+            ):
+                continue
+
             # Create queue entry if new
             if not existing:
                 queue.create_entry(
@@ -458,10 +667,10 @@ def rules_check_hook(hook_input: HookInput) -> HookOutput:
                         ),
                     )
                 else:
-                    # Command failed
-                    error_msg = format_command_errors(cmd_results)
-                    skip_hint = f"
-                    command_errors.append(f"
+                    # Command failed - format detailed error message
+                    error_msg = format_command_errors(cmd_results, rule_name=rule.name)
+                    skip_hint = f"\nTo skip, include `<promise>✓ {rule.name}</promise>` in your response."
+                    command_errors.append(f"{error_msg}{skip_hint}")
                     queue.update_status(
                         trigger_hash,
                         QueueEntryStatus.FAILED,
@@ -476,6 +685,26 @@ def rules_check_hook(hook_input: HookInput) -> HookOutput:
                 # Collect for prompt output
                 prompt_results.append(result)
 
+    # Handle FAILED queue entries that have been promised
+    # (These rules weren't in results because evaluate_rules skips promised rules,
+    # but we need to update their queue status to SKIPPED)
+    if promised_rules:
+        promised_lower = {name.lower() for name in promised_rules}
+        for entry in queue.get_all_entries():
+            if (
+                entry.status == QueueEntryStatus.FAILED
+                and entry.rule_name.lower() in promised_lower
+            ):
+                queue.update_status(
+                    entry.trigger_hash,
+                    QueueEntryStatus.SKIPPED,
+                    ActionResult(
+                        type="command",
+                        output="Acknowledged via promise tag",
+                        exit_code=None,
+                    ),
+                )
+
     # Build response
     messages: list[str] = []
 
@@ -498,17 +727,33 @@ def rules_check_hook(hook_input: HookInput) -> HookOutput:
 
 def main() -> None:
     """Entry point for the rules check hook."""
-    # Determine platform from environment
    platform_str = os.environ.get("DEEPWORK_HOOK_PLATFORM", "claude")
    try:
        platform = Platform(platform_str)
    except ValueError:
        platform = Platform.CLAUDE
 
-    # Run the hook with the wrapper
    exit_code = run_hook(rules_check_hook, platform)
    sys.exit(exit_code)
 
 
 if __name__ == "__main__":
-
+    # Wrap entry point to catch early failures (e.g., import errors in wrapper.py)
+    try:
+        main()
+    except Exception as e:
+        # Last resort error handling - output JSON manually since wrapper may be broken
+        import json
+        import traceback
+
+        error_output = {
+            "decision": "block",
+            "reason": (
+                "## Hook Script Error\n\n"
+                f"Error type: {type(e).__name__}\n"
+                f"Error: {e}\n\n"
+                f"Traceback:\n```\n{traceback.format_exc()}\n```"
+            ),
+        }
+        print(json.dumps(error_output))
+        sys.exit(0)
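For orientation, the new helpers split cleanly by `compare_to` mode: `get_changed_files_for_mode` and `get_created_files_for_mode` dispatch to the base/default_tip/prompt variants, and `rules_check_hook` now passes both lists to `evaluate_rules`. A minimal sketch of calling the dispatchers outside the hook; the import path matches the file location, but running it standalone is an assumption and requires a git work tree with the `.deepwork/` baseline files in place (note the helpers run `git add -A` as a side effect):

```python
# Illustrative only: exercises the 0.4.0 dispatch helpers directly.
# Assumes a git repository plus the .deepwork/.last_head_ref and
# .deepwork/.last_work_tree baselines captured by the prompt hook.
# Side effect: the helpers stage all changes via `git add -A`.
from deepwork.hooks import rules_check

for mode in ("base", "default_tip", "prompt"):
    changed = rules_check.get_changed_files_for_mode(mode)
    created = rules_check.get_created_files_for_mode(mode)
    # Mirrors the hook's early exit: modes with no activity are skipped.
    if not changed and not created:
        continue
    print(f"{mode}: {len(changed)} changed, {len(created)} created")
```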
deepwork/hooks/wrapper.py
CHANGED
@@ -321,6 +321,55 @@ def write_stdout(data: str) -> None:
     print(data)
 
 
+def format_hook_error(
+    error: Exception,
+    context: str = "",
+) -> dict[str, Any]:
+    """
+    Format an error into a blocking JSON response with detailed information.
+
+    This is used when the hook script itself fails, to provide useful
+    error information to the user instead of a generic "non-blocking status code" message.
+
+    Args:
+        error: The exception that occurred
+        context: Additional context about where the error occurred
+
+    Returns:
+        Dict with decision="block" and detailed error message
+    """
+    import traceback
+
+    error_type = type(error).__name__
+    error_msg = str(error)
+    tb = traceback.format_exc()
+
+    parts = ["## Hook Script Error", ""]
+    if context:
+        parts.append(f"Context: {context}")
+    parts.append(f"Error type: {error_type}")
+    parts.append(f"Error: {error_msg}")
+    parts.append("")
+    parts.append("Traceback:")
+    parts.append(f"```\n{tb}\n```")
+
+    return {
+        "decision": "block",
+        "reason": "\n".join(parts),
+    }
+
+
+def output_hook_error(error: Exception, context: str = "") -> None:
+    """
+    Output a hook error as JSON to stdout.
+
+    Use this in exception handlers to ensure the hook always outputs
+    valid JSON even when crashing.
+    """
+    error_dict = format_hook_error(error, context)
+    print(json.dumps(error_dict))
+
+
 def run_hook(
     hook_fn: Callable[[HookInput], HookOutput],
     platform: Platform,
@@ -340,24 +389,25 @@ def run_hook(
         platform: The platform calling this hook
 
     Returns:
-        Exit code (0 for success
+        Exit code (0 for success)
     """
-    # Read and normalize input
-    raw_input = read_stdin()
-    hook_input = normalize_input(raw_input, platform)
-
-    # Call the hook
     try:
+        # Read and normalize input
+        raw_input = read_stdin()
+        hook_input = normalize_input(raw_input, platform)
+
+        # Call the hook
         hook_output = hook_fn(hook_input)
-    except Exception as e:
-        # On error, allow the action but log
-        print(f"Hook error: {e}", file=sys.stderr)
-        hook_output = HookOutput()
 
-
-
-
+        # Denormalize and write output
+        output_json = denormalize_output(hook_output, platform, hook_input.event)
+        write_stdout(output_json)
 
-
-
-
+        # Always return 0 when using JSON output format
+        # The decision field in the JSON controls blocking behavior
+        return 0
+
+    except Exception as e:
+        # On any error, output a proper JSON error response
+        output_hook_error(e, context=f"Running hook {hook_fn.__name__}")
+        return 0  # Return 0 so Claude Code processes our JSON output
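The practical effect of the `run_hook` change is that a crashing hook now emits a blocking JSON payload instead of a stderr message and an empty `HookOutput`. A small sketch of the error shape, using `format_hook_error` from the diff above; the failing code and context string are invented for illustration:

```python
# Illustrative only: shows the JSON payload produced when a hook crashes.
from deepwork.hooks.wrapper import format_hook_error

try:
    raise RuntimeError("transcript file missing")  # stand-in for a real hook failure
except RuntimeError as exc:
    payload = format_hook_error(exc, context="Running hook rules_check_hook")

print(payload["decision"])                # -> "block"
print(payload["reason"].splitlines()[0])  # -> "## Hook Script Error"
```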
deepwork/schemas/doc_spec_schema.py
ADDED
@@ -0,0 +1,64 @@
+"""JSON Schema definition for doc specs (document type definitions)."""
+
+from typing import Any
+
+# Schema for a single quality criterion
+QUALITY_CRITERION_SCHEMA: dict[str, Any] = {
+    "type": "object",
+    "required": ["name", "description"],
+    "properties": {
+        "name": {
+            "type": "string",
+            "minLength": 1,
+            "description": "Short name for the quality criterion",
+        },
+        "description": {
+            "type": "string",
+            "minLength": 1,
+            "description": "Detailed description of what this criterion requires",
+        },
+    },
+    "additionalProperties": False,
+}
+
+# Schema for doc spec frontmatter
+DOC_SPEC_FRONTMATTER_SCHEMA: dict[str, Any] = {
+    "$schema": "http://json-schema.org/draft-07/schema#",
+    "type": "object",
+    "required": ["name", "description", "quality_criteria"],
+    "properties": {
+        "name": {
+            "type": "string",
+            "minLength": 1,
+            "description": "Human-readable name for the document type",
+        },
+        "description": {
+            "type": "string",
+            "minLength": 1,
+            "description": "Description of this document type's purpose",
+        },
+        "path_patterns": {
+            "type": "array",
+            "description": "Glob patterns for where documents of this type should be stored",
+            "items": {
+                "type": "string",
+                "minLength": 1,
+            },
+        },
+        "target_audience": {
+            "type": "string",
+            "description": "Who this document is written for",
+        },
+        "frequency": {
+            "type": "string",
+            "description": "How often this document type is produced (e.g., 'Monthly', 'Per sprint')",
+        },
+        "quality_criteria": {
+            "type": "array",
+            "description": "Quality criteria that documents of this type must meet",
+            "minItems": 1,
+            "items": QUALITY_CRITERION_SCHEMA,
+        },
+    },
+    "additionalProperties": False,
+}
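Since the frontmatter schema is plain draft-07 JSON Schema, it can be exercised directly. A minimal sketch using the `jsonschema` package; the field values are invented, and deepwork's own `doc_spec_parser.py` is the authoritative consumer:

```python
# Illustrative only: validates made-up doc-spec frontmatter against the new schema.
from jsonschema import validate

from deepwork.schemas.doc_spec_schema import DOC_SPEC_FRONTMATTER_SCHEMA

frontmatter = {
    "name": "Job Spec",
    "description": "Specification describing a deepwork job",
    "path_patterns": ["docs/specs/*.md"],  # hypothetical location
    "frequency": "Per job",
    "quality_criteria": [
        {"name": "Complete", "description": "Covers every step and output"},
    ],
}

# Raises jsonschema.ValidationError if a required key is missing or an
# unknown key is present (additionalProperties is False).
validate(instance=frontmatter, schema=DOC_SPEC_FRONTMATTER_SCHEMA)
```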
deepwork/schemas/job_schema.py
CHANGED
@@ -161,10 +161,32 @@ JOB_SCHEMA: dict[str, Any] = {
         },
         "outputs": {
             "type": "array",
-            "description": "List of output files/directories",
+            "description": "List of output files/directories, optionally with document type references",
             "items": {
-                "
-
+                "oneOf": [
+                    {
+                        "type": "string",
+                        "minLength": 1,
+                        "description": "Simple output file path (backward compatible)",
+                    },
+                    {
+                        "type": "object",
+                        "required": ["file"],
+                        "properties": {
+                            "file": {
+                                "type": "string",
+                                "minLength": 1,
+                                "description": "Output file path",
+                            },
+                            "doc_spec": {
+                                "type": "string",
+                                "pattern": r"^\.deepwork/doc_specs/[a-z][a-z0-9_-]*\.md$",
+                                "description": "Path to doc spec file",
+                            },
+                        },
+                        "additionalProperties": False,
+                    },
+                ],
             },
         },
         "dependencies": {
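The `oneOf` keeps plain string entries valid while adding an object form that ties an output file to a doc spec. A hedged sketch of both shapes, validated with the `jsonschema` package against a condensed copy of the item schema above; the paths are invented:

```python
# Illustrative only: the two `outputs` item shapes accepted in 0.4.0.
from jsonschema import validate

OUTPUT_ITEM_SCHEMA = {  # condensed from the diff above; descriptions omitted
    "oneOf": [
        {"type": "string", "minLength": 1},
        {
            "type": "object",
            "required": ["file"],
            "properties": {
                "file": {"type": "string", "minLength": 1},
                "doc_spec": {
                    "type": "string",
                    "pattern": r"^\.deepwork/doc_specs/[a-z][a-z0-9_-]*\.md$",
                },
            },
            "additionalProperties": False,
        },
    ]
}

validate("reports/summary.md", OUTPUT_ITEM_SCHEMA)  # old-style plain path
validate(
    {"file": "reports/summary.md", "doc_spec": ".deepwork/doc_specs/release_notes.md"},
    OUTPUT_ITEM_SCHEMA,
)
```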
deepwork/schemas/rules_schema.py
CHANGED
@@ -15,7 +15,7 @@ STRING_OR_ARRAY: dict[str, Any] = {
 RULES_FRONTMATTER_SCHEMA: dict[str, Any] = {
     "$schema": "http://json-schema.org/draft-07/schema#",
     "type": "object",
-    "required": ["name"],
+    "required": ["name", "compare_to"],
     "properties": {
         "name": {
             "type": "string",
@@ -56,6 +56,11 @@ RULES_FRONTMATTER_SCHEMA: dict[str, Any] = {
             "additionalProperties": False,
             "description": "Directional file correspondence (trigger -> expects)",
         },
+        # Detection mode: created (fire when files are created matching patterns)
+        "created": {
+            **STRING_OR_ARRAY,
+            "description": "Glob pattern(s) for newly created files that trigger this rule",
+        },
         # Action type: command (default is prompt using markdown body)
         "action": {
             "type": "object",
@@ -80,24 +85,51 @@ RULES_FRONTMATTER_SCHEMA: dict[str, Any] = {
         "compare_to": {
             "type": "string",
             "enum": ["base", "default_tip", "prompt"],
-            "default": "base",
             "description": "Baseline for detecting file changes",
         },
     },
     "additionalProperties": False,
-    # Detection mode must be exactly one of: trigger, set, or
+    # Detection mode must be exactly one of: trigger, set, pair, or created
     "oneOf": [
         {
             "required": ["trigger"],
-            "not": {
+            "not": {
+                "anyOf": [
+                    {"required": ["set"]},
+                    {"required": ["pair"]},
+                    {"required": ["created"]},
+                ]
+            },
         },
         {
             "required": ["set"],
-            "not": {
+            "not": {
+                "anyOf": [
+                    {"required": ["trigger"]},
+                    {"required": ["pair"]},
+                    {"required": ["created"]},
+                ]
+            },
         },
         {
             "required": ["pair"],
-            "not": {
+            "not": {
+                "anyOf": [
+                    {"required": ["trigger"]},
+                    {"required": ["set"]},
+                    {"required": ["created"]},
+                ]
+            },
+        },
+        {
+            "required": ["created"],
+            "not": {
+                "anyOf": [
+                    {"required": ["trigger"]},
+                    {"required": ["set"]},
+                    {"required": ["pair"]},
+                ]
+            },
         },
     ],
 }