deepwork-0.4.0-py3-none-any.whl → deepwork-0.7.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. deepwork/__init__.py +1 -1
  2. deepwork/cli/hook.py +3 -4
  3. deepwork/cli/install.py +70 -117
  4. deepwork/cli/main.py +2 -2
  5. deepwork/cli/serve.py +133 -0
  6. deepwork/cli/sync.py +93 -58
  7. deepwork/core/adapters.py +91 -98
  8. deepwork/core/generator.py +19 -386
  9. deepwork/core/hooks_syncer.py +1 -1
  10. deepwork/core/parser.py +270 -1
  11. deepwork/hooks/README.md +0 -44
  12. deepwork/hooks/__init__.py +3 -6
  13. deepwork/hooks/check_version.sh +54 -21
  14. deepwork/mcp/__init__.py +23 -0
  15. deepwork/mcp/quality_gate.py +347 -0
  16. deepwork/mcp/schemas.py +263 -0
  17. deepwork/mcp/server.py +253 -0
  18. deepwork/mcp/state.py +422 -0
  19. deepwork/mcp/tools.py +394 -0
  20. deepwork/schemas/job.schema.json +347 -0
  21. deepwork/schemas/job_schema.py +27 -239
  22. deepwork/standard_jobs/deepwork_jobs/doc_specs/job_spec.md +9 -15
  23. deepwork/standard_jobs/deepwork_jobs/job.yml +146 -46
  24. deepwork/standard_jobs/deepwork_jobs/steps/define.md +100 -33
  25. deepwork/standard_jobs/deepwork_jobs/steps/errata.md +154 -0
  26. deepwork/standard_jobs/deepwork_jobs/steps/fix_jobs.md +207 -0
  27. deepwork/standard_jobs/deepwork_jobs/steps/fix_settings.md +177 -0
  28. deepwork/standard_jobs/deepwork_jobs/steps/implement.md +22 -138
  29. deepwork/standard_jobs/deepwork_jobs/steps/iterate.md +221 -0
  30. deepwork/standard_jobs/deepwork_jobs/steps/learn.md +2 -26
  31. deepwork/standard_jobs/deepwork_jobs/steps/test.md +154 -0
  32. deepwork/standard_jobs/deepwork_jobs/templates/job.yml.template +2 -0
  33. deepwork/templates/claude/AGENTS.md +38 -0
  34. deepwork/templates/claude/settings.json +16 -0
  35. deepwork/templates/claude/skill-deepwork.md.jinja +37 -0
  36. deepwork/templates/gemini/skill-deepwork.md.jinja +37 -0
  37. deepwork-0.7.0.dist-info/METADATA +317 -0
  38. deepwork-0.7.0.dist-info/RECORD +64 -0
  39. deepwork/cli/rules.py +0 -32
  40. deepwork/core/command_executor.py +0 -190
  41. deepwork/core/pattern_matcher.py +0 -271
  42. deepwork/core/rules_parser.py +0 -559
  43. deepwork/core/rules_queue.py +0 -321
  44. deepwork/hooks/rules_check.py +0 -759
  45. deepwork/schemas/rules_schema.py +0 -135
  46. deepwork/standard_jobs/deepwork_jobs/steps/review_job_spec.md +0 -208
  47. deepwork/standard_jobs/deepwork_jobs/templates/doc_spec.md.example +0 -86
  48. deepwork/standard_jobs/deepwork_rules/hooks/capture_prompt_work_tree.sh +0 -38
  49. deepwork/standard_jobs/deepwork_rules/hooks/global_hooks.yml +0 -8
  50. deepwork/standard_jobs/deepwork_rules/hooks/user_prompt_submit.sh +0 -16
  51. deepwork/standard_jobs/deepwork_rules/job.yml +0 -49
  52. deepwork/standard_jobs/deepwork_rules/rules/.gitkeep +0 -13
  53. deepwork/standard_jobs/deepwork_rules/rules/api-documentation-sync.md.example +0 -10
  54. deepwork/standard_jobs/deepwork_rules/rules/readme-documentation.md.example +0 -10
  55. deepwork/standard_jobs/deepwork_rules/rules/security-review.md.example +0 -11
  56. deepwork/standard_jobs/deepwork_rules/rules/skill-md-validation.md +0 -46
  57. deepwork/standard_jobs/deepwork_rules/rules/source-test-pairing.md.example +0 -13
  58. deepwork/standard_jobs/deepwork_rules/steps/define.md +0 -249
  59. deepwork/templates/claude/skill-job-meta.md.jinja +0 -77
  60. deepwork/templates/claude/skill-job-step.md.jinja +0 -251
  61. deepwork/templates/gemini/skill-job-meta.toml.jinja +0 -76
  62. deepwork/templates/gemini/skill-job-step.toml.jinja +0 -162
  63. deepwork-0.4.0.dist-info/METADATA +0 -381
  64. deepwork-0.4.0.dist-info/RECORD +0 -71
  65. {deepwork-0.4.0.dist-info → deepwork-0.7.0.dist-info}/WHEEL +0 -0
  66. {deepwork-0.4.0.dist-info → deepwork-0.7.0.dist-info}/entry_points.txt +0 -0
  67. {deepwork-0.4.0.dist-info → deepwork-0.7.0.dist-info}/licenses/LICENSE.md +0 -0
deepwork/core/rules_queue.py
@@ -1,321 +0,0 @@
- """Queue system for tracking rule state in .deepwork/tmp/rules/queue/."""
-
- import hashlib
- import json
- from dataclasses import asdict, dataclass, field
- from datetime import UTC, datetime
- from enum import Enum
- from pathlib import Path
- from typing import Any
-
-
- class QueueEntryStatus(Enum):
-     """Status of a queue entry."""
-
-     QUEUED = "queued"  # Detected, awaiting evaluation
-     PASSED = "passed"  # Evaluated, rule satisfied (promise found or action succeeded)
-     FAILED = "failed"  # Evaluated, rule not satisfied
-     SKIPPED = "skipped"  # Safety pattern matched, skipped
-
-
- @dataclass
- class ActionResult:
-     """Result of executing a rule action."""
-
-     type: str  # "prompt" or "command"
-     output: str | None = None  # Command stdout or prompt message shown
-     exit_code: int | None = None  # Command exit code (None for prompt)
-
-
- @dataclass
- class QueueEntry:
-     """A single entry in the rules queue."""
-
-     # Identity
-     rule_name: str  # Human-friendly name
-     rule_file: str  # Filename (e.g., "source-test-pairing.md")
-     trigger_hash: str  # Hash for deduplication
-
-     # State
-     status: QueueEntryStatus = QueueEntryStatus.QUEUED
-     created_at: str = ""  # ISO8601 timestamp
-     evaluated_at: str | None = None  # ISO8601 timestamp
-
-     # Context
-     baseline_ref: str = ""  # Commit hash or timestamp used as baseline
-     trigger_files: list[str] = field(default_factory=list)
-     expected_files: list[str] = field(default_factory=list)  # For set/pair modes
-     matched_files: list[str] = field(default_factory=list)  # Files that also changed
-
-     # Result
-     action_result: ActionResult | None = None
-
-     def __post_init__(self) -> None:
-         if not self.created_at:
-             self.created_at = datetime.now(UTC).isoformat()
-
-     def to_dict(self) -> dict[str, Any]:
-         """Convert to dictionary for JSON serialization."""
-         data = asdict(self)
-         data["status"] = self.status.value
-         if self.action_result:
-             data["action_result"] = asdict(self.action_result)
-         return data
-
-     @classmethod
-     def from_dict(cls, data: dict[str, Any]) -> "QueueEntry":
-         """Create from dictionary."""
-         action_result = None
-         if data.get("action_result"):
-             action_result = ActionResult(**data["action_result"])
-
-         return cls(
-             rule_name=data.get("rule_name", data.get("policy_name", "")),
-             rule_file=data.get("rule_file", data.get("policy_file", "")),
-             trigger_hash=data["trigger_hash"],
-             status=QueueEntryStatus(data["status"]),
-             created_at=data.get("created_at", ""),
-             evaluated_at=data.get("evaluated_at"),
-             baseline_ref=data.get("baseline_ref", ""),
-             trigger_files=data.get("trigger_files", []),
-             expected_files=data.get("expected_files", []),
-             matched_files=data.get("matched_files", []),
-             action_result=action_result,
-         )
-
-
- def compute_trigger_hash(
-     rule_name: str,
-     trigger_files: list[str],
-     baseline_ref: str,
- ) -> str:
-     """
-     Compute a hash for deduplication.
-
-     The hash is based on:
-     - Rule name
-     - Sorted list of trigger files
-     - Baseline reference (commit hash or timestamp)
-
-     Returns:
-         12-character hex hash
-     """
-     hash_input = f"{rule_name}:{sorted(trigger_files)}:{baseline_ref}"
-     return hashlib.sha256(hash_input.encode()).hexdigest()[:12]
-
-
- class RulesQueue:
-     """
-     Manages the rules queue in .deepwork/tmp/rules/queue/.
-
-     Queue entries are stored as JSON files named {hash}.{status}.json
-     """
-
-     def __init__(self, queue_dir: Path | None = None):
-         """
-         Initialize the queue.
-
-         Args:
-             queue_dir: Path to queue directory. Defaults to .deepwork/tmp/rules/queue/
-         """
-         if queue_dir is None:
-             queue_dir = Path(".deepwork/tmp/rules/queue")
-         self.queue_dir = queue_dir
-
-     def _ensure_dir(self) -> None:
-         """Ensure queue directory exists."""
-         self.queue_dir.mkdir(parents=True, exist_ok=True)
-
-     def _get_entry_path(self, trigger_hash: str, status: QueueEntryStatus) -> Path:
-         """Get path for an entry file."""
-         return self.queue_dir / f"{trigger_hash}.{status.value}.json"
-
-     def _find_entry_path(self, trigger_hash: str) -> Path | None:
-         """Find existing entry file for a hash (any status)."""
-         for status in QueueEntryStatus:
-             path = self._get_entry_path(trigger_hash, status)
-             if path.exists():
-                 return path
-         return None
-
-     def has_entry(self, trigger_hash: str) -> bool:
-         """Check if an entry exists for this hash."""
-         return self._find_entry_path(trigger_hash) is not None
-
-     def get_entry(self, trigger_hash: str) -> QueueEntry | None:
-         """Get an entry by hash."""
-         path = self._find_entry_path(trigger_hash)
-         if path is None:
-             return None
-
-         try:
-             with open(path, encoding="utf-8") as f:
-                 data = json.load(f)
-             return QueueEntry.from_dict(data)
-         except (json.JSONDecodeError, OSError, KeyError):
-             return None
-
-     def create_entry(
-         self,
-         rule_name: str,
-         rule_file: str,
-         trigger_files: list[str],
-         baseline_ref: str,
-         expected_files: list[str] | None = None,
-     ) -> QueueEntry | None:
-         """
-         Create a new queue entry if one doesn't already exist.
-
-         Args:
-             rule_name: Human-friendly rule name
-             rule_file: Rule filename (e.g., "source-test-pairing.md")
-             trigger_files: Files that triggered the rule
-             baseline_ref: Baseline reference for change detection
-             expected_files: Expected corresponding files (for set/pair)
-
-         Returns:
-             Created QueueEntry, or None if entry already exists
-         """
-         trigger_hash = compute_trigger_hash(rule_name, trigger_files, baseline_ref)
-
-         # Check if already exists
-         if self.has_entry(trigger_hash):
-             return None
-
-         self._ensure_dir()
-
-         entry = QueueEntry(
-             rule_name=rule_name,
-             rule_file=rule_file,
-             trigger_hash=trigger_hash,
-             status=QueueEntryStatus.QUEUED,
-             baseline_ref=baseline_ref,
-             trigger_files=trigger_files,
-             expected_files=expected_files or [],
-         )
-
-         path = self._get_entry_path(trigger_hash, QueueEntryStatus.QUEUED)
-         with open(path, "w", encoding="utf-8") as f:
-             json.dump(entry.to_dict(), f, indent=2)
-
-         return entry
-
-     def update_status(
-         self,
-         trigger_hash: str,
-         new_status: QueueEntryStatus,
-         action_result: ActionResult | None = None,
-     ) -> bool:
-         """
-         Update the status of an entry.
-
-         This renames the file to reflect the new status.
-
-         Args:
-             trigger_hash: Hash of the entry to update
-             new_status: New status
-             action_result: Optional result of action execution
-
-         Returns:
-             True if updated, False if entry not found
-         """
-         old_path = self._find_entry_path(trigger_hash)
-         if old_path is None:
-             return False
-
-         # Load existing entry
-         try:
-             with open(old_path, encoding="utf-8") as f:
-                 data = json.load(f)
-         except (json.JSONDecodeError, OSError):
-             return False
-
-         # Update fields
-         data["status"] = new_status.value
-         data["evaluated_at"] = datetime.now(UTC).isoformat()
-         if action_result:
-             data["action_result"] = asdict(action_result)
-
-         # Write to new path
-         new_path = self._get_entry_path(trigger_hash, new_status)
-
-         # If status didn't change, just update in place
-         if old_path == new_path:
-             with open(new_path, "w", encoding="utf-8") as f:
-                 json.dump(data, f, indent=2)
-         else:
-             # Write new file then delete old
-             with open(new_path, "w", encoding="utf-8") as f:
-                 json.dump(data, f, indent=2)
-             old_path.unlink()
-
-         return True
-
-     def get_queued_entries(self) -> list[QueueEntry]:
-         """Get all entries with QUEUED status."""
-         if not self.queue_dir.exists():
-             return []
-
-         entries = []
-         for path in self.queue_dir.glob("*.queued.json"):
-             try:
-                 with open(path, encoding="utf-8") as f:
-                     data = json.load(f)
-                 entries.append(QueueEntry.from_dict(data))
-             except (json.JSONDecodeError, OSError, KeyError):
-                 continue
-
-         return entries
-
-     def get_all_entries(self) -> list[QueueEntry]:
-         """Get all entries regardless of status."""
-         if not self.queue_dir.exists():
-             return []
-
-         entries = []
-         for path in self.queue_dir.glob("*.json"):
-             try:
-                 with open(path, encoding="utf-8") as f:
-                     data = json.load(f)
-                 entries.append(QueueEntry.from_dict(data))
-             except (json.JSONDecodeError, OSError, KeyError):
-                 continue
-
-         return entries
-
-     def clear(self) -> int:
-         """
-         Clear all entries from the queue.
-
-         Returns:
-             Number of entries removed
-         """
-         if not self.queue_dir.exists():
-             return 0
-
-         count = 0
-         for path in self.queue_dir.glob("*.json"):
-             try:
-                 path.unlink()
-                 count += 1
-             except OSError:
-                 continue
-
-         return count
-
-     def remove_entry(self, trigger_hash: str) -> bool:
-         """
-         Remove an entry by hash.
-
-         Returns:
-             True if removed, False if not found
-         """
-         path = self._find_entry_path(trigger_hash)
-         if path is None:
-             return False
-
-         try:
-             path.unlink()
-             return True
-         except OSError:
-             return False
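
For reference, below is a minimal sketch of how the deleted deepwork.core.rules_queue API was driven, using only the classes and functions shown in the removed file above. The rule name comes from the module's own docstrings; the file paths, baseline ref, and action output are hypothetical placeholders.

```python
from pathlib import Path

from deepwork.core.rules_queue import ActionResult, QueueEntryStatus, RulesQueue

# Removed in 0.7.0; this reflects the pre-0.7.0 API only.
queue = RulesQueue(queue_dir=Path(".deepwork/tmp/rules/queue"))

# Queue an evaluation; returns None when an entry with the same trigger hash
# (rule name + sorted trigger files + baseline ref) already exists.
entry = queue.create_entry(
    rule_name="source-test-pairing",
    rule_file="source-test-pairing.md",
    trigger_files=["src/example.py"],          # placeholder paths
    baseline_ref="abc123def456",               # placeholder commit hash
    expected_files=["tests/test_example.py"],  # placeholder pair file
)

if entry is not None:
    # Record the outcome; the entry file is renamed to {hash}.passed.json.
    queue.update_status(
        entry.trigger_hash,
        QueueEntryStatus.PASSED,
        action_result=ActionResult(type="command", output="ok", exit_code=0),
    )

# Entries still awaiting evaluation.
for pending in queue.get_queued_entries():
    print(pending.rule_name, pending.status.value)
```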