deepwork 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepwork/__init__.py +25 -0
- deepwork/cli/__init__.py +1 -0
- deepwork/cli/install.py +290 -0
- deepwork/cli/main.py +25 -0
- deepwork/cli/sync.py +176 -0
- deepwork/core/__init__.py +1 -0
- deepwork/core/adapters.py +373 -0
- deepwork/core/detector.py +93 -0
- deepwork/core/generator.py +290 -0
- deepwork/core/hooks_syncer.py +206 -0
- deepwork/core/parser.py +310 -0
- deepwork/core/policy_parser.py +285 -0
- deepwork/hooks/__init__.py +1 -0
- deepwork/hooks/evaluate_policies.py +159 -0
- deepwork/schemas/__init__.py +1 -0
- deepwork/schemas/job_schema.py +212 -0
- deepwork/schemas/policy_schema.py +68 -0
- deepwork/standard_jobs/deepwork_jobs/job.yml +102 -0
- deepwork/standard_jobs/deepwork_jobs/steps/define.md +359 -0
- deepwork/standard_jobs/deepwork_jobs/steps/implement.md +435 -0
- deepwork/standard_jobs/deepwork_jobs/steps/refine.md +447 -0
- deepwork/standard_jobs/deepwork_policy/hooks/capture_work_tree.sh +26 -0
- deepwork/standard_jobs/deepwork_policy/hooks/get_changed_files.sh +30 -0
- deepwork/standard_jobs/deepwork_policy/hooks/global_hooks.yml +8 -0
- deepwork/standard_jobs/deepwork_policy/hooks/policy_stop_hook.sh +72 -0
- deepwork/standard_jobs/deepwork_policy/hooks/user_prompt_submit.sh +17 -0
- deepwork/standard_jobs/deepwork_policy/job.yml +35 -0
- deepwork/standard_jobs/deepwork_policy/steps/define.md +174 -0
- deepwork/templates/__init__.py +1 -0
- deepwork/templates/claude/command-job-step.md.jinja +210 -0
- deepwork/templates/gemini/command-job-step.toml.jinja +169 -0
- deepwork/utils/__init__.py +1 -0
- deepwork/utils/fs.py +128 -0
- deepwork/utils/git.py +164 -0
- deepwork/utils/validation.py +31 -0
- deepwork/utils/yaml_utils.py +89 -0
- deepwork-0.1.0.dist-info/METADATA +389 -0
- deepwork-0.1.0.dist-info/RECORD +41 -0
- deepwork-0.1.0.dist-info/WHEEL +4 -0
- deepwork-0.1.0.dist-info/entry_points.txt +2 -0
- deepwork-0.1.0.dist-info/licenses/LICENSE.md +60 -0
deepwork/core/parser.py
ADDED
@@ -0,0 +1,310 @@
"""Job definition parser."""

from dataclasses import dataclass, field
from pathlib import Path
from typing import Any

from deepwork.schemas.job_schema import JOB_SCHEMA, LIFECYCLE_HOOK_EVENTS
from deepwork.utils.validation import ValidationError, validate_against_schema
from deepwork.utils.yaml_utils import YAMLError, load_yaml


class ParseError(Exception):
    """Exception raised for job parsing errors."""

    pass


@dataclass
class StepInput:
    """Represents a step input (either user parameter or file from previous step)."""

    # User parameter input
    name: str | None = None
    description: str | None = None

    # File input from previous step
    file: str | None = None
    from_step: str | None = None

    def is_user_input(self) -> bool:
        """Check if this is a user parameter input."""
        return self.name is not None and self.description is not None

    def is_file_input(self) -> bool:
        """Check if this is a file input from previous step."""
        return self.file is not None and self.from_step is not None

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "StepInput":
        """Create StepInput from dictionary."""
        return cls(
            name=data.get("name"),
            description=data.get("description"),
            file=data.get("file"),
            from_step=data.get("from_step"),
        )


@dataclass
class HookAction:
    """Represents a hook action configuration.

    Hook actions define what happens when a lifecycle hook is triggered.
    Three types are supported:
    - prompt: Inline prompt text for validation/action
    - prompt_file: Path to a file containing the prompt
    - script: Path to a shell script for custom logic
    """

    # Inline prompt
    prompt: str | None = None

    # Prompt file reference (relative to job directory)
    prompt_file: str | None = None

    # Shell script reference (relative to job directory)
    script: str | None = None

    def is_prompt(self) -> bool:
        """Check if this is an inline prompt hook."""
        return self.prompt is not None

    def is_prompt_file(self) -> bool:
        """Check if this is a prompt file reference hook."""
        return self.prompt_file is not None

    def is_script(self) -> bool:
        """Check if this is a shell script hook."""
        return self.script is not None

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "HookAction":
        """Create HookAction from dictionary."""
        return cls(
            prompt=data.get("prompt"),
            prompt_file=data.get("prompt_file"),
            script=data.get("script"),
        )


# Backward compatibility alias
StopHook = HookAction


@dataclass
class Step:
    """Represents a single step in a job."""

    id: str
    name: str
    description: str
    instructions_file: str
    inputs: list[StepInput] = field(default_factory=list)
    outputs: list[str] = field(default_factory=list)
    dependencies: list[str] = field(default_factory=list)

    # New: hooks dict mapping lifecycle event names to HookAction lists
    # Event names: after_agent, before_tool, before_prompt
    hooks: dict[str, list[HookAction]] = field(default_factory=dict)

    @property
    def stop_hooks(self) -> list[HookAction]:
        """
        Backward compatibility property for stop_hooks.

        Returns hooks for the after_agent event.
        """
        return self.hooks.get("after_agent", [])

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "Step":
        """Create Step from dictionary."""
        # Parse new hooks structure
        hooks: dict[str, list[HookAction]] = {}
        if "hooks" in data:
            hooks_data = data["hooks"]
            for event in LIFECYCLE_HOOK_EVENTS:
                if event in hooks_data:
                    hooks[event] = [HookAction.from_dict(h) for h in hooks_data[event]]

        # Handle deprecated stop_hooks -> after_agent
        if "stop_hooks" in data and data["stop_hooks"]:
            # Merge with any existing after_agent hooks
            after_agent_hooks = hooks.get("after_agent", [])
            after_agent_hooks.extend([HookAction.from_dict(h) for h in data["stop_hooks"]])
            hooks["after_agent"] = after_agent_hooks

        return cls(
            id=data["id"],
            name=data["name"],
            description=data["description"],
            instructions_file=data["instructions_file"],
            inputs=[StepInput.from_dict(inp) for inp in data.get("inputs", [])],
            outputs=data["outputs"],
            dependencies=data.get("dependencies", []),
            hooks=hooks,
        )
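
As a sketch of the deprecation handling above: a step that still uses the legacy stop_hooks key gets those actions merged under the after_agent event, so old and new spellings land in the same place. The step dict below is hypothetical, exercising the classes just defined:

    step = Step.from_dict({
        "id": "review",
        "name": "Review changes",
        "description": "Review the generated diff",
        "instructions_file": "steps/review.md",
        "outputs": ["review.md"],
        "hooks": {"after_agent": [{"prompt": "Verify review.md exists"}]},
        "stop_hooks": [{"script": "hooks/check.sh"}],  # deprecated spelling
    })
    assert len(step.hooks["after_agent"]) == 2
    assert step.stop_hooks == step.hooks["after_agent"]
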
@dataclass
class JobDefinition:
    """Represents a complete job definition."""

    name: str
    version: str
    summary: str
    description: str | None
    steps: list[Step]
    job_dir: Path

    def get_step(self, step_id: str) -> Step | None:
        """
        Get step by ID.

        Args:
            step_id: Step ID to retrieve

        Returns:
            Step if found, None otherwise
        """
        for step in self.steps:
            if step.id == step_id:
                return step
        return None

    def validate_dependencies(self) -> None:
        """
        Validate step dependencies.

        Raises:
            ParseError: If dependencies are invalid (missing steps, circular deps)
        """
        step_ids = {step.id for step in self.steps}

        # Check all dependencies reference existing steps
        for step in self.steps:
            for dep_id in step.dependencies:
                if dep_id not in step_ids:
                    raise ParseError(f"Step '{step.id}' depends on non-existent step '{dep_id}'")

        # Check for circular dependencies using depth-first search
        visited = set()
        rec_stack = set()

        def has_cycle(step_id: str) -> bool:
            visited.add(step_id)
            rec_stack.add(step_id)

            step = self.get_step(step_id)
            if step:
                for dep_id in step.dependencies:
                    if dep_id not in visited:
                        if has_cycle(dep_id):
                            return True
                    elif dep_id in rec_stack:
                        return True

            rec_stack.remove(step_id)
            return False

        for step in self.steps:
            if step.id not in visited:
                if has_cycle(step.id):
                    raise ParseError(f"Circular dependency detected involving step '{step.id}'")

    def validate_file_inputs(self) -> None:
        """
        Validate that file inputs reference valid steps and dependencies.

        Raises:
            ParseError: If file inputs are invalid
        """
        for step in self.steps:
            for inp in step.inputs:
                if inp.is_file_input():
                    # Check that from_step exists
                    from_step = self.get_step(inp.from_step)  # type: ignore
                    if from_step is None:
                        raise ParseError(
                            f"Step '{step.id}' references non-existent step "
                            f"'{inp.from_step}' in file input"
                        )

                    # Check that from_step is in dependencies
                    if inp.from_step not in step.dependencies:
                        raise ParseError(
                            f"Step '{step.id}' has file input from '{inp.from_step}' "
                            f"but '{inp.from_step}' is not in dependencies"
                        )

    @classmethod
    def from_dict(cls, data: dict[str, Any], job_dir: Path) -> "JobDefinition":
        """
        Create JobDefinition from dictionary.

        Args:
            data: Parsed YAML data
            job_dir: Directory containing job definition

        Returns:
            JobDefinition instance
        """
        return cls(
            name=data["name"],
            version=data["version"],
            summary=data["summary"],
            description=data.get("description"),
            steps=[Step.from_dict(step_data) for step_data in data["steps"]],
            job_dir=job_dir,
        )


def parse_job_definition(job_dir: Path | str) -> JobDefinition:
    """
    Parse job definition from directory.

    Args:
        job_dir: Directory containing job.yml

    Returns:
        Parsed JobDefinition

    Raises:
        ParseError: If parsing fails or validation errors occur
    """
    job_dir_path = Path(job_dir)

    if not job_dir_path.exists():
        raise ParseError(f"Job directory does not exist: {job_dir_path}")

    if not job_dir_path.is_dir():
        raise ParseError(f"Job path is not a directory: {job_dir_path}")

    job_file = job_dir_path / "job.yml"
    if not job_file.exists():
        raise ParseError(f"job.yml not found in {job_dir_path}")

    # Load YAML
    try:
        job_data = load_yaml(job_file)
    except YAMLError as e:
        raise ParseError(f"Failed to load job.yml: {e}") from e

    if job_data is None:
        raise ParseError("job.yml is empty")

    # Validate against schema
    try:
        validate_against_schema(job_data, JOB_SCHEMA)
    except ValidationError as e:
        raise ParseError(f"Job definition validation failed: {e}") from e

    # Parse into dataclass
    job_def = JobDefinition.from_dict(job_data, job_dir_path)

    # Validate dependencies and file inputs
    job_def.validate_dependencies()
    job_def.validate_file_inputs()

    return job_def
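
Presumably the install/sync CLI drives this parser roughly as follows; the job directory path is illustrative, and ParseError is the single failure surface for missing files, schema violations, and bad dependency graphs:

    from deepwork.core.parser import ParseError, parse_job_definition

    try:
        job = parse_job_definition(".deepwork/jobs/my_job")  # hypothetical path
        print(job.name, job.version, [step.id for step in job.steps])
    except ParseError as e:
        print(f"Invalid job definition: {e}")
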
deepwork/core/policy_parser.py
ADDED
@@ -0,0 +1,285 @@
"""Policy definition parser."""

from dataclasses import dataclass, field
from fnmatch import fnmatch
from pathlib import Path
from typing import Any

import yaml

from deepwork.schemas.policy_schema import POLICY_SCHEMA
from deepwork.utils.validation import ValidationError, validate_against_schema


class PolicyParseError(Exception):
    """Exception raised for policy parsing errors."""

    pass


@dataclass
class Policy:
    """Represents a single policy definition."""

    name: str
    triggers: list[str]  # Normalized to list
    safety: list[str] = field(default_factory=list)  # Normalized to list, empty if not specified
    instructions: str = ""  # Resolved content (either inline or from file)

    @classmethod
    def from_dict(cls, data: dict[str, Any], base_dir: Path | None = None) -> "Policy":
        """
        Create Policy from dictionary.

        Args:
            data: Parsed YAML data for a single policy
            base_dir: Base directory for resolving instructions_file paths

        Returns:
            Policy instance

        Raises:
            PolicyParseError: If instructions cannot be resolved
        """
        # Normalize trigger to list
        trigger = data["trigger"]
        triggers = [trigger] if isinstance(trigger, str) else list(trigger)

        # Normalize safety to list (empty if not present)
        safety_data = data.get("safety", [])
        safety = [safety_data] if isinstance(safety_data, str) else list(safety_data)

        # Resolve instructions
        if "instructions" in data:
            instructions = data["instructions"]
        elif "instructions_file" in data:
            if base_dir is None:
                raise PolicyParseError(
                    f"Policy '{data['name']}' uses instructions_file but no base_dir provided"
                )
            instructions_path = base_dir / data["instructions_file"]
            if not instructions_path.exists():
                raise PolicyParseError(
                    f"Policy '{data['name']}' instructions file not found: {instructions_path}"
                )
            try:
                instructions = instructions_path.read_text()
            except Exception as e:
                raise PolicyParseError(
                    f"Policy '{data['name']}' failed to read instructions file: {e}"
                ) from e
        else:
            # Schema should catch this, but be defensive
            raise PolicyParseError(
                f"Policy '{data['name']}' must have either 'instructions' or 'instructions_file'"
            )

        return cls(
            name=data["name"],
            triggers=triggers,
            safety=safety,
            instructions=instructions,
        )
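
For instance, a policy entry whose trigger and safety are bare strings comes out with both normalized to lists; the entry below is hypothetical:

    policy = Policy.from_dict(
        {
            "name": "docs-up-to-date",
            "trigger": "src/**",
            "safety": "docs/**",
            "instructions": "Update the docs to match the code change.",
        }
    )
    assert policy.triggers == ["src/**"] and policy.safety == ["docs/**"]
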
def matches_pattern(file_path: str, patterns: list[str]) -> bool:
    """
    Check if a file path matches any of the given glob patterns.

    Args:
        file_path: File path to check (relative path)
        patterns: List of glob patterns to match against

    Returns:
        True if the file matches any pattern
    """
    for pattern in patterns:
        if _matches_glob(file_path, pattern):
            return True
    return False


def _matches_glob(file_path: str, pattern: str) -> bool:
    """
    Match a file path against a glob pattern, supporting ** for recursive matching.

    Args:
        file_path: File path to check
        pattern: Glob pattern (supports *, **, ?)

    Returns:
        True if matches
    """
    # Normalize path separators
    file_path = file_path.replace("\\", "/")
    pattern = pattern.replace("\\", "/")

    # Handle ** patterns (recursive directory matching)
    if "**" in pattern:
        # Split pattern by **
        parts = pattern.split("**")

        if len(parts) == 2:
            prefix, suffix = parts[0], parts[1]

            # Remove leading slashes from suffix
            suffix = suffix.lstrip("/")

            # Check if prefix matches the start of the path
            if prefix:
                prefix = prefix.rstrip("/")
                if not file_path.startswith(prefix + "/") and file_path != prefix:
                    return False
                # Get the remaining path after prefix
                remaining = file_path[len(prefix) :].lstrip("/")
            else:
                remaining = file_path

            # If no suffix, any remaining path matches
            if not suffix:
                return True

            # Check if suffix matches the end of any remaining path segment
            # For pattern "src/**/*.py", suffix is "*.py"
            # We need to match *.py against the filename portion
            remaining_parts = remaining.split("/")
            for i in range(len(remaining_parts)):
                test_path = "/".join(remaining_parts[i:])
                if fnmatch(test_path, suffix):
                    return True
            # Also try just the filename
            if fnmatch(remaining_parts[-1], suffix):
                return True

            return False

    # Simple pattern without **
    return fnmatch(file_path, pattern)
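
A few spot checks of the matching semantics implemented above (inferred from the code, not from separate documentation). Because the suffix is tried against every trailing segment and against the bare filename, src/**/*.py matches files directly under src/ as well as nested ones:

    assert matches_pattern("src/main.py", ["src/**/*.py"])
    assert matches_pattern("src/pkg/util.py", ["src/**/*.py"])
    assert not matches_pattern("tests/test_app.py", ["src/**/*.py"])
    assert matches_pattern("docs/guide/intro.md", ["docs/**"])
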
def evaluate_policy(policy: Policy, changed_files: list[str]) -> bool:
    """
    Evaluate whether a policy should fire based on changed files.

    A policy fires if:
    - At least one changed file matches a trigger pattern
    - AND no changed file matches a safety pattern

    Args:
        policy: Policy to evaluate
        changed_files: List of changed file paths (relative)

    Returns:
        True if the policy should fire
    """
    # Check if any trigger matches
    trigger_matched = False
    for file_path in changed_files:
        if matches_pattern(file_path, policy.triggers):
            trigger_matched = True
            break

    if not trigger_matched:
        return False

    # Check if any safety pattern matches
    if policy.safety:
        for file_path in changed_files:
            if matches_pattern(file_path, policy.safety):
                # Safety file was also changed, don't fire
                return False

    return True


def evaluate_policies(
    policies: list[Policy],
    changed_files: list[str],
    promised_policies: set[str] | None = None,
) -> list[Policy]:
    """
    Evaluate which policies should fire.

    Args:
        policies: List of policies to evaluate
        changed_files: List of changed file paths (relative)
        promised_policies: Set of policy names that have been marked as addressed
            via <promise> tags (these are skipped)

    Returns:
        List of policies that should fire (trigger matches, no safety match, not promised)
    """
    if promised_policies is None:
        promised_policies = set()

    fired_policies = []
    for policy in policies:
        # Skip if already promised/addressed
        if policy.name in promised_policies:
            continue

        if evaluate_policy(policy, changed_files):
            fired_policies.append(policy)

    return fired_policies
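
Putting triggers, safety patterns, and promises together: a hypothetical changelog policy fires when source files change, stays quiet when the changelog was touched in the same change, and is skipped once promised:

    policy = Policy(
        name="changelog-required",
        triggers=["src/**/*.py"],
        safety=["CHANGELOG.md"],
        instructions="Update CHANGELOG.md to describe your change.",
    )

    assert evaluate_policies([policy], ["src/app.py"]) == [policy]
    assert evaluate_policies([policy], ["src/app.py", "CHANGELOG.md"]) == []
    assert evaluate_policies([policy], ["src/app.py"], {"changelog-required"}) == []
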
def parse_policy_file(policy_path: Path | str, base_dir: Path | None = None) -> list[Policy]:
    """
    Parse policy definitions from a YAML file.

    Args:
        policy_path: Path to .deepwork.policy.yml file
        base_dir: Base directory for resolving instructions_file paths.
            Defaults to the directory containing the policy file.

    Returns:
        List of parsed Policy objects

    Raises:
        PolicyParseError: If parsing fails or validation errors occur
    """
    policy_path = Path(policy_path)

    if not policy_path.exists():
        raise PolicyParseError(f"Policy file does not exist: {policy_path}")

    if not policy_path.is_file():
        raise PolicyParseError(f"Policy path is not a file: {policy_path}")

    # Default base_dir to policy file's directory
    if base_dir is None:
        base_dir = policy_path.parent

    # Load YAML (policies are stored as a list, not a dict)
    try:
        with open(policy_path, encoding="utf-8") as f:
            policy_data = yaml.safe_load(f)
    except yaml.YAMLError as e:
        raise PolicyParseError(f"Failed to parse policy YAML: {e}") from e
    except OSError as e:
        raise PolicyParseError(f"Failed to read policy file: {e}") from e

    # Handle empty file or null content
    if policy_data is None:
        return []

    # Validate it's a list (schema expects array)
    if not isinstance(policy_data, list):
        raise PolicyParseError(
            f"Policy file must contain a list of policies, got {type(policy_data).__name__}"
        )

    # Validate against schema
    try:
        validate_against_schema(policy_data, POLICY_SCHEMA)
    except ValidationError as e:
        raise PolicyParseError(f"Policy definition validation failed: {e}") from e

    # Parse into dataclasses
    policies = []
    for policy_item in policy_data:
        policy = Policy.from_dict(policy_item, base_dir)
        policies.append(policy)

    return policies
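
End to end, the hook scripts shipped under deepwork/standard_jobs/deepwork_policy/hooks presumably feed this module a changed-file list and surface the instructions of whichever policies fire; the file path and changed files below are illustrative:

    from deepwork.core.policy_parser import evaluate_policies, parse_policy_file

    policies = parse_policy_file(".deepwork.policy.yml")  # hypothetical repo file
    fired = evaluate_policies(policies, changed_files=["src/app.py"])
    for policy in fired:
        print(f"[{policy.name}]\n{policy.instructions}")
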
deepwork/hooks/__init__.py
ADDED
@@ -0,0 +1 @@
"""DeepWork hooks package for policy enforcement and lifecycle events."""