deepwork-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepwork/__init__.py +25 -0
- deepwork/cli/__init__.py +1 -0
- deepwork/cli/install.py +290 -0
- deepwork/cli/main.py +25 -0
- deepwork/cli/sync.py +176 -0
- deepwork/core/__init__.py +1 -0
- deepwork/core/adapters.py +373 -0
- deepwork/core/detector.py +93 -0
- deepwork/core/generator.py +290 -0
- deepwork/core/hooks_syncer.py +206 -0
- deepwork/core/parser.py +310 -0
- deepwork/core/policy_parser.py +285 -0
- deepwork/hooks/__init__.py +1 -0
- deepwork/hooks/evaluate_policies.py +159 -0
- deepwork/schemas/__init__.py +1 -0
- deepwork/schemas/job_schema.py +212 -0
- deepwork/schemas/policy_schema.py +68 -0
- deepwork/standard_jobs/deepwork_jobs/job.yml +102 -0
- deepwork/standard_jobs/deepwork_jobs/steps/define.md +359 -0
- deepwork/standard_jobs/deepwork_jobs/steps/implement.md +435 -0
- deepwork/standard_jobs/deepwork_jobs/steps/refine.md +447 -0
- deepwork/standard_jobs/deepwork_policy/hooks/capture_work_tree.sh +26 -0
- deepwork/standard_jobs/deepwork_policy/hooks/get_changed_files.sh +30 -0
- deepwork/standard_jobs/deepwork_policy/hooks/global_hooks.yml +8 -0
- deepwork/standard_jobs/deepwork_policy/hooks/policy_stop_hook.sh +72 -0
- deepwork/standard_jobs/deepwork_policy/hooks/user_prompt_submit.sh +17 -0
- deepwork/standard_jobs/deepwork_policy/job.yml +35 -0
- deepwork/standard_jobs/deepwork_policy/steps/define.md +174 -0
- deepwork/templates/__init__.py +1 -0
- deepwork/templates/claude/command-job-step.md.jinja +210 -0
- deepwork/templates/gemini/command-job-step.toml.jinja +169 -0
- deepwork/utils/__init__.py +1 -0
- deepwork/utils/fs.py +128 -0
- deepwork/utils/git.py +164 -0
- deepwork/utils/validation.py +31 -0
- deepwork/utils/yaml_utils.py +89 -0
- deepwork-0.1.0.dist-info/METADATA +389 -0
- deepwork-0.1.0.dist-info/RECORD +41 -0
- deepwork-0.1.0.dist-info/WHEEL +4 -0
- deepwork-0.1.0.dist-info/entry_points.txt +2 -0
- deepwork-0.1.0.dist-info/licenses/LICENSE.md +60 -0

deepwork/core/generator.py
@@ -0,0 +1,290 @@
"""Slash-command file generator using Jinja2 templates."""

from pathlib import Path
from typing import Any

from jinja2 import Environment, FileSystemLoader, TemplateNotFound

from deepwork.core.adapters import AgentAdapter, CommandLifecycleHook
from deepwork.core.parser import JobDefinition, Step
from deepwork.schemas.job_schema import LIFECYCLE_HOOK_EVENTS
from deepwork.utils.fs import safe_read, safe_write


class GeneratorError(Exception):
    """Exception raised for command generation errors."""

    pass


class CommandGenerator:
    """Generates slash-command files from job definitions."""

    def __init__(self, templates_dir: Path | str | None = None):
        """
        Initialize generator.

        Args:
            templates_dir: Path to templates directory
                (defaults to package templates directory)
        """
        if templates_dir is None:
            # Use package templates directory
            templates_dir = Path(__file__).parent.parent / "templates"

        self.templates_dir = Path(templates_dir)

        if not self.templates_dir.exists():
            raise GeneratorError(f"Templates directory not found: {self.templates_dir}")

    def _get_jinja_env(self, adapter: AgentAdapter) -> Environment:
        """
        Get Jinja2 environment for an adapter.

        Args:
            adapter: Agent adapter

        Returns:
            Jinja2 Environment
        """
        platform_templates_dir = adapter.get_template_dir(self.templates_dir)
        if not platform_templates_dir.exists():
            raise GeneratorError(
                f"Templates for platform '{adapter.name}' not found at {platform_templates_dir}"
            )

        return Environment(
            loader=FileSystemLoader(platform_templates_dir),
            trim_blocks=True,
            lstrip_blocks=True,
        )

    def _is_standalone_step(self, job: JobDefinition, step: Step) -> bool:
        """
        Check if a step is standalone (disconnected from the main workflow).

        A standalone step has no dependencies AND no other steps depend on it.

        Args:
            job: Job definition
            step: Step to check

        Returns:
            True if step is standalone
        """
        # Step has dependencies - not standalone
        if step.dependencies:
            return False

        # Check if any other step depends on this step
        for other_step in job.steps:
            if step.id in other_step.dependencies:
                return False

        return True

    def _build_hook_context(self, job: JobDefinition, hook_action: Any) -> dict[str, Any]:
        """
        Build context for a single hook action.

        Args:
            job: Job definition
            hook_action: HookAction instance

        Returns:
            Hook context dictionary
        """
        hook_ctx: dict[str, Any] = {}
        if hook_action.is_prompt():
            hook_ctx["type"] = "prompt"
            hook_ctx["content"] = hook_action.prompt
        elif hook_action.is_prompt_file():
            hook_ctx["type"] = "prompt_file"
            hook_ctx["path"] = hook_action.prompt_file
            # Read the prompt file content
            prompt_file_path = job.job_dir / hook_action.prompt_file
            prompt_content = safe_read(prompt_file_path)
            if prompt_content is None:
                raise GeneratorError(f"Hook prompt file not found: {prompt_file_path}")
            hook_ctx["content"] = prompt_content
        elif hook_action.is_script():
            hook_ctx["type"] = "script"
            hook_ctx["path"] = hook_action.script
        return hook_ctx

    def _build_step_context(
        self, job: JobDefinition, step: Step, step_index: int, adapter: AgentAdapter
    ) -> dict[str, Any]:
        """
        Build template context for a step.

        Args:
            job: Job definition
            step: Step to generate context for
            step_index: Index of step in job (0-based)
            adapter: Agent adapter for platform-specific hook name mapping

        Returns:
            Template context dictionary
        """
        # Read step instructions
        instructions_file = job.job_dir / step.instructions_file
        instructions_content = safe_read(instructions_file)
        if instructions_content is None:
            raise GeneratorError(f"Step instructions file not found: {instructions_file}")

        # Separate user inputs and file inputs
        user_inputs = [
            {"name": inp.name, "description": inp.description}
            for inp in step.inputs
            if inp.is_user_input()
        ]
        file_inputs = [
            {"file": inp.file, "from_step": inp.from_step}
            for inp in step.inputs
            if inp.is_file_input()
        ]

        # Check if this is a standalone step
        is_standalone = self._is_standalone_step(job, step)

        # Determine next and previous steps (only for non-standalone steps)
        next_step = None
        prev_step = None
        if not is_standalone:
            if step_index < len(job.steps) - 1:
                next_step = job.steps[step_index + 1].id
            if step_index > 0:
                prev_step = job.steps[step_index - 1].id

        # Build hooks context for all lifecycle events
        # Structure: {platform_event_name: [hook_contexts]}
        hooks: dict[str, list[dict[str, Any]]] = {}
        for event in LIFECYCLE_HOOK_EVENTS:
            if event in step.hooks:
                # Get platform-specific event name from adapter
                hook_enum = CommandLifecycleHook(event)
                platform_event_name = adapter.get_platform_hook_name(hook_enum)
                if platform_event_name:
                    hook_contexts = [
                        self._build_hook_context(job, hook_action)
                        for hook_action in step.hooks[event]
                    ]
                    if hook_contexts:
                        hooks[platform_event_name] = hook_contexts

        # Backward compatibility: stop_hooks is after_agent hooks
        stop_hooks = hooks.get(
            adapter.get_platform_hook_name(CommandLifecycleHook.AFTER_AGENT) or "Stop", []
        )

        return {
            "job_name": job.name,
            "job_version": job.version,
            "job_summary": job.summary,
            "job_description": job.description,
            "step_id": step.id,
            "step_name": step.name,
            "step_description": step.description,
            "step_number": step_index + 1,  # 1-based for display
            "total_steps": len(job.steps),
            "instructions_file": step.instructions_file,
            "instructions_content": instructions_content,
            "user_inputs": user_inputs,
            "file_inputs": file_inputs,
            "outputs": step.outputs,
            "dependencies": step.dependencies,
            "next_step": next_step,
            "prev_step": prev_step,
            "is_standalone": is_standalone,
            "hooks": hooks,  # New: all hooks by platform event name
            "stop_hooks": stop_hooks,  # Backward compat: after_agent hooks only
        }

    def generate_step_command(
        self,
        job: JobDefinition,
        step: Step,
        adapter: AgentAdapter,
        output_dir: Path | str,
    ) -> Path:
        """
        Generate slash-command file for a single step.

        Args:
            job: Job definition
            step: Step to generate command for
            adapter: Agent adapter for the target platform
            output_dir: Directory to write command file to

        Returns:
            Path to generated command file

        Raises:
            GeneratorError: If generation fails
        """
        output_dir = Path(output_dir)

        # Create commands subdirectory if needed
        commands_dir = output_dir / adapter.commands_dir
        commands_dir.mkdir(parents=True, exist_ok=True)

        # Find step index
        try:
            step_index = next(i for i, s in enumerate(job.steps) if s.id == step.id)
        except StopIteration as e:
            raise GeneratorError(f"Step '{step.id}' not found in job '{job.name}'") from e

        # Build context
        context = self._build_step_context(job, step, step_index, adapter)

        # Load and render template
        env = self._get_jinja_env(adapter)
        try:
            template = env.get_template(adapter.command_template)
        except TemplateNotFound as e:
            raise GeneratorError(f"Template not found: {e}") from e

        try:
            rendered = template.render(**context)
        except Exception as e:
            raise GeneratorError(f"Template rendering failed: {e}") from e

        # Write command file
        command_filename = adapter.get_command_filename(job.name, step.id)
        command_path = commands_dir / command_filename

        try:
            safe_write(command_path, rendered)
        except Exception as e:
            raise GeneratorError(f"Failed to write command file: {e}") from e

        return command_path

    def generate_all_commands(
        self,
        job: JobDefinition,
        adapter: AgentAdapter,
        output_dir: Path | str,
    ) -> list[Path]:
        """
        Generate slash-command files for all steps in a job.

        Args:
            job: Job definition
            adapter: Agent adapter for the target platform
            output_dir: Directory to write command files to

        Returns:
            List of paths to generated command files

        Raises:
            GeneratorError: If generation fails
        """
        command_paths = []

        for step in job.steps:
            command_path = self.generate_step_command(job, step, adapter, output_dir)
            command_paths.append(command_path)

        return command_paths
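
The generator above is driven entirely by a parsed `JobDefinition`, an `AgentAdapter`, and an output directory. A minimal usage sketch follows; it is not part of the packaged code, and it assumes `job` and `adapter` objects are obtained elsewhere (for example via `deepwork.core.parser` and `deepwork.core.adapters`).

```python
# Hypothetical usage sketch, not part of the wheel: `job` and `adapter` are
# assumed to come from deepwork.core.parser / deepwork.core.adapters.
from pathlib import Path

from deepwork.core.generator import CommandGenerator, GeneratorError


def render_job_commands(job, adapter, output_dir: Path) -> list[Path]:
    """Render one slash-command file per step and return the written paths."""
    generator = CommandGenerator()  # falls back to the package's templates/ directory
    try:
        return generator.generate_all_commands(job, adapter, output_dir)
    except GeneratorError as err:
        # Generation errors carry the failing template, step, or file in their message.
        raise SystemExit(f"command generation failed: {err}") from err
```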
deepwork/core/hooks_syncer.py
@@ -0,0 +1,206 @@
"""Hooks syncer for DeepWork - collects and syncs hooks from jobs to platform settings."""

from dataclasses import dataclass, field
from pathlib import Path
from typing import Any

import yaml

from deepwork.core.adapters import AgentAdapter


class HooksSyncError(Exception):
    """Exception raised for hooks sync errors."""

    pass


@dataclass
class HookEntry:
    """Represents a single hook entry for a lifecycle event."""

    script: str  # Script filename
    job_name: str  # Job that provides this hook
    job_dir: Path  # Full path to job directory

    def get_script_path(self, project_path: Path) -> str:
        """
        Get the script path relative to project root.

        Args:
            project_path: Path to project root

        Returns:
            Relative path to script from project root
        """
        # Script path is: .deepwork/jobs/{job_name}/hooks/{script}
        script_path = self.job_dir / "hooks" / self.script
        try:
            return str(script_path.relative_to(project_path))
        except ValueError:
            # If not relative, return the full path
            return str(script_path)


@dataclass
class JobHooks:
    """Hooks configuration for a job."""

    job_name: str
    job_dir: Path
    hooks: dict[str, list[str]] = field(default_factory=dict)  # event -> [scripts]

    @classmethod
    def from_job_dir(cls, job_dir: Path) -> "JobHooks | None":
        """
        Load hooks configuration from a job directory.

        Args:
            job_dir: Path to job directory containing hooks/global_hooks.yml

        Returns:
            JobHooks instance or None if no hooks defined
        """
        hooks_file = job_dir / "hooks" / "global_hooks.yml"
        if not hooks_file.exists():
            return None

        try:
            with open(hooks_file, encoding="utf-8") as f:
                data = yaml.safe_load(f)
        except (yaml.YAMLError, OSError):
            return None

        if not data or not isinstance(data, dict):
            return None

        # Parse hooks - each key is an event, value is list of scripts
        hooks: dict[str, list[str]] = {}
        for event, scripts in data.items():
            if isinstance(scripts, list):
                hooks[event] = [str(s) for s in scripts]
            elif isinstance(scripts, str):
                hooks[event] = [scripts]

        if not hooks:
            return None

        return cls(
            job_name=job_dir.name,
            job_dir=job_dir,
            hooks=hooks,
        )


def collect_job_hooks(jobs_dir: Path) -> list[JobHooks]:
    """
    Collect hooks from all jobs in the jobs directory.

    Args:
        jobs_dir: Path to .deepwork/jobs directory

    Returns:
        List of JobHooks for all jobs with hooks defined
    """
    if not jobs_dir.exists():
        return []

    job_hooks_list = []
    for job_dir in jobs_dir.iterdir():
        if not job_dir.is_dir():
            continue

        job_hooks = JobHooks.from_job_dir(job_dir)
        if job_hooks:
            job_hooks_list.append(job_hooks)

    return job_hooks_list


def merge_hooks_for_platform(
    job_hooks_list: list[JobHooks],
    project_path: Path,
) -> dict[str, list[dict[str, Any]]]:
    """
    Merge hooks from multiple jobs into a single configuration.

    Args:
        job_hooks_list: List of JobHooks from different jobs
        project_path: Path to project root for relative path calculation

    Returns:
        Dict mapping lifecycle events to hook configurations
    """
    merged: dict[str, list[dict[str, Any]]] = {}

    for job_hooks in job_hooks_list:
        for event, scripts in job_hooks.hooks.items():
            if event not in merged:
                merged[event] = []

            for script in scripts:
                entry = HookEntry(
                    script=script,
                    job_name=job_hooks.job_name,
                    job_dir=job_hooks.job_dir,
                )
                script_path = entry.get_script_path(project_path)

                # Create hook configuration for Claude Code format
                hook_config = {
                    "matcher": "",  # Match all
                    "hooks": [
                        {
                            "type": "command",
                            "command": script_path,
                        }
                    ],
                }

                # Check if this hook is already present (avoid duplicates)
                if not _hook_already_present(merged[event], script_path):
                    merged[event].append(hook_config)

    return merged


def _hook_already_present(hooks: list[dict[str, Any]], script_path: str) -> bool:
    """Check if a hook with the given script path is already in the list."""
    for hook in hooks:
        hook_list = hook.get("hooks", [])
        for h in hook_list:
            if h.get("command") == script_path:
                return True
    return False


def sync_hooks_to_platform(
    project_path: Path,
    adapter: AgentAdapter,
    job_hooks_list: list[JobHooks],
) -> int:
    """
    Sync hooks from jobs to a specific platform's settings.

    Args:
        project_path: Path to project root
        adapter: Agent adapter for the target platform
        job_hooks_list: List of JobHooks from jobs

    Returns:
        Number of hooks synced

    Raises:
        HooksSyncError: If sync fails
    """
    # Merge hooks from all jobs
    merged_hooks = merge_hooks_for_platform(job_hooks_list, project_path)

    if not merged_hooks:
        return 0

    # Delegate to adapter's sync_hooks method
    try:
        return adapter.sync_hooks(project_path, merged_hooks)
    except Exception as e:
        raise HooksSyncError(f"Failed to sync hooks: {e}") from e
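
The end-to-end flow of this module is collect, merge, then delegate to the adapter. Below is a hedged usage sketch, not part of the wheel; the `.deepwork/jobs` location comes from the docstrings above, while `adapter` is assumed to be a concrete `AgentAdapter` from `deepwork.core.adapters`.

```python
# Hypothetical usage sketch, not part of the wheel: `adapter` is assumed to be
# a concrete AgentAdapter implementation from deepwork.core.adapters.
from pathlib import Path

from deepwork.core.hooks_syncer import (
    HooksSyncError,
    collect_job_hooks,
    sync_hooks_to_platform,
)


def sync_project_hooks(project_path: Path, adapter) -> int:
    """Collect every job's global_hooks.yml and push the merged hooks to the platform."""
    job_hooks = collect_job_hooks(project_path / ".deepwork" / "jobs")
    if not job_hooks:
        return 0  # nothing to sync
    # merge_hooks_for_platform (called inside) produces entries shaped like
    # {"matcher": "", "hooks": [{"type": "command", "command": "<relative script path>"}]}.
    try:
        return sync_hooks_to_platform(project_path, adapter, job_hooks)
    except HooksSyncError as err:
        raise SystemExit(f"hook sync failed: {err}") from err
```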