ai-push-hooks 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.ai-push-hooks.toml +73 -0
- package/LICENSE +21 -0
- package/README.md +234 -0
- package/bin/ai-push-hooks.js +35 -0
- package/package.json +24 -0
- package/pyproject.toml +38 -0
- package/run.sh +29 -0
- package/src/ai_push_hooks/__init__.py +6 -0
- package/src/ai_push_hooks/__main__.py +3 -0
- package/src/ai_push_hooks/artifacts.py +86 -0
- package/src/ai_push_hooks/cli.py +49 -0
- package/src/ai_push_hooks/config.py +356 -0
- package/src/ai_push_hooks/engine.py +172 -0
- package/src/ai_push_hooks/executors/__init__.py +1 -0
- package/src/ai_push_hooks/executors/apply.py +55 -0
- package/src/ai_push_hooks/executors/assertions.py +44 -0
- package/src/ai_push_hooks/executors/exec.py +413 -0
- package/src/ai_push_hooks/executors/llm.py +308 -0
- package/src/ai_push_hooks/hook.py +130 -0
- package/src/ai_push_hooks/modules/__init__.py +11 -0
- package/src/ai_push_hooks/modules/beads.py +46 -0
- package/src/ai_push_hooks/modules/docs.py +159 -0
- package/src/ai_push_hooks/modules/pr.py +73 -0
- package/src/ai_push_hooks/prompts_builtin.py +135 -0
- package/src/ai_push_hooks/types.py +236 -0
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
[general]
|
|
2
|
+
enabled = true
|
|
3
|
+
allow_push_on_error = false
|
|
4
|
+
require_clean_worktree = false
|
|
5
|
+
skip_on_sync_branch = true
|
|
6
|
+
|
|
7
|
+
[llm]
|
|
8
|
+
runner = "opencode"
|
|
9
|
+
model = "openai/gpt-5.3-codex-spark"
|
|
10
|
+
variant = ""
|
|
11
|
+
timeout_seconds = 800
|
|
12
|
+
max_parallel = 2
|
|
13
|
+
json_max_retries = 2
|
|
14
|
+
invalid_json_feedback_max_chars = 6000
|
|
15
|
+
json_retry_new_session = true
|
|
16
|
+
delete_session_after_run = true
|
|
17
|
+
|
|
18
|
+
[logging]
|
|
19
|
+
level = "status"
|
|
20
|
+
jsonl = true
|
|
21
|
+
dir = ".git/ai-push-hooks/logs"
|
|
22
|
+
capture_llm_transcript = true
|
|
23
|
+
transcript_dir = ".git/ai-push-hooks/transcripts"
|
|
24
|
+
summary_dir = ".git/ai-push-hooks/summaries"
|
|
25
|
+
|
|
26
|
+
[workflow]
|
|
27
|
+
modules = ["docs"]
|
|
28
|
+
|
|
29
|
+
[modules.docs]
|
|
30
|
+
enabled = true
|
|
31
|
+
|
|
32
|
+
[[modules.docs.steps]]
|
|
33
|
+
id = "collect"
|
|
34
|
+
type = "collect"
|
|
35
|
+
collector = "docs_context"
|
|
36
|
+
|
|
37
|
+
[[modules.docs.steps]]
|
|
38
|
+
id = "query"
|
|
39
|
+
type = "llm"
|
|
40
|
+
prompt = """
|
|
41
|
+
Given the attached diff and changed file list, output a JSON array of concise
|
|
42
|
+
documentation search queries. Return JSON only.
|
|
43
|
+
"""
|
|
44
|
+
inputs = ["collect/push.diff", "collect/changed-files.txt"]
|
|
45
|
+
output = "queries.json"
|
|
46
|
+
schema = "string_array"
|
|
47
|
+
|
|
48
|
+
[[modules.docs.steps]]
|
|
49
|
+
id = "analyze"
|
|
50
|
+
type = "llm"
|
|
51
|
+
prompt = """
|
|
52
|
+
Review the diff and matched docs excerpts. Return JSON issues only for factual
|
|
53
|
+
documentation drift caused by the code changes.
|
|
54
|
+
"""
|
|
55
|
+
inputs = ["collect/push.diff", "collect/docs-context.txt", "query/queries.json", "collect/recent-commits.txt"]
|
|
56
|
+
output = "issues.json"
|
|
57
|
+
schema = "docs_issue_array"
|
|
58
|
+
|
|
59
|
+
[[modules.docs.steps]]
|
|
60
|
+
id = "apply"
|
|
61
|
+
type = "apply"
|
|
62
|
+
prompt = """
|
|
63
|
+
Apply the minimum Markdown documentation changes required to fix the detected
|
|
64
|
+
factual drift. Modify only files allowed by the step.
|
|
65
|
+
"""
|
|
66
|
+
inputs = ["collect/push.diff", "collect/docs-context.txt", "analyze/issues.json"]
|
|
67
|
+
allow_paths = ["README.md", "docs/**/*.md"]
|
|
68
|
+
|
|
69
|
+
[[modules.docs.steps]]
|
|
70
|
+
id = "assert"
|
|
71
|
+
type = "assert"
|
|
72
|
+
assertion = "docs_apply_requires_manual_commit"
|
|
73
|
+
inputs = ["apply/result.json"]
|
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 ai-push-hooks contributors
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
1
|
+
# ai-push-hooks
|
|
2
|
+
|
|
3
|
+
AI-assisted pre-push workflow runner for modular repo checks, docs sync, Beads alignment, and PR creation.
|
|
4
|
+
|
|
5
|
+
## Install
|
|
6
|
+
|
|
7
|
+
### Python / uv
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
uv tool install ai-push-hooks
|
|
11
|
+
# or
|
|
12
|
+
pipx install ai-push-hooks
|
|
13
|
+
```
|
|
14
|
+
|
|
15
|
+
### npm
|
|
16
|
+
|
|
17
|
+
```bash
|
|
18
|
+
npm install --save-dev ai-push-hooks
|
|
19
|
+
# or
|
|
20
|
+
pnpm add -D ai-push-hooks
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
The npm binary wraps the bundled Python module, so `python3` (or `python`) must be available.
|
|
24
|
+
|
|
25
|
+
## Maintainer Release
|
|
26
|
+
|
|
27
|
+
This repo supports automated dual publishing to PyPI and npm from a git tag.
|
|
28
|
+
|
|
29
|
+
1. Bump `version` in `pyproject.toml` and `package.json` to the same value.
|
|
30
|
+
2. Commit and tag: `git tag vX.Y.Z`.
|
|
31
|
+
3. Push commit + tag: `git push && git push --tags`.
|
|
32
|
+
|
|
33
|
+
The GitHub Actions release workflow then:
|
|
34
|
+
|
|
35
|
+
- verifies the tag matches both package versions
|
|
36
|
+
- runs tests
|
|
37
|
+
- builds and validates Python distributions
|
|
38
|
+
- smoke-tests the installed Python CLI
|
|
39
|
+
- publishes to PyPI (Trusted Publishing)
|
|
40
|
+
- publishes to npm (`NPM_TOKEN` secret)
|
|
41
|
+
|
|
42
|
+
Required one-time setup:
|
|
43
|
+
|
|
44
|
+
- Configure PyPI Trusted Publisher for this repository.
|
|
45
|
+
- Add repository secret `NPM_TOKEN` with publish access to `ai-push-hooks`.
|
|
46
|
+
|
|
47
|
+
## Commands
|
|
48
|
+
|
|
49
|
+
```bash
|
|
50
|
+
ai-push-hooks hook <remote-name> <remote-url>
|
|
51
|
+
ai-push-hooks init --template minimal-docs
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
`init` supports exactly one template: `minimal-docs`. Use `--force` to overwrite an existing config.
|
|
55
|
+
|
|
56
|
+
## Lefthook Usage
|
|
57
|
+
|
|
58
|
+
```yaml
|
|
59
|
+
pre-push:
|
|
60
|
+
commands:
|
|
61
|
+
ai-push-hooks:
|
|
62
|
+
run: ai-push-hooks hook {1} {2}
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
For local source checkout usage, `./run.sh` works as a wrapper entrypoint.
|
|
66
|
+
|
|
67
|
+
## Configuration
|
|
68
|
+
|
|
69
|
+
Put `.ai-push-hooks.toml` in the target repo root. If no file is present, built-in modular defaults are used.
|
|
70
|
+
|
|
71
|
+
Prompt resolution precedence is:
|
|
72
|
+
|
|
73
|
+
1. inline `prompt`
|
|
74
|
+
2. `prompt_file`
|
|
75
|
+
3. built-in `fallback_prompt_id`
|
|
76
|
+
|
|
77
|
+
Minimal docs example:
|
|
78
|
+
|
|
79
|
+
```toml
|
|
80
|
+
[workflow]
|
|
81
|
+
modules = ["docs"]
|
|
82
|
+
|
|
83
|
+
[modules.docs]
|
|
84
|
+
enabled = true
|
|
85
|
+
|
|
86
|
+
[[modules.docs.steps]]
|
|
87
|
+
id = "collect"
|
|
88
|
+
type = "collect"
|
|
89
|
+
collector = "docs_context"
|
|
90
|
+
|
|
91
|
+
[[modules.docs.steps]]
|
|
92
|
+
id = "query"
|
|
93
|
+
type = "llm"
|
|
94
|
+
prompt = "Return a JSON array of documentation search queries. JSON only."
|
|
95
|
+
inputs = ["collect/push.diff", "collect/changed-files.txt"]
|
|
96
|
+
output = "queries.json"
|
|
97
|
+
schema = "string_array"
|
|
98
|
+
|
|
99
|
+
[[modules.docs.steps]]
|
|
100
|
+
id = "analyze"
|
|
101
|
+
type = "llm"
|
|
102
|
+
prompt = "Return JSON issues only for factual documentation drift."
|
|
103
|
+
inputs = ["collect/push.diff", "collect/docs-context.txt", "query/queries.json", "collect/recent-commits.txt"]
|
|
104
|
+
output = "issues.json"
|
|
105
|
+
schema = "docs_issue_array"
|
|
106
|
+
|
|
107
|
+
[[modules.docs.steps]]
|
|
108
|
+
id = "apply"
|
|
109
|
+
type = "apply"
|
|
110
|
+
prompt = "Apply the minimum Markdown fixes required."
|
|
111
|
+
inputs = ["collect/push.diff", "collect/docs-context.txt", "analyze/issues.json"]
|
|
112
|
+
allow_paths = ["README.md", "docs/**/*.md"]
|
|
113
|
+
|
|
114
|
+
[[modules.docs.steps]]
|
|
115
|
+
id = "assert"
|
|
116
|
+
type = "assert"
|
|
117
|
+
assertion = "docs_apply_requires_manual_commit"
|
|
118
|
+
inputs = ["apply/result.json"]
|
|
119
|
+
```
|
|
120
|
+
|
|
121
|
+
Example config that recreates the current docs + beads + PR behavior through configuration only:
|
|
122
|
+
|
|
123
|
+
The sample below is runnable as-is because each `prompt_file` step also declares a built-in `fallback_prompt_id`. If you add local prompt files, they override the built-ins.
|
|
124
|
+
|
|
125
|
+
```toml
|
|
126
|
+
[workflow]
|
|
127
|
+
modules = ["beads", "docs", "pr"]
|
|
128
|
+
|
|
129
|
+
[modules.beads]
|
|
130
|
+
enabled = true
|
|
131
|
+
|
|
132
|
+
[[modules.beads.steps]]
|
|
133
|
+
id = "collect"
|
|
134
|
+
type = "collect"
|
|
135
|
+
collector = "beads_status_context"
|
|
136
|
+
|
|
137
|
+
[[modules.beads.steps]]
|
|
138
|
+
id = "plan"
|
|
139
|
+
type = "llm"
|
|
140
|
+
prompt_file = ".ai-push-hooks.prompts/beads-status.txt"
|
|
141
|
+
fallback_prompt_id = "beads-plan-basic"
|
|
142
|
+
inputs = ["collect/branch-context.txt", "collect/changed-files.txt", "collect/push.diff", "collect/commits.txt"]
|
|
143
|
+
output = "beads-plan.json"
|
|
144
|
+
schema = "beads_alignment_result"
|
|
145
|
+
|
|
146
|
+
[[modules.beads.steps]]
|
|
147
|
+
id = "apply"
|
|
148
|
+
type = "exec"
|
|
149
|
+
executor = "beads_alignment"
|
|
150
|
+
inputs = ["plan/beads-plan.json"]
|
|
151
|
+
|
|
152
|
+
[[modules.beads.steps]]
|
|
153
|
+
id = "assert"
|
|
154
|
+
type = "assert"
|
|
155
|
+
assertion = "beads_alignment_clean"
|
|
156
|
+
inputs = ["plan/beads-plan.json"]
|
|
157
|
+
|
|
158
|
+
[modules.docs]
|
|
159
|
+
enabled = true
|
|
160
|
+
|
|
161
|
+
[[modules.docs.steps]]
|
|
162
|
+
id = "collect"
|
|
163
|
+
type = "collect"
|
|
164
|
+
collector = "docs_context"
|
|
165
|
+
|
|
166
|
+
[[modules.docs.steps]]
|
|
167
|
+
id = "query"
|
|
168
|
+
type = "llm"
|
|
169
|
+
prompt_file = ".ai-push-hooks.prompts/query.txt"
|
|
170
|
+
fallback_prompt_id = "docs-query-basic"
|
|
171
|
+
inputs = ["collect/push.diff", "collect/changed-files.txt"]
|
|
172
|
+
output = "queries.json"
|
|
173
|
+
schema = "string_array"
|
|
174
|
+
|
|
175
|
+
[[modules.docs.steps]]
|
|
176
|
+
id = "analyze"
|
|
177
|
+
type = "llm"
|
|
178
|
+
prompt_file = ".ai-push-hooks.prompts/analysis.txt"
|
|
179
|
+
fallback_prompt_id = "docs-analysis-basic"
|
|
180
|
+
inputs = ["collect/push.diff", "collect/docs-context.txt", "query/queries.json", "collect/recent-commits.txt"]
|
|
181
|
+
output = "issues.json"
|
|
182
|
+
schema = "docs_issue_array"
|
|
183
|
+
|
|
184
|
+
[[modules.docs.steps]]
|
|
185
|
+
id = "apply"
|
|
186
|
+
type = "apply"
|
|
187
|
+
prompt_file = ".ai-push-hooks.prompts/apply.txt"
|
|
188
|
+
fallback_prompt_id = "docs-apply-basic"
|
|
189
|
+
inputs = ["collect/push.diff", "collect/docs-context.txt", "analyze/issues.json"]
|
|
190
|
+
allow_paths = ["README.md", "docs/**/*.md"]
|
|
191
|
+
|
|
192
|
+
[[modules.docs.steps]]
|
|
193
|
+
id = "assert"
|
|
194
|
+
type = "assert"
|
|
195
|
+
assertion = "docs_apply_requires_manual_commit"
|
|
196
|
+
inputs = ["apply/result.json"]
|
|
197
|
+
|
|
198
|
+
[modules.pr]
|
|
199
|
+
enabled = true
|
|
200
|
+
|
|
201
|
+
[[modules.pr.steps]]
|
|
202
|
+
id = "collect"
|
|
203
|
+
type = "collect"
|
|
204
|
+
collector = "pr_context"
|
|
205
|
+
|
|
206
|
+
[[modules.pr.steps]]
|
|
207
|
+
id = "compose"
|
|
208
|
+
type = "llm"
|
|
209
|
+
prompt_file = ".ai-push-hooks.prompts/create-pr.txt"
|
|
210
|
+
fallback_prompt_id = "pr-compose-basic"
|
|
211
|
+
inputs = ["collect/pr-context.txt", "collect/changed-files.txt", "collect/push.diff", "collect/commits.txt"]
|
|
212
|
+
output = "pr-draft.json"
|
|
213
|
+
schema = "pr_create_payload"
|
|
214
|
+
|
|
215
|
+
[[modules.pr.steps]]
|
|
216
|
+
id = "create"
|
|
217
|
+
type = "exec"
|
|
218
|
+
executor = "gh_pr_create"
|
|
219
|
+
when_env = "AI_PUSH_HOOKS_CREATE_PR"
|
|
220
|
+
inputs = ["compose/pr-draft.json"]
|
|
221
|
+
```
|
|
222
|
+
|
|
223
|
+
## Layout
|
|
224
|
+
|
|
225
|
+
- `src/ai_push_hooks/cli.py` - CLI entrypoint
|
|
226
|
+
- `src/ai_push_hooks/config.py` - config loading and validation
|
|
227
|
+
- `src/ai_push_hooks/engine.py` - scheduler and workflow runtime
|
|
228
|
+
- `src/ai_push_hooks/artifacts.py` - run-directory artifact store
|
|
229
|
+
- `src/ai_push_hooks/prompts_builtin.py` - built-in fallback prompts
|
|
230
|
+
- `src/ai_push_hooks/modules/` - docs, beads, and PR collectors
|
|
231
|
+
- `src/ai_push_hooks/executors/` - LLM, apply, exec, and assertion handlers
|
|
232
|
+
- `run.sh` - source checkout wrapper
|
|
233
|
+
- `bin/ai-push-hooks.js` - npm bin wrapper
|
|
234
|
+
- `.ai-push-hooks.toml` - sample config
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
#!/usr/bin/env node

// npm bin wrapper: execute the bundled Python module `ai_push_hooks`,
// preferring `python3` and falling back to `python` when that binary
// is absent. The bundled `src/` tree is prepended to PYTHONPATH so the
// package works without a pip install.

const { spawnSync } = require('node:child_process');
const path = require('node:path');

const packageRoot = path.resolve(__dirname, '..');
const srcDir = path.join(packageRoot, 'src');
const args = ['-m', 'ai_push_hooks', ...process.argv.slice(2)];

// Copy the current environment and prepend the bundled src/ directory
// to PYTHONPATH (keeping any existing entries after it).
function buildEnv() {
  const env = { ...process.env };
  env.PYTHONPATH = env.PYTHONPATH
    ? `${srcDir}${path.delimiter}${env.PYTHONPATH}`
    : srcDir;
  return env;
}

// Spawn the given interpreter synchronously, inheriting stdio.
function launch(interpreter) {
  return spawnSync(interpreter, args, { stdio: 'inherit', env: buildEnv() });
}

let outcome = launch('python3');
if (outcome.error && outcome.error.code === 'ENOENT') {
  // `python3` not found; retry with the generic `python` name.
  outcome = launch('python');
}

if (outcome.error && outcome.error.code === 'ENOENT') {
  console.error('[ai-push-hooks] python3/python is required but not installed.');
  process.exit(1);
}

// Propagate the child's exit status; treat a missing status as failure.
process.exit(typeof outcome.status === 'number' ? outcome.status : 1);
|
package/package.json
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "ai-push-hooks",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Modular AI push-hook workflow runner",
|
|
5
|
+
"license": "MIT",
|
|
6
|
+
"bin": {
|
|
7
|
+
"ai-push-hooks": "bin/ai-push-hooks.js"
|
|
8
|
+
},
|
|
9
|
+
"scripts": {
|
|
10
|
+
"test": "uv run --with pytest pytest tests -q"
|
|
11
|
+
},
|
|
12
|
+
"files": [
|
|
13
|
+
"bin",
|
|
14
|
+
"src/**/*.py",
|
|
15
|
+
"README.md",
|
|
16
|
+
"run.sh",
|
|
17
|
+
"pyproject.toml",
|
|
18
|
+
"LICENSE",
|
|
19
|
+
".ai-push-hooks.toml"
|
|
20
|
+
],
|
|
21
|
+
"engines": {
|
|
22
|
+
"node": ">=18"
|
|
23
|
+
}
|
|
24
|
+
}
|
package/pyproject.toml
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=69", "wheel"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "ai-push-hooks"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "Modular AI push-hook workflow runner"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
requires-python = ">=3.10"
|
|
11
|
+
license = "MIT"
|
|
12
|
+
authors = [{ name = "ai-push-hooks contributors" }]
|
|
13
|
+
keywords = ["git", "lefthook", "docs", "ai", "pre-push"]
|
|
14
|
+
classifiers = [
|
|
15
|
+
"Development Status :: 3 - Alpha",
|
|
16
|
+
"Intended Audience :: Developers",
|
|
17
|
+
"Programming Language :: Python :: 3",
|
|
18
|
+
"Programming Language :: Python :: 3 :: Only",
|
|
19
|
+
"Programming Language :: Python :: 3.10",
|
|
20
|
+
"Programming Language :: Python :: 3.11",
|
|
21
|
+
"Programming Language :: Python :: 3.12",
|
|
22
|
+
"Topic :: Software Development :: Version Control :: Git",
|
|
23
|
+
]
|
|
24
|
+
|
|
25
|
+
[project.scripts]
|
|
26
|
+
ai-push-hooks = "ai_push_hooks.cli:main"
|
|
27
|
+
|
|
28
|
+
[project.optional-dependencies]
|
|
29
|
+
dev = ["pytest>=8.0"]
|
|
30
|
+
|
|
31
|
+
[tool.setuptools]
|
|
32
|
+
include-package-data = true
|
|
33
|
+
|
|
34
|
+
[tool.setuptools.packages.find]
|
|
35
|
+
where = ["src"]
|
|
36
|
+
|
|
37
|
+
[tool.pytest.ini_options]
|
|
38
|
+
testpaths = ["tests"]
|
package/run.sh
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# Source-checkout wrapper: find a Python interpreter, put the local
# src/ tree on PYTHONPATH, and exec the ai_push_hooks module with all
# arguments passed through. Set AI_PUSH_HOOKS_SKIP=1 to bypass entirely.

set -euo pipefail

if [[ "${AI_PUSH_HOOKS_SKIP:-0}" == "1" ]]; then
  printf '[ai-push-hooks] Skipped (AI_PUSH_HOOKS_SKIP=1).\n' >&2
  exit 0
fi

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Prefer python3; fall back to the generic python name.
py_cmd=""
for candidate in python3 python; do
  if command -v "${candidate}" >/dev/null 2>&1; then
    py_cmd="${candidate}"
    break
  fi
done

if [[ -z "${py_cmd}" ]]; then
  printf '[ai-push-hooks] python3/python is required but not installed.\n' >&2
  exit 1
fi

# Prepend the checkout's src/ directory so the package is importable
# without installation; keep any pre-existing PYTHONPATH entries.
if [[ -d "${script_dir}/src" ]]; then
  export PYTHONPATH="${script_dir}/src${PYTHONPATH:+:${PYTHONPATH}}"
fi

exec "${py_cmd}" -m ai_push_hooks "$@"
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import pathlib
|
|
5
|
+
from datetime import datetime, timezone
|
|
6
|
+
from typing import Any
|
|
7
|
+
from uuid import uuid4
|
|
8
|
+
|
|
9
|
+
from .types import HookError, ModuleRuntimeState
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def generate_run_id() -> str:
    """Return a unique run id: UTC timestamp plus an 8-hex random suffix.

    Shape: ``YYYYMMDDTHHMMSSffffffZ-xxxxxxxx`` — sortable by creation time,
    with the uuid suffix guarding against same-microsecond collisions.
    """
    now = datetime.now(timezone.utc)
    suffix = uuid4().hex[:8]
    return f"{now.strftime('%Y%m%dT%H%M%S%fZ')}-{suffix}"
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class ArtifactStore:
    """Run-directory artifact store for workflow step outputs.

    Artifacts are files written under ``run_dir/<module>/<NN-step>/`` and
    registered on a module's runtime state under string keys:

    - ``"<step>/<name>"`` for same-module artifacts (see :meth:`register`),
    - ``"<module>:<step>/<name>"`` for cross-module artifacts
      (see :meth:`register_external`).
    """

    def __init__(self, run_dir: pathlib.Path) -> None:
        self.run_dir = run_dir

    def prepare(self) -> pathlib.Path:
        """Create the run directory (with parents) and return it."""
        self.run_dir.mkdir(parents=True, exist_ok=True)
        return self.run_dir

    def step_dir(self, module_id: str, step_index: int, step_id: str) -> pathlib.Path:
        """Create and return the per-step directory, e.g. ``<run>/<mod>/03-query``."""
        path = self.run_dir / module_id / f"{step_index:02d}-{step_id}"
        path.mkdir(parents=True, exist_ok=True)
        return path

    def register(
        self,
        state: ModuleRuntimeState,
        step_id: str,
        artifact_name: str,
        path: pathlib.Path,
    ) -> pathlib.Path:
        """Record *path* on *state* under the same-module key ``"<step>/<name>"``."""
        state.artifacts[f"{step_id}/{artifact_name}"] = path
        return path

    def write_text(
        self,
        state: ModuleRuntimeState,
        step_index: int,
        step_id: str,
        artifact_name: str,
        content: str,
    ) -> pathlib.Path:
        """Write *content* as UTF-8 into the step directory and register it."""
        path = self.step_dir(state.module.id, step_index, step_id) / artifact_name
        path.write_text(content, encoding="utf-8")
        return self.register(state, step_id, artifact_name, path)

    def write_json(
        self,
        state: ModuleRuntimeState,
        step_index: int,
        step_id: str,
        artifact_name: str,
        payload: Any,
    ) -> pathlib.Path:
        """Serialize *payload* as pretty-printed ASCII JSON (trailing newline) and register it."""
        path = self.step_dir(state.module.id, step_index, step_id) / artifact_name
        path.write_text(json.dumps(payload, ensure_ascii=True, indent=2) + "\n", encoding="utf-8")
        return self.register(state, step_id, artifact_name, path)

    def resolve_input(self, state: ModuleRuntimeState, reference: str) -> pathlib.Path:
        """Resolve an input *reference* to a previously registered artifact path.

        Accepts ``"<step>/<name>"`` or ``"<module>:<step>/<name>"`` — both are
        looked up verbatim in ``state.artifacts`` (registration stores keys in
        exactly these shapes, so no key reconstruction is needed).

        Raises:
            HookError: if the reference is malformed (``:`` without ``/``) or
                does not match any registered artifact. (Previously a malformed
                reference escaped as a bare ValueError from tuple unpacking.)
        """
        if ":" in reference and "/" not in reference:
            raise HookError(f"Unknown artifact reference: {reference}")
        path = state.artifacts.get(reference)
        if path is None:
            raise HookError(f"Unknown artifact reference: {reference}")
        return path

    def register_external(
        self,
        state: ModuleRuntimeState,
        module_id: str,
        step_id: str,
        artifact_name: str,
        path: pathlib.Path,
    ) -> None:
        """Record *path* under the cross-module key ``"<module>:<step>/<name>"``."""
        state.artifacts[f"{module_id}:{step_id}/{artifact_name}"] = path
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import argparse
|
|
4
|
+
import pathlib
|
|
5
|
+
import sys
|
|
6
|
+
|
|
7
|
+
from .hook import run_hook
|
|
8
|
+
from .prompts_builtin import MINIMAL_DOCS_TEMPLATE
|
|
9
|
+
from .types import HookError
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def _build_parser() -> argparse.ArgumentParser:
|
|
13
|
+
parser = argparse.ArgumentParser(description="AI push hooks workflow runner")
|
|
14
|
+
subparsers = parser.add_subparsers(dest="command", required=True)
|
|
15
|
+
|
|
16
|
+
hook_parser = subparsers.add_parser("hook", help="Run the hook workflow")
|
|
17
|
+
hook_parser.add_argument("remote_name", nargs="?", default="")
|
|
18
|
+
hook_parser.add_argument("remote_url", nargs="?", default="")
|
|
19
|
+
|
|
20
|
+
init_parser = subparsers.add_parser("init", help="Write a starter config")
|
|
21
|
+
init_parser.add_argument("--template", default="minimal-docs")
|
|
22
|
+
init_parser.add_argument("--force", action="store_true")
|
|
23
|
+
return parser
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def init_config(template: str, force: bool, cwd: pathlib.Path | None = None) -> int:
    """Write the starter `.ai-push-hooks.toml` into *cwd* (default: CWD).

    Only the ``minimal-docs`` template exists. Refuses to clobber an
    existing config unless *force* is set. Prints the written path to
    stdout and returns 0.

    Raises:
        HookError: on an unknown template name or a pre-existing config
            without --force.
    """
    if template != "minimal-docs":
        raise HookError("Only `minimal-docs` is supported")
    destination = (cwd or pathlib.Path.cwd()) / ".ai-push-hooks.toml"
    if destination.exists() and not force:
        raise HookError(f"Refusing to overwrite existing config without --force: {destination}")
    destination.write_text(MINIMAL_DOCS_TEMPLATE, encoding="utf-8")
    sys.stdout.write(f"{destination}\n")
    return 0
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def main(argv: list[str] | None = None) -> int:
    """CLI entrypoint: parse *argv* and dispatch to the chosen subcommand.

    Returns the subcommand's exit code; HookError is reported on stderr
    with the `[ai-push-hooks]` prefix and mapped to exit code 1.
    """
    args = _build_parser().parse_args(argv)
    try:
        command = args.command
        if command == "hook":
            return run_hook(args.remote_name, args.remote_url)
        if command == "init":
            return init_config(args.template, args.force)
        # Unreachable with required subparsers; kept as a defensive guard.
        raise HookError(f"Unknown command: {command}")
    except HookError as exc:
        sys.stderr.write(f"[ai-push-hooks] {exc}\n")
        return 1
|