pi-agent-toolkit 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/dotfiles/AGENTS.md +197 -0
- package/dist/dotfiles/APPEND_SYSTEM.md +78 -0
- package/dist/dotfiles/agent-modes.json +12 -0
- package/dist/dotfiles/agent-skills/exa-search/.env.example +4 -0
- package/dist/dotfiles/agent-skills/exa-search/SKILL.md +234 -0
- package/dist/dotfiles/agent-skills/exa-search/scripts/exa-api.cjs +197 -0
- package/dist/dotfiles/auth.json.template +5 -0
- package/dist/dotfiles/damage-control-rules.yaml +318 -0
- package/dist/dotfiles/extensions/btw.ts +1031 -0
- package/dist/dotfiles/extensions/commit-approval.ts +590 -0
- package/dist/dotfiles/extensions/context.ts +578 -0
- package/dist/dotfiles/extensions/control.ts +1748 -0
- package/dist/dotfiles/extensions/damage-control/index.ts +543 -0
- package/dist/dotfiles/extensions/damage-control/node_modules/.package-lock.json +22 -0
- package/dist/dotfiles/extensions/damage-control/package-lock.json +28 -0
- package/dist/dotfiles/extensions/damage-control/package.json +7 -0
- package/dist/dotfiles/extensions/dirty-repo-guard.ts +56 -0
- package/dist/dotfiles/extensions/exa-enforce.ts +51 -0
- package/dist/dotfiles/extensions/exa-search-tool.ts +384 -0
- package/dist/dotfiles/extensions/execute-command/index.ts +82 -0
- package/dist/dotfiles/extensions/files.ts +1112 -0
- package/dist/dotfiles/extensions/loop.ts +446 -0
- package/dist/dotfiles/extensions/pr-approval.ts +730 -0
- package/dist/dotfiles/extensions/qna-interactive.ts +532 -0
- package/dist/dotfiles/extensions/question-mode.ts +242 -0
- package/dist/dotfiles/extensions/require-session-name-on-exit.ts +141 -0
- package/dist/dotfiles/extensions/review.ts +2091 -0
- package/dist/dotfiles/extensions/session-breakdown.ts +1629 -0
- package/dist/dotfiles/extensions/term-notify.ts +150 -0
- package/dist/dotfiles/extensions/tilldone.ts +527 -0
- package/dist/dotfiles/extensions/todos.ts +2082 -0
- package/dist/dotfiles/extensions/tools.ts +146 -0
- package/dist/dotfiles/extensions/uv.ts +123 -0
- package/dist/dotfiles/global-skills/brainstorm/SKILL.md +10 -0
- package/dist/dotfiles/global-skills/cli-detector/SKILL.md +192 -0
- package/dist/dotfiles/global-skills/gh-issue-creator/SKILL.md +173 -0
- package/dist/dotfiles/global-skills/google-chat-cards-v2/SKILL.md +237 -0
- package/dist/dotfiles/global-skills/google-chat-cards-v2/references/bridge_tap_implementation.md +466 -0
- package/dist/dotfiles/global-skills/technical-docs/SKILL.md +204 -0
- package/dist/dotfiles/global-skills/technical-docs/references/diagrams.md +168 -0
- package/dist/dotfiles/global-skills/technical-docs/references/examples.md +449 -0
- package/dist/dotfiles/global-skills/technical-docs/scripts/validate_docs.py +352 -0
- package/dist/dotfiles/global-skills/whats-new/SKILL.md +159 -0
- package/dist/dotfiles/intercepted-commands/pip +7 -0
- package/dist/dotfiles/intercepted-commands/pip3 +7 -0
- package/dist/dotfiles/intercepted-commands/poetry +10 -0
- package/dist/dotfiles/intercepted-commands/python +104 -0
- package/dist/dotfiles/intercepted-commands/python3 +104 -0
- package/dist/dotfiles/mcp.json.template +32 -0
- package/dist/dotfiles/models.json +27 -0
- package/dist/dotfiles/settings.json +25 -0
- package/dist/index.js +1344 -0
- package/package.json +34 -0
|
@@ -0,0 +1,352 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Validate documentation against technical-docs standards.
|
|
3
|
+
|
|
4
|
+
This script provides automated validation checks for documentation quality,
|
|
5
|
+
helping catch common issues before manual review.
|
|
6
|
+
|
|
7
|
+
Usage:
|
|
8
|
+
python validate_docs.py <file_path>
|
|
9
|
+
python validate_docs.py --check-docstrings <python_file>
|
|
10
|
+
python validate_docs.py --check-links <markdown_file>
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import argparse
|
|
14
|
+
import ast
|
|
15
|
+
import re
|
|
16
|
+
import sys
|
|
17
|
+
from pathlib import Path
|
|
18
|
+
from typing import Any
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class ValidationResult:
    """Accumulates errors, warnings, and passing checks from a validation run."""

    def __init__(self) -> None:
        """Set up empty result buckets."""
        self.errors: list[str] = []
        self.warnings: list[str] = []
        self.passed: list[str] = []

    def add_error(self, message: str) -> None:
        """Record a validation error.

        :param message: Error description
        """
        self.errors.append(f"❌ ERROR: {message}")

    def add_warning(self, message: str) -> None:
        """Record a validation warning.

        :param message: Warning description
        """
        self.warnings.append(f"⚠️ WARNING: {message}")

    def add_pass(self, message: str) -> None:
        """Record a successful check.

        :param message: Success description
        """
        self.passed.append(f"✅ PASS: {message}")

    def has_errors(self) -> bool:
        """Report whether any errors were recorded.

        :returns: True if errors exist
        """
        return bool(self.errors)

    def print_summary(self) -> None:
        """Print a formatted summary of all recorded results."""
        banner = "=" * 60
        print("\n" + banner)
        print("VALIDATION RESULTS")
        print(banner + "\n")

        # Emit each non-empty section in the same order as before:
        # passes, then warnings, then errors.
        for heading, messages in (
            ("Passed Checks:", self.passed),
            ("Warnings:", self.warnings),
            ("Errors:", self.errors),
        ):
            if messages:
                print(heading)
                for msg in messages:
                    print(f"  {msg}")
                print()

        rule = "-" * 60
        print(rule)
        print(
            f"Summary: {len(self.passed)} passed, {len(self.warnings)} warnings, {len(self.errors)} errors"
        )
        print(rule + "\n")
|
87
|
+
|
|
88
|
+
|
|
89
|
+
def validate_docstring_format(file_path: Path) -> ValidationResult:
    """Validate Python docstrings follow Sphinx-style format.

    Checks for:
    - Presence of docstrings on functions/classes
    - Sphinx-style :param and :returns tags
    - Parameter documentation completeness
    - Type hint presence

    Missing files and syntax errors are reported as errors on the returned
    result rather than raised.

    :param file_path: Path to Python file to validate
    :returns: ValidationResult with findings
    """
    result = ValidationResult()

    if not file_path.exists():
        result.add_error(f"File not found: {file_path}")
        return result

    content = file_path.read_text(encoding="utf-8")

    try:
        tree = ast.parse(content, filename=str(file_path))
    except SyntaxError as e:
        result.add_error(f"Python syntax error: {e}")
        return result

    # Include async functions too; collecting only ast.FunctionDef would
    # silently skip every `async def` in the file.
    functions = [
        node
        for node in ast.walk(tree)
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef))
    ]
    classes = [node for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]

    # Check functions
    for func in functions:
        if func.name.startswith("_") and func.name != "__init__":
            continue  # Skip private functions

        docstring = ast.get_docstring(func)

        if not docstring:
            result.add_warning(
                f"Function '{func.name}' at line {func.lineno} has no docstring"
            )
            continue

        # Check for Sphinx-style tags
        has_param = ":param" in docstring
        has_returns = ":returns:" in docstring or ":return:" in docstring

        # Implicit receiver arguments (self/cls) need no documentation.
        params = [arg.arg for arg in func.args.args if arg.arg not in ("self", "cls")]
        if params and not has_param:
            result.add_warning(
                f"Function '{func.name}' at line {func.lineno} has parameters but no :param tags"
            )

        # Annotated return type should be documented
        if func.returns and not has_returns:
            result.add_warning(
                f"Function '{func.name}' at line {func.lineno} has return type but no :returns: tag"
            )

        # Type hints on every real parameter
        if params:
            missing_hints = [
                arg.arg
                for arg in func.args.args
                if arg.annotation is None and arg.arg not in ("self", "cls")
            ]
            if missing_hints:
                result.add_warning(
                    f"Function '{func.name}' at line {func.lineno} missing type hints for: {', '.join(missing_hints)}"
                )

        if docstring and has_param and (not params or has_returns):
            result.add_pass(f"Function '{func.name}' has proper Sphinx docstring")

    # Check classes
    for cls in classes:
        docstring = ast.get_docstring(cls)
        if not docstring:
            result.add_warning(
                f"Class '{cls.name}' at line {cls.lineno} has no docstring"
            )
        else:
            result.add_pass(f"Class '{cls.name}' has docstring")

    return result
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
def validate_markdown_links(file_path: Path) -> ValidationResult:
    """Check markdown inline links and report broken internal file references.

    External URLs (http/https/mailto) and same-document anchor links are
    skipped; relative file links are resolved against the markdown file's
    directory and checked for existence.

    :param file_path: Path to markdown file to validate
    :returns: ValidationResult with findings
    """
    result = ValidationResult()

    if not file_path.exists():
        result.add_error(f"File not found: {file_path}")
        return result

    text = file_path.read_text(encoding="utf-8")

    # Markdown inline links: [text](url)
    links = re.findall(r"\[([^\]]+)\]\(([^)]+)\)", text)

    if not links:
        result.add_pass("No links found to validate")
        return result

    base_dir = file_path.parent

    for label, url in links:
        # External or mail links are out of scope.
        if url.startswith(("http://", "https://", "mailto:", "#")):
            continue

        # Split off any trailing anchor and keep the file part.
        file_ref = url.split("#")[0]
        if not file_ref:
            continue  # Anchor-only link

        # Resolve relative to the markdown file's own directory.
        target = (base_dir / file_ref).resolve()
        if target.exists():
            result.add_pass(f"Valid internal link: [{label}]({url})")
        else:
            result.add_error(
                f"Broken link: [{label}]({url}) - target does not exist: {target}"
            )

    return result
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
def validate_code_blocks(file_path: Path) -> ValidationResult:
    """Check fenced code blocks in a markdown file.

    Verifies every fenced block carries a language tag, and that blocks
    tagged as Python parse cleanly.

    :param file_path: Path to markdown file to validate
    :returns: ValidationResult with findings
    """
    result = ValidationResult()

    if not file_path.exists():
        result.add_error(f"File not found: {file_path}")
        return result

    text = file_path.read_text(encoding="utf-8")

    # Fenced blocks: ```lang\n ... ```
    blocks = re.findall(r"```(\w*)\n(.*?)```", text, re.DOTALL)

    if not blocks:
        result.add_pass("No code blocks found")
        return result

    for index, (language, code) in enumerate(blocks, 1):
        if not language:
            result.add_warning(f"Code block #{index} is missing language tag")
            continue

        result.add_pass(f"Code block #{index} has language tag: {language}")

        # Only Python blocks get a syntax check.
        if language.lower() not in ("python", "py"):
            continue

        try:
            ast.parse(code)
        except SyntaxError as e:
            result.add_warning(
                f"Python code block #{index} has syntax error: {e.msg} at line {e.lineno}"
            )
        else:
            result.add_pass(f"Python code block #{index} has valid syntax")

    return result
|
|
277
|
+
|
|
278
|
+
|
|
279
|
+
def validate_file(file_path: Path, check_type: str | None = None) -> ValidationResult:
    """Validate a file based on its type or specified check.

    With no explicit ``check_type``, ``.py`` files get the docstring check
    and ``.md`` files get both the link and code-block checks.

    :param file_path: Path to file to validate
    :param check_type: Optional specific check type (docstrings, links, code-blocks)
    :returns: Combined validation results
    """
    combined = ValidationResult()

    def _run(label: str, validator) -> None:
        # Run one validator and fold its findings into the combined result.
        # (Replaces three verbatim copies of the same extend logic.)
        print(f"Validating {label} in {file_path}...")
        result = validator(file_path)
        combined.errors.extend(result.errors)
        combined.warnings.extend(result.warnings)
        combined.passed.extend(result.passed)

    is_python = file_path.suffix == ".py"
    is_markdown = file_path.suffix == ".md"

    if check_type == "docstrings" or (check_type is None and is_python):
        _run("docstrings", validate_docstring_format)

    if check_type == "links" or (check_type is None and is_markdown):
        _run("links", validate_markdown_links)

    if check_type == "code-blocks" or (check_type is None and is_markdown):
        _run("code blocks", validate_code_blocks)

    return combined
|
|
310
|
+
|
|
311
|
+
|
|
312
|
+
def main() -> int:
    """Run the documentation validator CLI.

    :returns: Exit code (0 for success, 1 for errors)
    """
    parser = argparse.ArgumentParser(
        description="Validate documentation against technical-docs standards"
    )
    parser.add_argument("file_path", type=Path, help="Path to file to validate")
    parser.add_argument(
        "--check-docstrings",
        action="store_true",
        help="Check Python docstring format",
    )
    parser.add_argument(
        "--check-links", action="store_true", help="Check markdown links"
    )
    parser.add_argument(
        "--check-code-blocks", action="store_true", help="Check markdown code blocks"
    )

    args = parser.parse_args()

    # First matching flag wins; None means "auto-detect by file suffix".
    if args.check_docstrings:
        check_type = "docstrings"
    elif args.check_links:
        check_type = "links"
    elif args.check_code_blocks:
        check_type = "code-blocks"
    else:
        check_type = None

    result = validate_file(args.file_path, check_type)
    result.print_summary()

    return 1 if result.has_errors() else 0
|
|
349
|
+
|
|
350
|
+
|
|
351
|
+
# Script entry point: exit status 0 on success, 1 when validation errors exist.
if __name__ == "__main__":
    sys.exit(main())
|
|
@@ -0,0 +1,159 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: whats-new
|
|
3
|
+
description: Analyze git changes between branches and generate a changelog entry. Use when catching up on repo changes, after merges, or when asking "what's new in the codebase?"
|
|
4
|
+
context: main
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
# whats-new
|
|
8
|
+
|
|
9
|
+
Analyze git changes and generate a changelog entry to help track what's new in the codebase.
|
|
10
|
+
|
|
11
|
+
## Trigger
|
|
12
|
+
|
|
13
|
+
- `/whats-new` - Compare current branch to origin/master (default)
|
|
14
|
+
- `/whats-new <branch>` - Compare to a specific branch
|
|
15
|
+
- `/whats-new --since "2 weeks ago"` - Time-based comparison
|
|
16
|
+
- "What's new in the repo?"
|
|
17
|
+
- "Catch me up on changes"
|
|
18
|
+
|
|
19
|
+
## Workflow
|
|
20
|
+
|
|
21
|
+
### Step 1: Determine Comparison Target
|
|
22
|
+
|
|
23
|
+
Parse the user's input to determine what to compare against:
|
|
24
|
+
- If no argument: compare `HEAD` to `origin/master`
|
|
25
|
+
- If branch name provided: compare `HEAD` to that branch
|
|
26
|
+
- If `--since` provided: use time-based git log
|
|
27
|
+
|
|
28
|
+
### Step 2: Gather Git Information
|
|
29
|
+
|
|
30
|
+
Run these commands to understand the changes:
|
|
31
|
+
|
|
32
|
+
```bash
|
|
33
|
+
# Fetch latest from remote
|
|
34
|
+
git fetch origin
|
|
35
|
+
|
|
36
|
+
# Get commit summary
|
|
37
|
+
git log --oneline HEAD..<target> 2>/dev/null || git log --oneline <target>..HEAD
|
|
38
|
+
|
|
39
|
+
# Get file change statistics
|
|
40
|
+
git diff --stat HEAD..<target> 2>/dev/null || git diff --stat <target>..HEAD
|
|
41
|
+
|
|
42
|
+
# Get list of new files
|
|
43
|
+
{ git diff --name-status HEAD..<target> 2>/dev/null || git diff --name-status <target>..HEAD; } | grep "^A"
|
|
44
|
+
|
|
45
|
+
# Get list of modified files
|
|
46
|
+
git diff --name-only HEAD..<target> 2>/dev/null || git diff --name-only <target>..HEAD
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
### Step 3: Analyze Changes
|
|
50
|
+
|
|
51
|
+
For each significant area of change, provide architectural context:
|
|
52
|
+
|
|
53
|
+
**Areas to analyze:**
|
|
54
|
+
- `src/model/` - New data models, schema changes
|
|
55
|
+
- `src/service/` - New business logic, services
|
|
56
|
+
- `src/tasks/` - Celery tasks, background jobs
|
|
57
|
+
- `src/api/` - New endpoints, API changes
|
|
58
|
+
- `src/cli/` - New commands, CLI enhancements
|
|
59
|
+
- `src/repository/` - Data access patterns
|
|
60
|
+
- `src/schema/` - Pydantic validation schemas
|
|
61
|
+
- `etc/settings.toml` - Configuration changes
|
|
62
|
+
- `migrations/` - Database migrations
|
|
63
|
+
|
|
64
|
+
**For each new file or significant change:**
|
|
65
|
+
1. Read the file to understand its purpose
|
|
66
|
+
2. Identify key classes, functions, patterns
|
|
67
|
+
3. Note how it relates to existing code
|
|
68
|
+
4. Highlight any new dependencies or patterns introduced
|
|
69
|
+
|
|
70
|
+
### Step 4: Generate Changelog Entry
|
|
71
|
+
|
|
72
|
+
Create a markdown file at `.notes/changelog/YYYY-MM-DD-<branch-name>.md` with this structure:
|
|
73
|
+
|
|
74
|
+
```markdown
|
|
75
|
+
---
|
|
76
|
+
date: YYYY-MM-DD
|
|
77
|
+
branch: <branch-name>
|
|
78
|
+
compared_to: <target-branch>
|
|
79
|
+
author: <from git log if available>
|
|
80
|
+
---
|
|
81
|
+
|
|
82
|
+
# <Branch Name> Changes
|
|
83
|
+
|
|
84
|
+
## Summary
|
|
85
|
+
<1-2 sentence overview of what this branch adds>
|
|
86
|
+
|
|
87
|
+
## New Capabilities
|
|
88
|
+
- <Capability 1>: Brief description
|
|
89
|
+
- <Capability 2>: Brief description
|
|
90
|
+
|
|
91
|
+
## New Files
|
|
92
|
+
|
|
93
|
+
### Models
|
|
94
|
+
| File | Purpose |
|
|
95
|
+
|------|---------|
|
|
96
|
+
| `src/model/example.py` | Description |
|
|
97
|
+
|
|
98
|
+
### Services
|
|
99
|
+
| File | Purpose |
|
|
100
|
+
|------|---------|
|
|
101
|
+
| `src/service/example.py` | Description |
|
|
102
|
+
|
|
103
|
+
### Tasks
|
|
104
|
+
| File | Purpose |
|
|
105
|
+
|------|---------|
|
|
106
|
+
| `src/tasks/example.py` | Description |
|
|
107
|
+
|
|
108
|
+
<... other sections as needed ...>
|
|
109
|
+
|
|
110
|
+
## Modified Files
|
|
111
|
+
- `path/to/file.py` - What changed and why
|
|
112
|
+
|
|
113
|
+
## New Patterns to Know
|
|
114
|
+
- **Pattern name**: Explanation of new pattern or convention introduced
|
|
115
|
+
- **Configuration**: Any new settings in `etc/settings.toml`
|
|
116
|
+
|
|
117
|
+
## New CLI Commands
|
|
118
|
+
```bash
|
|
119
|
+
# command - description
|
|
120
|
+
python -m src.cli <command>
|
|
121
|
+
```
|
|
122
|
+
|
|
123
|
+
## New API Endpoints
|
|
124
|
+
| Method | Endpoint | Description |
|
|
125
|
+
|--------|----------|-------------|
|
|
126
|
+
| GET | `/api/v1/...` | Description |
|
|
127
|
+
|
|
128
|
+
## Database Changes
|
|
129
|
+
- New migrations: `migrations/versions/xxx_description.py`
|
|
130
|
+
- Schema changes: ...
|
|
131
|
+
|
|
132
|
+
## Dependencies
|
|
133
|
+
- New packages added to `pyproject.toml`
|
|
134
|
+
```
|
|
135
|
+
|
|
136
|
+
### Step 5: Report Summary
|
|
137
|
+
|
|
138
|
+
After creating the changelog entry:
|
|
139
|
+
1. Display a brief summary to the user
|
|
140
|
+
2. Show the path to the created file
|
|
141
|
+
3. Highlight the most important things to know
|
|
142
|
+
|
|
143
|
+
## Output Location
|
|
144
|
+
|
|
145
|
+
All changelog entries go to: `.notes/changelog/`
|
|
146
|
+
|
|
147
|
+
Naming convention: `YYYY-MM-DD-<branch-name-slug>.md`
|
|
148
|
+
|
|
149
|
+
Examples:
|
|
150
|
+
- `2025-01-13-dmng-3-monitoring.md`
|
|
151
|
+
- `2025-01-10-dmng-2-data-model.md`
|
|
152
|
+
|
|
153
|
+
## Notes
|
|
154
|
+
|
|
155
|
+
- Always fetch from origin before comparing to ensure you have latest changes
|
|
156
|
+
- Focus on architectural significance, not just file listings
|
|
157
|
+
- Identify patterns that the user should adopt in future work
|
|
158
|
+
- If comparing to a branch that has YOUR changes (you're ahead), flip the comparison direction
|
|
159
|
+
- Keep the changelog entry focused and scannable
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
#!/bin/bash

# Shim that blocks poetry and points the user at the uv equivalents.
cat >&2 <<'EOF'
Error: poetry is disabled. Use uv instead:

 To initialize a project: uv init
 To add a dependency: uv add PACKAGE
 To sync dependencies: uv sync
 To run commands: uv run COMMAND

EOF
exit 1
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
#!/bin/bash
|
|
2
|
+
|
|
3
|
+
# Check for disallowed module invocations supplied as a single argument.
# Patterns cover the fused form ("-mpip"), the one-word form with an embedded
# space ("-m pip"), and a bare module name. The split form ("-m" "pip") is
# handled by the second pass below.
for arg in "$@"; do
  case "$arg" in
    -mpip|-m\ pip|pip)
      # pip mutates the environment outside uv's control.
      echo "Error: 'python -m pip' is disabled. Use uv instead:" >&2
      echo "" >&2
      echo " To install a package for a script: uv run --with PACKAGE python script.py" >&2
      echo " To add a dependency to the project: uv add PACKAGE" >&2
      echo "" >&2
      exit 1
      ;;
    -mvenv|-m\ venv|venv)
      echo "Error: 'python -m venv' is disabled. Use uv instead:" >&2
      echo "" >&2
      echo " To create a virtual environment: uv venv" >&2
      echo "" >&2
      exit 1
      ;;
    -mpy_compile|-m\ py_compile|py_compile)
      # py_compile would litter __pycache__ with .pyc files.
      echo "Error: 'python -m py_compile' is disabled because it writes .pyc files to __pycache__." >&2
      echo "" >&2
      echo " To verify syntax without bytecode output: uv run python -m ast path/to/file.py >/dev/null" >&2
      echo "" >&2
      exit 1
      ;;
  esac
done
|
|
30
|
+
|
|
31
|
+
# Check for -m flag followed by pip/venv/py_compile as two separate arguments
# (e.g. `python -m pip install ...`), which the single-argument pass above
# cannot see. `prev` tracks the previous argument across iterations.
prev=""
for arg in "$@"; do
  if [ "$prev" = "-m" ]; then
    case "$arg" in
      pip)
        echo "Error: 'python -m pip' is disabled. Use uv instead:" >&2
        echo "" >&2
        echo " To install a package for a script: uv run --with PACKAGE python script.py" >&2
        echo " To add a dependency to the project: uv add PACKAGE" >&2
        echo "" >&2
        exit 1
        ;;
      venv)
        echo "Error: 'python -m venv' is disabled. Use uv instead:" >&2
        echo "" >&2
        echo " To create a virtual environment: uv venv" >&2
        echo "" >&2
        exit 1
        ;;
      py_compile)
        echo "Error: 'python -m py_compile' is disabled because it writes .pyc files to __pycache__." >&2
        echo "" >&2
        echo " To verify syntax without bytecode output: uv run python -m ast path/to/file.py >/dev/null" >&2
        echo "" >&2
        exit 1
        ;;
    esac
  fi
  prev="$arg"
done
|
|
62
|
+
|
|
63
|
+
# Locate a Python interpreter for uv to hand to `uv run`. Prefer uv-managed
# Python so behaviour matches `uv run python` defaults, and never return an
# interpreter living in this shim directory (that would recurse).
resolve_uv_python() {
  local shim_dir found found_dir name
  shim_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

  # First choice: a uv-managed interpreter, if one is installed.
  found="$(uv python find --managed-python 2>/dev/null || true)"
  if [ -n "$found" ] && [ -x "$found" ]; then
    found_dir="$(cd "$(dirname "$found")" && pwd)"
    if [ "$found_dir" != "$shim_dir" ]; then
      echo "$found"
      return 0
    fi
  fi

  # Fall back to a PATH scan, trying python3 before python and skipping any
  # candidate that resolves back into the shim directory.
  for name in python3 python; do
    while IFS= read -r found; do
      [ -n "$found" ] || continue
      found_dir="$(cd "$(dirname "$found")" && pwd)"
      [ "$found_dir" = "$shim_dir" ] && continue
      echo "$found"
      return 0
    done < <(type -aP "$name" 2>/dev/null)
  done

  return 1
}
|
|
96
|
+
|
|
97
|
+
# Resolve the interpreter once up front; fail loudly if none is found rather
# than letting uv rediscover this shim.
UV_PYTHON="$(resolve_uv_python)"
if [ -z "$UV_PYTHON" ]; then
  echo "Error: Unable to locate a Python interpreter outside intercepted-commands." >&2
  exit 1
fi

# Dispatch through uv with an explicit interpreter path to avoid recursion.
exec uv run --python "$UV_PYTHON" python "$@"
|