@laitszkin/apollo-toolkit 3.13.2 → 3.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (154) hide show
  1. package/AGENTS.md +7 -7
  2. package/CHANGELOG.md +27 -0
  3. package/CLAUDE.md +8 -8
  4. package/analyse-app-logs/SKILL.md +3 -3
  5. package/bin/apollo-toolkit.ts +7 -0
  6. package/codex/codex-memory-manager/SKILL.md +2 -2
  7. package/codex/learn-skill-from-conversations/SKILL.md +3 -3
  8. package/dist/bin/apollo-toolkit.d.ts +2 -0
  9. package/dist/bin/apollo-toolkit.js +7 -0
  10. package/dist/lib/cli.d.ts +41 -0
  11. package/dist/lib/cli.js +655 -0
  12. package/dist/lib/installer.d.ts +59 -0
  13. package/dist/lib/installer.js +404 -0
  14. package/dist/lib/tool-runner.d.ts +19 -0
  15. package/dist/lib/tool-runner.js +536 -0
  16. package/dist/lib/tools/architecture.d.ts +2 -0
  17. package/dist/lib/tools/architecture.js +34 -0
  18. package/dist/lib/tools/create-specs.d.ts +2 -0
  19. package/dist/lib/tools/create-specs.js +175 -0
  20. package/dist/lib/tools/docs-to-voice.d.ts +2 -0
  21. package/dist/lib/tools/docs-to-voice.js +705 -0
  22. package/dist/lib/tools/enforce-video-aspect-ratio.d.ts +2 -0
  23. package/dist/lib/tools/enforce-video-aspect-ratio.js +312 -0
  24. package/dist/lib/tools/extract-conversations.d.ts +2 -0
  25. package/dist/lib/tools/extract-conversations.js +105 -0
  26. package/dist/lib/tools/extract-pdf-text.d.ts +2 -0
  27. package/dist/lib/tools/extract-pdf-text.js +92 -0
  28. package/dist/lib/tools/filter-logs.d.ts +2 -0
  29. package/dist/lib/tools/filter-logs.js +94 -0
  30. package/dist/lib/tools/find-github-issues.d.ts +2 -0
  31. package/dist/lib/tools/find-github-issues.js +176 -0
  32. package/dist/lib/tools/generate-storyboard-images.d.ts +2 -0
  33. package/dist/lib/tools/generate-storyboard-images.js +419 -0
  34. package/dist/lib/tools/log-cli-utils.d.ts +35 -0
  35. package/dist/lib/tools/log-cli-utils.js +233 -0
  36. package/dist/lib/tools/open-github-issue.d.ts +2 -0
  37. package/dist/lib/tools/open-github-issue.js +750 -0
  38. package/dist/lib/tools/read-github-issue.d.ts +2 -0
  39. package/dist/lib/tools/read-github-issue.js +134 -0
  40. package/dist/lib/tools/render-error-book.d.ts +2 -0
  41. package/dist/lib/tools/render-error-book.js +265 -0
  42. package/dist/lib/tools/render-katex.d.ts +2 -0
  43. package/dist/lib/tools/render-katex.js +294 -0
  44. package/dist/lib/tools/review-threads.d.ts +2 -0
  45. package/dist/lib/tools/review-threads.js +491 -0
  46. package/dist/lib/tools/search-logs.d.ts +2 -0
  47. package/dist/lib/tools/search-logs.js +164 -0
  48. package/dist/lib/tools/sync-memory-index.d.ts +2 -0
  49. package/dist/lib/tools/sync-memory-index.js +113 -0
  50. package/dist/lib/tools/validate-openai-agent-config.d.ts +2 -0
  51. package/dist/lib/tools/validate-openai-agent-config.js +184 -0
  52. package/dist/lib/tools/validate-skill-frontmatter.d.ts +2 -0
  53. package/dist/lib/tools/validate-skill-frontmatter.js +118 -0
  54. package/dist/lib/types.d.ts +82 -0
  55. package/dist/lib/types.js +2 -0
  56. package/dist/lib/updater.d.ts +34 -0
  57. package/dist/lib/updater.js +112 -0
  58. package/dist/lib/utils/format.d.ts +2 -0
  59. package/dist/lib/utils/format.js +6 -0
  60. package/dist/lib/utils/terminal.d.ts +12 -0
  61. package/dist/lib/utils/terminal.js +26 -0
  62. package/docs-to-voice/SKILL.md +0 -1
  63. package/generate-spec/SKILL.md +1 -1
  64. package/katex/SKILL.md +1 -2
  65. package/lib/cli.ts +780 -0
  66. package/lib/installer.ts +466 -0
  67. package/lib/tool-runner.ts +561 -0
  68. package/lib/tools/architecture.ts +34 -0
  69. package/lib/tools/create-specs.ts +204 -0
  70. package/lib/tools/docs-to-voice.ts +799 -0
  71. package/lib/tools/enforce-video-aspect-ratio.ts +368 -0
  72. package/lib/tools/extract-conversations.ts +114 -0
  73. package/lib/tools/extract-pdf-text.ts +99 -0
  74. package/lib/tools/filter-logs.ts +118 -0
  75. package/lib/tools/find-github-issues.ts +211 -0
  76. package/lib/tools/generate-storyboard-images.ts +455 -0
  77. package/lib/tools/log-cli-utils.ts +262 -0
  78. package/lib/tools/open-github-issue.ts +930 -0
  79. package/lib/tools/read-github-issue.ts +179 -0
  80. package/lib/tools/render-error-book.ts +300 -0
  81. package/lib/tools/render-katex.ts +325 -0
  82. package/lib/tools/review-threads.ts +590 -0
  83. package/lib/tools/search-logs.ts +200 -0
  84. package/lib/tools/sync-memory-index.ts +114 -0
  85. package/lib/tools/validate-openai-agent-config.ts +209 -0
  86. package/lib/tools/validate-skill-frontmatter.ts +124 -0
  87. package/lib/types.ts +90 -0
  88. package/lib/updater.ts +165 -0
  89. package/lib/utils/format.ts +7 -0
  90. package/lib/utils/terminal.ts +22 -0
  91. package/open-github-issue/SKILL.md +2 -2
  92. package/optimise-skill/SKILL.md +1 -1
  93. package/package.json +13 -4
  94. package/resources/project-architecture/assets/architecture.css +764 -0
  95. package/resources/project-architecture/assets/viewer.client.js +144 -0
  96. package/resources/project-architecture/index.html +42 -0
  97. package/review-spec-related-changes/SKILL.md +1 -1
  98. package/solve-issues-found-during-review/SKILL.md +2 -1
  99. package/tsconfig.json +28 -0
  100. package/analyse-app-logs/scripts/__pycache__/filter_logs_by_time.cpython-312.pyc +0 -0
  101. package/analyse-app-logs/scripts/__pycache__/log_cli_utils.cpython-312.pyc +0 -0
  102. package/analyse-app-logs/scripts/__pycache__/search_logs.cpython-312.pyc +0 -0
  103. package/analyse-app-logs/scripts/filter_logs_by_time.py +0 -64
  104. package/analyse-app-logs/scripts/log_cli_utils.py +0 -112
  105. package/analyse-app-logs/scripts/search_logs.py +0 -137
  106. package/analyse-app-logs/tests/test_filter_logs_by_time.py +0 -95
  107. package/analyse-app-logs/tests/test_search_logs.py +0 -100
  108. package/codex/codex-memory-manager/scripts/extract_recent_conversations.py +0 -369
  109. package/codex/codex-memory-manager/scripts/sync_memory_index.py +0 -130
  110. package/codex/codex-memory-manager/tests/test_extract_recent_conversations.py +0 -177
  111. package/codex/codex-memory-manager/tests/test_memory_template.py +0 -37
  112. package/codex/codex-memory-manager/tests/test_sync_memory_index.py +0 -84
  113. package/codex/learn-skill-from-conversations/scripts/extract_recent_conversations.py +0 -369
  114. package/codex/learn-skill-from-conversations/tests/test_extract_recent_conversations.py +0 -177
  115. package/docs-to-voice/scripts/__pycache__/docs_to_voice.cpython-312.pyc +0 -0
  116. package/docs-to-voice/scripts/docs_to_voice.py +0 -1385
  117. package/docs-to-voice/scripts/docs_to_voice.sh +0 -11
  118. package/docs-to-voice/tests/test_docs_to_voice_api_max_chars.py +0 -210
  119. package/docs-to-voice/tests/test_docs_to_voice_sentence_timeline.py +0 -115
  120. package/docs-to-voice/tests/test_docs_to_voice_settings.py +0 -43
  121. package/docs-to-voice/tests/test_docs_to_voice_shell_wrapper.py +0 -51
  122. package/docs-to-voice/tests/test_docs_to_voice_speech_rate.py +0 -57
  123. package/generate-spec/scripts/__pycache__/create-specs.cpython-312.pyc +0 -0
  124. package/generate-spec/scripts/create-specs +0 -215
  125. package/generate-spec/tests/test_create_specs.py +0 -200
  126. package/init-project-html/scripts/architecture-bootstrap-render.js +0 -16
  127. package/init-project-html/scripts/architecture.js +0 -296
  128. package/katex/scripts/__pycache__/render_katex.cpython-312.pyc +0 -0
  129. package/katex/scripts/render_katex.py +0 -247
  130. package/katex/scripts/render_katex.sh +0 -11
  131. package/katex/tests/test_render_katex.py +0 -174
  132. package/learning-error-book/scripts/render_error_book_json_to_pdf.py +0 -590
  133. package/learning-error-book/tests/test_render_error_book_json_to_pdf.py +0 -134
  134. package/open-github-issue/scripts/__pycache__/open_github_issue.cpython-312.pyc +0 -0
  135. package/open-github-issue/scripts/open_github_issue.py +0 -705
  136. package/open-github-issue/tests/test_open_github_issue.py +0 -381
  137. package/openai-text-to-image-storyboard/scripts/generate_storyboard_images.py +0 -763
  138. package/openai-text-to-image-storyboard/tests/test_generate_storyboard_images.py +0 -177
  139. package/read-github-issue/scripts/__pycache__/find_issues.cpython-312.pyc +0 -0
  140. package/read-github-issue/scripts/__pycache__/read_issue.cpython-312.pyc +0 -0
  141. package/read-github-issue/scripts/find_issues.py +0 -148
  142. package/read-github-issue/scripts/read_issue.py +0 -108
  143. package/read-github-issue/tests/test_find_issues.py +0 -127
  144. package/read-github-issue/tests/test_read_issue.py +0 -109
  145. package/resolve-review-comments/scripts/__pycache__/review_threads.cpython-312.pyc +0 -0
  146. package/resolve-review-comments/scripts/review_threads.py +0 -425
  147. package/resolve-review-comments/tests/test_review_threads.py +0 -74
  148. package/scripts/validate_openai_agent_config.py +0 -209
  149. package/scripts/validate_skill_frontmatter.py +0 -131
  150. package/text-to-short-video/scripts/__pycache__/enforce_video_aspect_ratio.cpython-312.pyc +0 -0
  151. package/text-to-short-video/scripts/enforce_video_aspect_ratio.py +0 -350
  152. package/text-to-short-video/tests/test_enforce_video_aspect_ratio.py +0 -194
  153. package/weekly-financial-event-report/scripts/extract_pdf_text_pdfkit.swift +0 -99
  154. package/weekly-financial-event-report/tests/test_extract_pdf_text_pdfkit.py +0 -64
@@ -1,425 +0,0 @@
1
- #!/usr/bin/env python3
2
- from __future__ import annotations
3
-
4
- import argparse
5
- import json
6
- import subprocess
7
- import sys
8
- from pathlib import Path
9
- from typing import Any
10
-
11
# GraphQL query: page through a PR's review threads (100 per page), including
# up to the first 20 comments of each thread. $after carries the pagination cursor.
LIST_QUERY = """
query($owner: String!, $name: String!, $number: Int!, $after: String) {
  repository(owner: $owner, name: $name) {
    pullRequest(number: $number) {
      reviewThreads(first: 100, after: $after) {
        nodes {
          id
          isResolved
          isOutdated
          path
          line
          startLine
          comments(first: 20) {
            nodes {
              id
              url
              body
              author {
                login
              }
              createdAt
              path
              line
              outdated
            }
          }
        }
        pageInfo {
          hasNextPage
          endCursor
        }
      }
    }
  }
}
"""

# GraphQL mutation: mark a single review thread as resolved; echoes back the
# thread's id and isResolved so the caller can verify the state change.
RESOLVE_MUTATION = """
mutation($threadId: ID!) {
  resolveReviewThread(input: {threadId: $threadId}) {
    thread {
      id
      isResolved
    }
  }
}
"""
58
-
59
-
60
def parse_args() -> argparse.Namespace:
    """Build the CLI (`list` and `resolve` subcommands) and parse sys.argv."""
    parser = argparse.ArgumentParser(
        description="List and resolve GitHub PR review threads via gh graphql."
    )
    subparsers = parser.add_subparsers(dest="command", required=True)

    # `list`: print review threads filtered by resolution state.
    list_parser = subparsers.add_parser("list", help="List review threads.")
    add_common_args(list_parser)
    list_parser.add_argument(
        "--state",
        choices=["unresolved", "resolved", "all"],
        default="unresolved",
        help="Thread state filter.",
    )
    list_parser.add_argument(
        "--output",
        choices=["table", "json"],
        default="table",
        help="Output format.",
    )

    # `resolve`: mark selected threads resolved; IDs may come from repeated
    # flags, a JSON file, or every unresolved thread at once.
    resolve_parser = subparsers.add_parser("resolve", help="Resolve selected threads.")
    add_common_args(resolve_parser)
    resolve_parser.add_argument(
        "--thread-id",
        action="append",
        default=[],
        help="Thread GraphQL ID to resolve (repeatable).",
    )
    resolve_parser.add_argument(
        "--thread-id-file",
        help="Path to JSON file containing thread IDs.",
    )
    resolve_parser.add_argument(
        "--all-unresolved",
        action="store_true",
        help="Resolve every unresolved thread in the PR.",
    )
    resolve_parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Print thread IDs without resolving.",
    )

    return parser.parse_args()
105
-
106
-
107
def add_common_args(parser: argparse.ArgumentParser) -> None:
    """Attach the --repo/--pr options shared by every subcommand."""
    parser.add_argument(
        "--repo",
        help="Target repository in owner/name format.",
    )
    parser.add_argument(
        "--pr",
        type=positive_int,
        help="Pull request number.",
    )
110
-
111
-
112
def positive_int(raw: str) -> int:
    """argparse type callable: parse *raw* as an int and require it to be > 0."""
    parsed = int(raw)
    if parsed > 0:
        return parsed
    raise argparse.ArgumentTypeError("value must be a positive integer")
117
-
118
-
119
def run_gh(cmd: list[str], expect_json: bool = False) -> Any:
    """Run a gh CLI command; return stripped stdout, or parsed JSON when asked.

    Raises RuntimeError for a missing binary, a non-zero exit, or bad JSON.
    """
    try:
        completed = subprocess.run(cmd, check=True, capture_output=True, text=True)
    except FileNotFoundError as exc:
        raise RuntimeError("gh CLI is not installed or not in PATH") from exc
    except subprocess.CalledProcessError as exc:
        # Prefer gh's own stderr; fall back to a generic message when empty.
        raise RuntimeError(exc.stderr.strip() or "gh command failed") from exc

    if expect_json:
        try:
            return json.loads(completed.stdout)
        except json.JSONDecodeError as exc:
            raise RuntimeError("Failed to parse gh JSON output") from exc
    return completed.stdout.strip()
135
-
136
-
137
def parse_owner_repo(repo: str) -> tuple[str, str]:
    """Split 'owner/name' into (owner, name), rejecting any other shape."""
    owner, sep, name = repo.partition("/")
    if not sep or not owner or not name or "/" in name:
        raise ValueError("repo must be in owner/name format")
    return owner, name
142
-
143
-
144
def resolve_repo(repo: str | None) -> str:
    """Return the target repository, validating an explicit value or asking gh."""
    if not repo:
        # No --repo given: let gh infer it from the current working directory.
        return run_gh(["gh", "repo", "view", "--json", "nameWithOwner", "--jq", ".nameWithOwner"])
    parse_owner_repo(repo)  # raises ValueError on a malformed value
    return repo
150
-
151
-
152
def resolve_pr_number(repo: str, pr: int | None) -> int:
    """Return the PR number, asking gh about the current branch when absent."""
    if pr is not None:
        return pr

    raw = run_gh(["gh", "pr", "view", "--repo", repo, "--json", "number", "--jq", ".number"])
    try:
        return int(raw)
    except ValueError as exc:
        raise RuntimeError("Unable to infer PR number from current branch context") from exc
161
-
162
-
163
def gh_graphql(query: str, variables: dict[str, Any]) -> dict[str, Any]:
    """Execute a GraphQL query via `gh api graphql` and return the parsed payload."""
    cmd = ["gh", "api", "graphql", "-f", f"query={query}"]
    for key, value in variables.items():
        # NOTE(review): json.dumps wraps str values in literal quote characters,
        # and gh's -F type coercion passes non-bool/non-number strings through
        # as-is — so a variable like owner may arrive as '"octo"' (quotes
        # included). Confirm against `gh api --help` / real usage.
        cmd.extend(["-F", f"{key}={json.dumps(value)}"])
    return run_gh(cmd, expect_json=True)
168
-
169
-
170
def fetch_review_threads(repo: str, pr_number: int) -> list[dict[str, Any]]:
    """Fetch every review thread of a PR, following GraphQL cursor pagination.

    Raises RuntimeError when the PR does not exist in *repo*.
    """
    owner, name = parse_owner_repo(repo)
    threads: list[dict[str, Any]] = []
    after: str | None = None  # pagination cursor; None requests the first page

    while True:
        payload = gh_graphql(
            LIST_QUERY,
            {
                "owner": owner,
                "name": name,
                "number": pr_number,
                "after": after,
            },
        )
        pr = payload["data"]["repository"]["pullRequest"]
        if pr is None:
            raise RuntimeError(f"PR #{pr_number} not found in {repo}")

        review_threads = pr["reviewThreads"]
        threads.extend(review_threads.get("nodes", []))

        # Stop once GraphQL reports no further page; otherwise advance cursor.
        page_info = review_threads["pageInfo"]
        if not page_info.get("hasNextPage"):
            break
        after = page_info.get("endCursor")

    return threads
198
-
199
-
200
def filter_threads(threads: list[dict[str, Any]], state: str) -> list[dict[str, Any]]:
    """Return the subset of *threads* matching *state* (unresolved/resolved/all)."""
    if state == "all":
        return threads
    keep_resolved = state == "resolved"
    return [thread for thread in threads if bool(thread.get("isResolved")) == keep_resolved]
206
-
207
-
208
def normalize_thread(thread: dict[str, Any]) -> dict[str, Any]:
    """Flatten a raw GraphQL review-thread node into snake_case fields."""
    normalized_comments = []
    for comment in thread.get("comments", {}).get("nodes", []):
        author_node = comment.get("author") or {}
        normalized_comments.append(
            {
                "id": comment.get("id"),
                "url": comment.get("url"),
                "author": author_node.get("login"),
                "body": comment.get("body", ""),
                "created_at": comment.get("createdAt"),
                "path": comment.get("path"),
                "line": comment.get("line"),
                "outdated": comment.get("outdated"),
            }
        )

    return {
        "thread_id": thread.get("id"),
        "is_resolved": thread.get("isResolved"),
        "is_outdated": thread.get("isOutdated"),
        "path": thread.get("path"),
        "line": thread.get("line"),
        "start_line": thread.get("startLine"),
        "comments": normalized_comments,
    }
233
-
234
-
235
def truncate(text: str, width: int) -> str:
    """Clip *text* to *width* characters, appending '...' when room allows."""
    if len(text) <= width:
        return text
    # Too narrow for an ellipsis: hard-cut instead.
    return text[:width] if width <= 3 else text[: width - 3] + "..."
241
-
242
-
243
def preview_body(thread: dict[str, Any]) -> str:
    """One-line preview of the thread's first comment body, or '-' when empty."""
    comments = thread.get("comments", [])
    if not comments:
        return "-"
    flattened = comments[0].get("body", "").replace("\n", " ").strip()
    return truncate(flattened or "-", 72)
249
-
250
-
251
def render_location(thread: dict[str, Any]) -> str:
    """Format 'path:line' for a thread, degrading gracefully on missing data."""
    path = thread.get("path") or "-"
    line = thread.get("line")
    return path if line is None else f"{path}:{line}"
257
-
258
-
259
def print_table(threads: list[dict[str, Any]]) -> None:
    """Print normalized threads as a fixed-width text table on stdout."""
    # Column widths (in characters) for each field.
    widths = {
        "idx": 4,
        "thread": 12,
        "location": 36,
        "author": 18,
        "preview": 72,
    }
    header = (
        f"{'#':<{widths['idx']}} "
        f"{'THREAD_ID':<{widths['thread']}} "
        f"{'LOCATION':<{widths['location']}} "
        f"{'AUTHOR':<{widths['author']}} "
        f"{'COMMENT_PREVIEW':<{widths['preview']}}"
    )
    print(header)
    print("-" * len(header))

    for idx, thread in enumerate(threads, start=1):
        comments = thread.get("comments", [])
        # Attribute the row to the first comment's author, if any.
        author = comments[0].get("author") if comments else "-"
        row = (
            f"{idx:<{widths['idx']}} "
            f"{truncate(thread.get('thread_id', '-') or '-', widths['thread']):<{widths['thread']}} "
            f"{truncate(render_location(thread), widths['location']):<{widths['location']}} "
            f"{truncate(author or '-', widths['author']):<{widths['author']}} "
            f"{preview_body(thread):<{widths['preview']}}"
        )
        print(row)
288
-
289
-
290
def load_thread_ids(path: str) -> list[str]:
    """Read thread IDs from a JSON file, deduped while preserving order.

    Accepts a bare list, or a dict with one of the keys 'thread_ids',
    'adopted_thread_ids', or 'threads' (a list of thread objects).
    """
    payload = json.loads(Path(path).read_text(encoding="utf-8"))

    if isinstance(payload, list):
        candidates = payload
    elif isinstance(payload, dict):
        if "thread_ids" in payload:
            candidates = payload["thread_ids"]
        elif "adopted_thread_ids" in payload:
            candidates = payload["adopted_thread_ids"]
        elif "threads" in payload:
            candidates = [
                entry.get("thread_id")
                for entry in payload["threads"]
                if isinstance(entry, dict)
            ]
        else:
            raise ValueError("JSON must include thread_ids, adopted_thread_ids, or threads")
    else:
        raise ValueError("Unsupported JSON payload for thread IDs")

    # Keep only non-blank strings; dict.fromkeys dedupes in insertion order.
    kept = [entry for entry in candidates if isinstance(entry, str) and entry.strip()]
    return list(dict.fromkeys(kept))
314
-
315
-
316
def collect_thread_ids(args: argparse.Namespace, unresolved_threads: list[dict[str, Any]]) -> list[str]:
    """Gather thread IDs from --all-unresolved, --thread-id flags, and the ID file.

    Returns the IDs deduplicated in first-seen order, with falsy entries dropped.
    """
    gathered: list[str] = []

    if args.all_unresolved:
        gathered += [t["thread_id"] for t in unresolved_threads if t.get("thread_id")]

    gathered += args.thread_id

    if args.thread_id_file:
        gathered += load_thread_ids(args.thread_id_file)

    return list(dict.fromkeys(tid for tid in gathered if tid))
329
-
330
-
331
def resolve_threads(thread_ids: list[str], dry_run: bool) -> tuple[list[str], list[dict[str, str]]]:
    """Resolve each thread via the GraphQL mutation; return (resolved, failed)."""
    resolved: list[str] = []
    failed: list[dict[str, str]] = []

    for thread_id in thread_ids:
        if dry_run:
            # Dry runs report every requested ID as resolved without calling gh.
            resolved.append(thread_id)
            continue

        try:
            payload = gh_graphql(RESOLVE_MUTATION, {"threadId": thread_id})
            thread = payload["data"]["resolveReviewThread"]["thread"]
            if not thread or not thread.get("isResolved"):
                raise RuntimeError("thread did not resolve")
        except Exception as exc:  # pylint: disable=broad-except
            failed.append({"thread_id": thread_id, "error": str(exc)})
        else:
            resolved.append(thread_id)

    return resolved, failed
350
-
351
-
352
def cmd_list(args: argparse.Namespace) -> int:
    """Handle the `list` subcommand: fetch, filter, and print review threads."""
    repo = resolve_repo(args.repo)
    pr_number = resolve_pr_number(repo, args.pr)

    threads = fetch_review_threads(repo, pr_number)
    filtered = filter_threads(threads, args.state)
    normalized = [normalize_thread(item) for item in filtered]

    result = {
        "repo": repo,
        "pr_number": pr_number,
        "state": args.state,
        "thread_count": len(normalized),
        "threads": normalized,
    }

    if args.output == "json":
        print(json.dumps(result, indent=2, ensure_ascii=False))
    else:
        # Human-readable summary followed by a fixed-width table.
        print(f"Repository: {repo}")
        print(f"PR: #{pr_number}")
        print(f"Threads ({args.state}): {len(normalized)}")
        print_table(normalized)

    return 0
377
-
378
-
379
def cmd_resolve(args: argparse.Namespace) -> int:
    """Handle the `resolve` subcommand; print a JSON summary of the outcome.

    Returns 1 when no IDs were selected or when any thread failed to resolve.
    """
    repo = resolve_repo(args.repo)
    pr_number = resolve_pr_number(repo, args.pr)

    threads = fetch_review_threads(repo, pr_number)
    unresolved = [normalize_thread(item) for item in filter_threads(threads, "unresolved")]
    thread_ids = collect_thread_ids(args, unresolved)

    if not thread_ids:
        print(
            "Error: no thread IDs selected. Use --thread-id, --thread-id-file, or --all-unresolved.",
            file=sys.stderr,
        )
        return 1

    resolved, failed = resolve_threads(thread_ids, args.dry_run)

    summary = {
        "repo": repo,
        "pr_number": pr_number,
        "requested": thread_ids,
        "resolved": resolved,
        "failed": failed,
        "dry_run": args.dry_run,
    }
    print(json.dumps(summary, indent=2, ensure_ascii=False))

    # Non-zero exit when any requested thread failed to resolve.
    return 0 if not failed else 1
407
-
408
-
409
def main() -> int:
    """CLI entry point: dispatch to the subcommand, mapping errors to exit code 1."""
    args = parse_args()

    try:
        if args.command == "list":
            return cmd_list(args)
        if args.command == "resolve":
            return cmd_resolve(args)
        # Unreachable while subparsers stay in sync; defensive fallback.
        print(f"Unsupported command: {args.command}", file=sys.stderr)
        return 1
    except Exception as exc:  # pylint: disable=broad-except
        # Top-level boundary: report the error and exit non-zero.
        print(f"Error: {exc}", file=sys.stderr)
        return 1
422
-
423
-
424
if __name__ == "__main__":
    # Propagate the command's exit status to the shell.
    sys.exit(main())
@@ -1,74 +0,0 @@
1
- #!/usr/bin/env python3
2
-
3
- from __future__ import annotations
4
-
5
- import argparse
6
- import importlib.util
7
- import json
8
- import tempfile
9
- import unittest
10
- from pathlib import Path
11
-
12
# Load the script under test directly from its file path so these tests do not
# require the scripts directory to be an importable package.
SCRIPT_PATH = Path(__file__).resolve().parents[1] / "scripts" / "review_threads.py"
SPEC = importlib.util.spec_from_file_location("review_threads", SCRIPT_PATH)
MODULE = importlib.util.module_from_spec(SPEC)
SPEC.loader.exec_module(MODULE)
16
-
17
-
18
class ReviewThreadsTests(unittest.TestCase):
    """Unit tests for the pure helper functions in review_threads.py."""

    def test_parse_owner_repo(self) -> None:
        self.assertEqual(MODULE.parse_owner_repo("octo/repo"), ("octo", "repo"))

    def test_parse_owner_repo_rejects_invalid_format(self) -> None:
        with self.assertRaises(ValueError):
            MODULE.parse_owner_repo("octo")

    def test_parse_owner_repo_rejects_extra_segments(self) -> None:
        with self.assertRaises(ValueError):
            MODULE.parse_owner_repo("octo/repo/extra")

    def test_load_thread_ids_supports_multiple_shapes(self) -> None:
        # Duplicate IDs are dropped while preserving first-seen order.
        payload = {"adopted_thread_ids": ["A", "B", "A"]}
        with tempfile.TemporaryDirectory() as tmp_dir:
            tmp_path = Path(tmp_dir) / "ids.json"
            tmp_path.write_text(json.dumps(payload), encoding="utf-8")
            ids = MODULE.load_thread_ids(str(tmp_path))

        self.assertEqual(ids, ["A", "B"])

    def test_collect_thread_ids_from_flags(self) -> None:
        args = argparse.Namespace(
            all_unresolved=True,
            thread_id=["thread-2"],
            thread_id_file=None,
        )
        unresolved = [{"thread_id": "thread-1"}, {"thread_id": "thread-2"}]

        ids = MODULE.collect_thread_ids(args, unresolved)

        self.assertEqual(ids, ["thread-1", "thread-2"])

    def test_load_thread_ids_ignores_non_dict_thread_entries(self) -> None:
        # Non-dict entries under "threads" are skipped rather than failing.
        payload = {"threads": [{"thread_id": "A"}, "bad", 123, {"thread_id": "B"}]}
        with tempfile.TemporaryDirectory() as tmp_dir:
            tmp_path = Path(tmp_dir) / "ids.json"
            tmp_path.write_text(json.dumps(payload), encoding="utf-8")
            ids = MODULE.load_thread_ids(str(tmp_path))

        self.assertEqual(ids, ["A", "B"])

    def test_render_location_without_line(self) -> None:
        self.assertEqual(MODULE.render_location({"path": "a.txt", "line": None}), "a.txt")

    def test_filter_threads(self) -> None:
        data = [
            {"id": "a", "isResolved": True},
            {"id": "b", "isResolved": False},
        ]
        self.assertEqual(len(MODULE.filter_threads(data, "resolved")), 1)
        self.assertEqual(len(MODULE.filter_threads(data, "unresolved")), 1)
        self.assertEqual(len(MODULE.filter_threads(data, "all")), 2)
71
-
72
-
73
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
@@ -1,209 +0,0 @@
1
- #!/usr/bin/env python3
2
- """Validate agents/openai.yaml for all top-level skills."""
3
-
4
- from __future__ import annotations
5
-
6
- import argparse
7
- import re
8
- import sys
9
- from pathlib import Path
10
-
11
- import yaml
12
-
13
# Keys permitted at the top level of agents/openai.yaml.
TOP_LEVEL_ALLOWED_KEYS = {"interface", "dependencies", "policy"}
# 'interface' keys that must always be present.
INTERFACE_REQUIRED_KEYS = {"display_name", "short_description", "default_prompt"}
# Full set of keys the 'interface' mapping may contain.
INTERFACE_ALLOWED_KEYS = {
    "display_name",
    "short_description",
    "default_prompt",
    "icon_small",
    "icon_large",
    "brand_color",
}
# brand_color must be a 6-digit '#RRGGBB' hex color.
HEX_COLOR_PATTERN = re.compile(r"^#[0-9A-Fa-f]{6}$")

HELP_EPILOG = """Examples:
  apltk validate-openai-agent-config
    Result: prints either a pass summary or one error per invalid agents/openai.yaml file.
"""
29
-
30
-
31
def repo_root() -> Path:
    """Repository root, assuming this script lives one directory below it."""
    return Path(__file__).resolve().parents[1]
33
-
34
-
35
def iter_skill_dirs(root: Path) -> list[Path]:
    """Sorted immediate subdirectories of *root* that contain a SKILL.md."""
    subdirs = (child for child in root.iterdir() if child.is_dir())
    return sorted(child for child in subdirs if (child / "SKILL.md").is_file())
37
-
38
-
39
def extract_frontmatter(content: str) -> dict[str, object]:
    """Parse the YAML frontmatter delimited by '---' lines at the top of SKILL.md.

    Raises ValueError when a delimiter is missing or the YAML is not a mapping.
    """
    lines = content.splitlines()
    if not lines or lines[0].strip() != "---":
        raise ValueError("SKILL.md must start with YAML frontmatter delimiter '---'.")

    # Locate the first closing delimiter after line 0.
    closing = next(
        (idx for idx in range(1, len(lines)) if lines[idx].strip() == "---"),
        None,
    )
    if closing is None:
        raise ValueError("SKILL.md frontmatter is missing the closing '---' delimiter.")

    parsed = yaml.safe_load("\n".join(lines[1:closing]))
    if not isinstance(parsed, dict):
        raise ValueError("SKILL.md frontmatter must be a YAML mapping.")
    return parsed
53
-
54
-
55
def require_non_empty_string(container: dict[str, object], key: str, context: str, errors: list[str]) -> None:
    """Append an error to *errors* unless container[key] is a non-blank string."""
    value = container.get(key)
    is_valid = isinstance(value, str) and bool(value.strip())
    if not is_valid:
        errors.append(f"{context}: '{key}' must be a non-empty string.")
59
-
60
-
61
def validate_dependencies(dependencies: object, context: str, errors: list[str]) -> None:
    """Validate the optional 'dependencies' mapping; append findings to *errors*.

    Only 'mcp' tool entries are currently supported.
    """
    if not isinstance(dependencies, dict):
        errors.append(f"{context}: 'dependencies' must be a mapping.")
        return

    tools = dependencies.get("tools")
    if tools is None:
        # 'tools' is optional; nothing more to check.
        return
    if not isinstance(tools, list):
        errors.append(f"{context}: 'dependencies.tools' must be a list.")
        return

    for index, item in enumerate(tools):
        item_context = f"{context}: dependencies.tools[{index}]"
        if not isinstance(item, dict):
            errors.append(f"{item_context} must be a mapping.")
            continue
        require_non_empty_string(item, "type", item_context, errors)
        require_non_empty_string(item, "value", item_context, errors)

        tool_type = item.get("type")
        if isinstance(tool_type, str) and tool_type != "mcp":
            errors.append(f"{item_context}: unsupported tool type '{tool_type}', only 'mcp' is allowed.")

        # Optional keys may be omitted, but must be non-blank strings if given.
        for optional_key in ("description", "transport", "url"):
            optional_value = item.get(optional_key)
            if optional_value is not None and (not isinstance(optional_value, str) or not optional_value.strip()):
                errors.append(f"{item_context}: '{optional_key}' must be a non-empty string when provided.")
89
-
90
-
91
def validate_policy(policy: object, context: str, errors: list[str]) -> None:
    """Validate the optional 'policy' mapping; append findings to *errors*."""
    if not isinstance(policy, dict):
        errors.append(f"{context}: 'policy' must be a mapping.")
        return

    allow_implicit = policy.get("allow_implicit_invocation")
    if allow_implicit is None or isinstance(allow_implicit, bool):
        return
    errors.append(
        f"{context}: 'policy.allow_implicit_invocation' must be a boolean when provided."
    )
101
-
102
-
103
def validate_skill(skill_dir: Path) -> list[str]:
    """Validate one skill directory's agents/openai.yaml; return error strings.

    Structural failures (unreadable SKILL.md, missing/invalid YAML) abort early
    with a single error; otherwise all interface/dependency/policy findings are
    accumulated and returned together.
    """
    errors: list[str] = []
    skill_md = skill_dir / "SKILL.md"
    openai_yaml = skill_dir / "agents" / "openai.yaml"

    # The skill's name from SKILL.md frontmatter is needed later to check that
    # the default prompt references the skill.
    try:
        skill_frontmatter = extract_frontmatter(skill_md.read_text(encoding="utf-8"))
    except (OSError, ValueError, yaml.YAMLError) as exc:
        return [f"{skill_md}: unable to read skill name for validation ({exc})."]

    skill_name = skill_frontmatter.get("name")
    if not isinstance(skill_name, str) or not skill_name.strip():
        return [f"{skill_md}: frontmatter 'name' must be a non-empty string."]

    if not openai_yaml.is_file():
        return [f"{openai_yaml}: file is required for every skill."]

    try:
        parsed = yaml.safe_load(openai_yaml.read_text(encoding="utf-8"))
    except (OSError, yaml.YAMLError) as exc:
        return [f"{openai_yaml}: invalid YAML ({exc})."]

    if not isinstance(parsed, dict):
        return [f"{openai_yaml}: top-level structure must be a YAML mapping."]

    top_level_keys = set(parsed.keys())
    unsupported_top_keys = sorted(top_level_keys - TOP_LEVEL_ALLOWED_KEYS)
    if unsupported_top_keys:
        errors.append(
            f"{openai_yaml}: unsupported top-level keys: {', '.join(unsupported_top_keys)}."
        )

    interface = parsed.get("interface")
    if not isinstance(interface, dict):
        # Without a valid interface mapping the remaining checks cannot run.
        errors.append(f"{openai_yaml}: 'interface' must be a mapping.")
        return errors

    missing_interface_keys = sorted(INTERFACE_REQUIRED_KEYS - set(interface.keys()))
    if missing_interface_keys:
        errors.append(
            f"{openai_yaml}: missing required interface keys: {', '.join(missing_interface_keys)}."
        )

    unsupported_interface_keys = sorted(set(interface.keys()) - INTERFACE_ALLOWED_KEYS)
    if unsupported_interface_keys:
        errors.append(
            f"{openai_yaml}: unsupported interface keys: {', '.join(unsupported_interface_keys)}."
        )

    for required_key in sorted(INTERFACE_REQUIRED_KEYS):
        require_non_empty_string(interface, required_key, str(openai_yaml), errors)

    # The default prompt must mention the skill via its '$<name>' reference.
    default_prompt = interface.get("default_prompt")
    expected_skill_ref = f"${skill_name.strip()}"
    if isinstance(default_prompt, str) and expected_skill_ref not in default_prompt:
        errors.append(
            f"{openai_yaml}: interface.default_prompt must reference '{expected_skill_ref}'."
        )

    brand_color = interface.get("brand_color")
    if brand_color is not None:
        if not isinstance(brand_color, str) or not HEX_COLOR_PATTERN.fullmatch(brand_color):
            errors.append(f"{openai_yaml}: interface.brand_color must be a hex color like '#1A2B3C'.")

    dependencies = parsed.get("dependencies")
    if dependencies is not None:
        validate_dependencies(dependencies, str(openai_yaml), errors)

    policy = parsed.get("policy")
    if policy is not None:
        validate_policy(policy, str(openai_yaml), errors)

    return errors
176
-
177
-
178
def build_parser() -> argparse.ArgumentParser:
    """CLI parser; the command takes no options beyond -h/--help."""
    parser = argparse.ArgumentParser(
        description="Validate agents/openai.yaml for all top-level skills.",
        epilog=HELP_EPILOG,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    return parser
184
-
185
-
186
def main(argv: list[str] | None = None) -> int:
    """Validate every skill directory; return 0 on success, 1 on any failure."""
    # Parsing handles -h/--help; the command defines no other options.
    build_parser().parse_args(argv)
    root = repo_root()
    skill_dirs = iter_skill_dirs(root)
    if not skill_dirs:
        # Finding no skills at all is treated as a failure.
        print("No top-level skill directories found.")
        return 1

    all_errors: list[str] = []
    for skill_dir in skill_dirs:
        all_errors.extend(validate_skill(skill_dir))

    if all_errors:
        print("agents/openai.yaml validation failed:")
        for error in all_errors:
            print(f"- {error}")
        return 1

    print(f"agents/openai.yaml validation passed for {len(skill_dirs)} skills.")
    return 0
206
-
207
-
208
if __name__ == "__main__":
    # Propagate the validation result to the shell as an exit status.
    sys.exit(main())