@akiojin/gwt 6.30.3 → 9.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. package/.cargo/config.toml +2 -0
  2. package/.claude-plugin/marketplace.json +18 -0
  3. package/.coderabbit.yaml +8 -0
  4. package/.codex/skills/gwt-fix-issue/scripts/inspect_issue.py +833 -0
  5. package/.dockerignore +63 -0
  6. package/.gitattributes +27 -0
  7. package/.husky/commit-msg +2 -0
  8. package/.husky/pre-commit +9 -0
  9. package/.husky/pre-push +12 -0
  10. package/.markdownlint.json +18 -0
  11. package/.markdownlintignore +2 -0
  12. package/Dockerfile +58 -0
  13. package/README.ja.md +161 -484
  14. package/README.md +164 -444
  15. package/cliff.toml +56 -0
  16. package/clippy.toml +2 -0
  17. package/cmake/ci-disable-native.cmake +16 -0
  18. package/codecov.yml +16 -0
  19. package/commitlint.config.cjs +107 -0
  20. package/deny.toml +35 -0
  21. package/docker-compose.yml +59 -0
  22. package/messages/errors.toml +52 -0
  23. package/package.json +12 -22
  24. package/rustfmt.toml +8 -0
  25. package/scripts/check-e2e-coverage-threshold.mjs +238 -0
  26. package/scripts/entrypoint.sh +36 -25
  27. package/scripts/install-linux-deps.sh +46 -0
  28. package/scripts/postinstall.js +73 -229
  29. package/scripts/release_issue_refs.py +317 -0
  30. package/scripts/run-local-backend-tests-on-commit.sh +15 -0
  31. package/scripts/run-local-e2e-coverage-on-commit.sh +69 -0
  32. package/scripts/run-local-e2e-on-commit.sh +60 -0
  33. package/scripts/test-all.sh +13 -0
  34. package/scripts/test_release_issue_refs.py +257 -0
  35. package/scripts/validate-skill-frontmatter.sh +108 -0
  36. package/scripts/verify-ci-node-toolchain.sh +76 -0
  37. package/scripts/verify-husky-hooks.sh +6 -0
  38. package/scripts/voice-eval.sh +48 -0
  39. package/tests/voice_eval/README.md +53 -0
  40. package/tests/voice_eval/manifest.template.json +55 -0
  41. package/tests/voice_eval/samples/.gitkeep +1 -0
  42. package/tests/voice_eval/script-ja.txt +10 -0
  43. package/vendor/ratatui-core/src/backend/test.rs +1077 -0
  44. package/vendor/ratatui-core/src/backend.rs +405 -0
  45. package/vendor/ratatui-core/src/buffer/assert.rs +71 -0
  46. package/vendor/ratatui-core/src/buffer/buffer.rs +1388 -0
  47. package/vendor/ratatui-core/src/buffer/cell.rs +377 -0
  48. package/vendor/ratatui-core/src/buffer.rs +9 -0
  49. package/vendor/ratatui-core/src/layout/alignment.rs +89 -0
  50. package/vendor/ratatui-core/src/layout/constraint.rs +526 -0
  51. package/vendor/ratatui-core/src/layout/direction.rs +63 -0
  52. package/vendor/ratatui-core/src/layout/flex.rs +212 -0
  53. package/vendor/ratatui-core/src/layout/layout.rs +2838 -0
  54. package/vendor/ratatui-core/src/layout/margin.rs +79 -0
  55. package/vendor/ratatui-core/src/layout/offset.rs +66 -0
  56. package/vendor/ratatui-core/src/layout/position.rs +253 -0
  57. package/vendor/ratatui-core/src/layout/rect/iter.rs +356 -0
  58. package/vendor/ratatui-core/src/layout/rect/ops.rs +136 -0
  59. package/vendor/ratatui-core/src/layout/rect.rs +1114 -0
  60. package/vendor/ratatui-core/src/layout/size.rs +147 -0
  61. package/vendor/ratatui-core/src/layout.rs +333 -0
  62. package/vendor/ratatui-core/src/lib.rs +82 -0
  63. package/vendor/ratatui-core/src/style/anstyle.rs +348 -0
  64. package/vendor/ratatui-core/src/style/color.rs +788 -0
  65. package/vendor/ratatui-core/src/style/palette/material.rs +608 -0
  66. package/vendor/ratatui-core/src/style/palette/tailwind.rs +653 -0
  67. package/vendor/ratatui-core/src/style/palette.rs +6 -0
  68. package/vendor/ratatui-core/src/style/palette_conversion.rs +82 -0
  69. package/vendor/ratatui-core/src/style/stylize.rs +668 -0
  70. package/vendor/ratatui-core/src/style.rs +1069 -0
  71. package/vendor/ratatui-core/src/symbols/bar.rs +51 -0
  72. package/vendor/ratatui-core/src/symbols/block.rs +51 -0
  73. package/vendor/ratatui-core/src/symbols/border.rs +709 -0
  74. package/vendor/ratatui-core/src/symbols/braille.rs +21 -0
  75. package/vendor/ratatui-core/src/symbols/half_block.rs +3 -0
  76. package/vendor/ratatui-core/src/symbols/line.rs +259 -0
  77. package/vendor/ratatui-core/src/symbols/marker.rs +82 -0
  78. package/vendor/ratatui-core/src/symbols/merge.rs +748 -0
  79. package/vendor/ratatui-core/src/symbols/pixel.rs +30 -0
  80. package/vendor/ratatui-core/src/symbols/scrollbar.rs +46 -0
  81. package/vendor/ratatui-core/src/symbols/shade.rs +5 -0
  82. package/vendor/ratatui-core/src/symbols.rs +15 -0
  83. package/vendor/ratatui-core/src/terminal/frame.rs +192 -0
  84. package/vendor/ratatui-core/src/terminal/terminal.rs +926 -0
  85. package/vendor/ratatui-core/src/terminal/viewport.rs +58 -0
  86. package/vendor/ratatui-core/src/terminal.rs +40 -0
  87. package/vendor/ratatui-core/src/text/grapheme.rs +84 -0
  88. package/vendor/ratatui-core/src/text/line.rs +1678 -0
  89. package/vendor/ratatui-core/src/text/masked.rs +149 -0
  90. package/vendor/ratatui-core/src/text/span.rs +904 -0
  91. package/vendor/ratatui-core/src/text/text.rs +1434 -0
  92. package/vendor/ratatui-core/src/text.rs +64 -0
  93. package/vendor/ratatui-core/src/widgets/stateful_widget.rs +193 -0
  94. package/vendor/ratatui-core/src/widgets/widget.rs +174 -0
  95. package/vendor/ratatui-core/src/widgets.rs +9 -0
  96. package/bin/gwt.js +0 -131
  97. package/scripts/postinstall.test.js +0 -71
  98. package/scripts/release-download.js +0 -66
@@ -0,0 +1,833 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ inspect_issue.py - GitHub Issue inspection and analysis tool
4
+
5
+ Fetches Issue data (title, body, state, labels, assignees, comments),
6
+ parses error messages, stack traces, file references, code blocks,
7
+ and cross-references. Classifies the issue type and checks file existence.
8
+ """
9
+ from __future__ import annotations
10
+
11
+ import argparse
12
+ import json
13
+ import os
14
+ import re
15
+ import subprocess
16
+ import sys
17
+ from pathlib import Path
18
+ from typing import Any, Sequence
19
+
20
+
21
+ # =============================================================================
22
+ # Constants
23
+ # =============================================================================
24
+
25
+ ISSUE_TYPES = ("BUG", "FEATURE", "ENHANCEMENT", "DOCUMENTATION", "QUESTION", "UNCLASSIFIED")
26
+
27
+ BUG_LABELS = {"bug", "defect", "regression", "crash", "error"}
28
+ FEATURE_LABELS = {"feature", "feature-request", "enhancement", "improvement"}
29
+ DOCUMENTATION_LABELS = {"documentation", "docs", "doc"}
30
+ QUESTION_LABELS = {"question", "help", "support"}
31
+
32
+ ERROR_PATTERNS = (
33
+ re.compile(r"(?:^|\s)(Error:\s*.+)", re.MULTILINE),
34
+ re.compile(r"(?:^|\s)(TypeError:\s*.+)", re.MULTILINE),
35
+ re.compile(r"(?:^|\s)(ReferenceError:\s*.+)", re.MULTILINE),
36
+ re.compile(r"(?:^|\s)(SyntaxError:\s*.+)", re.MULTILINE),
37
+ re.compile(r"(?:^|\s)(RuntimeError:\s*.+)", re.MULTILINE),
38
+ re.compile(r"(?:^|\s)(ValueError:\s*.+)", re.MULTILINE),
39
+ re.compile(r"(?:^|\s)(KeyError:\s*.+)", re.MULTILINE),
40
+ re.compile(r"(?:^|\s)(AttributeError:\s*.+)", re.MULTILINE),
41
+ re.compile(r"(?:^|\s)(ImportError:\s*.+)", re.MULTILINE),
42
+ re.compile(r"(?:^|\s)(ModuleNotFoundError:\s*.+)", re.MULTILINE),
43
+ re.compile(r"(?:^|\s)(IOError:\s*.+)", re.MULTILINE),
44
+ re.compile(r"(?:^|\s)(OSError:\s*.+)", re.MULTILINE),
45
+ re.compile(r"(?:^|\s)(panicked at\s*.+)", re.MULTILINE),
46
+ re.compile(r"(?:^|\s)(thread '.+' panicked at .+)", re.MULTILINE),
47
+ re.compile(r"(?:^|\s)(FATAL:\s*.+)", re.MULTILINE),
48
+ re.compile(r"(?:^|\s)(FAILED:\s*.+)", re.MULTILINE),
49
+ re.compile(r"(?:^|\s)(error\[E\d+\]:\s*.+)", re.MULTILINE),
50
+ )
51
+
52
+ STACK_TRACE_PATTERNS = (
53
+ re.compile(r"(^\s+at\s+.+$)", re.MULTILINE),
54
+ re.compile(r"(^Traceback \(most recent call last\):.*?)(?=^\S|\Z)", re.MULTILINE | re.DOTALL),
55
+ re.compile(r"(thread '.+' panicked at .+[\s\S]*?stack backtrace:[\s\S]*?)(?=\n\n|\Z)", re.MULTILINE),
56
+ re.compile(r"(^\s*\d+:\s+0x[0-9a-f]+\s+-\s+.+$)", re.MULTILINE),
57
+ )
58
+
59
+ FILE_PATH_PATTERN = re.compile(
60
+ r"(?:^|[\s`\"'(])([a-zA-Z0-9_./-]+\.[a-zA-Z0-9]+(?::\d+)?(?::\d+)?)(?:[\s`\"'),]|$)",
61
+ re.MULTILINE,
62
+ )
63
+
64
+ CROSS_REF_PATTERN = re.compile(
65
+ r"(?:^|[\s(])(?:(?:([a-zA-Z0-9._-]+/[a-zA-Z0-9._-]+))?#(\d+))(?:[\s),.]|$)",
66
+ re.MULTILINE,
67
+ )
68
+
69
+ SECTION_PATTERNS = {
70
+ "steps_to_reproduce": re.compile(
71
+ r"#+\s*(?:Steps?\s+to\s+Reproduce|Reproduction\s+Steps?|How\s+to\s+Reproduce|STR)\s*\n(.*?)(?=\n#+\s|\Z)",
72
+ re.IGNORECASE | re.DOTALL,
73
+ ),
74
+ "expected": re.compile(
75
+ r"#+\s*(?:Expected\s+(?:Behavior|Result|Outcome|Output))\s*\n(.*?)(?=\n#+\s|\Z)",
76
+ re.IGNORECASE | re.DOTALL,
77
+ ),
78
+ "actual": re.compile(
79
+ r"#+\s*(?:Actual\s+(?:Behavior|Result|Outcome|Output)|What\s+(?:Happened|Occurs))\s*\n(.*?)(?=\n#+\s|\Z)",
80
+ re.IGNORECASE | re.DOTALL,
81
+ ),
82
+ }
83
+
84
+ CODE_BLOCK_PATTERN = re.compile(r"```[\w]*\n(.*?)```", re.DOTALL)
85
+
86
+ # File extensions likely to be source code paths
87
+ SOURCE_EXTENSIONS = {
88
+ ".rs", ".py", ".ts", ".tsx", ".js", ".jsx", ".svelte", ".vue",
89
+ ".go", ".java", ".kt", ".c", ".cpp", ".h", ".hpp", ".cs",
90
+ ".rb", ".php", ".swift", ".sh", ".bash", ".zsh",
91
+ ".toml", ".yaml", ".yml", ".json", ".xml", ".html", ".css", ".scss",
92
+ ".md", ".txt", ".cfg", ".ini", ".env",
93
+ }
94
+
95
+
96
class GhResult:
    """Outcome of a `gh` CLI invocation: exit code plus captured streams.

    A returncode of 1 with empty stdout is also used when `gh` could not
    be spawned at all (see run_gh_command).
    """

    # __slots__: these objects are created per subprocess call; skip the
    # per-instance __dict__ and prevent accidental attribute typos.
    __slots__ = ("returncode", "stdout", "stderr")

    def __init__(self, returncode: int, stdout: str, stderr: str):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr

    def __repr__(self) -> str:
        return (
            f"{type(self).__name__}(returncode={self.returncode!r}, "
            f"stdout={self.stdout!r}, stderr={self.stderr!r})"
        )
101
+
102
+
103
+ # =============================================================================
104
+ # Git and GH utilities
105
+ # =============================================================================
106
+
107
def run_gh_command(args: Sequence[str], cwd: Path) -> GhResult:
    """Run `gh` with *args* in *cwd*; never raises on spawn failure.

    A missing or unexecutable `gh` binary is reported as a GhResult with
    returncode 1 and the OS error text in stderr.
    """
    command = ["gh", *args]
    try:
        completed = subprocess.run(
            command,
            cwd=cwd,
            text=True,
            capture_output=True,
            encoding="utf-8",
        )
    except OSError as exc:
        return GhResult(1, "", str(exc))
    return GhResult(completed.returncode, completed.stdout, completed.stderr)
119
+
120
+
121
def find_git_root(start: Path) -> Path | None:
    """Return the Git top-level directory for *start*, or None outside a repo.

    Also returns None when the `git` binary cannot be executed at all.
    """
    try:
        proc = subprocess.run(
            ["git", "rev-parse", "--show-toplevel"],
            cwd=start,
            text=True,
            capture_output=True,
            encoding="utf-8",
        )
    except OSError:
        return None
    if proc.returncode != 0:
        return None
    return Path(proc.stdout.strip())
135
+
136
+
137
def ensure_gh_available(repo_root: Path) -> bool:
    """Check that `gh` is authenticated; on failure print the reason to stderr."""
    status = run_gh_command(["auth", "status"], cwd=repo_root)
    if status.returncode != 0:
        detail = (status.stderr or status.stdout or "").strip()
        print(detail or "Error: gh not authenticated.", file=sys.stderr)
        return False
    return True
144
+
145
+
146
def fetch_repo_slug(repo_root: Path) -> str | None:
    """Return the repository's 'owner/name' slug, or None on any failure."""
    view = run_gh_command(["repo", "view", "--json", "nameWithOwner"], cwd=repo_root)
    if view.returncode != 0:
        return None
    try:
        payload = json.loads(view.stdout or "{}")
    except json.JSONDecodeError:
        return None
    slug = payload.get("nameWithOwner")
    # Normalize to str; an empty/missing slug counts as failure.
    return str(slug) if slug else None
158
+
159
+
160
def parse_repo_owner_name(repo_slug: str) -> tuple[str, str] | None:
    """Parse 'owner/repo' into (owner, repo); None unless exactly one slash."""
    owner, sep, name = repo_slug.partition("/")
    if not sep or "/" in name:
        return None
    return owner, name
166
+
167
+
168
+ # =============================================================================
169
+ # Issue resolution
170
+ # =============================================================================
171
+
172
def extract_issue_number(issue_value: str) -> str | None:
    """Extract an issue number from a bare number or an issue URL."""
    if issue_value.isdigit():
        return issue_value
    url_match = re.search(r"/issues/(\d+)", issue_value)
    return url_match.group(1) if url_match else None
180
+
181
+
182
def resolve_issue(issue_value: str, repo_root: Path) -> str | None:
    """Resolve user input (number, URL, or anything `gh` accepts) to a number.

    Fast path: the value already contains a number. Slow path: ask
    `gh issue view` to resolve it. Errors are printed to stderr and None
    is returned.
    """
    direct = extract_issue_number(issue_value)
    if direct:
        return direct

    view = run_gh_command(
        ["issue", "view", issue_value, "--json", "number"],
        cwd=repo_root,
    )
    if view.returncode != 0:
        detail = (view.stderr or view.stdout or "").strip()
        print(detail or "Error: unable to resolve issue.", file=sys.stderr)
        return None

    try:
        payload = json.loads(view.stdout or "{}")
    except json.JSONDecodeError:
        print("Error: unable to parse issue JSON.", file=sys.stderr)
        return None

    number = payload.get("number")
    if not number:
        print("Error: no issue number found.", file=sys.stderr)
        return None
    return str(number)
205
+
206
+
207
+ # =============================================================================
208
+ # Issue data fetching
209
+ # =============================================================================
210
+
211
def fetch_issue_data(issue_number: str, repo_root: Path) -> dict[str, Any] | None:
    """Fetch issue metadata as a dict; None (with stderr message) on failure."""
    fields = "number,title,body,state,labels,assignees,author,createdAt,updatedAt,url"
    view = run_gh_command(
        ["issue", "view", issue_number, "--json", fields],
        cwd=repo_root,
    )
    if view.returncode != 0:
        detail = (view.stderr or view.stdout or "").strip()
        print(detail or f"Error: failed to fetch issue #{issue_number}.", file=sys.stderr)
        return None
    try:
        return json.loads(view.stdout or "{}")
    except json.JSONDecodeError:
        print("Error: unable to parse issue data.", file=sys.stderr)
        return None
228
+
229
+
230
def fetch_issue_comments(
    issue_number: str,
    repo_root: Path,
    max_comment_length: int = 0,
) -> list[dict[str, Any]]:
    """Fetch every comment on an issue via the paginated REST API.

    Args:
        issue_number: Numeric issue identifier.
        repo_root: Any path inside the target repository.
        max_comment_length: Truncate each comment body to this many
            characters (0 = unlimited).

    Returns:
        Comment dicts with id/author/body/createdAt/htmlUrl keys.

    Raises:
        RuntimeError: When the repository slug cannot be resolved, the
            API call fails, or the payload has an unexpected shape.
    """
    repo_slug = fetch_repo_slug(repo_root)
    if not repo_slug:
        raise RuntimeError("failed to resolve repository slug")

    result = run_gh_command(
        [
            "api",
            "--paginate",
            "--slurp",
            f"repos/{repo_slug}/issues/{issue_number}/comments?per_page=100",
        ],
        cwd=repo_root,
    )
    if result.returncode != 0:
        message = (result.stderr or result.stdout or "").strip()
        raise RuntimeError(message or "failed to fetch issue comments")

    try:
        pages = json.loads(result.stdout or "[]")
    except json.JSONDecodeError as exc:
        # Chain the decode error so the original failure stays visible.
        raise RuntimeError("failed to parse paginated issue comments") from exc

    if not isinstance(pages, list):
        raise RuntimeError("unexpected paginated issue comment payload")

    # --slurp yields a list of pages, each a list of comment objects.
    comments: list[dict[str, Any]] = []
    for page in pages:
        if page is None:
            continue
        if not isinstance(page, list):
            raise RuntimeError("unexpected issue comment page payload")
        comments.extend(comment for comment in page if isinstance(comment, dict))

    formatted: list[dict[str, Any]] = []
    for comment in comments:
        body = (comment.get("body") or "").strip()
        if 0 < max_comment_length < len(body):
            body = body[:max_comment_length] + "..."
        formatted.append({
            "id": comment.get("id"),
            "author": (comment.get("user") or {}).get("login", "unknown"),
            "body": body,
            "createdAt": comment.get("created_at", ""),
            "htmlUrl": comment.get("html_url", ""),
        })
    return formatted
284
+
285
+
286
+ # =============================================================================
287
+ # Timeline events (linked PRs)
288
+ # =============================================================================
289
+
290
def fetch_timeline_linked_prs(
    issue_number: str,
    repo_root: Path,
) -> list[dict[str, Any]]:
    """Fetch PRs linked to an issue via paginated GraphQL timeline events.

    Collects pull requests from CROSS_REFERENCED_EVENT sources and
    CONNECTED_EVENT subjects, de-duplicated by PR number.

    Raises:
        RuntimeError: On slug resolution, API, or payload-shape failures.
    """
    repo_slug = fetch_repo_slug(repo_root)
    if not repo_slug:
        raise RuntimeError("failed to resolve repository slug")

    parsed = parse_repo_owner_name(repo_slug)
    if not parsed:
        raise RuntimeError("failed to parse repository slug")

    owner, repo = parsed

    # pageInfo { hasNextPage, endCursor } is required for `gh api --paginate`.
    query = """
    query($owner: String!, $repo: String!, $number: Int!, $endCursor: String) {
      repository(owner: $owner, name: $repo) {
        issue(number: $number) {
          timelineItems(
            first: 100,
            after: $endCursor,
            itemTypes: [CROSS_REFERENCED_EVENT, CONNECTED_EVENT]
          ) {
            pageInfo {
              hasNextPage
              endCursor
            }
            nodes {
              __typename
              ... on CrossReferencedEvent {
                source {
                  __typename
                  ... on PullRequest {
                    number
                    title
                    state
                    url
                  }
                }
              }
              ... on ConnectedEvent {
                subject {
                  __typename
                  ... on PullRequest {
                    number
                    title
                    state
                    url
                  }
                }
              }
            }
          }
        }
      }
    }
    """

    result = run_gh_command(
        [
            "api", "graphql",
            "--paginate",
            "--slurp",
            "-f", f"query={query}",
            "-f", f"owner={owner}",
            "-f", f"repo={repo}",
            "-F", f"number={issue_number}",
        ],
        cwd=repo_root,
    )

    if result.returncode != 0:
        message = (result.stderr or result.stdout or "").strip()
        raise RuntimeError(message or "failed to fetch linked pull requests")

    try:
        pages = json.loads(result.stdout or "[]")
    except json.JSONDecodeError as exc:
        # Chain the decode error so the original failure stays visible.
        raise RuntimeError("failed to parse linked pull request timeline") from exc

    if not isinstance(pages, list):
        raise RuntimeError("unexpected paginated timeline payload")

    seen: set[int] = set()
    linked_prs: list[dict[str, Any]] = []

    for page in pages:
        # None-safe drill-down: GraphQL may return null for any level
        # (e.g. "data": null alongside an "errors" array).
        data = (page or {}).get("data") or {}
        repository = data.get("repository") or {}
        issue = repository.get("issue") or {}
        timeline = issue.get("timelineItems") or {}
        nodes = timeline.get("nodes", [])
        if not isinstance(nodes, list):
            raise RuntimeError("unexpected timeline node payload")

        for node in nodes:
            if not isinstance(node, dict):
                # Defensive: null entries can appear in the nodes array.
                continue
            typename = node.get("__typename", "")
            pr_data: dict[str, Any] | None = None

            if typename == "CrossReferencedEvent":
                source = node.get("source") or {}
                if source.get("__typename") == "PullRequest":
                    pr_data = source
            elif typename == "ConnectedEvent":
                subject = node.get("subject") or {}
                if subject.get("__typename") == "PullRequest":
                    pr_data = subject

            if pr_data and pr_data.get("number") and pr_data["number"] not in seen:
                seen.add(pr_data["number"])
                linked_prs.append({
                    "number": pr_data["number"],
                    "title": pr_data.get("title", ""),
                    "state": pr_data.get("state", ""),
                    "url": pr_data.get("url", ""),
                })

    return linked_prs
411
+
412
+
413
+ # =============================================================================
414
+ # Body / comment parsing
415
+ # =============================================================================
416
+
417
def extract_error_messages(text: str) -> list[str]:
    """Collect unique error-message lines, preserving discovery order."""
    found: list[str] = []
    unique: set[str] = set()
    for pattern in ERROR_PATTERNS:
        for message in (m.group(1).strip() for m in pattern.finditer(text)):
            if message not in unique:
                unique.add(message)
                found.append(message)
    return found
428
+
429
+
430
def extract_stack_traces(text: str) -> list[str]:
    """Collect unique stack-trace snippets, preserving discovery order."""
    found: list[str] = []
    unique: set[str] = set()
    for pattern in STACK_TRACE_PATTERNS:
        for match in pattern.finditer(text):
            snippet = match.group(1).strip()
            # Multi-line patterns can match empty/whitespace-only spans.
            if snippet and snippet not in unique:
                unique.add(snippet)
                found.append(snippet)
    return found
441
+
442
+
443
def extract_file_references(text: str) -> list[str]:
    """Extract file path references (path/to/file.ext[:line[:col]]) from text."""
    found: list[str] = []
    unique: set[str] = set()
    for match in FILE_PATH_PATTERN.finditer(text):
        candidate = match.group(1).strip()
        path_part = candidate.split(":")[0]
        # Keep only plausible source files with a recognized extension.
        if os.path.splitext(path_part)[1].lower() not in SOURCE_EXTENSIONS:
            continue
        # Drop URLs and degenerate matches.
        if candidate.startswith("http") or len(path_part) < 3:
            continue
        if candidate not in unique:
            unique.add(candidate)
            found.append(candidate)
    return found
461
+
462
+
463
def extract_code_blocks(text: str) -> list[str]:
    """Return the non-empty contents of all fenced code blocks."""
    stripped = (m.group(1).strip() for m in CODE_BLOCK_PATTERN.finditer(text))
    return [block for block in stripped if block]
471
+
472
+
473
def extract_sections(text: str) -> dict[str, str]:
    """Pull out well-known template sections, keyed by SECTION_PATTERNS id."""
    found: dict[str, str] = {}
    for name, pattern in SECTION_PATTERNS.items():
        hit = pattern.search(text)
        if hit is not None:
            found[name] = hit.group(1).strip()
    return found
481
+
482
+
483
def extract_cross_references(text: str) -> list[dict[str, Any]]:
    """Extract #123 / owner/repo#123 references, de-duplicated in order."""
    found: list[dict[str, Any]] = []
    unique: set[str] = set()
    for match in CROSS_REF_PATTERN.finditer(text):
        repo_part = match.group(1) or ""  # empty for same-repo "#123" refs
        number_part = match.group(2)
        key = f"{repo_part}#{number_part}"
        if key in unique:
            continue
        unique.add(key)
        found.append({
            "repo": repo_part,
            "number": int(number_part),
            "ref": key,
        })
    return found
499
+
500
+
501
def parse_all_text(body: str, comments: list[dict[str, Any]]) -> dict[str, Any]:
    """Aggregate parsed context from the issue body plus every comment body."""
    pieces = [body or ""]
    pieces.extend(c.get("body", "") for c in comments if c.get("body", ""))
    combined = "\n\n".join(pieces)

    return {
        "errorMessages": extract_error_messages(combined),
        "stackTraces": extract_stack_traces(combined),
        "fileReferences": extract_file_references(combined),
        "codeBlocks": extract_code_blocks(combined),
        # Template sections are only meaningful in the issue body itself.
        "sections": extract_sections(body or ""),
        "crossReferences": extract_cross_references(combined),
    }
517
+
518
+
519
+ # =============================================================================
520
+ # Issue classification
521
+ # =============================================================================
522
+
523
def classify_issue(
    labels: list[str],
    body: str,
    title: str,
) -> str:
    """Classify the issue type from labels first, then title/body keywords."""
    normalized = {label.lower() for label in labels}

    # Labels are the most reliable signal and win over any heuristic.
    if normalized & BUG_LABELS:
        return "BUG"
    if normalized & FEATURE_LABELS:
        # An explicit feature label beats the generic enhancement bucket.
        return "FEATURE" if normalized & {"feature", "feature-request"} else "ENHANCEMENT"
    if normalized & DOCUMENTATION_LABELS:
        return "DOCUMENTATION"
    if normalized & QUESTION_LABELS:
        return "QUESTION"

    # Keyword heuristics over the combined title and body, checked in
    # priority order (bug indicators win over feature, then question).
    haystack = (title + " " + (body or "")).lower()
    keyword_table = (
        ("BUG", (
            "error", "bug", "crash", "fail", "broken", "regression",
            "panicked", "traceback", "exception", "unexpected",
        )),
        ("FEATURE", (
            "feature request", "would be nice", "please add", "suggestion",
            "propose", "enhancement", "new feature",
        )),
        ("QUESTION", (
            "how do i", "how to", "is it possible", "question",
            "help", "what is the",
        )),
    )
    for verdict, keywords in keyword_table:
        if any(word in haystack for word in keywords):
            return verdict

    return "UNCLASSIFIED"
565
+
566
+
567
+ # =============================================================================
568
+ # File existence check
569
+ # =============================================================================
570
+
571
def check_file_existence(
    file_refs: list[str],
    repo_root: Path,
) -> list[dict[str, Any]]:
    """Report, for each reference, whether the file exists inside the repo.

    Paths that resolve outside the repository root (e.g. via "..") are
    reported as not existing rather than probed.
    """
    root = repo_root.resolve()
    report: list[dict[str, Any]] = []
    for reference in file_refs:
        # Strip any trailing :line[:col] suffix before resolving.
        rel_path = reference.split(":")[0]
        target = (root / rel_path).resolve(strict=False)
        try:
            target.relative_to(root)
        except ValueError:
            present = False
        else:
            present = target.exists()
        report.append({
            "reference": reference,
            "path": rel_path,
            "exists": present,
        })
    return report
593
+
594
+
595
+ # =============================================================================
596
+ # Output rendering
597
+ # =============================================================================
598
+
599
def _render_header(issue: dict[str, Any], issue_type: str) -> None:
    """Print the summary header: title, state, type, people, URL, body."""
    print(f"Issue #{issue.get('number', '?')}: {issue.get('title', '')}")
    print("=" * 60)
    print(f"State: {issue.get('state', '')}")
    print(f"Type: {issue_type}")

    labels = issue.get("labels", [])
    if labels:
        label_names = [lbl.get("name", "") if isinstance(lbl, dict) else str(lbl) for lbl in labels]
        print(f"Labels: {', '.join(label_names)}")

    assignees = issue.get("assignees", [])
    if assignees:
        assignee_names = [a.get("login", "") if isinstance(a, dict) else str(a) for a in assignees]
        print(f"Assignees: {', '.join(assignee_names)}")

    author = issue.get("author", {})
    if isinstance(author, dict) and author.get("login"):
        print(f"Author: @{author['login']}")

    print(f"URL: {issue.get('url', '')}")

    body = issue.get("body", "")
    if body:
        print("\nBODY")
        print("-" * 60)
        print(body)


def _render_parsed(parsed: dict[str, Any], file_checks: list[dict[str, Any]]) -> None:
    """Print extracted sections, errors, traces, file refs, code, cross-refs."""
    sections = parsed.get("sections", {})
    if sections:
        print("\nEXTRACTED SECTIONS")
        print("-" * 60)
        for key, value in sections.items():
            label = key.replace("_", " ").title()
            print(f"\n[{label}]")
            print(value)

    errors = parsed.get("errorMessages", [])
    if errors:
        print(f"\nERROR MESSAGES ({len(errors)})")
        print("-" * 60)
        for i, err in enumerate(errors, 1):
            print(f" [{i}] {err}")

    traces = parsed.get("stackTraces", [])
    if traces:
        print(f"\nSTACK TRACES ({len(traces)})")
        print("-" * 60)
        for i, trace in enumerate(traces, 1):
            print(f" [{i}]")
            for line in trace.splitlines():
                print(f" {line}")

    file_refs = parsed.get("fileReferences", [])
    if file_refs:
        print(f"\nFILE REFERENCES ({len(file_refs)})")
        print("-" * 60)
        # exists is True/False when checked, None when no check ran.
        check_map = {fc["reference"]: fc["exists"] for fc in file_checks}
        for ref in file_refs:
            exists = check_map.get(ref)
            marker = " [EXISTS]" if exists else " [NOT FOUND]" if exists is not None else ""
            print(f" {ref}{marker}")

    code_blocks = parsed.get("codeBlocks", [])
    if code_blocks:
        print(f"\nCODE BLOCKS ({len(code_blocks)})")
        print("-" * 60)
        for i, block in enumerate(code_blocks, 1):
            print(f" [{i}]")
            for line in block.splitlines():
                print(f" {line}")

    cross_refs = parsed.get("crossReferences", [])
    if cross_refs:
        print(f"\nCROSS-REFERENCES ({len(cross_refs)})")
        print("-" * 60)
        for ref in cross_refs:
            print(f" {ref['ref']}")


def _render_comments(comments: list[dict[str, Any]]) -> None:
    """Print each comment with author, date, and indented body."""
    if not comments:
        return
    print(f"\nCOMMENTS ({len(comments)})")
    print("-" * 60)
    for comment in comments:
        author_name = comment.get("author", "unknown")
        created = comment.get("createdAt", "")[:10] if comment.get("createdAt") else ""
        body_text = comment.get("body", "")
        print(f"@{author_name} ({created}):")
        if body_text:
            for line in body_text.splitlines():
                print(f" {line}")
        else:
            print(" (empty)")
        print()


def _render_linked_prs(linked_prs: list[dict[str, Any]]) -> None:
    """Print linked pull requests with state and URL."""
    if not linked_prs:
        return
    print(f"\nLINKED PULL REQUESTS ({len(linked_prs)})")
    print("-" * 60)
    for pr in linked_prs:
        state_str = pr.get("state", "")
        print(f" PR #{pr['number']}: {pr.get('title', '')} [{state_str}]")
        if pr.get("url"):
            print(f" {pr['url']}")


def render_text_output(results: dict[str, Any]) -> None:
    """Render inspection results as a human-readable text report on stdout."""
    _render_header(results.get("issue", {}), results.get("issueType", "UNCLASSIFIED"))
    _render_parsed(results.get("parsed", {}), results.get("fileChecks", []))
    _render_comments(results.get("comments", []))
    _render_linked_prs(results.get("linkedPRs", []))
    print("=" * 60)
719
+
720
+
721
+ # =============================================================================
722
+ # Argument parsing
723
+ # =============================================================================
724
+
725
def parse_args(argv: Sequence[str] | None = None) -> argparse.Namespace:
    """Parse command-line arguments.

    Args:
        argv: Argument list to parse. Defaults to None, which makes
            argparse read ``sys.argv[1:]`` — existing no-argument call
            sites are unchanged, while tests can pass an explicit list.

    Returns:
        Namespace with repo, issue, focus, max_comment_length, json.
    """
    parser = argparse.ArgumentParser(
        description=(
            "Inspect a GitHub Issue: fetch data, parse error context, "
            "extract file references, classify type, and check file existence."
        ),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("--repo", default=".", help="Path inside the target Git repository.")
    parser.add_argument(
        "--issue",
        required=True,
        help="Issue number or URL (required).",
    )
    parser.add_argument(
        "--focus",
        default=None,
        help="Focus area for codebase search narrowing (e.g., 'src/lib/components').",
    )
    parser.add_argument(
        "--max-comment-length",
        type=int,
        default=0,
        help="Max characters per comment body (0 = unlimited).",
    )
    parser.add_argument(
        "--json",
        action="store_true",
        help="Emit JSON instead of text output.",
    )
    return parser.parse_args(argv)
756
+
757
+
758
+ # =============================================================================
759
+ # Main
760
+ # =============================================================================
761
+
762
def main() -> int:
    """Entry point: fetch, parse, classify, and report on one issue.

    Returns 0 on success, 1 on any failure (messages go to stderr).
    """
    args = parse_args()

    # Locate the repository and make sure gh is usable before any API work.
    repo_root = find_git_root(Path(args.repo))
    if repo_root is None:
        print("Error: not inside a Git repository.", file=sys.stderr)
        return 1
    if not ensure_gh_available(repo_root):
        return 1

    issue_number = resolve_issue(args.issue, repo_root)
    if issue_number is None:
        return 1

    issue_data = fetch_issue_data(issue_number, repo_root)
    if issue_data is None:
        return 1

    # Comments and linked PRs report failures by raising RuntimeError.
    try:
        comments = fetch_issue_comments(
            issue_number,
            repo_root,
            max_comment_length=args.max_comment_length,
        )
        linked_prs = fetch_timeline_linked_prs(issue_number, repo_root)
    except RuntimeError as err:
        print(f"Error: {err}", file=sys.stderr)
        return 1

    # Extract context from the body and comments, then classify.
    body = issue_data.get("body") or ""
    parsed = parse_all_text(body, comments)
    label_names = [
        lbl.get("name", "") if isinstance(lbl, dict) else str(lbl)
        for lbl in (issue_data.get("labels") or [])
    ]
    issue_type = classify_issue(label_names, body, issue_data.get("title", ""))
    file_checks = check_file_existence(parsed["fileReferences"], repo_root)

    results: dict[str, Any] = {
        "issue": issue_data,
        "issueType": issue_type,
        "comments": comments,
        "linkedPRs": linked_prs,
        "parsed": parsed,
        "fileChecks": file_checks,
    }
    if args.focus:
        results["focus"] = args.focus

    if args.json:
        print(json.dumps(results, indent=2))
    else:
        render_text_output(results)
    return 0
830
+
831
+
832
if __name__ == "__main__":
    # Script entry point: exit with main()'s status code.
    sys.exit(main())