ultralytics-actions 0.2.1__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ultralytics-actions might be problematic. Click here for more details.

actions/__init__.py CHANGED
@@ -18,6 +18,7 @@
18
18
  # │ ├── scan_prs.py
19
19
  # │ ├── summarize_pr.py
20
20
  # │ ├── summarize_release.py
21
+ # │ ├── format_python_docstrings.py
21
22
  # │ ├── update_file_headers.py
22
23
  # │ └── update_markdown_code_blocks.py
23
24
  # └── tests/
@@ -26,4 +27,4 @@
26
27
  # ├── test_summarize_pr.py
27
28
  # └── ...
28
29
 
29
- __version__ = "0.2.1"
30
+ __version__ = "0.2.2"
@@ -7,7 +7,7 @@ import time
7
7
 
8
8
  from . import review_pr
9
9
  from .summarize_pr import SUMMARY_MARKER
10
- from .utils import ACTIONS_CREDIT, Action, filter_labels, get_completion, get_pr_open_response, remove_html_comments
10
+ from .utils import ACTIONS_CREDIT, Action, filter_labels, get_pr_open_response, get_response, remove_html_comments
11
11
 
12
12
  BLOCK_USER = os.getenv("BLOCK_USER", "false").lower() == "true"
13
13
  AUTO_PR_REVIEW = os.getenv("REVIEW", "true").lower() == "true"
@@ -91,7 +91,7 @@ YOUR RESPONSE (label names only):
91
91
  },
92
92
  {"role": "user", "content": prompt},
93
93
  ]
94
- suggested_labels = get_completion(messages, temperature=1.0)
94
+ suggested_labels = get_response(messages, temperature=1.0)
95
95
  if "none" in suggested_labels.lower():
96
96
  return []
97
97
 
@@ -170,7 +170,7 @@ YOUR {issue_type.upper()} RESPONSE:
170
170
  },
171
171
  {"role": "user", "content": prompt},
172
172
  ]
173
- return get_completion(messages)
173
+ return get_response(messages)
174
174
 
175
175
 
176
176
  def main(*args, **kwargs):
@@ -0,0 +1,511 @@
1
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
+
3
+ from __future__ import annotations
4
+
5
+ import ast
6
+ import re
7
+ import sys
8
+ import time
9
+ from pathlib import Path
10
+
11
+ SECTIONS = ("Args", "Attributes", "Methods", "Returns", "Yields", "Raises", "Example", "Notes", "References")
12
+ LIST_RX = re.compile(r"""^(\s*)(?:[-*•]\s+|(?:\d+|[A-Za-z]+)[\.\)]\s+)""")
13
+ TABLE_RX = re.compile(r"^\s*\|.*\|\s*$")
14
+ TABLE_RULE_RX = re.compile(r"^\s*[:\-\|\s]{3,}$")
15
+ TREE_CHARS = ("└", "├", "│", "─")
16
+
17
+
18
def wrap_words(words: list[str], width: int, indent: int, min_words_per_line: int = 1) -> list[str]:
    """Greedily wrap words to width with a uniform indent; optionally avoid very short orphan lines.

    Args:
        words: Tokens to lay out, in order.
        width: Maximum total line width, including the indent.
        indent: Number of leading spaces prepended to every emitted line.
        min_words_per_line: Continuation lines with fewer words trigger rebalancing (1 disables it).

    Returns:
        List of padded, space-joined lines; empty list for empty input.
    """
    pad = " " * indent
    if not words:
        return []
    lines: list[list[str]] = []
    cur: list[str] = []
    cur_len = indent  # running width of `cur` including indent and inter-word spaces
    for w in words:
        need = len(w) + (1 if cur else 0)  # +1 for the separating space unless the line is empty
        if cur and cur_len + need > width:
            lines.append(cur)
            cur, cur_len = [w], indent + len(w)
        else:
            cur.append(w)
            cur_len += need
    if cur:
        lines.append(cur)

    # Rebalance to avoid too-short continuation lines when requested
    if min_words_per_line > 1:
        i = 1
        while i < len(lines):
            if len(lines[i]) < min_words_per_line and len(lines[i - 1]) > 1:
                # Pull the previous line's last word down, but only if it still fits within width.
                donor = lines[i - 1][-1]
                this_len = len(pad) + sum(len(w) for w in lines[i]) + (len(lines[i]) - 1)
                if this_len + (1 if lines[i] else 0) + len(donor) <= width:
                    lines[i - 1].pop()
                    lines[i].insert(0, donor)
                    # If the donor line shrank to a single word, revisit it on the next pass.
                    if i - 1 > 0 and len(lines[i - 1]) == 1:
                        i -= 1
                        continue
            i += 1

    return [pad + " ".join(line) for line in lines]
53
+
54
+
55
def wrap_para(text: str, width: int, indent: int, min_words_per_line: int = 1) -> list[str]:
    """Wrap a paragraph string; orphan control via min_words_per_line."""
    stripped = text.strip()
    if not stripped:
        return []
    # Delegate tokenization and layout entirely to wrap_words.
    return wrap_words(stripped.split(), width, indent, min_words_per_line)
61
+
62
+
63
def wrap_hanging(head: str, desc: str, width: int, cont_indent: int) -> list[str]:
    """Wrap 'head + desc' with hanging indent; ensure first continuation has ≥2 words."""
    words = desc.split()
    if not words:
        return [head.rstrip()]

    # Greedily fit as many leading words as possible after the head.
    room = width - len(head)
    fit: list[str] = []
    used = 0
    for word in words:
        cost = len(word) + (1 if fit else 0)
        if used + cost > room:
            break
        fit.append(word)
        used += cost

    if fit:
        result = [head + " ".join(fit)]
        remaining = words[len(fit):]
    else:
        # Nothing fits beside the head; keep it alone and wrap everything below.
        result = [head.rstrip()]
        remaining = words

    # Continuation lines carry the hanging indent and avoid single-word orphans.
    result.extend(wrap_words(remaining, width, cont_indent, min_words_per_line=2))
    return result
89
+
90
+
91
def is_list_item(s: str) -> bool:
    """True if s looks like a bullet/numbered list item."""
    return LIST_RX.match(s.lstrip()) is not None
94
+
95
+
96
def is_fence_line(s: str) -> bool:
    """True if s (ignoring leading whitespace) opens or closes a ``` code fence."""
    return s.lstrip().startswith("```")
99
+
100
+
101
def is_table_like(s: str) -> bool:
    """True if s resembles a markdown table row or a table rule line."""
    return any(rx.match(s) for rx in (TABLE_RX, TABLE_RULE_RX))
103
+
104
+
105
def is_tree_like(s: str) -> bool:
    """True if s contains box-drawing characters typical of ASCII tree diagrams."""
    for ch in TREE_CHARS:
        if ch in s:
            return True
    return False
107
+
108
+
109
def is_indented_block_line(s: str) -> bool:
    """True if s is preformatted: at least 8 leading spaces or a leading tab."""
    return s.startswith(("        ", "\t"))
112
+
113
+
114
def header_name(line: str) -> str | None:
    """Return canonical section header or None."""
    stripped = line.strip()
    # A header is a non-trivial token ending with a colon.
    if len(stripped) <= 1 or not stripped.endswith(":"):
        return None
    candidate = stripped[:-1].strip()
    # Normalize the accepted aliases to their canonical names.
    candidate = {"Examples": "Example", "Note": "Notes"}.get(candidate, candidate)
    return candidate if candidate in SECTIONS else None
125
+
126
+
127
def add_header(lines: list[str], indent: int, title: str, opener_line: str) -> None:
    """Append a section header; no blank before first header, exactly one before subsequent ones."""
    # Drop trailing blank lines so this function fully controls spacing.
    while lines and not lines[-1]:
        lines.pop()
    # Insert one separator blank unless the header directly follows the opener line.
    if lines and lines[-1] != opener_line:
        lines.append("")
    lines.append(f"{' ' * indent}{title}:")
134
+
135
+
136
def emit_paragraphs(
    src: list[str], width: int, indent: int, list_indent: int | None = None, orphan_min: int = 1
) -> list[str]:
    """Wrap normal text; preserve lists, fenced code, tables, ASCII trees, and deeply-indented blocks.

    Consecutive plain-text lines are buffered and re-wrapped as one paragraph;
    any structural line flushes the buffer first and is emitted verbatim.
    """
    out: list[str] = []
    buf: list[str] = []  # pending plain-text lines forming the current paragraph
    in_fence = False  # inside a ``` fenced code block — emit verbatim

    def flush():
        # Join the buffered lines into one paragraph and wrap it to width.
        nonlocal buf
        if buf:
            out.extend(wrap_para(" ".join(x.strip() for x in buf), width, indent, min_words_per_line=orphan_min))
        buf = []

    for raw in src:
        s = raw.rstrip("\n")
        stripped = s.strip()

        # blank line
        if not stripped:
            flush()
            out.append("")
            continue

        # fence start/stop
        if is_fence_line(s):
            flush()
            out.append(s.rstrip())
            in_fence = not in_fence
            continue

        # Preformatted content is passed through untouched.
        if in_fence or is_table_like(s) or is_tree_like(s) or is_indented_block_line(s):
            flush()
            out.append(s.rstrip())
            continue

        # List items end the current paragraph and are optionally re-indented.
        if is_list_item(s):
            flush()
            out.append((" " * list_indent + stripped) if list_indent is not None else s.rstrip())
            continue

        buf.append(s)
    # Flush the final paragraph, then trim trailing blank lines.
    flush()
    while out and out[-1] == "":
        out.pop()
    return out
182
+
183
+
184
def parse_sections(text: str) -> dict[str, list[str]]:
    """Parse Google-style docstring into sections.

    Returns:
        Dict keyed by 'summary', 'description', and every name in SECTIONS;
        each value is the list of raw lines belonging to that section.
    """
    parts = {k: [] for k in ("summary", "description", *SECTIONS)}
    cur = "summary"
    for raw in text.splitlines():
        line = raw.rstrip("\n")
        h = header_name(line)
        if h:
            # A recognized 'Args:'/'Returns:'-style header switches the target section.
            cur = h
            continue
        if not line.strip():
            # The first blank line after summary text starts the description.
            if cur == "summary" and parts["summary"]:
                cur = "description"
            # Keep interior blank lines, but never start a section with one.
            if parts[cur]:
                parts[cur].append("")
            continue
        parts[cur].append(line)
    return parts
202
+
203
+
204
def looks_like_param(s: str) -> bool:
    """Heuristic: 'name:' without being a list item."""
    if ":" not in s or is_list_item(s):
        return False
    # There must be a non-empty token before the first colon.
    return bool(s.split(":", 1)[0].strip())
210
+
211
+
212
def iter_items(lines: list[str]) -> list[list[str]]:
    """Group lines into logical items separated by next param."""
    groups: list[list[str]] = []
    pos, total = 0, len(lines)
    while pos < total:
        # Skip blank separator lines between items.
        while pos < total and not lines[pos].strip():
            pos += 1
        if pos >= total:
            break
        current = [lines[pos]]
        pos += 1
        # Absorb continuation lines until the next 'name:' entry begins.
        while pos < total:
            text = lines[pos].strip()
            if text and looks_like_param(text):
                break
            current.append(lines[pos])
            pos += 1
        groups.append(current)
    return groups
230
+
231
+
232
def format_structured_block(lines: list[str], width: int, base: int) -> list[str]:
    """Format Args/Returns/etc.; continuation at base+4, lists at base+8. Preserve missing colons."""
    out: list[str] = []
    cont, lst = base + 4, base + 8
    for item in iter_items(lines):
        first = item[0].strip()
        # Split 'name: description'; the "" pad handles entries without a colon.
        name, desc = ([*first.split(":", 1), ""])[:2]
        name, desc = name.strip(), desc.strip()
        had_colon = ":" in first

        # A multi-word "name" without parentheses is prose, not a parameter entry.
        if not name or (" " in name and "(" not in name and ")" not in name):
            out.extend(emit_paragraphs(item, width, cont, lst, orphan_min=2))
            continue

        # Emit 'name: desc' with a hanging indent; add the colon only if source had one or there is a description.
        head = " " * cont + (f"{name}: " if (desc or had_colon) else name)
        out.extend(wrap_hanging(head, desc, width, cont + 4))

        # Remaining lines of the item wrap one level deeper than the head.
        tail = item[1:]
        if tail:
            body = emit_paragraphs(tail, width, cont + 4, lst, orphan_min=2)
            if body:
                out.extend(body)
    return out
255
+
256
+
257
def detect_opener(original_literal: str) -> tuple[str, str, bool]:
    """Return (prefix, quotes, inline_hint) from the original string token safely."""
    token = original_literal.lstrip()
    # Consume any string-prefix letters (r/R, u/U, b/B, f/F).
    end = 0
    while end < len(token) and token[end] in "rRuUbBfF":
        end += 1
    quotes = '"""'
    if token[end:end + 3] in ('"""', "'''"):
        quotes = token[end:end + 3]
    # Keep only raw/unicode markers; bytes/f-string prefixes are dropped.
    prefix = "".join(c for c in token[:end] if c in "rRuU")
    after = end + len(quotes)
    # Inline hint: text follows the opening quotes on the same line.
    inline = after < len(token) and token[after:after + 1] not in {"", "\n", "\r"}
    return prefix, quotes, inline
270
+
271
+
272
def format_google(text: str, indent: int, width: int, quotes: str, prefix: str, inline_first_line: bool) -> str:
    """Format multi-line Google-style docstring with given quotes/prefix.

    Sections are emitted in canonical order: summary, description, the
    structured sections (Args..Raises), then the verbatim sections
    (Examples/Notes/References), and a closing quote line.
    """
    p = parse_sections(text)
    opener = prefix + quotes
    out = [opener]
    if p["summary"]:
        lines = emit_paragraphs(p["summary"], width, indent, list_indent=indent, orphan_min=1)
        if inline_first_line and lines:
            # Keep the first summary line on the opening-quote line, stripping its indent pad.
            pad = " " * indent
            first = lines[0][len(pad) :] if lines[0].startswith(pad) else lines[0].lstrip()
            out[0] = opener + first
            out.extend(lines[1:])
        else:
            out.extend(lines)
    if any(x.strip() for x in p["description"]):
        out.append("")
        out.extend(emit_paragraphs(p["description"], width, indent, list_indent=indent, orphan_min=1))
    # Structured sections get re-wrapped with hanging indents.
    for sec in ("Args", "Attributes", "Methods", "Returns", "Yields", "Raises"):
        if any(x.strip() for x in p[sec]):
            add_header(out, indent, sec, opener)
            out.extend(format_structured_block(p[sec], width, indent))
    # Verbatim sections are only right-trimmed, never re-wrapped.
    for sec in ("Example", "Notes", "References"):
        if any(x.strip() for x in p[sec]):
            title = "Examples" if sec == "Example" else sec
            add_header(out, indent, title, opener)
            out.extend(x.rstrip() for x in p[sec])
    while out and out[-1] == "":
        out.pop()
    out.append(" " * indent + quotes)
    return "\n".join(out)
302
+
303
+
304
def format_docstring(
    content: str,
    indent: int,
    width: int,
    quotes: str,
    prefix: str,
    inline_hint: bool,
    preserve_inline: bool = True,
) -> str:
    """Single-line if short/sectionless/no-lists; else Google-style. Preserve quotes/prefix."""
    if not content or not content.strip():
        # Empty docstring collapses to bare paired quotes.
        return f"{prefix}{quotes}{quotes}"
    text = content.strip()
    # NOTE(review): substring check — 'Notes:' appearing mid-sentence also counts as a section; confirm acceptable.
    has_section = any(f"{s}:" in text for s in (*SECTIONS, "Examples"))
    has_list = any(is_list_item(l) for l in text.splitlines())
    single_ok = (
        ("\n" not in text)
        and not has_section
        and not has_list
        and (indent + len(prefix) + len(quotes) * 2 + len(text) <= width)
    )
    if single_ok:
        words = text.split()
        # Capitalize the first word unless it is a URL or already capitalized.
        if words and not (words[0].startswith(("http://", "https://")) or words[0][0].isupper()):
            words[0] = words[0][0].upper() + words[0][1:]
        out = " ".join(words)
        # Ensure terminal punctuation on one-liners.
        if out and out[-1] not in ".!?":
            out += "."
        return f"{prefix}{quotes}{out}{quotes}"
    return format_google(text, indent, width, quotes, prefix, inline_first_line=(preserve_inline and inline_hint))
334
+
335
+
336
class Visitor(ast.NodeVisitor):
    """Collect docstring replacements for classes and functions.

    # src: source lines of the file being processed
    # width: target maximum line width
    # repl: accumulated (start_line, end_line, start_col, end_col, text) replacement tuples
    # preserve_inline: whether to keep summaries on the opening-quote line
    """

    def __init__(self, src: list[str], width: int = 120, preserve_inline: bool = True):
        """Init with source lines, target width, and inline preservation flag."""
        self.src, self.width, self.repl, self.preserve_inline = src, width, [], preserve_inline

    def visit_Module(self, node):
        """Skip module docstring; visit children."""
        self.generic_visit(node)

    def visit_ClassDef(self, node):
        """Process the class docstring, then recurse into the class body."""
        self._handle(node)
        self.generic_visit(node)

    def visit_FunctionDef(self, node):
        """Process the function docstring, then recurse into nested definitions."""
        self._handle(node)
        self.generic_visit(node)

    def visit_AsyncFunctionDef(self, node):
        """Process the async function docstring, then recurse into nested definitions."""
        self._handle(node)
        self.generic_visit(node)

    def _handle(self, node):
        """If first stmt is a string expr, schedule replacement."""
        try:
            doc = ast.get_docstring(node, clean=False)
            if not doc or not node.body or not isinstance(node.body[0], ast.Expr):
                return
            s = node.body[0].value
            if not (isinstance(s, ast.Constant) and isinstance(s.value, str)):
                return
            # AST positions are 1-based lines / 0-based columns; convert to 0-based line indices.
            sl, el = node.body[0].lineno - 1, node.body[0].end_lineno - 1
            sc, ec = node.body[0].col_offset, node.body[0].end_col_offset
            if sl < 0 or el >= len(self.src):
                return
            # Recover the exact original literal text (possibly spanning multiple lines).
            original = (
                self.src[sl][sc:ec]
                if sl == el
                else "\n".join([self.src[sl][sc:], *self.src[sl + 1 : el], self.src[el][:ec]])
            )
            prefix, quotes, inline_hint = detect_opener(original)
            formatted = format_docstring(doc, sc, self.width, quotes, prefix, inline_hint, self.preserve_inline)
            # Only record a replacement when the formatted text actually differs.
            if formatted.strip() != original.strip():
                self.repl.append((sl, el, sc, ec, formatted))
        except Exception:
            # Best-effort: one malformed docstring must not abort the whole file.
            return
383
+
384
+
385
def format_python_file(text: str, width: int = 120, preserve_inline: bool = True) -> str:
    """Return source with reformatted docstrings; on failure, return original.

    Replacements are applied bottom-up so earlier (line, col) offsets recorded
    by the visitor stay valid while later spans are spliced.
    """
    if not text.strip():
        return text
    try:
        tree = ast.parse(text)
    except SyntaxError:
        # Not valid Python; leave the file untouched.
        return text
    src = text.splitlines()
    v = Visitor(src, width, preserve_inline=preserve_inline)
    try:
        v.visit(tree)
    except Exception:
        return text
    # Apply in reverse source order so indices remain correct after each splice.
    for sl, el, sc, ec, rep in reversed(v.repl):
        try:
            if sl == el:
                src[sl] = src[sl][:sc] + rep + src[sl][ec:]
            else:
                nl = rep.splitlines()
                nl[0] = src[sl][:sc] + nl[0]  # keep any code before the opening quotes
                nl[-1] += src[el][ec:]  # keep any code after the closing quotes
                src[sl : el + 1] = nl
        except Exception:
            # Skip a single failed splice rather than losing the whole file.
            continue
    return "\n".join(src)
411
+
412
+
413
def preserve_trailing_newlines(original: str, formatted: str) -> str:
    """Preserve the original trailing newline count."""
    orig_tail = len(original) - len(original.rstrip("\n"))
    fmt_tail = len(formatted) - len(formatted.rstrip("\n"))
    if orig_tail == fmt_tail:
        return formatted
    # Normalize the tail to exactly the original number of newlines.
    return formatted.rstrip("\n") + "\n" * orig_tail
418
+
419
+
420
def iter_py_files(paths: list[Path]) -> list[Path]:
    """Expand input paths to sorted unique *.py files."""
    collected: list[Path] = []
    for path in paths:
        if path.is_dir():
            # Recurse into directories; sort for deterministic output.
            collected.extend(sorted(path.rglob("*.py")))
        elif path.is_file() and path.suffix == ".py":
            collected.append(path)
    # dict.fromkeys deduplicates while preserving first-seen order.
    return list(dict.fromkeys(collected))
434
+
435
+
436
def process_file(path: Path, width: int = 120, check: bool = False, preserve_inline: bool = True) -> bool:
    """Process one file; True if unchanged/success, False if changed.

    In check mode the file is never written; a changed file is only reported.
    Errors are reported and treated as success so one bad file does not fail a batch.
    """
    if path.suffix != ".py":
        return True
    try:
        orig = path.read_text(encoding="utf-8")
        # Format, then restore the file's original trailing-newline count.
        fmt = preserve_trailing_newlines(orig, format_python_file(orig, width, preserve_inline=preserve_inline))
        if check:
            if orig != fmt:
                print(f" {path}")
                return False
            return True
        if orig != fmt:
            path.write_text(fmt, encoding="utf-8")
            print(f" {path}")
            return False
        return True
    except Exception as e:
        print(f" Error: {path}: {e}")
        return True
456
+
457
+
458
def parse_cli(argv: list[str]) -> tuple[list[Path], int, bool, bool]:
    """Minimal argv parser: (paths, width, check, preserve_inline)."""
    width = 120
    check = False
    preserve_inline = True
    paths: list[Path] = []
    for arg in argv:
        if arg == "--check":
            check = True
        elif arg == "--no-preserve-inline":
            preserve_inline = False
        elif arg.startswith("--line-width="):
            value = arg.split("=", 1)[1]
            try:
                width = int(value)
            except ValueError:
                pass  # Ignore malformed widths; keep the current value.
        else:
            paths.append(Path(arg))
    return paths, width, check, preserve_inline
474
+
475
+
476
def main() -> None:
    """CLI entry point."""
    args = sys.argv[1:]
    if not args:
        print(
            "Usage: format_python_docstrings.py [--check] [--no-preserve-inline] [--line-width=120] <files_or_dirs...>"
        )
        return
    paths, width, check, preserve_inline = parse_cli(args)
    files = iter_py_files(paths)
    if not files:
        print("No Python files found")
        return

    t0 = time.time()
    print(f"{'Checking' if check else 'Formatting'} {len(files)} file{'s' if len(files) != 1 else ''}")
    # process_file returns False for a changed file, so `not` counts changes.
    changed = sum(not process_file(f, width, check, preserve_inline=preserve_inline) for f in files)

    dur = time.time() - t0
    if changed:
        verb = "would be reformatted" if check else "reformatted"
        unchanged = len(files) - changed
        parts = []
        if changed:
            parts.append(f"{changed} file{'s' if changed != 1 else ''} {verb}")
        if unchanged:
            parts.append(f"{unchanged} file{'s' if unchanged != 1 else ''} left unchanged")
        print(f"{', '.join(parts)} ({dur:.1f}s)")
        # Non-zero exit in check mode signals CI that formatting is needed.
        if check:
            sys.exit(1)
    else:
        print(f"{len(files)} file{'s' if len(files) != 1 else ''} left unchanged ({dur:.1f}s)")
508
+
509
+
510
+ if __name__ == "__main__":
511
+ main()
actions/review_pr.py CHANGED
@@ -4,8 +4,9 @@ from __future__ import annotations
4
4
 
5
5
  import json
6
6
  import re
7
+ from pathlib import Path
7
8
 
8
- from .utils import ACTIONS_CREDIT, GITHUB_API_URL, MAX_PROMPT_CHARS, Action, get_completion, remove_html_comments
9
+ from .utils import ACTIONS_CREDIT, GITHUB_API_URL, MAX_PROMPT_CHARS, Action, get_response, remove_html_comments
9
10
 
10
11
  REVIEW_MARKER = "## 🔍 PR Review"
11
12
  ERROR_MARKER = "⚠️ Review generation encountered an error"
@@ -69,7 +70,9 @@ def parse_diff_files(diff_text: str) -> tuple[dict, str]:
69
70
  return files, "\n".join(augmented_lines)
70
71
 
71
72
 
72
- def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_description: str) -> dict:
73
+ def generate_pr_review(
74
+ repository: str, diff_text: str, pr_title: str, pr_description: str, event: Action = None
75
+ ) -> dict:
73
76
  """Generate comprehensive PR review with line-specific comments and overall assessment."""
74
77
  if not diff_text:
75
78
  return {"comments": [], "summary": "No changes detected in diff"}
@@ -94,6 +97,28 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
94
97
  diff_truncated = len(augmented_diff) > MAX_PROMPT_CHARS
95
98
  lines_changed = sum(len(sides["RIGHT"]) + len(sides["LEFT"]) for sides in diff_files.values())
96
99
 
100
+ # Fetch full file contents for better context if within token budget
101
+ full_files_section = ""
102
+ if event and len(file_list) <= 10: # Reasonable file count limit
103
+ file_contents = []
104
+ total_chars = len(augmented_diff)
105
+ for file_path in file_list:
106
+ try:
107
+ local_path = Path(file_path)
108
+ if not local_path.exists():
109
+ continue
110
+ content = local_path.read_text(encoding="utf-8")
111
+ # Only include if within budget
112
+ if total_chars + len(content) + 1000 < MAX_PROMPT_CHARS: # 1000 char buffer for formatting
113
+ file_contents.append(f"### {file_path}\n```\n{content}\n```")
114
+ total_chars += len(content) + 1000
115
+ else:
116
+ break # Stop when we hit budget limit
117
+ except Exception:
118
+ continue
119
+ if file_contents:
120
+ full_files_section = f"FULL FILE CONTENTS:\n{chr(10).join(file_contents)}\n\n"
121
+
97
122
  content = (
98
123
  "You are an expert code reviewer for Ultralytics. Review the code changes and provide inline comments where you identify issues or opportunities for improvement.\n\n"
99
124
  "Focus on: bugs, security vulnerabilities, performance issues, best practices, edge cases, error handling, and code clarity.\n\n"
@@ -113,6 +138,7 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
113
138
  "- For single-line fixes: provide 'suggestion' without 'start_line' to replace the line at 'line'\n"
114
139
  "- Do not provide multi-line fixes: suggestions should only be single line\n"
115
140
  "- Match the exact indentation of the original code\n"
141
+ "- Web search is available to consult docs, dependencies, or technical details\n"
116
142
  "- Avoid triple backticks (```) in suggestions as they break markdown formatting\n\n"
117
143
  "LINE NUMBERS:\n"
118
144
  "- Each line in the diff is prefixed with its line number for clarity:\n"
@@ -141,6 +167,7 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
141
167
  f"Review this PR in https://github.com/{repository}:\n\n"
142
168
  f"TITLE:\n{pr_title}\n\n"
143
169
  f"BODY:\n{remove_html_comments(pr_description or '')[:1000]}\n\n"
170
+ f"{full_files_section}"
144
171
  f"DIFF:\n{augmented_diff[:MAX_PROMPT_CHARS]}\n\n"
145
172
  "Now review this diff according to the rules above. Return JSON with comments array and summary."
146
173
  ),
@@ -152,10 +179,37 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
152
179
  # print(f"\nUser prompt (first 3000 chars):\n{messages[1]['content'][:3000]}...\n")
153
180
 
154
181
  try:
155
- response = get_completion(
182
+ schema = {
183
+ "type": "object",
184
+ "properties": {
185
+ "comments": {
186
+ "type": "array",
187
+ "items": {
188
+ "type": "object",
189
+ "properties": {
190
+ "file": {"type": "string"},
191
+ "line": {"type": "integer"},
192
+ "side": {"type": "string", "enum": ["LEFT", "RIGHT"]},
193
+ "severity": {"type": "string", "enum": ["CRITICAL", "HIGH", "MEDIUM", "LOW", "SUGGESTION"]},
194
+ "message": {"type": "string"},
195
+ "start_line": {"type": ["integer", "null"]},
196
+ "suggestion": {"type": ["string", "null"]},
197
+ },
198
+ "required": ["file", "line", "side", "severity", "message", "start_line", "suggestion"],
199
+ "additionalProperties": False,
200
+ },
201
+ },
202
+ "summary": {"type": "string"},
203
+ },
204
+ "required": ["comments", "summary"],
205
+ "additionalProperties": False,
206
+ }
207
+
208
+ response = get_response(
156
209
  messages,
157
210
  reasoning_effort="low",
158
211
  model="gpt-5-codex",
212
+ text_format={"format": {"type": "json_schema", "name": "pr_review", "strict": True, "schema": schema}},
159
213
  tools=[
160
214
  {
161
215
  "type": "web_search",
@@ -170,17 +224,15 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
170
224
  ],
171
225
  )
172
226
 
173
- json_str = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", response, re.DOTALL)
174
- review_data = json.loads(json_str.group(1) if json_str else response)
175
- print(json.dumps(review_data, indent=2))
227
+ print(json.dumps(response, indent=2))
176
228
 
177
229
  # Count comments BEFORE filtering (for COMMENT vs APPROVE decision)
178
- comments_before_filtering = len(review_data.get("comments", []))
230
+ comments_before_filtering = len(response.get("comments", []))
179
231
  print(f"AI generated {comments_before_filtering} comments")
180
232
 
181
233
  # Validate, filter, and deduplicate comments
182
234
  unique_comments = {}
183
- for c in review_data.get("comments", []):
235
+ for c in response.get("comments", []):
184
236
  file_path, line_num = c.get("file"), c.get("line", 0)
185
237
  start_line = c.get("start_line")
186
238
  side = (c.get("side") or "RIGHT").upper() # Default to RIGHT (added lines)
@@ -218,7 +270,7 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
218
270
  else:
219
271
  print(f"⚠️ AI duplicate for {key}: {c.get('severity')} - {(c.get('message') or '')[:60]}...")
220
272
 
221
- review_data.update(
273
+ response.update(
222
274
  {
223
275
  "comments": list(unique_comments.values()),
224
276
  "comments_before_filtering": comments_before_filtering,
@@ -227,8 +279,8 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
227
279
  "skipped_files": skipped_count,
228
280
  }
229
281
  )
230
- print(f"Valid comments after filtering: {len(review_data['comments'])}")
231
- return review_data
282
+ print(f"Valid comments after filtering: {len(response['comments'])}")
283
+ return response
232
284
 
233
285
  except Exception as e:
234
286
  import traceback
@@ -360,7 +412,7 @@ def main(*args, **kwargs):
360
412
  review_number = dismiss_previous_reviews(event)
361
413
 
362
414
  diff = event.get_pr_diff()
363
- review = generate_pr_review(event.repository, diff, event.pr.get("title") or "", event.pr.get("body") or "")
415
+ review = generate_pr_review(event.repository, diff, event.pr.get("title") or "", event.pr.get("body") or "", event)
364
416
 
365
417
  post_review_summary(event, review, review_number)
366
418
  print("PR review completed")
actions/scan_prs.py CHANGED
@@ -24,32 +24,70 @@ def get_phase_emoji(age_days):
24
24
  return "🔴", f"{age_days} days"
25
25
 
26
26
 
27
- def run():
28
- """List open PRs across organization and auto-merge eligible Dependabot PRs."""
29
- # Get and validate settings
30
- org = os.getenv("ORG", "ultralytics")
31
- visibility = os.getenv("VISIBILITY", "public").lower()
32
- repo_visibility = os.getenv("REPO_VISIBILITY", "public").lower()
33
- valid_visibilities = {"public", "private", "internal", "all"}
27
def parse_visibility(visibility_input, repo_visibility):
    """Parse and validate visibility settings with security checks."""
    valid = {"public", "private", "internal", "all"}
    requested = [v.strip() for v in visibility_input.lower().split(",") if v.strip()]
    repo_visibility = (repo_visibility or "").lower()

    # Report any unrecognized values, then drop them.
    invalid = [v for v in requested if v not in valid]
    if invalid:
        print(f"⚠️ Invalid visibility values: {', '.join(invalid)} - ignoring")

    accepted = [v for v in requested if v in valid]
    if not accepted:
        print("⚠️ No valid visibility values, defaulting to 'public'")
        return ["public"]

    # Security: a public caller repo may only ever scan public repos.
    if repo_visibility == "public" and accepted != ["public"]:
        print("⚠️ Security: Public repo cannot scan non-public repos. Restricting to public only.")
        return ["public"]

    return accepted
34
48
 
35
- if visibility not in valid_visibilities:
36
- print(f"⚠️ Invalid visibility '{visibility}', defaulting to 'public'")
37
- visibility = "public"
38
49
 
39
- # Security: if calling repo is public, restrict to public repos only
40
- if repo_visibility == "public" and visibility != "public":
41
- print(f"⚠️ Security: Public repo cannot scan {visibility} repos. Restricting to public only.")
42
- visibility = "public"
50
def get_repo_filter(visibility_list):
    """Return filtering strategy for repo visibility."""
    # A single concrete visibility can be delegated to gh's --visibility flag.
    if len(visibility_list) == 1 and visibility_list[0] != "all":
        only = visibility_list[0]
        return {"flag": ["--visibility", only], "filter": None, "str": only}

    # Otherwise fetch everything and filter client-side.
    wants_all = "all" in visibility_list
    allowed = {"public", "private", "internal"} if wants_all else set(visibility_list)
    label = "all" if wants_all else ", ".join(sorted(visibility_list))
    return {"flag": [], "filter": allowed, "str": label}
61
 
46
- # Get active repos with specified visibility
47
- cmd = ["gh", "repo", "list", org, "--limit", "1000", "--json", "name,url,isArchived"]
48
- if visibility != "all":
49
- cmd.extend(["--visibility", visibility])
50
62
 
51
- result = subprocess.run(cmd, capture_output=True, text=True, check=True)
52
- repos = {r["name"]: r["url"] for r in json.loads(result.stdout) if not r["isArchived"]}
63
+ def get_status_checks(rollup):
64
+ """Extract and validate status checks from rollup, return failed checks."""
65
+ checks = rollup if isinstance(rollup, list) else rollup.get("contexts", []) if isinstance(rollup, dict) else []
66
+ return [c for c in checks if c.get("conclusion") not in ["SUCCESS", "SKIPPED", "NEUTRAL"]]
67
+
68
+
69
+ def run():
70
+ """List open PRs across organization and auto-merge eligible Dependabot PRs."""
71
+ org = os.getenv("ORG", "ultralytics")
72
+ visibility_list = parse_visibility(os.getenv("VISIBILITY", "public"), os.getenv("REPO_VISIBILITY", "public"))
73
+ filter_config = get_repo_filter(visibility_list)
74
+
75
+ print(f"🔍 Scanning {filter_config['str']} repositories in {org} organization...")
76
+
77
+ # Get active repos
78
+ result = subprocess.run(
79
+ ["gh", "repo", "list", org, "--limit", "1000", "--json", "name,url,isArchived,visibility"]
80
+ + filter_config["flag"],
81
+ capture_output=True,
82
+ text=True,
83
+ check=True,
84
+ )
85
+ all_repos = [r for r in json.loads(result.stdout) if not r["isArchived"]]
86
+ repos = {
87
+ r["name"]: r["url"]
88
+ for r in all_repos
89
+ if not filter_config["filter"] or r["visibility"].lower() in filter_config["filter"]
90
+ }
53
91
 
54
92
  if not repos:
55
93
  print("⚠️ No repositories found")
@@ -84,25 +122,26 @@ def run():
84
122
  print("✅ No open PRs found")
85
123
  return
86
124
 
125
+ # Filter PRs to only include those from scanned repos
126
+ all_prs = [pr for pr in all_prs if pr["repository"]["name"] in repos]
127
+
128
+ if not all_prs:
129
+ print("✅ No open PRs found in scanned repositories")
130
+ return
131
+
87
132
  # Count PRs by phase
88
133
  phase_counts = {"new": 0, "green": 0, "yellow": 0, "red": 0}
89
134
  for pr in all_prs:
90
- age_days = get_age_days(pr["createdAt"])
91
- phase_counts[
92
- "new" if age_days == 0 else "green" if age_days <= 7 else "yellow" if age_days <= 30 else "red"
93
- ] += 1
135
+ age = get_age_days(pr["createdAt"])
136
+ phase_counts["new" if age == 0 else "green" if age <= 7 else "yellow" if age <= 30 else "red"] += 1
94
137
 
95
- repo_count = len({pr["repository"]["name"] for pr in all_prs if pr["repository"]["name"] in repos})
96
138
  summary = [
97
139
  f"# 🔍 Open Pull Requests - {org.title()} Organization\n",
98
- f"**Total:** {len(all_prs)} open PRs across {repo_count} repos",
99
- f"**By Phase:** 🆕 {phase_counts['new']} New | 🟢 {phase_counts['green']} Green (≤7d) | 🟡 {phase_counts['yellow']} Yellow (≤30d) | 🔴 {phase_counts['red']} Red (>30d)\n",
140
+ f"**Total:** {len(all_prs)} open PRs across {len({pr['repository']['name'] for pr in all_prs})}/{len(repos)} {filter_config['str']} repos",
141
+ f"**By Phase:** 🆕 {phase_counts['new']} New | 🟢 {phase_counts['green']} ≤7d | 🟡 {phase_counts['yellow']} ≤30d | 🔴 {phase_counts['red']} >30d\n",
100
142
  ]
101
143
 
102
144
  for repo_name in sorted({pr["repository"]["name"] for pr in all_prs}):
103
- if repo_name not in repos:
104
- continue
105
-
106
145
  repo_prs = [pr for pr in all_prs if pr["repository"]["name"] == repo_name]
107
146
  summary.append(
108
147
  f"## 📦 [{repo_name}]({repos[repo_name]}) - {len(repo_prs)} open PR{'s' if len(repo_prs) > 1 else ''}"
@@ -110,7 +149,7 @@ def run():
110
149
 
111
150
  for pr in repo_prs[:30]:
112
151
  emoji, age_str = get_phase_emoji(get_age_days(pr["createdAt"]))
113
- summary.append(f"- 🔀 [#{pr['number']}]({pr['url']}) {pr['title']} {emoji} {age_str}")
152
+ summary.append(f"- [#{pr['number']}]({pr['url']}) {pr['title']} {emoji} {age_str}")
114
153
 
115
154
  if len(repo_prs) > 30:
116
155
  summary.append(f"- ... {len(repo_prs) - 30} more PRs")
@@ -161,18 +200,8 @@ def run():
161
200
  total_skipped += 1
162
201
  continue
163
202
 
164
- # Check if all status checks passed (normalize rollup structure)
165
- rollup = pr.get("statusCheckRollup")
166
- if isinstance(rollup, list):
167
- checks = rollup
168
- elif isinstance(rollup, dict):
169
- checks = rollup.get("contexts", [])
170
- else:
171
- checks = []
172
- failed_checks = [c for c in checks if c.get("conclusion") not in ["SUCCESS", "SKIPPED", "NEUTRAL"]]
173
-
174
- if failed_checks:
175
- for check in failed_checks:
203
+ if failed := get_status_checks(pr.get("statusCheckRollup")):
204
+ for check in failed:
176
205
  print(f" ❌ Failing check: {check.get('name', 'unknown')} = {check.get('conclusion')}")
177
206
  total_skipped += 1
178
207
  continue
@@ -195,7 +224,6 @@ def run():
195
224
  summary.append(f"\n**Summary:** Found {total_found} | Merged {total_merged} | Skipped {total_skipped}")
196
225
  print(f"\n📊 Dependabot Summary: Found {total_found} | Merged {total_merged} | Skipped {total_skipped}")
197
226
 
198
- # Write to GitHub step summary if available
199
227
  if summary_file := os.getenv("GITHUB_STEP_SUMMARY"):
200
228
  with open(summary_file, "a") as f:
201
229
  f.write("\n".join(summary))
actions/summarize_pr.py CHANGED
@@ -2,7 +2,7 @@
2
2
 
3
3
  from __future__ import annotations
4
4
 
5
- from .utils import ACTIONS_CREDIT, GITHUB_API_URL, Action, get_completion, get_pr_summary_prompt
5
+ from .utils import ACTIONS_CREDIT, GITHUB_API_URL, Action, get_pr_summary_prompt, get_response
6
6
 
7
7
  SUMMARY_MARKER = "## 🛠️ PR Summary"
8
8
 
@@ -24,7 +24,7 @@ def generate_merge_message(pr_summary, pr_credit, pr_url):
24
24
  ),
25
25
  },
26
26
  ]
27
- return get_completion(messages)
27
+ return get_response(messages)
28
28
 
29
29
 
30
30
  def generate_issue_comment(pr_url, pr_summary, pr_credit, pr_title=""):
@@ -54,7 +54,7 @@ def generate_issue_comment(pr_url, pr_summary, pr_credit, pr_title=""):
54
54
  f"5. Thank 🙏 for reporting the issue and welcome any further feedback if the issue persists\n\n",
55
55
  },
56
56
  ]
57
- return get_completion(messages)
57
+ return get_response(messages)
58
58
 
59
59
 
60
60
  def generate_pr_summary(repository, diff_text):
@@ -68,7 +68,7 @@ def generate_pr_summary(repository, diff_text):
68
68
  },
69
69
  {"role": "user", "content": prompt},
70
70
  ]
71
- reply = get_completion(messages, temperature=1.0)
71
+ reply = get_response(messages, temperature=1.0)
72
72
  if is_large:
73
73
  reply = "**WARNING ⚠️** this PR is very large, summary may not cover all changes.\n\n" + reply
74
74
 
@@ -8,7 +8,7 @@ import subprocess
8
8
  import time
9
9
  from datetime import datetime
10
10
 
11
- from .utils import GITHUB_API_URL, Action, get_completion, remove_html_comments
11
+ from .utils import GITHUB_API_URL, Action, get_response, remove_html_comments
12
12
 
13
13
  # Environment variables
14
14
  CURRENT_TAG = os.getenv("CURRENT_TAG")
@@ -150,7 +150,7 @@ def generate_release_summary(
150
150
  },
151
151
  ]
152
152
  # print(messages[-1]["content"]) # for debug
153
- return get_completion(messages, temperature=1.0) + release_suffix
153
+ return get_response(messages, temperature=1.0) + release_suffix
154
154
 
155
155
 
156
156
  def create_github_release(event, tag_name: str, name: str, body: str):
@@ -38,7 +38,7 @@ def add_indentation(code_block, num_spaces):
38
38
 
39
39
 
40
40
  def format_code_with_ruff(temp_dir):
41
- """Formats Python code files in the specified directory using ruff linter and docformatter tools."""
41
+ """Formats Python code files in the specified directory using ruff and Python docstring formatter."""
42
42
  if not next(Path(temp_dir).rglob("*.py"), None):
43
43
  return
44
44
 
@@ -81,23 +81,17 @@ def format_code_with_ruff(temp_dir):
81
81
  print(f"ERROR running ruff check ❌ {e}")
82
82
 
83
83
  try:
84
- # Run docformatter
84
+ # Run Ultralytics Python docstring formatter
85
85
  subprocess.run(
86
86
  [
87
- "docformatter",
88
- "--wrap-summaries=120",
89
- "--wrap-descriptions=120",
90
- "--pre-summary-newline",
91
- "--close-quotes-on-newline",
92
- "--in-place",
93
- "--recursive",
87
+ "ultralytics-actions-format-python-docstrings",
94
88
  str(temp_dir),
95
89
  ],
96
90
  check=True,
97
91
  )
98
- print("Completed docformatter ✅")
92
+ print("Completed Python docstring formatting ✅")
99
93
  except Exception as e:
100
- print(f"ERROR running docformatter ❌ {e}")
94
+ print(f"ERROR running Python docstring formatter ❌ {e}")
101
95
 
102
96
 
103
97
  def format_bash_with_prettier(temp_dir):
actions/utils/__init__.py CHANGED
@@ -13,10 +13,10 @@ from .github_utils import GITHUB_API_URL, GITHUB_GRAPHQL_URL, Action, ultralytic
13
13
  from .openai_utils import (
14
14
  MAX_PROMPT_CHARS,
15
15
  filter_labels,
16
- get_completion,
17
16
  get_pr_open_response,
18
17
  get_pr_summary_guidelines,
19
18
  get_pr_summary_prompt,
19
+ get_response,
20
20
  )
21
21
  from .version_utils import check_pubdev_version, check_pypi_version
22
22
 
@@ -34,10 +34,10 @@ __all__ = (
34
34
  "check_pubdev_version",
35
35
  "check_pypi_version",
36
36
  "filter_labels",
37
- "get_completion",
38
37
  "get_pr_open_response",
39
38
  "get_pr_summary_guidelines",
40
39
  "get_pr_summary_prompt",
40
+ "get_response",
41
41
  "remove_html_comments",
42
42
  "ultralytics_actions_info",
43
43
  )
@@ -109,13 +109,13 @@ def get_pr_first_comment_template(repository: str, username: str) -> str:
109
109
  For more guidance, please refer to our [Contributing Guide](https://docs.ultralytics.com/help/contributing/). Don't hesitate to leave a comment if you have any questions. Thank you for contributing to Ultralytics! 🚀"""
110
110
 
111
111
 
112
- def get_completion(
112
+ def get_response(
113
113
  messages: list[dict[str, str]],
114
114
  check_links: bool = True,
115
115
  remove: list[str] = (" @giscus[bot]",),
116
116
  temperature: float = 1.0,
117
117
  reasoning_effort: str | None = None,
118
- response_format: dict | None = None,
118
+ text_format: dict | None = None,
119
119
  model: str = OPENAI_MODEL,
120
120
  tools: list[dict] | None = None,
121
121
  ) -> str | dict:
@@ -130,6 +130,8 @@ def get_completion(
130
130
  data = {"model": model, "input": messages, "store": False, "temperature": temperature}
131
131
  if "gpt-5" in model:
132
132
  data["reasoning"] = {"effort": reasoning_effort or "low"}
133
+ if text_format:
134
+ data["text"] = text_format
133
135
  if tools:
134
136
  data["tools"] = tools
135
137
 
@@ -145,6 +147,11 @@ def get_completion(
145
147
  time.sleep(2**attempt)
146
148
  continue
147
149
 
150
+ if r.status_code >= 400:
151
+ error_body = r.text
152
+ print(f"API Error {r.status_code}: {error_body}")
153
+ r.reason = f"{r.reason}\n{error_body}" # Add error body to exception message
154
+
148
155
  r.raise_for_status()
149
156
 
150
157
  # Parse response
@@ -173,7 +180,7 @@ def get_completion(
173
180
  token_str += f" (+{thinking_tokens} thinking)"
174
181
  print(f"{model} ({token_str} = {input_tokens + output_tokens} tokens, ${cost:.5f}, {elapsed:.1f}s)")
175
182
 
176
- if response_format and response_format.get("type") == "json_object":
183
+ if text_format and text_format.get("format", {}).get("type") in ["json_object", "json_schema"]:
177
184
  return json.loads(content)
178
185
 
179
186
  content = remove_outer_codeblocks(content)
@@ -230,16 +237,28 @@ Customized welcome message adapting the template below:
230
237
  - No spaces between bullet points
231
238
 
232
239
  Example comment template (adapt as needed, keep all links):
233
- {get_pr_first_comment_template(repository, username)}
234
-
235
- Return ONLY valid JSON in this exact format:
236
- {{"summary": "...", "labels": [...], "first_comment": "..."}}"""
240
+ {get_pr_first_comment_template(repository, username)}"""
241
+
242
+ schema = {
243
+ "type": "object",
244
+ "properties": {
245
+ "summary": {"type": "string", "description": "PR summary with emoji sections"},
246
+ "labels": {"type": "array", "items": {"type": "string"}, "description": "Array of label names"},
247
+ "first_comment": {"type": "string", "description": "Welcome comment with checklist"},
248
+ },
249
+ "required": ["summary", "labels", "first_comment"],
250
+ "additionalProperties": False,
251
+ }
237
252
 
238
253
  messages = [
239
254
  {"role": "system", "content": "You are an Ultralytics AI assistant processing GitHub PRs."},
240
255
  {"role": "user", "content": prompt},
241
256
  ]
242
- result = get_completion(messages, temperature=1.0, response_format={"type": "json_object"})
257
+ result = get_response(
258
+ messages,
259
+ temperature=1.0,
260
+ text_format={"format": {"type": "json_schema", "name": "pr_open_response", "strict": True, "schema": schema}},
261
+ )
243
262
  if is_large and "summary" in result:
244
263
  result["summary"] = (
245
264
  "**WARNING ⚠️** this PR is very large, summary may not cover all changes.\n\n" + result["summary"]
@@ -252,5 +271,5 @@ if __name__ == "__main__":
252
271
  {"role": "system", "content": "You are a helpful AI assistant."},
253
272
  {"role": "user", "content": "Explain how to export a YOLO11 model to CoreML."},
254
273
  ]
255
- response = get_completion(messages)
274
+ response = get_response(messages)
256
275
  print(response)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ultralytics-actions
3
- Version: 0.2.1
3
+ Version: 0.2.2
4
4
  Summary: Ultralytics Actions for GitHub automation and PR management.
5
5
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>
6
6
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -30,7 +30,6 @@ Description-Content-Type: text/markdown
30
30
  License-File: LICENSE
31
31
  Requires-Dist: requests>=2.28.1
32
32
  Requires-Dist: ruff>=0.9.1
33
- Requires-Dist: docformatter>=1.7.5
34
33
  Provides-Extra: dev
35
34
  Requires-Dist: pytest; extra == "dev"
36
35
  Requires-Dist: pytest-cov; extra == "dev"
@@ -68,8 +67,8 @@ AI-powered formatting, labeling, and PR summaries for Python, Swift, and Markdow
68
67
  ### 📄 Features
69
68
 
70
69
  - **Python Code:** Formatted using [Ruff](https://github.com/astral-sh/ruff), an extremely fast Python linter and formatter
70
+ - **Python Docstrings:** Google-style formatting enforced with Ultralytics Python docstring formatter (optional)
71
71
  - **Markdown Files:** Styled with [Prettier](https://github.com/prettier/prettier) to ensure consistent documentation appearance
72
- - **Docstrings:** Cleaned and standardized using [docformatter](https://github.com/PyCQA/docformatter)
73
72
  - **Swift Code:** Formatted with [`swift-format`](https://github.com/swiftlang/swift-format) _(requires `macos-latest` runner)_
74
73
  - **Spell Check:** Common misspellings caught using [codespell](https://github.com/codespell-project/codespell)
75
74
  - **Broken Links Check:** Broken links identified using [Lychee](https://github.com/lycheeverse/lychee)
@@ -118,7 +117,8 @@ jobs:
118
117
  with:
119
118
  token: ${{ secrets.GITHUB_TOKEN }} # Auto-generated token
120
119
  labels: true # Auto-label issues/PRs using AI
121
- python: true # Format Python with Ruff and docformatter
120
+ python: true # Format Python with Ruff
121
+ python_docstrings: false # Format Python docstrings (default: false)
122
122
  prettier: true # Format YAML, JSON, Markdown, CSS
123
123
  swift: false # Format Swift (requires macos-latest)
124
124
  dart: false # Format Dart/Flutter
@@ -166,7 +166,7 @@ List open PRs across an organization and auto-merge eligible Dependabot PRs.
166
166
  with:
167
167
  token: ${{ secrets.GITHUB_TOKEN }}
168
168
  org: ultralytics # Optional: defaults to ultralytics
169
- visibility: public # Optional: public, private, internal, or all
169
+ visibility: private,internal # Optional: public, private, internal, all, or comma-separated
170
170
  ```
171
171
 
172
172
  [**📖 Full Documentation →**](scan-prs/README.md)
@@ -0,0 +1,21 @@
1
+ actions/__init__.py,sha256=GaEFrlnYBqX5C9GJF1MEGHZP1TVWbOfa7VSXFMaffHQ,927
2
+ actions/dispatch_actions.py,sha256=ljlFR1o8m1qTHbStsJJVMVDdJv7iVqMfdPzKlZyKXl8,6743
3
+ actions/first_interaction.py,sha256=FTPcXjtBkM0Egc6VR5eVMvjF6uvarditTx9qaaoPcnU,9809
4
+ actions/format_python_docstrings.py,sha256=prK0U2EpwBlhkXgNnr72NsJFtSXTJaTCzMBcqL1N7wo,17150
5
+ actions/review_pr.py,sha256=wuPYqtDwt92e990mx1kSyfciICgZ1ZxduCyhpx4tc4M,19790
6
+ actions/scan_prs.py,sha256=y0J0PvEFVfWyB8SpGqyMXMhuPG7rGwrzkbkRL8VBS6A,8483
7
+ actions/summarize_pr.py,sha256=NChej-45frhhYLiNZ67DqhaHbxMhsjzYQqIE6FoAOcs,5707
8
+ actions/summarize_release.py,sha256=W1lgMdArgcte-_4h408eQH17t6H1giVnoIvnm-jR02g,9074
9
+ actions/update_file_headers.py,sha256=E5fKYLdeW16-BHCcuqxohGpGZqgEh-WX4ZmCQJw2R90,6684
10
+ actions/update_markdown_code_blocks.py,sha256=l3_nWNvujplYlY-Db3JOCG-qwMigp6BADjd7SaIEBpQ,8550
11
+ actions/utils/__init__.py,sha256=RcmBEPaQIR9mtNt2rO96Eru69EkSOaJwP2Eb9SogQbY,1111
12
+ actions/utils/common_utils.py,sha256=InBc-bsXcwzQYjuDxtrrm3bj7J-70U54G0s2nQKgCg8,12052
13
+ actions/utils/github_utils.py,sha256=5yzNIiu7-WBmH1-gSi4O31m1Fwd4k8pfbwM2BPVGf88,19989
14
+ actions/utils/openai_utils.py,sha256=XnteiRDrhQZRm9TTszM0qGO9fxSafRPJwQKM_zXuKvA,12753
15
+ actions/utils/version_utils.py,sha256=EIbm3iZVNyNl3dh8aNz_9ITeTC93ZxfyUzIRkO3tSXw,3242
16
+ ultralytics_actions-0.2.2.dist-info/licenses/LICENSE,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
17
+ ultralytics_actions-0.2.2.dist-info/METADATA,sha256=Kt1MSjpUo7mgHfd6NTI8mJqEtJmjrMUxXXrFEolAsO0,12542
18
+ ultralytics_actions-0.2.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
19
+ ultralytics_actions-0.2.2.dist-info/entry_points.txt,sha256=HZhkijH7tyA2igb3OoS9K4xc-_yAl8kYNRVUobqeq7w,581
20
+ ultralytics_actions-0.2.2.dist-info/top_level.txt,sha256=5apM5x80QlJcGbACn1v3fkmIuL1-XQCKcItJre7w7Tw,8
21
+ ultralytics_actions-0.2.2.dist-info/RECORD,,
@@ -1,5 +1,6 @@
1
1
  [console_scripts]
2
2
  ultralytics-actions-first-interaction = actions.first_interaction:main
3
+ ultralytics-actions-format-python-docstrings = actions.format_python_docstrings:main
3
4
  ultralytics-actions-headers = actions.update_file_headers:main
4
5
  ultralytics-actions-info = actions.utils:ultralytics_actions_info
5
6
  ultralytics-actions-review-pr = actions.review_pr:main
@@ -1,20 +0,0 @@
1
- actions/__init__.py,sha256=G-r-dtUaDXDKiTJuqW8GafMpIpOjbCVlG1ETyC4_yqY,881
2
- actions/dispatch_actions.py,sha256=ljlFR1o8m1qTHbStsJJVMVDdJv7iVqMfdPzKlZyKXl8,6743
3
- actions/first_interaction.py,sha256=wcKzLEUJmYnHmtwn-sz3N5erwftMT9jn7XxSKATAmXY,9815
4
- actions/review_pr.py,sha256=QqYmWE37sA4mJ6bPcY5M2dlNc1lRJPwT7XcJJFP1C7c,17466
5
- actions/scan_prs.py,sha256=9Gu4EHmLjdShIlkoCQfIrcxLpMZeOOnpKEyv_mVc3rU,7407
6
- actions/summarize_pr.py,sha256=0y4Cl4_ZMMtDWVhxwWasn3mHo_4GCnegJrf29yujUYM,5715
7
- actions/summarize_release.py,sha256=8D5EOQ36mho1HKtWD2J-IDH_xJJb3q0shgXZSdemmDM,9078
8
- actions/update_file_headers.py,sha256=E5fKYLdeW16-BHCcuqxohGpGZqgEh-WX4ZmCQJw2R90,6684
9
- actions/update_markdown_code_blocks.py,sha256=w3DTRltg2Rmr4-qrNawv_S2vJbheKE0tne1iz79FzXg,8692
10
- actions/utils/__init__.py,sha256=Uf7S5qYHS59zoAP9uKVIZwhpUbgyI947dD9jAWu50Lg,1115
11
- actions/utils/common_utils.py,sha256=InBc-bsXcwzQYjuDxtrrm3bj7J-70U54G0s2nQKgCg8,12052
12
- actions/utils/github_utils.py,sha256=5yzNIiu7-WBmH1-gSi4O31m1Fwd4k8pfbwM2BPVGf88,19989
13
- actions/utils/openai_utils.py,sha256=07g5NsfAfSuJ6CqWWQxsZ0MR4_kh6-Rjmud_iGPm49U,11965
14
- actions/utils/version_utils.py,sha256=EIbm3iZVNyNl3dh8aNz_9ITeTC93ZxfyUzIRkO3tSXw,3242
15
- ultralytics_actions-0.2.1.dist-info/licenses/LICENSE,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
16
- ultralytics_actions-0.2.1.dist-info/METADATA,sha256=1kN57DVDjQZMQGlhfF_3ugsfYGaXCsxFM-5guwrgFT4,12478
17
- ultralytics_actions-0.2.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
18
- ultralytics_actions-0.2.1.dist-info/entry_points.txt,sha256=n_VbDs3Xj33daaeN_2D72UTEuyeH8hVc6-CPH55ymkY,496
19
- ultralytics_actions-0.2.1.dist-info/top_level.txt,sha256=5apM5x80QlJcGbACn1v3fkmIuL1-XQCKcItJre7w7Tw,8
20
- ultralytics_actions-0.2.1.dist-info/RECORD,,