ultralytics-actions 0.1.9__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

actions/__init__.py CHANGED
@@ -12,10 +12,13 @@
  # │ │ ├── github_utils.py
  # │ │ ├── openai_utils.py
  # │ │ └── common_utils.py
+ # │ ├── dispatch_actions.py
  # │ ├── first_interaction.py
  # │ ├── review_pr.py
+ # │ ├── scan_prs.py
  # │ ├── summarize_pr.py
  # │ ├── summarize_release.py
+ # │ ├── update_file_headers.py
  # │ └── update_markdown_code_blocks.py
  # └── tests/
  # ├── __init__.py
@@ -23,4 +26,4 @@
  # ├── test_summarize_pr.py
  # └── ...

- __version__ = "0.1.9"
+ __version__ = "0.2.1"
actions/dispatch_actions.py CHANGED
@@ -2,6 +2,7 @@

  from __future__ import annotations

+ import os
  import time
  from datetime import datetime

@@ -12,47 +13,99 @@ RUN_CI_KEYWORD = "@ultralytics/run-ci" # and then to merge "@ultralytics/run-ci
  WORKFLOW_FILES = ["ci.yml", "docker.yml"]


- def get_pr_branch(event) -> str:
- """Gets the PR branch name."""
+ def get_pr_branch(event) -> tuple[str, str | None]:
+ """Gets the PR branch name, creating temp branch for forks, returning (branch, temp_branch_to_delete)."""
+ import subprocess
+ import tempfile
+
  pr_number = event.event_data["issue"]["number"]
  pr_data = event.get_repo_data(f"pulls/{pr_number}")
- return pr_data.get("head", {}).get("ref", "main")
-
-
- def trigger_and_get_workflow_info(event, branch: str) -> list[dict]:
- """Triggers workflows and returns their information."""
+ head = pr_data.get("head", {})
+
+ # Check if PR is from a fork
+ is_fork = head.get("repo") and head["repo"]["id"] != pr_data["base"]["repo"]["id"]
+
+ if is_fork:
+ # Create temp branch in base repo by pushing fork code
+ temp_branch = f"temp-ci-{pr_number}-{int(time.time() * 1000)}"
+ fork_repo = head["repo"]["full_name"]
+ fork_branch = head["ref"]
+ base_repo = event.repository
+ token = os.environ.get("GITHUB_TOKEN")
+ if not token:
+ raise ValueError("GITHUB_TOKEN environment variable is not set")
+
+ with tempfile.TemporaryDirectory() as tmp_dir:
+ repo_dir = os.path.join(tmp_dir, "repo")
+ base_url = f"https://x-access-token:{token}@github.com/{base_repo}.git"
+ fork_url = f"https://github.com/{fork_repo}.git"
+
+ try:
+ # Clone base repo (minimal)
+ subprocess.run(["git", "clone", "--depth", "1", base_url, repo_dir], check=True, capture_output=True)
+
+ # Add fork as remote and fetch the PR branch
+ subprocess.run(
+ ["git", "remote", "add", "fork", fork_url], cwd=repo_dir, check=True, capture_output=True
+ )
+ subprocess.run(
+ ["git", "fetch", "fork", f"{fork_branch}:{temp_branch}"],
+ cwd=repo_dir,
+ check=True,
+ capture_output=True,
+ )
+
+ # Push temp branch to base repo
+ subprocess.run(["git", "push", "origin", temp_branch], cwd=repo_dir, check=True, capture_output=True)
+ except subprocess.CalledProcessError as e:
+ # Sanitize error output to prevent token leakage
+ stderr = e.stderr.decode() if e.stderr else "No stderr output"
+ stderr = stderr.replace(token, "***TOKEN***")
+ raise RuntimeError(f"Failed to create tmp branch from fork (exit code {e.returncode}): {stderr}") from e
+
+ return temp_branch, temp_branch
+
+ return head.get("ref", "main"), None
+
+
+ def trigger_and_get_workflow_info(event, branch: str, temp_branch: str | None = None) -> list[dict]:
+ """Triggers workflows and returns their information, deleting temp branch if provided."""
  repo = event.repository
  results = []

- # Trigger all workflows
- for file in WORKFLOW_FILES:
- event.post(f"{GITHUB_API_URL}/repos/{repo}/actions/workflows/{file}/dispatches", json={"ref": branch})
+ try:
+ # Trigger all workflows
+ for file in WORKFLOW_FILES:
+ event.post(f"{GITHUB_API_URL}/repos/{repo}/actions/workflows/{file}/dispatches", json={"ref": branch})

- # Wait for workflows to be created
- time.sleep(10)
+ # Wait for workflows to be created and start
+ time.sleep(60)

- # Collect information about all workflows
- for file in WORKFLOW_FILES:
- # Get workflow name
- response = event.get(f"{GITHUB_API_URL}/repos/{repo}/actions/workflows/{file}")
- name = file.replace(".yml", "").title()
- if response.status_code == 200:
- name = response.json().get("name", name)
+ # Collect information about all workflows
+ for file in WORKFLOW_FILES:
+ # Get workflow name
+ response = event.get(f"{GITHUB_API_URL}/repos/{repo}/actions/workflows/{file}")
+ name = file.replace(".yml", "").title()
+ if response.status_code == 200:
+ name = response.json().get("name", name)

- # Get run information
- run_url = f"https://github.com/{repo}/actions/workflows/{file}"
- run_number = None
+ # Get run information
+ run_url = f"https://github.com/{repo}/actions/workflows/{file}"
+ run_number = None

- runs_response = event.get(
- f"{GITHUB_API_URL}/repos/{repo}/actions/workflows/{file}/runs?branch={branch}&event=workflow_dispatch&per_page=1"
- )
+ runs_response = event.get(
+ f"{GITHUB_API_URL}/repos/{repo}/actions/workflows/{file}/runs?branch={branch}&event=workflow_dispatch&per_page=1"
+ )

- if runs_response.status_code == 200:
- if runs := runs_response.json().get("workflow_runs", []):
+ if runs_response.status_code == 200 and (runs := runs_response.json().get("workflow_runs", [])):
  run_url = runs[0].get("html_url", run_url)
  run_number = runs[0].get("run_number")

- results.append({"name": name, "file": file, "url": run_url, "run_number": run_number})
+ results.append({"name": name, "file": file, "url": run_url, "run_number": run_number})
+ finally:
+ # Always delete temp branch even if workflow collection fails
+ if temp_branch:
+ event.delete(f"{GITHUB_API_URL}/repos/{repo}/git/refs/heads/{temp_branch}")

  return results

@@ -63,12 +116,15 @@ def update_comment(event, comment_body: str, triggered_actions: list[dict], bran
  return

  timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S UTC")
- summary = (
- f"\n\n## ⚡ Actions Trigger\n\n"
- f"<sub>Made with ❤️ by [Ultralytics Actions](https://www.ultralytics.com/actions)<sub>\n\n"
- f"GitHub Actions below triggered via workflow dispatch on this "
- f"PR branch `{branch}` at {timestamp} with `{RUN_CI_KEYWORD}` command:\n\n"
- )
+ summary = f"""
+
+ ## Actions Trigger
+
+ <sub>Made with ❤️ by [Ultralytics Actions](https://www.ultralytics.com/actions)<sub>
+
+ GitHub Actions below triggered via workflow dispatch for this PR at {timestamp} with `{RUN_CI_KEYWORD}` command:
+
+ """

  for action in triggered_actions:
  run_info = f" run {action['run_number']}" if action["run_number"] else ""
@@ -104,10 +160,10 @@ def main(*args, **kwargs):

  # Get branch, trigger workflows, and update comment
  event.toggle_eyes_reaction(True)
- branch = get_pr_branch(event)
- print(f"Triggering workflows on branch: {branch}")
+ branch, temp_branch = get_pr_branch(event)
+ print(f"Triggering workflows on branch: {branch}" + (" (temp)" if temp_branch else ""))

- triggered_actions = trigger_and_get_workflow_info(event, branch)
+ triggered_actions = trigger_and_get_workflow_info(event, branch, temp_branch)
  update_comment(event, comment_body, triggered_actions, branch)
  event.toggle_eyes_reaction(False)
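A quick sketch of the contract the two functions above now share, for readers skimming the hunk (branch names are illustrative):

```python
# Same-repo PR: the head ref is used directly and nothing needs cleanup.
branch, temp_branch = "feature/improve-docs", None

# Fork PR: the fork's commits are pushed to a temp branch in the base repo, and the same name
# is returned twice so trigger_and_get_workflow_info() can delete it in its finally block.
branch, temp_branch = "temp-ci-123-1712345678901", "temp-ci-123-1712345678901"
```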
actions/first_interaction.py CHANGED
@@ -6,11 +6,9 @@ import os
  import time

  from . import review_pr
- from .utils import Action, filter_labels, get_completion, get_pr_open_response, remove_html_comments
+ from .summarize_pr import SUMMARY_MARKER
+ from .utils import ACTIONS_CREDIT, Action, filter_labels, get_completion, get_pr_open_response, remove_html_comments

- SUMMARY_START = (
- "## 🛠️ PR Summary\n\n<sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub>\n\n"
- )
  BLOCK_USER = os.getenv("BLOCK_USER", "false").lower() == "true"
  AUTO_PR_REVIEW = os.getenv("REVIEW", "true").lower() == "true"

@@ -190,13 +188,13 @@ def main(*args, **kwargs):
  if event.should_skip_pr_author():
  return

- print("Processing PR open with unified API call...")
+ print(f"Processing PR open by @{username} with unified API call...")
  diff = event.get_pr_diff()
- response = get_pr_open_response(event.repository, diff, title, body, label_descriptions)
+ response = get_pr_open_response(event.repository, diff, title, username, label_descriptions)

  if summary := response.get("summary"):
  print("Updating PR description with summary...")
- event.update_pr_description(number, SUMMARY_START + summary)
+ event.update_pr_description(number, f"{SUMMARY_MARKER}\n\n{ACTIONS_CREDIT}\n\n{summary}")
  else:
  summary = body
actions/review_pr.py CHANGED
@@ -5,9 +5,9 @@ from __future__ import annotations
  import json
  import re

- from .utils import GITHUB_API_URL, MAX_PROMPT_CHARS, Action, get_completion, remove_html_comments
+ from .utils import ACTIONS_CREDIT, GITHUB_API_URL, MAX_PROMPT_CHARS, Action, get_completion, remove_html_comments

- REVIEW_MARKER = "🔍 PR Review"
+ REVIEW_MARKER = "## 🔍 PR Review"
  ERROR_MARKER = "⚠️ Review generation encountered an error"
  EMOJI_MAP = {"CRITICAL": "❗", "HIGH": "⚠️", "MEDIUM": "💡", "LOW": "📝", "SUGGESTION": "💭"}
  SKIP_PATTERNS = [
@@ -95,27 +95,25 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
  lines_changed = sum(len(sides["RIGHT"]) + len(sides["LEFT"]) for sides in diff_files.values())

  content = (
- "You are an expert code reviewer for Ultralytics. Provide detailed inline comments on specific code changes.\n\n"
- "Focus on: Bugs, security, performance, best practices, edge cases, error handling\n\n"
- "FORMATTING: Use backticks for code: `x=3`, `file.py`, `function()`\n\n"
+ "You are an expert code reviewer for Ultralytics. Review the code changes and provide inline comments where you identify issues or opportunities for improvement.\n\n"
+ "Focus on: bugs, security vulnerabilities, performance issues, best practices, edge cases, error handling, and code clarity.\n\n"
  "CRITICAL RULES:\n"
- "1. Quality over quantity - zero comments is fine for clean code, only flag truly important issues\n"
- "2. Combine issues that are directly related to the same problem\n"
- "3. Use 'start_line' and 'line' to highlight multi-line ranges when issues span multiple lines\n"
+ "1. Provide balanced, constructive feedback - flag bugs, improvements, and best practice issues\n"
+ "2. For issues spanning multiple adjacent lines, use 'start_line' to create ONE multi-line comment, never separate comments\n"
+ "3. Combine related issues into a single comment when they stem from the same root cause\n"
  "4. Prioritize: CRITICAL bugs/security > HIGH impact > code quality improvements\n"
  "5. Keep comments concise and friendly - avoid jargon\n"
- "6. Skip routine changes: imports, version updates, standard refactoring\n\n"
+ "6. Use backticks for code: `x=3`, `file.py`, `function()`\n"
+ "7. Skip routine changes: imports, version updates, standard refactoring\n\n"
  "SUMMARY:\n"
  "- Brief and actionable - what needs fixing, not where (locations shown in inline comments)\n\n"
  "SUGGESTIONS:\n"
- "- ONLY provide 'suggestion' field when you have high certainty the code is problematic AND sufficient context for a confident fix\n"
- "- If uncertain about the correct fix, omit 'suggestion' field and explain the concern in 'message' only\n"
- "- Suggestions must be ready-to-merge code with NO comments, placeholders, or explanations\n"
- "- Suggestions replace ONLY the single line at 'line' - for multi-line fixes, describe the change in 'message' instead\n"
- "- Do NOT provide 'start_line' when including a 'suggestion' - suggestions are always single-line only\n"
- "- Suggestion content must match the exact indentation of the original line\n"
- "- Avoid triple backticks (```) in suggestions as they break markdown formatting\n"
- "- It's better to flag an issue without a suggestion than provide a wrong or uncertain fix\n\n"
+ "- Provide 'suggestion' field with ready-to-merge code when you can confidently fix the issue\n"
+ "- Suggestions must be complete, working code with NO comments, placeholders, or explanations\n"
+ "- For single-line fixes: provide 'suggestion' without 'start_line' to replace the line at 'line'\n"
+ "- Do not provide multi-line fixes: suggestions should only be single line\n"
+ "- Match the exact indentation of the original code\n"
+ "- Avoid triple backticks (```) in suggestions as they break markdown formatting\n\n"
  "LINE NUMBERS:\n"
  "- Each line in the diff is prefixed with its line number for clarity:\n"
  " R 123 +added code <- RIGHT side (new file), line 123\n"
@@ -131,7 +129,7 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
  "- Extract line numbers from R#### or L#### prefixes in the diff\n"
  "- Exact paths (no ./), 'side' field must match R (RIGHT) or L (LEFT) prefix\n"
  "- Severity: CRITICAL, HIGH, MEDIUM, LOW, SUGGESTION\n"
- f"- Files changed: {len(file_list)} ({', '.join(file_list[:10])}{'...' if len(file_list) > 10 else ''})\n"
+ f"- Files changed: {len(file_list)} ({', '.join(file_list[:30])}{'...' if len(file_list) > 30 else ''})\n"
  f"- Lines changed: {lines_changed}\n"
  )

@@ -140,21 +138,37 @@
  {
  "role": "user",
  "content": (
- f"Review this PR in https://github.com/{repository}:\n"
- f"Title: {pr_title}\n"
- f"Description: {remove_html_comments(pr_description or '')[:1000]}\n\n"
- f"Diff:\n{augmented_diff[:MAX_PROMPT_CHARS]}\n\n"
+ f"Review this PR in https://github.com/{repository}:\n\n"
+ f"TITLE:\n{pr_title}\n\n"
+ f"BODY:\n{remove_html_comments(pr_description or '')[:1000]}\n\n"
+ f"DIFF:\n{augmented_diff[:MAX_PROMPT_CHARS]}\n\n"
  "Now review this diff according to the rules above. Return JSON with comments array and summary."
  ),
  },
  ]

- # Debug: print prompts sent to AI
- # print(f"\nSystem prompt (first 1000 chars):\n{messages[0]['content'][:2000]}...\n")
- # print(f"\nUser prompt (first 1000 chars):\n{messages[1]['content'][:2000]}...\n")
+ # Debug output
+ # print(f"\nSystem prompt (first 3000 chars):\n{messages[0]['content'][:3000]}...\n")
+ # print(f"\nUser prompt (first 3000 chars):\n{messages[1]['content'][:3000]}...\n")

  try:
- response = get_completion(messages, reasoning_effort="low", model="gpt-5-codex")
+ response = get_completion(
+ messages,
+ reasoning_effort="low",
+ model="gpt-5-codex",
+ tools=[
+ {
+ "type": "web_search",
+ "filters": {
+ "allowed_domains": [
+ "ultralytics.com",
+ "github.com",
+ "stackoverflow.com",
+ ]
+ },
+ }
+ ],
+ )

  json_str = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", response, re.DOTALL)
  review_data = json.loads(json_str.group(1) if json_str else response)
@@ -271,9 +285,9 @@ def post_review_summary(event: Action, review_data: dict, review_number: int) ->
  event_type = "COMMENT" if (has_error or has_inline_comments or has_issues) else "APPROVE"

  body = (
- f"## {review_title}\n\n"
- "<sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)</sub>\n\n"
- f"{review_data.get('summary', 'Review completed')[:1000]}\n\n" # Clip summary length
+ f"{review_title}\n\n"
+ f"{ACTIONS_CREDIT}\n\n"
+ f"{review_data.get('summary', 'Review completed')[:3000]}\n\n" # Clip summary length
  )

  if comments:
@@ -294,10 +308,10 @@ def post_review_summary(event: Action, review_data: dict, review_number: int) ->

  severity = comment.get("severity") or "SUGGESTION"
  side = comment.get("side", "RIGHT")
- comment_body = f"{EMOJI_MAP.get(severity, '💭')} **{severity}**: {(comment.get('message') or '')[:1000]}"
+ comment_body = f"{EMOJI_MAP.get(severity, '💭')} **{severity}**: {(comment.get('message') or '')[:3000]}"

  if suggestion := comment.get("suggestion"):
- suggestion = suggestion[:1000] # Clip suggestion length
+ suggestion = suggestion[:3000] # Clip suggestion length
  if "```" not in suggestion:
  # Extract original line indentation and apply to suggestion
  if original_line := review_data.get("diff_files", {}).get(file_path, {}).get(side, {}).get(line):
@@ -307,10 +321,9 @@ def post_review_summary(event: Action, review_data: dict, review_number: int) ->

  # Build comment with optional start_line for multi-line context
  review_comment = {"path": file_path, "line": line, "body": comment_body, "side": side}
- if start_line := comment.get("start_line"):
- if start_line < line:
- review_comment["start_line"] = start_line
- review_comment["start_side"] = side
+ if (start_line := comment.get("start_line")) and start_line < line:
+ review_comment["start_line"] = start_line
+ review_comment["start_side"] = side

  review_comments.append(review_comment)
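For reference, one entry of the `review_comments` list built above looks roughly like this when posted to GitHub's pull request review API (all values are hypothetical):

```python
review_comment = {
    "path": "actions/utils/openai_utils.py",  # hypothetical flagged file
    "line": 140,                              # last line of the flagged range (RIGHT side of the diff)
    "side": "RIGHT",
    "body": "⚠️ **HIGH**: `timeout=600` sets no connect timeout; prefer a (connect, read) tuple.",
    # start_line/start_side are only included when the model reports start_line < line
    "start_line": 137,
    "start_side": "RIGHT",
}
```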
actions/scan_prs.py ADDED
@@ -0,0 +1,205 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+ """List and auto-merge open PRs across GitHub organization."""
+
+ import json
+ import os
+ import subprocess
+ from datetime import datetime, timezone
+
+
+ def get_age_days(created_at):
+ """Calculate PR age in days from ISO timestamp."""
+ return (datetime.now(timezone.utc) - datetime.fromisoformat(created_at.replace("Z", "+00:00"))).days
+
+
+ def get_phase_emoji(age_days):
+ """Return emoji and label for PR age phase."""
+ if age_days == 0:
+ return "🆕", "NEW"
+ elif age_days <= 7:
+ return "🟢", f"{age_days} days"
+ elif age_days <= 30:
+ return "🟡", f"{age_days} days"
+ else:
+ return "🔴", f"{age_days} days"
+
+
+ def run():
+ """List open PRs across organization and auto-merge eligible Dependabot PRs."""
+ # Get and validate settings
+ org = os.getenv("ORG", "ultralytics")
+ visibility = os.getenv("VISIBILITY", "public").lower()
+ repo_visibility = os.getenv("REPO_VISIBILITY", "public").lower()
+ valid_visibilities = {"public", "private", "internal", "all"}
+
+ if visibility not in valid_visibilities:
+ print(f"⚠️ Invalid visibility '{visibility}', defaulting to 'public'")
+ visibility = "public"
+
+ # Security: if calling repo is public, restrict to public repos only
+ if repo_visibility == "public" and visibility != "public":
+ print(f"⚠️ Security: Public repo cannot scan {visibility} repos. Restricting to public only.")
+ visibility = "public"
+
+ print(f"🔍 Scanning {visibility} repositories in {org} organization...")
+
+ # Get active repos with specified visibility
+ cmd = ["gh", "repo", "list", org, "--limit", "1000", "--json", "name,url,isArchived"]
+ if visibility != "all":
+ cmd.extend(["--visibility", visibility])
+
+ result = subprocess.run(cmd, capture_output=True, text=True, check=True)
+ repos = {r["name"]: r["url"] for r in json.loads(result.stdout) if not r["isArchived"]}
+
+ if not repos:
+ print("⚠️ No repositories found")
+ return
+
+ # Get all open PRs
+ result = subprocess.run(
+ [
+ "gh",
+ "search",
+ "prs",
+ "--owner",
+ org,
+ "--state",
+ "open",
+ "--limit",
+ "1000",
+ "--json",
+ "repository,number,title,url,createdAt",
+ "--sort",
+ "created",
+ "--order",
+ "desc",
+ ],
+ capture_output=True,
+ text=True,
+ check=True,
+ )
+ all_prs = json.loads(result.stdout)
+
+ if not all_prs:
+ print("✅ No open PRs found")
+ return
+
+ # Count PRs by phase
+ phase_counts = {"new": 0, "green": 0, "yellow": 0, "red": 0}
+ for pr in all_prs:
+ age_days = get_age_days(pr["createdAt"])
+ phase_counts[
+ "new" if age_days == 0 else "green" if age_days <= 7 else "yellow" if age_days <= 30 else "red"
+ ] += 1
+
+ repo_count = len({pr["repository"]["name"] for pr in all_prs if pr["repository"]["name"] in repos})
+ summary = [
+ f"# 🔍 Open Pull Requests - {org.title()} Organization\n",
+ f"**Total:** {len(all_prs)} open PRs across {repo_count} repos",
+ f"**By Phase:** 🆕 {phase_counts['new']} New | 🟢 {phase_counts['green']} Green (≤7d) | 🟡 {phase_counts['yellow']} Yellow (≤30d) | 🔴 {phase_counts['red']} Red (>30d)\n",
+ ]
+
+ for repo_name in sorted({pr["repository"]["name"] for pr in all_prs}):
+ if repo_name not in repos:
+ continue
+
+ repo_prs = [pr for pr in all_prs if pr["repository"]["name"] == repo_name]
+ summary.append(
+ f"## 📦 [{repo_name}]({repos[repo_name]}) - {len(repo_prs)} open PR{'s' if len(repo_prs) > 1 else ''}"
+ )
+
+ for pr in repo_prs[:30]:
+ emoji, age_str = get_phase_emoji(get_age_days(pr["createdAt"]))
+ summary.append(f"- 🔀 [#{pr['number']}]({pr['url']}) {pr['title']} {emoji} {age_str}")
+
+ if len(repo_prs) > 30:
+ summary.append(f"- ... {len(repo_prs) - 30} more PRs")
+ summary.append("")
+
+ # Auto-merge Dependabot GitHub Actions PRs
+ print("\n🤖 Checking for Dependabot PRs to auto-merge...")
+ summary.append("\n# 🤖 Auto-Merge Dependabot GitHub Actions PRs\n")
+ total_found = total_merged = total_skipped = 0
+
+ for repo_name in repos:
+ result = subprocess.run(
+ [
+ "gh",
+ "pr",
+ "list",
+ "--repo",
+ f"{org}/{repo_name}",
+ "--author",
+ "app/dependabot",
+ "--state",
+ "open",
+ "--json",
+ "number,title,files,mergeable,statusCheckRollup",
+ ],
+ capture_output=True,
+ text=True,
+ )
+ if result.returncode != 0:
+ continue
+
+ merged = 0
+ for pr in json.loads(result.stdout):
+ if not all(f["path"].startswith(".github/workflows/") for f in pr["files"]):
+ continue
+
+ total_found += 1
+ pr_ref = f"{org}/{repo_name}#{pr['number']}"
+ print(f" Found: {pr_ref} - {pr['title']}")
+
+ if merged >= 1:
+ print(f" ⏭️ Skipped (already merged 1 PR in {repo_name})")
+ total_skipped += 1
+ continue
+
+ if pr["mergeable"] != "MERGEABLE":
+ print(f" ❌ Skipped (not mergeable: {pr['mergeable']})")
+ total_skipped += 1
+ continue
+
+ # Check if all status checks passed (normalize rollup structure)
+ rollup = pr.get("statusCheckRollup")
+ if isinstance(rollup, list):
+ checks = rollup
+ elif isinstance(rollup, dict):
+ checks = rollup.get("contexts", [])
+ else:
+ checks = []
+ failed_checks = [c for c in checks if c.get("conclusion") not in ["SUCCESS", "SKIPPED", "NEUTRAL"]]
+
+ if failed_checks:
+ for check in failed_checks:
+ print(f" ❌ Failing check: {check.get('name', 'unknown')} = {check.get('conclusion')}")
+ total_skipped += 1
+ continue
+
+ print(" ✅ All checks passed, merging...")
+ result = subprocess.run(
+ ["gh", "pr", "merge", str(pr["number"]), "--repo", f"{org}/{repo_name}", "--squash", "--admin"],
+ capture_output=True,
+ text=True,
+ )
+ if result.returncode == 0:
+ print(f" ✅ Successfully merged {pr_ref}")
+ summary.append(f"- ✅ Merged {pr_ref}")
+ total_merged += 1
+ merged += 1
+ else:
+ print(f" ❌ Merge failed: {result.stderr.strip()}")
+ total_skipped += 1
+
+ summary.append(f"\n**Summary:** Found {total_found} | Merged {total_merged} | Skipped {total_skipped}")
+ print(f"\n📊 Dependabot Summary: Found {total_found} | Merged {total_merged} | Skipped {total_skipped}")
+
+ # Write to GitHub step summary if available
+ if summary_file := os.getenv("GITHUB_STEP_SUMMARY"):
+ with open(summary_file, "a") as f:
+ f.write("\n".join(summary))
+
+
+ if __name__ == "__main__":
+ run()
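A minimal sketch of running the new scanner outside CI (assumes the GitHub CLI `gh` is installed and authenticated; the environment variables are the ones read by `run()` above):

```python
import os

from actions.scan_prs import run

os.environ["ORG"] = "ultralytics"         # organization to scan (module default)
os.environ["VISIBILITY"] = "public"       # public, private, internal, or all
os.environ["REPO_VISIBILITY"] = "public"  # visibility of the calling repo (security guard above)

run()  # prints the report and auto-merges eligible workflow-only Dependabot PRs
```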
actions/summarize_pr.py CHANGED
@@ -2,11 +2,9 @@

  from __future__ import annotations

- from .utils import GITHUB_API_URL, Action, get_completion, get_pr_summary_prompt
+ from .utils import ACTIONS_CREDIT, GITHUB_API_URL, Action, get_completion, get_pr_summary_prompt

- SUMMARY_START = (
- "## 🛠️ PR Summary\n\n<sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub>\n\n"
- )
+ SUMMARY_MARKER = "## 🛠️ PR Summary"


  def generate_merge_message(pr_summary, pr_credit, pr_url):
@@ -73,7 +71,8 @@ def generate_pr_summary(repository, diff_text):
  reply = get_completion(messages, temperature=1.0)
  if is_large:
  reply = "**WARNING ⚠️** this PR is very large, summary may not cover all changes.\n\n" + reply
- return SUMMARY_START + reply
+
+ return f"{SUMMARY_MARKER}\n\n{ACTIONS_CREDIT}\n\n{reply}"


  def label_fixed_issues(event, pr_summary):
actions/summarize_release.py CHANGED
@@ -196,7 +196,7 @@ def main(*args, **kwargs):
  try:
  summary = generate_release_summary(event, diff, prs, CURRENT_TAG, previous_tag)
  except Exception as e:
- print(f"Failed to generate summary: {str(e)}")
+ print(f"Failed to generate summary: {e}")
  summary = "Failed to generate summary."

  # Get the latest commit message
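With `SUMMARY_MARKER` and `ACTIONS_CREDIT` now shared constants, the summary assembled by `generate_pr_summary` above (and reused by `first_interaction`) lays out as follows; the summary text is hypothetical:

```python
from actions.summarize_pr import SUMMARY_MARKER  # "## 🛠️ PR Summary"
from actions.utils import ACTIONS_CREDIT         # "<sub>Made with ❤️ by [Ultralytics Actions](...)</sub>"

reply = "Adds fork support to workflow dispatch."  # hypothetical model output
print(f"{SUMMARY_MARKER}\n\n{ACTIONS_CREDIT}\n\n{reply}")
# ## 🛠️ PR Summary
#
# <sub>Made with ❤️ by [Ultralytics Actions](https://www.ultralytics.com/actions)</sub>
#
# Adds fork support to workflow dispatch.
```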
actions/utils/__init__.py CHANGED
@@ -1,6 +1,7 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

  from .common_utils import (
+ ACTIONS_CREDIT,
  REDIRECT_END_IGNORE_LIST,
  REDIRECT_START_IGNORE_LIST,
  REQUESTS_HEADERS,
@@ -20,13 +21,14 @@ from .openai_utils import (
  from .version_utils import check_pubdev_version, check_pypi_version

  __all__ = (
+ "ACTIONS_CREDIT",
  "GITHUB_API_URL",
  "GITHUB_GRAPHQL_URL",
  "MAX_PROMPT_CHARS",
+ "REDIRECT_END_IGNORE_LIST",
+ "REDIRECT_START_IGNORE_LIST",
  "REQUESTS_HEADERS",
  "URL_IGNORE_LIST",
- "REDIRECT_START_IGNORE_LIST",
- "REDIRECT_END_IGNORE_LIST",
  "Action",
  "allow_redirect",
  "check_pubdev_version",
actions/utils/common_utils.py CHANGED
@@ -23,6 +23,7 @@ REQUESTS_HEADERS = {
  "Sec-Fetch-User": "?1",
  "Sec-Fetch-Dest": "document",
  }
+ ACTIONS_CREDIT = "<sub>Made with ❤️ by [Ultralytics Actions](https://www.ultralytics.com/actions)</sub>"
  BAD_HTTP_CODES = frozenset(
  {
  204, # No content
actions/utils/github_utils.py CHANGED
@@ -101,7 +101,13 @@ mutation($labelableId: ID!, $labelIds: [ID!]!) {
  class Action:
  """Handles GitHub Actions API interactions and event processing."""

- def __init__(self, token: str = None, event_name: str = None, event_data: dict = None, verbose: bool = True):
+ def __init__(
+ self,
+ token: str | None = None,
+ event_name: str | None = None,
+ event_data: dict | None = None,
+ verbose: bool = True,
+ ):
  """Initializes a GitHub Actions API handler with token and event data for processing events."""
  self.token = token or os.getenv("GITHUB_TOKEN")
  self.event_name = event_name or os.getenv("GITHUB_EVENT_NAME")
@@ -116,8 +122,8 @@ class Action:
  self._pr_summary_cache = None
  self._username_cache = None
  self._default_status = {
- "get": [200],
- "post": [200, 201],
+ "get": [200, 204],
+ "post": [200, 201, 204],
  "put": [200, 201, 204],
  "patch": [200],
  "delete": [200, 204],
@@ -134,9 +140,12 @@
  print(f"{'✓' if success else '✗'} {method.upper()} {url} → {r.status_code} ({elapsed:.1f}s)", flush=True)
  if not success:
  try:
- print(f" ❌ Error: {r.json().get('message', 'Unknown error')}")
+ error_data = r.json()
+ print(f" ❌ Error: {error_data.get('message', 'Unknown error')}")
+ if errors := error_data.get("errors"):
+ print(f" Details: {errors}")
  except Exception:
- print(f" ❌ Error: {r.text[:200]}")
+ print(f" ❌ Error: {r.text[:1000]}")

  if not success and hard:
  r.raise_for_status()
@@ -257,7 +266,7 @@
  self.delete(f"{url}/{self.eyes_reaction_id}")
  self.eyes_reaction_id = None

- def graphql_request(self, query: str, variables: dict = None) -> dict:
+ def graphql_request(self, query: str, variables: dict | None = None) -> dict:
  """Executes a GraphQL query against the GitHub API."""
  result = self.post(GITHUB_GRAPHQL_URL, json={"query": query, "variables": variables}).json()
  if "data" not in result or result.get("errors"):
@@ -331,7 +340,9 @@
  else:
  self.post(f"{GITHUB_API_URL}/repos/{self.repository}/issues/{number}/comments", json={"body": comment})

- def update_content(self, number: int, node_id: str, issue_type: str, title: str = None, body: str = None):
+ def update_content(
+ self, number: int, node_id: str, issue_type: str, title: str | None = None, body: str | None = None
+ ):
  """Updates the title and/or body of an issue, pull request, or discussion."""
  if issue_type == "discussion":
  variables = {"discussionId": node_id}
@@ -373,7 +384,7 @@
  def handle_alert(self, number: int, node_id: str, issue_type: str, username: str, block: bool = False):
  """Handles content flagged as alert: updates content, locks, optionally closes and blocks user."""
  new_title = "Content Under Review"
- new_body = """This post has been flagged for review by [Ultralytics Actions](https://ultralytics.com/actions) due to possible spam, abuse, or off-topic content. For more information please see our:
+ new_body = """This post has been flagged for review by [Ultralytics Actions](https://www.ultralytics.com/actions) due to possible spam, abuse, or off-topic content. For more information please see our:

  - [Code of Conduct](https://docs.ultralytics.com/help/code-of-conduct/)
  - [Security Policy](https://docs.ultralytics.com/help/security/)
actions/utils/openai_utils.py CHANGED
@@ -2,6 +2,7 @@

  from __future__ import annotations

+ import json
  import os
  import time

@@ -12,6 +13,12 @@ from actions.utils.common_utils import check_links_in_string
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
  OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-5-2025-08-07")
  MAX_PROMPT_CHARS = round(128000 * 3.3 * 0.5) # Max characters for prompt (50% of 128k context)
+ MODEL_COSTS = {
+ "gpt-5-codex": (1.25, 10.00),
+ "gpt-5-2025-08-07": (1.25, 10.00),
+ "gpt-5-nano-2025-08-07": (0.05, 0.40),
+ "gpt-5-mini-2025-08-07": (0.25, 2.00),
+ }
  SYSTEM_PROMPT_ADDITION = """Guidance:
  - Ultralytics Branding: Use YOLO11, YOLO26, etc., not YOLOv11, YOLOv26 (only older versions like YOLOv10 have a v). Always capitalize "HUB" in "Ultralytics HUB"; use "Ultralytics HUB", not "The Ultralytics HUB".
  - Avoid Equations: Do not include equations or mathematical notations.
@@ -34,7 +41,7 @@ def remove_outer_codeblocks(string):
  return string


- def filter_labels(available_labels: dict, current_labels: list = None, is_pr: bool = False) -> dict:
+ def filter_labels(available_labels: dict, current_labels: list | None = None, is_pr: bool = False) -> dict:
  """Filters labels by removing manually-assigned and mutually exclusive labels."""
  current_labels = current_labels or []
  filtered = available_labels.copy()
@@ -87,9 +94,9 @@ def get_pr_summary_prompt(repository: str, diff_text: str) -> tuple[str, bool]:
  return prompt, len(diff_text) > MAX_PROMPT_CHARS


- def get_pr_first_comment_template(repository: str) -> str:
+ def get_pr_first_comment_template(repository: str, username: str) -> str:
  """Returns the PR first comment template with checklist (used only by unified PR open)."""
- return f"""👋 Hello @username, thank you for submitting an `{repository}` 🚀 PR! To ensure a seamless integration of your work, please review the following checklist:
+ return f"""👋 Hello @{username}, thank you for submitting a `{repository}` 🚀 PR! To ensure a seamless integration of your work, please review the following checklist:

  - ✅ **Define a Purpose**: Clearly explain the purpose of your fix or feature in your PR description, and link to any [relevant issues](https://github.com/{repository}/issues). Ensure your commit messages are clear, concise, and adhere to the project's conventions.
  - ✅ **Synchronize with Source**: Confirm your PR is synchronized with the `{repository}` `main` branch. If it's behind, update it by clicking the 'Update branch' button or by running `git pull` and `git merge main` locally.
@@ -107,9 +114,10 @@ def get_completion(
  check_links: bool = True,
  remove: list[str] = (" @giscus[bot]",),
  temperature: float = 1.0,
- reasoning_effort: str = None,
- response_format: dict = None,
+ reasoning_effort: str | None = None,
+ response_format: dict | None = None,
  model: str = OPENAI_MODEL,
+ tools: list[dict] | None = None,
  ) -> str | dict:
  """Generates a completion using OpenAI's Responses API with retry logic."""
  assert OPENAI_API_KEY, "OpenAI API key is required."
@@ -122,25 +130,50 @@
  data = {"model": model, "input": messages, "store": False, "temperature": temperature}
  if "gpt-5" in model:
  data["reasoning"] = {"effort": reasoning_effort or "low"}
+ if tools:
+ data["tools"] = tools

  try:
- r = requests.post(url, json=data, headers=headers, timeout=600)
+ r = requests.post(url, json=data, headers=headers, timeout=(30, 900))
+ elapsed = r.elapsed.total_seconds()
  success = r.status_code == 200
- print(f"{'✓' if success else '✗'} POST {url} → {r.status_code} ({r.elapsed.total_seconds():.1f}s)")
+ print(f"{'✓' if success else '✗'} POST {url} → {r.status_code} ({elapsed:.1f}s)")
+
+ # Retry server errors
+ if attempt < 2 and r.status_code >= 500:
+ print(f"Retrying {r.status_code} in {2**attempt}s (attempt {attempt + 1}/3)...")
+ time.sleep(2**attempt)
+ continue
+
  r.raise_for_status()

  # Parse response
+ response_json = r.json()
  content = ""
- for item in r.json().get("output", []):
+ for item in response_json.get("output", []):
  if item.get("type") == "message":
  for c in item.get("content", []):
  if c.get("type") == "output_text":
  content += c.get("text") or ""
  content = content.strip()

- if response_format and response_format.get("type") == "json_object":
- import json
+ # Extract and print token usage
+ if usage := response_json.get("usage"):
+ input_tokens = usage.get("input_tokens", 0)
+ output_tokens = usage.get("output_tokens", 0)
+ thinking_tokens = (usage.get("output_tokens_details") or {}).get("reasoning_tokens", 0)

+ # Calculate cost
+ costs = MODEL_COSTS.get(model, (0.0, 0.0))
+ cost = (input_tokens * costs[0] + output_tokens * costs[1]) / 1e6
+
+ # Format summary
+ token_str = f"{input_tokens}→{output_tokens - thinking_tokens}"
+ if thinking_tokens:
+ token_str += f" (+{thinking_tokens} thinking)"
+ print(f"{model} ({token_str} = {input_tokens + output_tokens} tokens, ${cost:.5f}, {elapsed:.1f}s)")
+
+ if response_format and response_format.get("type") == "json_object":
  return json.loads(content)

  content = remove_outer_codeblocks(content)
@@ -154,33 +187,28 @@

  return content

- except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
+ except (requests.exceptions.ConnectionError, requests.exceptions.Timeout, json.JSONDecodeError) as e:
  if attempt < 2:
- print(f"Connection error, retrying in {2**attempt}s")
+ print(f"Retrying {e.__class__.__name__} in {2**attempt}s (attempt {attempt + 1}/3)...")
  time.sleep(2**attempt)
  continue
  raise
- except requests.exceptions.HTTPError as e:
- status_code = getattr(e.response, "status_code", 0) if e.response else 0
- if attempt < 2 and status_code >= 500:
- print(f"Server error {status_code}, retrying in {2**attempt}s")
- time.sleep(2**attempt)
- continue
+ except requests.exceptions.HTTPError: # 4xx errors
  raise

  return content


- def get_pr_open_response(repository: str, diff_text: str, title: str, body: str, available_labels: dict) -> dict:
+ def get_pr_open_response(repository: str, diff_text: str, title: str, username: str, available_labels: dict) -> dict:
  """Generates unified PR response with summary, labels, and first comment in a single API call."""
  is_large = len(diff_text) > MAX_PROMPT_CHARS

  filtered_labels = filter_labels(available_labels, is_pr=True)
  labels_str = "\n".join(f"- {name}: {description}" for name, description in filtered_labels.items())

- prompt = f"""You are processing a new GitHub pull request for the {repository} repository.
+ prompt = f"""You are processing a new GitHub PR by @{username} for the {repository} repository.

- Generate 3 outputs in a single JSON response for the PR titled {title} with the following diff:
+ Generate 3 outputs in a single JSON response for the PR titled '{title}' with the following diff:
  {diff_text[:MAX_PROMPT_CHARS]}


@@ -202,7 +230,7 @@ Customized welcome message adapting the template below:
  - No spaces between bullet points

  Example comment template (adapt as needed, keep all links):
- {get_pr_first_comment_template(repository)}
+ {get_pr_first_comment_template(repository, username)}
  Return ONLY valid JSON in this exact format:
  {{"summary": "...", "labels": [...], "first_comment": "..."}}"""
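As a sanity check on the usage line printed by `get_completion` above, here is the same cost formula applied to hypothetical token counts (`MODEL_COSTS` holds USD per million input/output tokens):

```python
MODEL_COSTS = {"gpt-5-codex": (1.25, 10.00)}  # ($ per 1M input tokens, $ per 1M output tokens)

input_tokens, output_tokens = 10_000, 2_000   # hypothetical request
in_price, out_price = MODEL_COSTS["gpt-5-codex"]
cost = (input_tokens * in_price + output_tokens * out_price) / 1e6
print(f"${cost:.5f}")  # $0.03250
```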
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ultralytics-actions
- Version: 0.1.9
+ Version: 0.2.1
  Summary: Ultralytics Actions for GitHub automation and PR management.
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -38,111 +38,159 @@ Dynamic: license-file

  <a href="https://www.ultralytics.com/"><img src="https://raw.githubusercontent.com/ultralytics/assets/main/logo/Ultralytics_Logotype_Original.svg" width="320" alt="Ultralytics logo"></a>

- # 🚀 Ultralytics Actions: AI-powered formatting, labeling & PR summaries for Python and Markdown
+ # 🚀 Ultralytics Actions

- Welcome to the [Ultralytics Actions](https://github.com/ultralytics/actions) repository, your go-to solution for maintaining consistent code quality across Ultralytics Python and Swift projects. This GitHub Action is designed to automate the formatting of Python, Markdown, and Swift files, ensuring adherence to our coding standards and enhancing project maintainability.
+ Welcome to [Ultralytics Actions](https://github.com/ultralytics/actions) - a collection of GitHub Actions and Python tools for automating code quality, PR management, and CI/CD workflows across Ultralytics projects.

  [![GitHub Actions Marketplace](https://img.shields.io/badge/Marketplace-Ultralytics_Actions-blue?style=flat&logo=github)](https://github.com/marketplace/actions/ultralytics-actions)

  [![Actions CI](https://github.com/ultralytics/actions/actions/workflows/ci.yml/badge.svg)](https://github.com/ultralytics/actions/actions/workflows/ci.yml)
  [![Ultralytics Actions](https://github.com/ultralytics/actions/actions/workflows/format.yml/badge.svg)](https://github.com/ultralytics/actions/actions/workflows/format.yml)
- [![List Open PRs](https://github.com/ultralytics/actions/actions/workflows/open-prs.yml/badge.svg)](https://github.com/ultralytics/actions/actions/workflows/open-prs.yml)
+ [![Scan PRs](https://github.com/ultralytics/actions/actions/workflows/scan-prs.yml/badge.svg)](https://github.com/ultralytics/actions/actions/workflows/scan-prs.yml)
  [![codecov](https://codecov.io/github/ultralytics/actions/graph/badge.svg?token=DoizJ1WS6j)](https://codecov.io/github/ultralytics/actions)

  [![Ultralytics Discord](https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue)](https://discord.com/invite/ultralytics)
  [![Ultralytics Forums](https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue)](https://community.ultralytics.com/)
  [![Ultralytics Reddit](https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue)](https://reddit.com/r/ultralytics)

- ## 📄 Actions Description
+ ## 📦 Repository Contents

- Ultralytics Actions automatically applies formats, updates, and enhancements using a suite of powerful tools:
+ This repository provides three main components:

- - **Python Code:** Formatted using [Ruff](https://github.com/astral-sh/ruff), an extremely fast Python linter and formatter.
- - **Markdown Files:** Styled with [Prettier](https://github.com/prettier/prettier) to ensure consistent documentation appearance.
- - **Docstrings:** Cleaned and standardized using [docformatter](https://github.com/PyCQA/docformatter).
- - **Swift Code:** Formatted with [`swift-format`](https://github.com/swiftlang/swift-format) to maintain a uniform coding style across Swift projects. _(Note: Requires the `macos-latest` runner.)_
- - **Spell Check:** Common misspellings are caught using [codespell](https://github.com/codespell-project/codespell).
- - **Broken Links Check:** Broken links in documentation and Markdown files are identified using [Lychee](https://github.com/lycheeverse/lychee).
- - **PR Summary:** Concise Pull Request summaries are generated using [OpenAI](https://openai.com/) GPT-5, improving clarity and review efficiency.
- - **PR Review:** AI-powered code reviews identify critical bugs, security issues, and code quality concerns with suggested fixes.
- - **Auto-labeling:** Applies relevant labels to issues and PRs via [OpenAI](https://openai.com/) GPT-5 for intelligent categorization.
+ 1. **[Ultralytics Actions](#ultralytics-actions-main-action)** - Main GitHub Action for AI-powered code formatting, PR summaries, and auto-labeling
+ 2. **[Standalone Actions](#standalone-actions)** - Reusable composite actions for common CI/CD tasks
+ 3. **[Python Package](#python-package)** - `ultralytics-actions` package for programmatic use

- ## 🛠️ How It Works
+ ## Ultralytics Actions (Main Action)

- Ultralytics Actions triggers on various GitHub events to streamline workflows:
+ AI-powered formatting, labeling, and PR summaries for Python, Swift, and Markdown files.

- - **Push Events:** Automatically formats code when changes are pushed to the `main` branch.
- - **Pull Requests:**
- - Ensures contributions meet formatting standards before merging.
- - Generates a concise summary of changes using GPT-5.
- - Provides AI-powered inline code reviews with suggested fixes for critical issues.
- - Applies relevant labels using GPT-5 for intelligent categorization.
- - **Issues:** Automatically applies relevant labels using GPT-5 when new issues are created.
+ ### 📄 Features

- These automated actions help maintain high code quality, improve documentation clarity, and streamline the review process by providing consistent formatting, informative summaries, and appropriate categorization.
+ - **Python Code:** Formatted using [Ruff](https://github.com/astral-sh/ruff), an extremely fast Python linter and formatter
+ - **Markdown Files:** Styled with [Prettier](https://github.com/prettier/prettier) to ensure consistent documentation appearance
+ - **Docstrings:** Cleaned and standardized using [docformatter](https://github.com/PyCQA/docformatter)
+ - **Swift Code:** Formatted with [`swift-format`](https://github.com/swiftlang/swift-format) _(requires `macos-latest` runner)_
+ - **Spell Check:** Common misspellings caught using [codespell](https://github.com/codespell-project/codespell)
+ - **Broken Links Check:** Broken links identified using [Lychee](https://github.com/lycheeverse/lychee)
+ - **PR Summary:** Concise Pull Request summaries generated using [OpenAI](https://openai.com/) GPT-5
+ - **PR Review:** AI-powered code reviews identify critical bugs, security issues, and quality concerns with suggested fixes
+ - **Auto-labeling:** Applies relevant labels to issues and PRs via [OpenAI](https://openai.com/) GPT-5

- ## 🔧 Setting Up the Action
+ ### 🛠️ How It Works

- To integrate this action into your Ultralytics repository:
+ Triggers on GitHub events to streamline workflows:

- 1. **Create a Workflow File:** In your repository, create a YAML file under `.github/workflows/`, for example, `ultralytics-actions.yml`.
+ - **Push Events:** Automatically formats code when changes are pushed to `main`
+ - **Pull Requests:** Ensures formatting standards, generates summaries, provides AI reviews, and applies labels
+ - **Issues:** Automatically applies relevant labels using GPT-5

- 2. **Add the Action:** Configure the Ultralytics Actions in your workflow file as shown below:
+ ### 🔧 Setup

- ```yaml
- # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+ Create `.github/workflows/ultralytics-actions.yml`:

- # Ultralytics Actions https://github.com/ultralytics/actions
- # This workflow formats code and documentation in PRs to Ultralytics standards
+ ```yaml
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

- name: Ultralytics Actions
+ # Ultralytics Actions https://github.com/ultralytics/actions
+ # This workflow formats code and documentation in PRs to Ultralytics standards

- on:
- issues:
- types: [opened]
- pull_request:
- branches: [main]
- types: [opened, closed, synchronize, review_requested]
+ name: Ultralytics Actions

- permissions:
- contents: write # Modify code in PRs
- pull-requests: write # Add comments and labels to PRs
- issues: write # Add comments and labels to issues
+ on:
+ issues:
+ types: [opened]
+ pull_request:
+ branches: [main]
+ types: [opened, closed, synchronize, review_requested]

- jobs:
- actions:
- runs-on: ubuntu-latest
- steps:
- - name: Run Ultralytics Actions
- uses: ultralytics/actions@main
- with:
- token: ${{ secrets.GITHUB_TOKEN }} # Auto-generated token
- labels: true # Auto-label issues/PRs using AI
- python: true # Format Python with Ruff and docformatter
- prettier: true # Format YAML, JSON, Markdown, CSS
- swift: false # Format Swift (requires macos-latest)
- dart: false # Format Dart/Flutter
- spelling: true # Check spelling with codespell
- links: true # Check broken links with Lychee
- summary: true # Generate AI-powered PR summaries
- openai_api_key: ${{ secrets.OPENAI_API_KEY }} # Powers PR summaries, labels and comments
- brave_api_key: ${{ secrets.BRAVE_API_KEY }} # Used for broken link resolution
- ```
+ permissions:
+ contents: write # Modify code in PRs
+ pull-requests: write # Add comments and labels to PRs
+ issues: write # Add comments and labels to issues

- 3. **Customize:** Adjust the `runs-on` runner and the boolean flags (`labels`, `python`, `prettier`, `swift`, `spelling`, `links`, `summary`) based on your project's needs. Remember to add your `OPENAI_API_KEY` as a secret in your repository settings if you enable `labels` or `summary`.
+ jobs:
+ actions:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Run Ultralytics Actions
+ uses: ultralytics/actions@main
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }} # Auto-generated token
+ labels: true # Auto-label issues/PRs using AI
+ python: true # Format Python with Ruff and docformatter
+ prettier: true # Format YAML, JSON, Markdown, CSS
+ swift: false # Format Swift (requires macos-latest)
+ dart: false # Format Dart/Flutter
+ spelling: true # Check spelling with codespell
+ links: true # Check broken links with Lychee
+ summary: true # Generate AI-powered PR summaries
+ openai_api_key: ${{ secrets.OPENAI_API_KEY }} # Powers PR summaries, labels and reviews
+ brave_api_key: ${{ secrets.BRAVE_API_KEY }} # Used for broken link resolution
+ ```
+
+ ## Standalone Actions
+
+ Reusable composite actions for common CI/CD tasks. Each can be used independently in your workflows.
+
+ ### 1. Retry Action
+
+ Retry failed commands with exponential backoff.
+
+ ```yaml
+ - uses: ultralytics/actions/retry@main
+ with:
+ command: npm install
+ max_attempts: 3
+ timeout_minutes: 5
+ ```
+
+ [**📖 Full Documentation →**](retry/README.md)
+
+ ### 2. Cleanup Disk Action
+
+ Free up disk space on GitHub runners by removing unnecessary packages and files.
+
+ ```yaml
+ - uses: ultralytics/actions/cleanup-disk@main
+ ```
+
+ [**📖 Full Documentation →**](cleanup-disk/README.md)
+
+ ### 3. Scan PRs Action
+
+ List open PRs across an organization and auto-merge eligible Dependabot PRs.
+
+ ```yaml
+ - uses: ultralytics/actions/scan-prs@main
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ org: ultralytics # Optional: defaults to ultralytics
+ visibility: public # Optional: public, private, internal, or all
+ ```
+
+ [**📖 Full Documentation →**](scan-prs/README.md)

  ## Python Package

- Install the `ultralytics-actions` Python package directly with Pip:
+ Install `ultralytics-actions` for programmatic access to action utilities.

  [![PyPI - Version](https://img.shields.io/pypi/v/ultralytics-actions?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics-actions/)
  [![Ultralytics Downloads](https://static.pepy.tech/badge/ultralytics-actions)](https://clickpy.clickhouse.com/dashboard/ultralytics-actions)
  [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics-actions?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics-actions/)

- ```sh
+ ```bash
  pip install ultralytics-actions
  ```

+ **Available Modules:**
+
+ - `actions.review_pr` - AI-powered PR review
+ - `actions.summarize_pr` - Generate PR summaries
+ - `actions.scan_prs` - Scan and manage organization PRs
+ - `actions.first_interaction` - Welcome message for new contributors
+ - And more in `actions/` directory
+

  ## 💡 Contribute
  Ultralytics thrives on community collaboration, and we deeply value your contributions! Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) for details on how you can get involved. We also encourage you to share your feedback through our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey). A huge thank you 🙏 to all our contributors!
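To complement the module list above, a minimal import sketch of the package's public surface (names taken from `actions/utils/__init__.py` and the RECORD below); in practice these run inside a GitHub Actions job where GITHUB_TOKEN and the event payload are provided by the runner:

```python
from actions import __version__                                   # "0.2.1"
from actions.utils import ACTIONS_CREDIT, Action, get_completion  # re-exported utilities

print(__version__)
print(ACTIONS_CREDIT)  # shared credit footer used in PR summaries and reviews
```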
@@ -0,0 +1,20 @@
+ actions/__init__.py,sha256=G-r-dtUaDXDKiTJuqW8GafMpIpOjbCVlG1ETyC4_yqY,881
+ actions/dispatch_actions.py,sha256=ljlFR1o8m1qTHbStsJJVMVDdJv7iVqMfdPzKlZyKXl8,6743
+ actions/first_interaction.py,sha256=wcKzLEUJmYnHmtwn-sz3N5erwftMT9jn7XxSKATAmXY,9815
+ actions/review_pr.py,sha256=QqYmWE37sA4mJ6bPcY5M2dlNc1lRJPwT7XcJJFP1C7c,17466
+ actions/scan_prs.py,sha256=9Gu4EHmLjdShIlkoCQfIrcxLpMZeOOnpKEyv_mVc3rU,7407
+ actions/summarize_pr.py,sha256=0y4Cl4_ZMMtDWVhxwWasn3mHo_4GCnegJrf29yujUYM,5715
+ actions/summarize_release.py,sha256=8D5EOQ36mho1HKtWD2J-IDH_xJJb3q0shgXZSdemmDM,9078
+ actions/update_file_headers.py,sha256=E5fKYLdeW16-BHCcuqxohGpGZqgEh-WX4ZmCQJw2R90,6684
+ actions/update_markdown_code_blocks.py,sha256=w3DTRltg2Rmr4-qrNawv_S2vJbheKE0tne1iz79FzXg,8692
+ actions/utils/__init__.py,sha256=Uf7S5qYHS59zoAP9uKVIZwhpUbgyI947dD9jAWu50Lg,1115
+ actions/utils/common_utils.py,sha256=InBc-bsXcwzQYjuDxtrrm3bj7J-70U54G0s2nQKgCg8,12052
+ actions/utils/github_utils.py,sha256=5yzNIiu7-WBmH1-gSi4O31m1Fwd4k8pfbwM2BPVGf88,19989
+ actions/utils/openai_utils.py,sha256=07g5NsfAfSuJ6CqWWQxsZ0MR4_kh6-Rjmud_iGPm49U,11965
+ actions/utils/version_utils.py,sha256=EIbm3iZVNyNl3dh8aNz_9ITeTC93ZxfyUzIRkO3tSXw,3242
+ ultralytics_actions-0.2.1.dist-info/licenses/LICENSE,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
+ ultralytics_actions-0.2.1.dist-info/METADATA,sha256=1kN57DVDjQZMQGlhfF_3ugsfYGaXCsxFM-5guwrgFT4,12478
+ ultralytics_actions-0.2.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ ultralytics_actions-0.2.1.dist-info/entry_points.txt,sha256=n_VbDs3Xj33daaeN_2D72UTEuyeH8hVc6-CPH55ymkY,496
+ ultralytics_actions-0.2.1.dist-info/top_level.txt,sha256=5apM5x80QlJcGbACn1v3fkmIuL1-XQCKcItJre7w7Tw,8
+ ultralytics_actions-0.2.1.dist-info/RECORD,,
@@ -1,19 +0,0 @@
- actions/__init__.py,sha256=j5AWc7zYYZL-B2DuboeBC58Yx3gjYg4eHLe1Np7bQxQ,772
- actions/dispatch_actions.py,sha256=i81UeHrYudAsOUFUfN71u6X-1cmZaZaiiTj6p2rvz8A,4217
- actions/first_interaction.py,sha256=QxPsLjd-m2G-QYOcQb2hQfIB_alupzeZzSHTk-jw0bg,9856
- actions/review_pr.py,sha256=tZztKjHmoGU3XBXy4dsxCWTHQGQIUpjmOGE8sNtxYfg,17329
- actions/summarize_pr.py,sha256=3nFotiZX42dz-mzDQ9wcoUILJKkcaxrC5EeyxvuvY60,5775
- actions/summarize_release.py,sha256=iCXa9a1DcOrDVe8pMWEsYKgDxuIOhIgMsYymElOLK6o,9083
- actions/update_file_headers.py,sha256=E5fKYLdeW16-BHCcuqxohGpGZqgEh-WX4ZmCQJw2R90,6684
- actions/update_markdown_code_blocks.py,sha256=w3DTRltg2Rmr4-qrNawv_S2vJbheKE0tne1iz79FzXg,8692
- actions/utils/__init__.py,sha256=unjXYIFNFeHrdC8LooDFVWlj6fAdGhssUgASo5229zY,1073
- actions/utils/common_utils.py,sha256=2DRvcyCgmn507w3T4FJcQSZNI9KC1gVUb8CnJqPapD0,11943
- actions/utils/github_utils.py,sha256=OKbUOjqOdu7rTLWZdFsB2uMggEtcwrjW98ecBT8lFMg,19714
- actions/utils/openai_utils.py,sha256=WPRiLJYOMEsmmWcQ-IirnQp1N37EQhO9OvgQaK9JIV0,10706
- actions/utils/version_utils.py,sha256=EIbm3iZVNyNl3dh8aNz_9ITeTC93ZxfyUzIRkO3tSXw,3242
- ultralytics_actions-0.1.9.dist-info/licenses/LICENSE,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
- ultralytics_actions-0.1.9.dist-info/METADATA,sha256=E0FUdK1lP_igrfWdDrUYyx30-vS6BtOgrj0kyEsmemk,12368
- ultralytics_actions-0.1.9.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- ultralytics_actions-0.1.9.dist-info/entry_points.txt,sha256=n_VbDs3Xj33daaeN_2D72UTEuyeH8hVc6-CPH55ymkY,496
- ultralytics_actions-0.1.9.dist-info/top_level.txt,sha256=5apM5x80QlJcGbACn1v3fkmIuL1-XQCKcItJre7w7Tw,8
- ultralytics_actions-0.1.9.dist-info/RECORD,,