ultralytics-actions 0.1.8__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

actions/__init__.py CHANGED
@@ -23,4 +23,4 @@
 # ├── test_summarize_pr.py
 # └── ...
 
-__version__ = "0.1.8"
+__version__ = "0.2.0"
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 
+import os
 import time
 from datetime import datetime
 
@@ -12,47 +13,99 @@ RUN_CI_KEYWORD = "@ultralytics/run-ci" # and then to merge "@ultralytics/run-ci
 WORKFLOW_FILES = ["ci.yml", "docker.yml"]
 
 
-def get_pr_branch(event) -> str:
-    """Gets the PR branch name."""
+def get_pr_branch(event) -> tuple[str, str | None]:
+    """Gets the PR branch name, creating temp branch for forks, returning (branch, temp_branch_to_delete)."""
+    import subprocess
+    import tempfile
+
     pr_number = event.event_data["issue"]["number"]
     pr_data = event.get_repo_data(f"pulls/{pr_number}")
-    return pr_data.get("head", {}).get("ref", "main")
-
-
-def trigger_and_get_workflow_info(event, branch: str) -> list[dict]:
-    """Triggers workflows and returns their information."""
+    head = pr_data.get("head", {})
+
+    # Check if PR is from a fork
+    is_fork = head.get("repo") and head["repo"]["id"] != pr_data["base"]["repo"]["id"]
+
+    if is_fork:
+        # Create temp branch in base repo by pushing fork code
+        temp_branch = f"temp-ci-{pr_number}-{int(time.time() * 1000)}"
+        fork_repo = head["repo"]["full_name"]
+        fork_branch = head["ref"]
+        base_repo = event.repository
+        token = os.environ.get("GITHUB_TOKEN")
+        if not token:
+            raise ValueError("GITHUB_TOKEN environment variable is not set")
+
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            repo_dir = os.path.join(tmp_dir, "repo")
+            base_url = f"https://x-access-token:{token}@github.com/{base_repo}.git"
+            fork_url = f"https://github.com/{fork_repo}.git"
+
+            try:
+                # Clone base repo (minimal)
+                subprocess.run(["git", "clone", "--depth", "1", base_url, repo_dir], check=True, capture_output=True)
+
+                # Add fork as remote and fetch the PR branch
+                subprocess.run(
+                    ["git", "remote", "add", "fork", fork_url], cwd=repo_dir, check=True, capture_output=True
+                )
+                subprocess.run(
+                    ["git", "fetch", "fork", f"{fork_branch}:{temp_branch}"],
+                    cwd=repo_dir,
+                    check=True,
+                    capture_output=True,
+                )
+
+                # Push temp branch to base repo
+                subprocess.run(["git", "push", "origin", temp_branch], cwd=repo_dir, check=True, capture_output=True)
+            except subprocess.CalledProcessError as e:
+                # Sanitize error output to prevent token leakage
+                stderr = e.stderr.decode() if e.stderr else "No stderr output"
+                stderr = stderr.replace(token, "***TOKEN***")
+                raise RuntimeError(f"Failed to create tmp branch from fork (exit code {e.returncode}): {stderr}") from e
+
+        return temp_branch, temp_branch
+
+    return head.get("ref", "main"), None
+
+
+def trigger_and_get_workflow_info(event, branch: str, temp_branch: str | None = None) -> list[dict]:
+    """Triggers workflows and returns their information, deleting temp branch if provided."""
     repo = event.repository
     results = []
 
-    # Trigger all workflows
-    for file in WORKFLOW_FILES:
-        event.post(f"{GITHUB_API_URL}/repos/{repo}/actions/workflows/{file}/dispatches", json={"ref": branch})
+    try:
+        # Trigger all workflows
+        for file in WORKFLOW_FILES:
+            event.post(f"{GITHUB_API_URL}/repos/{repo}/actions/workflows/{file}/dispatches", json={"ref": branch})
 
-    # Wait for workflows to be created
-    time.sleep(10)
+        # Wait for workflows to be created and start
+        time.sleep(60)
 
-    # Collect information about all workflows
-    for file in WORKFLOW_FILES:
-        # Get workflow name
-        response = event.get(f"{GITHUB_API_URL}/repos/{repo}/actions/workflows/{file}")
-        name = file.replace(".yml", "").title()
-        if response.status_code == 200:
-            name = response.json().get("name", name)
+        # Collect information about all workflows
+        for file in WORKFLOW_FILES:
+            # Get workflow name
+            response = event.get(f"{GITHUB_API_URL}/repos/{repo}/actions/workflows/{file}")
+            name = file.replace(".yml", "").title()
+            if response.status_code == 200:
+                name = response.json().get("name", name)
 
-        # Get run information
-        run_url = f"https://github.com/{repo}/actions/workflows/{file}"
-        run_number = None
+            # Get run information
+            run_url = f"https://github.com/{repo}/actions/workflows/{file}"
+            run_number = None
 
-        runs_response = event.get(
-            f"{GITHUB_API_URL}/repos/{repo}/actions/workflows/{file}/runs?branch={branch}&event=workflow_dispatch&per_page=1"
-        )
+            runs_response = event.get(
+                f"{GITHUB_API_URL}/repos/{repo}/actions/workflows/{file}/runs?branch={branch}&event=workflow_dispatch&per_page=1"
+            )
 
-        if runs_response.status_code == 200:
-            if runs := runs_response.json().get("workflow_runs", []):
+            if runs_response.status_code == 200 and (runs := runs_response.json().get("workflow_runs", [])):
                 run_url = runs[0].get("html_url", run_url)
                 run_number = runs[0].get("run_number")
 
-        results.append({"name": name, "file": file, "url": run_url, "run_number": run_number})
+            results.append({"name": name, "file": file, "url": run_url, "run_number": run_number})
+    finally:
+        # Always delete temp branch even if workflow collection fails
+        if temp_branch:
+            event.delete(f"{GITHUB_API_URL}/repos/{repo}/git/refs/heads/{temp_branch}")
 
     return results
 
@@ -63,12 +116,15 @@ def update_comment(event, comment_body: str, triggered_actions: list[dict], bran
         return
 
     timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S UTC")
-    summary = (
-        f"\n\n## ⚡ Actions Trigger\n\n"
-        f"<sub>Made with ❤️ by [Ultralytics Actions](https://www.ultralytics.com/actions)<sub>\n\n"
-        f"GitHub Actions below triggered via workflow dispatch on this "
-        f"PR branch `{branch}` at {timestamp} with `{RUN_CI_KEYWORD}` command:\n\n"
-    )
+    summary = f"""
+
+## Actions Trigger
+
+<sub>Made with ❤️ by [Ultralytics Actions](https://www.ultralytics.com/actions)<sub>
+
+GitHub Actions below triggered via workflow dispatch for this PR at {timestamp} with `{RUN_CI_KEYWORD}` command:
+
+"""
 
     for action in triggered_actions:
         run_info = f" run {action['run_number']}" if action["run_number"] else ""
@@ -104,10 +160,10 @@ def main(*args, **kwargs):
 
     # Get branch, trigger workflows, and update comment
     event.toggle_eyes_reaction(True)
-    branch = get_pr_branch(event)
-    print(f"Triggering workflows on branch: {branch}")
+    branch, temp_branch = get_pr_branch(event)
+    print(f"Triggering workflows on branch: {branch}" + (" (temp)" if temp_branch else ""))
 
-    triggered_actions = trigger_and_get_workflow_info(event, branch)
+    triggered_actions = trigger_and_get_workflow_info(event, branch, temp_branch)
     update_comment(event, comment_body, triggered_actions, branch)
     event.toggle_eyes_reaction(False)
 
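Why the fork handling above is needed: GitHub's workflow_dispatch API only accepts refs that exist in the base repository, so a fork PR's code must first be mirrored into a temporary base-repo branch. The following standalone sketch condenses that git sequence; the function name and arguments are illustrative, not part of the package, and error handling is omitted:

import os
import subprocess
import tempfile
import time


def mirror_fork_branch(base_repo: str, fork_repo: str, fork_branch: str, token: str) -> str:
    """Sketch of the mirroring performed by get_pr_branch() in the diff above."""
    temp_branch = f"temp-ci-demo-{int(time.time() * 1000)}"  # unique, disposable ref name
    with tempfile.TemporaryDirectory() as tmp_dir:
        repo_dir = os.path.join(tmp_dir, "repo")
        # Shallow-clone the base repo over an authenticated URL
        subprocess.run(
            ["git", "clone", "--depth", "1", f"https://x-access-token:{token}@github.com/{base_repo}.git", repo_dir],
            check=True,
        )
        # Fetch the fork branch straight into a new local branch, then publish it to the base repo
        subprocess.run(
            ["git", "fetch", f"https://github.com/{fork_repo}.git", f"{fork_branch}:{temp_branch}"],
            cwd=repo_dir,
            check=True,
        )
        subprocess.run(["git", "push", "origin", temp_branch], cwd=repo_dir, check=True)
    return temp_branch

The temp branch is then deleted in trigger_and_get_workflow_info()'s finally block, so it never outlives the dispatch.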
actions/first_interaction.py CHANGED
@@ -6,11 +6,9 @@ import os
 import time
 
 from . import review_pr
-from .utils import Action, filter_labels, get_completion, get_pr_open_response, remove_html_comments
+from .summarize_pr import SUMMARY_MARKER
+from .utils import ACTIONS_CREDIT, Action, filter_labels, get_completion, get_pr_open_response, remove_html_comments
 
-SUMMARY_START = (
-    "## 🛠️ PR Summary\n\n<sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub>\n\n"
-)
 BLOCK_USER = os.getenv("BLOCK_USER", "false").lower() == "true"
 AUTO_PR_REVIEW = os.getenv("REVIEW", "true").lower() == "true"
 
@@ -196,7 +194,7 @@
 
     if summary := response.get("summary"):
        print("Updating PR description with summary...")
-        event.update_pr_description(number, SUMMARY_START + summary)
+        event.update_pr_description(number, f"{SUMMARY_MARKER}\n\n{ACTIONS_CREDIT}\n\n{summary}")
     else:
         summary = body
 
actions/review_pr.py CHANGED
@@ -5,9 +5,9 @@ from __future__ import annotations
 import json
 import re
 
-from .utils import GITHUB_API_URL, MAX_PROMPT_CHARS, Action, get_completion, remove_html_comments
+from .utils import ACTIONS_CREDIT, GITHUB_API_URL, MAX_PROMPT_CHARS, Action, get_completion, remove_html_comments
 
-REVIEW_MARKER = "🔍 PR Review"
+REVIEW_MARKER = "## 🔍 PR Review"
 ERROR_MARKER = "⚠️ Review generation encountered an error"
 EMOJI_MAP = {"CRITICAL": "❗", "HIGH": "⚠️", "MEDIUM": "💡", "LOW": "📝", "SUGGESTION": "💭"}
 SKIP_PATTERNS = [
@@ -30,9 +30,10 @@ SKIP_PATTERNS = [
 ]
 
 
-def parse_diff_files(diff_text: str) -> dict:
-    """Parse diff to extract file paths, valid line numbers, and line content for comments (both sides)."""
+def parse_diff_files(diff_text: str) -> tuple[dict, str]:
+    """Parse diff and return file mapping with line numbers AND augmented diff with explicit line numbers."""
     files, current_file, new_line, old_line = {}, None, 0, 0
+    augmented_lines = []
 
     for line in diff_text.split("\n"):
         if line.startswith("diff --git"):
@@ -41,23 +42,31 @@ def parse_diff_files(diff_text: str) -> dict:
             new_line, old_line = 0, 0
             if current_file:
                 files[current_file] = {"RIGHT": {}, "LEFT": {}}
+            augmented_lines.append(line)
         elif line.startswith("@@") and current_file:
-            # Extract both old and new line numbers
             match = re.search(r"@@ -(\d+)(?:,\d+)? \+(\d+)(?:,\d+)?", line)
             if match:
                 old_line, new_line = int(match.group(1)), int(match.group(2))
+            augmented_lines.append(line)
         elif current_file and (new_line > 0 or old_line > 0):
             if line.startswith("+") and not line.startswith("+++"):
-                files[current_file]["RIGHT"][new_line] = line[1:]  # Added line (right/new side)
+                files[current_file]["RIGHT"][new_line] = line[1:]
+                augmented_lines.append(f"R{new_line:>5} {line}")  # Prefix with RIGHT line number
                 new_line += 1
             elif line.startswith("-") and not line.startswith("---"):
-                files[current_file]["LEFT"][old_line] = line[1:]  # Removed line (left/old side)
+                files[current_file]["LEFT"][old_line] = line[1:]
+                augmented_lines.append(f"L{old_line:>5} {line}")  # Prefix with LEFT line number
                 old_line += 1
-            elif not line.startswith("\\"):  # Context line (ignore "No newline" markers)
+            elif not line.startswith("\\"):
+                augmented_lines.append(f" {line}")  # Context line, no number
                 new_line += 1
                 old_line += 1
+            else:
+                augmented_lines.append(line)
+        else:
+            augmented_lines.append(line)
 
-    return files
+    return files, "\n".join(augmented_lines)
 
 
 def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_description: str) -> dict:
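
To make the augmented-diff format concrete, here is a self-contained simplification of the R/L prefixing loop above (file tracking dropped; the fixed-width context pad is an assumed alignment choice, not copied from the package):

import re


def augment(diff_text: str) -> str:
    """Prefix added/removed diff lines with their new/old line numbers, as parse_diff_files() now does."""
    out, new_line, old_line = [], 0, 0
    for line in diff_text.split("\n"):
        if line.startswith("@@"):
            if m := re.search(r"@@ -(\d+)(?:,\d+)? \+(\d+)(?:,\d+)?", line):
                old_line, new_line = int(m.group(1)), int(m.group(2))
            out.append(line)
        elif line.startswith("+") and not line.startswith("+++"):
            out.append(f"R{new_line:>5} {line}")  # RIGHT side: line number in the new file
            new_line += 1
        elif line.startswith("-") and not line.startswith("---"):
            out.append(f"L{old_line:>5} {line}")  # LEFT side: line number in the old file
            old_line += 1
        else:
            out.append(f"       {line}")  # context lines advance both counters
            new_line += 1
            old_line += 1
    return "\n".join(out)


print(augment("@@ -10,3 +10,3 @@\n context\n-removed\n+added"))
# Produces: the @@ header unchanged, the padded context line, then "L   11 -removed" and "R   11 +added"

This is what lets the model cite exact line numbers instead of counting forward from @@ hunk headers, which the old prompt required.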
@@ -65,7 +74,7 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
     if not diff_text:
         return {"comments": [], "summary": "No changes detected in diff"}
 
-    diff_files = parse_diff_files(diff_text)
+    diff_files, augmented_diff = parse_diff_files(diff_text)
     if not diff_files:
         return {"comments": [], "summary": "No files with changes detected in diff"}
 
@@ -82,45 +91,45 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
         return {"comments": [], "summary": f"All {skipped_count} changed files are generated/vendored (skipped review)"}
 
     file_list = list(diff_files.keys())
-    diff_truncated = len(diff_text) > MAX_PROMPT_CHARS
+    diff_truncated = len(augmented_diff) > MAX_PROMPT_CHARS
     lines_changed = sum(len(sides["RIGHT"]) + len(sides["LEFT"]) for sides in diff_files.values())
 
     content = (
-        "You are an expert code reviewer for Ultralytics. Provide detailed inline comments on specific code changes.\n\n"
-        "Focus on: Bugs, security, performance, best practices, edge cases, error handling\n\n"
-        "FORMATTING: Use backticks for code: `x=3`, `file.py`, `function()`\n\n"
+        "You are an expert code reviewer for Ultralytics. Review the code changes and provide inline comments where you identify issues or opportunities for improvement.\n\n"
+        "Focus on: bugs, security vulnerabilities, performance issues, best practices, edge cases, error handling, and code clarity.\n\n"
         "CRITICAL RULES:\n"
-        "1. Quality over quantity - zero comments is fine for clean code, only flag truly important issues\n"
-        "2. Combine issues that are directly related to the same problem\n"
-        "3. Use 'start_line' and 'line' to highlight multi-line ranges when issues span multiple lines\n"
+        "1. Provide balanced, constructive feedback - flag bugs, improvements, and best practice issues\n"
+        "2. For issues spanning multiple adjacent lines, use 'start_line' to create ONE multi-line comment, never separate comments\n"
+        "3. Combine related issues into a single comment when they stem from the same root cause\n"
         "4. Prioritize: CRITICAL bugs/security > HIGH impact > code quality improvements\n"
         "5. Keep comments concise and friendly - avoid jargon\n"
-        "6. Skip routine changes: imports, version updates, standard refactoring\n\n"
+        "6. Use backticks for code: `x=3`, `file.py`, `function()`\n"
+        "7. Skip routine changes: imports, version updates, standard refactoring\n\n"
         "SUMMARY:\n"
         "- Brief and actionable - what needs fixing, not where (locations shown in inline comments)\n\n"
         "SUGGESTIONS:\n"
-        "- ONLY provide 'suggestion' field when you have high certainty the code is problematic AND sufficient context for a confident fix\n"
-        "- If uncertain about the correct fix, omit 'suggestion' field and explain the concern in 'message' only\n"
-        "- Suggestions must be ready-to-merge code with NO comments, placeholders, or explanations\n"
-        "- Suggestions replace ONLY the single line at 'line' - for multi-line fixes, describe the change in 'message' instead\n"
-        "- Do NOT provide 'start_line' when including a 'suggestion' - suggestions are always single-line only\n"
-        "- Suggestion content must match the exact indentation of the original line\n"
-        "- Avoid triple backticks (```) in suggestions as they break markdown formatting\n"
-        "- It's better to flag an issue without a suggestion than provide a wrong or uncertain fix\n\n"
+        "- Provide 'suggestion' field with ready-to-merge code when you can confidently fix the issue\n"
+        "- Suggestions must be complete, working code with NO comments, placeholders, or explanations\n"
+        "- For single-line fixes: provide 'suggestion' without 'start_line' to replace the line at 'line'\n"
+        "- Do not provide multi-line fixes: suggestions should only be single line\n"
+        "- Match the exact indentation of the original code\n"
+        "- Avoid triple backticks (```) in suggestions as they break markdown formatting\n\n"
         "LINE NUMBERS:\n"
-        "- You MUST extract line numbers directly from the @@ hunk headers in the diff below\n"
-        "- RIGHT (added +): Find @@ lines, use numbers after +N (e.g., @@ -10,5 +20,7 @@ means RIGHT starts at line 20)\n"
-        "- LEFT (removed -): Find @@ lines, use numbers after -N (e.g., @@ -10,5 +20,7 @@ means LEFT starts at line 10)\n"
-        "- Count forward from hunk start: + lines increment RIGHT, - lines increment LEFT, context lines increment both\n"
-        "- CRITICAL: Using line numbers not in the diff will cause your comment to be rejected\n"
-        "- Suggestions only work on RIGHT (added) lines, never on LEFT (removed) lines\n\n"
+        "- Each line in the diff is prefixed with its line number for clarity:\n"
+        "  R 123 +added code <- RIGHT side (new file), line 123\n"
+        "  L 45 -removed code <- LEFT side (old file), line 45\n"
+        "  context line <- context (no number needed)\n"
+        "- Extract the number after R or L prefix to get the exact line number\n"
+        "- Use 'side': 'RIGHT' for R-prefixed lines, 'side': 'LEFT' for L-prefixed lines\n"
+        "- Suggestions only work on RIGHT lines, never on LEFT lines\n"
+        "- CRITICAL: Only use line numbers that you see explicitly prefixed in the diff\n\n"
         "Return JSON: "
         '{"comments": [{"file": "exact/path", "line": N, "side": "RIGHT", "severity": "HIGH", "message": "..."}], "summary": "..."}\n\n'
         "Rules:\n"
-        "- Verify line numbers from @@ hunks: +N for RIGHT (added), -N for LEFT (removed)\n"
-        "- Exact paths (no ./), 'side' field defaults to RIGHT if omitted\n"
+        "- Extract line numbers from R#### or L#### prefixes in the diff\n"
+        "- Exact paths (no ./), 'side' field must match R (RIGHT) or L (LEFT) prefix\n"
        "- Severity: CRITICAL, HIGH, MEDIUM, LOW, SUGGESTION\n"
-        f"- Files changed: {len(file_list)} ({', '.join(file_list[:10])}{'...' if len(file_list) > 10 else ''})\n"
+        f"- Files changed: {len(file_list)} ({', '.join(file_list[:30])}{'...' if len(file_list) > 30 else ''})\n"
         f"- Lines changed: {lines_changed}\n"
     )
@@ -129,17 +138,21 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
         {
             "role": "user",
             "content": (
-                f"Review this PR in https://github.com/{repository}:\n"
-                f"Title: {pr_title}\n"
-                f"Description: {remove_html_comments(pr_description or '')[:1000]}\n\n"
-                f"Diff:\n{diff_text[:MAX_PROMPT_CHARS]}\n\n"
+                f"Review this PR in https://github.com/{repository}:\n\n"
+                f"TITLE:\n{pr_title}\n\n"
+                f"BODY:\n{remove_html_comments(pr_description or '')[:1000]}\n\n"
+                f"DIFF:\n{augmented_diff[:MAX_PROMPT_CHARS]}\n\n"
                 "Now review this diff according to the rules above. Return JSON with comments array and summary."
             ),
         },
     ]
+    # Debug output
+    # print(f"\nSystem prompt (first 3000 chars):\n{messages[0]['content'][:3000]}...\n")
+    # print(f"\nUser prompt (first 3000 chars):\n{messages[1]['content'][:3000]}...\n")
+
     try:
-        response = get_completion(messages, reasoning_effort="medium", model="gpt-5-codex")
+        response = get_completion(messages, reasoning_effort="low", model="gpt-5-codex")
 
         json_str = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", response, re.DOTALL)
         review_data = json.loads(json_str.group(1) if json_str else response)
@@ -161,20 +174,14 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
                 print(f"Filtered out {file_path}:{line_num} (file not in diff)")
                 continue
             if line_num not in diff_files[file_path].get(side, {}):
-                # Try other side if not found
-                other_side = "LEFT" if side == "RIGHT" else "RIGHT"
-                if line_num in diff_files[file_path].get(other_side, {}):
-                    print(f"Switching {file_path}:{line_num} from {side} to {other_side}")
-                    c["side"] = other_side
-                    side = other_side
-                    # GitHub rejects suggestions on removed lines
-                    if side == "LEFT" and c.get("suggestion"):
-                        print(f"Dropping suggestion for {file_path}:{line_num} - LEFT side doesn't support suggestions")
-                        c.pop("suggestion", None)
-                else:
-                    available = {s: list(diff_files[file_path][s].keys())[:10] for s in ["RIGHT", "LEFT"]}
-                    print(f"Filtered out {file_path}:{line_num} (available: {available})")
-                    continue
+                available = {s: list(diff_files[file_path][s].keys())[:10] for s in ["RIGHT", "LEFT"]}
+                print(f"Filtered out {file_path}:{line_num} (side={side}, available: {available})")
+                continue
+
+            # GitHub rejects suggestions on removed lines
+            if side == "LEFT" and c.get("suggestion"):
+                print(f"Dropping suggestion for {file_path}:{line_num} - LEFT side doesn't support suggestions")
+                c.pop("suggestion", None)
 
             # Validate start_line if provided - drop start_line for suggestions (single-line only)
             if start_line:
@@ -262,9 +269,9 @@ def post_review_summary(event: Action, review_data: dict, review_number: int) ->
     event_type = "COMMENT" if (has_error or has_inline_comments or has_issues) else "APPROVE"
 
     body = (
-        f"## {review_title}\n\n"
-        "<sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)</sub>\n\n"
-        f"{review_data.get('summary', 'Review completed')[:1000]}\n\n"  # Clip summary length
+        f"{review_title}\n\n"
+        f"{ACTIONS_CREDIT}\n\n"
+        f"{review_data.get('summary', 'Review completed')[:3000]}\n\n"  # Clip summary length
     )
 
     if comments:
@@ -284,13 +291,11 @@ def post_review_summary(event: Action, review_data: dict, review_number: int) ->
                 continue
 
             severity = comment.get("severity") or "SUGGESTION"
-            comment_body = f"{EMOJI_MAP.get(severity, '💭')} **{severity}**: {(comment.get('message') or '')[:1000]}"
-
-            # Get side (LEFT for removed lines, RIGHT for added lines)
             side = comment.get("side", "RIGHT")
+            comment_body = f"{EMOJI_MAP.get(severity, '💭')} **{severity}**: {(comment.get('message') or '')[:3000]}"
 
             if suggestion := comment.get("suggestion"):
-                suggestion = suggestion[:1000]  # Clip suggestion length
+                suggestion = suggestion[:3000]  # Clip suggestion length
                 if "```" not in suggestion:
                     # Extract original line indentation and apply to suggestion
                     if original_line := review_data.get("diff_files", {}).get(file_path, {}).get(side, {}).get(line):
@@ -300,19 +305,16 @@ def post_review_summary(event: Action, review_data: dict, review_number: int) ->
 
             # Build comment with optional start_line for multi-line context
             review_comment = {"path": file_path, "line": line, "body": comment_body, "side": side}
-            if start_line := comment.get("start_line"):
-                if start_line < line:
-                    review_comment["start_line"] = start_line
-                    review_comment["start_side"] = side
-                    print(f"Multi-line comment: {file_path}:{start_line}-{line} ({side})")
+            if (start_line := comment.get("start_line")) and start_line < line:
+                review_comment["start_line"] = start_line
+                review_comment["start_side"] = side
 
             review_comments.append(review_comment)
 
     # Submit review with inline comments
-    payload = {"commit_id": commit_sha, "body": body, "event": event_type}
+    payload = {"commit_id": commit_sha, "body": body.strip(), "event": event_type}
     if review_comments:
         payload["comments"] = review_comments
-        print(f"Posting review with {len(review_comments)} inline comments")
 
     event.post(
         f"{GITHUB_API_URL}/repos/{event.repository}/pulls/{pr_number}/reviews",
actions/summarize_pr.py CHANGED
@@ -2,11 +2,9 @@
 
 from __future__ import annotations
 
-from .utils import GITHUB_API_URL, Action, get_completion, get_pr_summary_prompt
+from .utils import ACTIONS_CREDIT, GITHUB_API_URL, Action, get_completion, get_pr_summary_prompt
 
-SUMMARY_START = (
-    "## 🛠️ PR Summary\n\n<sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub>\n\n"
-)
+SUMMARY_MARKER = "## 🛠️ PR Summary"
 
 
 def generate_merge_message(pr_summary, pr_credit, pr_url):
@@ -73,7 +71,8 @@ def generate_pr_summary(repository, diff_text):
     reply = get_completion(messages, temperature=1.0)
     if is_large:
         reply = "**WARNING ⚠️** this PR is very large, summary may not cover all changes.\n\n" + reply
-    return SUMMARY_START + reply
+
+    return f"{SUMMARY_MARKER}\n\n{ACTIONS_CREDIT}\n\n{reply}"
 
 
 def label_fixed_issues(event, pr_summary):
actions/summarize_release.py CHANGED
@@ -196,7 +196,7 @@
     try:
         summary = generate_release_summary(event, diff, prs, CURRENT_TAG, previous_tag)
     except Exception as e:
-        print(f"Failed to generate summary: {str(e)}")
+        print(f"Failed to generate summary: {e}")
         summary = "Failed to generate summary."
 
     # Get the latest commit message
actions/utils/__init__.py CHANGED
@@ -1,6 +1,7 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
 from .common_utils import (
+    ACTIONS_CREDIT,
     REDIRECT_END_IGNORE_LIST,
     REDIRECT_START_IGNORE_LIST,
     REQUESTS_HEADERS,
@@ -20,13 +21,14 @@ from .openai_utils import (
 from .version_utils import check_pubdev_version, check_pypi_version
 
 __all__ = (
+    "ACTIONS_CREDIT",
     "GITHUB_API_URL",
     "GITHUB_GRAPHQL_URL",
     "MAX_PROMPT_CHARS",
+    "REDIRECT_END_IGNORE_LIST",
+    "REDIRECT_START_IGNORE_LIST",
     "REQUESTS_HEADERS",
     "URL_IGNORE_LIST",
-    "REDIRECT_START_IGNORE_LIST",
-    "REDIRECT_END_IGNORE_LIST",
     "Action",
    "allow_redirect",
    "check_pubdev_version",
actions/utils/common_utils.py CHANGED
@@ -23,6 +23,7 @@ REQUESTS_HEADERS = {
     "Sec-Fetch-User": "?1",
     "Sec-Fetch-Dest": "document",
 }
+ACTIONS_CREDIT = "<sub>Made with ❤️ by [Ultralytics Actions](https://www.ultralytics.com/actions)</sub>"
 BAD_HTTP_CODES = frozenset(
     {
         204,  # No content
actions/utils/github_utils.py CHANGED
@@ -101,7 +101,13 @@ mutation($labelableId: ID!, $labelIds: [ID!]!) {
 class Action:
     """Handles GitHub Actions API interactions and event processing."""
 
-    def __init__(self, token: str = None, event_name: str = None, event_data: dict = None, verbose: bool = True):
+    def __init__(
+        self,
+        token: str | None = None,
+        event_name: str | None = None,
+        event_data: dict | None = None,
+        verbose: bool = True,
+    ):
         """Initializes a GitHub Actions API handler with token and event data for processing events."""
         self.token = token or os.getenv("GITHUB_TOKEN")
         self.event_name = event_name or os.getenv("GITHUB_EVENT_NAME")
@@ -116,8 +122,8 @@ class Action:
         self._pr_summary_cache = None
         self._username_cache = None
         self._default_status = {
-            "get": [200],
-            "post": [200, 201],
+            "get": [200, 204],
+            "post": [200, 201, 204],
             "put": [200, 201, 204],
             "patch": [200],
             "delete": [200, 204],
@@ -131,12 +137,15 @@
 
         if self.verbose:
             elapsed = r.elapsed.total_seconds()
-            print(f"{'✓' if success else '✗'} {method.upper()} {url} → {r.status_code} ({elapsed:.1f}s)")
+            print(f"{'✓' if success else '✗'} {method.upper()} {url} → {r.status_code} ({elapsed:.1f}s)", flush=True)
             if not success:
                 try:
-                    print(f" ❌ Error: {r.json().get('message', 'Unknown error')}")
+                    error_data = r.json()
+                    print(f" ❌ Error: {error_data.get('message', 'Unknown error')}")
+                    if errors := error_data.get("errors"):
+                        print(f" Details: {errors}")
                 except Exception:
-                    print(f" ❌ Error: {r.text[:200]}")
+                    print(f" ❌ Error: {r.text[:1000]}")
 
         if not success and hard:
             r.raise_for_status()
@@ -257,7 +266,7 @@
             self.delete(f"{url}/{self.eyes_reaction_id}")
         self.eyes_reaction_id = None
 
-    def graphql_request(self, query: str, variables: dict = None) -> dict:
+    def graphql_request(self, query: str, variables: dict | None = None) -> dict:
         """Executes a GraphQL query against the GitHub API."""
         result = self.post(GITHUB_GRAPHQL_URL, json={"query": query, "variables": variables}).json()
         if "data" not in result or result.get("errors"):
@@ -331,7 +340,9 @@
         else:
             self.post(f"{GITHUB_API_URL}/repos/{self.repository}/issues/{number}/comments", json={"body": comment})
 
-    def update_content(self, number: int, node_id: str, issue_type: str, title: str = None, body: str = None):
+    def update_content(
+        self, number: int, node_id: str, issue_type: str, title: str | None = None, body: str | None = None
+    ):
         """Updates the title and/or body of an issue, pull request, or discussion."""
         if issue_type == "discussion":
             variables = {"discussionId": node_id}
@@ -373,7 +384,7 @@
     def handle_alert(self, number: int, node_id: str, issue_type: str, username: str, block: bool = False):
         """Handles content flagged as alert: updates content, locks, optionally closes and blocks user."""
         new_title = "Content Under Review"
-        new_body = """This post has been flagged for review by [Ultralytics Actions](https://ultralytics.com/actions) due to possible spam, abuse, or off-topic content. For more information please see our:
+        new_body = """This post has been flagged for review by [Ultralytics Actions](https://www.ultralytics.com/actions) due to possible spam, abuse, or off-topic content. For more information please see our:
 
 - [Code of Conduct](https://docs.ultralytics.com/help/code-of-conduct/)
 - [Security Policy](https://docs.ultralytics.com/help/security/)
actions/utils/openai_utils.py CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 
+import json
 import os
 import time
 
@@ -12,6 +13,12 @@ from actions.utils.common_utils import check_links_in_string
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
 OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-5-2025-08-07")
 MAX_PROMPT_CHARS = round(128000 * 3.3 * 0.5)  # Max characters for prompt (50% of 128k context)
+MODEL_COSTS = {
+    "gpt-5-codex": (1.25, 10.00),
+    "gpt-5-2025-08-07": (1.25, 10.00),
+    "gpt-5-nano-2025-08-07": (0.05, 0.40),
+    "gpt-5-mini-2025-08-07": (0.25, 2.00),
+}
 SYSTEM_PROMPT_ADDITION = """Guidance:
 - Ultralytics Branding: Use YOLO11, YOLO26, etc., not YOLOv11, YOLOv26 (only older versions like YOLOv10 have a v). Always capitalize "HUB" in "Ultralytics HUB"; use "Ultralytics HUB", not "The Ultralytics HUB".
 - Avoid Equations: Do not include equations or mathematical notations.
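
MODEL_COSTS stores (input, output) prices in dollars per million tokens, so the cost math used later in get_completion() reduces to the following worked example (token counts are illustrative):

MODEL_COSTS = {"gpt-5-codex": (1.25, 10.00)}  # ($/1M input tokens, $/1M output tokens)


def estimate_cost(model: str, input_tokens: int, output_tokens: int) -> float:
    """Mirror of the cost calculation in get_completion() below; unknown models cost $0."""
    cin, cout = MODEL_COSTS.get(model, (0.0, 0.0))
    return (input_tokens * cin + output_tokens * cout) / 1e6


print(f"${estimate_cost('gpt-5-codex', 50_000, 2_000):.5f}")  # $0.08250 for a 50k-in / 2k-out call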
@@ -34,7 +41,7 @@ def remove_outer_codeblocks(string):
     return string
 
 
-def filter_labels(available_labels: dict, current_labels: list = None, is_pr: bool = False) -> dict:
+def filter_labels(available_labels: dict, current_labels: list | None = None, is_pr: bool = False) -> dict:
     """Filters labels by removing manually-assigned and mutually exclusive labels."""
     current_labels = current_labels or []
     filtered = available_labels.copy()
@@ -107,8 +114,8 @@ def get_completion(
     check_links: bool = True,
     remove: list[str] = (" @giscus[bot]",),
     temperature: float = 1.0,
-    reasoning_effort: str = None,
-    response_format: dict = None,
+    reasoning_effort: str | None = None,
+    response_format: dict | None = None,
     model: str = OPENAI_MODEL,
 ) -> str | dict:
     """Generates a completion using OpenAI's Responses API with retry logic."""
@@ -124,23 +131,46 @@
         data["reasoning"] = {"effort": reasoning_effort or "low"}
 
         try:
-            r = requests.post(url, json=data, headers=headers, timeout=600)
+            r = requests.post(url, json=data, headers=headers, timeout=(30, 900))
+            elapsed = r.elapsed.total_seconds()
             success = r.status_code == 200
-            print(f"{'✓' if success else '✗'} POST {url} → {r.status_code} ({r.elapsed.total_seconds():.1f}s)")
+            print(f"{'✓' if success else '✗'} POST {url} → {r.status_code} ({elapsed:.1f}s)")
+
+            # Retry server errors
+            if attempt < 2 and r.status_code >= 500:
+                print(f"Retrying {r.status_code} in {2**attempt}s (attempt {attempt + 1}/3)...")
+                time.sleep(2**attempt)
+                continue
+
             r.raise_for_status()
 
             # Parse response
+            response_json = r.json()
             content = ""
-            for item in r.json().get("output", []):
+            for item in response_json.get("output", []):
                 if item.get("type") == "message":
                     for c in item.get("content", []):
                         if c.get("type") == "output_text":
                             content += c.get("text") or ""
             content = content.strip()
 
-            if response_format and response_format.get("type") == "json_object":
-                import json
+            # Extract and print token usage
+            if usage := response_json.get("usage"):
+                input_tokens = usage.get("input_tokens", 0)
+                output_tokens = usage.get("output_tokens", 0)
+                thinking_tokens = (usage.get("output_tokens_details") or {}).get("reasoning_tokens", 0)
 
+                # Calculate cost
+                costs = MODEL_COSTS.get(model, (0.0, 0.0))
+                cost = (input_tokens * costs[0] + output_tokens * costs[1]) / 1e6
+
+                # Format summary
+                token_str = f"{input_tokens}→{output_tokens - thinking_tokens}"
+                if thinking_tokens:
+                    token_str += f" (+{thinking_tokens} thinking)"
+                print(f"{model} ({token_str} = {input_tokens + output_tokens} tokens, ${cost:.5f}, {elapsed:.1f}s)")
+
+            if response_format and response_format.get("type") == "json_object":
                 return json.loads(content)
 
             content = remove_outer_codeblocks(content)
@@ -154,17 +184,13 @@
 
             return content
 
-        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
+        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout, json.JSONDecodeError) as e:
             if attempt < 2:
-                print(f"Connection error, retrying in {2**attempt}s")
+                print(f"Retrying {e.__class__.__name__} in {2**attempt}s (attempt {attempt + 1}/3)...")
                 time.sleep(2**attempt)
                 continue
             raise
-        except requests.exceptions.HTTPError as e:
-            if attempt < 2 and e.response and e.response.status_code >= 500:
-                print(f"Server error {e.response.status_code}, retrying in {2**attempt}s")
-                time.sleep(2**attempt)
-                continue
+        except requests.exceptions.HTTPError:  # 4xx errors
             raise
 
     return content
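
Taken together, the reworked error handling retries up to 3 attempts with 2**attempt seconds of backoff, now covering 5xx status codes (checked inline before raise_for_status) and JSON decode failures in addition to connection/timeout errors, while 4xx errors still raise immediately. A standalone sketch of that policy, with the exception types narrowed for illustration:

import time


def with_backoff(fn, attempts: int = 3):
    """Sketch of get_completion()'s retry policy: back off 1s, then 2s, then give up."""
    for attempt in range(attempts):
        try:
            return fn()
        except (ConnectionError, TimeoutError) as e:
            if attempt < attempts - 1:
                print(f"Retrying {e.__class__.__name__} in {2**attempt}s (attempt {attempt + 1}/{attempts})...")
                time.sleep(2**attempt)
                continue
            raise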
ultralytics_actions-0.1.8.dist-info/METADATA → ultralytics_actions-0.2.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics-actions
-Version: 0.1.8
+Version: 0.2.0
 Summary: Ultralytics Actions for GitHub automation and PR management.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
ultralytics_actions-0.2.0.dist-info/RECORD ADDED
@@ -0,0 +1,19 @@
+actions/__init__.py,sha256=Ms2e2jGfy0slxc1dw9maEddDHLCkempqURchc3G3TRw,772
+actions/dispatch_actions.py,sha256=ljlFR1o8m1qTHbStsJJVMVDdJv7iVqMfdPzKlZyKXl8,6743
+actions/first_interaction.py,sha256=c8I6trXvsgii3B3k_HFWOYmLqbyz_oimHD2BeTGPoUM,9795
+actions/review_pr.py,sha256=JSAHyqoVty6Ob8_08zfllCynI6cY_3rcZT6ZLz5OHFU,17041
+actions/summarize_pr.py,sha256=0y4Cl4_ZMMtDWVhxwWasn3mHo_4GCnegJrf29yujUYM,5715
+actions/summarize_release.py,sha256=8D5EOQ36mho1HKtWD2J-IDH_xJJb3q0shgXZSdemmDM,9078
+actions/update_file_headers.py,sha256=E5fKYLdeW16-BHCcuqxohGpGZqgEh-WX4ZmCQJw2R90,6684
+actions/update_markdown_code_blocks.py,sha256=w3DTRltg2Rmr4-qrNawv_S2vJbheKE0tne1iz79FzXg,8692
+actions/utils/__init__.py,sha256=Uf7S5qYHS59zoAP9uKVIZwhpUbgyI947dD9jAWu50Lg,1115
+actions/utils/common_utils.py,sha256=InBc-bsXcwzQYjuDxtrrm3bj7J-70U54G0s2nQKgCg8,12052
+actions/utils/github_utils.py,sha256=5yzNIiu7-WBmH1-gSi4O31m1Fwd4k8pfbwM2BPVGf88,19989
+actions/utils/openai_utils.py,sha256=gblrRoWND0AMlN9AYmMxIPAnppK9ZWrsYR5547_5zUU,11839
+actions/utils/version_utils.py,sha256=EIbm3iZVNyNl3dh8aNz_9ITeTC93ZxfyUzIRkO3tSXw,3242
+ultralytics_actions-0.2.0.dist-info/licenses/LICENSE,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
+ultralytics_actions-0.2.0.dist-info/METADATA,sha256=CbtP6vJwbnRoElLzOPgYDRYMSZsEEZGUjHyZvkWWlb4,12368
+ultralytics_actions-0.2.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ultralytics_actions-0.2.0.dist-info/entry_points.txt,sha256=n_VbDs3Xj33daaeN_2D72UTEuyeH8hVc6-CPH55ymkY,496
+ultralytics_actions-0.2.0.dist-info/top_level.txt,sha256=5apM5x80QlJcGbACn1v3fkmIuL1-XQCKcItJre7w7Tw,8
+ultralytics_actions-0.2.0.dist-info/RECORD,,
ultralytics_actions-0.1.8.dist-info/RECORD DELETED
@@ -1,19 +0,0 @@
-actions/__init__.py,sha256=iaUhZH1t2gpoOoqwPRBkP7bO6z4Xd3R_JPDt8VxcO2U,772
-actions/dispatch_actions.py,sha256=i81UeHrYudAsOUFUfN71u6X-1cmZaZaiiTj6p2rvz8A,4217
-actions/first_interaction.py,sha256=QxPsLjd-m2G-QYOcQb2hQfIB_alupzeZzSHTk-jw0bg,9856
-actions/review_pr.py,sha256=6svsUPJTH4FSVfq4yQ4Y_PuwcEy522hmYSTOtDZbBKc,17371
-actions/summarize_pr.py,sha256=3nFotiZX42dz-mzDQ9wcoUILJKkcaxrC5EeyxvuvY60,5775
-actions/summarize_release.py,sha256=iCXa9a1DcOrDVe8pMWEsYKgDxuIOhIgMsYymElOLK6o,9083
-actions/update_file_headers.py,sha256=E5fKYLdeW16-BHCcuqxohGpGZqgEh-WX4ZmCQJw2R90,6684
-actions/update_markdown_code_blocks.py,sha256=w3DTRltg2Rmr4-qrNawv_S2vJbheKE0tne1iz79FzXg,8692
-actions/utils/__init__.py,sha256=unjXYIFNFeHrdC8LooDFVWlj6fAdGhssUgASo5229zY,1073
-actions/utils/common_utils.py,sha256=2DRvcyCgmn507w3T4FJcQSZNI9KC1gVUb8CnJqPapD0,11943
-actions/utils/github_utils.py,sha256=cBgEDJBpImTJbGBoZTteVSmCqXPuzEb51np7gRhqPeM,19702
-actions/utils/openai_utils.py,sha256=xI_DZpsEBzXyqQDozMLEtmjwuNlOpNL9n2b-gA6xL5Y,10658
-actions/utils/version_utils.py,sha256=EIbm3iZVNyNl3dh8aNz_9ITeTC93ZxfyUzIRkO3tSXw,3242
-ultralytics_actions-0.1.8.dist-info/licenses/LICENSE,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
-ultralytics_actions-0.1.8.dist-info/METADATA,sha256=dSciUEZ62eiZNHZS60RdWLla4u7Qt_81RSBqDR7xSNs,12368
-ultralytics_actions-0.1.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-ultralytics_actions-0.1.8.dist-info/entry_points.txt,sha256=n_VbDs3Xj33daaeN_2D72UTEuyeH8hVc6-CPH55ymkY,496
-ultralytics_actions-0.1.8.dist-info/top_level.txt,sha256=5apM5x80QlJcGbACn1v3fkmIuL1-XQCKcItJre7w7Tw,8
-ultralytics_actions-0.1.8.dist-info/RECORD,,