ultralytics-actions 0.2.0__py3-none-any.whl → 0.2.2__py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Potentially problematic release: this version of ultralytics-actions might be problematic.
- actions/__init__.py +5 -1
- actions/first_interaction.py +5 -5
- actions/format_python_docstrings.py +511 -0
- actions/review_pr.py +80 -12
- actions/scan_prs.py +233 -0
- actions/summarize_pr.py +4 -4
- actions/summarize_release.py +2 -2
- actions/update_markdown_code_blocks.py +5 -11
- actions/utils/__init__.py +2 -2
- actions/utils/openai_utils.py +36 -14
- {ultralytics_actions-0.2.0.dist-info → ultralytics_actions-0.2.2.dist-info}/METADATA +118 -70
- ultralytics_actions-0.2.2.dist-info/RECORD +21 -0
- {ultralytics_actions-0.2.0.dist-info → ultralytics_actions-0.2.2.dist-info}/entry_points.txt +1 -0
- ultralytics_actions-0.2.0.dist-info/RECORD +0 -19
- {ultralytics_actions-0.2.0.dist-info → ultralytics_actions-0.2.2.dist-info}/WHEEL +0 -0
- {ultralytics_actions-0.2.0.dist-info → ultralytics_actions-0.2.2.dist-info}/licenses/LICENSE +0 -0
- {ultralytics_actions-0.2.0.dist-info → ultralytics_actions-0.2.2.dist-info}/top_level.txt +0 -0
actions/review_pr.py
CHANGED
@@ -4,8 +4,9 @@ from __future__ import annotations
 
 import json
 import re
+from pathlib import Path
 
-from .utils import ACTIONS_CREDIT, GITHUB_API_URL, MAX_PROMPT_CHARS, Action, get_completion, remove_html_comments
+from .utils import ACTIONS_CREDIT, GITHUB_API_URL, MAX_PROMPT_CHARS, Action, get_response, remove_html_comments
 
 REVIEW_MARKER = "## 🔍 PR Review"
 ERROR_MARKER = "⚠️ Review generation encountered an error"
@@ -69,7 +70,9 @@ def parse_diff_files(diff_text: str) -> tuple[dict, str]:
     return files, "\n".join(augmented_lines)
 
 
-def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_description: str) -> dict:
+def generate_pr_review(
+    repository: str, diff_text: str, pr_title: str, pr_description: str, event: Action = None
+) -> dict:
     """Generate comprehensive PR review with line-specific comments and overall assessment."""
     if not diff_text:
         return {"comments": [], "summary": "No changes detected in diff"}
@@ -94,6 +97,28 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
     diff_truncated = len(augmented_diff) > MAX_PROMPT_CHARS
     lines_changed = sum(len(sides["RIGHT"]) + len(sides["LEFT"]) for sides in diff_files.values())
 
+    # Fetch full file contents for better context if within token budget
+    full_files_section = ""
+    if event and len(file_list) <= 10:  # Reasonable file count limit
+        file_contents = []
+        total_chars = len(augmented_diff)
+        for file_path in file_list:
+            try:
+                local_path = Path(file_path)
+                if not local_path.exists():
+                    continue
+                content = local_path.read_text(encoding="utf-8")
+                # Only include if within budget
+                if total_chars + len(content) + 1000 < MAX_PROMPT_CHARS:  # 1000 char buffer for formatting
+                    file_contents.append(f"### {file_path}\n```\n{content}\n```")
+                    total_chars += len(content) + 1000
+                else:
+                    break  # Stop when we hit budget limit
+            except Exception:
+                continue
+        if file_contents:
+            full_files_section = f"FULL FILE CONTENTS:\n{chr(10).join(file_contents)}\n\n"
+
     content = (
         "You are an expert code reviewer for Ultralytics. Review the code changes and provide inline comments where you identify issues or opportunities for improvement.\n\n"
         "Focus on: bugs, security vulnerabilities, performance issues, best practices, edge cases, error handling, and code clarity.\n\n"
@@ -113,6 +138,7 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
         "- For single-line fixes: provide 'suggestion' without 'start_line' to replace the line at 'line'\n"
         "- Do not provide multi-line fixes: suggestions should only be single line\n"
         "- Match the exact indentation of the original code\n"
+        "- Web search is available to consult docs, dependencies, or technical details\n"
        "- Avoid triple backticks (```) in suggestions as they break markdown formatting\n\n"
        "LINE NUMBERS:\n"
        "- Each line in the diff is prefixed with its line number for clarity:\n"
@@ -141,6 +167,7 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
             f"Review this PR in https://github.com/{repository}:\n\n"
             f"TITLE:\n{pr_title}\n\n"
             f"BODY:\n{remove_html_comments(pr_description or '')[:1000]}\n\n"
+            f"{full_files_section}"
             f"DIFF:\n{augmented_diff[:MAX_PROMPT_CHARS]}\n\n"
             "Now review this diff according to the rules above. Return JSON with comments array and summary."
         ),
@@ -152,19 +179,60 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
     # print(f"\nUser prompt (first 3000 chars):\n{messages[1]['content'][:3000]}...\n")
 
     try:
-
+        schema = {
+            "type": "object",
+            "properties": {
+                "comments": {
+                    "type": "array",
+                    "items": {
+                        "type": "object",
+                        "properties": {
+                            "file": {"type": "string"},
+                            "line": {"type": "integer"},
+                            "side": {"type": "string", "enum": ["LEFT", "RIGHT"]},
+                            "severity": {"type": "string", "enum": ["CRITICAL", "HIGH", "MEDIUM", "LOW", "SUGGESTION"]},
+                            "message": {"type": "string"},
+                            "start_line": {"type": ["integer", "null"]},
+                            "suggestion": {"type": ["string", "null"]},
+                        },
+                        "required": ["file", "line", "side", "severity", "message", "start_line", "suggestion"],
+                        "additionalProperties": False,
+                    },
+                },
+                "summary": {"type": "string"},
+            },
+            "required": ["comments", "summary"],
+            "additionalProperties": False,
+        }
+
+        response = get_response(
+            messages,
+            reasoning_effort="low",
+            model="gpt-5-codex",
+            text_format={"format": {"type": "json_schema", "name": "pr_review", "strict": True, "schema": schema}},
+            tools=[
+                {
+                    "type": "web_search",
+                    "filters": {
+                        "allowed_domains": [
+                            "ultralytics.com",
+                            "github.com",
+                            "stackoverflow.com",
+                        ]
+                    },
+                }
+            ],
+        )
 
-
-        review_data = json.loads(json_str.group(1) if json_str else response)
-        print(json.dumps(review_data, indent=2))
+        print(json.dumps(response, indent=2))
 
         # Count comments BEFORE filtering (for COMMENT vs APPROVE decision)
-        comments_before_filtering = len(review_data.get("comments", []))
+        comments_before_filtering = len(response.get("comments", []))
         print(f"AI generated {comments_before_filtering} comments")
 
         # Validate, filter, and deduplicate comments
         unique_comments = {}
-        for c in review_data.get("comments", []):
+        for c in response.get("comments", []):
            file_path, line_num = c.get("file"), c.get("line", 0)
            start_line = c.get("start_line")
            side = (c.get("side") or "RIGHT").upper()  # Default to RIGHT (added lines)
@@ -202,7 +270,7 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
             else:
                 print(f"⚠️ AI duplicate for {key}: {c.get('severity')} - {(c.get('message') or '')[:60]}...")
 
-        review_data.update(
+        response.update(
             {
                 "comments": list(unique_comments.values()),
                 "comments_before_filtering": comments_before_filtering,
@@ -211,8 +279,8 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
                 "skipped_files": skipped_count,
             }
         )
-        print(f"Valid comments after filtering: {len(review_data['comments'])}")
-        return review_data
+        print(f"Valid comments after filtering: {len(response['comments'])}")
+        return response
 
     except Exception as e:
         import traceback
@@ -344,7 +412,7 @@ def main(*args, **kwargs):
     review_number = dismiss_previous_reviews(event)
 
     diff = event.get_pr_diff()
-    review = generate_pr_review(event.repository, diff, event.pr.get("title") or "", event.pr.get("body") or "")
+    review = generate_pr_review(event.repository, diff, event.pr.get("title") or "", event.pr.get("body") or "", event)
 
     post_review_summary(event, review, review_number)
     print("PR review completed")
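The net effect of this change is that generate_pr_review now receives an already-parsed dict from the Responses API (strict JSON enforced by the schema) instead of regex-extracting JSON from free text. A minimal sketch of exercising the updated function locally; the repository name, diff string, and title here are illustrative, OPENAI_API_KEY must be set, and this performs a live model call:

from actions.review_pr import generate_pr_review

# Illustrative diff containing an obvious bug for the reviewer to flag
demo_diff = """diff --git a/demo.py b/demo.py
+++ b/demo.py
@@ -0,0 +1,2 @@
+def add(a, b):
+    return a - b
"""

# Passing no event keeps the new full-file-context step disabled (no local files are read)
review = generate_pr_review("ultralytics/actions", demo_diff, "Add helper", "Demo only")
print(review["summary"])
for c in review["comments"]:
    print(f"{c['file']}:{c['line']} [{c['severity']}] {c['message']}")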
actions/scan_prs.py
ADDED
@@ -0,0 +1,233 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""List and auto-merge open PRs across GitHub organization."""
+
+import json
+import os
+import subprocess
+from datetime import datetime, timezone
+
+
+def get_age_days(created_at):
+    """Calculate PR age in days from ISO timestamp."""
+    return (datetime.now(timezone.utc) - datetime.fromisoformat(created_at.replace("Z", "+00:00"))).days
+
+
+def get_phase_emoji(age_days):
+    """Return emoji and label for PR age phase."""
+    if age_days == 0:
+        return "🆕", "NEW"
+    elif age_days <= 7:
+        return "🟢", f"{age_days} days"
+    elif age_days <= 30:
+        return "🟡", f"{age_days} days"
+    else:
+        return "🔴", f"{age_days} days"
+
+
+def parse_visibility(visibility_input, repo_visibility):
+    """Parse and validate visibility settings with security checks."""
+    valid = {"public", "private", "internal", "all"}
+    stripped = [v.strip() for v in visibility_input.lower().split(",") if v.strip()]
+    repo_visibility = (repo_visibility or "").lower()
+
+    # Warn about invalid values
+    if invalid := [v for v in stripped if v not in valid]:
+        print(f"⚠️ Invalid visibility values: {', '.join(invalid)} - ignoring")
+
+    visibility_list = [v for v in stripped if v in valid]
+    if not visibility_list:
+        print("⚠️ No valid visibility values, defaulting to 'public'")
+        return ["public"]
+
+    # Security: public repos can only scan public repos
+    if repo_visibility == "public" and visibility_list != ["public"]:
+        print("⚠️ Security: Public repo cannot scan non-public repos. Restricting to public only.")
+        return ["public"]
+
+    return visibility_list
+
+
+def get_repo_filter(visibility_list):
+    """Return filtering strategy for repo visibility."""
+    if len(visibility_list) == 1 and visibility_list[0] != "all":
+        return {"flag": ["--visibility", visibility_list[0]], "filter": None, "str": visibility_list[0]}
+
+    filter_set = {"public", "private", "internal"} if "all" in visibility_list else set(visibility_list)
+    return {
+        "flag": [],
+        "filter": filter_set,
+        "str": "all" if "all" in visibility_list else ", ".join(sorted(visibility_list)),
+    }
+
+
+def get_status_checks(rollup):
+    """Extract and validate status checks from rollup, return failed checks."""
+    checks = rollup if isinstance(rollup, list) else rollup.get("contexts", []) if isinstance(rollup, dict) else []
+    return [c for c in checks if c.get("conclusion") not in ["SUCCESS", "SKIPPED", "NEUTRAL"]]
+
+
+def run():
+    """List open PRs across organization and auto-merge eligible Dependabot PRs."""
+    org = os.getenv("ORG", "ultralytics")
+    visibility_list = parse_visibility(os.getenv("VISIBILITY", "public"), os.getenv("REPO_VISIBILITY", "public"))
+    filter_config = get_repo_filter(visibility_list)
+
+    print(f"🔍 Scanning {filter_config['str']} repositories in {org} organization...")
+
+    # Get active repos
+    result = subprocess.run(
+        ["gh", "repo", "list", org, "--limit", "1000", "--json", "name,url,isArchived,visibility"]
+        + filter_config["flag"],
+        capture_output=True,
+        text=True,
+        check=True,
+    )
+    all_repos = [r for r in json.loads(result.stdout) if not r["isArchived"]]
+    repos = {
+        r["name"]: r["url"]
+        for r in all_repos
+        if not filter_config["filter"] or r["visibility"].lower() in filter_config["filter"]
+    }
+
+    if not repos:
+        print("⚠️ No repositories found")
+        return
+
+    # Get all open PRs
+    result = subprocess.run(
+        [
+            "gh",
+            "search",
+            "prs",
+            "--owner",
+            org,
+            "--state",
+            "open",
+            "--limit",
+            "1000",
+            "--json",
+            "repository,number,title,url,createdAt",
+            "--sort",
+            "created",
+            "--order",
+            "desc",
+        ],
+        capture_output=True,
+        text=True,
+        check=True,
+    )
+    all_prs = json.loads(result.stdout)
+
+    if not all_prs:
+        print("✅ No open PRs found")
+        return
+
+    # Filter PRs to only include those from scanned repos
+    all_prs = [pr for pr in all_prs if pr["repository"]["name"] in repos]
+
+    if not all_prs:
+        print("✅ No open PRs found in scanned repositories")
+        return
+
+    # Count PRs by phase
+    phase_counts = {"new": 0, "green": 0, "yellow": 0, "red": 0}
+    for pr in all_prs:
+        age = get_age_days(pr["createdAt"])
+        phase_counts["new" if age == 0 else "green" if age <= 7 else "yellow" if age <= 30 else "red"] += 1
+
+    summary = [
+        f"# 🔍 Open Pull Requests - {org.title()} Organization\n",
+        f"**Total:** {len(all_prs)} open PRs across {len({pr['repository']['name'] for pr in all_prs})}/{len(repos)} {filter_config['str']} repos",
+        f"**By Phase:** 🆕 {phase_counts['new']} New | 🟢 {phase_counts['green']} ≤7d | 🟡 {phase_counts['yellow']} ≤30d | 🔴 {phase_counts['red']} >30d\n",
+    ]
+
+    for repo_name in sorted({pr["repository"]["name"] for pr in all_prs}):
+        repo_prs = [pr for pr in all_prs if pr["repository"]["name"] == repo_name]
+        summary.append(
+            f"## 📦 [{repo_name}]({repos[repo_name]}) - {len(repo_prs)} open PR{'s' if len(repo_prs) > 1 else ''}"
+        )
+
+        for pr in repo_prs[:30]:
+            emoji, age_str = get_phase_emoji(get_age_days(pr["createdAt"]))
+            summary.append(f"- [#{pr['number']}]({pr['url']}) {pr['title']} {emoji} {age_str}")
+
+        if len(repo_prs) > 30:
+            summary.append(f"- ... {len(repo_prs) - 30} more PRs")
+        summary.append("")
+
+    # Auto-merge Dependabot GitHub Actions PRs
+    print("\n🤖 Checking for Dependabot PRs to auto-merge...")
+    summary.append("\n# 🤖 Auto-Merge Dependabot GitHub Actions PRs\n")
+    total_found = total_merged = total_skipped = 0
+
+    for repo_name in repos:
+        result = subprocess.run(
+            [
+                "gh",
+                "pr",
+                "list",
+                "--repo",
+                f"{org}/{repo_name}",
+                "--author",
+                "app/dependabot",
+                "--state",
+                "open",
+                "--json",
+                "number,title,files,mergeable,statusCheckRollup",
+            ],
+            capture_output=True,
+            text=True,
+        )
+        if result.returncode != 0:
+            continue
+
+        merged = 0
+        for pr in json.loads(result.stdout):
+            if not all(f["path"].startswith(".github/workflows/") for f in pr["files"]):
+                continue
+
+            total_found += 1
+            pr_ref = f"{org}/{repo_name}#{pr['number']}"
+            print(f" Found: {pr_ref} - {pr['title']}")
+
+            if merged >= 1:
+                print(f" ⏭️ Skipped (already merged 1 PR in {repo_name})")
+                total_skipped += 1
+                continue
+
+            if pr["mergeable"] != "MERGEABLE":
+                print(f" ❌ Skipped (not mergeable: {pr['mergeable']})")
+                total_skipped += 1
+                continue
+
+            if failed := get_status_checks(pr.get("statusCheckRollup")):
+                for check in failed:
+                    print(f" ❌ Failing check: {check.get('name', 'unknown')} = {check.get('conclusion')}")
+                total_skipped += 1
+                continue
+
+            print(" ✅ All checks passed, merging...")
+            result = subprocess.run(
+                ["gh", "pr", "merge", str(pr["number"]), "--repo", f"{org}/{repo_name}", "--squash", "--admin"],
+                capture_output=True,
+                text=True,
+            )
+            if result.returncode == 0:
+                print(f" ✅ Successfully merged {pr_ref}")
+                summary.append(f"- ✅ Merged {pr_ref}")
+                total_merged += 1
+                merged += 1
+            else:
+                print(f" ❌ Merge failed: {result.stderr.strip()}")
+                total_skipped += 1
+
+    summary.append(f"\n**Summary:** Found {total_found} | Merged {total_merged} | Skipped {total_skipped}")
+    print(f"\n📊 Dependabot Summary: Found {total_found} | Merged {total_merged} | Skipped {total_skipped}")
+
+    if summary_file := os.getenv("GITHUB_STEP_SUMMARY"):
+        with open(summary_file, "a") as f:
+            f.write("\n".join(summary))
+
+
+if __name__ == "__main__":
+    run()
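Since run() is configured entirely through environment variables, the visibility handling (including the security downgrade for public callers) can be sanity-checked without touching the network by calling parse_visibility directly; the values below are illustrative:

from actions.scan_prs import parse_visibility

# A public caller asking for private repos is downgraded to public only
print(parse_visibility("public,private", "public"))  # ['public']
# A private caller may scan everything
print(parse_visibility("all", "private"))  # ['all']
# Invalid values are warned about and dropped, falling back to 'public'
print(parse_visibility("bogus", "private"))  # ['public']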
actions/summarize_pr.py
CHANGED
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from .utils import ACTIONS_CREDIT, GITHUB_API_URL, Action, get_completion, get_pr_summary_prompt
+from .utils import ACTIONS_CREDIT, GITHUB_API_URL, Action, get_pr_summary_prompt, get_response
 
 SUMMARY_MARKER = "## 🛠️ PR Summary"
 
@@ -24,7 +24,7 @@ def generate_merge_message(pr_summary, pr_credit, pr_url):
             ),
         },
     ]
-    return get_completion(messages)
+    return get_response(messages)
 
 
 def generate_issue_comment(pr_url, pr_summary, pr_credit, pr_title=""):
@@ -54,7 +54,7 @@ def generate_issue_comment(pr_url, pr_summary, pr_credit, pr_title=""):
             f"5. Thank 🙏 for reporting the issue and welcome any further feedback if the issue persists\n\n",
         },
     ]
-    return get_completion(messages)
+    return get_response(messages)
 
 
 def generate_pr_summary(repository, diff_text):
@@ -68,7 +68,7 @@ def generate_pr_summary(repository, diff_text):
         },
         {"role": "user", "content": prompt},
     ]
-    reply = get_completion(messages, temperature=1.0)
+    reply = get_response(messages, temperature=1.0)
     if is_large:
         reply = "**WARNING ⚠️** this PR is very large, summary may not cover all changes.\n\n" + reply
 
actions/summarize_release.py
CHANGED
@@ -8,7 +8,7 @@ import subprocess
 import time
 from datetime import datetime
 
-from .utils import GITHUB_API_URL, Action, get_completion, remove_html_comments
+from .utils import GITHUB_API_URL, Action, get_response, remove_html_comments
 
 # Environment variables
 CURRENT_TAG = os.getenv("CURRENT_TAG")
@@ -150,7 +150,7 @@ def generate_release_summary(
         },
     ]
     # print(messages[-1]["content"]) # for debug
-    return get_completion(messages, temperature=1.0) + release_suffix
+    return get_response(messages, temperature=1.0) + release_suffix
 
 
 def create_github_release(event, tag_name: str, name: str, body: str):

actions/update_markdown_code_blocks.py
CHANGED

@@ -38,7 +38,7 @@ def add_indentation(code_block, num_spaces):
 
 
 def format_code_with_ruff(temp_dir):
-    """Formats Python code files in the specified directory using ruff and docformatter."""
+    """Formats Python code files in the specified directory using ruff and Python docstring formatter."""
     if not next(Path(temp_dir).rglob("*.py"), None):
         return
 
@@ -81,23 +81,17 @@ def format_code_with_ruff(temp_dir):
         print(f"ERROR running ruff check ❌ {e}")
 
     try:
-        # Run docformatter
+        # Run Ultralytics Python docstring formatter
         subprocess.run(
             [
-                "docformatter",
-                "--wrap-summaries=120",
-                "--wrap-descriptions=120",
-                "--pre-summary-newline",
-                "--close-quotes-on-newline",
-                "--in-place",
-                "--recursive",
+                "ultralytics-actions-format-python-docstrings",
                 str(temp_dir),
             ],
             check=True,
         )
-        print("Completed docformatter formatting ✅")
+        print("Completed Python docstring formatting ✅")
     except Exception as e:
-        print(f"ERROR running docformatter ❌ {e}")
+        print(f"ERROR running Python docstring formatter ❌ {e}")
 
 
 def format_bash_with_prettier(temp_dir):
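The third-party docformatter invocation is replaced by the package's own formatter (the new actions/format_python_docstrings.py module, registered as a console script per the entry_points.txt change). A minimal sketch of invoking it the same way, assuming ultralytics-actions 0.2.2 is installed and the script is on PATH; the target directory is illustrative:

import subprocess

# Format docstrings in all .py files under a target directory, as the updated
# format_code_with_ruff() does for its temp directory
subprocess.run(["ultralytics-actions-format-python-docstrings", "src/"], check=True)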
actions/utils/__init__.py
CHANGED
@@ -13,10 +13,10 @@ from .github_utils import GITHUB_API_URL, GITHUB_GRAPHQL_URL, Action, ultralytic
 from .openai_utils import (
     MAX_PROMPT_CHARS,
     filter_labels,
-    get_completion,
     get_pr_open_response,
     get_pr_summary_guidelines,
     get_pr_summary_prompt,
+    get_response,
 )
 from .version_utils import check_pubdev_version, check_pypi_version
 
@@ -34,10 +34,10 @@ __all__ = (
     "check_pubdev_version",
     "check_pypi_version",
     "filter_labels",
-    "get_completion",
     "get_pr_open_response",
     "get_pr_summary_guidelines",
     "get_pr_summary_prompt",
+    "get_response",
     "remove_html_comments",
     "ultralytics_actions_info",
 )
actions/utils/openai_utils.py
CHANGED
@@ -94,9 +94,9 @@ def get_pr_summary_prompt(repository: str, diff_text: str) -> tuple[str, bool]:
     return prompt, len(diff_text) > MAX_PROMPT_CHARS
 
 
-def get_pr_first_comment_template(repository: str) -> str:
+def get_pr_first_comment_template(repository: str, username: str) -> str:
     """Returns the PR first comment template with checklist (used only by unified PR open)."""
-    return f"""👋 Hello @username, thank you for submitting a `{repository}` 🚀 PR! To ensure a seamless integration of your work, please review the following checklist:
+    return f"""👋 Hello @{username}, thank you for submitting a `{repository}` 🚀 PR! To ensure a seamless integration of your work, please review the following checklist:
 
 - ✅ **Define a Purpose**: Clearly explain the purpose of your fix or feature in your PR description, and link to any [relevant issues](https://github.com/{repository}/issues). Ensure your commit messages are clear, concise, and adhere to the project's conventions.
 - ✅ **Synchronize with Source**: Confirm your PR is synchronized with the `{repository}` `main` branch. If it's behind, update it by clicking the 'Update branch' button or by running `git pull` and `git merge main` locally.
@@ -109,14 +109,15 @@ def get_pr_first_comment_template(repository: str) -> str:
 For more guidance, please refer to our [Contributing Guide](https://docs.ultralytics.com/help/contributing/). Don't hesitate to leave a comment if you have any questions. Thank you for contributing to Ultralytics! 🚀"""
 
 
-def get_completion(
+def get_response(
     messages: list[dict[str, str]],
     check_links: bool = True,
     remove: list[str] = (" @giscus[bot]",),
     temperature: float = 1.0,
     reasoning_effort: str | None = None,
-
+    text_format: dict | None = None,
     model: str = OPENAI_MODEL,
+    tools: list[dict] | None = None,
 ) -> str | dict:
     """Generates a completion using OpenAI's Responses API with retry logic."""
     assert OPENAI_API_KEY, "OpenAI API key is required."
@@ -129,6 +130,10 @@ def get_completion(
     data = {"model": model, "input": messages, "store": False, "temperature": temperature}
     if "gpt-5" in model:
         data["reasoning"] = {"effort": reasoning_effort or "low"}
+    if text_format:
+        data["text"] = text_format
+    if tools:
+        data["tools"] = tools
 
     try:
         r = requests.post(url, json=data, headers=headers, timeout=(30, 900))
@@ -142,6 +147,11 @@ def get_completion(
             time.sleep(2**attempt)
             continue
 
+        if r.status_code >= 400:
+            error_body = r.text
+            print(f"API Error {r.status_code}: {error_body}")
+            r.reason = f"{r.reason}\n{error_body}"  # Add error body to exception message
+
         r.raise_for_status()
 
         # Parse response
@@ -170,7 +180,7 @@ def get_completion(
             token_str += f" (+{thinking_tokens} thinking)"
         print(f"{model} ({token_str} = {input_tokens + output_tokens} tokens, ${cost:.5f}, {elapsed:.1f}s)")
 
-        if
+        if text_format and text_format.get("format", {}).get("type") in ["json_object", "json_schema"]:
             return json.loads(content)
 
         content = remove_outer_codeblocks(content)
@@ -196,16 +206,16 @@ def get_completion(
     return content
 
 
-def get_pr_open_response(repository: str, diff_text: str, title: str, available_labels: dict) -> dict:
+def get_pr_open_response(repository: str, diff_text: str, title: str, username: str, available_labels: dict) -> dict:
     """Generates unified PR response with summary, labels, and first comment in a single API call."""
     is_large = len(diff_text) > MAX_PROMPT_CHARS
 
     filtered_labels = filter_labels(available_labels, is_pr=True)
     labels_str = "\n".join(f"- {name}: {description}" for name, description in filtered_labels.items())
 
-    prompt = f"""You are processing a new GitHub PR for the {repository} repository.
+    prompt = f"""You are processing a new GitHub PR by @{username} for the {repository} repository.
 
-Generate 3 outputs in a single JSON response for the PR titled {title} with the following diff:
+Generate 3 outputs in a single JSON response for the PR titled '{title}' with the following diff:
 {diff_text[:MAX_PROMPT_CHARS]}
 
@@ -227,16 +237,28 @@ Customized welcome message adapting the template below:
 - No spaces between bullet points
 
 Example comment template (adapt as needed, keep all links):
-{get_pr_first_comment_template(repository)}
-
-
-
+{get_pr_first_comment_template(repository, username)}"""
+
+    schema = {
+        "type": "object",
+        "properties": {
+            "summary": {"type": "string", "description": "PR summary with emoji sections"},
+            "labels": {"type": "array", "items": {"type": "string"}, "description": "Array of label names"},
+            "first_comment": {"type": "string", "description": "Welcome comment with checklist"},
+        },
+        "required": ["summary", "labels", "first_comment"],
+        "additionalProperties": False,
+    }
 
     messages = [
         {"role": "system", "content": "You are an Ultralytics AI assistant processing GitHub PRs."},
         {"role": "user", "content": prompt},
     ]
-    result =
+    result = get_response(
+        messages,
+        temperature=1.0,
+        text_format={"format": {"type": "json_schema", "name": "pr_open_response", "strict": True, "schema": schema}},
+    )
     if is_large and "summary" in result:
         result["summary"] = (
             "**WARNING ⚠️** this PR is very large, summary may not cover all changes.\n\n" + result["summary"]
@@ -249,5 +271,5 @@ if __name__ == "__main__":
         {"role": "system", "content": "You are a helpful AI assistant."},
         {"role": "user", "content": "Explain how to export a YOLO11 model to CoreML."},
     ]
-    response = get_completion(messages)
+    response = get_response(messages)
     print(response)
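Taken together, get_response now returns a parsed dict whenever text_format requests json_object or json_schema output (json.loads is applied internally), and a plain string otherwise. A minimal sketch of the new structured-output call path, assuming OPENAI_API_KEY is set; the schema and prompt below are illustrative:

from actions.utils import get_response

# Illustrative strict JSON schema for the response body
schema = {
    "type": "object",
    "properties": {"labels": {"type": "array", "items": {"type": "string"}}},
    "required": ["labels"],
    "additionalProperties": False,
}
messages = [{"role": "user", "content": "Suggest labels for a PR that fixes a docstring typo."}]

# Returns a dict rather than a string because the format type is json_schema
result = get_response(
    messages,
    text_format={"format": {"type": "json_schema", "name": "labels", "strict": True, "schema": schema}},
)
print(result["labels"])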