ultralytics-actions 0.1.6__py3-none-any.whl → 0.1.8__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Potentially problematic release: this version of ultralytics-actions has been flagged by the registry as possibly problematic.
- actions/__init__.py +1 -1
- actions/review_pr.py +67 -47
- {ultralytics_actions-0.1.6.dist-info → ultralytics_actions-0.1.8.dist-info}/METADATA +1 -1
- {ultralytics_actions-0.1.6.dist-info → ultralytics_actions-0.1.8.dist-info}/RECORD +8 -8
- {ultralytics_actions-0.1.6.dist-info → ultralytics_actions-0.1.8.dist-info}/WHEEL +0 -0
- {ultralytics_actions-0.1.6.dist-info → ultralytics_actions-0.1.8.dist-info}/entry_points.txt +0 -0
- {ultralytics_actions-0.1.6.dist-info → ultralytics_actions-0.1.8.dist-info}/licenses/LICENSE +0 -0
- {ultralytics_actions-0.1.6.dist-info → ultralytics_actions-0.1.8.dist-info}/top_level.txt +0 -0
actions/__init__.py
CHANGED
actions/review_pr.py
CHANGED
@@ -31,25 +31,31 @@ SKIP_PATTERNS = [


 def parse_diff_files(diff_text: str) -> dict:
-    """Parse diff to extract file paths, valid line numbers, and line content for comments."""
-    files, current_file,
+    """Parse diff to extract file paths, valid line numbers, and line content for comments (both sides)."""
+    files, current_file, new_line, old_line = {}, None, 0, 0

     for line in diff_text.split("\n"):
         if line.startswith("diff --git"):
             match = re.search(r" b/(.+)$", line)
             current_file = match.group(1) if match else None
-
+            new_line, old_line = 0, 0
             if current_file:
-                files[current_file] = {}
+                files[current_file] = {"RIGHT": {}, "LEFT": {}}
         elif line.startswith("@@") and current_file:
-
-
-
+            # Extract both old and new line numbers
+            match = re.search(r"@@ -(\d+)(?:,\d+)? \+(\d+)(?:,\d+)?", line)
+            if match:
+                old_line, new_line = int(match.group(1)), int(match.group(2))
+        elif current_file and (new_line > 0 or old_line > 0):
             if line.startswith("+") and not line.startswith("+++"):
-                files[current_file][
-
-            elif not line.startswith("
-
+                files[current_file]["RIGHT"][new_line] = line[1:]  # Added line (right/new side)
+                new_line += 1
+            elif line.startswith("-") and not line.startswith("---"):
+                files[current_file]["LEFT"][old_line] = line[1:]  # Removed line (left/old side)
+                old_line += 1
+            elif not line.startswith("\\"):  # Context line (ignore "No newline" markers)
+                new_line += 1
+                old_line += 1

     return files

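For illustration, a minimal usage sketch of the reworked parser; the toy diff and the expected output are hypothetical, and the import path simply follows this wheel's layout.

# Illustrative only: a tiny diff fed through the two-sided parser above.
from actions.review_pr import parse_diff_files

toy_diff = (
    "diff --git a/app.py b/app.py\n"
    "@@ -10,3 +10,3 @@\n"
    " context\n"
    "-old = 1\n"
    "+new = 2\n"
)
print(parse_diff_files(toy_diff))
# {'app.py': {'RIGHT': {11: 'new = 2'}, 'LEFT': {11: 'old = 1'}}}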
@@ -65,8 +71,8 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri

     # Filter out generated/vendored files
     filtered_files = {
-        path:
-        for path,
+        path: sides
+        for path, sides in diff_files.items()
         if not any(re.search(pattern, path) for pattern in SKIP_PATTERNS)
     }
     skipped_count = len(diff_files) - len(filtered_files)
@@ -77,7 +83,7 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri

     file_list = list(diff_files.keys())
     diff_truncated = len(diff_text) > MAX_PROMPT_CHARS
-    lines_changed = sum(len(
+    lines_changed = sum(len(sides["RIGHT"]) + len(sides["LEFT"]) for sides in diff_files.values())

     content = (
         "You are an expert code reviewer for Ultralytics. Provide detailed inline comments on specific code changes.\n\n"
@@ -101,10 +107,18 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
         "- Suggestion content must match the exact indentation of the original line\n"
         "- Avoid triple backticks (```) in suggestions as they break markdown formatting\n"
         "- It's better to flag an issue without a suggestion than provide a wrong or uncertain fix\n\n"
+        "LINE NUMBERS:\n"
+        "- You MUST extract line numbers directly from the @@ hunk headers in the diff below\n"
+        "- RIGHT (added +): Find @@ lines, use numbers after +N (e.g., @@ -10,5 +20,7 @@ means RIGHT starts at line 20)\n"
+        "- LEFT (removed -): Find @@ lines, use numbers after -N (e.g., @@ -10,5 +20,7 @@ means LEFT starts at line 10)\n"
+        "- Count forward from hunk start: + lines increment RIGHT, - lines increment LEFT, context lines increment both\n"
+        "- CRITICAL: Using line numbers not in the diff will cause your comment to be rejected\n"
+        "- Suggestions only work on RIGHT (added) lines, never on LEFT (removed) lines\n\n"
         "Return JSON: "
-        '{"comments": [{"file": "exact/path", "line": N, "
+        '{"comments": [{"file": "exact/path", "line": N, "side": "RIGHT", "severity": "HIGH", "message": "..."}], "summary": "..."}\n\n'
         "Rules:\n"
-        "-
+        "- Verify line numbers from @@ hunks: +N for RIGHT (added), -N for LEFT (removed)\n"
+        "- Exact paths (no ./), 'side' field defaults to RIGHT if omitted\n"
         "- Severity: CRITICAL, HIGH, MEDIUM, LOW, SUGGESTION\n"
         f"- Files changed: {len(file_list)} ({', '.join(file_list[:10])}{'...' if len(file_list) > 10 else ''})\n"
         f"- Lines changed: {lines_changed}\n"
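A worked example of the counting rule these prompt instructions describe, using a hypothetical hunk body (standalone Python, not part of the package):

# "@@ -10,5 +20,7 @@" starts LEFT at 10 and RIGHT at 20; context lines advance both.
hunk_body = [" ctx", "+added one", "+added two", " ctx", "-removed"]
left, right = 10, 20
positions = []
for line in hunk_body:
    if line.startswith("+"):
        positions.append(("RIGHT", right))
        right += 1
    elif line.startswith("-"):
        positions.append(("LEFT", left))
        left += 1
    else:  # context line advances both counters
        left += 1
        right += 1
print(positions)  # [('RIGHT', 21), ('RIGHT', 22), ('LEFT', 12)]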
@@ -126,23 +140,41 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri

     try:
         response = get_completion(messages, reasoning_effort="medium", model="gpt-5-codex")
-        print("\n" + "=" * 80 + f"\nFULL AI RESPONSE:\n{response}\n" + "=" * 80 + "\n")

         json_str = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", response, re.DOTALL)
         review_data = json.loads(json_str.group(1) if json_str else response)
+        print(json.dumps(review_data, indent=2))

-
+        # Count comments BEFORE filtering (for COMMENT vs APPROVE decision)
+        comments_before_filtering = len(review_data.get("comments", []))
+        print(f"AI generated {comments_before_filtering} comments")

         # Validate, filter, and deduplicate comments
         unique_comments = {}
         for c in review_data.get("comments", []):
             file_path, line_num = c.get("file"), c.get("line", 0)
             start_line = c.get("start_line")
+            side = (c.get("side") or "RIGHT").upper()  # Default to RIGHT (added lines)

-            # Validate line numbers are in diff
-            if file_path not in diff_files
-                print(f"Filtered out {file_path}:{line_num} (
+            # Validate line numbers are in diff (check appropriate side)
+            if file_path not in diff_files:
+                print(f"Filtered out {file_path}:{line_num} (file not in diff)")
                 continue
+            if line_num not in diff_files[file_path].get(side, {}):
+                # Try other side if not found
+                other_side = "LEFT" if side == "RIGHT" else "RIGHT"
+                if line_num in diff_files[file_path].get(other_side, {}):
+                    print(f"Switching {file_path}:{line_num} from {side} to {other_side}")
+                    c["side"] = other_side
+                    side = other_side
+                    # GitHub rejects suggestions on removed lines
+                    if side == "LEFT" and c.get("suggestion"):
+                        print(f"Dropping suggestion for {file_path}:{line_num} - LEFT side doesn't support suggestions")
+                        c.pop("suggestion", None)
+                else:
+                    available = {s: list(diff_files[file_path][s].keys())[:10] for s in ["RIGHT", "LEFT"]}
+                    print(f"Filtered out {file_path}:{line_num} (available: {available})")
+                    continue

             # Validate start_line if provided - drop start_line for suggestions (single-line only)
             if start_line:
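A small sketch of the side-fallback behavior introduced above, using made-up data; it mirrors the validation logic rather than calling the packaged function:

# Hypothetical: the comment targets RIGHT line 11, which only exists on LEFT,
# so the side is switched and the suggestion is dropped (GitHub rejects
# suggestions on removed lines).
diff_files = {"app.py": {"RIGHT": {20: "new = 2"}, "LEFT": {11: "old = 1"}}}
c = {"file": "app.py", "line": 11, "side": "RIGHT", "suggestion": "x = 1"}

side = (c.get("side") or "RIGHT").upper()
if c["line"] not in diff_files[c["file"]].get(side, {}):
    other_side = "LEFT" if side == "RIGHT" else "RIGHT"
    if c["line"] in diff_files[c["file"]].get(other_side, {}):
        c["side"] = side = other_side
        if side == "LEFT" and c.get("suggestion"):
            c.pop("suggestion", None)

print(c)  # {'file': 'app.py', 'line': 11, 'side': 'LEFT'}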
@@ -152,12 +184,12 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
             elif start_line >= line_num:
                 print(f"Invalid start_line {start_line} >= line {line_num} for {file_path}, dropping start_line")
                 c.pop("start_line", None)
-            elif start_line not in diff_files[file_path]:
+            elif start_line not in diff_files[file_path].get(side, {}):
                 print(f"start_line {start_line} not in diff for {file_path}, dropping start_line")
                 c.pop("start_line", None)

-            # Deduplicate by line number
-            key = f"{file_path}:{line_num}"
+            # Deduplicate by line number and side
+            key = f"{file_path}:{side}:{line_num}"
             if key not in unique_comments:
                 unique_comments[key] = c
             else:
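A quick illustration of why the deduplication key now includes the side: comments on the same line number but opposite sides no longer collide (made-up comments):

unique_comments = {}
for c in [
    {"file": "app.py", "line": 11, "side": "LEFT", "message": "about the removed line"},
    {"file": "app.py", "line": 11, "side": "RIGHT", "message": "about the added line"},
]:
    key = f"{c['file']}:{c['side']}:{c['line']}"
    unique_comments.setdefault(key, c)
print(list(unique_comments))  # ['app.py:LEFT:11', 'app.py:RIGHT:11']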
@@ -166,6 +198,7 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
         review_data.update(
             {
                 "comments": list(unique_comments.values()),
+                "comments_before_filtering": comments_before_filtering,
                 "diff_files": diff_files,
                 "diff_truncated": diff_truncated,
                 "skipped_files": skipped_count,
@@ -222,27 +255,11 @@ def post_review_summary(event: Action, review_data: dict, review_number: int) ->
     comments = review_data.get("comments", [])
     summary = review_data.get("summary") or ""

-    # Don't approve if error occurred
+    # Don't approve if error occurred, inline comments exist, or critical/high severity issues
     has_error = not summary or ERROR_MARKER in summary
+    has_inline_comments = review_data.get("comments_before_filtering", 0) > 0
     has_issues = any(c.get("severity") not in ["LOW", "SUGGESTION", None] for c in comments)
-    requests_changes = any(
-        phrase in summary.lower()
-        for phrase in [
-            "please",
-            "should",
-            "must",
-            "need to",
-            "needs to",
-            "before merging",
-            "fix",
-            "error",
-            "issue",
-            "problem",
-            "warning",
-            "concern",
-        ]
-    )
-    event_type = "COMMENT" if (has_error or has_issues or requests_changes) else "APPROVE"
+    event_type = "COMMENT" if (has_error or has_inline_comments or has_issues) else "APPROVE"

     body = (
         f"## {review_title}\n\n"
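A minimal sketch of the new approval gate; simplified, since the real check also looks for ERROR_MARKER in the summary:

# Any comment the model produced (even one later filtered out) now forces a
# COMMENT review instead of an APPROVE.
review_data = {"summary": "Looks good overall.", "comments_before_filtering": 2}
comments = []  # everything was filtered out

has_error = not review_data["summary"]
has_inline_comments = review_data.get("comments_before_filtering", 0) > 0
has_issues = any(c.get("severity") not in ["LOW", "SUGGESTION", None] for c in comments)
event_type = "COMMENT" if (has_error or has_inline_comments or has_issues) else "APPROVE"
print(event_type)  # COMMENT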
@@ -269,22 +286,25 @@ def post_review_summary(event: Action, review_data: dict, review_number: int) ->
         severity = comment.get("severity") or "SUGGESTION"
         comment_body = f"{EMOJI_MAP.get(severity, '💭')} **{severity}**: {(comment.get('message') or '')[:1000]}"

+        # Get side (LEFT for removed lines, RIGHT for added lines)
+        side = comment.get("side", "RIGHT")
+
         if suggestion := comment.get("suggestion"):
             suggestion = suggestion[:1000]  # Clip suggestion length
             if "```" not in suggestion:
                 # Extract original line indentation and apply to suggestion
-                if original_line := review_data.get("diff_files", {}).get(file_path, {}).get(line):
+                if original_line := review_data.get("diff_files", {}).get(file_path, {}).get(side, {}).get(line):
                     indent = len(original_line) - len(original_line.lstrip())
                     suggestion = " " * indent + suggestion.strip()
             comment_body += f"\n\n**Suggested change:**\n```suggestion\n{suggestion}\n```"

         # Build comment with optional start_line for multi-line context
-        review_comment = {"path": file_path, "line": line, "body": comment_body, "side":
+        review_comment = {"path": file_path, "line": line, "body": comment_body, "side": side}
         if start_line := comment.get("start_line"):
             if start_line < line:
                 review_comment["start_line"] = start_line
-                review_comment["start_side"] =
-                print(f"Multi-line comment: {file_path}:{start_line}-{line}")
+                review_comment["start_side"] = side
+                print(f"Multi-line comment: {file_path}:{start_line}-{line} ({side})")

         review_comments.append(review_comment)

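For reference, an illustrative shape of the review comment dict assembled above for a multi-line RIGHT-side comment; all values are placeholders, not real review output:

review_comment = {
    "path": "app.py",
    "line": 21,
    "body": "**HIGH**: example message",
    "side": "RIGHT",
    "start_line": 20,
    "start_side": "RIGHT",
}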
{ultralytics_actions-0.1.6.dist-info → ultralytics_actions-0.1.8.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics-actions
-Version: 0.1.6
+Version: 0.1.8
 Summary: Ultralytics Actions for GitHub automation and PR management.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
{ultralytics_actions-0.1.6.dist-info → ultralytics_actions-0.1.8.dist-info}/RECORD
RENAMED
@@ -1,7 +1,7 @@
-actions/__init__.py,sha256=
+actions/__init__.py,sha256=iaUhZH1t2gpoOoqwPRBkP7bO6z4Xd3R_JPDt8VxcO2U,772
 actions/dispatch_actions.py,sha256=i81UeHrYudAsOUFUfN71u6X-1cmZaZaiiTj6p2rvz8A,4217
 actions/first_interaction.py,sha256=QxPsLjd-m2G-QYOcQb2hQfIB_alupzeZzSHTk-jw0bg,9856
-actions/review_pr.py,sha256=
+actions/review_pr.py,sha256=6svsUPJTH4FSVfq4yQ4Y_PuwcEy522hmYSTOtDZbBKc,17371
 actions/summarize_pr.py,sha256=3nFotiZX42dz-mzDQ9wcoUILJKkcaxrC5EeyxvuvY60,5775
 actions/summarize_release.py,sha256=iCXa9a1DcOrDVe8pMWEsYKgDxuIOhIgMsYymElOLK6o,9083
 actions/update_file_headers.py,sha256=E5fKYLdeW16-BHCcuqxohGpGZqgEh-WX4ZmCQJw2R90,6684
@@ -11,9 +11,9 @@ actions/utils/common_utils.py,sha256=2DRvcyCgmn507w3T4FJcQSZNI9KC1gVUb8CnJqPapD0
 actions/utils/github_utils.py,sha256=cBgEDJBpImTJbGBoZTteVSmCqXPuzEb51np7gRhqPeM,19702
 actions/utils/openai_utils.py,sha256=xI_DZpsEBzXyqQDozMLEtmjwuNlOpNL9n2b-gA6xL5Y,10658
 actions/utils/version_utils.py,sha256=EIbm3iZVNyNl3dh8aNz_9ITeTC93ZxfyUzIRkO3tSXw,3242
-ultralytics_actions-0.1.
-ultralytics_actions-0.1.
-ultralytics_actions-0.1.
-ultralytics_actions-0.1.
-ultralytics_actions-0.1.
-ultralytics_actions-0.1.
+ultralytics_actions-0.1.8.dist-info/licenses/LICENSE,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
+ultralytics_actions-0.1.8.dist-info/METADATA,sha256=dSciUEZ62eiZNHZS60RdWLla4u7Qt_81RSBqDR7xSNs,12368
+ultralytics_actions-0.1.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ultralytics_actions-0.1.8.dist-info/entry_points.txt,sha256=n_VbDs3Xj33daaeN_2D72UTEuyeH8hVc6-CPH55ymkY,496
+ultralytics_actions-0.1.8.dist-info/top_level.txt,sha256=5apM5x80QlJcGbACn1v3fkmIuL1-XQCKcItJre7w7Tw,8
+ultralytics_actions-0.1.8.dist-info/RECORD,,
{ultralytics_actions-0.1.6.dist-info → ultralytics_actions-0.1.8.dist-info}/WHEEL
RENAMED
File without changes
{ultralytics_actions-0.1.6.dist-info → ultralytics_actions-0.1.8.dist-info}/entry_points.txt
RENAMED
File without changes
{ultralytics_actions-0.1.6.dist-info → ultralytics_actions-0.1.8.dist-info}/licenses/LICENSE
RENAMED
File without changes
{ultralytics_actions-0.1.6.dist-info → ultralytics_actions-0.1.8.dist-info}/top_level.txt
RENAMED
File without changes