ultralytics-actions 0.1.3__tar.gz → 0.1.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ultralytics-actions might be problematic.

Files changed (36)
  1. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/PKG-INFO +3 -3
  2. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/README.md +2 -2
  3. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/__init__.py +1 -1
  4. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/dispatch_actions.py +1 -1
  5. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/first_interaction.py +3 -3
  6. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/review_pr.py +16 -16
  7. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/summarize_pr.py +1 -1
  8. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/summarize_release.py +1 -1
  9. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/utils/__init__.py +2 -0
  10. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/utils/common_utils.py +2 -0
  11. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/utils/github_utils.py +2 -3
  12. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/utils/openai_utils.py +45 -42
  13. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/ultralytics_actions.egg-info/PKG-INFO +3 -3
  14. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/LICENSE +0 -0
  15. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/update_file_headers.py +0 -0
  16. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/update_markdown_code_blocks.py +0 -0
  17. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/utils/version_utils.py +0 -0
  18. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/pyproject.toml +0 -0
  19. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/setup.cfg +0 -0
  20. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/tests/test_cli_commands.py +0 -0
  21. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/tests/test_common_utils.py +0 -0
  22. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/tests/test_dispatch_actions.py +0 -0
  23. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/tests/test_file_headers.py +0 -0
  24. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/tests/test_first_interaction.py +0 -0
  25. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/tests/test_github_utils.py +0 -0
  26. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/tests/test_init.py +0 -0
  27. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/tests/test_openai_utils.py +0 -0
  28. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/tests/test_summarize_pr.py +0 -0
  29. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/tests/test_summarize_release.py +0 -0
  30. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/tests/test_update_markdown_codeblocks.py +0 -0
  31. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/tests/test_urls.py +0 -0
  32. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/ultralytics_actions.egg-info/SOURCES.txt +0 -0
  33. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/ultralytics_actions.egg-info/dependency_links.txt +0 -0
  34. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/ultralytics_actions.egg-info/entry_points.txt +0 -0
  35. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/ultralytics_actions.egg-info/requires.txt +0 -0
  36. {ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/ultralytics_actions.egg-info/top_level.txt +0 -0

{ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ultralytics-actions
- Version: 0.1.3
+ Version: 0.1.5
  Summary: Ultralytics Actions for GitHub automation and PR management.
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -64,8 +64,8 @@ Ultralytics Actions automatically applies formats, updates, and enhancements usi
  - **Spell Check:** Common misspellings are caught using [codespell](https://github.com/codespell-project/codespell).
  - **Broken Links Check:** Broken links in documentation and Markdown files are identified using [Lychee](https://github.com/lycheeverse/lychee).
  - **PR Summary:** Concise Pull Request summaries are generated using [OpenAI](https://openai.com/) GPT-5, improving clarity and review efficiency.
- - **PR Review:** AI-powered inline code reviews identify critical bugs, security issues, and code quality concerns with suggested fixes.
- - **Auto-labeling:** Relevant labels are applied to issues and pull requests via [OpenAI](https://openai.com/) GPT-5 for intelligent categorization.
+ - **PR Review:** AI-powered code reviews identify critical bugs, security issues, and code quality concerns with suggested fixes.
+ - **Auto-labeling:** Applies relevant labels to issues and PRs via [OpenAI](https://openai.com/) GPT-5 for intelligent categorization.

  ## 🛠️ How It Works


{ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/README.md

@@ -26,8 +26,8 @@ Ultralytics Actions automatically applies formats, updates, and enhancements usi
  - **Spell Check:** Common misspellings are caught using [codespell](https://github.com/codespell-project/codespell).
  - **Broken Links Check:** Broken links in documentation and Markdown files are identified using [Lychee](https://github.com/lycheeverse/lychee).
  - **PR Summary:** Concise Pull Request summaries are generated using [OpenAI](https://openai.com/) GPT-5, improving clarity and review efficiency.
- - **PR Review:** AI-powered inline code reviews identify critical bugs, security issues, and code quality concerns with suggested fixes.
- - **Auto-labeling:** Relevant labels are applied to issues and pull requests via [OpenAI](https://openai.com/) GPT-5 for intelligent categorization.
+ - **PR Review:** AI-powered code reviews identify critical bugs, security issues, and code quality concerns with suggested fixes.
+ - **Auto-labeling:** Applies relevant labels to issues and PRs via [OpenAI](https://openai.com/) GPT-5 for intelligent categorization.

  ## 🛠️ How It Works


{ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/__init__.py

@@ -23,4 +23,4 @@
  # ├── test_summarize_pr.py
  # └── ...

- __version__ = "0.1.3"
+ __version__ = "0.1.5"

{ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/dispatch_actions.py

@@ -92,7 +92,7 @@ def main(*args, **kwargs):
  return

  # Get comment info
- comment_body = event.event_data["comment"].get("body", "")
+ comment_body = event.event_data["comment"].get("body") or ""
  username = event.event_data["comment"]["user"]["login"]

  # Check for keyword without surrounding backticks to avoid triggering on replies
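
This hunk, and several below, replace `dict.get(key, "")` with `dict.get(key) or ""`. The distinction matters because GitHub payloads deliver empty bodies as explicit `null` values: `.get()` only falls back to its default when the key is missing, so a present-but-`None` value slips through and later string operations fail. A minimal sketch with a made-up payload:

```python
# Hypothetical webhook payload: the "body" key exists but is null (None in Python).
comment = {"body": None, "user": {"login": "octocat"}}

old_style = comment.get("body", "")    # -> None, default ignored because the key exists
new_style = comment.get("body") or ""  # -> "", None is coerced to an empty string

assert old_style is None
assert new_style == ""
new_style.strip()  # safe; calling .strip() on old_style would raise AttributeError
```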

{ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/first_interaction.py

@@ -52,7 +52,7 @@ def get_event_content(event) -> tuple[int, str, str, str, str, str, str]:
  number = item["number"]
  node_id = item.get("node_id") or item.get("id")
  title = item["title"]
- body = remove_html_comments(item.get("body", ""))
+ body = remove_html_comments(item.get("body") or "")
  username = item["user"]["login"]
  return number, node_id, title, body, username, issue_type, action

@@ -183,7 +183,7 @@ def main(*args, **kwargs):

  number, node_id, title, body, username, issue_type, action = get_event_content(event)
  available_labels = event.get_repo_data("labels")
- label_descriptions = {label["name"]: label.get("description", "") for label in available_labels}
+ label_descriptions = {label["name"]: label.get("description") or "" for label in available_labels}

  # Use unified PR open response for new PRs (summary + labels + first comment in 1 API call)
  if issue_type == "pull request" and action == "opened":
@@ -196,7 +196,7 @@ def main(*args, **kwargs):

  if summary := response.get("summary"):
  print("Updating PR description with summary...")
- event.update_pr_description(number, SUMMARY_START + summary + "\n\n" + body)
+ event.update_pr_description(number, SUMMARY_START + summary)
  else:
  summary = body


{ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/review_pr.py

@@ -5,7 +5,7 @@ from __future__ import annotations
  import json
  import re

- from .utils import GITHUB_API_URL, Action, get_completion, remove_html_comments
+ from .utils import GITHUB_API_URL, MAX_PROMPT_CHARS, Action, get_completion, remove_html_comments

  REVIEW_MARKER = "🔍 PR Review"
  EMOJI_MAP = {"CRITICAL": "❗", "HIGH": "⚠️", "MEDIUM": "💡", "LOW": "📝", "SUGGESTION": "💭"}
@@ -75,8 +75,7 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
  return {"comments": [], "summary": f"All {skipped_count} changed files are generated/vendored (skipped review)"}

  file_list = list(diff_files.keys())
- limit = round(128000 * 3.3 * 0.5) # 3.3 characters per token for half a 256k context window
- diff_truncated = len(diff_text) > limit
+ diff_truncated = len(diff_text) > MAX_PROMPT_CHARS
  lines_changed = sum(len(lines) for lines in diff_files.values())

  content = (
@@ -118,14 +117,14 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
  f"Review this PR in https://github.com/{repository}:\n"
  f"Title: {pr_title}\n"
  f"Description: {remove_html_comments(pr_description or '')[:1000]}\n\n"
- f"Diff:\n{diff_text[:limit]}\n\n"
+ f"Diff:\n{diff_text[:MAX_PROMPT_CHARS]}\n\n"
  "Now review this diff according to the rules above. Return JSON with comments array and summary."
  ),
  },
  ]

  try:
- response = get_completion(messages, reasoning_effort="medium")
+ response = get_completion(messages, reasoning_effort="medium", model="gpt-5-codex")
  print("\n" + "=" * 80 + f"\nFULL AI RESPONSE:\n{response}\n" + "=" * 80 + "\n")

  json_str = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", response, re.DOTALL)
@@ -161,7 +160,7 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
  if key not in unique_comments:
  unique_comments[key] = c
  else:
- print(f"⚠️ AI duplicate for {key}: {c.get('severity')} - {c.get('message')[:60]}...")
+ print(f"⚠️ AI duplicate for {key}: {c.get('severity')} - {(c.get('message') or '')[:60]}...")

  review_data.update(
  {
@@ -174,15 +173,16 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
  print(f"Valid comments after filtering: {len(review_data['comments'])}")
  return review_data

- except json.JSONDecodeError as e:
- print(f"JSON parsing failed... {e}")
- return {"comments": [], "summary": "Review generation encountered a JSON parsing error"}
  except Exception as e:
- print(f"Review generation failed: {e}")
  import traceback

- traceback.print_exc()
- return {"comments": [], "summary": "Review generation encountered an error"}
+ error_details = traceback.format_exc()
+ print(f"Review generation failed: {e}\n{error_details}")
+ summary = (
+ f"⚠️ Review generation encountered an error: `{type(e).__name__}`\n\n"
+ f"<details><summary>Debug Info</summary>\n\n```\n{error_details}\n```\n</details>"
+ )
+ return {"comments": [], "summary": summary}


  def dismiss_previous_reviews(event: Action) -> int:
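
The reworked exception handler replaces the generic failure message with the exception type plus the full traceback, wrapped in a `<details>` block that GitHub renders as a collapsible "Debug Info" section. The same pattern in isolation, with a placeholder failing call standing in for the real review generation:

```python
import traceback

def flaky_review_step():
    """Hypothetical stand-in for the call that generates the review."""
    raise ValueError("placeholder failure")

try:
    flaky_review_step()
except Exception as e:
    error_details = traceback.format_exc()  # full traceback as a string
    summary = (
        f"⚠️ Review generation encountered an error: `{type(e).__name__}`\n\n"
        f"<details><summary>Debug Info</summary>\n\n{error_details}\n</details>"
        # The real handler also wraps error_details in a fenced code block.
    )
    print(summary)
```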
@@ -239,12 +239,12 @@ def post_review_summary(event: Action, review_data: dict, review_number: int) ->

  # Build inline comments for the review
  review_comments = []
- for comment in comments[:10]: # Limit to 10 comments
+ for comment in comments[:10]: # Limit inline comments
  if not (file_path := comment.get("file")) or not (line := comment.get("line", 0)):
  continue

- severity = comment.get("severity", "SUGGESTION")
- comment_body = f"{EMOJI_MAP.get(severity, '💭')} **{severity}**: {comment.get('message', '')[:1000]}"
+ severity = comment.get("severity") or "SUGGESTION"
+ comment_body = f"{EMOJI_MAP.get(severity, '💭')} **{severity}**: {(comment.get('message') or '')[:1000]}"

  if suggestion := comment.get("suggestion"):
  suggestion = suggestion[:1000] # Clip suggestion length
@@ -299,7 +299,7 @@ def main(*args, **kwargs):
  review_number = dismiss_previous_reviews(event)

  diff = event.get_pr_diff()
- review = generate_pr_review(event.repository, diff, event.pr.get("title", ""), event.pr.get("body", ""))
+ review = generate_pr_review(event.repository, diff, event.pr.get("title") or "", event.pr.get("body") or "")

  post_review_summary(event, review, review_number)
  print("PR review completed")

{ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/summarize_pr.py

@@ -82,7 +82,7 @@ def label_fixed_issues(event, pr_summary):
  if not pr_credit:
  return None

- comment = generate_issue_comment(data["url"], pr_summary, pr_credit, data.get("title", ""))
+ comment = generate_issue_comment(data["url"], pr_summary, pr_credit, data.get("title") or "")

  for issue in data["closingIssuesReferences"]["nodes"]:
  number = issue["number"]

{ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/summarize_release.py

@@ -52,7 +52,7 @@ def get_prs_between_tags(event, previous_tag: str, latest_tag: str) -> list:
  {
  "number": pr_data["number"],
  "title": pr_data["title"],
- "body": remove_html_comments(pr_data.get("body", "")),
+ "body": remove_html_comments(pr_data.get("body") or ""),
  "author": pr_data["user"]["login"],
  "html_url": pr_data["html_url"],
  "merged_at": pr_data["merged_at"],

{ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/utils/__init__.py

@@ -10,6 +10,7 @@ from .common_utils import (
  )
  from .github_utils import GITHUB_API_URL, GITHUB_GRAPHQL_URL, Action, ultralytics_actions_info
  from .openai_utils import (
+ MAX_PROMPT_CHARS,
  filter_labels,
  get_completion,
  get_pr_open_response,
@@ -21,6 +22,7 @@ from .version_utils import check_pubdev_version, check_pypi_version
  __all__ = (
  "GITHUB_API_URL",
  "GITHUB_GRAPHQL_URL",
+ "MAX_PROMPT_CHARS",
  "REQUESTS_HEADERS",
  "URL_IGNORE_LIST",
  "REDIRECT_START_IGNORE_LIST",

{ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/utils/common_utils.py

@@ -133,6 +133,8 @@ URL_PATTERN = re.compile(

  def remove_html_comments(body: str) -> str:
  """Removes HTML comments from a string using regex pattern matching."""
+ if not body:
+ return ""
  return re.sub(r"<!--.*?-->", "", body, flags=re.DOTALL).strip()

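
With the new guard, `remove_html_comments` tolerates `None` or empty input instead of passing it straight to `re.sub`, so callers that still hand it a missing body no longer crash. A quick check of the updated function as it appears in the hunk:

```python
import re

def remove_html_comments(body: str) -> str:
    """Removes HTML comments from a string using regex pattern matching."""
    if not body:
        return ""
    return re.sub(r"<!--.*?-->", "", body, flags=re.DOTALL).strip()

print(repr(remove_html_comments(None)))                        # '' instead of a TypeError
print(repr(remove_html_comments("Hi <!-- reviewer note -->"))) # 'Hi'
```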

{ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/utils/github_utils.py

@@ -34,7 +34,6 @@ query($owner: String!, $repo: String!, $pr_number: Int!) {
  closingIssuesReferences(first: 50) { nodes { number } }
  url
  title
- body
  author { login, __typename }
  reviews(first: 50) { nodes { author { login, __typename } } }
  comments(first: 50) { nodes { author { login, __typename } } }
@@ -281,10 +280,10 @@ class Action:
  start = "## 🛠️ PR Summary"
  if start in description:
  print("Existing PR Summary found, replacing.")
- updated_description = description.split(start)[0] + new_summary
+ updated_description = description.split(start)[0].rstrip() + "\n\n" + new_summary
  else:
  print("PR Summary not found, appending.")
- updated_description = description + "\n\n" + new_summary
+ updated_description = (description.rstrip() + "\n\n" + new_summary) if description.strip() else new_summary

  self.patch(url, json={"body": updated_description})
  self._pr_summary_cache = new_summary
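
The `rstrip()`-based splice keeps the summary separated from the original description by exactly one blank line and avoids blank-line buildup (or a stray leading gap on an empty description) when the summary is regenerated. A small sketch of the new branch logic, using made-up description and summary strings:

```python
start = "## 🛠️ PR Summary"
new_summary = start + "\n\nAdds retry logic to OpenAI calls."  # illustrative summary text

description = "Fixes a crash on empty PR bodies.\n\n\n" + start + "\n\nStale summary."

if start in description:
    # Drop the old summary block and trailing whitespace before re-appending
    updated = description.split(start)[0].rstrip() + "\n\n" + new_summary
else:
    # Append, unless the description is effectively empty
    updated = (description.rstrip() + "\n\n" + new_summary) if description.strip() else new_summary

print(updated)
# Fixes a crash on empty PR bodies.
#
# ## 🛠️ PR Summary
#
# Adds retry logic to OpenAI calls.
```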

{ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/actions/utils/openai_utils.py

@@ -3,6 +3,7 @@
  from __future__ import annotations

  import os
+ import time

  import requests

@@ -10,6 +11,7 @@ from actions.utils.common_utils import check_links_in_string

  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
  OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-5-2025-08-07")
+ MAX_PROMPT_CHARS = round(128000 * 3.3 * 0.5) # Max characters for prompt (50% of 128k context)
  SYSTEM_PROMPT_ADDITION = """Guidance:
  - Ultralytics Branding: Use YOLO11, YOLO26, etc., not YOLOv11, YOLOv26 (only older versions like YOLOv10 have a v). Always capitalize "HUB" in "Ultralytics HUB"; use "Ultralytics HUB", not "The Ultralytics HUB".
  - Avoid Equations: Do not include equations or mathematical notations.
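
`MAX_PROMPT_CHARS` centralizes the prompt budget that was previously recomputed inline in several modules: at roughly 3.3 characters per token, half of a 128k-token context works out to `round(128000 * 3.3 * 0.5) = 211200` characters. A minimal sketch of how a diff gets clipped against that budget (the `clip_diff` helper is illustrative, not part of the package):

```python
MAX_PROMPT_CHARS = round(128000 * 3.3 * 0.5)  # 211200 characters, ~50% of a 128k-token context

def clip_diff(diff_text: str) -> tuple[str, bool]:
    """Return the prompt-sized diff and a flag indicating whether it was truncated."""
    return diff_text[:MAX_PROMPT_CHARS], len(diff_text) > MAX_PROMPT_CHARS

clipped, truncated = clip_diff("x" * 300_000)
print(len(clipped), truncated)  # 211200 True
```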
@@ -81,13 +83,8 @@ def get_pr_summary_guidelines() -> str:

  def get_pr_summary_prompt(repository: str, diff_text: str) -> tuple[str, bool]:
  """Returns the complete PR summary generation prompt with diff (used by PR update/merge)."""
- ratio = 3.3 # about 3.3 characters per token
- limit = round(128000 * ratio * 0.5) # use up to 50% of the 128k context window for prompt
-
- prompt = (
- f"{get_pr_summary_guidelines()}\n\nRepository: '{repository}'\n\nHere's the PR diff:\n\n{diff_text[:limit]}"
- )
- return prompt, len(diff_text) > limit
+ prompt = f"{get_pr_summary_guidelines()}\n\nRepository: '{repository}'\n\nHere's the PR diff:\n\n{diff_text[:MAX_PROMPT_CHARS]}"
+ return prompt, len(diff_text) > MAX_PROMPT_CHARS


  def get_pr_first_comment_template(repository: str) -> str:
@@ -112,62 +109,68 @@ def get_completion(
  temperature: float = 1.0,
  reasoning_effort: str = None,
  response_format: dict = None,
+ model: str = OPENAI_MODEL,
  ) -> str | dict:
- """Generates a completion using OpenAI's Responses API based on input messages."""
+ """Generates a completion using OpenAI's Responses API with retry logic."""
  assert OPENAI_API_KEY, "OpenAI API key is required."
  url = "https://api.openai.com/v1/responses"
  headers = {"Authorization": f"Bearer {OPENAI_API_KEY}", "Content-Type": "application/json"}
  if messages and messages[0].get("role") == "system":
  messages[0]["content"] += "\n\n" + SYSTEM_PROMPT_ADDITION

- max_retries = 2
- for attempt in range(max_retries + 2):
- data = {"model": OPENAI_MODEL, "input": messages, "store": False, "temperature": temperature}
- if "gpt-5" in OPENAI_MODEL:
+ for attempt in range(3):
+ data = {"model": model, "input": messages, "store": False, "temperature": temperature}
+ if "gpt-5" in model:
  data["reasoning"] = {"effort": reasoning_effort or "low"}
- # GPT-5 Responses API handles JSON via prompting, not format parameter

- r = requests.post(url, json=data, headers=headers)
- if r.status_code != 200:
- print(f"❌ OpenAI error {r.status_code}:\n{r.text}\n")
- r.raise_for_status()
- response_data = r.json()
+ try:
+ r = requests.post(url, json=data, headers=headers, timeout=600)
+ r.raise_for_status()
+
+ # Parse response
+ content = ""
+ for item in r.json().get("output", []):
+ if item.get("type") == "message":
+ for c in item.get("content", []):
+ if c.get("type") == "output_text":
+ content += c.get("text") or ""
+ content = content.strip()

- content = ""
- for item in response_data.get("output", []):
- if item.get("type") == "message":
- for content_item in item.get("content", []):
- if content_item.get("type") == "output_text":
- content += content_item.get("text", "")
+ if response_format and response_format.get("type") == "json_object":
+ import json

- content = content.strip()
- if response_format and response_format.get("type") == "json_object":
- import json
+ return json.loads(content)

- return json.loads(content)
+ content = remove_outer_codeblocks(content)
+ for x in remove:
+ content = content.replace(x, "")

- content = remove_outer_codeblocks(content)
- for x in remove:
- content = content.replace(x, "")
+ # Retry on bad links
+ if attempt < 2 and check_links and not check_links_in_string(content):
+ print("Bad URLs detected, retrying")
+ continue

- if not check_links or check_links_in_string(content):
  return content

- if attempt < max_retries:
- print(f"Attempt {attempt + 1}: Found bad URLs. Retrying with a new random seed.")
- else:
- print("Max retries reached. Updating prompt to exclude links.")
- messages.append({"role": "user", "content": "Please provide a response without any URLs or links in it."})
- check_links = False
+ except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
+ if attempt < 2:
+ print(f"Connection error, retrying in {2**attempt}s")
+ time.sleep(2**attempt)
+ continue
+ raise
+ except requests.exceptions.HTTPError as e:
+ if attempt < 2 and e.response and e.response.status_code >= 500:
+ print(f"Server error {e.response.status_code}, retrying in {2**attempt}s")
+ time.sleep(2**attempt)
+ continue
+ raise

  return content


  def get_pr_open_response(repository: str, diff_text: str, title: str, body: str, available_labels: dict) -> dict:
  """Generates unified PR response with summary, labels, and first comment in a single API call."""
- ratio = 3.3 # about 3.3 characters per token
- limit = round(128000 * ratio * 0.5) # use up to 50% of the 128k context window for prompt
- is_large = len(diff_text) > limit
+ is_large = len(diff_text) > MAX_PROMPT_CHARS

  filtered_labels = filter_labels(available_labels, is_pr=True)
  labels_str = "\n".join(f"- {name}: {description}" for name, description in filtered_labels.items())
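
The rewritten `get_completion` bounds everything to three attempts: connection errors, timeouts, and 5xx responses trigger a `time.sleep(2**attempt)` backoff (1s, then 2s), while responses containing bad links are retried within the same loop. A stripped-down sketch of that retry shape (the `post_once` stub is hypothetical, not part of the package):

```python
import time

import requests

def post_once() -> str:
    """Hypothetical stand-in for one POST to the OpenAI Responses API."""
    raise requests.exceptions.ConnectionError("simulated network failure")

def completion_with_retries() -> str:
    for attempt in range(3):  # at most three attempts in total
        try:
            return post_once()
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
            if attempt < 2:
                time.sleep(2**attempt)  # back off 1s, then 2s
                continue
            raise  # out of retries: let the caller see the failure
    return ""  # not reached; keeps the return type consistent
```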
@@ -175,7 +178,7 @@ def get_pr_open_response(repository: str, diff_text: str, title: str, body: str,
  prompt = f"""You are processing a new GitHub pull request for the {repository} repository.

  Generate 3 outputs in a single JSON response for the PR titled {title} with the following diff:
- {diff_text[:limit]}
+ {diff_text[:MAX_PROMPT_CHARS]}


  --- FIRST JSON OUTPUT (PR SUMMARY) ---

{ultralytics_actions-0.1.3 → ultralytics_actions-0.1.5}/ultralytics_actions.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ultralytics-actions
- Version: 0.1.3
+ Version: 0.1.5
  Summary: Ultralytics Actions for GitHub automation and PR management.
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -64,8 +64,8 @@ Ultralytics Actions automatically applies formats, updates, and enhancements usi
  - **Spell Check:** Common misspellings are caught using [codespell](https://github.com/codespell-project/codespell).
  - **Broken Links Check:** Broken links in documentation and Markdown files are identified using [Lychee](https://github.com/lycheeverse/lychee).
  - **PR Summary:** Concise Pull Request summaries are generated using [OpenAI](https://openai.com/) GPT-5, improving clarity and review efficiency.
- - **PR Review:** AI-powered inline code reviews identify critical bugs, security issues, and code quality concerns with suggested fixes.
- - **Auto-labeling:** Relevant labels are applied to issues and pull requests via [OpenAI](https://openai.com/) GPT-5 for intelligent categorization.
+ - **PR Review:** AI-powered code reviews identify critical bugs, security issues, and code quality concerns with suggested fixes.
+ - **Auto-labeling:** Applies relevant labels to issues and PRs via [OpenAI](https://openai.com/) GPT-5 for intelligent categorization.

  ## 🛠️ How It Works
