ultralytics-actions 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ultralytics-actions might be problematic.

actions/__init__.py CHANGED
@@ -23,4 +23,4 @@
  # ├── test_summarize_pr.py
  # └── ...

- __version__ = "0.1.4"
+ __version__ = "0.1.6"
@@ -92,7 +92,7 @@ def main(*args, **kwargs):
  return

  # Get comment info
- comment_body = event.event_data["comment"].get("body", "")
+ comment_body = event.event_data["comment"].get("body") or ""
  username = event.event_data["comment"]["user"]["login"]

  # Check for keyword without surrounding backticks to avoid triggering on replies
@@ -52,7 +52,7 @@ def get_event_content(event) -> tuple[int, str, str, str, str, str, str]:
  number = item["number"]
  node_id = item.get("node_id") or item.get("id")
  title = item["title"]
- body = remove_html_comments(item.get("body", ""))
+ body = remove_html_comments(item.get("body") or "")
  username = item["user"]["login"]
  return number, node_id, title, body, username, issue_type, action

@@ -183,7 +183,7 @@ def main(*args, **kwargs):

  number, node_id, title, body, username, issue_type, action = get_event_content(event)
  available_labels = event.get_repo_data("labels")
- label_descriptions = {label["name"]: label.get("description", "") for label in available_labels}
+ label_descriptions = {label["name"]: label.get("description") or "" for label in available_labels}

  # Use unified PR open response for new PRs (summary + labels + first comment in 1 API call)
  if issue_type == "pull request" and action == "opened":
actions/review_pr.py CHANGED
@@ -5,9 +5,10 @@ from __future__ import annotations
  import json
  import re

- from .utils import GITHUB_API_URL, Action, get_completion, remove_html_comments
+ from .utils import GITHUB_API_URL, MAX_PROMPT_CHARS, Action, get_completion, remove_html_comments

  REVIEW_MARKER = "🔍 PR Review"
+ ERROR_MARKER = "⚠️ Review generation encountered an error"
  EMOJI_MAP = {"CRITICAL": "❗", "HIGH": "⚠️", "MEDIUM": "💡", "LOW": "📝", "SUGGESTION": "💭"}
  SKIP_PATTERNS = [
  r"\.lock$", # Lock files
@@ -75,8 +76,7 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
  return {"comments": [], "summary": f"All {skipped_count} changed files are generated/vendored (skipped review)"}

  file_list = list(diff_files.keys())
- limit = round(128000 * 3.3 * 0.5) # 3.3 characters per token for half a 256k context window
- diff_truncated = len(diff_text) > limit
+ diff_truncated = len(diff_text) > MAX_PROMPT_CHARS
  lines_changed = sum(len(lines) for lines in diff_files.values())

  content = (
@@ -118,7 +118,7 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
  f"Review this PR in https://github.com/{repository}:\n"
  f"Title: {pr_title}\n"
  f"Description: {remove_html_comments(pr_description or '')[:1000]}\n\n"
- f"Diff:\n{diff_text[:limit]}\n\n"
+ f"Diff:\n{diff_text[:MAX_PROMPT_CHARS]}\n\n"
  "Now review this diff according to the rules above. Return JSON with comments array and summary."
  ),
  },
@@ -161,7 +161,7 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
  if key not in unique_comments:
  unique_comments[key] = c
  else:
- print(f"⚠️ AI duplicate for {key}: {c.get('severity')} - {c.get('message')[:60]}...")
+ print(f"⚠️ AI duplicate for {key}: {c.get('severity')} - {(c.get('message') or '')[:60]}...")

  review_data.update(
  {
@@ -174,15 +174,16 @@ def generate_pr_review(repository: str, diff_text: str, pr_title: str, pr_descri
  print(f"Valid comments after filtering: {len(review_data['comments'])}")
  return review_data

- except json.JSONDecodeError as e:
- print(f"JSON parsing failed... {e}")
- return {"comments": [], "summary": "Review generation encountered a JSON parsing error"}
  except Exception as e:
- print(f"Review generation failed: {e}")
  import traceback

- traceback.print_exc()
- return {"comments": [], "summary": "Review generation encountered an error"}
+ error_details = traceback.format_exc()
+ print(f"Review generation failed: {e}\n{error_details}")
+ summary = (
+ f"{ERROR_MARKER}: `{type(e).__name__}`\n\n"
+ f"<details><summary>Debug Info</summary>\n\n```\n{error_details}\n```\n</details>"
+ )
+ return {"comments": [], "summary": summary}


  def dismiss_previous_reviews(event: Action) -> int:
@@ -219,7 +220,29 @@ def post_review_summary(event: Action, review_data: dict, review_number: int) ->

  review_title = f"{REVIEW_MARKER} {review_number}" if review_number > 1 else REVIEW_MARKER
  comments = review_data.get("comments", [])
- event_type = "COMMENT" if any(c.get("severity") not in ["LOW", "SUGGESTION", None] for c in comments) else "APPROVE"
+ summary = review_data.get("summary") or ""
+
+ # Don't approve if error occurred or if there are critical/high severity issues
+ has_error = not summary or ERROR_MARKER in summary
+ has_issues = any(c.get("severity") not in ["LOW", "SUGGESTION", None] for c in comments)
+ requests_changes = any(
+ phrase in summary.lower()
+ for phrase in [
+ "please",
+ "should",
+ "must",
+ "need to",
+ "needs to",
+ "before merging",
+ "fix",
+ "error",
+ "issue",
+ "problem",
+ "warning",
+ "concern",
+ ]
+ )
+ event_type = "COMMENT" if (has_error or has_issues or requests_changes) else "APPROVE"

  body = (
  f"## {review_title}\n\n"
@@ -239,12 +262,12 @@ def post_review_summary(event: Action, review_data: dict, review_number: int) ->

  # Build inline comments for the review
  review_comments = []
- for comment in comments[:10]: # Limit to 10 comments
+ for comment in comments[:10]: # Limit inline comments
  if not (file_path := comment.get("file")) or not (line := comment.get("line", 0)):
  continue

- severity = comment.get("severity", "SUGGESTION")
- comment_body = f"{EMOJI_MAP.get(severity, '💭')} **{severity}**: {comment.get('message', '')[:1000]}"
+ severity = comment.get("severity") or "SUGGESTION"
+ comment_body = f"{EMOJI_MAP.get(severity, '💭')} **{severity}**: {(comment.get('message') or '')[:1000]}"

  if suggestion := comment.get("suggestion"):
  suggestion = suggestion[:1000] # Clip suggestion length
@@ -299,7 +322,7 @@ def main(*args, **kwargs):
  review_number = dismiss_previous_reviews(event)

  diff = event.get_pr_diff()
- review = generate_pr_review(event.repository, diff, event.pr.get("title", ""), event.pr.get("body", ""))
+ review = generate_pr_review(event.repository, diff, event.pr.get("title") or "", event.pr.get("body") or "")

  post_review_summary(event, review, review_number)
  print("PR review completed")
actions/summarize_pr.py CHANGED
@@ -82,7 +82,7 @@ def label_fixed_issues(event, pr_summary):
  if not pr_credit:
  return None

- comment = generate_issue_comment(data["url"], pr_summary, pr_credit, data.get("title", ""))
+ comment = generate_issue_comment(data["url"], pr_summary, pr_credit, data.get("title") or "")

  for issue in data["closingIssuesReferences"]["nodes"]:
  number = issue["number"]
@@ -52,7 +52,7 @@ def get_prs_between_tags(event, previous_tag: str, latest_tag: str) -> list:
  {
  "number": pr_data["number"],
  "title": pr_data["title"],
- "body": remove_html_comments(pr_data.get("body", "")),
+ "body": remove_html_comments(pr_data.get("body") or ""),
  "author": pr_data["user"]["login"],
  "html_url": pr_data["html_url"],
  "merged_at": pr_data["merged_at"],
actions/utils/__init__.py CHANGED
@@ -10,6 +10,7 @@ from .common_utils import (
  )
  from .github_utils import GITHUB_API_URL, GITHUB_GRAPHQL_URL, Action, ultralytics_actions_info
  from .openai_utils import (
+ MAX_PROMPT_CHARS,
  filter_labels,
  get_completion,
  get_pr_open_response,
@@ -21,6 +22,7 @@ from .version_utils import check_pubdev_version, check_pypi_version
  __all__ = (
  "GITHUB_API_URL",
  "GITHUB_GRAPHQL_URL",
+ "MAX_PROMPT_CHARS",
  "REQUESTS_HEADERS",
  "URL_IGNORE_LIST",
  "REDIRECT_START_IGNORE_LIST",
@@ -133,6 +133,8 @@ URL_PATTERN = re.compile(

  def remove_html_comments(body: str) -> str:
  """Removes HTML comments from a string using regex pattern matching."""
+ if not body:
+ return ""
  return re.sub(r"<!--.*?-->", "", body, flags=re.DOTALL).strip()


@@ -34,7 +34,6 @@ query($owner: String!, $repo: String!, $pr_number: Int!) {
  closingIssuesReferences(first: 50) { nodes { number } }
  url
  title
- body
  author { login, __typename }
  reviews(first: 50) { nodes { author { login, __typename } } }
  comments(first: 50) { nodes { author { login, __typename } } }
@@ -126,21 +125,22 @@ class Action:

  def _request(self, method: str, url: str, headers=None, expected_status=None, hard=False, **kwargs):
  """Unified request handler with error checking."""
- response = getattr(requests, method)(url, headers=headers or self.headers, **kwargs)
+ r = getattr(requests, method)(url, headers=headers or self.headers, **kwargs)
  expected = expected_status or self._default_status[method]
- success = response.status_code in expected
+ success = r.status_code in expected

  if self.verbose:
- print(f"{'✓' if success else '✗'} {method.upper()} {url} → {response.status_code}")
+ elapsed = r.elapsed.total_seconds()
+ print(f"{'✓' if success else '✗'} {method.upper()} {url} → {r.status_code} ({elapsed:.1f}s)")
  if not success:
  try:
- print(f" ❌ Error: {response.json().get('message', 'Unknown error')}")
+ print(f" ❌ Error: {r.json().get('message', 'Unknown error')}")
  except Exception:
- print(f" ❌ Error: {response.text[:200]}")
+ print(f" ❌ Error: {r.text[:200]}")

  if not success and hard:
- response.raise_for_status()
- return response
+ r.raise_for_status()
+ return r

  def get(self, url, **kwargs):
  """Performs GET request with error handling."""
@@ -3,6 +3,7 @@
  from __future__ import annotations

  import os
+ import time

  import requests

@@ -10,6 +11,7 @@ from actions.utils.common_utils import check_links_in_string

  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
  OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-5-2025-08-07")
+ MAX_PROMPT_CHARS = round(128000 * 3.3 * 0.5) # Max characters for prompt (50% of 128k context)
  SYSTEM_PROMPT_ADDITION = """Guidance:
  - Ultralytics Branding: Use YOLO11, YOLO26, etc., not YOLOv11, YOLOv26 (only older versions like YOLOv10 have a v). Always capitalize "HUB" in "Ultralytics HUB"; use "Ultralytics HUB", not "The Ultralytics HUB".
  - Avoid Equations: Do not include equations or mathematical notations.
@@ -81,13 +83,8 @@ def get_pr_summary_guidelines() -> str:

  def get_pr_summary_prompt(repository: str, diff_text: str) -> tuple[str, bool]:
  """Returns the complete PR summary generation prompt with diff (used by PR update/merge)."""
- ratio = 3.3 # about 3.3 characters per token
- limit = round(128000 * ratio * 0.5) # use up to 50% of the 128k context window for prompt
-
- prompt = (
- f"{get_pr_summary_guidelines()}\n\nRepository: '{repository}'\n\nHere's the PR diff:\n\n{diff_text[:limit]}"
- )
- return prompt, len(diff_text) > limit
+ prompt = f"{get_pr_summary_guidelines()}\n\nRepository: '{repository}'\n\nHere's the PR diff:\n\n{diff_text[:MAX_PROMPT_CHARS]}"
+ return prompt, len(diff_text) > MAX_PROMPT_CHARS


  def get_pr_first_comment_template(repository: str) -> str:
@@ -114,61 +111,68 @@ def get_completion(
  response_format: dict = None,
  model: str = OPENAI_MODEL,
  ) -> str | dict:
- """Generates a completion using OpenAI's Responses API based on input messages."""
+ """Generates a completion using OpenAI's Responses API with retry logic."""
  assert OPENAI_API_KEY, "OpenAI API key is required."
  url = "https://api.openai.com/v1/responses"
  headers = {"Authorization": f"Bearer {OPENAI_API_KEY}", "Content-Type": "application/json"}
  if messages and messages[0].get("role") == "system":
  messages[0]["content"] += "\n\n" + SYSTEM_PROMPT_ADDITION

- max_retries = 2
- for attempt in range(max_retries + 2):
+ for attempt in range(3):
  data = {"model": model, "input": messages, "store": False, "temperature": temperature}
  if "gpt-5" in model:
  data["reasoning"] = {"effort": reasoning_effort or "low"}
- # GPT-5 Responses API handles JSON via prompting, not format parameter

- r = requests.post(url, json=data, headers=headers)
- if r.status_code != 200:
- print(f"❌ OpenAI error {r.status_code}:\n{r.text}\n")
- r.raise_for_status()
- response_data = r.json()
+ try:
+ r = requests.post(url, json=data, headers=headers, timeout=600)
+ success = r.status_code == 200
+ print(f"{'✓' if success else '✗'} POST {url} → {r.status_code} ({r.elapsed.total_seconds():.1f}s)")
+ r.raise_for_status()
+
+ # Parse response
+ content = ""
+ for item in r.json().get("output", []):
+ if item.get("type") == "message":
+ for c in item.get("content", []):
+ if c.get("type") == "output_text":
+ content += c.get("text") or ""
+ content = content.strip()

- content = ""
- for item in response_data.get("output", []):
- if item.get("type") == "message":
- for content_item in item.get("content", []):
- if content_item.get("type") == "output_text":
- content += content_item.get("text", "")
+ if response_format and response_format.get("type") == "json_object":
+ import json

- content = content.strip()
- if response_format and response_format.get("type") == "json_object":
- import json
+ return json.loads(content)

- return json.loads(content)
+ content = remove_outer_codeblocks(content)
+ for x in remove:
+ content = content.replace(x, "")

- content = remove_outer_codeblocks(content)
- for x in remove:
- content = content.replace(x, "")
+ # Retry on bad links
+ if attempt < 2 and check_links and not check_links_in_string(content):
+ print("Bad URLs detected, retrying")
+ continue

- if not check_links or check_links_in_string(content):
  return content

- if attempt < max_retries:
- print(f"Attempt {attempt + 1}: Found bad URLs. Retrying with a new random seed.")
- else:
- print("Max retries reached. Updating prompt to exclude links.")
- messages.append({"role": "user", "content": "Please provide a response without any URLs or links in it."})
- check_links = False
+ except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
+ if attempt < 2:
+ print(f"Connection error, retrying in {2**attempt}s")
+ time.sleep(2**attempt)
+ continue
+ raise
+ except requests.exceptions.HTTPError as e:
+ if attempt < 2 and e.response and e.response.status_code >= 500:
+ print(f"Server error {e.response.status_code}, retrying in {2**attempt}s")
+ time.sleep(2**attempt)
+ continue
+ raise

  return content


  def get_pr_open_response(repository: str, diff_text: str, title: str, body: str, available_labels: dict) -> dict:
  """Generates unified PR response with summary, labels, and first comment in a single API call."""
- ratio = 3.3 # about 3.3 characters per token
- limit = round(128000 * ratio * 0.5) # use up to 50% of the 128k context window for prompt
- is_large = len(diff_text) > limit
+ is_large = len(diff_text) > MAX_PROMPT_CHARS

  filtered_labels = filter_labels(available_labels, is_pr=True)
  labels_str = "\n".join(f"- {name}: {description}" for name, description in filtered_labels.items())
@@ -176,7 +180,7 @@ def get_pr_open_response(repository: str, diff_text: str, title: str, body: str,
  prompt = f"""You are processing a new GitHub pull request for the {repository} repository.

  Generate 3 outputs in a single JSON response for the PR titled {title} with the following diff:
- {diff_text[:limit]}
+ {diff_text[:MAX_PROMPT_CHARS]}


  --- FIRST JSON OUTPUT (PR SUMMARY) ---
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ultralytics-actions
- Version: 0.1.4
+ Version: 0.1.6
  Summary: Ultralytics Actions for GitHub automation and PR management.
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -0,0 +1,19 @@
+ actions/__init__.py,sha256=owzYNIk3pmwSP7At9iZuEgZyKtiaKSazh22WkbEyfFo,772
+ actions/dispatch_actions.py,sha256=i81UeHrYudAsOUFUfN71u6X-1cmZaZaiiTj6p2rvz8A,4217
+ actions/first_interaction.py,sha256=QxPsLjd-m2G-QYOcQb2hQfIB_alupzeZzSHTk-jw0bg,9856
+ actions/review_pr.py,sha256=_x-HwX2iqR1xhQkJBDW5CPVq-4Cba7pZZBz63hN9e1Y,15060
+ actions/summarize_pr.py,sha256=3nFotiZX42dz-mzDQ9wcoUILJKkcaxrC5EeyxvuvY60,5775
+ actions/summarize_release.py,sha256=iCXa9a1DcOrDVe8pMWEsYKgDxuIOhIgMsYymElOLK6o,9083
+ actions/update_file_headers.py,sha256=E5fKYLdeW16-BHCcuqxohGpGZqgEh-WX4ZmCQJw2R90,6684
+ actions/update_markdown_code_blocks.py,sha256=w3DTRltg2Rmr4-qrNawv_S2vJbheKE0tne1iz79FzXg,8692
+ actions/utils/__init__.py,sha256=unjXYIFNFeHrdC8LooDFVWlj6fAdGhssUgASo5229zY,1073
+ actions/utils/common_utils.py,sha256=2DRvcyCgmn507w3T4FJcQSZNI9KC1gVUb8CnJqPapD0,11943
+ actions/utils/github_utils.py,sha256=cBgEDJBpImTJbGBoZTteVSmCqXPuzEb51np7gRhqPeM,19702
+ actions/utils/openai_utils.py,sha256=xI_DZpsEBzXyqQDozMLEtmjwuNlOpNL9n2b-gA6xL5Y,10658
+ actions/utils/version_utils.py,sha256=EIbm3iZVNyNl3dh8aNz_9ITeTC93ZxfyUzIRkO3tSXw,3242
+ ultralytics_actions-0.1.6.dist-info/licenses/LICENSE,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
+ ultralytics_actions-0.1.6.dist-info/METADATA,sha256=7Zj67iXEIw61XyAKY8Myu0vZYqsO7F_08XvcDstqhWI,12368
+ ultralytics_actions-0.1.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ ultralytics_actions-0.1.6.dist-info/entry_points.txt,sha256=n_VbDs3Xj33daaeN_2D72UTEuyeH8hVc6-CPH55ymkY,496
+ ultralytics_actions-0.1.6.dist-info/top_level.txt,sha256=5apM5x80QlJcGbACn1v3fkmIuL1-XQCKcItJre7w7Tw,8
+ ultralytics_actions-0.1.6.dist-info/RECORD,,
@@ -1,19 +0,0 @@
- actions/__init__.py,sha256=UDIRZMqICZj8MM9O1637KKavoMhRWu2lgE1IltT0m0s,772
- actions/dispatch_actions.py,sha256=8jaaVkA_LSlpUQ4tuzmQtf2kw3G09uVRD_LmJyXYKNE,4215
- actions/first_interaction.py,sha256=8LxJ0RI4ddm-DjdD5tU1jAljCemP__tAc47nmSP8EdA,9852
- actions/review_pr.py,sha256=hbk2y04tjtsizhgWrYi69btUy8yBdfnIUOe2bODTr28,14410
- actions/summarize_pr.py,sha256=XLYsNTf4J6VPyyecZcuiJaSBDgjDSWFj37v5vb1ATCA,5773
- actions/summarize_release.py,sha256=_067Q5AP-Zdnt_qzhHaCuGCr7T4MXSB5_N-M5GX6qgQ,9081
- actions/update_file_headers.py,sha256=E5fKYLdeW16-BHCcuqxohGpGZqgEh-WX4ZmCQJw2R90,6684
- actions/update_markdown_code_blocks.py,sha256=w3DTRltg2Rmr4-qrNawv_S2vJbheKE0tne1iz79FzXg,8692
- actions/utils/__init__.py,sha256=sKNx6o5jcAraEdGFph0o-YC7dMMY-dg_FprIBa6Jydw,1027
- actions/utils/common_utils.py,sha256=8ZmgaXZU3J2sg-HSaldp3hHYq7bI3akcJHdIXPmcNAo,11908
- actions/utils/github_utils.py,sha256=mexywBlj4_eVuRFnh6-sXpnHTrAffoCUfeKidAqZwqA,19703
- actions/utils/openai_utils.py,sha256=EIu7UQEYUatuU_sYQv-UxsIVveArMGHlpy4nXrI5Kl8,10503
- actions/utils/version_utils.py,sha256=EIbm3iZVNyNl3dh8aNz_9ITeTC93ZxfyUzIRkO3tSXw,3242
- ultralytics_actions-0.1.4.dist-info/licenses/LICENSE,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
- ultralytics_actions-0.1.4.dist-info/METADATA,sha256=nsiw0RdHqqVa0-jbY4eI-cvFhOQlxzKkTyTOtxLYcrc,12368
- ultralytics_actions-0.1.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- ultralytics_actions-0.1.4.dist-info/entry_points.txt,sha256=n_VbDs3Xj33daaeN_2D72UTEuyeH8hVc6-CPH55ymkY,496
- ultralytics_actions-0.1.4.dist-info/top_level.txt,sha256=5apM5x80QlJcGbACn1v3fkmIuL1-XQCKcItJre7w7Tw,8
- ultralytics_actions-0.1.4.dist-info/RECORD,,