ultralytics-actions 0.0.40__py3-none-any.whl

@@ -0,0 +1,194 @@
+ # Ultralytics Actions 🚀, AGPL-3.0 license https://ultralytics.com/license
+
+ import os
+ import re
+ import subprocess
+ import time
+ from datetime import datetime
+
+ import requests
+
+ from .utils import (
+     GITHUB_API_URL,
+     Action,
+     get_completion,
+     remove_html_comments,
+ )
+
+ # Environment variables
+ CURRENT_TAG = os.getenv("CURRENT_TAG")
+ PREVIOUS_TAG = os.getenv("PREVIOUS_TAG")
+
+
+ def get_release_diff(repo_name: str, previous_tag: str, latest_tag: str, headers: dict) -> str:
+     """Retrieves the differences between two specified Git tags in a GitHub repository."""
+     url = f"{GITHUB_API_URL}/repos/{repo_name}/compare/{previous_tag}...{latest_tag}"
+     r = requests.get(url, headers=headers)
+     return r.text if r.status_code == 200 else f"Failed to get diff: {r.content}"
+
+
+ def get_prs_between_tags(repo_name: str, previous_tag: str, latest_tag: str, headers: dict) -> list:
+     """Retrieves and processes pull requests merged between two specified tags in a GitHub repository."""
+     url = f"{GITHUB_API_URL}/repos/{repo_name}/compare/{previous_tag}...{latest_tag}"
+     r = requests.get(url, headers=headers)
+     r.raise_for_status()
+
+     data = r.json()
+     pr_numbers = set()
+
+     for commit in data["commits"]:
+         pr_matches = re.findall(r"#(\d+)", commit["commit"]["message"])
+         pr_numbers.update(pr_matches)
+
+     prs = []
+     time.sleep(10)  # sleep 10 seconds to allow final PR summary to update on merge
+     for pr_number in sorted(pr_numbers, key=int):  # numeric sort, earliest to latest
+         pr_url = f"{GITHUB_API_URL}/repos/{repo_name}/pulls/{pr_number}"
+         pr_response = requests.get(pr_url, headers=headers)
+         if pr_response.status_code == 200:
+             pr_data = pr_response.json()
+             if not pr_data.get("merged_at"):  # skip referenced PRs that were never merged
+                 continue
+             prs.append(
+                 {
+                     "number": pr_data["number"],
+                     "title": pr_data["title"],
+                     "body": remove_html_comments(pr_data["body"] or ""),  # body may be None
+                     "author": pr_data["user"]["login"],
+                     "html_url": pr_data["html_url"],
+                     "merged_at": pr_data["merged_at"],
+                 }
+             )
+
+     # Sort PRs by merge date
+     prs.sort(key=lambda x: datetime.strptime(x["merged_at"], "%Y-%m-%dT%H:%M:%SZ"))
+
+     return prs
+
+
+ def get_new_contributors(repo: str, prs: list, headers: dict) -> set:
+     """Identify new contributors who made their first merged PR in the current release."""
+     new_contributors = set()
+     for pr in prs:
+         author = pr["author"]
+         # Check if this is the author's first contribution
+         url = f"{GITHUB_API_URL}/search/issues?q=repo:{repo}+author:{author}+is:pr+is:merged&sort=created&order=asc"
+         r = requests.get(url, headers=headers)
+         if r.status_code == 200:
+             data = r.json()
+             if data["total_count"] > 0:
+                 first_pr = data["items"][0]
+                 if first_pr["number"] == pr["number"]:
+                     new_contributors.add(author)
+     return new_contributors
+
+
+ def generate_release_summary(
+     diff: str, prs: list, latest_tag: str, previous_tag: str, repo_name: str, headers: dict
+ ) -> str:
+     """Generate a concise release summary with key changes, purpose, and impact for a new Ultralytics version."""
+     pr_summaries = "\n\n".join(
+         [f"PR #{pr['number']}: {pr['title']} by @{pr['author']}\n{pr['body'][:1000]}" for pr in prs]
+     )
+
+     current_pr = prs[-1] if prs else None
+     current_pr_summary = (
+         f"Current PR #{current_pr['number']}: {current_pr['title']} by @{current_pr['author']}\n{current_pr['body'][:1000]}"
+         if current_pr
+         else "No current PR found."
+     )
+
+     whats_changed = "\n".join([f"* {pr['title']} by @{pr['author']} in {pr['html_url']}" for pr in prs])
+
+     # Generate New Contributors section
+     new_contributors = get_new_contributors(repo_name, prs, headers)
+     new_contributors_section = (
+         "\n## New Contributors\n"
+         + "\n".join(
+             [
+                 f"* @{contributor} made their first contribution in {next(pr['html_url'] for pr in prs if pr['author'] == contributor)}"
+                 for contributor in new_contributors
+             ]
+         )
+         if new_contributors
+         else ""
+     )
+
+     full_changelog = f"https://github.com/{repo_name}/compare/{previous_tag}...{latest_tag}"
+     release_suffix = (
+         f"\n\n## What's Changed\n{whats_changed}\n{new_contributors_section}\n\n**Full Changelog**: {full_changelog}\n"
+     )
+
+     messages = [
+         {
+             "role": "system",
+             "content": "You are an Ultralytics AI assistant skilled in software development and technical communication. Your task is to summarize GitHub releases in a way that is detailed, accurate, and understandable to both expert developers and non-expert users. Focus on highlighting the key changes and their impact in simple and intuitive terms.",
+         },
+         {
+             "role": "user",
+             "content": f"Summarize the updates made in the '{latest_tag}' tag, focusing on major model or feature changes, their purpose, and potential impact. Keep the summary clear and suitable for a broad audience. Add emojis to enliven the summary. Prioritize changes from the current PR (the most recent in the list), which is usually the most important in the release. Reply directly with a summary along these example guidelines, though feel free to adjust as appropriate:\n\n"
+             f"## 🌟 Summary (single-line synopsis)\n"
+             f"## 📊 Key Changes (bullet points highlighting any major changes)\n"
+             f"## 🎯 Purpose & Impact (bullet points explaining any benefits and potential impact to users)\n\n\n"
+             f"Here's the information about the current PR:\n\n{current_pr_summary}\n\n"
+             f"Here's the information about PRs merged between the previous release and this one:\n\n{pr_summaries[:30000]}\n\n"
+             f"Here's the release diff:\n\n{diff[:300000]}",
+         },
+     ]
+     print(messages[-1]["content"])  # for debug
+     return get_completion(messages) + release_suffix
+
+
+ def create_github_release(repo_name: str, tag_name: str, name: str, body: str, headers: dict) -> int:
+     """Creates a GitHub release with specified tag, name, and body content for the given repository."""
+     url = f"{GITHUB_API_URL}/repos/{repo_name}/releases"
+     data = {"tag_name": tag_name, "name": name, "body": body, "draft": False, "prerelease": False}
+     r = requests.post(url, headers=headers, json=data)
+     return r.status_code
+
+
+ def get_previous_tag() -> str:
+     """Retrieves the previous Git tag, excluding the current tag, using the git describe command."""
+     cmd = ["git", "describe", "--tags", "--abbrev=0", "--exclude", CURRENT_TAG]
+     try:
+         return subprocess.run(cmd, check=True, text=True, capture_output=True).stdout.strip()
+     except subprocess.CalledProcessError:
+         print("Failed to get previous tag from git. Using previous commit.")
+         return "HEAD~1"
+
+
+ def main(*args, **kwargs):
+     """Automates generating and publishing a GitHub release summary from PRs and commit differences."""
+     action = Action(*args, **kwargs)
+
+     if not all([action.token, CURRENT_TAG]):
+         raise ValueError("One or more required environment variables are missing.")
+
+     previous_tag = PREVIOUS_TAG or get_previous_tag()
+
+     # Get the diff between the tags
+     diff = get_release_diff(action.repository, previous_tag, CURRENT_TAG, action.headers_diff)
+
+     # Get PRs merged between the tags
+     prs = get_prs_between_tags(action.repository, previous_tag, CURRENT_TAG, action.headers)
+
+     # Generate release summary
+     try:
+         summary = generate_release_summary(diff, prs, CURRENT_TAG, previous_tag, action.repository, action.headers)
+     except Exception as e:
+         print(f"Failed to generate summary: {str(e)}")
+         summary = "Failed to generate summary."
+
+     # Get the latest commit message
+     cmd = ["git", "log", "-1", "--pretty=%B"]
+     commit_message = subprocess.run(cmd, check=True, text=True, capture_output=True).stdout.split("\n")[0].strip()
+
+     # Create the release on GitHub
+     msg = f"{CURRENT_TAG} - {commit_message}"
+     status_code = create_github_release(action.repository, CURRENT_TAG, msg, summary, action.headers)
+     if status_code == 201:
+         print(f"Successfully created release {CURRENT_TAG}")
+     else:
+         print(f"Failed to create release {CURRENT_TAG}. Status code: {status_code}")
+
+
+ if __name__ == "__main__":
+     main()
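
A minimal usage sketch for this release script (illustrative, not part of the package): the module path actions.summarize_release is assumed from the wheel layout, and the token and tag values are placeholders. In CI these environment variables are injected by the workflow; here they are set by hand before import, since the module reads them at import time.

    import os

    os.environ["CURRENT_TAG"] = "v0.0.40"  # hypothetical tag being released
    os.environ["GITHUB_TOKEN"] = "ghp_..."  # placeholder token with repo scope

    from actions.summarize_release import main  # assumed module path

    main()  # builds the AI summary and creates the GitHub release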
@@ -0,0 +1,173 @@
+ # Ultralytics Actions 🚀, AGPL-3.0 license https://ultralytics.com/license
+
+ import hashlib
+ import re
+ import shutil
+ import subprocess
+ from pathlib import Path
+
+
+ def extract_code_blocks(markdown_content):
+     """Extracts Python code blocks from markdown content using regex pattern matching."""
+     pattern = r"^( *)```(?:python|py|\{[ ]*\.py[ ]*\.annotate[ ]*\})\n(.*?)\n\1```"
+     code_block_pattern = re.compile(pattern, re.DOTALL | re.MULTILINE)
+     return code_block_pattern.findall(markdown_content)
+
+
+ def remove_indentation(code_block, num_spaces):
+     """Removes specified leading spaces from each line in a code block to adjust indentation."""
+     lines = code_block.split("\n")
+     stripped_lines = [line[num_spaces:] if len(line) >= num_spaces else line for line in lines]
+     return "\n".join(stripped_lines)
+
+
+ def add_indentation(code_block, num_spaces):
+     """Adds specified number of leading spaces to non-empty lines in a code block."""
+     indent = " " * num_spaces
+     lines = code_block.split("\n")
+     indented_lines = [indent + line if line.strip() != "" else line for line in lines]
+     return "\n".join(indented_lines)
+
+
+ def format_code_with_ruff(temp_dir):
+     """Formats Python code files in the specified directory using ruff linter and docformatter tools."""
+     try:
+         # Run ruff format
+         subprocess.run(
+             [
+                 "ruff",
+                 "format",
+                 "--line-length=120",
+                 str(temp_dir),
+             ],
+             check=True,
+         )
+         print("Completed ruff format ✅")
+     except Exception as e:
+         print(f"ERROR running ruff format ❌ {e}")
+
+     try:
+         # Run ruff check with ignored rules:
+         # F821: Undefined name
+         # F841: Local variable is assigned to but never used
+         subprocess.run(
+             [
+                 "ruff",
+                 "check",
+                 "--fix",
+                 "--extend-select=I",
+                 "--ignore=F821,F841",
+                 str(temp_dir),
+             ],
+             check=True,
+         )
+         print("Completed ruff check ✅")
+     except Exception as e:
+         print(f"ERROR running ruff check ❌ {e}")
+
+     try:
+         # Run docformatter
+         subprocess.run(
+             [
+                 "docformatter",
+                 "--wrap-summaries=120",
+                 "--wrap-descriptions=120",
+                 "--pre-summary-newline",
+                 "--close-quotes-on-newline",
+                 "--in-place",
+                 "--recursive",
+                 str(temp_dir),
+             ],
+             check=True,
+         )
+         print("Completed docformatter ✅")
+     except Exception as e:
+         print(f"ERROR running docformatter ❌ {e}")
+
+
+ def generate_temp_filename(file_path, index):
+     """Generates a unique temporary filename using a hash of the file path and index."""
+     unique_string = f"{file_path.parent}_{file_path.stem}_{index}"
+     unique_hash = hashlib.md5(unique_string.encode()).hexdigest()
+     return f"temp_{unique_hash}.py"
+
+
+ def process_markdown_file(file_path, temp_dir, verbose=False):
+     """Processes a markdown file, extracting Python code blocks for formatting and updating the original file."""
+     try:
+         markdown_content = Path(file_path).read_text()
+         code_blocks = extract_code_blocks(markdown_content)
+         temp_files = []
+
+         for i, (num_spaces, code_block) in enumerate(code_blocks):
+             if verbose:
+                 print(f"Extracting code block {i} from {file_path}")
+             num_spaces = len(num_spaces)
+             code_without_indentation = remove_indentation(code_block, num_spaces)
+
+             # Generate a unique temp file path
+             temp_file_path = temp_dir / generate_temp_filename(file_path, i)
+             with open(temp_file_path, "w") as temp_file:
+                 temp_file.write(code_without_indentation)
+             temp_files.append((num_spaces, code_block, temp_file_path))
+
+         return markdown_content, temp_files
+
+     except Exception as e:
+         print(f"Error processing file {file_path}: {e}")
+         return None, None
+
+
+ def update_markdown_file(file_path, markdown_content, temp_files):
+     """Updates a markdown file with formatted Python code blocks extracted and processed externally."""
+     for num_spaces, original_code_block, temp_file_path in temp_files:
+         try:
+             with open(temp_file_path) as temp_file:
+                 formatted_code = temp_file.read().rstrip("\n")  # Strip trailing newlines
+             formatted_code_with_indentation = add_indentation(formatted_code, num_spaces)
+
+             # Replace both `python` and `py` code blocks
+             for lang in ["python", "py", "{ .py .annotate }"]:
+                 markdown_content = markdown_content.replace(
+                     f"{' ' * num_spaces}```{lang}\n{original_code_block}\n{' ' * num_spaces}```",
+                     f"{' ' * num_spaces}```{lang}\n{formatted_code_with_indentation}\n{' ' * num_spaces}```",
+                 )
+         except Exception as e:
+             print(f"Error updating code block in file {file_path}: {e}")
+
+     try:
+         with open(file_path, "w") as file:
+             file.write(markdown_content)
+     except Exception as e:
+         print(f"Error writing file {file_path}: {e}")
+
+
+ def main(root_dir=Path.cwd(), verbose=False):
+     """Processes markdown files, extracts and formats Python code blocks, and updates the original files."""
+     root_path = Path(root_dir)
+     markdown_files = list(root_path.rglob("*.md"))
+     temp_dir = Path("temp_code_blocks")
+     temp_dir.mkdir(exist_ok=True)
+
+     # Extract code blocks and save to temp files
+     all_temp_files = []
+     for markdown_file in markdown_files:
+         if verbose:
+             print(f"Processing {markdown_file}")
+         markdown_content, temp_files = process_markdown_file(markdown_file, temp_dir, verbose=verbose)
+         if markdown_content and temp_files:
+             all_temp_files.append((markdown_file, markdown_content, temp_files))
+
+     # Format all code blocks with ruff
+     format_code_with_ruff(temp_dir)
+
+     # Update markdown files with formatted code blocks
+     for markdown_file, markdown_content, temp_files in all_temp_files:
+         update_markdown_file(markdown_file, markdown_content, temp_files)
+
+     # Clean up temp directory
+     shutil.rmtree(temp_dir)
+
+
+ if __name__ == "__main__":
+     main()
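
A short sketch of invoking this formatter over a hypothetical docs/ tree (module path assumed from the wheel layout; ruff and docformatter must be on PATH, as the subprocess calls above require):

    from pathlib import Path

    from actions.update_markdown_code_blocks import main  # assumed module path

    main(root_dir=Path("docs"), verbose=True)  # rewrites every *.md under docs/ in place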
@@ -0,0 +1,19 @@
+ # Ultralytics Actions 🚀, AGPL-3.0 license https://ultralytics.com/license
+
+ from .common_utils import remove_html_comments
+ from .github_utils import (
+     GITHUB_API_URL,
+     Action,
+     check_pypi_version,
+     ultralytics_actions_info,
+ )
+ from .openai_utils import get_completion
+
+ __all__ = (
+     "GITHUB_API_URL",
+     "Action",
+     "check_pypi_version",
+     "get_completion",
+     "remove_html_comments",
+     "ultralytics_actions_info",
+ )
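
These re-exports mean callers can import the helpers from the subpackage root rather than from the individual modules; a sketch assuming the package installs as actions.utils:

    from actions.utils import GITHUB_API_URL, Action, check_pypi_version  # assumed package path

    action = Action()  # reads GITHUB_TOKEN, GITHUB_EVENT_NAME, GITHUB_EVENT_PATH from the environment
    print(GITHUB_API_URL)  # -> https://api.github.com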
@@ -0,0 +1,111 @@
+ # Ultralytics Actions 🚀, AGPL-3.0 license https://ultralytics.com/license
+
+ import re
+ import time
+ from concurrent.futures import ThreadPoolExecutor
+ from urllib import parse
+
+ import requests
+
+
+ def remove_html_comments(body: str) -> str:
+     """Removes HTML comments from a string using regex pattern matching."""
+     return re.sub(r"<!--.*?-->", "", body, flags=re.DOTALL).strip()
+
+
+ def clean_url(url):
+     """Remove extra characters from URL strings."""
+     for _ in range(3):
+         url = str(url).strip('"').strip("'").rstrip(".,:;!?`\\").replace(".git@main", "").replace("git+", "")
+     return url
+
+
+ def is_url(url, check=True, max_attempts=3, timeout=2):
+     """Check if string is URL and check if URL exists."""
+     allow_list = (
+         "localhost",
+         "127.0.0",
+         ":5000",
+         ":3000",
+         ":8000",
+         ":8080",
+         ":6006",
+         "MODEL_ID",
+         "API_KEY",
+         "url",
+         "example",
+         "mailto:",
+         "github.com",  # ignore GitHub links that may be private repos
+         "kaggle.com",  # blocks automated header requests
+         "reddit.com",  # blocks automated header requests
+         "linkedin.com",
+         "twitter.com",
+         "x.com",
+         "storage.googleapis.com",  # private GCS buckets
+     )
+     try:
+         # Check allow list
+         if any(x in url for x in allow_list):
+             return True
+
+         # Check structure
+         result = parse.urlparse(url)
+         partition = result.netloc.partition(".")  # i.e. netloc = "github.com" -> ("github", ".", "com")
+         if not result.scheme or not partition[0] or not partition[2]:
+             return False
+
+         # Check response
+         if check:
+             for attempt in range(max_attempts):
+                 try:
+                     headers = {
+                         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
+                         "Accept": "*/*",  # Wildcard for maximum compatibility
+                         "Accept-Language": "*",  # Wildcard for any language
+                         "Accept-Encoding": "*",  # Wildcard for any encoding
+                     }
+                     return requests.head(url, headers=headers, timeout=timeout, allow_redirects=True).status_code < 400
+                 except Exception:
+                     if attempt == max_attempts - 1:  # last attempt
+                         return False
+                     time.sleep(2**attempt)  # exponential backoff
+             return False
+         return True
+     except Exception:
+         return False
+
+
+ def check_links_in_string(text, verbose=True, return_bad=False):
+     """Process a given text, find unique URLs within it, and check for any 404 errors."""
+     pattern = (
+         r"\[([^\]]+)\]\(([^)]+)\)"  # Matches Markdown links [text](url)
+         r"|"
+         r"("  # Start capturing group for plaintext URLs
+         r"(?:https?://)?"  # Optional http:// or https://
+         r"(?:www\.)?"  # Optional www.
+         r"(?:[\w.-]+)?"  # Optional domain name and subdomains
+         r"\.[a-zA-Z]{2,}"  # TLD
+         r"(?:/[^\s\"')\]]*)?"  # Optional path
+         r")"
+     )
+     all_urls = []
+     for md_text, md_url, plain_url in re.findall(pattern, text):
+         url = md_url or plain_url
+         if url and parse.urlparse(url).scheme:
+             all_urls.append(url)
+
+     urls = set(map(clean_url, all_urls))  # remove extra characters and make unique
+     # bad_urls = [x for x in urls if not is_url(x, check=True)]  # single-thread
+     with ThreadPoolExecutor(max_workers=16) as executor:  # multi-thread
+         bad_urls = [url for url, valid in zip(urls, executor.map(lambda x: is_url(x, check=True), urls)) if not valid]
+
+     passing = not bad_urls
+     if verbose and not passing:
+         print(f"WARNING ⚠️ errors found in URLs {bad_urls}")
+
+     return (passing, bad_urls) if return_bad else passing
+
+
+ if __name__ == "__main__":
+     print(is_url("https://ultralytics.com/images/bus.jpg"))
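
A small usage sketch for the link checker (module path assumed; results depend on live network responses, since is_url issues real HEAD requests):

    from actions.utils.common_utils import check_links_in_string, is_url  # assumed module path

    print(is_url("https://ultralytics.com/images/bus.jpg"))  # True if a HEAD request returns < 400

    text = "See [docs](https://docs.ultralytics.com) and https://ultralytics.com for details."
    passing, bad_urls = check_links_in_string(text, return_bad=True)
    print(passing, bad_urls)  # e.g. True, [] when both links resolve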
@@ -0,0 +1,163 @@
+ # Ultralytics Actions 🚀, AGPL-3.0 license https://ultralytics.com/license
+ import json
+ import os
+ from pathlib import Path
+
+ import requests
+
+ from actions import __version__
+
+ GITHUB_API_URL = "https://api.github.com"
+
+
+ class Action:
+     """Handles GitHub Actions API interactions and event processing."""
+
+     def __init__(
+         self,
+         token: str = None,
+         event_name: str = None,
+         event_data: dict = None,
+     ):
+         """Initializes a GitHub Actions API handler with token and event data for processing events."""
+         self.token = token or os.getenv("GITHUB_TOKEN")
+         self.event_name = event_name or os.getenv("GITHUB_EVENT_NAME")
+         self.event_data = event_data or self._load_event_data(os.getenv("GITHUB_EVENT_PATH"))
+
+         self.pr = self.event_data.get("pull_request", {})
+         self.repository = self.event_data.get("repository", {}).get("full_name")
+         self.headers = {"Authorization": f"token {self.token}", "Accept": "application/vnd.github.v3+json"}
+         self.headers_diff = {"Authorization": f"token {self.token}", "Accept": "application/vnd.github.v3.diff"}
+
+     @staticmethod
+     def _load_event_data(event_path: str) -> dict:
+         """Loads GitHub event data from path if it exists."""
+         if event_path and Path(event_path).exists():
+             return json.loads(Path(event_path).read_text())
+         return {}
+
+     def get_username(self) -> str | None:
+         """Gets username associated with the GitHub token."""
+         query = "query { viewer { login } }"
+         response = requests.post(f"{GITHUB_API_URL}/graphql", json={"query": query}, headers=self.headers)
+         if response.status_code != 200:
+             print(f"Failed to fetch authenticated user. Status code: {response.status_code}")
+             return None
+         try:
+             return response.json()["data"]["viewer"]["login"]
+         except KeyError as e:
+             print(f"Error parsing authenticated user response: {e}")
+             return None
+
+     def get_pr_diff(self) -> str:
+         """Retrieves the diff content for a specified pull request."""
+         url = f"{GITHUB_API_URL}/repos/{self.repository}/pulls/{self.pr.get('number')}"
+         r = requests.get(url, headers=self.headers_diff)
+         return r.text if r.status_code == 200 else ""
+
+     def get_repo_data(self, endpoint: str) -> dict:
+         """Fetches repository data from a specified endpoint."""
+         r = requests.get(f"{GITHUB_API_URL}/repos/{self.repository}/{endpoint}", headers=self.headers)
+         r.raise_for_status()
+         return r.json()
+
+     def graphql_request(self, query: str, variables: dict = None) -> dict:
+         """Executes a GraphQL query against the GitHub API."""
+         headers = {
+             "Authorization": f"Bearer {self.token}",
+             "Content-Type": "application/json",
+             "Accept": "application/vnd.github.v4+json",
+         }
+         r = requests.post(f"{GITHUB_API_URL}/graphql", json={"query": query, "variables": variables}, headers=headers)
+         r.raise_for_status()
+         result = r.json()
+         success = "data" in result and not result.get("errors")
+         print(
+             f"{'Successful' if success else 'Failed'} discussion GraphQL request: {result.get('errors', 'No errors')}"
+         )
+         return result
+
+     def print_info(self):
+         """Print GitHub Actions information."""
+         info = {
+             "github.event_name": self.event_name,
+             "github.event.action": self.event_data.get("action"),
+             "github.repository": self.repository,
+             "github.event.pull_request.number": self.pr.get("number"),
+             "github.event.pull_request.head.repo.full_name": self.pr.get("head", {}).get("repo", {}).get("full_name"),
+             "github.actor": os.environ.get("GITHUB_ACTOR"),
+             "github.event.pull_request.head.ref": self.pr.get("head", {}).get("ref"),
+             "github.ref": os.environ.get("GITHUB_REF"),
+             "github.head_ref": os.environ.get("GITHUB_HEAD_REF"),
+             "github.base_ref": os.environ.get("GITHUB_BASE_REF"),
+             "github.base_sha": self.pr.get("base", {}).get("sha"),
+         }
+
+         if self.event_name == "discussion":
+             discussion = self.event_data.get("discussion", {})
+             info |= {
+                 "github.event.discussion.node_id": discussion.get("node_id"),
+                 "github.event.discussion.number": discussion.get("number"),
+             }
+
+         max_key_length = max(len(key) for key in info)
+         header = f"Ultralytics Actions {__version__} Information " + "-" * 40
+         print(header)
+         for key, value in info.items():
+             print(f"{key:<{max_key_length + 5}}{value}")
+         print("-" * len(header))
+
+
+ def ultralytics_actions_info():
+     """Returns GitHub Actions environment information and configuration details for Ultralytics workflows."""
+     Action().print_info()
+
+
+ def check_pypi_version(pyproject_toml="pyproject.toml"):
+     """Compares local and PyPI package versions to determine if a new version should be published."""
+     import re
+
+     import tomllib  # requires Python>=3.11
+
+     version_pattern = re.compile(r"^\d+\.\d+\.\d+$")  # e.g. 0.0.0
+
+     with open(pyproject_toml, "rb") as f:
+         pyproject = tomllib.load(f)
+
+     package_name = pyproject["project"]["name"]
+     local_version = pyproject["project"].get("version", "dynamic")
+
+     # If version is dynamic, extract it from the specified file
+     if local_version == "dynamic":
+         version_attr = pyproject["tool"]["setuptools"]["dynamic"]["version"]["attr"]
+         module_path, attr_name = version_attr.rsplit(".", 1)
+         with open(f"{module_path.replace('.', '/')}/__init__.py") as f:
+             local_version = next(line.split("=")[1].strip().strip("'\"") for line in f if line.startswith(attr_name))
+
+     print(f"Local Version: {local_version}")
+     if not version_pattern.match(local_version):
+         print("WARNING: Incorrect local version pattern")
+         return "0.0.0", "0.0.0", False
+
+     # Get online version from PyPI
+     response = requests.get(f"https://pypi.org/pypi/{package_name}/json")
+     online_version = response.json()["info"]["version"] if response.status_code == 200 else None
+     print(f"Online Version: {online_version or 'Not Found'}")
+
+     # Determine if a new version should be published
+     if online_version:
+         local_ver = tuple(map(int, local_version.split(".")))
+         online_ver = tuple(map(int, online_version.split(".")))
+         major_diff = local_ver[0] - online_ver[0]
+         minor_diff = local_ver[1] - online_ver[1]
+         patch_diff = local_ver[2] - online_ver[2]
+
+         publish = (
+             (major_diff == 0 and minor_diff == 0 and 0 < patch_diff <= 2)
+             or (major_diff == 0 and minor_diff == 1 and local_ver[2] == 0)
+             or (major_diff == 1 and local_ver[1] == 0 and local_ver[2] == 0)
+         )  # should publish an update
+     else:
+         publish = True  # publish as this is likely a first release
+
+     return local_version, online_version, publish
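
To illustrate the publish gate in check_pypi_version: a patch bump of 1-2 versions, a minor bump landing on x.(y+1).0, or a major bump landing on (x+1).0.0 publishes; anything else is rejected. A sketch with hypothetical versions (module path assumed):

    from actions.utils.github_utils import check_pypi_version  # assumed module path

    # local 0.0.41 vs online 0.0.40 -> publish=True  (patch_diff == 1)
    # local 0.1.0  vs online 0.0.40 -> publish=True  (minor bump, patch reset to 0)
    # local 0.0.44 vs online 0.0.40 -> publish=False (patch jump of 4 is rejected)
    local_version, online_version, publish = check_pypi_version("pyproject.toml")
    print(f"local={local_version} online={online_version} publish={publish}")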