ultralytics-actions 0.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,192 @@
1
+ # Ultralytics Actions 🚀, AGPL-3.0 license https://ultralytics.com/license
2
+
3
+ import os
4
+ import re
5
+ import subprocess
6
+ import time
7
+ from datetime import datetime
8
+
9
+ import requests
10
+
11
+ from .utils import (
12
+ GITHUB_API_URL,
13
+ GITHUB_HEADERS,
14
+ GITHUB_HEADERS_DIFF,
15
+ GITHUB_TOKEN,
16
+ REPO_NAME,
17
+ get_completion,
18
+ remove_html_comments,
19
+ )
20
+
21
# Environment variables (supplied by the release workflow)
CURRENT_TAG = os.getenv("CURRENT_TAG")  # tag being released; required by main()
PREVIOUS_TAG = os.getenv("PREVIOUS_TAG")  # optional; falls back to git lookup in get_previous_tag()
24
+
25
+
26
def get_release_diff(repo_name: str, previous_tag: str, latest_tag: str) -> str:
    """Get the diff between two tags."""
    compare_url = f"{GITHUB_API_URL}/repos/{repo_name}/compare/{previous_tag}...{latest_tag}"
    response = requests.get(compare_url, headers=GITHUB_HEADERS_DIFF)
    # The diff Accept header makes GitHub return the raw unified diff as text
    if response.status_code == 200:
        return response.text
    return f"Failed to get diff: {response.content}"
31
+
32
+
33
def get_prs_between_tags(repo_name: str, previous_tag: str, latest_tag: str) -> list:
    """
    Get PRs merged between two tags using the compare API.

    Scans commit messages in the compare range for "#123"-style references, fetches each
    referenced PR, and returns dicts (number, title, body, author, html_url, merged_at)
    sorted by merge date, earliest first.
    """
    url = f"{GITHUB_API_URL}/repos/{repo_name}/compare/{previous_tag}...{latest_tag}"
    r = requests.get(url, headers=GITHUB_HEADERS)
    r.raise_for_status()

    data = r.json()
    pr_numbers = set()

    for commit in data["commits"]:
        pr_matches = re.findall(r"#(\d+)", commit["commit"]["message"])
        pr_numbers.update(pr_matches)

    prs = []
    time.sleep(10)  # sleep 10 seconds to allow final PR summary to update on merge
    # Fix: numeric sort — the numbers are strings, so plain sorted() put "10" before "2"
    for pr_number in sorted(pr_numbers, key=int):  # earliest to latest
        pr_url = f"{GITHUB_API_URL}/repos/{repo_name}/pulls/{pr_number}"
        pr_response = requests.get(pr_url, headers=GITHUB_HEADERS)
        if pr_response.status_code == 200:
            pr_data = pr_response.json()
            if not pr_data["merged_at"]:
                # Fix: a commit message can reference an unmerged/closed PR; its
                # merged_at is None and would crash the strptime sort below
                continue
            prs.append(
                {
                    "number": pr_data["number"],
                    "title": pr_data["title"],
                    # Fix: GitHub returns None for an empty PR body; guard the regex helper
                    "body": remove_html_comments(pr_data["body"] or ""),
                    "author": pr_data["user"]["login"],
                    "html_url": pr_data["html_url"],
                    "merged_at": pr_data["merged_at"],
                }
            )

    # Sort PRs by merge date
    prs.sort(key=lambda x: datetime.strptime(x["merged_at"], "%Y-%m-%dT%H:%M:%SZ"))

    return prs
68
+
69
+
70
def get_new_contributors(repo: str, prs: list) -> set:
    """Identify genuinely new contributors in the current release."""
    first_timers = set()
    for pr in prs:
        author = pr["author"]
        # The author's earliest merged PR decides whether this release holds their first contribution
        url = f"{GITHUB_API_URL}/search/issues?q=repo:{repo}+author:{author}+is:pr+is:merged&sort=created&order=asc"
        resp = requests.get(url, headers=GITHUB_HEADERS)
        if resp.status_code != 200:
            continue  # best-effort: skip authors we cannot look up
        payload = resp.json()
        if payload["total_count"] > 0 and payload["items"][0]["number"] == pr["number"]:
            first_timers.add(author)
    return first_timers
85
+
86
+
87
def generate_release_summary(diff: str, prs: list, latest_tag: str, previous_tag: str, repo_name: str) -> str:
    """
    Generate a summary for the release.

    Builds an LLM prompt from the merged PRs and the raw release diff, requests a
    completion, and appends a "What's Changed" / "New Contributors" / "Full Changelog"
    suffix to the model's output.
    """
    # Per-PR digests fed to the model; each PR body is truncated to 1000 chars
    pr_summaries = "\n\n".join(
        [f"PR #{pr['number']}: {pr['title']} by @{pr['author']}\n{pr['body'][:1000]}" for pr in prs]
    )

    # NOTE(review): prs is sorted earliest-to-latest, so prs[-1] is the most recently
    # merged PR — but the prompt below calls the current PR "the first in the list".
    # Confirm which ordering is intended.
    current_pr = prs[-1] if prs else None
    current_pr_summary = (
        f"Current PR #{current_pr['number']}: {current_pr['title']} by @{current_pr['author']}\n{current_pr['body'][:1000]}"
        if current_pr
        else "No current PR found."
    )

    # Markdown bullet list used verbatim in the release notes suffix
    whats_changed = "\n".join([f"* {pr['title']} by @{pr['author']} in {pr['html_url']}" for pr in prs])

    # Generate New Contributors section (empty string when there are none)
    new_contributors = get_new_contributors(repo_name, prs)
    new_contributors_section = (
        "\n## New Contributors\n"
        + "\n".join(
            [
                f"* @{contributor} made their first contribution in {next(pr['html_url'] for pr in prs if pr['author'] == contributor)}"
                for contributor in new_contributors
            ]
        )
        if new_contributors
        else ""
    )

    full_changelog = f"https://github.com/{repo_name}/compare/{previous_tag}...{latest_tag}"
    release_suffix = (
        f"\n\n## What's Changed\n{whats_changed}\n{new_contributors_section}\n\n**Full Changelog**: {full_changelog}\n"
    )

    # Prompt payloads are truncated (30k chars of PR summaries, 300k of diff) to bound token usage
    messages = [
        {
            "role": "system",
            "content": "You are an Ultralytics AI assistant skilled in software development and technical communication. Your task is to summarize GitHub releases in a way that is detailed, accurate, and understandable to both expert developers and non-expert users. Focus on highlighting the key changes and their impact in simple and intuitive terms.",
        },
        {
            "role": "user",
            "content": f"Summarize the updates made in the '{latest_tag}' tag, focusing on major model or features changes, their purpose, and potential impact. Keep the summary clear and suitable for a broad audience. Add emojis to enliven the summary. Prioritize changes from the current PR (the first in the list), which is usually the most important in the release. Reply directly with a summary along these example guidelines, though feel free to adjust as appropriate:\n\n"
            f"## 🌟 Summary (single-line synopsis)\n"
            f"## 📊 Key Changes (bullet points highlighting any major changes)\n"
            f"## 🎯 Purpose & Impact (bullet points explaining any benefits and potential impact to users)\n\n\n"
            f"Here's the information about the current PR:\n\n{current_pr_summary}\n\n"
            f"Here's the information about PRs merged between the previous release and this one:\n\n{pr_summaries[:30000]}\n\n"
            f"Here's the release diff:\n\n{diff[:300000]}",
        },
    ]
    print(messages[-1]["content"])  # for debug
    return get_completion(messages) + release_suffix
139
+
140
+
141
def create_github_release(repo_name: str, tag_name: str, name: str, body: str) -> int:
    """Create a release on GitHub."""
    payload = {
        "tag_name": tag_name,
        "name": name,
        "body": body,
        "draft": False,
        "prerelease": False,
    }
    # GitHub answers 201 Created on success; the caller checks the status code
    response = requests.post(f"{GITHUB_API_URL}/repos/{repo_name}/releases", headers=GITHUB_HEADERS, json=payload)
    return response.status_code
147
+
148
+
149
def get_previous_tag() -> str:
    """
    Get the previous tag from git tags.

    Returns:
        (str): The most recent tag other than CURRENT_TAG, or "HEAD~1" when the
            git lookup fails (no prior tag, or git unavailable).
    """
    cmd = ["git", "describe", "--tags", "--abbrev=0", "--exclude", CURRENT_TAG]
    try:
        return subprocess.run(cmd, check=True, text=True, capture_output=True).stdout.strip()
    # Fix: also catch FileNotFoundError so a missing `git` binary falls back
    # instead of crashing the release script
    except (subprocess.CalledProcessError, FileNotFoundError):
        print("Failed to get previous tag from git. Using previous commit.")
        return "HEAD~1"
157
+
158
+
159
def main():
    """Automates generating and publishing a GitHub release summary from PRs and commit differences."""
    # GITHUB_TOKEN and CURRENT_TAG are mandatory; PREVIOUS_TAG is optional (git fallback below)
    if not all([GITHUB_TOKEN, CURRENT_TAG]):
        raise ValueError("One or more required environment variables are missing.")

    previous_tag = PREVIOUS_TAG or get_previous_tag()

    # Get the diff between the tags
    diff = get_release_diff(REPO_NAME, previous_tag, CURRENT_TAG)

    # Get PRs merged between the tags
    prs = get_prs_between_tags(REPO_NAME, previous_tag, CURRENT_TAG)

    # Generate release summary; release creation still proceeds if the LLM step fails
    try:
        summary = generate_release_summary(diff, prs, CURRENT_TAG, previous_tag, REPO_NAME)
    except Exception as e:
        print(f"Failed to generate summary: {str(e)}")
        summary = "Failed to generate summary."

    # Get the latest commit message (first line only) for use in the release title
    cmd = ["git", "log", "-1", "--pretty=%B"]
    commit_message = subprocess.run(cmd, check=True, text=True, capture_output=True).stdout.split("\n")[0].strip()

    # Create the release on GitHub; 201 Created indicates success
    status_code = create_github_release(REPO_NAME, CURRENT_TAG, f"{CURRENT_TAG} - {commit_message}", summary)
    if status_code == 201:
        print(f"Successfully created release {CURRENT_TAG}")
    else:
        print(f"Failed to create release {CURRENT_TAG}. Status code: {status_code}")


if __name__ == "__main__":
    main()
@@ -0,0 +1,173 @@
1
+ # Ultralytics Actions 🚀, AGPL-3.0 license https://ultralytics.com/license
2
+
3
+ import hashlib
4
+ import re
5
+ import shutil
6
+ import subprocess
7
+ from pathlib import Path
8
+
9
+
10
def extract_code_blocks(markdown_content):
    """Extract Python code blocks with ``` followed by "python", "py", or "{ .py .annotate }"."""
    # Backreference \1 forces the closing fence to carry the same indent as the opening one;
    # findall returns (indent, block_body) tuples.
    fence = re.compile(
        r"^( *)```(?:python|py|\{[ ]*\.py[ ]*\.annotate[ ]*\})\n(.*?)\n\1```",
        re.DOTALL | re.MULTILINE,
    )
    return fence.findall(markdown_content)
15
+
16
+
17
def remove_indentation(code_block, num_spaces):
    """Removes `num_spaces` leading spaces from each line in `code_block` and returns the modified string."""
    dedented = []
    for line in code_block.split("\n"):
        # Lines shorter than the indent (e.g. blank lines) pass through untouched
        dedented.append(line[num_spaces:] if len(line) >= num_spaces else line)
    return "\n".join(dedented)
22
+
23
+
24
def add_indentation(code_block, num_spaces):
    """Adds `num_spaces` leading spaces to each non-empty line in `code_block`."""
    pad = " " * num_spaces
    # Blank/whitespace-only lines are left as-is so no trailing spaces are introduced
    return "\n".join(pad + line if line.strip() else line for line in code_block.split("\n"))
30
+
31
+
32
def format_code_with_ruff(temp_dir):
    """
    Formats all Python code files in the `temp_dir` directory using the 'ruff' linter tool.

    Runs three external tools in sequence (ruff format, ruff check --fix, docformatter).
    Each step is best-effort: a failure is printed and the next step still runs.
    """
    try:
        # Run ruff format
        subprocess.run(
            [
                "ruff",
                "format",
                "--line-length=120",
                str(temp_dir),
            ],
            check=True,
        )
        print("Completed ruff format ✅")
    except Exception as e:
        print(f"ERROR running ruff format ❌ {e}")

    try:
        # Run ruff check with ignored rules:
        # F821: Undefined name
        # F841: Local variable is assigned to but never used
        # (extracted doc snippets legitimately reference undefined/unused names)
        subprocess.run(
            [
                "ruff",
                "check",
                "--fix",
                "--extend-select=I",
                "--ignore=F821,F841",
                str(temp_dir),
            ],
            check=True,
        )
        print("Completed ruff check ✅")
    except Exception as e:
        print(f"ERROR running ruff check ❌ {e}")

    try:
        # Run docformatter to normalize docstrings inside the extracted snippets
        subprocess.run(
            [
                "docformatter",
                "--wrap-summaries=120",
                "--wrap-descriptions=120",
                "--pre-summary-newline",
                "--close-quotes-on-newline",
                "--in-place",
                "--recursive",
                str(temp_dir),
            ],
            check=True,
        )
        print("Completed docformatter ✅")
    except Exception as e:
        print(f"ERROR running docformatter ❌ {e}")
86
+
87
+
88
def generate_temp_filename(file_path, index):
    """Generates a unique temporary filename based on the file path and index."""
    # md5 is used only for filename uniqueness here, not for security
    digest = hashlib.md5(f"{file_path.parent}_{file_path.stem}_{index}".encode()).hexdigest()
    return f"temp_{digest}.py"
93
+
94
+
95
def process_markdown_file(file_path, temp_dir, verbose=False):
    """
    Reads a markdown file, extracts Python code blocks, and saves them to temp files for formatting.

    Returns:
        (tuple): (markdown_content, temp_files) where temp_files is a list of
            (num_spaces, original_code_block, temp_file_path) triples,
            or (None, None) if reading/extraction failed.
    """
    try:
        markdown_content = Path(file_path).read_text()
        code_blocks = extract_code_blocks(markdown_content)
        temp_files = []

        for i, (num_spaces, code_block) in enumerate(code_blocks):
            if verbose:
                print(f"Extracting code block {i} from {file_path}")
            num_spaces = len(num_spaces)  # captured indent string -> indent width
            code_without_indentation = remove_indentation(code_block, num_spaces)

            # Generate a unique temp file path
            temp_file_path = temp_dir / generate_temp_filename(file_path, i)
            with open(temp_file_path, "w") as temp_file:
                temp_file.write(code_without_indentation)
            temp_files.append((num_spaces, code_block, temp_file_path))

        return markdown_content, temp_files

    except Exception as e:
        # Best-effort: report and skip this file rather than aborting the whole run
        print(f"Error processing file {file_path}: {e}")
        return None, None
119
+
120
+
121
def update_markdown_file(file_path, markdown_content, temp_files):
    """
    Updates the markdown file with formatted code blocks.

    For each temp file, substitutes the original fenced block in `markdown_content`
    with the formatted code (re-indented to match), then writes the result back
    to `file_path`. Per-block failures are printed and skipped.
    """
    for num_spaces, original_code_block, temp_file_path in temp_files:
        try:
            with open(temp_file_path) as temp_file:
                formatted_code = temp_file.read().rstrip("\n")  # Strip trailing newlines
            formatted_code_with_indentation = add_indentation(formatted_code, num_spaces)

            # Replace both `python` and `py` code blocks (and the annotate variant);
            # only the fence whose language tag matches the original will be found
            for lang in ["python", "py", "{ .py .annotate }"]:
                markdown_content = markdown_content.replace(
                    f"{' ' * num_spaces}```{lang}\n{original_code_block}\n{' ' * num_spaces}```",
                    f"{' ' * num_spaces}```{lang}\n{formatted_code_with_indentation}\n{' ' * num_spaces}```",
                )
        except Exception as e:
            print(f"Error updating code block in file {file_path}: {e}")

    try:
        with open(file_path, "w") as file:
            file.write(markdown_content)
    except Exception as e:
        print(f"Error writing file {file_path}: {e}")
143
+
144
+
145
def main(root_dir=None, verbose=False):
    """
    Processes all markdown files in a specified directory and its subdirectories.

    Extracts Python code blocks into temp files, formats them with ruff/docformatter,
    and writes the formatted code back into the markdown sources.

    Args:
        root_dir: Directory scanned recursively for "*.md" files; defaults to the
            current working directory at call time.
        verbose: If True, print per-file and per-block progress.
    """
    # Fix: the old default `root_dir=Path.cwd()` was evaluated once at import time;
    # resolve the cwd at call time instead (None keeps the call signature compatible).
    root_path = Path(root_dir) if root_dir is not None else Path.cwd()
    markdown_files = list(root_path.rglob("*.md"))
    temp_dir = Path("temp_code_blocks")
    temp_dir.mkdir(exist_ok=True)

    try:
        # Extract code blocks and save to temp files
        all_temp_files = []
        for markdown_file in markdown_files:
            if verbose:
                print(f"Processing {markdown_file}")
            # Fix: forward `verbose` — it was previously dropped, silencing per-block logs
            markdown_content, temp_files = process_markdown_file(markdown_file, temp_dir, verbose)
            if markdown_content and temp_files:
                all_temp_files.append((markdown_file, markdown_content, temp_files))

        # Format all code blocks with ruff
        format_code_with_ruff(temp_dir)

        # Update markdown files with formatted code blocks
        for markdown_file, markdown_content, temp_files in all_temp_files:
            update_markdown_file(markdown_file, markdown_content, temp_files)
    finally:
        # Fix: clean up the temp directory even when formatting/updating raises
        shutil.rmtree(temp_dir, ignore_errors=True)


if __name__ == "__main__":
    main()
@@ -0,0 +1,35 @@
1
+ # Ultralytics Actions 🚀, AGPL-3.0 license https://ultralytics.com/license
2
+
3
+ from .common_utils import remove_html_comments
4
+ from .github_utils import (
5
+ GITHUB_API_URL,
6
+ GITHUB_EVENT_NAME,
7
+ GITHUB_EVENT_PATH,
8
+ GITHUB_HEADERS,
9
+ GITHUB_HEADERS_DIFF,
10
+ GITHUB_TOKEN,
11
+ PR_NUMBER,
12
+ REPO_NAME,
13
+ get_github_data,
14
+ get_pr_diff,
15
+ graphql_request,
16
+ )
17
+ from .openai_utils import OPENAI_API_KEY, OPENAI_MODEL, get_completion
18
+
19
# Public API of the utils package; keep in sync with the imports above
__all__ = (
    "remove_html_comments",
    "GITHUB_API_URL",
    "GITHUB_HEADERS",
    "GITHUB_HEADERS_DIFF",
    "GITHUB_TOKEN",
    "REPO_NAME",
    "PR_NUMBER",
    "GITHUB_EVENT_NAME",
    "GITHUB_EVENT_PATH",
    "get_github_data",
    "get_pr_diff",
    "graphql_request",
    "OPENAI_API_KEY",
    "OPENAI_MODEL",
    "get_completion",
)
@@ -0,0 +1,8 @@
1
+ # Ultralytics Actions 🚀, AGPL-3.0 license https://ultralytics.com/license
2
+
3
+ import re
4
+
5
+
6
def remove_html_comments(body: str) -> str:
    """
    Removes HTML comments from a string using regex pattern matching.

    Args:
        body (str | None): Text possibly containing "<!-- ... -->" comments. None is
            treated as empty — the GitHub API returns None for empty PR/issue bodies.

    Returns:
        (str): Input with all HTML comments removed and surrounding whitespace stripped.
    """
    # DOTALL lets a single comment span multiple lines; `or ""` guards None input
    return re.sub(r"<!--.*?-->", "", body or "", flags=re.DOTALL).strip()
@@ -0,0 +1,44 @@
1
+ # Ultralytics Actions 🚀, AGPL-3.0 license https://ultralytics.com/license
2
+
3
+ import os
4
+
5
+ import requests
6
+
7
# GitHub API authentication and endpoint configuration
GITHUB_TOKEN = os.getenv("GITHUB_TOKEN")
GITHUB_API_URL = "https://api.github.com"
# Standard JSON responses
GITHUB_HEADERS = {"Authorization": f"token {GITHUB_TOKEN}", "Accept": "application/vnd.github.v3+json"}
# Raw unified-diff responses (used for PR/compare diffs)
GITHUB_HEADERS_DIFF = {"Authorization": f"token {GITHUB_TOKEN}", "Accept": "application/vnd.github.v3.diff"}

# Values injected by the GitHub Actions runtime; may be None outside CI
PR_NUMBER = os.getenv("PR_NUMBER")
REPO_NAME = os.getenv("GITHUB_REPOSITORY")
GITHUB_EVENT_NAME = os.getenv("GITHUB_EVENT_NAME")
GITHUB_EVENT_PATH = os.getenv("GITHUB_EVENT_PATH")
16
+
17
+
18
def get_pr_diff(pr_number: int) -> str:
    """Retrieves the diff content for a specified pull request in a GitHub repository."""
    # The diff Accept header makes GitHub return the raw unified diff instead of JSON
    response = requests.get(f"{GITHUB_API_URL}/repos/{REPO_NAME}/pulls/{pr_number}", headers=GITHUB_HEADERS_DIFF)
    return response.text if response.status_code == 200 else ""
23
+
24
+
25
def get_github_data(endpoint: str) -> dict:
    """Fetches GitHub repository data from a specified endpoint using the GitHub API."""
    url = f"{GITHUB_API_URL}/repos/{REPO_NAME}/{endpoint}"
    response = requests.get(url, headers=GITHUB_HEADERS)
    response.raise_for_status()  # surface HTTP errors to the caller
    return response.json()
30
+
31
+
32
def graphql_request(query: str, variables: dict = None) -> dict:
    """Executes a GraphQL query against the GitHub API and returns the response as a dictionary."""
    gql_headers = {
        "Authorization": f"Bearer {GITHUB_TOKEN}",
        "Content-Type": "application/json",
        "Accept": "application/vnd.github.v4+json",
    }
    payload = {"query": query, "variables": variables}
    response = requests.post(f"{GITHUB_API_URL}/graphql", json=payload, headers=gql_headers)
    response.raise_for_status()
    result = response.json()
    # GraphQL reports failures in-body: HTTP 200 with an "errors" key still means failure
    success = "data" in result and not result.get("errors")
    print(f"{'Successful' if success else 'Fail'} discussion GraphQL request: {result.get('errors', 'No errors')}")
    return result
@@ -0,0 +1,24 @@
1
+ # Ultralytics Actions 🚀, AGPL-3.0 license https://ultralytics.com/license
2
+
3
+ import os
4
+
5
+ import requests
6
+
7
# OpenAI configuration; OPENAI_API_KEY is required by get_completion()
OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o")  # model name, defaults to gpt-4o
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
9
+
10
+
11
def get_completion(messages: list) -> str:
    """
    Generates a completion using OpenAI's API based on input messages.

    Args:
        messages (list): Chat messages, each a dict with "role" and "content" keys.

    Returns:
        (str): The assistant's reply, stripped of whitespace and known bot mentions.

    Raises:
        ValueError: If OPENAI_API_KEY is not set.
        requests.HTTPError: If the API request fails.
    """
    if not OPENAI_API_KEY:
        # Fix: raise instead of assert — asserts are stripped under `python -O`
        raise ValueError("OpenAI API key is required.")
    url = "https://api.openai.com/v1/chat/completions"
    headers = {"Authorization": f"Bearer {OPENAI_API_KEY}", "Content-Type": "application/json"}
    data = {"model": OPENAI_MODEL, "messages": messages}

    r = requests.post(url, headers=headers, json=data)
    r.raise_for_status()
    content = r.json()["choices"][0]["message"]["content"].strip()
    remove = [" @giscus[bot]"]  # substrings scrubbed from model output
    for x in remove:
        content = content.replace(x, "")
    return content