ultralytics-actions 0.2.1__tar.gz → 0.2.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ultralytics-actions might be problematic. Click here for more details.
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/PKG-INFO +5 -5
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/README.md +4 -3
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/actions/__init__.py +2 -1
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/actions/first_interaction.py +3 -3
- ultralytics_actions-0.2.2/actions/format_python_docstrings.py +511 -0
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/actions/review_pr.py +64 -12
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/actions/scan_prs.py +73 -45
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/actions/summarize_pr.py +4 -4
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/actions/summarize_release.py +2 -2
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/actions/update_markdown_code_blocks.py +5 -11
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/actions/utils/__init__.py +2 -2
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/actions/utils/openai_utils.py +28 -9
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/pyproject.toml +1 -8
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/tests/test_first_interaction.py +8 -8
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/tests/test_init.py +2 -2
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/tests/test_openai_utils.py +6 -6
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/tests/test_summarize_pr.py +12 -12
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/tests/test_summarize_release.py +4 -4
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/ultralytics_actions.egg-info/PKG-INFO +5 -5
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/ultralytics_actions.egg-info/SOURCES.txt +1 -0
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/ultralytics_actions.egg-info/entry_points.txt +1 -0
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/ultralytics_actions.egg-info/requires.txt +0 -1
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/LICENSE +0 -0
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/actions/dispatch_actions.py +0 -0
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/actions/update_file_headers.py +0 -0
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/actions/utils/common_utils.py +0 -0
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/actions/utils/github_utils.py +0 -0
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/actions/utils/version_utils.py +0 -0
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/setup.cfg +0 -0
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/tests/test_cli_commands.py +0 -0
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/tests/test_common_utils.py +0 -0
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/tests/test_dispatch_actions.py +0 -0
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/tests/test_file_headers.py +0 -0
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/tests/test_github_utils.py +0 -0
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/tests/test_update_markdown_codeblocks.py +0 -0
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/tests/test_urls.py +0 -0
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/ultralytics_actions.egg-info/dependency_links.txt +0 -0
- {ultralytics_actions-0.2.1 → ultralytics_actions-0.2.2}/ultralytics_actions.egg-info/top_level.txt +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: ultralytics-actions
|
|
3
|
-
Version: 0.2.
|
|
3
|
+
Version: 0.2.2
|
|
4
4
|
Summary: Ultralytics Actions for GitHub automation and PR management.
|
|
5
5
|
Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>
|
|
6
6
|
Maintainer-email: Ultralytics <hello@ultralytics.com>
|
|
@@ -30,7 +30,6 @@ Description-Content-Type: text/markdown
|
|
|
30
30
|
License-File: LICENSE
|
|
31
31
|
Requires-Dist: requests>=2.28.1
|
|
32
32
|
Requires-Dist: ruff>=0.9.1
|
|
33
|
-
Requires-Dist: docformatter>=1.7.5
|
|
34
33
|
Provides-Extra: dev
|
|
35
34
|
Requires-Dist: pytest; extra == "dev"
|
|
36
35
|
Requires-Dist: pytest-cov; extra == "dev"
|
|
@@ -68,8 +67,8 @@ AI-powered formatting, labeling, and PR summaries for Python, Swift, and Markdow
|
|
|
68
67
|
### 📄 Features
|
|
69
68
|
|
|
70
69
|
- **Python Code:** Formatted using [Ruff](https://github.com/astral-sh/ruff), an extremely fast Python linter and formatter
|
|
70
|
+
- **Python Docstrings:** Google-style formatting enforced with Ultralytics Python docstring formatter (optional)
|
|
71
71
|
- **Markdown Files:** Styled with [Prettier](https://github.com/prettier/prettier) to ensure consistent documentation appearance
|
|
72
|
-
- **Docstrings:** Cleaned and standardized using [docformatter](https://github.com/PyCQA/docformatter)
|
|
73
72
|
- **Swift Code:** Formatted with [`swift-format`](https://github.com/swiftlang/swift-format) _(requires `macos-latest` runner)_
|
|
74
73
|
- **Spell Check:** Common misspellings caught using [codespell](https://github.com/codespell-project/codespell)
|
|
75
74
|
- **Broken Links Check:** Broken links identified using [Lychee](https://github.com/lycheeverse/lychee)
|
|
@@ -118,7 +117,8 @@ jobs:
|
|
|
118
117
|
with:
|
|
119
118
|
token: ${{ secrets.GITHUB_TOKEN }} # Auto-generated token
|
|
120
119
|
labels: true # Auto-label issues/PRs using AI
|
|
121
|
-
python: true # Format Python with Ruff
|
|
120
|
+
python: true # Format Python with Ruff
|
|
121
|
+
python_docstrings: false # Format Python docstrings (default: false)
|
|
122
122
|
prettier: true # Format YAML, JSON, Markdown, CSS
|
|
123
123
|
swift: false # Format Swift (requires macos-latest)
|
|
124
124
|
dart: false # Format Dart/Flutter
|
|
@@ -166,7 +166,7 @@ List open PRs across an organization and auto-merge eligible Dependabot PRs.
|
|
|
166
166
|
with:
|
|
167
167
|
token: ${{ secrets.GITHUB_TOKEN }}
|
|
168
168
|
org: ultralytics # Optional: defaults to ultralytics
|
|
169
|
-
visibility:
|
|
169
|
+
visibility: private,internal # Optional: public, private, internal, all, or comma-separated
|
|
170
170
|
```
|
|
171
171
|
|
|
172
172
|
[**📖 Full Documentation →**](scan-prs/README.md)
|
|
@@ -30,8 +30,8 @@ AI-powered formatting, labeling, and PR summaries for Python, Swift, and Markdow
|
|
|
30
30
|
### 📄 Features
|
|
31
31
|
|
|
32
32
|
- **Python Code:** Formatted using [Ruff](https://github.com/astral-sh/ruff), an extremely fast Python linter and formatter
|
|
33
|
+
- **Python Docstrings:** Google-style formatting enforced with Ultralytics Python docstring formatter (optional)
|
|
33
34
|
- **Markdown Files:** Styled with [Prettier](https://github.com/prettier/prettier) to ensure consistent documentation appearance
|
|
34
|
-
- **Docstrings:** Cleaned and standardized using [docformatter](https://github.com/PyCQA/docformatter)
|
|
35
35
|
- **Swift Code:** Formatted with [`swift-format`](https://github.com/swiftlang/swift-format) _(requires `macos-latest` runner)_
|
|
36
36
|
- **Spell Check:** Common misspellings caught using [codespell](https://github.com/codespell-project/codespell)
|
|
37
37
|
- **Broken Links Check:** Broken links identified using [Lychee](https://github.com/lycheeverse/lychee)
|
|
@@ -80,7 +80,8 @@ jobs:
|
|
|
80
80
|
with:
|
|
81
81
|
token: ${{ secrets.GITHUB_TOKEN }} # Auto-generated token
|
|
82
82
|
labels: true # Auto-label issues/PRs using AI
|
|
83
|
-
python: true # Format Python with Ruff
|
|
83
|
+
python: true # Format Python with Ruff
|
|
84
|
+
python_docstrings: false # Format Python docstrings (default: false)
|
|
84
85
|
prettier: true # Format YAML, JSON, Markdown, CSS
|
|
85
86
|
swift: false # Format Swift (requires macos-latest)
|
|
86
87
|
dart: false # Format Dart/Flutter
|
|
@@ -128,7 +129,7 @@ List open PRs across an organization and auto-merge eligible Dependabot PRs.
|
|
|
128
129
|
with:
|
|
129
130
|
token: ${{ secrets.GITHUB_TOKEN }}
|
|
130
131
|
org: ultralytics # Optional: defaults to ultralytics
|
|
131
|
-
visibility:
|
|
132
|
+
visibility: private,internal # Optional: public, private, internal, all, or comma-separated
|
|
132
133
|
```
|
|
133
134
|
|
|
134
135
|
[**📖 Full Documentation →**](scan-prs/README.md)
|
|
@@ -18,6 +18,7 @@
|
|
|
18
18
|
# │ ├── scan_prs.py
|
|
19
19
|
# │ ├── summarize_pr.py
|
|
20
20
|
# │ ├── summarize_release.py
|
|
21
|
+
# │ ├── format_python_docstrings.py
|
|
21
22
|
# │ ├── update_file_headers.py
|
|
22
23
|
# │ └── update_markdown_code_blocks.py
|
|
23
24
|
# └── tests/
|
|
@@ -26,4 +27,4 @@
|
|
|
26
27
|
# ├── test_summarize_pr.py
|
|
27
28
|
# └── ...
|
|
28
29
|
|
|
29
|
-
__version__ = "0.2.
|
|
30
|
+
__version__ = "0.2.2"
|
|
@@ -7,7 +7,7 @@ import time
|
|
|
7
7
|
|
|
8
8
|
from . import review_pr
|
|
9
9
|
from .summarize_pr import SUMMARY_MARKER
|
|
10
|
-
from .utils import ACTIONS_CREDIT, Action, filter_labels,
|
|
10
|
+
from .utils import ACTIONS_CREDIT, Action, filter_labels, get_pr_open_response, get_response, remove_html_comments
|
|
11
11
|
|
|
12
12
|
BLOCK_USER = os.getenv("BLOCK_USER", "false").lower() == "true"
|
|
13
13
|
AUTO_PR_REVIEW = os.getenv("REVIEW", "true").lower() == "true"
|
|
@@ -91,7 +91,7 @@ YOUR RESPONSE (label names only):
|
|
|
91
91
|
},
|
|
92
92
|
{"role": "user", "content": prompt},
|
|
93
93
|
]
|
|
94
|
-
suggested_labels =
|
|
94
|
+
suggested_labels = get_response(messages, temperature=1.0)
|
|
95
95
|
if "none" in suggested_labels.lower():
|
|
96
96
|
return []
|
|
97
97
|
|
|
@@ -170,7 +170,7 @@ YOUR {issue_type.upper()} RESPONSE:
|
|
|
170
170
|
},
|
|
171
171
|
{"role": "user", "content": prompt},
|
|
172
172
|
]
|
|
173
|
-
return
|
|
173
|
+
return get_response(messages)
|
|
174
174
|
|
|
175
175
|
|
|
176
176
|
def main(*args, **kwargs):
|
|
@@ -0,0 +1,511 @@
|
|
|
1
|
+
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import ast
|
|
6
|
+
import re
|
|
7
|
+
import sys
|
|
8
|
+
import time
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
|
|
11
|
+
# Canonical Google-style docstring section names recognized by the formatter.
SECTIONS = ("Args", "Attributes", "Methods", "Returns", "Yields", "Raises", "Example", "Notes", "References")
# Bullet ("-", "*", "•") or enumerated ("1.", "a)") list-item prefix, with leading indent captured.
LIST_RX = re.compile(r"""^(\s*)(?:[-*•]\s+|(?:\d+|[A-Za-z]+)[\.\)]\s+)""")
# Markdown table row: line bounded by pipes.
TABLE_RX = re.compile(r"^\s*\|.*\|\s*$")
# Markdown table rule line (e.g. "|---|:--:|"), at least 3 rule characters.
TABLE_RULE_RX = re.compile(r"^\s*[:\-\|\s]{3,}$")
# Box-drawing characters used in ASCII directory trees; such lines are left verbatim.
TREE_CHARS = ("└", "├", "│", "─")
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def wrap_words(words: list[str], width: int, indent: int, min_words_per_line: int = 1) -> list[str]:
    """Greedily wrap words to width at the given indent, optionally rebalancing to avoid short orphan lines."""
    pad = " " * indent
    if not words:
        return []

    rows: list[list[str]] = []
    row: list[str] = []
    used = indent
    for word in words:
        extra = len(word) + (1 if row else 0)  # +1 for the joining space
        if row and used + extra > width:
            rows.append(row)
            row, used = [word], indent + len(word)
        else:
            row.append(word)
            used += extra
    if row:
        rows.append(row)

    # Rebalance: pull a word down from the previous row so continuation rows
    # carry at least min_words_per_line words, when it still fits the width.
    if min_words_per_line > 1:
        k = 1
        while k < len(rows):
            if len(rows[k]) < min_words_per_line and len(rows[k - 1]) > 1:
                moved = rows[k - 1][-1]
                row_len = len(pad) + sum(len(w) for w in rows[k]) + (len(rows[k]) - 1)
                if row_len + (1 if rows[k] else 0) + len(moved) <= width:
                    rows[k - 1].pop()
                    rows[k].insert(0, moved)
                    # The donor row may now be an orphan itself; re-check it.
                    if k - 1 > 0 and len(rows[k - 1]) == 1:
                        k -= 1
                        continue
            k += 1

    return [pad + " ".join(r) for r in rows]
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def wrap_para(text: str, width: int, indent: int, min_words_per_line: int = 1) -> list[str]:
    """Split a paragraph into words and wrap them; min_words_per_line controls orphan avoidance."""
    stripped = text.strip()
    if not stripped:
        return []
    return wrap_words(stripped.split(), width, indent, min_words_per_line)
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def wrap_hanging(head: str, desc: str, width: int, cont_indent: int) -> list[str]:
    """Wrap head+desc with a hanging indent; continuation lines carry at least two words each."""
    words = desc.split()
    if not words:
        return [head.rstrip()]

    # Greedily place as many leading words as fit after the head itself.
    room = width - len(head)
    fit: list[str] = []
    used = 0
    for word in words:
        cost = len(word) + (1 if fit else 0)
        if used + cost > room:
            break
        fit.append(word)
        used += cost

    if fit:
        result = [head + " ".join(fit)]
        remainder = words[len(fit):]
    else:
        # Nothing fits beside the head: emit it bare and wrap everything below.
        result = [head.rstrip()]
        remainder = words

    result.extend(wrap_words(remainder, width, cont_indent, min_words_per_line=2))
    return result
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
def is_list_item(s: str) -> bool:
    """Return True when s starts like a bullet or numbered list item."""
    return LIST_RX.match(s.lstrip()) is not None
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def is_fence_line(s: str) -> bool:
    """Return True when s opens or closes a ``` code fence."""
    return s.lstrip().startswith("```")
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def is_table_like(s: str) -> bool:
    """Return True for markdown table rows or table rule lines."""
    return TABLE_RX.match(s) is not None or TABLE_RULE_RX.match(s) is not None
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def is_tree_like(s: str) -> bool:
    """Return True when s contains ASCII-tree box-drawing characters."""
    for ch in TREE_CHARS:
        if ch in s:
            return True
    return False
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
def is_indented_block_line(s: str) -> bool:
    """Return True for preformatted lines: >=8 leading spaces or a leading tab."""
    return s.startswith(("        ", "\t"))
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
def header_name(line: str) -> str | None:
    """Return the canonical section name when line is a recognized header (e.g. 'Args:'), else None."""
    stripped = line.strip()
    if len(stripped) <= 1 or not stripped.endswith(":"):
        return None
    name = stripped[:-1].strip()
    # Normalize common aliases to the canonical spellings used in SECTIONS.
    name = {"Examples": "Example", "Note": "Notes"}.get(name, name)
    return name if name in SECTIONS else None
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
def add_header(lines: list[str], indent: int, title: str, opener_line: str) -> None:
    """Append a 'Title:' header in place: drop trailing blanks, then one blank unless directly after opener."""
    while lines and not lines[-1]:
        lines.pop()
    if lines and lines[-1] != opener_line:
        lines.append("")
    lines.append(f"{' ' * indent}{title}:")
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
def emit_paragraphs(
    src: list[str], width: int, indent: int, list_indent: int | None = None, orphan_min: int = 1
) -> list[str]:
    """Wrap normal text; preserve lists, fenced code, tables, ASCII trees, and deeply-indented blocks.

    Args:
        src: Raw lines of one docstring region.
        width: Maximum output line width.
        indent: Indent applied to wrapped prose lines.
        list_indent: If given, list items are re-indented to this column; otherwise left as found.
        orphan_min: Minimum words per continuation line, forwarded to the word wrapper.

    Returns:
        Formatted lines with trailing blank lines stripped.
    """
    out: list[str] = []
    buf: list[str] = []
    in_fence = False

    def flush():
        # Join buffered prose lines into a single paragraph and wrap it.
        nonlocal buf
        if buf:
            out.extend(wrap_para(" ".join(x.strip() for x in buf), width, indent, min_words_per_line=orphan_min))
            buf = []

    for raw in src:
        s = raw.rstrip("\n")
        stripped = s.strip()

        # blank line: paragraph boundary
        if not stripped:
            flush()
            out.append("")
            continue

        # fence start/stop: toggle verbatim mode
        if is_fence_line(s):
            flush()
            out.append(s.rstrip())
            in_fence = not in_fence
            continue

        # Preformatted content (fenced code, tables, trees, deep indents) is emitted as-is.
        if in_fence or is_table_like(s) or is_tree_like(s) or is_indented_block_line(s):
            flush()
            out.append(s.rstrip())
            continue

        # List items are never merged into the running paragraph.
        if is_list_item(s):
            flush()
            out.append((" " * list_indent + stripped) if list_indent is not None else s.rstrip())
            continue

        buf.append(s)
    flush()
    while out and out[-1] == "":
        out.pop()
    return out
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
def parse_sections(text: str) -> dict[str, list[str]]:
    """Split a Google-style docstring into 'summary', 'description', and named section line lists."""
    parts: dict[str, list[str]] = {key: [] for key in ("summary", "description", *SECTIONS)}
    current = "summary"
    for raw in text.splitlines():
        line = raw.rstrip("\n")
        section = header_name(line)
        if section:
            current = section
            continue
        if not line.strip():
            # The first blank after summary content starts the description.
            if current == "summary" and parts["summary"]:
                current = "description"
            # Keep internal blanks, but never lead a section with one.
            if parts[current]:
                parts[current].append("")
            continue
        parts[current].append(line)
    return parts
|
|
202
|
+
|
|
203
|
+
|
|
204
|
+
def looks_like_param(s: str) -> bool:
    """Heuristic: line has a non-empty 'name:' prefix and is not a list item."""
    if ":" not in s or is_list_item(s):
        return False
    return bool(s.split(":", 1)[0].strip())
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
def iter_items(lines: list[str]) -> list[list[str]]:
    """Group section lines into logical items, each starting at a parameter-looking line."""
    items: list[list[str]] = []
    pos, total = 0, len(lines)
    while pos < total:
        # Skip blank separators between items.
        while pos < total and not lines[pos].strip():
            pos += 1
        if pos >= total:
            break
        item = [lines[pos]]
        pos += 1
        # Continuation lines belong to this item until the next param-like line.
        while pos < total:
            text = lines[pos].strip()
            if text and looks_like_param(text):
                break
            item.append(lines[pos])
            pos += 1
        items.append(item)
    return items
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
def format_structured_block(lines: list[str], width: int, base: int) -> list[str]:
    """Format Args/Returns/etc.; continuation at base+4, lists at base+8. Preserve missing colons.

    Args:
        lines: Raw lines of one structured section (e.g. everything under "Args:").
        width: Maximum output line width.
        base: Indent column of the section header itself.

    Returns:
        Formatted lines for the section body.
    """
    out: list[str] = []
    cont, lst = base + 4, base + 8
    for item in iter_items(lines):
        first = item[0].strip()
        # Split "name: desc"; pad with "" so a colon-less first line still unpacks.
        name, desc = ([*first.split(":", 1), ""])[:2]
        name, desc = name.strip(), desc.strip()
        had_colon = ":" in first

        # A multi-word "name" without parentheses is prose, not a parameter —
        # treat the whole item as free paragraphs instead of a param entry.
        if not name or (" " in name and "(" not in name and ")" not in name):
            out.extend(emit_paragraphs(item, width, cont, lst, orphan_min=2))
            continue

        # Re-attach ": " only when the original had a colon or a description,
        # so entries written without colons keep that shape.
        head = " " * cont + (f"{name}: " if (desc or had_colon) else name)
        out.extend(wrap_hanging(head, desc, width, cont + 4))

        # Remaining item lines are the parameter's continued description.
        tail = item[1:]
        if tail:
            body = emit_paragraphs(tail, width, cont + 4, lst, orphan_min=2)
            if body:
                out.extend(body)
    return out
|
|
255
|
+
|
|
256
|
+
|
|
257
|
+
def detect_opener(original_literal: str) -> tuple[str, str, bool]:
    """Return (prefix, quotes, inline_hint) parsed from the original string token.

    prefix keeps only r/R/u/U flags (b/f flags are dropped); quotes is the triple-quote
    style found (defaulting to triple-double); inline_hint is True when text follows the
    opening quotes on the same line.
    """
    token = original_literal.lstrip()
    flags_end = 0
    while flags_end < len(token) and token[flags_end] in "rRuUbBfF":
        flags_end += 1
    quotes = '"""'
    candidate = token[flags_end : flags_end + 3]
    if len(candidate) == 3 and candidate in ('"""', "'''"):
        quotes = candidate
    prefix = "".join(c for c in token[:flags_end] if c in "rRuU")
    after = flags_end + len(quotes)
    inline = after < len(token) and token[after : after + 1] not in {"", "\n", "\r"}
    return prefix, quotes, inline
|
|
270
|
+
|
|
271
|
+
|
|
272
|
+
def format_google(text: str, indent: int, width: int, quotes: str, prefix: str, inline_first_line: bool) -> str:
    """Format multi-line Google-style docstring with given quotes/prefix.

    Args:
        text: Stripped docstring content.
        indent: Column of the docstring opener in the source.
        width: Maximum output line width.
        quotes: Triple-quote style to emit.
        prefix: String-prefix flags (e.g. "r") to keep on the opener.
        inline_first_line: Put the summary's first line on the opener line.

    Returns:
        The fully reassembled docstring literal, newline-joined.
    """
    p = parse_sections(text)
    opener = prefix + quotes
    out = [opener]
    if p["summary"]:
        lines = emit_paragraphs(p["summary"], width, indent, list_indent=indent, orphan_min=1)
        if inline_first_line and lines:
            # Move the first wrapped summary line up onto the opener, dropping its indent.
            pad = " " * indent
            first = lines[0][len(pad) :] if lines[0].startswith(pad) else lines[0].lstrip()
            out[0] = opener + first
            out.extend(lines[1:])
        else:
            out.extend(lines)
    if any(x.strip() for x in p["description"]):
        out.append("")
        out.extend(emit_paragraphs(p["description"], width, indent, list_indent=indent, orphan_min=1))
    # Structured sections get param-style formatting; ordering is fixed here.
    for sec in ("Args", "Attributes", "Methods", "Returns", "Yields", "Raises"):
        if any(x.strip() for x in p[sec]):
            add_header(out, indent, sec, opener)
            out.extend(format_structured_block(p[sec], width, indent))
    # Free-form sections are emitted verbatim (only trailing whitespace stripped).
    for sec in ("Example", "Notes", "References"):
        if any(x.strip() for x in p[sec]):
            title = "Examples" if sec == "Example" else sec
            add_header(out, indent, title, opener)
            out.extend(x.rstrip() for x in p[sec])
    while out and out[-1] == "":
        out.pop()
    out.append(" " * indent + quotes)
    return "\n".join(out)
|
|
302
|
+
|
|
303
|
+
|
|
304
|
+
def format_docstring(
    content: str,
    indent: int,
    width: int,
    quotes: str,
    prefix: str,
    inline_hint: bool,
    preserve_inline: bool = True,
) -> str:
    """Single-line if short/sectionless/no-lists; else Google-style. Preserve quotes/prefix.

    Args:
        content: Raw docstring text (uncleaned).
        indent: Column of the docstring opener in the source.
        width: Maximum output line width.
        quotes: Triple-quote style detected on the original literal.
        prefix: String-prefix flags to keep (e.g. "r").
        inline_hint: Original had text on the opener line.
        preserve_inline: Honor inline_hint when emitting multi-line output.

    Returns:
        The reformatted docstring literal.
    """
    if not content or not content.strip():
        return f"{prefix}{quotes}{quotes}"
    text = content.strip()
    # Note: "Section:" anywhere in the text (even mid-sentence) forces multi-line output.
    has_section = any(f"{s}:" in text for s in (*SECTIONS, "Examples"))
    has_list = any(is_list_item(l) for l in text.splitlines())
    single_ok = (
        ("\n" not in text)
        and not has_section
        and not has_list
        and (indent + len(prefix) + len(quotes) * 2 + len(text) <= width)
    )
    if single_ok:
        words = text.split()
        # Capitalize the first word unless it is a URL or already capitalized.
        if words and not (words[0].startswith(("http://", "https://")) or words[0][0].isupper()):
            words[0] = words[0][0].upper() + words[0][1:]
        out = " ".join(words)
        # Ensure terminal punctuation on one-line summaries.
        if out and out[-1] not in ".!?":
            out += "."
        return f"{prefix}{quotes}{out}{quotes}"
    return format_google(text, indent, width, quotes, prefix, inline_first_line=(preserve_inline and inline_hint))
|
|
334
|
+
|
|
335
|
+
|
|
336
|
+
class Visitor(ast.NodeVisitor):
    """Collect docstring replacements for classes and functions.

    Attributes are set in __init__:
    src: source lines; width: target width; repl: list of (start_line, end_line,
    start_col, end_col, new_text) replacements; preserve_inline: keep inline openers.
    """

    def __init__(self, src: list[str], width: int = 120, preserve_inline: bool = True):
        """Init with source lines, target width, and inline preservation flag."""
        self.src, self.width, self.repl, self.preserve_inline = src, width, [], preserve_inline

    def visit_Module(self, node):
        """Skip module docstring; visit children."""
        self.generic_visit(node)

    def visit_ClassDef(self, node):
        """Handle the class docstring, then recurse into the class body."""
        self._handle(node)
        self.generic_visit(node)

    def visit_FunctionDef(self, node):
        """Handle the function docstring, then recurse into nested defs."""
        self._handle(node)
        self.generic_visit(node)

    def visit_AsyncFunctionDef(self, node):
        """Handle the async-function docstring, then recurse into nested defs."""
        self._handle(node)
        self.generic_visit(node)

    def _handle(self, node):
        """If first stmt is a string expr, schedule replacement."""
        try:
            doc = ast.get_docstring(node, clean=False)
            if not doc or not node.body or not isinstance(node.body[0], ast.Expr):
                return
            s = node.body[0].value
            if not (isinstance(s, ast.Constant) and isinstance(s.value, str)):
                return
            # Convert AST 1-based line numbers to 0-based indices into self.src.
            sl, el = node.body[0].lineno - 1, node.body[0].end_lineno - 1
            sc, ec = node.body[0].col_offset, node.body[0].end_col_offset
            if sl < 0 or el >= len(self.src):
                return
            # Reconstruct the exact original literal text from its source span.
            original = (
                self.src[sl][sc:ec]
                if sl == el
                else "\n".join([self.src[sl][sc:], *self.src[sl + 1 : el], self.src[el][:ec]])
            )
            prefix, quotes, inline_hint = detect_opener(original)
            formatted = format_docstring(doc, sc, self.width, quotes, prefix, inline_hint, self.preserve_inline)
            # Only record a replacement when formatting actually changed something.
            if formatted.strip() != original.strip():
                self.repl.append((sl, el, sc, ec, formatted))
        except Exception:
            # Best-effort: never let one malformed docstring abort the whole file.
            return
|
|
383
|
+
|
|
384
|
+
|
|
385
|
+
def format_python_file(text: str, width: int = 120, preserve_inline: bool = True) -> str:
    """Return source with reformatted docstrings; on failure, return original.

    Args:
        text: Full Python source text.
        width: Maximum line width for formatted docstrings.
        preserve_inline: Keep summaries on the opener line when the original did.

    Returns:
        The rewritten source, or the original text on any parse/visit failure.
    """
    if not text.strip():
        return text
    try:
        tree = ast.parse(text)
    except SyntaxError:
        # Unparseable files are left untouched rather than corrupted.
        return text
    src = text.splitlines()
    v = Visitor(src, width, preserve_inline=preserve_inline)
    try:
        v.visit(tree)
    except Exception:
        return text
    # Apply replacements bottom-up so earlier line/column offsets stay valid.
    for sl, el, sc, ec, rep in reversed(v.repl):
        try:
            if sl == el:
                src[sl] = src[sl][:sc] + rep + src[sl][ec:]
            else:
                nl = rep.splitlines()
                # Re-attach any code before the opener and after the closer.
                nl[0] = src[sl][:sc] + nl[0]
                nl[-1] += src[el][ec:]
                src[sl : el + 1] = nl
        except Exception:
            # Skip a single bad splice instead of failing the whole file.
            continue
    return "\n".join(src)
|
|
411
|
+
|
|
412
|
+
|
|
413
|
+
def preserve_trailing_newlines(original: str, formatted: str) -> str:
    """Return formatted text carrying the same number of trailing newlines as the original."""
    want = len(original) - len(original.rstrip("\n"))
    have = len(formatted) - len(formatted.rstrip("\n"))
    if want == have:
        return formatted
    return formatted.rstrip("\n") + "\n" * want
|
|
418
|
+
|
|
419
|
+
|
|
420
|
+
def iter_py_files(paths: list[Path]) -> list[Path]:
    """Expand files/directories into a de-duplicated list of .py files (dirs searched recursively, sorted)."""
    found: list[Path] = []
    for path in paths:
        if path.is_dir():
            found.extend(sorted(path.rglob("*.py")))
        elif path.is_file() and path.suffix == ".py":
            found.append(path)
    # Drop duplicates while preserving first-seen order.
    return list(dict.fromkeys(found))
|
|
434
|
+
|
|
435
|
+
|
|
436
|
+
def process_file(path: Path, width: int = 120, check: bool = False, preserve_inline: bool = True) -> bool:
    """Process one file; True if unchanged/success, False if changed.

    Args:
        path: File to format or check; non-.py paths are skipped.
        width: Maximum line width for formatted docstrings.
        check: When True, report but do not write changes.
        preserve_inline: Keep summaries on the opener line when the original did.

    Returns:
        True when the file is unchanged (or skipped, or errored); False when a
        change was made or, in check mode, would be made.
    """
    if path.suffix != ".py":
        return True
    try:
        orig = path.read_text(encoding="utf-8")
        fmt = preserve_trailing_newlines(orig, format_python_file(orig, width, preserve_inline=preserve_inline))
        if check:
            # Check mode: print the changed path but never write.
            if orig != fmt:
                print(f"  {path}")
                return False
            return True
        if orig != fmt:
            path.write_text(fmt, encoding="utf-8")
            print(f"  {path}")
            return False
        return True
    except Exception as e:
        # Errors are reported but counted as "unchanged" so one bad file
        # doesn't fail the whole run.
        print(f"  Error: {path}: {e}")
        return True
|
|
456
|
+
|
|
457
|
+
|
|
458
|
+
def parse_cli(argv: list[str]) -> tuple[list[Path], int, bool, bool]:
    """Parse argv minimally into (paths, width, check, preserve_inline); unknown args become paths."""
    width = 120
    check = False
    preserve_inline = True
    paths: list[Path] = []
    for arg in argv:
        if arg == "--check":
            check = True
        elif arg == "--no-preserve-inline":
            preserve_inline = False
        elif arg.startswith("--line-width="):
            raw = arg.split("=", 1)[1]
            try:
                width = int(raw)
            except ValueError:
                pass  # keep the previous width on a malformed value
        else:
            paths.append(Path(arg))
    return paths, width, check, preserve_inline
|
|
474
|
+
|
|
475
|
+
|
|
476
|
+
def main() -> None:
    """CLI entry point: parse args, expand paths, format/check files, report a ruff-style summary."""
    args = sys.argv[1:]
    if not args:
        print(
            "Usage: format_python_docstrings.py [--check] [--no-preserve-inline] [--line-width=120] <files_or_dirs...>"
        )
        return
    paths, width, check, preserve_inline = parse_cli(args)
    files = iter_py_files(paths)
    if not files:
        print("No Python files found")
        return

    t0 = time.time()
    print(f"{'Checking' if check else 'Formatting'} {len(files)} file{'s' if len(files) != 1 else ''}")
    # process_file returns False for "changed", so negate to count changes.
    changed = sum(not process_file(f, width, check, preserve_inline=preserve_inline) for f in files)

    dur = time.time() - t0
    if changed:
        verb = "would be reformatted" if check else "reformatted"
        unchanged = len(files) - changed
        parts = []
        if changed:
            parts.append(f"{changed} file{'s' if changed != 1 else ''} {verb}")
        if unchanged:
            parts.append(f"{unchanged} file{'s' if unchanged != 1 else ''} left unchanged")
        print(f"{', '.join(parts)} ({dur:.1f}s)")
        # In check mode a pending change is a failure, mirroring formatter conventions.
        if check:
            sys.exit(1)
    else:
        print(f"{len(files)} file{'s' if len(files) != 1 else ''} left unchanged ({dur:.1f}s)")
|
|
508
|
+
|
|
509
|
+
|
|
510
|
+
# Script entry point: format or check the files/directories given on the command line.
if __name__ == "__main__":
    main()
|