Sphinx 8.1.2__py3-none-any.whl → 8.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of Sphinx might be problematic.
- sphinx/__init__.py +8 -4
- sphinx/__main__.py +2 -0
- sphinx/_cli/__init__.py +2 -5
- sphinx/_cli/util/colour.py +34 -11
- sphinx/_cli/util/errors.py +128 -61
- sphinx/addnodes.py +51 -35
- sphinx/application.py +362 -230
- sphinx/builders/__init__.py +87 -64
- sphinx/builders/_epub_base.py +65 -56
- sphinx/builders/changes.py +17 -23
- sphinx/builders/dirhtml.py +8 -13
- sphinx/builders/epub3.py +70 -38
- sphinx/builders/gettext.py +93 -73
- sphinx/builders/html/__init__.py +240 -186
- sphinx/builders/html/_assets.py +9 -2
- sphinx/builders/html/_build_info.py +3 -0
- sphinx/builders/latex/__init__.py +64 -54
- sphinx/builders/latex/constants.py +14 -11
- sphinx/builders/latex/nodes.py +2 -0
- sphinx/builders/latex/theming.py +8 -9
- sphinx/builders/latex/transforms.py +7 -5
- sphinx/builders/linkcheck.py +193 -149
- sphinx/builders/manpage.py +17 -17
- sphinx/builders/singlehtml.py +28 -16
- sphinx/builders/texinfo.py +28 -21
- sphinx/builders/text.py +10 -15
- sphinx/builders/xml.py +10 -19
- sphinx/cmd/build.py +49 -119
- sphinx/cmd/make_mode.py +35 -31
- sphinx/cmd/quickstart.py +78 -62
- sphinx/config.py +265 -163
- sphinx/directives/__init__.py +51 -54
- sphinx/directives/admonitions.py +107 -0
- sphinx/directives/code.py +24 -19
- sphinx/directives/other.py +21 -42
- sphinx/directives/patches.py +28 -16
- sphinx/domains/__init__.py +54 -31
- sphinx/domains/_domains_container.py +22 -17
- sphinx/domains/_index.py +5 -8
- sphinx/domains/c/__init__.py +366 -245
- sphinx/domains/c/_ast.py +378 -256
- sphinx/domains/c/_ids.py +89 -31
- sphinx/domains/c/_parser.py +283 -214
- sphinx/domains/c/_symbol.py +269 -198
- sphinx/domains/changeset.py +39 -24
- sphinx/domains/citation.py +54 -24
- sphinx/domains/cpp/__init__.py +517 -362
- sphinx/domains/cpp/_ast.py +999 -682
- sphinx/domains/cpp/_ids.py +133 -65
- sphinx/domains/cpp/_parser.py +746 -588
- sphinx/domains/cpp/_symbol.py +692 -489
- sphinx/domains/index.py +10 -8
- sphinx/domains/javascript.py +152 -74
- sphinx/domains/math.py +50 -40
- sphinx/domains/python/__init__.py +402 -211
- sphinx/domains/python/_annotations.py +134 -61
- sphinx/domains/python/_object.py +155 -68
- sphinx/domains/rst.py +94 -49
- sphinx/domains/std/__init__.py +510 -249
- sphinx/environment/__init__.py +345 -61
- sphinx/environment/adapters/asset.py +7 -1
- sphinx/environment/adapters/indexentries.py +15 -20
- sphinx/environment/adapters/toctree.py +19 -9
- sphinx/environment/collectors/__init__.py +3 -1
- sphinx/environment/collectors/asset.py +18 -15
- sphinx/environment/collectors/dependencies.py +8 -10
- sphinx/environment/collectors/metadata.py +6 -4
- sphinx/environment/collectors/title.py +3 -1
- sphinx/environment/collectors/toctree.py +4 -4
- sphinx/errors.py +1 -3
- sphinx/events.py +4 -4
- sphinx/ext/apidoc/__init__.py +66 -0
- sphinx/ext/apidoc/__main__.py +9 -0
- sphinx/ext/apidoc/_cli.py +356 -0
- sphinx/ext/apidoc/_extension.py +262 -0
- sphinx/ext/apidoc/_generate.py +356 -0
- sphinx/ext/apidoc/_shared.py +99 -0
- sphinx/ext/autodoc/__init__.py +837 -483
- sphinx/ext/autodoc/directive.py +57 -21
- sphinx/ext/autodoc/importer.py +184 -67
- sphinx/ext/autodoc/mock.py +25 -10
- sphinx/ext/autodoc/preserve_defaults.py +17 -9
- sphinx/ext/autodoc/type_comment.py +56 -29
- sphinx/ext/autodoc/typehints.py +49 -26
- sphinx/ext/autosectionlabel.py +28 -11
- sphinx/ext/autosummary/__init__.py +281 -142
- sphinx/ext/autosummary/generate.py +121 -51
- sphinx/ext/coverage.py +152 -91
- sphinx/ext/doctest.py +169 -101
- sphinx/ext/duration.py +12 -6
- sphinx/ext/extlinks.py +33 -21
- sphinx/ext/githubpages.py +8 -8
- sphinx/ext/graphviz.py +175 -109
- sphinx/ext/ifconfig.py +11 -6
- sphinx/ext/imgconverter.py +48 -25
- sphinx/ext/imgmath.py +127 -97
- sphinx/ext/inheritance_diagram.py +177 -103
- sphinx/ext/intersphinx/__init__.py +22 -13
- sphinx/ext/intersphinx/__main__.py +3 -1
- sphinx/ext/intersphinx/_cli.py +18 -14
- sphinx/ext/intersphinx/_load.py +91 -82
- sphinx/ext/intersphinx/_resolve.py +108 -74
- sphinx/ext/intersphinx/_shared.py +2 -2
- sphinx/ext/linkcode.py +28 -12
- sphinx/ext/mathjax.py +60 -29
- sphinx/ext/napoleon/__init__.py +19 -7
- sphinx/ext/napoleon/docstring.py +229 -231
- sphinx/ext/todo.py +44 -49
- sphinx/ext/viewcode.py +105 -57
- sphinx/extension.py +3 -1
- sphinx/highlighting.py +13 -7
- sphinx/io.py +9 -13
- sphinx/jinja2glue.py +29 -26
- sphinx/locale/__init__.py +8 -9
- sphinx/locale/ar/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/ar/LC_MESSAGES/sphinx.po +2155 -2050
- sphinx/locale/bg/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/bg/LC_MESSAGES/sphinx.po +2045 -1940
- sphinx/locale/bn/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/bn/LC_MESSAGES/sphinx.po +2175 -2070
- sphinx/locale/ca/LC_MESSAGES/sphinx.js +3 -3
- sphinx/locale/ca/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/ca/LC_MESSAGES/sphinx.po +2690 -2585
- sphinx/locale/ca@valencia/LC_MESSAGES/sphinx.js +63 -0
- sphinx/locale/ca@valencia/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/ca@valencia/LC_MESSAGES/sphinx.po +4216 -0
- sphinx/locale/cak/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/cak/LC_MESSAGES/sphinx.po +2096 -1991
- sphinx/locale/cs/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/cs/LC_MESSAGES/sphinx.po +2248 -2143
- sphinx/locale/cy/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/cy/LC_MESSAGES/sphinx.po +2201 -2096
- sphinx/locale/da/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/da/LC_MESSAGES/sphinx.po +2282 -2177
- sphinx/locale/de/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/de/LC_MESSAGES/sphinx.po +2261 -2156
- sphinx/locale/de_DE/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/de_DE/LC_MESSAGES/sphinx.po +2045 -1940
- sphinx/locale/el/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/el/LC_MESSAGES/sphinx.po +2604 -2499
- sphinx/locale/en_DE/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/en_DE/LC_MESSAGES/sphinx.po +2045 -1940
- sphinx/locale/en_FR/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/en_FR/LC_MESSAGES/sphinx.po +2045 -1940
- sphinx/locale/en_GB/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/en_GB/LC_MESSAGES/sphinx.po +2631 -2526
- sphinx/locale/en_HK/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/en_HK/LC_MESSAGES/sphinx.po +2045 -1940
- sphinx/locale/eo/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/eo/LC_MESSAGES/sphinx.po +2078 -1973
- sphinx/locale/es/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/es/LC_MESSAGES/sphinx.po +2633 -2528
- sphinx/locale/es_CO/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/es_CO/LC_MESSAGES/sphinx.po +2045 -1940
- sphinx/locale/et/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/et/LC_MESSAGES/sphinx.po +2449 -2344
- sphinx/locale/eu/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/eu/LC_MESSAGES/sphinx.po +2241 -2136
- sphinx/locale/fa/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/fa/LC_MESSAGES/sphinx.po +504 -500
- sphinx/locale/fi/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/fi/LC_MESSAGES/sphinx.po +499 -495
- sphinx/locale/fr/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/fr/LC_MESSAGES/sphinx.po +513 -509
- sphinx/locale/fr_FR/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/fr_FR/LC_MESSAGES/sphinx.po +499 -495
- sphinx/locale/gl/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/gl/LC_MESSAGES/sphinx.po +2644 -2539
- sphinx/locale/he/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/he/LC_MESSAGES/sphinx.po +499 -495
- sphinx/locale/hi/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/hi/LC_MESSAGES/sphinx.po +504 -500
- sphinx/locale/hi_IN/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/hi_IN/LC_MESSAGES/sphinx.po +499 -495
- sphinx/locale/hr/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/hr/LC_MESSAGES/sphinx.po +501 -497
- sphinx/locale/hu/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/hu/LC_MESSAGES/sphinx.po +499 -495
- sphinx/locale/id/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/id/LC_MESSAGES/sphinx.po +2609 -2504
- sphinx/locale/is/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/is/LC_MESSAGES/sphinx.po +499 -495
- sphinx/locale/it/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/it/LC_MESSAGES/sphinx.po +2265 -2160
- sphinx/locale/ja/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/ja/LC_MESSAGES/sphinx.po +2621 -2516
- sphinx/locale/ka/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/ka/LC_MESSAGES/sphinx.po +2567 -2462
- sphinx/locale/ko/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/ko/LC_MESSAGES/sphinx.po +2631 -2526
- sphinx/locale/lt/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/lt/LC_MESSAGES/sphinx.po +2214 -2109
- sphinx/locale/lv/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/lv/LC_MESSAGES/sphinx.po +2218 -2113
- sphinx/locale/mk/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/mk/LC_MESSAGES/sphinx.po +2088 -1983
- sphinx/locale/nb_NO/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/nb_NO/LC_MESSAGES/sphinx.po +2247 -2142
- sphinx/locale/ne/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/ne/LC_MESSAGES/sphinx.po +2227 -2122
- sphinx/locale/nl/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/nl/LC_MESSAGES/sphinx.po +2316 -2211
- sphinx/locale/pl/LC_MESSAGES/sphinx.js +2 -2
- sphinx/locale/pl/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/pl/LC_MESSAGES/sphinx.po +2442 -2336
- sphinx/locale/pt/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/pt/LC_MESSAGES/sphinx.po +2045 -1940
- sphinx/locale/pt_BR/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/pt_BR/LC_MESSAGES/sphinx.po +2657 -2552
- sphinx/locale/pt_PT/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/pt_PT/LC_MESSAGES/sphinx.po +2243 -2138
- sphinx/locale/ro/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/ro/LC_MESSAGES/sphinx.po +2244 -2139
- sphinx/locale/ru/LC_MESSAGES/sphinx.js +1 -1
- sphinx/locale/ru/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/ru/LC_MESSAGES/sphinx.po +2660 -2555
- sphinx/locale/si/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/si/LC_MESSAGES/sphinx.po +2134 -2029
- sphinx/locale/sk/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/sk/LC_MESSAGES/sphinx.po +2614 -2509
- sphinx/locale/sl/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/sl/LC_MESSAGES/sphinx.po +2167 -2062
- sphinx/locale/sphinx.pot +2069 -1964
- sphinx/locale/sq/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/sq/LC_MESSAGES/sphinx.po +2661 -2556
- sphinx/locale/sr/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/sr/LC_MESSAGES/sphinx.po +2213 -2108
- sphinx/locale/sv/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/sv/LC_MESSAGES/sphinx.po +2229 -2124
- sphinx/locale/te/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/te/LC_MESSAGES/sphinx.po +2045 -1940
- sphinx/locale/tr/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/tr/LC_MESSAGES/sphinx.po +2608 -2503
- sphinx/locale/uk_UA/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/uk_UA/LC_MESSAGES/sphinx.po +2167 -2062
- sphinx/locale/ur/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/ur/LC_MESSAGES/sphinx.po +2045 -1940
- sphinx/locale/vi/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/vi/LC_MESSAGES/sphinx.po +2204 -2099
- sphinx/locale/yue/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/yue/LC_MESSAGES/sphinx.po +2045 -1940
- sphinx/locale/zh_HK/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/zh_HK/LC_MESSAGES/sphinx.po +2045 -1940
- sphinx/locale/zh_TW/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/zh_TW/LC_MESSAGES/sphinx.po +2659 -2554
- sphinx/locale/zh_TW.Big5/LC_MESSAGES/sphinx.mo +0 -0
- sphinx/locale/zh_TW.Big5/LC_MESSAGES/sphinx.po +2045 -1940
- sphinx/parsers.py +8 -7
- sphinx/project.py +2 -2
- sphinx/pycode/__init__.py +31 -21
- sphinx/pycode/ast.py +6 -3
- sphinx/pycode/parser.py +14 -8
- sphinx/pygments_styles.py +4 -5
- sphinx/registry.py +192 -92
- sphinx/roles.py +58 -7
- sphinx/search/__init__.py +75 -54
- sphinx/search/en.py +11 -13
- sphinx/search/fi.py +1 -1
- sphinx/search/ja.py +8 -6
- sphinx/search/nl.py +1 -1
- sphinx/search/zh.py +19 -21
- sphinx/testing/fixtures.py +26 -29
- sphinx/testing/path.py +26 -62
- sphinx/testing/restructuredtext.py +14 -8
- sphinx/testing/util.py +21 -19
- sphinx/texinputs/make.bat.jinja +50 -50
- sphinx/texinputs/sphinx.sty +4 -3
- sphinx/texinputs/sphinxlatexadmonitions.sty +1 -1
- sphinx/texinputs/sphinxlatexobjects.sty +29 -10
- sphinx/themes/basic/static/searchtools.js +8 -5
- sphinx/theming.py +49 -61
- sphinx/transforms/__init__.py +17 -38
- sphinx/transforms/compact_bullet_list.py +5 -3
- sphinx/transforms/i18n.py +8 -21
- sphinx/transforms/post_transforms/__init__.py +142 -93
- sphinx/transforms/post_transforms/code.py +5 -5
- sphinx/transforms/post_transforms/images.py +28 -24
- sphinx/transforms/references.py +3 -1
- sphinx/util/__init__.py +109 -60
- sphinx/util/_files.py +39 -23
- sphinx/util/_importer.py +4 -1
- sphinx/util/_inventory_file_reader.py +76 -0
- sphinx/util/_io.py +2 -2
- sphinx/util/_lines.py +6 -3
- sphinx/util/_pathlib.py +40 -2
- sphinx/util/build_phase.py +2 -0
- sphinx/util/cfamily.py +19 -14
- sphinx/util/console.py +44 -179
- sphinx/util/display.py +9 -10
- sphinx/util/docfields.py +140 -122
- sphinx/util/docstrings.py +1 -1
- sphinx/util/docutils.py +118 -77
- sphinx/util/fileutil.py +25 -26
- sphinx/util/http_date.py +2 -0
- sphinx/util/i18n.py +77 -64
- sphinx/util/images.py +8 -6
- sphinx/util/inspect.py +147 -38
- sphinx/util/inventory.py +215 -116
- sphinx/util/logging.py +33 -33
- sphinx/util/matching.py +12 -4
- sphinx/util/nodes.py +18 -13
- sphinx/util/osutil.py +38 -39
- sphinx/util/parallel.py +22 -13
- sphinx/util/parsing.py +2 -1
- sphinx/util/png.py +6 -2
- sphinx/util/requests.py +33 -2
- sphinx/util/rst.py +3 -2
- sphinx/util/tags.py +1 -1
- sphinx/util/template.py +18 -10
- sphinx/util/texescape.py +8 -6
- sphinx/util/typing.py +148 -122
- sphinx/versioning.py +3 -3
- sphinx/writers/html.py +3 -1
- sphinx/writers/html5.py +63 -52
- sphinx/writers/latex.py +83 -67
- sphinx/writers/manpage.py +19 -38
- sphinx/writers/texinfo.py +47 -47
- sphinx/writers/text.py +50 -32
- sphinx/writers/xml.py +11 -8
- {sphinx-8.1.2.dist-info → sphinx-8.2.0.dist-info}/LICENSE.rst +1 -1
- {sphinx-8.1.2.dist-info → sphinx-8.2.0.dist-info}/METADATA +25 -15
- sphinx-8.2.0.dist-info/RECORD +606 -0
- {sphinx-8.1.2.dist-info → sphinx-8.2.0.dist-info}/WHEEL +1 -1
- sphinx/builders/html/transforms.py +0 -90
- sphinx/ext/apidoc.py +0 -721
- sphinx/util/exceptions.py +0 -74
- sphinx-8.1.2.dist-info/RECORD +0 -598
- {sphinx-8.1.2.dist-info → sphinx-8.2.0.dist-info}/entry_points.txt +0 -0
sphinx/ext/napoleon/docstring.py
CHANGED
@@ -51,12 +51,11 @@ _token_regex = re.compile(
 _default_regex = re.compile(
     r'^default[^_0-9A-Za-z].*$',
 )
-_SINGLETONS = ('None', 'True', 'False', 'Ellipsis')
+_SINGLETONS = frozenset({'None', 'True', 'False', 'Ellipsis', '...'})


 class Deque(collections.deque[Any]):
-    """
-    A subclass of deque that mimics ``pockets.iterators.modify_iter``.
+    """A subclass of deque that mimics ``pockets.iterators.modify_iter``.

     The `.Deque.get` and `.Deque.next` methods are added.
     """
@@ -64,8 +63,7 @@ class Deque(collections.deque[Any]):
     sentinel = object()

     def get(self, n: int) -> Any:
-        """
-        Return the nth element of the stack, or ``self.sentinel`` if n is
+        """Return the nth element of the stack, or ``self.sentinel`` if n is
         greater than the stack size.
         """
         return self[n] if n < len(self) else self.sentinel
@@ -77,13 +75,183 @@ class Deque(collections.deque[Any]):
         raise StopIteration


-def
-
-
-
-
-
-
+def _recombine_set_tokens(tokens: list[str]) -> list[str]:
+    token_queue = collections.deque(tokens)
+    keywords = ('optional', 'default')
+
+    def takewhile_set(tokens: collections.deque[str]) -> Iterator[str]:
+        open_braces = 0
+        previous_token = None
+        while True:
+            try:
+                token = tokens.popleft()
+            except IndexError:
+                break
+
+            if token == ', ':
+                previous_token = token
+                continue
+
+            if not token.strip():
+                continue
+
+            if token in keywords:
+                tokens.appendleft(token)
+                if previous_token is not None:
+                    tokens.appendleft(previous_token)
+                break
+
+            if previous_token is not None:
+                yield previous_token
+                previous_token = None
+
+            if token == '{':
+                open_braces += 1
+            elif token == '}':
+                open_braces -= 1
+
+            yield token
+
+            if open_braces == 0:
+                break
+
+    def combine_set(tokens: collections.deque[str]) -> Iterator[str]:
+        while True:
+            try:
+                token = tokens.popleft()
+            except IndexError:
+                break
+
+            if token == '{':
+                tokens.appendleft('{')
+                yield ''.join(takewhile_set(tokens))
+            else:
+                yield token
+
+    return list(combine_set(token_queue))
+
+
+def _tokenize_type_spec(spec: str) -> list[str]:
+    def postprocess(item: str) -> list[str]:
+        if _default_regex.match(item):
+            default = item[:7]
+            # can't be separated by anything other than a single space
+            # for now
+            other = item[8:]
+
+            return [default, ' ', other]
+        else:
+            return [item]
+
+    tokens = [
+        item
+        for raw_token in _token_regex.split(spec)
+        for item in postprocess(raw_token)
+        if item
+    ]
+    return tokens
+
+
+def _token_type(token: str, debug_location: str | None = None) -> str:
+    def is_numeric(token: str) -> bool:
+        try:
+            # use complex to make sure every numeric value is detected as literal
+            complex(token)
+        except ValueError:
+            return False
+        else:
+            return True
+
+    if token.startswith(' ') or token.endswith(' '):
+        type_ = 'delimiter'
+    elif (
+        is_numeric(token)
+        or (token.startswith('{') and token.endswith('}'))
+        or (token.startswith('"') and token.endswith('"'))
+        or (token.startswith("'") and token.endswith("'"))
+    ):
+        type_ = 'literal'
+    elif token.startswith('{'):
+        logger.warning(
+            __('invalid value set (missing closing brace): %s'),
+            token,
+            location=debug_location,
+        )
+        type_ = 'literal'
+    elif token.endswith('}'):
+        logger.warning(
+            __('invalid value set (missing opening brace): %s'),
+            token,
+            location=debug_location,
+        )
+        type_ = 'literal'
+    elif token.startswith(("'", '"')):
+        logger.warning(
+            __('malformed string literal (missing closing quote): %s'),
+            token,
+            location=debug_location,
+        )
+        type_ = 'literal'
+    elif token.endswith(("'", '"')):
+        logger.warning(
+            __('malformed string literal (missing opening quote): %s'),
+            token,
+            location=debug_location,
+        )
+        type_ = 'literal'
+    elif token in {'optional', 'default'}:
+        # default is not a official keyword (yet) but supported by the
+        # reference implementation (numpydoc) and widely used
+        type_ = 'control'
+    elif _xref_regex.match(token):
+        type_ = 'reference'
+    else:
+        type_ = 'obj'
+
+    return type_
+
+
+def _convert_type_spec(
+    _type: str,
+    translations: dict[str, str] | None = None,
+    debug_location: str | None = None,
+) -> str:
+    if translations is None:
+        translations = {}
+
+    tokens = _tokenize_type_spec(_type)
+    combined_tokens = _recombine_set_tokens(tokens)
+    types = [(token, _token_type(token, debug_location)) for token in combined_tokens]
+
+    converters = {
+        'literal': lambda x: f'``{x}``',
+        'obj': lambda x: _convert_type_spec_obj(x, translations),
+        'control': lambda x: f'*{x}*',
+        'delimiter': lambda x: x,
+        'reference': lambda x: x,
+    }
+
+    converted = ''.join(
+        converters.get(type_)(token)  # type: ignore[misc]
+        for token, type_ in types
+    )
+
+    return converted
+
+
+def _convert_type_spec_obj(obj: str, translations: dict[str, str]) -> str:
+    translation = translations.get(obj, obj)
+
+    if _xref_regex.match(translation) is not None:
+        return translation
+
+    # use :py:obj: if obj is a standard singleton
+    if translation in _SINGLETONS:
+        if translation == '...':  # allow referencing the builtin ...
+            return ':py:obj:`... <Ellipsis>`'
+        return f':py:obj:`{translation}`'
+
+    return f':py:class:`{translation}`'


 class GoogleDocstring:
@@ -252,6 +420,20 @@ class GoogleDocstring:
         """
         return '\n'.join(self.lines())

+    def _get_location(self) -> str | None:
+        try:
+            filepath = inspect.getfile(self._obj) if self._obj is not None else None
+        except TypeError:
+            filepath = None
+        name = self._name
+
+        if filepath is None and name is None:
+            return None
+        elif filepath is None:
+            filepath = ''
+
+        return f'{filepath}:docstring of {name}'
+
     def lines(self) -> list[str]:
         """Return the parsed lines of the docstring in reStructuredText format.

@@ -309,7 +491,11 @@ class GoogleDocstring:
             _type, _name = _name, _type

         if _type and self._config.napoleon_preprocess_types:
-            _type = _convert_type_spec(
+            _type = _convert_type_spec(
+                _type,
+                translations=self._config.napoleon_type_aliases or {},
+                debug_location=self._get_location(),
+            )

         indent = self._get_indent(line) + 1
         _descs = [_desc, *self._dedent(self._consume_indented_block(indent))]
@@ -357,7 +543,9 @@ class GoogleDocstring:

         if _type and preprocess_types and self._config.napoleon_preprocess_types:
             _type = _convert_type_spec(
-                _type,
+                _type,
+                translations=self._config.napoleon_type_aliases or {},
+                debug_location=self._get_location(),
             )

         _desc = self.__class__(_desc, self._config).lines()
@@ -428,9 +616,9 @@ class GoogleDocstring:
             return [f'.. {admonition}:: {lines[0].strip()}', '']
         elif lines:
             lines = self._indent(self._dedent(lines), 3)
-            return ['..
+            return [f'.. {admonition}::', '', *lines, '']
         else:
-            return ['..
+            return [f'.. {admonition}::', '']

     def _format_block(
         self,
@@ -507,7 +695,7 @@ class GoogleDocstring:
         field_type: str,
         fields: list[tuple[str, str, list[str]]],
     ) -> list[str]:
-        field_type = '
+        field_type = f':{field_type.strip()}:'
         padding = ' ' * len(field_type)
         multi = len(fields) > 1
         lines: list[str] = []
@@ -558,7 +746,7 @@ class GoogleDocstring:
         return [(' ' * n) + line for line in lines]

     def _is_indented(self, line: str, indent: int = 1) -> bool:
-        for i, s in enumerate(line):
+        for i, s in enumerate(line):
             if i >= indent:
                 return True
             elif not s.isspace():
@@ -635,7 +823,7 @@ class GoogleDocstring:
     def _parse(self) -> None:
         self._parsed_lines = self._consume_empty()

-        if self._name and self._what in
+        if self._name and self._what in {'attribute', 'data', 'property'}:
             res: list[str] = []
             with contextlib.suppress(StopIteration):
                 res = self._parse_attribute_docstring()
@@ -672,7 +860,7 @@ class GoogleDocstring:
         _type, _desc = self._consume_inline_attribute()
         lines = self._format_field('', '', _desc)
         if _type:
-            lines.extend(['', ':type:
+            lines.extend(['', f':type: {_type}'])
         return lines

     def _parse_attributes_section(self, section: str) -> list[str]:
@@ -681,7 +869,7 @@ class GoogleDocstring:
             if not _type:
                 _type = self._lookup_annotation(_name)
             if self._config.napoleon_use_ivar:
-                field = ':ivar
+                field = f':ivar {_name}: '
                 lines.extend(self._format_block(field, _desc))
                 if _type:
                     lines.append(f':vartype {_name}: {_type}')
@@ -696,7 +884,7 @@ class GoogleDocstring:
                 lines.extend(self._indent(fields, 3))
                 if _type:
                     lines.append('')
-                    lines.extend(self._indent([':type:
+                    lines.extend(self._indent([f':type: {_type}'], 3))
                 lines.append('')
         if self._config.napoleon_use_ivar:
             lines.append('')
@@ -733,10 +921,10 @@ class GoogleDocstring:
         lines = self._strip_empty(self._consume_to_next_section())
         lines = self._dedent(lines)
         if use_admonition:
-            header = '.. admonition::
+            header = f'.. admonition:: {section}'
             lines = self._indent(lines, 3)
         else:
-            header = '.. rubric::
+            header = f'.. rubric:: {section}'
         if lines:
             return [header, '', *lines, '']
         else:
@@ -754,7 +942,7 @@ class GoogleDocstring:
     def _parse_methods_section(self, section: str) -> list[str]:
         lines: list[str] = []
         for _name, _type, _desc in self._consume_fields(parse_type=False):
-            lines.append('.. method::
+            lines.append(f'.. method:: {_name}')
             if self._opt:
                 if 'no-index' in self._opt or 'noindex' in self._opt:
                     lines.append('   :no-index:')
@@ -837,7 +1025,7 @@ class GoogleDocstring:
                 if any(field):  # only add :returns: if there's something to say
                     lines.extend(self._format_block(':returns: ', field))
                 if _type and use_rtype:
-                    lines.extend([':rtype:
+                    lines.extend([f':rtype: {_type}', ''])
         if lines and lines[-1]:
             lines.append('')
         return lines
@@ -893,7 +1081,7 @@ class GoogleDocstring:

     def _lookup_annotation(self, _name: str) -> str:
         if self._config.napoleon_attr_annotations:
-            if self._what in
+            if self._what in {'module', 'class', 'exception'} and self._obj:
                 # cache the class annotations
                 if not hasattr(self, '_annotations'):
                     localns = getattr(self._config, 'autodoc_type_aliases', {})
@@ -907,194 +1095,18 @@ class GoogleDocstring:
                     )
                     self._annotations = get_type_hints(self._obj, None, localns)
                 if _name in self._annotations:
+                    short_literals = getattr(
+                        self._config, 'python_display_short_literal_types', False
+                    )
                     return stringify_annotation(
-                        self._annotations[_name],
+                        self._annotations[_name],
+                        mode='fully-qualified-except-typing',
+                        short_literals=short_literals,
                     )
         # No annotation found
         return ''


-def _recombine_set_tokens(tokens: list[str]) -> list[str]:
-    token_queue = collections.deque(tokens)
-    keywords = ('optional', 'default')
-
-    def takewhile_set(tokens: collections.deque[str]) -> Iterator[str]:
-        open_braces = 0
-        previous_token = None
-        while True:
-            try:
-                token = tokens.popleft()
-            except IndexError:
-                break
-
-            if token == ', ':
-                previous_token = token
-                continue
-
-            if not token.strip():
-                continue
-
-            if token in keywords:
-                tokens.appendleft(token)
-                if previous_token is not None:
-                    tokens.appendleft(previous_token)
-                break
-
-            if previous_token is not None:
-                yield previous_token
-                previous_token = None
-
-            if token == '{':
-                open_braces += 1
-            elif token == '}':
-                open_braces -= 1
-
-            yield token
-
-            if open_braces == 0:
-                break
-
-    def combine_set(tokens: collections.deque[str]) -> Iterator[str]:
-        while True:
-            try:
-                token = tokens.popleft()
-            except IndexError:
-                break
-
-            if token == '{':
-                tokens.appendleft('{')
-                yield ''.join(takewhile_set(tokens))
-            else:
-                yield token
-
-    return list(combine_set(token_queue))
-
-
-def _tokenize_type_spec(spec: str) -> list[str]:
-    def postprocess(item: str) -> list[str]:
-        if _default_regex.match(item):
-            default = item[:7]
-            # can't be separated by anything other than a single space
-            # for now
-            other = item[8:]
-
-            return [default, ' ', other]
-        else:
-            return [item]
-
-    tokens = [
-        item
-        for raw_token in _token_regex.split(spec)
-        for item in postprocess(raw_token)
-        if item
-    ]
-    return tokens
-
-
-def _token_type(token: str, location: str | None = None) -> str:
-    def is_numeric(token: str) -> bool:
-        try:
-            # use complex to make sure every numeric value is detected as literal
-            complex(token)
-        except ValueError:
-            return False
-        else:
-            return True
-
-    if token.startswith(' ') or token.endswith(' '):
-        type_ = 'delimiter'
-    elif (
-        is_numeric(token)
-        or (token.startswith('{') and token.endswith('}'))
-        or (token.startswith('"') and token.endswith('"'))
-        or (token.startswith("'") and token.endswith("'"))
-    ):
-        type_ = 'literal'
-    elif token.startswith('{'):
-        logger.warning(
-            __('invalid value set (missing closing brace): %s'),
-            token,
-            location=location,
-        )
-        type_ = 'literal'
-    elif token.endswith('}'):
-        logger.warning(
-            __('invalid value set (missing opening brace): %s'),
-            token,
-            location=location,
-        )
-        type_ = 'literal'
-    elif token.startswith(("'", '"')):
-        logger.warning(
-            __('malformed string literal (missing closing quote): %s'),
-            token,
-            location=location,
-        )
-        type_ = 'literal'
-    elif token.endswith(("'", '"')):
-        logger.warning(
-            __('malformed string literal (missing opening quote): %s'),
-            token,
-            location=location,
-        )
-        type_ = 'literal'
-    elif token in ('optional', 'default'):
-        # default is not a official keyword (yet) but supported by the
-        # reference implementation (numpydoc) and widely used
-        type_ = 'control'
-    elif _xref_regex.match(token):
-        type_ = 'reference'
-    else:
-        type_ = 'obj'
-
-    return type_
-
-
-def _convert_numpy_type_spec(
-    _type: str,
-    location: str | None = None,
-    translations: dict[str, str] | None = None,
-) -> str:
-    if translations is None:
-        translations = {}
-
-    def convert_obj(
-        obj: str, translations: dict[str, str], default_translation: str
-    ) -> str:
-        translation = translations.get(obj, obj)
-
-        # use :class: (the default) only if obj is not a standard singleton
-        if translation in _SINGLETONS and default_translation == ':class:`%s`':
-            default_translation = ':obj:`%s`'
-        elif translation == '...' and default_translation == ':class:`%s`':
-            # allow referencing the builtin ...
-            default_translation = ':obj:`%s <Ellipsis>`'
-
-        if _xref_regex.match(translation) is None:
-            translation = default_translation % translation
-
-        return translation
-
-    tokens = _tokenize_type_spec(_type)
-    combined_tokens = _recombine_set_tokens(tokens)
-    types = [(token, _token_type(token, location)) for token in combined_tokens]
-
-    converters = {
-        'literal': lambda x: '``%s``' % x,
-        'obj': lambda x: convert_obj(x, translations, ':class:`%s`'),
-        'control': lambda x: '*%s*' % x,
-        'delimiter': lambda x: x,
-        'reference': lambda x: x,
-    }
-
-    converted = ''.join(
-        converters.get(type_)(token)  # type: ignore[misc]
-        for token, type_ in types
-    )
-
-    return converted
-
-
 class NumpyDocstring(GoogleDocstring):
     """Convert NumPy style docstrings to reStructuredText.

@@ -1202,20 +1214,6 @@ class NumpyDocstring(GoogleDocstring):
         self._directive_sections = ['.. index::']
         super().__init__(docstring, config, app, what, name, obj, options)

-    def _get_location(self) -> str | None:
-        try:
-            filepath = inspect.getfile(self._obj) if self._obj is not None else None
-        except TypeError:
-            filepath = None
-        name = self._name
-
-        if filepath is None and name is None:
-            return None
-        elif filepath is None:
-            filepath = ''
-
-        return f'{filepath}:docstring of {name}'
-
     def _escape_args_and_kwargs(self, name: str) -> str:
         func = super()._escape_args_and_kwargs

@@ -1242,10 +1240,10 @@ class NumpyDocstring(GoogleDocstring):
             _type, _name = _name, _type

         if self._config.napoleon_preprocess_types:
-            _type =
+            _type = _convert_type_spec(
                 _type,
-                location=self._get_location(),
                 translations=self._config.napoleon_type_aliases or {},
+                debug_location=self._get_location(),
             )

         indent = self._get_indent(line) + 1
@@ -1270,7 +1268,7 @@ class NumpyDocstring(GoogleDocstring):
         return (
             not self._lines
             or self._is_section_header()
-            or (line1
+            or (not line1 and not line2)
             or (
                 self._is_in_section
                 and line1
@@ -1298,8 +1296,7 @@ class NumpyDocstring(GoogleDocstring):
         return self._format_admonition('seealso', lines)

     def _parse_numpydoc_see_also_section(self, content: list[str]) -> list[str]:
-        """
-        See Also
+        """See Also
         --------
         func_name : Descriptive text
             continued text
@@ -1349,7 +1346,8 @@ class NumpyDocstring(GoogleDocstring):
                     return g[3], None
                 else:
                     return g[2], g[1]
-
+            msg = f'{text} is not a item name'
+            raise ValueError(msg)

         def push_item(name: str | None, rest: list[str]) -> None:
             if not name:
@@ -1417,12 +1415,12 @@ class NumpyDocstring(GoogleDocstring):
             if role:
                 link = f':{role}:`{name}`'
             else:
-                link = ':obj
+                link = f':py:obj:`{name}`'
             if desc or last_had_desc:
                 lines += ['']
                 lines += [link]
             else:
-                lines[-1] += ',
+                lines[-1] += f', {link}'
             if desc:
                 lines += self._indent([' '.join(desc)])
                 last_had_desc = True
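The main napoleon change above replaces the NumPy-only `_convert_numpy_type_spec` with a module-level `_convert_type_spec` shared by both Google- and NumPy-style parsing, with warning locations passed as `debug_location`. Below is a minimal sketch of what the new pipeline emits, assuming the private helper keeps the signature shown in the diff; it is not public API, and the printed results are expectations rather than guarantees.

```python
# Illustrative only: _convert_type_spec is a private Sphinx helper (sphinx 8.2);
# the calls below assume the signature shown in the diff and may break later.
from sphinx.ext.napoleon.docstring import _convert_type_spec

# A value set plus the "optional" keyword, as written in a NumPy-style field.
print(_convert_type_spec("{'F', 'C'}, optional"))
# expected: ``{'F', 'C'}``, *optional*

# Plain names become :py:class: references, singletons use :py:obj:, and
# entries matching napoleon_type_aliases-style translations pass through as-is.
print(_convert_type_spec('int or None', translations={'int': ':class:`int`'}))
# expected: :class:`int` or :py:obj:`None`
```

In normal builds this conversion only runs when `napoleon_preprocess_types = True` is set in `conf.py`, as the guarded calls in the diff show.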