docs2epub 0.1.2__tar.gz → 0.1.6__tar.gz
- docs2epub-0.1.6/AGENTS.md +41 -0
- {docs2epub-0.1.2 → docs2epub-0.1.6}/PKG-INFO +1 -1
- {docs2epub-0.1.2 → docs2epub-0.1.6}/pyproject.toml +1 -1
- docs2epub-0.1.6/src/docs2epub/docusaurus_next.py +412 -0
- docs2epub-0.1.6/tests/test_docusaurus_next.py +42 -0
- docs2epub-0.1.6/tests/test_sidebar_scrape.py +163 -0
- {docs2epub-0.1.2 → docs2epub-0.1.6}/uv.lock +1 -1
- docs2epub-0.1.2/src/docs2epub/docusaurus_next.py +0 -140
- {docs2epub-0.1.2 → docs2epub-0.1.6}/.github/workflows/ci.yml +0 -0
- {docs2epub-0.1.2 → docs2epub-0.1.6}/.github/workflows/publish.yml +0 -0
- {docs2epub-0.1.2 → docs2epub-0.1.6}/.gitignore +0 -0
- {docs2epub-0.1.2 → docs2epub-0.1.6}/README.md +0 -0
- {docs2epub-0.1.2 → docs2epub-0.1.6}/src/docs2epub/__init__.py +0 -0
- {docs2epub-0.1.2 → docs2epub-0.1.6}/src/docs2epub/cli.py +0 -0
- {docs2epub-0.1.2 → docs2epub-0.1.6}/src/docs2epub/epub.py +0 -0
- {docs2epub-0.1.2 → docs2epub-0.1.6}/src/docs2epub/kindle_html.py +0 -0
- {docs2epub-0.1.2 → docs2epub-0.1.6}/src/docs2epub/model.py +0 -0
- {docs2epub-0.1.2 → docs2epub-0.1.6}/src/docs2epub/pandoc_epub2.py +0 -0
- {docs2epub-0.1.2 → docs2epub-0.1.6}/tests/test_smoke.py +0 -0
docs2epub-0.1.6/AGENTS.md (new file)
@@ -0,0 +1,41 @@
+# docs2epub AGENTS
+
+This file documents local conventions for working on `docs2epub`.
+
+**Focus**
+- Keep the scraper generalistic across doc-site frontends (GitBook, Docusaurus, similar).
+- Prefer resilient HTML heuristics over site-specific hacks.
+- Fail gracefully when content is missing; surface actionable errors.
+
+**Development Workflow**
+- Use TDD for bug fixes and new behaviors. Add a failing test first.
+- Prefer unit tests with `monkeypatch` and deterministic HTML fixtures.
+- Keep tests fast and offline. Only do real network checks for manual validation.
+- Run tests with `uv run pytest -q`.
+
+**Scraping Heuristics**
+- Primary crawl: sidebar/index extraction.
+- Expand index/category pages by collecting in-page content links.
+- Fallback crawl: “Next” navigation when no sidebar is found.
+- Normalize URLs: strip fragments and queries; lower-case scheme/host.
+- Filter non-doc links by extension; avoid cross-origin URLs by default.
+- Resolve relative URLs against the page URL, not the site root.
+
+**Code Layout**
+- Core crawler logic: `src/docs2epub/docusaurus_next.py`.
+- EPUB generation: `src/docs2epub/epub.py` and `src/docs2epub/pandoc_epub2.py`.
+- HTML cleanup: `src/docs2epub/kindle_html.py`.
+- Tests live in `tests/`.
+
+**Release Discipline**
+- Bump version in `pyproject.toml` for user-visible changes.
+- Run `uv lock` after bumping the version.
+- Build artifacts with `uv build` before publishing.
+- Publish with `uv publish` when explicitly requested.
+- Do not commit generated EPUBs or other artifacts.
+
+**Validation**
+- Quick manual checks (optional):
+  - `uvx --from . docs2epub https://midl.gitbook.io/midl out.epub`
+  - `uvx --from . docs2epub https://tutorial.docusaurus.io/docs/intro out.epub`
+- Clean up any generated files after validation.
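The URL-normalization and root-scoping heuristics above correspond to `_canonicalize_url` and `_infer_root_path` in the new module added below. A minimal sketch of the intended behavior, using arbitrary example URLs:

from docs2epub.docusaurus_next import _canonicalize_url, _infer_root_path

# Fragments and queries are dropped, scheme and host are lower-cased,
# and a trailing slash is trimmed (path casing is left untouched).
assert _canonicalize_url("HTTPS://Example.com/docs/intro/?q=1#top") == "https://example.com/docs/intro"

# The crawl root is the parent of the start page, so sibling pages under
# /docs stay in scope while unrelated paths are filtered out.
assert _infer_root_path("https://example.com/docs/intro") == "/docs"

Canonical keys like these are what the crawler stores in its `visited` and `discovered` sets, so each page is fetched once regardless of trailing slashes or fragments.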
docs2epub-0.1.6/src/docs2epub/docusaurus_next.py (new file)
@@ -0,0 +1,412 @@
+from __future__ import annotations
+
+import re
+from dataclasses import dataclass
+from urllib.parse import urljoin, urlparse
+
+import requests
+from bs4 import BeautifulSoup, Tag
+
+from .model import Chapter
+
+
+DEFAULT_USER_AGENT = "docs2epub/0.1 (+https://github.com/brenorb/docs2epub)"
+
+_SIDEBAR_SELECTORS = [
+    'aside[data-testid="table-of-contents"]',
+    "aside#table-of-contents",
+    'nav[aria-label="Table of contents"]',
+    'nav[aria-label="Table of Contents"]',
+    'nav[aria-label="Docs sidebar"]',
+    'nav[aria-label="Docs navigation"]',
+    'nav[aria-label="Documentation"]',
+    'nav[aria-label="Docs"]',
+    "aside.theme-doc-sidebar-container",
+    "div.theme-doc-sidebar-container",
+    "nav.theme-doc-sidebar-menu",
+    "nav.menu",
+    'nav[class*="menu"]',
+    'aside[class*="sidebar"]',
+    'nav[class*="sidebar"]',
+]
+
+_NON_DOC_EXTENSIONS = {
+    ".png",
+    ".jpg",
+    ".jpeg",
+    ".gif",
+    ".svg",
+    ".webp",
+    ".css",
+    ".js",
+    ".map",
+    ".json",
+    ".xml",
+    ".rss",
+    ".pdf",
+    ".zip",
+    ".tar",
+    ".gz",
+    ".tgz",
+    ".epub",
+    ".mp4",
+    ".mp3",
+    ".wav",
+}
+
+
+@dataclass(frozen=True)
+class DocusaurusNextOptions:
+    start_url: str
+    base_url: str | None = None
+    max_pages: int | None = None
+    sleep_s: float = 0.5
+    user_agent: str = DEFAULT_USER_AGENT
+
+
+def _slugify_filename(text: str) -> str:
+    value = text.strip().lower()
+    value = re.sub(r"[^\w\s-]", "", value)
+    value = re.sub(r"[\s_-]+", "-", value)
+    value = value.strip("-")
+    return value or "chapter"
+
+
+def _extract_article(soup: BeautifulSoup) -> Tag:
+    article = soup.find("article")
+    if article:
+        return article
+    main = soup.find("main")
+    if main:
+        article = main.find("article")
+        if article:
+            return article
+        return main
+    role_main = soup.find(attrs={"role": "main"})
+    if role_main:
+        return role_main
+    raise RuntimeError("Could not find <article> in page HTML")
+
+
+def _canonicalize_url(url: str) -> str:
+    parsed = urlparse(url)
+    path = parsed.path or "/"
+    if path != "/" and path.endswith("/"):
+        path = path.rstrip("/")
+    return parsed._replace(
+        scheme=parsed.scheme.lower(),
+        netloc=parsed.netloc.lower(),
+        path=path,
+        query="",
+        fragment="",
+    ).geturl()
+
+
+def _infer_root_path(start_url: str) -> str:
+    parsed = urlparse(start_url)
+    path = (parsed.path or "").rstrip("/")
+    if not path:
+        return ""
+    parts = path.split("/")
+    if len(parts) <= 2:
+        return path
+    return "/".join(parts[:-1])
+
+
+def _path_within_root(path: str, root_path: str) -> bool:
+    if not root_path or root_path == "/":
+        return True
+    if path == root_path:
+        return True
+    root = root_path if root_path.endswith("/") else f"{root_path}/"
+    return path.startswith(root)
+
+
+def _is_probable_doc_link(url: str) -> bool:
+    parsed = urlparse(url)
+    path = (parsed.path or "").lower()
+    for ext in _NON_DOC_EXTENSIONS:
+        if path.endswith(ext):
+            return False
+    return True
+
+
+def _sidebar_candidates(soup: BeautifulSoup) -> list[Tag]:
+    seen: set[int] = set()
+    candidates: list[Tag] = []
+
+    for selector in _SIDEBAR_SELECTORS:
+        for el in soup.select(selector):
+            key = id(el)
+            if key in seen:
+                continue
+            seen.add(key)
+            candidates.append(el)
+
+    keywords = ["sidebar", "toc", "table of contents", "table-of-contents", "docs", "documentation"]
+    for el in soup.find_all(["nav", "aside", "div"]):
+        key = id(el)
+        if key in seen:
+            continue
+        label = str(el.get("aria-label") or "").lower()
+        elem_id = str(el.get("id") or "").lower()
+        data_testid = str(el.get("data-testid") or "").lower()
+        classes = " ".join(el.get("class", [])).lower()
+        haystack = " ".join([label, elem_id, data_testid, classes])
+        if any(k in haystack for k in keywords):
+            seen.add(key)
+            candidates.append(el)
+
+    return candidates
+
+
+def _looks_like_pager(container: Tag, links: list[Tag]) -> bool:
+    label = str(container.get("aria-label") or "").lower()
+    if "docs pages" in label or "breadcrumb" in label:
+        return True
+    if not links:
+        return True
+    texts = []
+    for a in links:
+        text = " ".join(a.get_text(" ", strip=True).split()).lower()
+        if text:
+            texts.append(text)
+    if not texts:
+        return False
+    pager_words = {"next", "previous", "prev", "back"}
+    return all(text in pager_words for text in texts)
+
+
+def _extract_sidebar_urls(
+    soup: BeautifulSoup,
+    *,
+    base_url: str,
+    start_url: str,
+) -> list[str]:
+    candidates = _sidebar_candidates(soup)
+    if not candidates:
+        return []
+
+    origin = urlparse(start_url).netloc.lower()
+    root_path = _infer_root_path(start_url)
+    best: list[str] = []
+    for container in candidates:
+        anchors = list(container.find_all("a", href=True))
+        if _looks_like_pager(container, anchors):
+            continue
+
+        urls: list[str] = []
+        seen: set[str] = set()
+        for a in anchors:
+            href = str(a.get("href") or "").strip()
+            if not href or href.startswith("#"):
+                continue
+            if href.startswith(("mailto:", "tel:", "javascript:")):
+                continue
+            abs_url = urljoin(base_url, href)
+            parsed = urlparse(abs_url)
+            if parsed.scheme not in ("http", "https"):
+                continue
+            if origin and parsed.netloc.lower() != origin:
+                continue
+            if not _is_probable_doc_link(abs_url):
+                continue
+            if not _path_within_root(parsed.path or "", root_path):
+                continue
+            canonical = _canonicalize_url(abs_url)
+            if canonical in seen:
+                continue
+            seen.add(canonical)
+            urls.append(canonical)
+
+        if len(urls) > len(best):
+            best = urls
+
+    return best
+
+
+def _extract_content_urls(
+    container: Tag,
+    *,
+    base_url: str,
+    start_url: str,
+) -> list[str]:
+    origin = urlparse(start_url).netloc.lower()
+    root_path = _infer_root_path(start_url)
+    urls: list[str] = []
+    seen: set[str] = set()
+
+    for a in container.find_all("a", href=True):
+        href = str(a.get("href") or "").strip()
+        if not href or href.startswith("#"):
+            continue
+        if href.startswith(("mailto:", "tel:", "javascript:")):
+            continue
+        abs_url = urljoin(base_url, href)
+        parsed = urlparse(abs_url)
+        if parsed.scheme not in ("http", "https"):
+            continue
+        if origin and parsed.netloc.lower() != origin:
+            continue
+        if not _is_probable_doc_link(abs_url):
+            continue
+        if not _path_within_root(parsed.path or "", root_path):
+            continue
+        canonical = _canonicalize_url(abs_url)
+        if canonical in seen:
+            continue
+        seen.add(canonical)
+        urls.append(canonical)
+
+    return urls
+
+
+def _remove_unwanted(article: Tag) -> None:
+    for selector in [
+        'nav[aria-label="Breadcrumbs"]',
+        'nav[aria-label="Breadcrumb"]',
+        'nav[aria-label="Docs pages"]',
+        "div.theme-doc-footer",
+        "div.theme-doc-footer-edit-meta-row",
+        "div.theme-doc-version-badge",
+        "script",
+        "style",
+        "noscript",
+        "iframe",
+        "svg",
+        "button",
+    ]:
+        for el in list(article.select(selector)):
+            el.decompose()
+
+
+def _absolutize_urls(container: Tag, base_url: str) -> None:
+    for el in container.find_all(True):
+        if el.has_attr("href"):
+            href = str(el.get("href") or "")
+            if href.startswith("/"):
+                el["href"] = urljoin(base_url, href)
+        if el.has_attr("src"):
+            src = str(el.get("src") or "")
+            if src.startswith("/"):
+                el["src"] = urljoin(base_url, src)
+
+
+def _extract_next_url(soup: BeautifulSoup, base_url: str) -> str | None:
+    nav = soup.select_one('nav[aria-label="Docs pages"]')
+    if not nav:
+        return None
+
+    for a in nav.find_all("a", href=True):
+        text = " ".join(a.get_text(" ", strip=True).split())
+        if text.lower().startswith("next"):
+            return urljoin(base_url, a["href"])
+
+    return None
+
+
+def iter_docusaurus_next(options: DocusaurusNextOptions) -> list[Chapter]:
+    session = requests.Session()
+    session.headers.update({"User-Agent": options.user_agent})
+
+    url = options.start_url
+    base_url = options.base_url or options.start_url
+
+    visited: set[str] = set()
+    chapters: list[Chapter] = []
+
+    def fetch_soup(target_url: str) -> BeautifulSoup:
+        resp = session.get(target_url, timeout=30)
+        resp.raise_for_status()
+        return BeautifulSoup(resp.text, "lxml")
+
+    initial_soup = fetch_soup(url)
+    sidebar_urls = _extract_sidebar_urls(initial_soup, base_url=base_url, start_url=url)
+    initial_key = _canonicalize_url(url)
+
+    def consume_page(target_url: str, *, soup: BeautifulSoup | None = None) -> Tag | None:
+        if options.max_pages is not None and len(chapters) >= options.max_pages:
+            return None
+        key = _canonicalize_url(target_url)
+        if key in visited:
+            return None
+        visited.add(key)
+
+        page_soup = soup
+        if page_soup is None:
+            try:
+                page_soup = fetch_soup(target_url)
+            except requests.HTTPError as exc:
+                status = exc.response.status_code if exc.response is not None else None
+                if status in {404, 410} and key != initial_key:
+                    return None
+                raise
+
+        article = _extract_article(page_soup)
+        title_el = article.find(["h1", "h2"])
+        title = (
+            " ".join(title_el.get_text(" ", strip=True).split())
+            if title_el
+            else f"Chapter {len(chapters) + 1}"
+        )
+
+        _remove_unwanted(article)
+        _absolutize_urls(article, base_url=target_url)
+
+        for a in list(article.select('a.hash-link[href^="#"]')):
+            a.decompose()
+
+        html = article.decode_contents()
+        chapters.append(Chapter(index=len(chapters) + 1, title=title, url=target_url, html=html))
+
+        if options.sleep_s > 0 and (options.max_pages is None or len(chapters) < options.max_pages):
+            import time
+
+            time.sleep(options.sleep_s)
+
+        return article
+
+    if sidebar_urls:
+        if initial_key not in {_canonicalize_url(u) for u in sidebar_urls}:
+            sidebar_urls.insert(0, url)
+        queue = list(sidebar_urls)
+        discovered = {_canonicalize_url(u) for u in queue}
+        idx = 0
+        while idx < len(queue):
+            if options.max_pages is not None and len(chapters) >= options.max_pages:
+                break
+            target_url = queue[idx]
+            use_soup = initial_soup if _canonicalize_url(target_url) == initial_key else None
+            article = consume_page(target_url, soup=use_soup)
+            if article is None:
+                idx += 1
+                continue
+            extra = _extract_content_urls(article, base_url=target_url, start_url=url)
+            for link in extra:
+                key = _canonicalize_url(link)
+                if key in discovered:
+                    continue
+                discovered.add(key)
+                queue.append(link)
+            idx += 1
+        return chapters

+    # Fallback: follow next/previous navigation.
+    current_url = url
+    soup = initial_soup
+    while True:
+        if options.max_pages is not None and len(chapters) >= options.max_pages:
+            break
+
+        article = consume_page(current_url, soup=soup)
+        if article is None:
+            break
+
+        next_url = _extract_next_url(soup, base_url=base_url)
+        if not next_url:
+            break
+
+        current_url = next_url
+        soup = fetch_soup(current_url)
+
+    return chapters
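For orientation, a minimal sketch of driving the new entry point; the start URL and page cap are illustrative, and the `Chapter` fields printed at the end (`index`, `title`, `url`) come from `docs2epub.model`:

from docs2epub.docusaurus_next import DocusaurusNextOptions, iter_docusaurus_next

# Sidebar-first crawl: sidebar/index links are collected and expanded with
# in-page content links; the "Next" pager is only used when no sidebar is found.
options = DocusaurusNextOptions(
    start_url="https://tutorial.docusaurus.io/docs/intro",  # illustrative start page
    max_pages=5,    # optional cap for a quick dry run
    sleep_s=0.0,    # skip the politeness delay while experimenting
)
chapters = iter_docusaurus_next(options)
for chapter in chapters:
    print(chapter.index, chapter.title, chapter.url)

Leaving `base_url` unset makes the start URL double as the base for resolving relative links, per the dataclass defaults above.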
docs2epub-0.1.6/tests/test_docusaurus_next.py (new file)
@@ -0,0 +1,42 @@
+from docs2epub.docusaurus_next import DocusaurusNextOptions, iter_docusaurus_next
+
+
+def test_iter_docusaurus_next_falls_back_to_main_when_no_article(monkeypatch):
+    html = """
+    <!doctype html>
+    <html>
+      <body>
+        <main>
+          <div>
+            <h1>Overview</h1>
+            <p>Hello world</p>
+          </div>
+        </main>
+      </body>
+    </html>
+    """
+
+    class DummyResponse:
+        text = html
+
+        def raise_for_status(self) -> None:
+            return None
+
+    class DummySession:
+        def __init__(self) -> None:
+            self.headers = {}
+
+        def get(self, url: str, timeout: int = 30) -> DummyResponse:
+            return DummyResponse()
+
+    monkeypatch.setattr(
+        "docs2epub.docusaurus_next.requests.Session",
+        lambda: DummySession(),
+    )
+
+    options = DocusaurusNextOptions(start_url="https://example.com/docs", sleep_s=0)
+    chapters = iter_docusaurus_next(options)
+
+    assert len(chapters) == 1
+    assert chapters[0].title == "Overview"
+    assert "Hello world" in chapters[0].html
docs2epub-0.1.6/tests/test_sidebar_scrape.py (new file)
@@ -0,0 +1,163 @@
+import requests
+
+from docs2epub.docusaurus_next import DocusaurusNextOptions, iter_docusaurus_next
+
+
+def _make_session(pages: dict[str, str]):
+    class DummyResponse:
+        def __init__(self, text: str) -> None:
+            self.text = text
+
+        def raise_for_status(self) -> None:
+            return None
+
+    class DummySession:
+        def __init__(self) -> None:
+            self.headers = {}
+
+        def get(self, url: str, timeout: int = 30) -> DummyResponse:
+            if url not in pages:
+                raise AssertionError(f"unexpected url fetch: {url}")
+            return DummyResponse(pages[url])
+
+    return DummySession
+
+
+def _make_session_with_status(pages: dict[str, tuple[int, str]]):
+    class DummyResponse:
+        def __init__(self, url: str, status_code: int, text: str) -> None:
+            self.url = url
+            self.status_code = status_code
+            self.text = text
+
+        def raise_for_status(self) -> None:
+            if self.status_code >= 400:
+                raise requests.HTTPError(
+                    f"{self.status_code} Client Error",
+                    response=self,
+                )
+
+    class DummySession:
+        def __init__(self) -> None:
+            self.headers = {}
+
+        def get(self, url: str, timeout: int = 30) -> DummyResponse:
+            if url not in pages:
+                raise AssertionError(f"unexpected url fetch: {url}")
+            status_code, text = pages[url]
+            return DummyResponse(url, status_code, text)
+
+    return DummySession
+
+
+def test_iter_uses_gitbook_sidebar_links(monkeypatch):
+    start_url = "https://example.com/book/intro"
+    sidebar = """
+    <aside data-testid="table-of-contents">
+      <a href="/book/intro">Intro</a>
+      <a href="/book/chapter-1">Chapter 1</a>
+    </aside>
+    """
+    pages = {
+        start_url: f"<html><body>{sidebar}<main><h1>Intro</h1><p>Intro text</p></main></body></html>",
+        "https://example.com/book/chapter-1": f"<html><body>{sidebar}<main><h1>Chapter 1</h1><p>Ch1</p></main></body></html>",
+    }
+
+    monkeypatch.setattr(
+        "docs2epub.docusaurus_next.requests.Session",
+        lambda: _make_session(pages)(),
+    )
+
+    options = DocusaurusNextOptions(start_url=start_url, sleep_s=0)
+    chapters = iter_docusaurus_next(options)
+
+    assert [c.title for c in chapters] == ["Intro", "Chapter 1"]
+
+
+def test_iter_uses_docusaurus_menu_sidebar(monkeypatch):
+    start_url = "https://example.com/docs/intro"
+    sidebar = """
+    <nav class="menu">
+      <a class="menu__link" href="/docs/intro">Intro</a>
+      <a class="menu__link" href="/docs/install">Install</a>
+    </nav>
+    """
+    pages = {
+        start_url: f"<html><body>{sidebar}<article><h1>Intro</h1><p>Intro text</p></article></body></html>",
+        "https://example.com/docs/install": f"<html><body>{sidebar}<article><h1>Install</h1><p>Install text</p></article></body></html>",
+    }
+
+    monkeypatch.setattr(
+        "docs2epub.docusaurus_next.requests.Session",
+        lambda: _make_session(pages)(),
+    )
+
+    options = DocusaurusNextOptions(start_url=start_url, sleep_s=0)
+    chapters = iter_docusaurus_next(options)
+
+    assert [c.title for c in chapters] == ["Intro", "Install"]
+
+
+def test_iter_expands_links_from_index_pages(monkeypatch):
+    start_url = "https://example.com/docs/intro"
+    sidebar = """
+    <nav class="menu">
+      <a class="menu__link" href="/docs/intro">Intro</a>
+      <a class="menu__link" href="/docs/category/getting-started">Getting Started</a>
+    </nav>
+    """
+    pages = {
+        start_url: f"<html><body>{sidebar}<article><h1>Intro</h1><p>Intro text</p></article></body></html>",
+        "https://example.com/docs/category/getting-started": (
+            "<html><body>"
+            f"{sidebar}"
+            '<article><h1>Getting Started</h1>'
+            '<a href="/docs/one">One</a>'
+            '<a href="/docs/two">Two</a>'
+            "</article></body></html>"
+        ),
+        "https://example.com/docs/one": f"<html><body>{sidebar}<article><h1>One</h1><p>One text</p></article></body></html>",
+        "https://example.com/docs/two": f"<html><body>{sidebar}<article><h1>Two</h1><p>Two text</p></article></body></html>",
+    }
+
+    monkeypatch.setattr(
+        "docs2epub.docusaurus_next.requests.Session",
+        lambda: _make_session(pages)(),
+    )
+
+    options = DocusaurusNextOptions(start_url=start_url, sleep_s=0)
+    chapters = iter_docusaurus_next(options)
+
+    assert [c.title for c in chapters] == ["Intro", "Getting Started", "One", "Two"]
+
+
+def test_iter_skips_sidebar_links_that_404(monkeypatch):
+    start_url = "https://example.com/docs/intro"
+    sidebar = """
+    <nav class="menu">
+      <a class="menu__link" href="/docs/intro">Intro</a>
+      <a class="menu__link" href="/docs/missing">Missing</a>
+      <a class="menu__link" href="/docs/other">Other</a>
+    </nav>
+    """
+    pages = {
+        start_url: (
+            200,
+            f"<html><body>{sidebar}<article><h1>Intro</h1><p>Intro text</p></article></body></html>",
+        ),
+        "https://example.com/docs/missing": (404, "Not found"),
+        "https://example.com/docs/other": (
+            200,
+            f"<html><body>{sidebar}<article><h1>Other</h1><p>Other text</p></article></body></html>",
+        ),
+    }
+
+    monkeypatch.setattr(
+        "docs2epub.docusaurus_next.requests.Session",
+        lambda: _make_session_with_status(pages)(),
+    )
+
+    options = DocusaurusNextOptions(start_url=start_url, sleep_s=0)
+    chapters = iter_docusaurus_next(options)
+
+    assert [c.title for c in chapters] == ["Intro", "Other"]
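Both new test modules are offline (the `requests` session is monkeypatched), so they fit the `uv run pytest -q` workflow from AGENTS.md; selecting only these files is one way to run them:

uv run pytest -q tests/test_docusaurus_next.py tests/test_sidebar_scrape.py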
docs2epub-0.1.2/src/docs2epub/docusaurus_next.py (removed)
@@ -1,140 +0,0 @@
-from __future__ import annotations
-
-import re
-from dataclasses import dataclass
-from urllib.parse import urljoin
-
-import requests
-from bs4 import BeautifulSoup, Tag
-
-from .model import Chapter
-
-
-DEFAULT_USER_AGENT = "docs2epub/0.1 (+https://github.com/brenorb/docs2epub)"
-
-
-@dataclass(frozen=True)
-class DocusaurusNextOptions:
-    start_url: str
-    base_url: str | None = None
-    max_pages: int | None = None
-    sleep_s: float = 0.5
-    user_agent: str = DEFAULT_USER_AGENT
-
-
-def _slugify_filename(text: str) -> str:
-    value = text.strip().lower()
-    value = re.sub(r"[^\w\s-]", "", value)
-    value = re.sub(r"[\s_-]+", "-", value)
-    value = value.strip("-")
-    return value or "chapter"
-
-
-def _extract_article(soup: BeautifulSoup) -> Tag:
-    article = soup.find("article")
-    if article:
-        return article
-    main = soup.find("main")
-    if main:
-        article = main.find("article")
-        if article:
-            return article
-    raise RuntimeError("Could not find <article> in page HTML")
-
-
-def _remove_unwanted(article: Tag) -> None:
-    for selector in [
-        'nav[aria-label="Breadcrumbs"]',
-        'nav[aria-label="Docs pages"]',
-        "div.theme-doc-footer",
-        "div.theme-doc-footer-edit-meta-row",
-        "div.theme-doc-version-badge",
-        "script",
-        "style",
-        "noscript",
-        "iframe",
-        "svg",
-        "button",
-    ]:
-        for el in list(article.select(selector)):
-            el.decompose()
-
-
-def _absolutize_urls(container: Tag, base_url: str) -> None:
-    for el in container.find_all(True):
-        if el.has_attr("href"):
-            href = str(el.get("href") or "")
-            if href.startswith("/"):
-                el["href"] = urljoin(base_url, href)
-        if el.has_attr("src"):
-            src = str(el.get("src") or "")
-            if src.startswith("/"):
-                el["src"] = urljoin(base_url, src)
-
-
-def _extract_next_url(soup: BeautifulSoup, base_url: str) -> str | None:
-    nav = soup.select_one('nav[aria-label="Docs pages"]')
-    if not nav:
-        return None
-
-    for a in nav.find_all("a", href=True):
-        text = " ".join(a.get_text(" ", strip=True).split())
-        if text.lower().startswith("next"):
-            return urljoin(base_url, a["href"])
-
-    return None
-
-
-def iter_docusaurus_next(options: DocusaurusNextOptions) -> list[Chapter]:
-    session = requests.Session()
-    session.headers.update({"User-Agent": options.user_agent})
-
-    url = options.start_url
-    base_url = options.base_url or options.start_url
-
-    visited: set[str] = set()
-    chapters: list[Chapter] = []
-
-    idx = 1
-    while True:
-        if options.max_pages is not None and idx > options.max_pages:
-            break
-
-        if url in visited:
-            break
-        visited.add(url)
-
-        resp = session.get(url, timeout=30)
-        resp.raise_for_status()
-
-        soup = BeautifulSoup(resp.text, "lxml")
-        article = _extract_article(soup)
-
-        title_el = article.find(["h1", "h2"])
-        title = (
-            " ".join(title_el.get_text(" ", strip=True).split()) if title_el else f"Chapter {idx}"
-        )
-
-        _remove_unwanted(article)
-        _absolutize_urls(article, base_url=base_url)
-
-        for a in list(article.select('a.hash-link[href^="#"]')):
-            a.decompose()
-
-        html = article.decode_contents()
-
-        chapters.append(Chapter(index=idx, title=title, url=url, html=html))
-
-        next_url = _extract_next_url(soup, base_url=base_url)
-        if not next_url:
-            break
-
-        url = next_url
-        idx += 1
-
-        if options.sleep_s > 0:
-            import time
-
-            time.sleep(options.sleep_s)
-
-    return chapters