novel-downloader 1.2.1__py3-none-any.whl → 1.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. novel_downloader/__init__.py +1 -1
  2. novel_downloader/cli/download.py +2 -0
  3. novel_downloader/config/adapter.py +29 -4
  4. novel_downloader/config/models.py +7 -4
  5. novel_downloader/core/downloaders/common_downloader.py +1 -2
  6. novel_downloader/core/downloaders/qidian_downloader.py +1 -2
  7. novel_downloader/core/factory/downloader_factory.py +13 -11
  8. novel_downloader/core/interfaces/async_requester_protocol.py +4 -1
  9. novel_downloader/core/interfaces/requester_protocol.py +4 -1
  10. novel_downloader/core/parsers/base_parser.py +3 -3
  11. novel_downloader/core/parsers/common_parser/helper.py +7 -5
  12. novel_downloader/core/parsers/qidian_parser/browser/chapter_encrypted.py +1 -1
  13. novel_downloader/core/parsers/qidian_parser/browser/main_parser.py +3 -3
  14. novel_downloader/core/parsers/qidian_parser/session/chapter_encrypted.py +1 -1
  15. novel_downloader/core/parsers/qidian_parser/session/main_parser.py +3 -3
  16. novel_downloader/core/parsers/qidian_parser/shared/book_info_parser.py +74 -18
  17. novel_downloader/core/parsers/qidian_parser/shared/helpers.py +2 -2
  18. novel_downloader/core/requesters/base_async_session.py +4 -1
  19. novel_downloader/core/requesters/base_browser.py +9 -5
  20. novel_downloader/core/requesters/base_session.py +4 -1
  21. novel_downloader/core/requesters/common_requester/common_session.py +2 -2
  22. novel_downloader/core/requesters/qidian_requester/qidian_broswer.py +35 -16
  23. novel_downloader/core/requesters/qidian_requester/qidian_session.py +3 -3
  24. novel_downloader/core/savers/common_saver/common_epub.py +1 -1
  25. novel_downloader/locales/en.json +4 -0
  26. novel_downloader/locales/zh.json +4 -0
  27. novel_downloader/utils/constants.py +2 -1
  28. novel_downloader/utils/time_utils/datetime_utils.py +1 -1
  29. novel_downloader/utils/time_utils/sleep_utils.py +27 -11
  30. {novel_downloader-1.2.1.dist-info → novel_downloader-1.2.2.dist-info}/METADATA +1 -1
  31. {novel_downloader-1.2.1.dist-info → novel_downloader-1.2.2.dist-info}/RECORD +35 -35
  32. {novel_downloader-1.2.1.dist-info → novel_downloader-1.2.2.dist-info}/WHEEL +1 -1
  33. {novel_downloader-1.2.1.dist-info → novel_downloader-1.2.2.dist-info}/entry_points.txt +0 -0
  34. {novel_downloader-1.2.1.dist-info → novel_downloader-1.2.2.dist-info}/licenses/LICENSE +0 -0
  35. {novel_downloader-1.2.1.dist-info → novel_downloader-1.2.2.dist-info}/top_level.txt +0 -0
--- a/novel_downloader/__init__.py
+++ b/novel_downloader/__init__.py
@@ -7,7 +7,7 @@ novel_downloader
 Core package for the Novel Downloader project.
 """
 
-__version__ = "1.2.1"
+__version__ = "1.2.2"
 
 __author__ = "Saudade Z"
 __email__ = "saudadez217@gmail.com"
--- a/novel_downloader/cli/download.py
+++ b/novel_downloader/cli/download.py
@@ -57,6 +57,8 @@ def download_cli(ctx: Context, book_ids: List[str], site: str) -> None:
     parser_cfg = adapter.get_parser_config()
     saver_cfg = adapter.get_saver_config()
 
+    click.echo(t("download_site_mode", mode=downloader_cfg.mode))
+
     # If no book_ids provided on the command line, try to load them from config
     if not book_ids:
         try:
--- a/novel_downloader/config/adapter.py
+++ b/novel_downloader/config/adapter.py
@@ -23,6 +23,7 @@ from .models import (
     RequesterConfig,
     SaverConfig,
 )
+from .site_rules import load_site_rules
 
 
 class ConfigAdapter:
@@ -38,19 +39,43 @@ class ConfigAdapter:
         self._config = config
         self._site = site
 
+        site_rules = load_site_rules()  # -> Dict[str, SiteRules]
+        self._supported_sites = set(site_rules.keys())
+
     def set_site(self, site: str) -> None:
         """
        Switch the currently targeted site.
         """
         self._site = site
 
+    def _get_site_cfg(self) -> Dict[str, Any]:
+        """
+        Resolve the site configuration in one place:
+
+        1. Try self._config["sites"][self._site] first.
+        2. If absent and self._site is in self._supported_sites, use sites["common"].
+        3. Otherwise return an empty dict.
+        """
+        sites_cfg = self._config.get("sites", {}) or {}
+
+        # 1. site-specific config
+        if self._site in sites_cfg:
+            return sites_cfg[self._site] or {}
+
+        # 2. fallback to "common" only if site is supported
+        if self._site in self._supported_sites:
+            return sites_cfg.get("common", {}) or {}
+
+        # 3. completely unsupported site
+        return {}
+
     def get_requester_config(self) -> RequesterConfig:
         """
         Read the general request configuration (including DrissionPage settings)
         from config["requests"] and return a RequesterConfig instance.
         """
         req = self._config.get("requests", {})
-        site_cfg = self._config.get("sites", {}).get(self._site, {})
+        site_cfg = self._get_site_cfg()
         return RequesterConfig(
             wait_time=req.get("wait_time", 5),
             retry_times=req.get("retry_times", 3),
@@ -73,7 +98,7 @@ class ConfigAdapter:
         """
         gen = self._config.get("general", {})
         debug = gen.get("debug", {})
-        site_cfg = self._config.get("sites", {}).get(self._site, {})
+        site_cfg = self._get_site_cfg()
         return DownloaderConfig(
             request_interval=gen.get("request_interval", 5),
             raw_data_dir=gen.get("raw_data_dir", "./raw_data"),
@@ -94,7 +119,7 @@ class ConfigAdapter:
         """
         gen = self._config.get("general", {})
         font_ocr = gen.get("font_ocr", {})
-        site_cfg = self._config.get("sites", {}).get(self._site, {})
+        site_cfg = self._get_site_cfg()
         return ParserConfig(
             cache_dir=gen.get("cache_dir", "./cache"),
             decode_font=font_ocr.get("decode_font", False),
@@ -139,7 +164,7 @@ class ConfigAdapter:
         """
         Extract the target book list from config["sites"][site]["book_ids"].
         """
-        site_cfg = self._config.get("sites", {}).get(self._site, {})
+        site_cfg = self._get_site_cfg()
         raw_ids = site_cfg.get("book_ids", [])
 
         if isinstance(raw_ids, str):
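
Note: the practical effect of `_get_site_cfg` is that a supported site with no explicit entry under `sites` now inherits `sites["common"]`, while a completely unknown site gets an empty dict instead of silently sharing the common block. A minimal standalone sketch of the same three-step resolution (the `supported` set here is a hypothetical stand-in for the loaded site rules):

    from typing import Any, Dict, Set

    def resolve_site_cfg(config: Dict[str, Any], site: str, supported: Set[str]) -> Dict[str, Any]:
        """Mirrors ConfigAdapter._get_site_cfg's fallback order."""
        sites_cfg = config.get("sites", {}) or {}
        if site in sites_cfg:                        # 1. explicit per-site block wins
            return sites_cfg[site] or {}
        if site in supported:                        # 2. supported but unconfigured -> "common"
            return sites_cfg.get("common", {}) or {}
        return {}                                    # 3. unknown site -> empty config

    cfg = {"sites": {"common": {"mode": "session"}, "qidian": {"mode": "browser"}}}
    assert resolve_site_cfg(cfg, "qidian", {"qidian", "biquge"}) == {"mode": "browser"}
    assert resolve_site_cfg(cfg, "biquge", {"qidian", "biquge"}) == {"mode": "session"}
    assert resolve_site_cfg(cfg, "example", {"qidian", "biquge"}) == {}
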
--- a/novel_downloader/config/models.py
+++ b/novel_downloader/config/models.py
@@ -135,16 +135,19 @@ class ChapterFieldRules(TypedDict):
     steps: List[RuleStep]
 
 
-class VolumesRules(TypedDict, total=False):
-    has_volume: bool  # whether volumes exist; false = no volume split
+class VolumesRulesOptional(TypedDict, total=False):
     volume_selector: str  # selector for volume blocks when volumes exist
-    chapter_selector: str  # selector for chapter nodes
     volume_name_steps: List[RuleStep]
-    chapter_steps: List[ChapterFieldRules]  # steps for extracting chapter info
     volume_mode: str  # Optional: "normal" (default) or "mixed"
     list_selector: str  # Optional: if "mixed" mode, parent container selector
 
 
+class VolumesRules(VolumesRulesOptional):
+    has_volume: bool  # whether volumes exist; false = no volume split
+    chapter_selector: str  # selector for chapter nodes
+    chapter_steps: List[ChapterFieldRules]  # steps for extracting chapter info
+
+
 class BookInfoRules(TypedDict, total=False):
     book_name: FieldRules
     author: FieldRules
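
Note: splitting the rules into a `total=False` base plus a fully-required subclass is the standard TypedDict idiom for mixing optional and required keys before `typing.Required`/`NotRequired` landed in Python 3.11; it is also what lets the parser helpers below index required keys with `[...]` while fetching optional ones with `.get()`. A self-contained sketch of the pattern (simplified stand-in names, not the package's own classes):

    from typing import List, TypedDict

    class _VolRulesOptional(TypedDict, total=False):
        volume_selector: str      # optional: read with .get(), may be absent
        volume_mode: str

    class VolRules(_VolRulesOptional):
        has_volume: bool          # required: safe to index with []
        chapter_selector: str

    rule: VolRules = {"has_volume": False, "chapter_selector": "li"}
    assert rule["chapter_selector"] == "li"        # required key, direct indexing
    assert rule.get("volume_selector") is None     # optional key, absent here
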
--- a/novel_downloader/core/downloaders/common_downloader.py
+++ b/novel_downloader/core/downloaders/common_downloader.py
@@ -67,8 +67,7 @@ class CommonDownloader(BaseDownloader):
         cache_base = self.cache_dir / site / book_id
         info_path = raw_base / "book_info.json"
         chapter_dir = raw_base / "chapters"
-        if save_html:
-            chapters_html_dir = cache_base / "html"
+        chapters_html_dir = cache_base / "html"
 
         raw_base.mkdir(parents=True, exist_ok=True)
         chapter_dir.mkdir(parents=True, exist_ok=True)
--- a/novel_downloader/core/downloaders/qidian_downloader.py
+++ b/novel_downloader/core/downloaders/qidian_downloader.py
@@ -87,8 +87,7 @@ class QidianDownloader(BaseDownloader):
         info_path = raw_base / "book_info.json"
         chapter_dir = raw_base / "chapters"
         encrypted_chapter_dir = raw_base / "encrypted_chapters"
-        if save_html:
-            chapters_html_dir = cache_base / "html"
+        chapters_html_dir = cache_base / "html"
 
         raw_base.mkdir(parents=True, exist_ok=True)
         chapter_dir.mkdir(parents=True, exist_ok=True)
--- a/novel_downloader/core/factory/downloader_factory.py
+++ b/novel_downloader/core/factory/downloader_factory.py
@@ -14,7 +14,7 @@ based on the site name and parser mode specified in the configuration.
 To add support for new sites or modes, extend the `_site_map` accordingly.
 """
 
-from typing import Union
+from typing import Union, cast
 
 from novel_downloader.config import DownloaderConfig, load_site_rules
 from novel_downloader.core.downloaders import (
@@ -137,13 +137,15 @@ def get_downloader(
     :raises TypeError: If the provided requester does not match the required protocol
         for the chosen mode (sync vs async).
     """
-    mode = config.mode.lower()
-    if mode == "async":
-        if not isinstance(requester, AsyncRequesterProtocol):
-            raise TypeError("Async mode requires an AsyncRequesterProtocol")
-        return get_async_downloader(requester, parser, saver, site, config)
-    if mode in ("browser", "session"):
-        if not isinstance(requester, RequesterProtocol):
-            raise TypeError("Sync mode requires a RequesterProtocol")
-        return get_sync_downloader(requester, parser, saver, site, config)
-    raise ValueError(f"Unknown mode '{config.mode}' for site '{site}'")
+    if requester.is_async():
+        if config.mode.lower() != "async":
+            raise TypeError("Requester is async, but config.mode is not 'async'")
+        async_requester = cast(AsyncRequesterProtocol, requester)
+        return get_async_downloader(async_requester, parser, saver, site, config)
+    else:
+        if config.mode.lower() not in ("browser", "session"):
+            raise TypeError(
+                "Requester is sync, but config.mode is not 'browser' or 'session'"
+            )
+        sync_requester = cast(RequesterProtocol, requester)
+        return get_sync_downloader(sync_requester, parser, saver, site, config)
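
Note: dispatching on a runtime `is_async()` call and then `cast`ing for the type checker sidesteps a limitation of `isinstance` with `@runtime_checkable` protocols: such checks only verify that member names exist, not whether the methods are coroutines, so a sync requester could previously slip past the async branch. A reduced sketch of the new shape, with simplified stand-in protocols rather than the package's real ones:

    from typing import Literal, Protocol, Union, cast, runtime_checkable

    @runtime_checkable
    class AsyncReq(Protocol):
        def is_async(self) -> Literal[True]: ...

    @runtime_checkable
    class SyncReq(Protocol):
        def is_async(self) -> Literal[False]: ...

    def dispatch(requester: Union[AsyncReq, SyncReq], mode: str) -> str:
        if requester.is_async():
            if mode.lower() != "async":
                raise TypeError("Requester is async, but config.mode is not 'async'")
            return f"async downloader for {cast(AsyncReq, requester)}"
        if mode.lower() not in ("browser", "session"):
            raise TypeError("Requester is sync, but config.mode is not 'browser' or 'session'")
        return f"sync downloader for {cast(SyncReq, requester)}"
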
--- a/novel_downloader/core/interfaces/async_requester_protocol.py
+++ b/novel_downloader/core/interfaces/async_requester_protocol.py
@@ -9,7 +9,7 @@ for book info pages, individual chapters, managing request lifecycle,
 and optionally retrieving a user's authenticated bookcase — all in async style.
 """
 
-from typing import Optional, Protocol, runtime_checkable
+from typing import Literal, Optional, Protocol, runtime_checkable
 
 
 @runtime_checkable
@@ -21,6 +21,9 @@ class AsyncRequesterProtocol(Protocol):
     and manage login/shutdown asynchronously.
     """
 
+    def is_async(self) -> Literal[True]:
+        ...
+
     async def login(self, max_retries: int = 3, manual_login: bool = False) -> bool:
         """
         Attempt to log in asynchronously.
--- a/novel_downloader/core/interfaces/requester_protocol.py
+++ b/novel_downloader/core/interfaces/requester_protocol.py
@@ -9,7 +9,7 @@ for book info pages, individual chapters, managing request lifecycle,
 and optionally retrieving a user's authenticated bookcase.
 """
 
-from typing import Optional, Protocol, runtime_checkable
+from typing import Literal, Optional, Protocol, runtime_checkable
 
 
 @runtime_checkable
@@ -20,6 +20,9 @@ class RequesterProtocol(Protocol):
     - a specific chapter page.
     """
 
+    def is_async(self) -> Literal[False]:
+        ...
+
     def login(self, max_retries: int = 3, manual_login: bool = False) -> bool:
         """
         Attempt to log in
--- a/novel_downloader/core/parsers/base_parser.py
+++ b/novel_downloader/core/parsers/base_parser.py
@@ -45,14 +45,14 @@ class BaseParser(ParserProtocol, abc.ABC):
         self._base_cache_dir = Path(config.cache_dir)
 
     @abc.abstractmethod
-    def parse_book_info(self, html: str) -> Dict[str, Any]:
+    def parse_book_info(self, html_str: str) -> Dict[str, Any]:
         """
         Parse a book info page and extract metadata and chapter structure.
 
         Depending on the site structure, the return dict may include a
         flat `chapters` list or nested `volumes` with chapter groups.
 
-        :param html: Raw HTML of the book info page.
+        :param html_str: Raw HTML of the book info page.
         :return: Parsed metadata and chapter structure as a dictionary.
         """
         ...
@@ -62,7 +62,7 @@ class BaseParser(ParserProtocol, abc.ABC):
         """
         Parse a single chapter page and extract clean text or simplified HTML.
 
-        :param html: Raw HTML of the chapter page.
+        :param html_str: Raw HTML of the chapter page.
         :param chapter_id: Identifier of the chapter being parsed.
         :return: Cleaned chapter content as plain text or minimal HTML.
         """
--- a/novel_downloader/core/parsers/common_parser/helper.py
+++ b/novel_downloader/core/parsers/common_parser/helper.py
@@ -188,7 +188,7 @@ class HTMLExtractor:
                 current = sep.join(current)
 
             elif t == "attr":
-                name = step.get("attr")
+                name = step.get("attr") or ""
                 if isinstance(current, list):
                     current = [elem.get(name, "") for elem in current]
                 elif isinstance(current, Tag):
@@ -216,9 +216,9 @@ class HTMLExtractor:
         """
         list_selector = volume_rule.get("list_selector")
         volume_selector = volume_rule.get("volume_selector")
-        chapter_selector = volume_rule.get("chapter_selector")
         volume_name_steps = volume_rule.get("volume_name_steps")
-        chapter_steps_list = volume_rule.get("chapter_steps")
+        chapter_selector = volume_rule["chapter_selector"]
+        chapter_steps_list = volume_rule["chapter_steps"]
 
         if not (
             list_selector and volume_selector and chapter_selector and volume_name_steps
@@ -241,6 +241,8 @@ class HTMLExtractor:
         for elem in list_area.find_all(
             [volume_selector, chapter_selector], recursive=True
         ):
+            if not isinstance(elem, Tag):
+                continue
             if elem.name == volume_selector:
                 extractor = HTMLExtractor(str(elem))
                 volume_name = extractor.extract_field(volume_name_steps)
@@ -257,9 +259,9 @@ class HTMLExtractor:
         return volumes
 
     def extract_volume_blocks(self, volume_rule: VolumesRules) -> List[Dict[str, Any]]:
-        volume_selector = volume_rule["volume_selector"]
+        volume_selector = volume_rule.get("volume_selector")
+        volume_name_steps = volume_rule.get("volume_name_steps")
         chapter_selector = volume_rule["chapter_selector"]
-        volume_name_steps = volume_rule["volume_name_steps"]
        chapter_steps_list = volume_rule["chapter_steps"]
         if not (volume_selector and volume_name_steps):
             raise ValueError(
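
Note: the new `isinstance(elem, Tag)` guard (and the matching `isinstance(main_tag, Tag)` checks in the chapter parsers below) narrows what BeautifulSoup hands back: `find` is typed as returning `Tag | NavigableString | None` and genuinely returns `None` on a miss, so attribute access such as `.name` or `.attrs` needs narrowing both for the type checker and for runtime safety. A small demonstration, assuming only bs4 is installed:

    from bs4 import BeautifulSoup, Tag

    soup = BeautifulSoup("<ul><li><a href='/c1'>Ch 1</a></li></ul>", "html.parser")

    main_tag = soup.find("main", id="c-123")    # no match -> None at runtime
    if not isinstance(main_tag, Tag):           # narrows Tag | NavigableString | None
        main_tag = soup                         # fall back to the whole document

    for elem in soup.find_all(["ul", "li"]):
        if not isinstance(elem, Tag):           # satisfies the checker's PageElement type
            continue
        print(elem.name)                        # "ul", then "li"
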
--- a/novel_downloader/core/parsers/qidian_parser/browser/chapter_encrypted.py
+++ b/novel_downloader/core/parsers/qidian_parser/browser/chapter_encrypted.py
@@ -229,7 +229,7 @@ def extract_paragraphs_recursively(
     if chapter_id:
         main_id = f"c-{chapter_id}"
         main_tag = soup.find("main", id=main_id)
-        if not main_tag:
+        if not isinstance(main_tag, Tag):
             return []
     else:
         main_tag = soup
--- a/novel_downloader/core/parsers/qidian_parser/browser/main_parser.py
+++ b/novel_downloader/core/parsers/qidian_parser/browser/main_parser.py
@@ -69,14 +69,14 @@ class QidianBrowserParser(BaseParser):
         self._font_debug_dir = self._base_cache_dir / "font_debug"
         self._font_debug_dir.mkdir(parents=True, exist_ok=True)
 
-    def parse_book_info(self, html: str) -> Dict[str, Any]:
+    def parse_book_info(self, html_str: str) -> Dict[str, Any]:
         """
         Parse a book info page and extract metadata and chapter structure.
 
-        :param html: Raw HTML of the book info page.
+        :param html_str: Raw HTML of the book info page.
         :return: Parsed metadata and chapter structure as a dictionary.
         """
-        return parse_book_info(html)
+        return parse_book_info(html_str)
 
     def parse_chapter(self, html_str: str, chapter_id: str) -> Dict[str, Any]:
         """
--- a/novel_downloader/core/parsers/qidian_parser/session/chapter_encrypted.py
+++ b/novel_downloader/core/parsers/qidian_parser/session/chapter_encrypted.py
@@ -245,7 +245,7 @@ def extract_paragraphs_recursively(
     if chapter_id > 0:
         main_id = f"c-{chapter_id}"
         main_tag = soup.find("main", id=main_id)
-        if not main_tag:
+        if not isinstance(main_tag, Tag):
             return []
     else:
         main_tag = soup
--- a/novel_downloader/core/parsers/qidian_parser/session/main_parser.py
+++ b/novel_downloader/core/parsers/qidian_parser/session/main_parser.py
@@ -72,14 +72,14 @@ class QidianSessionParser(BaseParser):
         self._font_debug_dir = self._base_cache_dir / "font_debug"
         self._font_debug_dir.mkdir(parents=True, exist_ok=True)
 
-    def parse_book_info(self, html: str) -> Dict[str, Any]:
+    def parse_book_info(self, html_str: str) -> Dict[str, Any]:
         """
         Parse a book info page and extract metadata and chapter structure.
 
-        :param html: Raw HTML of the book info page.
+        :param html_str: Raw HTML of the book info page.
         :return: Parsed metadata and chapter structure as a dictionary.
         """
-        return parse_book_info(html)
+        return parse_book_info(html_str)
 
     def parse_chapter(self, html_str: str, chapter_id: str) -> Dict[str, Any]:
         """
--- a/novel_downloader/core/parsers/qidian_parser/shared/book_info_parser.py
+++ b/novel_downloader/core/parsers/qidian_parser/shared/book_info_parser.py
@@ -41,6 +41,58 @@ def _get_volume_name(vol_div: Tag) -> str:
     return text.split(chr(183))[0].strip()
 
 
+def safe_select_text(
+    soup: Tag,
+    selector: str,
+    *,
+    separator: str = "",
+    strip: bool = False,
+    default: str = "",
+) -> str:
+    """
+    Safely select the first element matching a CSS selector and return its text.
+
+    :param soup: A BeautifulSoup Tag or sub-tree to query.
+    :param selector: A CSS selector string.
+    :param separator: Separator to use between strings when joining.
+    :param strip: Whether to strip whitespace from the result.
+    :param default: Value to return if no element is found.
+    :return: The element's text, or `default` if not found.
+    """
+    tag = soup.select_one(selector)
+    return (
+        tag.get_text(separator=separator, strip=strip)
+        if isinstance(tag, Tag)
+        else default
+    )
+
+
+def safe_select_attr(
+    soup: Tag,
+    selector: str,
+    attr: str,
+    *,
+    default: str = "",
+) -> str:
+    """
+    Safely select the first element matching a CSS selector and return one attribute.
+
+    :param soup: A BeautifulSoup Tag or sub-tree to query.
+    :param selector: A CSS selector string.
+    :param attr: The attribute name to retrieve from the selected element.
+    :param default: Value to return if no element or attribute is found.
+    :return: The attribute's value stripped of whitespace, or `default` if not found.
+    """
+    tag = soup.select_one(selector)
+    if isinstance(tag, Tag) and attr in tag.attrs:
+        value = tag.attrs[attr]
+        if isinstance(value, list):
+            return " ".join(value).strip()
+        elif isinstance(value, str):
+            return value.strip()
+    return default
+
+
 def parse_book_info(html_str: str) -> Dict[str, Any]:
     """
     Extract metadata: title, author, cover_url, update_time, status,
@@ -52,27 +104,24 @@ def parse_book_info(html_str: str) -> Dict[str, Any]:
     info: Dict[str, Any] = {}
     try:
         soup = html_to_soup(html_str)
-        info["book_name"] = soup.select_one("em#bookName").get_text(strip=True)
-        info["author"] = soup.select_one("a.writer").get_text(strip=True)
-        info["cover_url"] = soup.select_one("div.book-img img")["src"].strip()
+        info["book_name"] = safe_select_text(soup, "em#bookName", strip=True)
+        info["author"] = safe_select_text(soup, "a.writer", strip=True)
+        info["cover_url"] = safe_select_attr(soup, "div.book-img img", "src")
         info["update_time"] = (
-            soup.select_one("span.book-update-time")
-            .get_text(strip=True)
+            safe_select_text(soup, "span.book-update-time", strip=True)
             .replace("更新时间", "")
             .strip()
         )
-        info["serial_status"] = soup.select_one("span.blue").get_text(strip=True)
-        # word count via regex
-        match = re.search(
-            r"<em>([\d.]+)</em>\s*<cite>(.*?)字</cite>",
-            html_str,
+        info["serial_status"] = safe_select_text(soup, "span.blue", strip=True)
+
+        # Word count via regex fallback
+        match = re.search(r"<em>([\d.]+)</em>\s*<cite>(.*?)字</cite>", html_str)
+        info["word_count"] = (
+            f"{match.group(1)}{match.group(2)}字" if match else "Unknown"
         )
-        if match:
-            info["word_count"] = match.group(1) + match.group(2) + "字"
-        else:
-            info["word_count"] = "Unknown"
-        info["summary"] = soup.select_one("div.book-intro p").get_text(
-            separator="\n", strip=True
+
+        info["summary"] = safe_select_text(
+            soup, "div.book-intro p", separator="\n", strip=True
         )
         # volumes
         vols = []
@@ -81,11 +130,18 @@ def parse_book_info(html_str: str) -> Dict[str, Any]:
             chaps = []
             for li in vol_div.select("li"):
                 a = li.select_one("a")
+                if not isinstance(a, Tag) or "href" not in a.attrs:
+                    continue
+                href_val = a["href"]
+                if isinstance(href_val, list):
+                    href = href_val[0].strip()
+                else:
+                    href = str(href_val).strip()
                 chaps.append(
                     {
                         "title": a.get_text(strip=True),
-                        "url": a["href"].strip(),
-                        "chapterId": _chapter_url_to_id(a["href"]),
+                        "url": href,
+                        "chapterId": _chapter_url_to_id(href),
                     }
                 )
             vols.append({"volume_name": name, "chapters": chaps})
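
Note: together, the two helpers replace chains like `soup.select_one(...).get_text(...)` and `soup.select_one(...)["src"]`, which raise `AttributeError`/`TypeError` whenever an element is missing; a miss now degrades to a default string. A quick usage sketch against a stripped-down page, reusing the helpers defined above:

    from bs4 import BeautifulSoup

    html = (
        "<em id='bookName'>示例书名</em>"
        "<div class='book-img'><img src=' /cover.jpg '></div>"
    )
    soup = BeautifulSoup(html, "html.parser")

    assert safe_select_text(soup, "em#bookName", strip=True) == "示例书名"
    assert safe_select_attr(soup, "div.book-img img", "src") == "/cover.jpg"
    # Missing elements fall back to the default instead of raising:
    assert safe_select_text(soup, "a.writer", strip=True) == ""
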
--- a/novel_downloader/core/parsers/qidian_parser/shared/helpers.py
+++ b/novel_downloader/core/parsers/qidian_parser/shared/helpers.py
@@ -16,7 +16,7 @@ import json
 import logging
 from typing import Any, Dict, Union
 
-from bs4 import BeautifulSoup
+from bs4 import BeautifulSoup, Tag
 
 logger = logging.getLogger(__name__)
 
@@ -103,7 +103,7 @@ def find_ssr_page_context(soup: BeautifulSoup) -> Dict[str, Any]:
     """
     try:
         tag = soup.find("script", id="vite-plugin-ssr_pageContext")
-        if tag and tag.string:
+        if isinstance(tag, Tag) and tag.string:
             data: Dict[str, Any] = json.loads(tag.string.strip())
             return data
     except Exception as e:
--- a/novel_downloader/core/requesters/base_async_session.py
+++ b/novel_downloader/core/requesters/base_async_session.py
@@ -13,7 +13,7 @@ cookie handling, and defines abstract methods for subclasses.
 import abc
 import asyncio
 import time
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, Literal, Optional, Union
 
 import aiohttp
 from aiohttp import ClientResponse, ClientSession, ClientTimeout, TCPConnector
@@ -58,6 +58,9 @@ class BaseAsyncSession(AsyncRequesterProtocol, abc.ABC):
         _cookies (Dict[str, str]): Optional cookie jar for the session.
     """
 
+    def is_async(self) -> Literal[True]:
+        return True
+
     def _init_session(
         self,
         config: RequesterConfig,
--- a/novel_downloader/core/requesters/base_browser.py
+++ b/novel_downloader/core/requesters/base_browser.py
@@ -11,9 +11,10 @@ specialized purposes.
 
 import abc
 import logging
-from typing import Any, Dict, Optional
+from typing import Any, Dict, Literal, Optional, cast
 
-from DrissionPage import Chromium, ChromiumOptions, ChromiumPage
+from DrissionPage import Chromium, ChromiumOptions
+from DrissionPage._pages.mix_tab import MixTab
 
 from novel_downloader.config.models import RequesterConfig
 from novel_downloader.core.interfaces import RequesterProtocol
@@ -42,6 +43,9 @@ class BaseBrowser(RequesterProtocol, abc.ABC):
         _page (ChromiumPage): The active browser tab.
     """
 
+    def is_async(self) -> Literal[False]:
+        return False
+
     def _init_browser(self, config: RequesterConfig) -> None:
         """
         Initialize the browser with specified options from RequesterConfig.
@@ -99,7 +103,7 @@ class BaseBrowser(RequesterProtocol, abc.ABC):
         Set up the browser instance and open the default tab.
         """
         self._browser = Chromium(self._options)
-        self._page = self._browser.get_tab()
+        self._page = cast(MixTab, self._browser.get_tab())
 
     def login(self, max_retries: int = 3, manual_login: bool = False) -> bool:
         """
@@ -151,7 +155,7 @@ class BaseBrowser(RequesterProtocol, abc.ABC):
         )
 
     @property
-    def page(self) -> ChromiumPage:
+    def page(self) -> Optional[MixTab]:
         """
         Return the current Chromium page object.
 
@@ -160,7 +164,7 @@ class BaseBrowser(RequesterProtocol, abc.ABC):
         return self._page
 
     @property
-    def browser(self) -> Chromium:
+    def browser(self) -> Optional[Chromium]:
         """
         Return the Chromium browser instance.
 
--- a/novel_downloader/core/requesters/base_session.py
+++ b/novel_downloader/core/requesters/base_session.py
@@ -10,7 +10,7 @@ persistent session and supports retries, headers, and timeout configurations.
 """
 
 import abc
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, Literal, Optional, Union
 
 import requests
 from requests import Response, Session
@@ -31,6 +31,9 @@ class BaseSession(RequesterProtocol, abc.ABC):
         _timeout (float): Timeout for each request in seconds.
     """
 
+    def is_async(self) -> Literal[False]:
+        return False
+
     def _init_session(
         self, config: RequesterConfig, cookies: Optional[Dict[str, str]] = None
     ) -> None:
--- a/novel_downloader/core/requesters/common_requester/common_session.py
+++ b/novel_downloader/core/requesters/common_requester/common_session.py
@@ -64,7 +64,7 @@ class CommonSession(BaseSession):
                 with self.session.get(url, timeout=self.timeout) as response:
                     response.raise_for_status()
                     content = response.text
-                    sleep_with_random_delay(base)
+                    sleep_with_random_delay(base, add_spread=1.0)
                     return content
             except Exception as e:
                 if attempt == self.retry_times:
@@ -94,7 +94,7 @@ class CommonSession(BaseSession):
                 with self.session.get(url, timeout=self.timeout) as response:
                     response.raise_for_status()
                     content = response.text
-                    sleep_with_random_delay(base)
+                    sleep_with_random_delay(base, add_spread=1.0)
                     return content
             except Exception as e:
                 if attempt == self.retry_times:
--- a/novel_downloader/core/requesters/qidian_requester/qidian_broswer.py
+++ b/novel_downloader/core/requesters/qidian_requester/qidian_broswer.py
@@ -15,10 +15,12 @@ import random
 import time
 from typing import Optional
 
+from DrissionPage._elements.chromium_element import ChromiumElement
 from DrissionPage.common import Keys
 
 from novel_downloader.config.models import RequesterConfig
 from novel_downloader.core.requesters.base_browser import BaseBrowser
+from novel_downloader.utils.i18n import t
 from novel_downloader.utils.time_utils import sleep_with_random_delay
 
 logger = logging.getLogger(__name__)
@@ -56,6 +58,8 @@ class QidianBrowser(BaseBrowser):
 
         :return: True if the user appears to be logged in, False otherwise.
         """
+        if self._page is None:
+            raise RuntimeError("Browser page not initialized.")
         try:
             self._handle_overlay_mask()
             sign_in_elem = self._page.ele("@class=sign-in")
@@ -83,6 +87,8 @@ class QidianBrowser(BaseBrowser):
         :param max_retries: Maximum number of times to try clicking the login button.
         :return: True if login succeeds or is already in place; False otherwise.
         """
+        if self._page is None:
+            raise RuntimeError("Browser page not initialized.")
         original_url = self._page.url
         try:
             self._page.get("https://www.qidian.com/")
@@ -107,7 +113,8 @@ class QidianBrowser(BaseBrowser):
 
         # return to original page
         try:
-            self._page.get(original_url)
+            if original_url:
+                self._page.get(original_url)
         except Exception as e:
             logger.debug("[auth] Failed to restore page URL: %s", e)
 
@@ -117,6 +124,8 @@ class QidianBrowser(BaseBrowser):
         """
         Detect and close any full-page overlay mask that might block the login UI.
         """
+        if self._page is None:
+            raise RuntimeError("Browser page not initialized.")
         try:
             mask = self._page.ele("@@tag()=div@@class=mask", timeout=2)
             if not mask:
@@ -143,10 +152,12 @@ class QidianBrowser(BaseBrowser):
 
         :param attempt: The current attempt number (for logging).
         """
+        if self._page is None:
+            raise RuntimeError("Browser page not initialized.")
         try:
             logger.debug("[auth] Attempting login click (#%s).", attempt)
             login_btn = self._page.ele("@id=login-btn", timeout=5)
-            if login_btn:
+            if isinstance(login_btn, ChromiumElement):
                 login_btn.click()
                 logger.debug("[auth] Login button clicked.")
             else:
@@ -170,6 +181,8 @@ class QidianBrowser(BaseBrowser):
         :param max_retries: Number of times to check for login success.
         :return: True if login was detected, False otherwise.
         """
+        if self._page is None:
+            raise RuntimeError("Browser page not initialized.")
         original_headless = self._headless
 
         # 1. Switch to headful mode if needed
@@ -193,13 +206,11 @@ class QidianBrowser(BaseBrowser):
                 logger.info("[auth] Detected successful login.")
                 self._logged_in = True
                 break
-
-            logger.info(
-                "[auth] Attempt %d/%d: Press Enter after completing login...",
-                attempt,
-                max_retries,
+            if attempt == 1:
+                print(t("login_prompt_intro"))
+            input(
+                t("login_prompt_press_enter", attempt=attempt, max_retries=max_retries)
             )
-            input()
         else:
             logger.warning("[auth] Manual login failed after %d attempts.", max_retries)
             self._logged_in = False
@@ -279,14 +290,16 @@ class QidianBrowser(BaseBrowser):
             If None, uses `self._config.wait_time`.
         :return: The HTML content of the book info page, or an empty string on error.
         """
+        if self._page is None:
+            raise RuntimeError("Browser page not initialized.")
         url = self._build_book_info_url(book_id)
         try:
             # Navigate and fetch
             self._page.get(url)
 
-            # Randomized humanlike delay
+            # Randomized human-like delay
             base = wait_time if wait_time is not None else self._config.wait_time
-            sleep_with_random_delay(base, base * 0.2)
+            sleep_with_random_delay(base, mul_spread=1.2)
 
             html = str(self._page.html)
             logger.debug("[fetch] Fetched book info for ID %s from %s", book_id, url)
@@ -303,6 +316,8 @@ class QidianBrowser(BaseBrowser):
         :param presses: Number of DOWN key presses.
         :param pause: Seconds to wait between each press.
         """
+        if self._page is None:
+            raise RuntimeError("Browser page not initialized.")
         for _ in range(presses):
             try:
                 self._page.actions.key_down(Keys.DOWN)
@@ -318,7 +333,7 @@ class QidianBrowser(BaseBrowser):
 
         Ensures the user is logged in, navigates to the chapter page,
         waits a randomized delay to mimic human reading, then scrolls
-        to trigger any lazyloaded content.
+        to trigger any lazy-loaded content.
 
         :param book_id: The identifier of the book.
         :param chapter_id: The identifier of the chapter.
@@ -326,14 +341,16 @@ class QidianBrowser(BaseBrowser):
             falls back to `self._config.wait_time`.
         :return: The HTML content of the chapter page, or empty string on error.
         """
+        if self._page is None:
+            raise RuntimeError("Browser page not initialized.")
         url = self._build_chapter_url(book_id, chapter_id)
         try:
             # 1. Navigate to chapter URL
             self._page.get(url)
 
-            # 2. Randomized humanlike delay
+            # 2. Randomized human-like delay
             base = wait_time if wait_time is not None else self._config.wait_time
-            # sleep_with_random_delay(base, base*0.2)
+            # sleep_with_random_delay(base, mul_spread=1.2)
 
             # 3. Scroll down to load dynamic content
             presses = int(random.uniform(base, base + 5) * 2)
@@ -349,13 +366,15 @@ class QidianBrowser(BaseBrowser):
 
     def get_bookcase(self, wait_time: Optional[float] = None) -> str:
         """
-        Retrieve the HTML content of the loggedin user's Qidian bookcase page.
+        Retrieve the HTML content of the logged-in user's Qidian bookcase page.
 
         :param wait_time: Base number of seconds to wait before returning content.
             If None, falls back to `self._config.wait_time`.
         :return: The HTML markup of the bookcase page, or empty string on error.
         :raises RuntimeError: If the user is not logged in.
         """
+        if self._page is None:
+            raise RuntimeError("Browser page not initialized.")
         if not self._logged_in:
             raise RuntimeError("User not logged in. Please call login() first.")
 
@@ -364,9 +383,9 @@ class QidianBrowser(BaseBrowser):
             # Navigate to the bookcase page
             self._page.get(url)
 
-            # Randomized humanlike delay
+            # Randomized human-like delay
             base = wait_time if wait_time is not None else self._config.wait_time
-            sleep_with_random_delay(base, base * 0.2)
+            sleep_with_random_delay(base, mul_spread=1.2)
 
             html = str(self._page.html)
             logger.debug("[fetch] Fetched bookcase HTML from %s", url)
--- a/novel_downloader/core/requesters/qidian_requester/qidian_session.py
+++ b/novel_downloader/core/requesters/qidian_requester/qidian_session.py
@@ -123,7 +123,7 @@ class QidianSession(BaseSession):
         try:
             resp = self.get(url)
             resp.raise_for_status()
-            sleep_with_random_delay(base_delay, base_delay * 0.2)
+            sleep_with_random_delay(base_delay, mul_spread=1.2)
             return resp.text
         except Exception as exc:
             logger.warning(
@@ -157,7 +157,7 @@ class QidianSession(BaseSession):
         try:
             resp = self.get(url)
             resp.raise_for_status()
-            sleep_with_random_delay(base_delay, base_delay * 0.2)
+            sleep_with_random_delay(base_delay, mul_spread=1.2)
             return resp.text
         except Exception as exc:
             logger.warning(
@@ -186,7 +186,7 @@ class QidianSession(BaseSession):
         try:
             resp = self.get(self.QIDIAN_BOOKCASE_URL, allow_redirects=True)
             resp.raise_for_status()
-            sleep_with_random_delay(base_delay, base_delay * 0.2)
+            sleep_with_random_delay(base_delay, mul_spread=1.2)
             return resp.text
         except Exception as exc:
             logger.warning(
--- a/novel_downloader/core/savers/common_saver/common_epub.py
+++ b/novel_downloader/core/savers/common_saver/common_epub.py
@@ -212,7 +212,7 @@ def common_save_as_epub(
 
     # --- 5. Finalize EPUB ---
     logger.info("%s Building TOC and spine...", TAG)
-    book.toc = tuple(toc_list)
+    book.toc = toc_list
     book.spine = spine
     book.add_item(epub.EpubNcx())
     book.add_item(epub.EpubNav())
--- a/novel_downloader/locales/en.json
+++ b/novel_downloader/locales/en.json
@@ -58,6 +58,7 @@
   "download_option_site": "Website source, default is '{default}'.",
   "download_using_config": "Using config: {path}",
   "download_site_info": "Site: {site}",
+  "download_site_mode": "Mode: {mode}",
   "download_no_ids": "No book IDs provided. Exiting.",
   "download_fail_get_ids": "Failed to get book IDs from config: {err}",
   "download_only_example": "Only example book IDs found (e.g. '{example}').",
@@ -65,6 +66,9 @@
   "download_downloading": "Downloading book {book_id} from {site}...",
   "download_prompt_parse": "Parse...",
 
+  "login_prompt_intro": "Manual login is required. Please switch to the browser and log in.",
+  "login_prompt_press_enter": "Attempt {attempt}/{max_retries}: Press Enter after completing login in the browser...",
+
   "clean_logs": "Clean log directory",
   "clean_cache": "Clean scripts and browser cache",
   "clean_state": "Clean state files (state.json)",
--- a/novel_downloader/locales/zh.json
+++ b/novel_downloader/locales/zh.json
@@ -58,6 +58,7 @@
   "download_option_site": "网站来源, 默认为 '{default}'",
   "download_using_config": "使用配置: {path}",
   "download_site_info": "站点: {site}",
+  "download_site_mode": "使用模式: {mode}",
   "download_no_ids": "未提供书籍 ID, 正在退出",
   "download_fail_get_ids": "从配置获取书籍 ID 失败: {err}",
   "download_only_example": "只发现示例书籍 ID (例如 '{example}')",
@@ -65,6 +66,9 @@
   "download_downloading": "正在从 {site} 下载书籍 {book_id}...",
   "download_prompt_parse": "结束...",
 
+  "login_prompt_intro": "需要手动登录, 请切换到浏览器窗口完成登录",
+  "login_prompt_press_enter": "第 {attempt}/{max_retries} 次尝试: 请在浏览器中完成登录后按回车键...",
+
   "clean_logs": "清理日志目录",
   "clean_cache": "清理脚本和浏览器缓存",
   "clean_state": "清理状态文件 (state.json)",
--- a/novel_downloader/utils/constants.py
+++ b/novel_downloader/utils/constants.py
@@ -26,11 +26,12 @@ LOGGER_NAME = PACKAGE_NAME  # Root logger name
 # -----------------------------------------------------------------------------
 # Base config directory (e.g. ~/AppData/Local/novel_downloader/)
 BASE_CONFIG_DIR = Path(user_config_dir(APP_DIR_NAME, appauthor=False))
+WORK_DIR = Path.cwd()
 PACKAGE_ROOT: Path = Path(__file__).parent.parent
 LOCALES_DIR: Path = PACKAGE_ROOT / "locales"
 
 # Subdirectories under BASE_CONFIG_DIR
-LOGGER_DIR = BASE_CONFIG_DIR / "logs"
+LOGGER_DIR = WORK_DIR / "logs"
 JS_SCRIPT_DIR = BASE_CONFIG_DIR / "scripts"
 STATE_DIR = BASE_CONFIG_DIR / "state"
 DATA_DIR = BASE_CONFIG_DIR / "data"
--- a/novel_downloader/utils/time_utils/datetime_utils.py
+++ b/novel_downloader/utils/time_utils/datetime_utils.py
@@ -106,7 +106,7 @@ def calculate_time_difference(
     """
     Calculate the difference between two datetime values.
 
-    :param from_time_str: Date-time string "YYYYMMDD HH:MM:SS" for the start.
+    :param from_time_str: Date-time string "YYYY-MM-DD HH:MM:SS" for the start.
     :param tz_str: Timezone of from_time_str, e.g. 'UTC+8'. Defaults to 'UTC'.
     :param to_time_str: Optional date-time string for the end; if None, uses now().
     :param to_tz_str: Timezone of to_time_str. Defaults to 'UTC'.
--- a/novel_downloader/utils/time_utils/sleep_utils.py
+++ b/novel_downloader/utils/time_utils/sleep_utils.py
@@ -20,24 +20,40 @@ logger = logging.getLogger(__name__)
 
 
 def sleep_with_random_delay(
-    base: float, spread: float = 1.0, *, max_sleep: Optional[float] = None
+    base: float,
+    add_spread: float = 0.0,
+    mul_spread: float = 1.0,
+    *,
+    max_sleep: Optional[float] = None,
 ) -> None:
     """
-    Sleep for a random duration between `base` and `base + spread`,
-    optionally capped by `max_sleep`.
+    Sleep for a random duration by combining multiplicative and additive jitter.
 
-    Useful for simulating human-like behavior or preventing rate-limiting
-    issues in scripts.
+    The total sleep time is computed as:
 
-    :param base: Minimum number of seconds to sleep.
-    :param spread: Maximum extra seconds to add on top of base (default: 1.0).
-    :param max_sleep: Optional upper limit for the total sleep duration.
+        duration = base * uniform(1.0, mul_spread) + uniform(0, add_spread)
+
+    If `max_sleep` is provided, the duration will be capped at that value.
+
+    :param base: Base sleep time in seconds. Must be >= 0.
+    :param add_spread: Maximum extra seconds to add after scaling base.
+    :param mul_spread: Maximum multiplier factor for base; drawn from [1.0, mul_spread].
+    :param max_sleep: Optional upper limit for the final sleep duration.
     """
-    if base < 0 or spread < 0:
-        logger.warning("[time] Invalid parameters: base=%s, spread=%s", base, spread)
+    if base < 0 or add_spread < 0 or mul_spread < 0:
+        logger.warning(
+            "[sleep] Invalid parameters: base=%s, add_spread=%s, mul_spread=%s",
+            base,
+            add_spread,
+            mul_spread,
+        )
         return
 
-    duration = random.uniform(base, base + spread)
+    # Calculate the raw duration
+    multiplicative_jitter = random.uniform(1.0, mul_spread)
+    additive_jitter = random.uniform(0, add_spread)
+    duration = base * multiplicative_jitter + additive_jitter
+
     if max_sleep is not None:
         duration = min(duration, max_sleep)
 
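
Note: the call-site changes above map onto the new parameters cleanly. The old default `sleep_with_random_delay(base)`, uniform in [base, base + 1], becomes `add_spread=1.0`; the old `sleep_with_random_delay(base, base * 0.2)` becomes `mul_spread=1.2`, since base * uniform(1.0, 1.2) spans exactly [base, base + 0.2 * base]. Worked numbers for base = 5, computed with the same formula the function now uses:

    import random

    base = 5.0

    # CommonSession: sleep_with_random_delay(base, add_spread=1.0)
    # duration = 5 * uniform(1.0, 1.0) + uniform(0, 1.0)  ->  range [5.0, 6.0]
    d1 = base * random.uniform(1.0, 1.0) + random.uniform(0, 1.0)
    assert 5.0 <= d1 <= 6.0

    # QidianSession / QidianBrowser: sleep_with_random_delay(base, mul_spread=1.2)
    # duration = 5 * uniform(1.0, 1.2) + uniform(0, 0.0)  ->  range [5.0, 6.0]
    d2 = base * random.uniform(1.0, 1.2) + random.uniform(0, 0.0)
    assert 5.0 <= d2 <= 6.0
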
--- a/novel_downloader-1.2.1.dist-info/METADATA
+++ b/novel_downloader-1.2.2.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: novel-downloader
-Version: 1.2.1
+Version: 1.2.2
 Summary: A command-line tool for downloading Chinese web novels from Qidian and similar platforms.
 Author-email: Saudade Z <saudadez217@gmail.com>
 License: MIT License
--- a/novel_downloader-1.2.1.dist-info/RECORD
+++ b/novel_downloader-1.2.2.dist-info/RECORD
@@ -1,69 +1,69 @@
-novel_downloader/__init__.py,sha256=XOpc1EqvKxLGY9qvIlEY7_tCe_HUpPbsjT1S0DFq-68,242
+novel_downloader/__init__.py,sha256=LCdmt0Int1WSZ3n25_tdJ7Mm-gknw2F-Sg4xKyMLWJs,242
 novel_downloader/cli/__init__.py,sha256=ocGwOO4kmkby8VNol92UikMI1RPUJLv9i5xmB7wbpmw,198
 novel_downloader/cli/clean.py,sha256=9_hOrxKg8nY7q6cyR8iNech0vSREGagPBmdB4k8Te2U,3937
-novel_downloader/cli/download.py,sha256=l-Ht2duKI78EMR8vTEbdVnwFT9NkWe87l3L1LmmIuZc,4156
+novel_downloader/cli/download.py,sha256=WZLnf8cckAGe9t7hI6WWgjrm_KRa67pTwEC-YJBAmFc,4223
 novel_downloader/cli/interactive.py,sha256=6vROwPsvupb_TWH1dd_78FDqvtAaiPfyEBvQVai9E9c,2154
 novel_downloader/cli/main.py,sha256=km1MwHzIVZFcxUlKLRiiMctJlGHWKZNjRKrgAGQjkMs,1183
 novel_downloader/cli/settings.py,sha256=bV3Hgg502V9goeP3g2xSiF-PMQB9G32qGmjb8ncTENA,6522
 novel_downloader/config/__init__.py,sha256=tJ2k7nwZbxgqw1kKgJM4g1yu5-2fsx2LXU3VTadrTJ4,1129
-novel_downloader/config/adapter.py,sha256=ksirRvp4038Xe3tu93Mwm93Iff0fOKNinJnXA746Xik,5949
+novel_downloader/config/adapter.py,sha256=R1y1g7uDBxtDz_sVsDGl2-iasQNAtWSmYdSk-mcFoAA,6698
 novel_downloader/config/loader.py,sha256=_rm9rp1lmHYg-A7F_0PQETWjlXbvtyJYaqQD5oI-1O0,5690
-novel_downloader/config/models.py,sha256=WWzwwWQB2BiiXc3KhJxW6N3vr9JIrqEZ0s4reIcH-vA,5087
+novel_downloader/config/models.py,sha256=i7i1yQzY5lAZWDBhi5ZXqzthlanhvhAc3oViMMoJFyw,5139
 novel_downloader/config/site_rules.py,sha256=WRw12Tfue-ErAPGKq506gRIqKOxWU-u96kay3JDgTNc,3031
 novel_downloader/core/__init__.py,sha256=D-ACiIqP0rdARZmjBnF6WMKGvvjVtxGRIM7GhOS9kh4,779
 novel_downloader/core/downloaders/__init__.py,sha256=Qp0q4p7zTy7lReQQF0hDP7ALUQnNflSNNIl4F7iPGz0,601
 novel_downloader/core/downloaders/base_async_downloader.py,sha256=8lMSVLU-VtGIdEMGkS0s_rEJpqCgu2WaljBvsEDyPN4,4281
 novel_downloader/core/downloaders/base_downloader.py,sha256=kFw_yn3QRbWqU9jXJni4IGA8P3AxZf9gfjgfu01TauY,5371
 novel_downloader/core/downloaders/common_asynb_downloader.py,sha256=u1ODvh_n13CSGWwjkBIMoThTbCeACX5mOhv5ub2Cd0c,7120
-novel_downloader/core/downloaders/common_downloader.py,sha256=Ru60j-S9I-Nj1P7gNZJjohJ1H8gAuvK1bELPMeZ2TTo,6532
-novel_downloader/core/downloaders/qidian_downloader.py,sha256=Btt59d8N925gx3RqTN7rqaHhMQgLxzPtOhE4Iq6wl1k,7354
+novel_downloader/core/downloaders/common_downloader.py,sha256=-HT7pi8bCSAdIcrU9CFHz26iqgtPhlUTI2MoE7ldCfY,6506
+novel_downloader/core/downloaders/qidian_downloader.py,sha256=KH-rPvwhWsZ9hyuvZxhQ9Q42LDFiAABpMlgReeX2nzI,7328
 novel_downloader/core/factory/__init__.py,sha256=qGqeROj8Dp_5iNtgWytkrUNI0ICab7SCNK3lba3H_NU,743
-novel_downloader/core/factory/downloader_factory.py,sha256=54lgfJ8KDJWYlBfWb6iWniRe55K9bPkcTTRVtipH7nU,5047
+novel_downloader/core/factory/downloader_factory.py,sha256=ldWFZ3189rxW2cWTNcwidiPisJNuaxD8OYB5YyjOXRU,5110
 novel_downloader/core/factory/parser_factory.py,sha256=4PxiagtSKY58azFsmEWfq2f5vhVbtMFm5gAXS3oQF08,1828
 novel_downloader/core/factory/requester_factory.py,sha256=OEK2S-rj8vw4IdDTMTEWcb7k7lRmmWBnfijhYnlOCc0,3173
 novel_downloader/core/factory/saver_factory.py,sha256=OgZPDOWVIfhxLFiVBKI5jaNOEKmzP9f3YWDOnw63Hfc,1275
 novel_downloader/core/interfaces/__init__.py,sha256=jeT8BmEEjIazVyX80ZdzQXgTccEj-ktG6Bbjs9uAVUM,843
 novel_downloader/core/interfaces/async_downloader_protocol.py,sha256=QWjdhNc39hC3bD8Q1lUpBv2GqX3roxVxzKWh6cgwLhk,1002
-novel_downloader/core/interfaces/async_requester_protocol.py,sha256=fvxw3fFO-azxc9RMvVjq8CUv7RViBQG1KUKIafr-qPE,2281
+novel_downloader/core/interfaces/async_requester_protocol.py,sha256=Jd54mJWnNt_BE2RRGYGmGDe93MM0I4xBLL4m6u6V_Is,2344
 novel_downloader/core/interfaces/downloader_protocol.py,sha256=YJdSAE9uBWF7wNLWmlKsYd3J7M4rXOjqDV5m9O7kfio,947
 novel_downloader/core/interfaces/parser_protocol.py,sha256=A2wIe7shEGdeKsNDFpMuPI8HFrK_H34HOseVAzqcnTo,1280
-novel_downloader/core/interfaces/requester_protocol.py,sha256=TbzVWKf4w0k5Wk1UmoAr49GVuGRv_Z1abTaXM_d6_KM,1994
+novel_downloader/core/interfaces/requester_protocol.py,sha256=Zskzwom104GWjB0LrV-VvAR7vfTB3LAcHvV110ISdo0,2058
 novel_downloader/core/interfaces/saver_protocol.py,sha256=_3ha4koF-xBrogJwvCOsQM3GEXfLn3_aYZHvmRd1ZR4,1623
 novel_downloader/core/parsers/__init__.py,sha256=TSarXiYxthyjFgruRFkpMEucqmylrCtR_4Y1C1RVMhk,544
-novel_downloader/core/parsers/base_parser.py,sha256=TCytPIEmh7mJPiaIfPyf2P16OO5jnB0tTdi1duBv76Q,2924
+novel_downloader/core/parsers/base_parser.py,sha256=mRv6tmbWIuX5XnXYnSP9JMsXECgykZymq4zWNNk7wDQ,2936
 novel_downloader/core/parsers/common_parser/__init__.py,sha256=GmV7Yj57zbLK0-WYmPtv6x1AhKwa3lzKlqsU7mc3lzg,379
-novel_downloader/core/parsers/common_parser/helper.py,sha256=gH5M5SAc3aHWj6HylJttDrw4keIZb9RgBc_gT8jhhHo,12017
+novel_downloader/core/parsers/common_parser/helper.py,sha256=gY3HHYnXGHHiCaig59T7IlI7Fa7XEqlmAAKWGmeSmXI,12090
 novel_downloader/core/parsers/common_parser/main_parser.py,sha256=ANvkZMYACZEwaYLz56lLyBz9dc_XqZMvWp2_jWWoF3g,2868
 novel_downloader/core/parsers/qidian_parser/__init__.py,sha256=mYbZvWMn4oFFX3qGhGx6Qo6oqvE5I71b-jbanWsTOSI,522
 novel_downloader/core/parsers/qidian_parser/browser/__init__.py,sha256=E8kMkQW_LdmVFYpHbbYtJTLNTYOy9XItQf_h7KcnGIU,353
-novel_downloader/core/parsers/qidian_parser/browser/chapter_encrypted.py,sha256=65pm3-DrHUH--wzo317yks0fbpcFZm_INB-hVboTqBo,17696
+novel_downloader/core/parsers/qidian_parser/browser/chapter_encrypted.py,sha256=v-sCfQYyRNOdZ12Qlb9b5Tqq69rcOS_CKBUG5XG9WUs,17713
 novel_downloader/core/parsers/qidian_parser/browser/chapter_normal.py,sha256=SZ9Ncw6yLCRo4gJNEtv4V8N2WfONvEAc8lftJREsBTY,3190
 novel_downloader/core/parsers/qidian_parser/browser/chapter_router.py,sha256=qjN10SpQCUMjFcCaWnqIZhcLDx5sN5jzDfWIrBSbnyo,2101
-novel_downloader/core/parsers/qidian_parser/browser/main_parser.py,sha256=vElnyAzZ5douexl-1n_oUY64pfB6st0wYzcF9DIdVec,3844
+novel_downloader/core/parsers/qidian_parser/browser/main_parser.py,sha256=8nmmTlhgina3JNrKoIMkFoNF6d9ztki7kAcCyGysy4g,3856
 novel_downloader/core/parsers/qidian_parser/session/__init__.py,sha256=Rs2Sz1vNn1-UdpY0O_reECBN4kgb3JYHQZoZ20P7lHU,358
-novel_downloader/core/parsers/qidian_parser/session/chapter_encrypted.py,sha256=EgY9qo9v3wx2CZ95zD_Rzcbu0FvfwWtkLuaP3mEcP2c,15999
+novel_downloader/core/parsers/qidian_parser/session/chapter_encrypted.py,sha256=XbQV0MQVsiumyrwGddAUaK9JrZpWZlfbZ0_2vBhGupU,16016
 novel_downloader/core/parsers/qidian_parser/session/chapter_normal.py,sha256=ySQ7vUs4dLCkHv_nPSifDnH3xq3wqxa9FWGy_ETX-uw,3875
 novel_downloader/core/parsers/qidian_parser/session/chapter_router.py,sha256=ob8ULDhNdnJgU3rlA-tLy0w0PqbC20vi8auFqQipJww,1978
-novel_downloader/core/parsers/qidian_parser/session/main_parser.py,sha256=F_uFibBD6BUIf7JkwurfQ9TEA5zAJqhR5C4Bf9b7hZs,3935
+novel_downloader/core/parsers/qidian_parser/session/main_parser.py,sha256=fL-IYhd99Rbb2eHCOy0DmYpmHeIOSe3DIFbtQKCj53s,3947
 novel_downloader/core/parsers/qidian_parser/session/node_decryptor.py,sha256=7ZuneGzL7HX1g8taMzn-2qkJXLDHgrVee-FDkMZtIIw,5755
 novel_downloader/core/parsers/qidian_parser/shared/__init__.py,sha256=K5HX7pgiRiJuTLdbQDbtm60mO-sXgr6bo5Ft8H1-JLs,978
-novel_downloader/core/parsers/qidian_parser/shared/book_info_parser.py,sha256=juCV72QKcaAjQZU-j6XiBM1VgdRrXY9w_2NHrflHsv4,3047
-novel_downloader/core/parsers/qidian_parser/shared/helpers.py,sha256=E8cWVhehaMLNXQAq2whIKl29xAULUzW4MdZvWshDb9Y,4284
+novel_downloader/core/parsers/qidian_parser/shared/book_info_parser.py,sha256=CGoJXRrf65CjJU3wPjZ8FRy96zV2rkYyWSMEyEFUrzE,4853
+novel_downloader/core/parsers/qidian_parser/shared/helpers.py,sha256=zQqcts8f_JddtKvQjvVJBJQvN5HjeaqlP2I3ztwO75o,4306
 novel_downloader/core/requesters/__init__.py,sha256=U2jDvt5RxF5P9yL2lwyZ-cRueJBZgRnjil3_5TvAh3Y,798
-novel_downloader/core/requesters/base_async_session.py,sha256=7hQfdUCFL-JnJiEltPMfWVrX2tjSntnnAazG1ygh6pU,10564
-novel_downloader/core/requesters/base_browser.py,sha256=oHJA0JecYyODkfTmL2L2qMe4PnR-Y_Md_zblD5wQdP4,6759
-novel_downloader/core/requesters/base_session.py,sha256=p7TvslYam_8WhIxp7LSBPQ7nWh0vG5KrcjuW24B4y1U,8255
+novel_downloader/core/requesters/base_async_session.py,sha256=xojNO_TCLUHmkuk379ttoiScHwmLCNhQAFzQKSpk5AM,10635
+novel_downloader/core/requesters/base_browser.py,sha256=HceDLOkl86K_nAkZR1XGIzSa-4KyFGt0dG5PyhefJ3c,6899
+novel_downloader/core/requesters/base_session.py,sha256=0yl3wR-myu-r9NrtwnGWzEFMzWPxcD3ROT5x9xeX6zQ,8328
 novel_downloader/core/requesters/common_requester/__init__.py,sha256=kVKZyrS7PVlUnaV1xGsZdoW2J9XuyQ11A4oMV9Cc64Q,523
 novel_downloader/core/requesters/common_requester/common_async_session.py,sha256=Bt_pgiJdxfoSDswMANeiZxaUQ4E5YitqEmcTlL4ImLY,3556
-novel_downloader/core/requesters/common_requester/common_session.py,sha256=pdTad7LibvHh_4Uh801unyx1qN1SUsSivKMeM6GLyUk,4654
+novel_downloader/core/requesters/common_requester/common_session.py,sha256=hzah5pvRqFBTzmz1J5d7GE5jbpKSOZWBPz-lh18cTv4,4686
 novel_downloader/core/requesters/qidian_requester/__init__.py,sha256=s0ldqNvfqUsEnm_biM_bXEGN7gz88Z5IAx1OBvGW1lY,682
-novel_downloader/core/requesters/qidian_requester/qidian_broswer.py,sha256=kzhLvdX6wIxy-CmEN-7wkrTCEEC4dVPBqGMLt3aj8Uc,14111
-novel_downloader/core/requesters/qidian_requester/qidian_session.py,sha256=huZflPnQoKglGNxP0pPD-wVoRZC90ER7hrwFo7WbM0M,7670
+novel_downloader/core/requesters/qidian_requester/qidian_broswer.py,sha256=BkweHqf-TEC2VCGKFxdUp24lPDuQPUepfHPFUXUgTh0,15146
+novel_downloader/core/requesters/qidian_requester/qidian_session.py,sha256=R-B6Hc61v5C-KGdEckDJWBt7Bnn_dbpW5ukQgh3x--Q,7664
 novel_downloader/core/savers/__init__.py,sha256=p9O6p8ZUblrSheDVJoTSuDr6s1mJpQi8mz3QmQ16nHs,391
 novel_downloader/core/savers/base_saver.py,sha256=VocVl8go80IkzAp9qY4dgZjmLbK8TVkg48Ugl53pxrc,5513
 novel_downloader/core/savers/qidian_saver.py,sha256=MVAcWdM-IX_qsRW5It2aIkx9QPdRCLcZGcD3ihfm3gU,627
 novel_downloader/core/savers/common_saver/__init__.py,sha256=Pg52cjAwG9fgT5qWgVer5oLMACU-duNFNbtfcq4t5xA,292
-novel_downloader/core/savers/common_saver/common_epub.py,sha256=nkLCMy6B5-4qiCDc6hHgX1eQc0xPHgZvNSef34lPIiw,7189
+novel_downloader/core/savers/common_saver/common_epub.py,sha256=80a-ibhhTy08wsZr7uiqrlvmITcsd1MH-z1OAj1Dl3c,7182
 novel_downloader/core/savers/common_saver/common_txt.py,sha256=fgBoKPApmODeUoxCi54txoD322G7mlU9o8HEM5X5atk,5765
 novel_downloader/core/savers/common_saver/main_saver.py,sha256=scjERGgC7X2R3f0KuLwf462VHkFJ2D2duod53aWlMkg,2765
 novel_downloader/core/savers/epub_utils/__init__.py,sha256=rqWUMkdebZ2dO-aZRYSutZ-w5uKgFVwe83nBJbYx40E,738
@@ -71,8 +71,8 @@ novel_downloader/core/savers/epub_utils/css_builder.py,sha256=ZiYEAuC4_u3BWBcf6-
 novel_downloader/core/savers/epub_utils/initializer.py,sha256=QuPWuChq2dcFlGp-PeZBrhfqSwoM7Nko-zHe9beluZw,3255
 novel_downloader/core/savers/epub_utils/text_to_html.py,sha256=UW70pMmc9HdgSCMKVEgukONFdlNCmQIKs9PCDLNAFQU,4117
 novel_downloader/core/savers/epub_utils/volume_intro.py,sha256=1NhnLKRL_ieoDgXTRt3vTNeENGvUj7JTZxTME4TmYm8,1820
-novel_downloader/locales/en.json,sha256=7xPlFLf6ByH0VMnGTTRC_6gRSW2IdTvQnKa5_FquSsk,5277
-novel_downloader/locales/zh.json,sha256=TylYUKSUUbG4Fh_DQazUNTY96HriQWyBfKjh1FrI0xM,5163
+novel_downloader/locales/en.json,sha256=4Zn2cD2vGkHHIam2j_Ig-gpicD14L3ufKX_lq0k0yR4,5531
+novel_downloader/locales/zh.json,sha256=nKmxyb84gmq9tg9DFLSx1r0MWLmo4Jwzf27S4ggDzV4,5420
 novel_downloader/resources/config/rules.toml,sha256=hrED6h3Z3cjSY5hRPQhp4TFAU5QXnN9xHfVABOJQNrM,4979
 novel_downloader/resources/config/settings.yaml,sha256=NxtYrf2HWJQyGwIuj3f3JKA72Do5WPvSghonPM0Q7SM,3493
 novel_downloader/resources/css_styles/main.css,sha256=WM6GePwdOGgM86fbbOxQ0_0oerTBDZeQHt8zRVfcJp8,1617
@@ -83,7 +83,7 @@ novel_downloader/resources/json/replace_word_map.json,sha256=ptL9sGO9aK7rnnAaOIy
 novel_downloader/resources/text/blacklist.txt,sha256=sovK9JgARZP3lud5b1EZgvv8LSVKPthf4ADpCSZZgQ8,154
 novel_downloader/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 novel_downloader/utils/cache.py,sha256=NB5j7CWNscfE4eFA0A9O5mYR1eW-216M-ljlMo0LDqE,615
-novel_downloader/utils/constants.py,sha256=VETwA_prTjLUejmJgrRnOn0QbwNKO4aTHf41dJJy7uE,5505
+novel_downloader/utils/constants.py,sha256=geLu9ufkM1plktIbF2VRzS5N5_N2XuHn0T03Kc0lGCQ,5520
 novel_downloader/utils/crypto_utils.py,sha256=whGgir2oi_17pNteiIRztiMNaB-ZP63GMP3KPJkXA80,4178
 novel_downloader/utils/hash_store.py,sha256=rpr61GsvZ9wT_1fEn4_83JZ-nWc1KLtcvb56ZqHawdk,9826
 novel_downloader/utils/hash_utils.py,sha256=6S4-Q_uLNzdEDkBOUG9QEcflbuPFNDAYe6Gx718AOo4,2998
@@ -105,11 +105,11 @@ novel_downloader/utils/text_utils/diff_display.py,sha256=cLjpeAOtpLOVMHnjgq2yv91
 novel_downloader/utils/text_utils/font_mapping.py,sha256=lai3lZSaxvSL1BZWgD6JLWdI56aWlZDeZ3zUYQS8pkc,916
 novel_downloader/utils/text_utils/text_cleaning.py,sha256=1yuaDeUBHqBRkkWhw43rV1i8TL5rx1yK0I78FyEwcTw,1656
 novel_downloader/utils/time_utils/__init__.py,sha256=bRpO14eorfH5C5xfqvW7QwSe3fQHhpr34j4O3qY5cGc,580
-novel_downloader/utils/time_utils/datetime_utils.py,sha256=xYKuI2K6DKwZdfUBZ0j1SNbmHjhYU7hIu46NzlZqr3o,4887
-novel_downloader/utils/time_utils/sleep_utils.py,sha256=CffWLotrhOZ-uYwC8Nb1cwZrAO2p83JDIrCGZLQuEC0,1384
-novel_downloader-1.2.1.dist-info/licenses/LICENSE,sha256=XgmnH0mBf-qEiizoVAfJQAKzPB9y3rBa-ni7M0Aqv4A,1066
-novel_downloader-1.2.1.dist-info/METADATA,sha256=3Wvzvnm4MzObnOT-71EOrmC1UbO1f9yNbfOlFpdFgdw,6291
-novel_downloader-1.2.1.dist-info/WHEEL,sha256=DnLRTWE75wApRYVsjgc6wsVswC54sMSJhAEd4xhDpBk,91
-novel_downloader-1.2.1.dist-info/entry_points.txt,sha256=v23QrJrfrAcYpxUYslCVxubOVRRTaTw7vlG_tfMsFP8,65
-novel_downloader-1.2.1.dist-info/top_level.txt,sha256=hP4jYWM2LTm1jxsW4hqEB8N0dsRvldO2QdhggJT917I,17
-novel_downloader-1.2.1.dist-info/RECORD,,
+novel_downloader/utils/time_utils/datetime_utils.py,sha256=BX6W2dWpqEDMpAXF0pm2bPT7xZsy_Q_TZAiwd3dsiiE,4883
+novel_downloader/utils/time_utils/sleep_utils.py,sha256=xClrFcjn4DdOO5KskO5WkGKt6LKZx0Cb8SMH8mQrlT0,1864
+novel_downloader-1.2.2.dist-info/licenses/LICENSE,sha256=XgmnH0mBf-qEiizoVAfJQAKzPB9y3rBa-ni7M0Aqv4A,1066
+novel_downloader-1.2.2.dist-info/METADATA,sha256=IjqO-vr2DU20Tdz3JEHo5hSWI6-3qDuXsbFAW8Af68c,6291
+novel_downloader-1.2.2.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
+novel_downloader-1.2.2.dist-info/entry_points.txt,sha256=v23QrJrfrAcYpxUYslCVxubOVRRTaTw7vlG_tfMsFP8,65
+novel_downloader-1.2.2.dist-info/top_level.txt,sha256=hP4jYWM2LTm1jxsW4hqEB8N0dsRvldO2QdhggJT917I,17
+novel_downloader-1.2.2.dist-info/RECORD,,
--- a/novel_downloader-1.2.1.dist-info/WHEEL
+++ b/novel_downloader-1.2.2.dist-info/WHEEL
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (80.4.0)
+Generator: setuptools (80.7.1)
 Root-Is-Purelib: true
 Tag: py3-none-any