novel-downloader 1.3.1__py3-none-any.whl → 1.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98) hide show
  1. novel_downloader/__init__.py +1 -1
  2. novel_downloader/cli/download.py +1 -1
  3. novel_downloader/config/adapter.py +3 -0
  4. novel_downloader/config/models.py +3 -0
  5. novel_downloader/core/downloaders/__init__.py +23 -1
  6. novel_downloader/core/downloaders/biquge/__init__.py +2 -0
  7. novel_downloader/core/downloaders/biquge/biquge_async.py +27 -0
  8. novel_downloader/core/downloaders/biquge/biquge_sync.py +5 -3
  9. novel_downloader/core/downloaders/common/common_async.py +5 -11
  10. novel_downloader/core/downloaders/common/common_sync.py +18 -18
  11. novel_downloader/core/downloaders/esjzone/__init__.py +14 -0
  12. novel_downloader/core/downloaders/esjzone/esjzone_async.py +27 -0
  13. novel_downloader/core/downloaders/esjzone/esjzone_sync.py +27 -0
  14. novel_downloader/core/downloaders/qianbi/__init__.py +14 -0
  15. novel_downloader/core/downloaders/qianbi/qianbi_async.py +27 -0
  16. novel_downloader/core/downloaders/qianbi/qianbi_sync.py +27 -0
  17. novel_downloader/core/downloaders/qidian/qidian_sync.py +9 -14
  18. novel_downloader/core/downloaders/sfacg/__init__.py +14 -0
  19. novel_downloader/core/downloaders/sfacg/sfacg_async.py +27 -0
  20. novel_downloader/core/downloaders/sfacg/sfacg_sync.py +27 -0
  21. novel_downloader/core/downloaders/yamibo/__init__.py +14 -0
  22. novel_downloader/core/downloaders/yamibo/yamibo_async.py +27 -0
  23. novel_downloader/core/downloaders/yamibo/yamibo_sync.py +27 -0
  24. novel_downloader/core/factory/downloader.py +35 -7
  25. novel_downloader/core/factory/parser.py +23 -2
  26. novel_downloader/core/factory/requester.py +32 -7
  27. novel_downloader/core/factory/saver.py +14 -2
  28. novel_downloader/core/interfaces/async_requester.py +3 -3
  29. novel_downloader/core/interfaces/parser.py +7 -2
  30. novel_downloader/core/interfaces/sync_requester.py +3 -3
  31. novel_downloader/core/parsers/__init__.py +15 -5
  32. novel_downloader/core/parsers/base.py +7 -2
  33. novel_downloader/core/parsers/biquge/main_parser.py +13 -4
  34. novel_downloader/core/parsers/common/main_parser.py +13 -4
  35. novel_downloader/core/parsers/esjzone/__init__.py +10 -0
  36. novel_downloader/core/parsers/esjzone/main_parser.py +220 -0
  37. novel_downloader/core/parsers/qianbi/__init__.py +10 -0
  38. novel_downloader/core/parsers/qianbi/main_parser.py +142 -0
  39. novel_downloader/core/parsers/qidian/browser/main_parser.py +13 -4
  40. novel_downloader/core/parsers/qidian/session/main_parser.py +13 -4
  41. novel_downloader/core/parsers/sfacg/__init__.py +10 -0
  42. novel_downloader/core/parsers/sfacg/main_parser.py +166 -0
  43. novel_downloader/core/parsers/yamibo/__init__.py +10 -0
  44. novel_downloader/core/parsers/yamibo/main_parser.py +194 -0
  45. novel_downloader/core/requesters/__init__.py +33 -3
  46. novel_downloader/core/requesters/base/async_session.py +14 -10
  47. novel_downloader/core/requesters/base/browser.py +4 -7
  48. novel_downloader/core/requesters/base/session.py +25 -11
  49. novel_downloader/core/requesters/biquge/__init__.py +2 -0
  50. novel_downloader/core/requesters/biquge/async_session.py +71 -0
  51. novel_downloader/core/requesters/biquge/session.py +6 -6
  52. novel_downloader/core/requesters/common/async_session.py +4 -4
  53. novel_downloader/core/requesters/common/session.py +6 -6
  54. novel_downloader/core/requesters/esjzone/__init__.py +13 -0
  55. novel_downloader/core/requesters/esjzone/async_session.py +211 -0
  56. novel_downloader/core/requesters/esjzone/session.py +235 -0
  57. novel_downloader/core/requesters/qianbi/__init__.py +13 -0
  58. novel_downloader/core/requesters/qianbi/async_session.py +96 -0
  59. novel_downloader/core/requesters/qianbi/session.py +125 -0
  60. novel_downloader/core/requesters/qidian/broswer.py +9 -9
  61. novel_downloader/core/requesters/qidian/session.py +14 -11
  62. novel_downloader/core/requesters/sfacg/__init__.py +13 -0
  63. novel_downloader/core/requesters/sfacg/async_session.py +204 -0
  64. novel_downloader/core/requesters/sfacg/session.py +242 -0
  65. novel_downloader/core/requesters/yamibo/__init__.py +13 -0
  66. novel_downloader/core/requesters/yamibo/async_session.py +211 -0
  67. novel_downloader/core/requesters/yamibo/session.py +237 -0
  68. novel_downloader/core/savers/__init__.py +15 -3
  69. novel_downloader/core/savers/base.py +3 -7
  70. novel_downloader/core/savers/common/epub.py +21 -33
  71. novel_downloader/core/savers/common/main_saver.py +3 -1
  72. novel_downloader/core/savers/common/txt.py +1 -2
  73. novel_downloader/core/savers/epub_utils/__init__.py +14 -5
  74. novel_downloader/core/savers/epub_utils/css_builder.py +1 -0
  75. novel_downloader/core/savers/epub_utils/image_loader.py +89 -0
  76. novel_downloader/core/savers/epub_utils/initializer.py +1 -0
  77. novel_downloader/core/savers/epub_utils/text_to_html.py +48 -1
  78. novel_downloader/core/savers/epub_utils/volume_intro.py +1 -0
  79. novel_downloader/core/savers/esjzone.py +25 -0
  80. novel_downloader/core/savers/qianbi.py +25 -0
  81. novel_downloader/core/savers/sfacg.py +25 -0
  82. novel_downloader/core/savers/yamibo.py +25 -0
  83. novel_downloader/locales/en.json +1 -0
  84. novel_downloader/locales/zh.json +1 -0
  85. novel_downloader/resources/config/settings.toml +40 -4
  86. novel_downloader/utils/constants.py +4 -0
  87. novel_downloader/utils/file_utils/io.py +1 -1
  88. novel_downloader/utils/network.py +51 -38
  89. novel_downloader/utils/time_utils/__init__.py +2 -1
  90. novel_downloader/utils/time_utils/datetime_utils.py +3 -1
  91. novel_downloader/utils/time_utils/sleep_utils.py +44 -2
  92. {novel_downloader-1.3.1.dist-info → novel_downloader-1.3.3.dist-info}/METADATA +29 -24
  93. novel_downloader-1.3.3.dist-info/RECORD +166 -0
  94. novel_downloader-1.3.1.dist-info/RECORD +0 -127
  95. {novel_downloader-1.3.1.dist-info → novel_downloader-1.3.3.dist-info}/WHEEL +0 -0
  96. {novel_downloader-1.3.1.dist-info → novel_downloader-1.3.3.dist-info}/entry_points.txt +0 -0
  97. {novel_downloader-1.3.1.dist-info → novel_downloader-1.3.3.dist-info}/licenses/LICENSE +0 -0
  98. {novel_downloader-1.3.1.dist-info → novel_downloader-1.3.3.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,25 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ novel_downloader.core.savers.qianbi
4
+ -----------------------------------
5
+
6
+ """
7
+
8
+ from novel_downloader.config.models import SaverConfig
9
+
10
+ from .common import CommonSaver
11
+
12
+
13
+ class QianbiSaver(CommonSaver):
14
+ def __init__(
15
+ self,
16
+ config: SaverConfig,
17
+ ):
18
+ super().__init__(
19
+ config,
20
+ site="qianbi",
21
+ chap_folders=["chapters"],
22
+ )
23
+
24
+
25
+ __all__ = ["QianbiSaver"]
@@ -0,0 +1,25 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ novel_downloader.core.savers.sfacg
4
+ ----------------------------------
5
+
6
+ """
7
+
8
+ from novel_downloader.config.models import SaverConfig
9
+
10
+ from .common import CommonSaver
11
+
12
+
13
+ class SfacgSaver(CommonSaver):
14
+ def __init__(
15
+ self,
16
+ config: SaverConfig,
17
+ ):
18
+ super().__init__(
19
+ config,
20
+ site="sfacg",
21
+ chap_folders=["chapters"],
22
+ )
23
+
24
+
25
+ __all__ = ["SfacgSaver"]
@@ -0,0 +1,25 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ novel_downloader.core.savers.yamibo
4
+ -----------------------------------
5
+
6
+ """
7
+
8
+ from novel_downloader.config.models import SaverConfig
9
+
10
+ from .common import CommonSaver
11
+
12
+
13
+ class YamiboSaver(CommonSaver):
14
+ def __init__(
15
+ self,
16
+ config: SaverConfig,
17
+ ):
18
+ super().__init__(
19
+ config,
20
+ site="yamibo",
21
+ chap_folders=["chapters"],
22
+ )
23
+
24
+
25
+ __all__ = ["YamiboSaver"]
@@ -72,6 +72,7 @@
72
72
  "session_login_prompt_intro": "Failed to restore login from saved cookies. Please log in via browser, then paste the cookie string below.",
73
73
  "session_login_prompt_paste_cookie": "Attempt {attempt}/{max_retries}: Paste your browser cookie string and press Enter:",
74
74
  "session_login_prompt_invalid_cookie": "Invalid cookie. Please copy and paste again.",
75
+ "session_login_failed": "Login to {site} failed. Please check your credentials or try again later.",
75
76
 
76
77
  "clean_logs": "Clean log directory",
77
78
  "clean_cache": "Clean scripts and browser cache",
@@ -72,6 +72,7 @@
72
72
  "session_login_prompt_intro": "尝试使用历史 Cookie 恢复登录失败, 请在浏览器登录后从开发者工具复制 Cookie 粘贴至下方",
73
73
  "session_login_prompt_paste_cookie": "第 {attempt}/{max_retries} 次尝试, 请粘贴 Cookie 字符串并回车:",
74
74
  "session_login_prompt_invalid_cookie": "Cookie 格式不正确, 请重新复制粘贴",
75
+ "session_login_failed": "登录 {site} 失败, 请检查账号或稍后再试",
75
76
 
76
77
  "clean_logs": "清理日志目录",
77
78
  "clean_cache": "清理脚本和浏览器缓存",
@@ -45,7 +45,7 @@ ocr_weight = 0.5
45
45
  vec_weight = 0.5
46
46
 
47
47
  # 各站点的特定配置
48
- [sites.qidian]
48
+ [sites.qidian] # 起点中文网
49
49
  # 小说 ID 列表
50
50
  # 例如: 访问 https://www.qidian.com/book/1010868264/
51
51
  # 该小说的 ID 就是 1010868264
@@ -56,17 +56,53 @@ book_ids = [
56
56
  mode = "browser" # browser / session
57
57
  login_required = true # 是否需要登录才能访问
58
58
 
59
- [sites.biquge]
59
+ [sites.biquge] # 笔趣阁
60
60
  book_ids = [
61
61
  "0000000000",
62
62
  "0000000000"
63
63
  ]
64
64
  mode = "session" # async / session
65
- login_required = false # 是否需要登录才能访问
65
+ login_required = false
66
+
67
+ [sites.qianbi] # 铅笔小说
68
+ book_ids = [
69
+ "0000000000",
70
+ "0000000000"
71
+ ]
72
+ mode = "session" # async / session
73
+ login_required = false
74
+
75
+ [sites.sfacg] # SF轻小说
76
+ book_ids = [
77
+ "0000000000",
78
+ "0000000000"
79
+ ]
80
+ mode = "session" # async / session
81
+ login_required = false
82
+
83
+ [sites.esjzone] # ESJ Zone
84
+ book_ids = [
85
+ "0000000000",
86
+ "0000000000"
87
+ ]
88
+ mode = "session" # async / session
89
+ login_required = true
90
+ username = "youremail@domain.com" # 登录邮箱
91
+ password = "yourpassword" # 登录密码
92
+
93
+ [sites.yamibo] # 百合会
94
+ book_ids = [
95
+ "0000000000",
96
+ "0000000000"
97
+ ]
98
+ mode = "session" # async / session
99
+ login_required = false
100
+ username = "yourusername" # 登录账户
101
+ password = "yourpassword" # 登录密码
66
102
 
67
103
  [sites.common]
68
104
  mode = "session" # async / session
69
- login_required = false # 是否需要登录才能访问
105
+ login_required = false
70
106
 
71
107
  # 输出文件格式及相关选项
72
108
  [output]
@@ -116,6 +116,10 @@ BLACKLIST_PATH = files("novel_downloader.resources.text").joinpath("blacklist.tx
116
116
  EPUB_IMAGE_FOLDER = "Images"
117
117
  EPUB_TEXT_FOLDER = "Text"
118
118
 
119
+ EPUB_IMAGE_WRAPPER = (
120
+ '<div class="duokan-image-single illus"><img src="../Images/{filename}" /></div>'
121
+ )
122
+
119
123
  EPUB_OPTIONS = {
120
124
  # guide 是 EPUB 2 的一个部分, 包含封面, 目录, 索引等重要导航信息
121
125
  "epub2_guide": True,
@@ -103,7 +103,7 @@ def _write_file(
103
103
  tmp.write(content_to_write)
104
104
  tmp_path = Path(tmp.name)
105
105
  tmp_path.replace(path)
106
- logger.info("[file] '%s' written successfully", path)
106
+ logger.debug("[file] '%s' written successfully", path)
107
107
  return True
108
108
  except Exception as exc:
109
109
  logger.warning("[file] Error writing %r: %s", path, exc)
@@ -16,7 +16,7 @@ from urllib.parse import unquote, urlparse
16
16
  import requests
17
17
 
18
18
  from .constants import DEFAULT_HEADERS, DEFAULT_IMAGE_SUFFIX
19
- from .file_utils.io import _get_non_conflicting_path, _write_file, read_binary_file
19
+ from .file_utils.io import _get_non_conflicting_path, _write_file
20
20
 
21
21
  logger = logging.getLogger(__name__)
22
22
 
@@ -84,28 +84,28 @@ def image_url_to_filename(url: str) -> str:
84
84
  return filename
85
85
 
86
86
 
87
- def download_image_as_bytes(
87
+ def download_image(
88
88
  url: str,
89
89
  target_folder: str | Path | None = None,
90
+ target_name: str | None = None,
90
91
  *,
91
92
  timeout: int = 10,
92
93
  retries: int = 3,
93
94
  backoff: float = 0.5,
94
95
  on_exist: Literal["overwrite", "skip", "rename"] = "overwrite",
95
- ) -> bytes | None:
96
+ ) -> Path | None:
96
97
  """
97
- Download an image from a given URL and return its content as bytes.
98
-
99
- If on_exist='skip' and the file already exists, it will be read from disk
100
- instead of being downloaded again.
98
+ Download an image from `url` and save it to `target_folder`, returning the Path.
99
+ Can override the filename via `target_name`.
101
100
 
102
101
  :param url: Image URL. Can start with 'http', '//', or without protocol.
103
- :param target_folder: Optional folder to save the image (str or Path).
102
+ :param target_folder: Directory to save into (defaults to cwd).
103
+ :param target_name: Optional filename (with or without extension).
104
104
  :param timeout: Request timeout in seconds.
105
105
  :param retries: Number of retry attempts.
106
106
  :param backoff: Base delay between retries (exponential backoff).
107
107
  :param on_exist: What to do if file exists: 'overwrite', 'skip', or 'rename'.
108
- :return: Image content as bytes, or None if failed.
108
+ :return: Path to the saved image, or `None` on any failure.
109
109
  """
110
110
  # Normalize URL
111
111
  if url.startswith("//"):
@@ -113,21 +113,28 @@ def download_image_as_bytes(
113
113
  elif not url.startswith("http"):
114
114
  url = "https://" + url
115
115
 
116
- save_path = None
117
- if target_folder:
118
- target_folder = Path(target_folder)
119
- filename = image_url_to_filename(url)
120
- save_path = target_folder / filename
121
-
122
- if on_exist == "skip" and save_path.exists():
123
- logger.info(
124
- "[image] '%s' exists, skipping download and reading from disk.",
125
- save_path,
126
- )
127
- return read_binary_file(save_path)
116
+ folder = Path(target_folder) if target_folder else Path.cwd()
117
+ folder.mkdir(parents=True, exist_ok=True)
118
+
119
+ if target_name:
120
+ name = target_name
121
+ if not Path(name).suffix:
122
+ # infer ext from URL-derived name
123
+ name += Path(image_url_to_filename(url)).suffix
124
+ else:
125
+ name = image_url_to_filename(url)
126
+ save_path = folder / name
127
+
128
+ # Handle existing file
129
+ if save_path.exists():
130
+ if on_exist == "skip":
131
+ logger.debug("Skipping download; file exists: %s", save_path)
132
+ return save_path
133
+ if on_exist == "rename":
134
+ save_path = _get_non_conflicting_path(save_path)
128
135
 
129
136
  # Proceed with download
130
- response = http_get_with_retry(
137
+ resp = http_get_with_retry(
131
138
  url,
132
139
  retries=retries,
133
140
  timeout=timeout,
@@ -136,19 +143,25 @@ def download_image_as_bytes(
136
143
  stream=False,
137
144
  )
138
145
 
139
- if response and response.ok:
140
- content = response.content
141
-
142
- if save_path:
143
- _write_file(
144
- content=content,
145
- filepath=save_path,
146
- mode="wb",
147
- on_exist=on_exist,
148
- )
149
-
150
- return content
146
+ if not (resp and resp.ok):
147
+ logger.warning(
148
+ "Failed to download %s (status=%s)",
149
+ url,
150
+ getattr(resp, "status_code", None),
151
+ )
152
+ return None
151
153
 
154
+ # Write to disk
155
+ try:
156
+ _write_file(
157
+ content=resp.content,
158
+ filepath=save_path,
159
+ mode="wb",
160
+ on_exist=on_exist,
161
+ )
162
+ return save_path
163
+ except Exception:
164
+ logger.exception("Error saving image to %s", save_path)
152
165
  return None
153
166
 
154
167
 
@@ -191,7 +204,7 @@ def download_font_file(
191
204
 
192
205
  # If skip and file exists -> return immediately
193
206
  if on_exist == "skip" and font_path.exists():
194
- logger.info("[font] File exists, skipping download: %s", font_path)
207
+ logger.debug("[font] File exists, skipping download: %s", font_path)
195
208
  return font_path
196
209
 
197
210
  # Retry download with exponential backoff
@@ -214,7 +227,7 @@ def download_font_file(
214
227
  if chunk:
215
228
  f.write(chunk)
216
229
 
217
- logger.info("[font] Font saved to: %s", font_path)
230
+ logger.debug("[font] Font saved to: %s", font_path)
218
231
  return font_path
219
232
 
220
233
  except Exception as e:
@@ -258,7 +271,7 @@ def download_js_file(
258
271
  save_path = target_folder / filename
259
272
 
260
273
  if on_exist == "skip" and save_path.exists():
261
- logger.info("[js] File exists, skipping download: %s", save_path)
274
+ logger.debug("[js] File exists, skipping download: %s", save_path)
262
275
  return save_path
263
276
 
264
277
  response = http_get_with_retry(
@@ -278,7 +291,7 @@ def download_js_file(
278
291
 
279
292
  try:
280
293
  _write_file(content=content, filepath=save_path, mode="wb")
281
- logger.info("[js] JS file saved to: %s", save_path)
294
+ logger.debug("[js] JS file saved to: %s", save_path)
282
295
  return save_path
283
296
  except Exception as e:
284
297
  logger.error("[js] Error writing JS to disk: %s", e)
@@ -13,9 +13,10 @@ Includes:
13
13
  """
14
14
 
15
15
  from .datetime_utils import calculate_time_difference
16
- from .sleep_utils import sleep_with_random_delay
16
+ from .sleep_utils import async_sleep_with_random_delay, sleep_with_random_delay
17
17
 
18
18
  __all__ = [
19
19
  "calculate_time_difference",
20
+ "async_sleep_with_random_delay",
20
21
  "sleep_with_random_delay",
21
22
  ]
@@ -24,6 +24,8 @@ _DATETIME_FORMATS = [
24
24
  (r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}[+-]\d{2}:\d{2}", "%Y-%m-%dT%H:%M:%S%z"),
25
25
  # 完整年月日+时分秒 空格格式
26
26
  (r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}", "%Y-%m-%d %H:%M:%S"),
27
+ (r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}", "%Y-%m-%d %H:%M"),
28
+ (r"\d{2}-\d{2}-\d{2} \d{2}:\d{2}", "%y-%m-%d %H:%M"),
27
29
  # 年月日 (无时间)
28
30
  (r"\d{4}-\d{2}-\d{2}", "%Y-%m-%d"),
29
31
  # Slashes 分隔
@@ -136,7 +138,7 @@ def calculate_time_difference(
136
138
 
137
139
  except Exception as e:
138
140
  logger.warning("[time] Failed to calculate time difference: %s", e)
139
- return 0, 0, 0, 0
141
+ return 999, 23, 59, 59
140
142
 
141
143
 
142
144
  __all__ = [
@@ -10,6 +10,7 @@ Includes:
10
10
  optionally capped with a max_sleep limit.
11
11
  """
12
12
 
13
+ import asyncio
13
14
  import logging
14
15
  import random
15
16
  import time
@@ -55,9 +56,50 @@ def sleep_with_random_delay(
55
56
  if max_sleep is not None:
56
57
  duration = min(duration, max_sleep)
57
58
 
58
- logger.info("[time] Sleeping for %.2f seconds", duration)
59
+ logger.debug("[time] Sleeping for %.2f seconds", duration)
59
60
  time.sleep(duration)
60
61
  return
61
62
 
62
63
 
63
- __all__ = ["sleep_with_random_delay"]
64
+ async def async_sleep_with_random_delay(
65
+ base: float,
66
+ add_spread: float = 0.0,
67
+ mul_spread: float = 1.0,
68
+ *,
69
+ max_sleep: float | None = None,
70
+ ) -> None:
71
+ """
72
+ Async sleep for a random duration by combining multiplicative and additive jitter.
73
+
74
+ The total sleep time is computed as:
75
+
76
+ duration = base * uniform(1.0, mul_spread) + uniform(0, add_spread)
77
+
78
+ If `max_sleep` is provided, the duration will be capped at that value.
79
+
80
+ :param base: Base sleep time in seconds. Must be >= 0.
81
+ :param add_spread: Maximum extra seconds to add after scaling base.
82
+ :param mul_spread: Maximum multiplier factor for base; drawn from [1.0, mul_spread].
83
+ :param max_sleep: Optional upper limit for the final sleep duration.
84
+ """
85
+ if base < 0 or add_spread < 0 or mul_spread < 0:
86
+ logger.warning(
87
+ "[async sleep] Invalid parameters: base=%s, add_spread=%s, mul_spread=%s",
88
+ base,
89
+ add_spread,
90
+ mul_spread,
91
+ )
92
+ return
93
+
94
+ multiplicative_jitter = random.uniform(1.0, mul_spread)
95
+ additive_jitter = random.uniform(0, add_spread)
96
+ duration = base * multiplicative_jitter + additive_jitter
97
+
98
+ if max_sleep is not None:
99
+ duration = min(duration, max_sleep)
100
+
101
+ logger.debug("[async time] Sleeping for %.2f seconds", duration)
102
+ await asyncio.sleep(duration)
103
+
104
+
105
+ __all__ = ["sleep_with_random_delay", "async_sleep_with_random_delay"]
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: novel-downloader
3
- Version: 1.3.1
3
+ Version: 1.3.3
4
4
  Summary: A command-line tool for downloading Chinese web novels from Qidian and similar platforms.
5
5
  Author-email: Saudade Z <saudadez217@gmail.com>
6
6
  License: MIT License
@@ -69,20 +69,37 @@ Dynamic: license-file
69
69
 
70
70
  # novel-downloader
71
71
 
72
- 一个基于 [DrissionPage](https://www.drissionpage.cn) 和 [requests](https://github.com/psf/requests) 的小说下载器。
72
+ 一个基于 [DrissionPage](https://www.drissionpage.cn) 和 [requests](https://github.com/psf/requests) 的小说下载工具/库。
73
73
 
74
74
  ---
75
75
 
76
76
  ## 项目简介
77
77
 
78
- **novel-downloader** 是一个通用的小说下载库 / CLI 工具,
79
- - 大多数支持的站点仅依赖 [`requests`](https://github.com/psf/requests) 进行 HTTP 抓取
78
+ **novel-downloader** 支持多种小说网站的章节抓取与合并导出,
79
+ - **轻量化抓取**: 绝大多数站点仅依赖 `requests` 实现 HTTP 请求, 无需额外浏览器驱动
80
80
  - 对于起点中文网 (Qidian), 可在配置中选择:
81
81
  - `mode: session` : 纯 Requests 模式
82
- - `mode: browser` : 基于 DrissionPage 驱动 Chrome 的浏览器模式 (可处理更复杂的 JS/加密)。
83
- - 如果在 `browser` 模式下且 `login_required: true`, 首次运行会自动打开浏览器, 请完成登录后继续。
82
+ - `mode: browser` : 基于 `DrissionPage` 驱动 Chrome 的浏览器模式 (可处理更复杂的 JS/加密)。
83
+ - **自动登录** (可选)
84
+ - 配置 `login_required: true` 后自动检测并重用历史 Cookie
85
+ - 首次登录或 Cookie 失效时:
86
+ - **browser** 模式: 在程序打开的浏览器窗口登录, 登录后回车继续
87
+ - **session** 模式: 根据提示粘贴浏览器中已登录的 Cookie (参考 [复制 Cookies](https://github.com/BowenZ217/novel-downloader/blob/main/docs/copy-cookies.md))
84
88
 
85
- **快速开始**
89
+ ## 功能特性
90
+
91
+ - 抓取起点中文网免费及已订阅章节内容
92
+ - 支持断点续爬, 自动续传未完成任务
93
+ - 自动整合所有章节并导出为:
94
+ - TXT
95
+ - EPUB (可选包含章节插图)
96
+ - 支持活动广告过滤:
97
+ - [x] 章节标题
98
+ - [ ] 章节正文
99
+
100
+ ---
101
+
102
+ ## 快速开始
86
103
 
87
104
  ```bash
88
105
  # 克隆 + 安装
@@ -101,7 +118,10 @@ novel-cli settings init
101
118
  novel-cli download 123456
102
119
  ```
103
120
 
104
- **从 GitHub 安装 (开发版)**
121
+ - 详细可见: [支持站点列表](https://github.com/BowenZ217/novel-downloader/blob/main/docs/6-supported-sites.md)
122
+ - 更多使用方法, 查看 [使用示例](https://github.com/BowenZ217/novel-downloader/blob/main/docs/5-usage-examples.md)
123
+
124
+ ## 从 GitHub 安装 (开发版)
105
125
 
106
126
  如需体验开发中的最新功能, 可通过 GitHub 安装:
107
127
 
@@ -113,22 +133,6 @@ pip install .
113
133
  # pip install .[font-recovery]
114
134
  ```
115
135
 
116
- 更多使用方法, 查看 [使用示例](https://github.com/BowenZ217/novel-downloader/blob/main/docs/5-usage-examples.md)
117
-
118
- ---
119
-
120
- ## 功能特性
121
-
122
- - 爬取起点中文网的小说章节内容 (支持免费与已订阅章节)
123
- - 断点续爬
124
- - 自动整合所有章节并导出为
125
- - TXT
126
- - EPUB
127
- - 支持活动广告过滤:
128
- - [x] 章节标题
129
- - [ ] 章节正文
130
- - [ ] 作者说
131
-
132
136
  ---
133
137
 
134
138
  ## 文档结构
@@ -140,6 +144,7 @@ pip install .
140
144
  - [settings.toml 配置说明](https://github.com/BowenZ217/novel-downloader/blob/main/docs/4-settings-schema.md)
141
145
  - [使用示例](https://github.com/BowenZ217/novel-downloader/blob/main/docs/5-usage-examples.md)
142
146
  - [支持站点列表](https://github.com/BowenZ217/novel-downloader/blob/main/docs/6-supported-sites.md)
147
+ - [复制 Cookies](https://github.com/BowenZ217/novel-downloader/blob/main/docs/copy-cookies.md)
143
148
  - [文件保存](https://github.com/BowenZ217/novel-downloader/blob/main/docs/file-saving.md)
144
149
  - [TODO](https://github.com/BowenZ217/novel-downloader/blob/main/docs/todo.md)
145
150
  - [开发](https://github.com/BowenZ217/novel-downloader/blob/main/docs/develop.md)