web-novel-scraper 1.1.1__py3-none-any.whl → 2.0.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- web_novel_scraper/__main__.py +116 -94
- web_novel_scraper/config_manager.py +84 -0
- web_novel_scraper/decode.py +30 -44
- web_novel_scraper/decode_guide/decode_guide.json +47 -0
- web_novel_scraper/file_manager.py +226 -257
- web_novel_scraper/novel_scraper.py +64 -41
- web_novel_scraper/request_manager.py +2 -2
- web_novel_scraper/utils.py +132 -2
- web_novel_scraper/version.py +1 -1
- {web_novel_scraper-1.1.1.dist-info → web_novel_scraper-2.0.0.dist-info}/METADATA +1 -1
- web_novel_scraper-2.0.0.dist-info/RECORD +19 -0
- web_novel_scraper-1.1.1.dist-info/RECORD +0 -18
- {web_novel_scraper-1.1.1.dist-info → web_novel_scraper-2.0.0.dist-info}/WHEEL +0 -0
- {web_novel_scraper-1.1.1.dist-info → web_novel_scraper-2.0.0.dist-info}/entry_points.txt +0 -0

web_novel_scraper/novel_scraper.py
CHANGED

@@ -11,6 +11,7 @@ from .file_manager import FileManager
 from . import utils
 
 from .request_manager import get_html_content
+from .config_manager import ScraperConfig
 
 logger = logger_manager.create_logger('NOVEL SCRAPPING')
 
@@ -18,7 +19,6 @@ logger = logger_manager.create_logger('NOVEL SCRAPPING')
 @dataclass_json
 @dataclass
 class Metadata:
-    novel_title: str
     author: Optional[str] = None
     start_date: Optional[str] = None
     end_date: Optional[str] = None

@@ -105,10 +105,11 @@ class Chapter:
         return self.chapter_title < another.chapter_title
 
 
-@dataclass_json
+@dataclass_json(undefined=Undefined.EXCLUDE)
 @dataclass
 class Novel:
-    metadata: Metadata
+    metadata: Metadata = None
+    title: str = None
     scraper_behavior: ScraperBehavior = None
     chapters: list[Chapter] = field(default_factory=list)
     toc_main_url: Optional[str] = None
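
Note on the decorator change above: in dataclasses-json, undefined=Undefined.EXCLUDE silently drops unknown keys during deserialization, so a novel.json written by 1.1.1 (which still carries removed fields such as novel_title) loads cleanly under 2.0.0. A minimal sketch of the behaviour, assuming only that dataclasses-json is installed (Legacy is an illustrative class, not part of the package):

    from dataclasses import dataclass
    from dataclasses_json import Undefined, dataclass_json

    @dataclass_json(undefined=Undefined.EXCLUDE)
    @dataclass
    class Legacy:
        title: str = None

    # 'novel_title' is an unknown key here; EXCLUDE drops it instead of failing.
    print(Legacy.from_dict({'title': 'My Novel', 'novel_title': 'stale'}))
    # -> Legacy(title='My Novel')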

@@ -116,30 +117,23 @@ class Novel:
     host: str = None
 
     def __init__(self,
-                 …
+                 title: str,
                  toc_main_url: str = None,
                  toc_html: str = None,
                  chapters_url_list: list[str] = None,
                  metadata: Metadata = None,
                  chapters: list[Chapter] = None,
-                 novel_base_dir: str = None,
                  scraper_behavior: ScraperBehavior = None,
-                 host: str = None
-                 …
+                 host: str = None
+                 ):
         if toc_main_url and toc_html:
-            logger.…
-            …
+            logger.critical('There can only be one or toc_main_url or toc_html')
+            raise ValueError('There can only be one or toc_main_url or toc_html')
 
+        self.title = title
+        self.metadata = Metadata()
         if metadata is not None:
             self.metadata = metadata
-        elif novel_title is not None:
-            self.metadata = Metadata(novel_title)
-        else:
-            logger.error('You need to set "novel_title" or "metadata".')
-            sys.exit(1)
-
-        self.file_manager = FileManager(novel_title=self.metadata.novel_title,
-                                        novel_base_dir=novel_base_dir)
 
         if toc_html:
             self.file_manager.add_toc(toc_html)
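
Note: after this hunk, constructing a Novel no longer touches the filesystem. title is required, metadata defaults to an empty Metadata(), and the file manager wiring happens later via set_config() (see the hunks below). A hedged sketch of the new call shape, assuming the 2.0.0 package is importable (argument values are made up):

    from web_novel_scraper.novel_scraper import Novel

    novel = Novel('Some Novel', toc_main_url='https://example.com/novel/some-novel')
    # No FileManager or Decoder exists yet; call set_config() before any
    # disk or network operation.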

@@ -155,9 +149,10 @@ class Novel:
             sys.exit(1)
 
         self.host = host if host else utils.obtain_host(self.toc_main_url)
-        self.decoder = Decoder(self.host)
 
-        self.…
+        self.config = None
+        self.file_manager = None
+        self.decoder = None
 
     def __str__(self):
         """

@@ -165,7 +160,7 @@ class Novel:
         """
         toc_info = self.toc_main_url if self.toc_main_url else "TOC added manually"
         attributes = [
-            f"Title: {self.…
+            f"Title: {self.title}",
             f"Author: {self.metadata.author}",
             f"Language: {self.metadata.language}",
             f"Description: {self.metadata.description}",

@@ -177,30 +172,57 @@ class Novel:
         return (f"Novel Info: \n"
                 f"{attributes_str}")
 
+    @staticmethod
+    def load(title: str, cfg: ScraperConfig, novel_base_dir: str | None = None):
+        fm = FileManager(title, cfg.base_novels_dir, novel_base_dir, read_only=True)
+        raw = fm.load_novel_json()
+        if raw is None:
+            logger.debug(f'Novel "{title}" was not found.')
+            raise ValueError(f'Novel "{title}" was not found.')
+        novel = Novel.from_json(raw)
+        novel.config = cfg
+        novel.set_config(cfg=cfg, novel_base_dir=novel_base_dir)
+        return novel
+
     # NOVEL PARAMETERS MANAGEMENT
 
-    def …
+    def set_config(self,
+                   cfg: ScraperConfig = None,
+                   config_file: str = None,
+                   base_novels_dir: str = None,
+                   novel_base_dir: str = None,
+                   decode_guide_file: str = None):
+        if cfg is not None:
+            self.config = cfg
+        else:
+            self.config = ScraperConfig(config_file=config_file,
+                                        base_novels_dir=base_novels_dir,
+                                        decode_guide_file=decode_guide_file)
+
+        self.file_manager = FileManager(title=self.title,
+                                        base_novels_dir=self.config.base_novels_dir,
+                                        novel_base_dir=novel_base_dir)
+
+        self.decoder = Decoder(self.host, self.config.decode_guide_file)
+
+    def set_scraper_behavior(self, save: bool = False, **kwargs) -> None:
         self.scraper_behavior.update_behavior(**kwargs)
-        self.save_novel()
 
     def set_metadata(self, **kwargs) -> None:
         self.metadata.update_behavior(**kwargs)
-        self.save_novel()
 
     def add_tag(self, tag: str) -> bool:
         if tag not in self.metadata.tags:
             self.metadata.tags.append(tag)
-            self.save_novel()
             return True
-        logger.warning(f'Tag "{tag}" already exists on novel {self.…
+        logger.warning(f'Tag "{tag}" already exists on novel {self.title}')
         return False
 
     def remove_tag(self, tag: str) -> bool:
         if tag in self.metadata.tags:
             self.metadata.tags.remove(tag)
-            self.save_novel()
             return True
-        logger.warning(f'Tag "{tag}" doesn\'t exist on novel {self.…
+        logger.warning(f'Tag "{tag}" doesn\'t exist on novel {self.title}')
         return False
 
     def set_cover_image(self, cover_image_path: str) -> bool:
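
Note: Novel.load() plus set_config() is the new two-step entry point, and the removed self.save_novel() calls mean mutators such as set_metadata() and add_tag() no longer auto-persist. A hedged usage sketch (paths and novel name are made up; assumes the 2.0.0 package is installed):

    from web_novel_scraper.config_manager import ScraperConfig
    from web_novel_scraper.novel_scraper import Novel

    cfg = ScraperConfig(base_novels_dir='/data/novels')
    novel = Novel.load('Some Novel', cfg)   # raises ValueError if novel.json is missing
    novel.add_tag('fantasy')                # mutates in memory only
    novel.save_novel()                      # persistence is now an explicit step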

@@ -208,10 +230,9 @@ class Novel:
 
     def set_host(self, host: str) -> None:
         self.host = host
-        self.decoder…
-        self.save_novel()
+        self.decoder.set_host(host)
 
-    def save_novel(self) -> None:
+    def save_novel(self, save: bool = True) -> None:
         self.file_manager.save_novel_json(self.to_dict())
 
     # TABLE OF CONTENTS MANAGEMENT

@@ -224,7 +245,6 @@ class Novel:
             self.decoder = Decoder(self.host)
         elif update_host:
             self.decoder = Decoder(utils.obtain_host(self.toc_main_url))
-        self.save_novel()
 
     def add_toc_html(self, html: str, host: str = None) -> None:
         if self.toc_main_url:

@@ -236,13 +256,11 @@ class Novel:
             self.decoder = Decoder(self.host)
         self.file_manager.add_toc(html)
         # Delete toc_main_url since they are exclusive
-        self.save_novel()
 
     def delete_toc(self):
         self.file_manager.delete_toc()
         self.chapters = []
         self.chapters_url_list = []
-        self.save_novel()
 
     def sync_toc(self, reload_files: bool = False) -> bool:
         # Hard reload will request again the toc files from the toc_main_url

@@ -286,8 +304,8 @@ class Novel:
             for chapters_url in (self.chapters_url_list[::-1] if invert else self.chapters_url_list)
             for chapter in chapters_url
         ]
-
-        if …
+        add_host_to_chapter = self.scraper_behavior.auto_add_host or self.decoder.add_host_to_chapter()
+        if add_host_to_chapter:
             self.chapters_url_list = [
                 f'https://{self.host}{chapter_url}' for chapter_url in self.chapters_url_list]
             self.chapters_url_list = utils.delete_duplicates(

@@ -337,6 +355,7 @@ class Novel:
         chapter = self.chapters[chapter_idx]
         if update_html:
             logger.debug('HTML will be updated...')
+
         chapter = self._get_chapter(chapter,
                                     reload=update_html)
 

@@ -437,7 +456,7 @@ class Novel:
         return True
 
 
-…
+    ## UTILS
 
 
     def clean_files(self, clean_chapters: bool = True, clean_toc: bool = True, hard_clean: bool = False) -> None:

@@ -453,6 +472,9 @@ class Novel:
     def show_novel_dir(self) -> str:
         return self.file_manager.novel_base_dir
 
+
+    ## PRIVATE HELPERS
+
     def _clean_chapter(self, chapter_html_filename: str, hard_clean: bool = False) -> None:
         hard_clean = hard_clean or self.scraper_behavior.hard_clean
         chapter_html = self.file_manager.load_chapter_html(

@@ -600,13 +622,14 @@ class Novel:
         chapter_title = self.decoder.get_chapter_title(chapter.chapter_html)
         if not chapter_title:
             logger.debug('No chapter title found, generating one...')
-            chapter_title = f'{self.…
+            chapter_title = f'{self.title} Chapter {idx_for_chapter_name}'
         chapter.chapter_title = str(chapter_title)
         logger.debug(f'Chapter title: "{chapter_title}"')
 
         logger.debug('Obtaining chapter content...')
+        save_title_to_content = self.scraper_behavior.save_title_to_content or self.decoder.save_title_to_content()
         chapter.chapter_content = self.decoder.get_chapter_content(chapter.chapter_html,
-                                                                   …
+                                                                   save_title_to_content,
                                                                    chapter.chapter_title)
         logger.debug('Chapter successfully decoded')
 

@@ -615,7 +638,7 @@ class Novel:
     def _create_epub_book(self, book_title: str = None, calibre_collection: dict = None) -> epub.EpubBook:
         book = epub.EpubBook()
         if not book_title:
-            book_title = self.…
+            book_title = self.title
         book.set_title(book_title)
         book.set_language(self.metadata.language)
         book.add_metadata('DC', 'description', self.metadata.description)

@@ -700,11 +723,11 @@ class Novel:
         idx_start = start_chapter - 1
         idx_end = end_chapter
         # We create the epub book
-        book_title = f'{self.…
+        book_title = f'{self.title} Chapters {start_chapter} - {end_chapter}'
         calibre_collection = None
         # If collection_idx is set, we create a calibre collection
         if collection_idx:
-            calibre_collection = {'title': self.…
+            calibre_collection = {'title': self.title,
                                   'idx': str(collection_idx)}
         book = self._create_epub_book(book_title, calibre_collection)
 

web_novel_scraper/request_manager.py
CHANGED

@@ -45,7 +45,7 @@ def _get_request(url: str,
         if attempt < retries - 1:
             logger.debug(f'Waiting {time_between_retries} seconds before retrying')
             time.sleep(time_between_retries)  # Wait before retrying
-    logger.…
+    logger.debug(f'Failed to get a successful response for "{url}" after {retries} attempts using common HTTP Request')
     return None
 
 
@@ -96,7 +96,7 @@ def _get_request_flaresolver(url: str,
             logger.debug(f'Waiting {time_between_retries} seconds before retrying')
             time.sleep(time_between_retries)  # Wait before retrying
 
-    logger.…
+    logger.debug(f'Failed to get a successful response for "{url}" using FlareSolver after {retries} attempts')
     return None
 
 
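Both request_manager.py changes only demote the give-up message to debug level; the surrounding retry contract is unchanged: try up to retries times, sleep between attempts, and return None once exhausted instead of raising. A generic sketch of that shape, assuming the requests library (not the package's actual implementation):

    import time
    import requests

    def get_with_retries(url: str, retries: int = 3, time_between_retries: int = 1):
        """Illustrative retry loop; returns response text or None."""
        for attempt in range(retries):
            try:
                response = requests.get(url, timeout=20)
                if response.ok:
                    return response.text
            except requests.RequestException:
                pass
            if attempt < retries - 1:
                time.sleep(time_between_retries)  # wait before retrying
        return None  # caller decides how to handle the failure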

web_novel_scraper/utils.py
CHANGED

@@ -1,10 +1,140 @@
-…
-…
+import json
+import shutil
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Optional
+
 import hashlib
 from urllib.parse import urlparse
 import re
 import unicodedata
 
+class FileManagerError(Exception):
+    """Exception raised for any exception for file operations"""
+
+class FileOps:
+    """Static helper for disc operations."""
+
+    ## HELPERS
+
+    @staticmethod
+    def _atomic_tmp(path: Path) -> Path:
+        """Temporary file path in the same directory as *path*."""
+        return path.with_suffix(path.suffix + ".tmp")
+
+    ## DIRECTORY MANAGEMENT
+    @staticmethod
+    def ensure_dir(path: Path) -> Path:
+        """Create *path* (and parents) if missing."""
+        try:
+            path.mkdir(parents=True, exist_ok=True)
+            return path
+        except Exception as e:
+            raise FileManagerError(str(e)) from e
+
+    ## READ OPERATIONS
+
+    @staticmethod
+    def read_text(path: Path) -> Optional[str]:
+        """Return UTF-8 contents or None if *path* does not exist."""
+        if not path.exists():
+            return None
+        try:
+            return path.read_text(encoding="utf-8")
+        except Exception as e:
+            raise FileManagerError(str(e)) from e
+
+    @staticmethod
+    def read_json(path: Path | str) -> Optional[dict]:
+        """Return JSON object or None if *path* does not exist."""
+        path = Path(path)
+        raw = FileOps.read_text(path)
+        if raw is None:
+            return None
+        try:
+            return json.loads(raw)
+        except Exception as e:
+            raise FileManagerError(str(e)) from e
+
+    @staticmethod
+    def read_binary(path: Path) -> Optional[bytes]:
+        """Return binary contents or None if *path* does not exist."""
+        if not path.exists():
+            return None
+        try:
+            return path.read_bytes()
+        except Exception as e:
+            raise FileManagerError(str(e)) from e
+
+    ## WRITE OPERATION
+
+    @staticmethod
+    def save_text(path: Path, text: str) -> None:
+        """Atomically write UTF-8 text to *path*."""
+        tmp = FileOps._atomic_tmp(path)
+        try:
+            tmp.write_text(text, encoding="utf-8")
+            tmp.replace(path)
+        except Exception as e:
+            FileOps.delete(tmp)
+            raise FileManagerError(str(e)) from e
+
+    @staticmethod
+    def save_json(path: Path, obj: dict) -> None:
+        """Atomically write pretty-printed JSON to *path*."""
+        tmp = FileOps._atomic_tmp(path)
+        try:
+            tmp.write_text(json.dumps(obj, ensure_ascii=False, indent=2), encoding="utf-8")
+            tmp.replace(path)
+        except Exception as e:
+            FileOps.delete(tmp)
+            raise FileManagerError(str(e)) from e
+
+    @staticmethod
+    def save_binary(path: Path, data: bytes) -> None:
+        """Atomically write binary data to *path* (e.g., cover images)."""
+        tmp = FileOps._atomic_tmp(path)
+        try:
+            tmp.write_bytes(data)
+            tmp.replace(path)
+        except Exception as e:
+            FileOps.delete(tmp)
+            raise FileManagerError(str(e)) from e
+
+    ## DELETE/COPY OPERATIONS
+
+    @staticmethod
+    def delete(path: Path) -> None:
+        """Delete *path* if it exists."""
+        try:
+            if path.exists():
+                path.unlink()
+        except Exception as e:
+            raise FileManagerError(str(e)) from e
+
+    @staticmethod
+    def copy(src: Path, dst: Path) -> None:
+        """Copy *src* to *dst*."""
+        try:
+            shutil.copy(src, dst)
+        except Exception as e:
+            raise FileManagerError(str(e)) from e
+
+def _normalize_dirname(name: str) -> str:
+    """
+    Keep whitespace as-is while replacing any other unsupported characters
+    with an underscore.
+    Allowed: letters, digits, underscore, hyphen, and spaces.
+    """
+    # Collapse multiple spaces into a single space (optional; comment out if not desired)
+    name = re.sub(r'\s+', ' ', name.strip())
+
+    # Replace any char that is *not* letter, digit, underscore, hyphen, or space.
+    return re.sub(r'[^\w\-\s]', '_', name)
+
+def now_iso() -> str:
+    """Current timestamp in ISO-8601 (seconds precision)."""
+    return datetime.now(timezone.utc).astimezone().isoformat(timespec="seconds")
 
 def generate_file_name_from_url(url: str) -> str:
     # Parsea URL
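
The new FileOps helpers all share one pattern: write to a sibling *.tmp file, then Path.replace() it over the target, so a crash mid-write can never leave a half-written novel.json behind. A short usage sketch (paths are made up; assumes the 2.0.0 package is importable):

    from pathlib import Path
    from web_novel_scraper.utils import FileManagerError, FileOps, now_iso

    state = Path('/tmp/novels/demo/novel.json')
    FileOps.ensure_dir(state.parent)
    try:
        # Writes novel.json.tmp first, then atomically replaces novel.json.
        FileOps.save_json(state, {'title': 'Demo', 'saved_at': now_iso()})
        print(FileOps.read_json(state))                             # round-trips the dict
        print(FileOps.read_json(state.with_name('missing.json')))   # -> None, not an error
    except FileManagerError as err:
        print(f'file operation failed: {err}')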

web_novel_scraper/version.py
CHANGED

@@ -1 +1 @@
-__version__ = "1.1.1"
+__version__ = "2.0.0"

{web_novel_scraper-1.1.1.dist-info → web_novel_scraper-2.0.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: web-novel-scraper
-Version: 1.1.1
+Version: 2.0.0
 Summary: Python tool that allows you to scrape web novels from various sources and save them to more readable formats like EPUB.
 Project-URL: Homepage, https://github.com/ImagineBrkr/web-novel-scraper
 Project-URL: Documentation, https://web-novel-scraper.readthedocs.io

web_novel_scraper-2.0.0.dist-info/RECORD
ADDED

@@ -0,0 +1,19 @@
+web_novel_scraper/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+web_novel_scraper/__main__.py,sha256=wWfOsHR_nswlWTj_3aZ11vJXzqua2Va_R7h6wyS4XWc,18266
+web_novel_scraper/config_manager.py,sha256=duwKc6jyLj8NmST5F98qGgpW_o6D6GAenKWsYQ80gcU,3121
+web_novel_scraper/decode.py,sha256=VlAW3bdWCSC2BloTIKJ74KqlBhzUca9vpvYZVd2dziI,10796
+web_novel_scraper/file_manager.py,sha256=zmkeJWh8PoVcjnA38qZRSYH06kh0i0txFE-1bfa5vKg,10598
+web_novel_scraper/logger_manager.py,sha256=A-a4bhYI4YCEuSJd9E3WH_kanJ7YCASMwheBzObZK4Q,1972
+web_novel_scraper/novel_scraper.py,sha256=dhiRC1h5Q0b4UrU-_0n2RfH2qRP-c7hIPJukAHNwWsQ,30532
+web_novel_scraper/request_manager.py,sha256=BVWMtUO3HRs44phU2ODkPUjy7tJiIBX_R0rxGGLaJzw,6617
+web_novel_scraper/utils.py,sha256=1V8UwYhpp_27zqPnBDbl7fohu2Z7Sy_4Fq5J2_JAEvU,6405
+web_novel_scraper/version.py,sha256=_7OlQdbVkK4jad0CLdpI0grT-zEAb-qgFmH5mFzDXiA,22
+web_novel_scraper/custom_processor/__init__.py,sha256=iy4tjivMjshSzc52--aa-jK53qu9VwdK-6p4vuQc6oc,103
+web_novel_scraper/custom_processor/custom_processor.py,sha256=h1MPl6JU_C2Mc7SqK70LsNQHpDzSL6QyraMIQ87HcMM,870
+web_novel_scraper/custom_processor/sites/genesis.py,sha256=xV0eybI0ieHR5gn4yWXI74l99Eayhqs16PIYs-BrPjE,1843
+web_novel_scraper/custom_processor/sites/royalroad.py,sha256=_2PsFC_w3RJCUkAPoRn-7R2jlzl3XsG4WYtRaQkp6lg,787
+web_novel_scraper/decode_guide/decode_guide.json,sha256=kbPjEYZCN_jEpV5CL8v22yZ_4gPkO56XwblkSGDMEeM,9683
+web_novel_scraper-2.0.0.dist-info/METADATA,sha256=BFByjX9R_QUaRpcbfEjPEt_UmcZq5M_AXSCDo7nbzVA,8423
+web_novel_scraper-2.0.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+web_novel_scraper-2.0.0.dist-info/entry_points.txt,sha256=bqRvStfvSprSJc2EJXgKIbggWOXSePHFfVIZWy_plDQ,69
+web_novel_scraper-2.0.0.dist-info/RECORD,,

web_novel_scraper-1.1.1.dist-info/RECORD
DELETED

@@ -1,18 +0,0 @@
-web_novel_scraper/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-web_novel_scraper/__main__.py,sha256=OQQVX5CttmAkUwdrnjBSjKPaoh_boUI2ysHi3rLGOSs,17769
-web_novel_scraper/decode.py,sha256=U-78PhJ4SU2hiUmfAWeWGEBJ3YSoCW3Lupw9cUqQuI0,11013
-web_novel_scraper/file_manager.py,sha256=qAqgqtaRb7QyVtyEOW2cMhPYWdKM6nJ69weUCYKwVtM,11862
-web_novel_scraper/logger_manager.py,sha256=A-a4bhYI4YCEuSJd9E3WH_kanJ7YCASMwheBzObZK4Q,1972
-web_novel_scraper/novel_scraper.py,sha256=DsYnY15s8cZZ2w8pRvmD3_NJw54xarhcnEQdvnTD8XI,29421
-web_novel_scraper/request_manager.py,sha256=WU8LG6D_fqmDapX6wpVwpQQSItcNU8Qb9dMAlLCYI8U,6621
-web_novel_scraper/utils.py,sha256=dPp7D2ji9mC2nFydqxsJ_9vkAntA_3VTt8ZmG-F1f78,2270
-web_novel_scraper/version.py,sha256=q8_5C0f-8mHWNb6mMw02zlYPnEGXBqvOmP3z0CEwZKM,22
-web_novel_scraper/custom_processor/__init__.py,sha256=iy4tjivMjshSzc52--aa-jK53qu9VwdK-6p4vuQc6oc,103
-web_novel_scraper/custom_processor/custom_processor.py,sha256=h1MPl6JU_C2Mc7SqK70LsNQHpDzSL6QyraMIQ87HcMM,870
-web_novel_scraper/custom_processor/sites/genesis.py,sha256=xV0eybI0ieHR5gn4yWXI74l99Eayhqs16PIYs-BrPjE,1843
-web_novel_scraper/custom_processor/sites/royalroad.py,sha256=_2PsFC_w3RJCUkAPoRn-7R2jlzl3XsG4WYtRaQkp6lg,787
-web_novel_scraper/decode_guide/decode_guide.json,sha256=gNVencLtK0HmZPlubTm1wA7eatWADCxJ_LCOYWHWuA0,8556
-web_novel_scraper-1.1.1.dist-info/METADATA,sha256=ow5piBhzzo4mZ0secvHrqc4KCCt4VInpDa09Qo9l4AE,8423
-web_novel_scraper-1.1.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-web_novel_scraper-1.1.1.dist-info/entry_points.txt,sha256=bqRvStfvSprSJc2EJXgKIbggWOXSePHFfVIZWy_plDQ,69
-web_novel_scraper-1.1.1.dist-info/RECORD,,

{web_novel_scraper-1.1.1.dist-info → web_novel_scraper-2.0.0.dist-info}/WHEEL
File without changes

{web_novel_scraper-1.1.1.dist-info → web_novel_scraper-2.0.0.dist-info}/entry_points.txt
File without changes