crawlo-1.1.9-py3-none-any.whl → crawlo-1.2.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of crawlo might be problematic.
- crawlo/__init__.py +61 -61
- crawlo/__version__.py +1 -1
- crawlo/cleaners/__init__.py +60 -60
- crawlo/cleaners/data_formatter.py +225 -225
- crawlo/cleaners/encoding_converter.py +125 -125
- crawlo/cleaners/text_cleaner.py +232 -232
- crawlo/cli.py +65 -65
- crawlo/commands/__init__.py +14 -14
- crawlo/commands/check.py +594 -594
- crawlo/commands/genspider.py +151 -151
- crawlo/commands/help.py +142 -132
- crawlo/commands/list.py +155 -155
- crawlo/commands/run.py +292 -292
- crawlo/commands/startproject.py +418 -418
- crawlo/commands/stats.py +188 -188
- crawlo/commands/utils.py +186 -186
- crawlo/config.py +312 -312
- crawlo/config_validator.py +252 -252
- crawlo/core/__init__.py +2 -2
- crawlo/core/engine.py +354 -345
- crawlo/core/processor.py +40 -40
- crawlo/core/scheduler.py +143 -136
- crawlo/crawler.py +1027 -1027
- crawlo/downloader/__init__.py +266 -266
- crawlo/downloader/aiohttp_downloader.py +220 -220
- crawlo/downloader/cffi_downloader.py +256 -256
- crawlo/downloader/httpx_downloader.py +259 -259
- crawlo/downloader/hybrid_downloader.py +213 -213
- crawlo/downloader/playwright_downloader.py +402 -402
- crawlo/downloader/selenium_downloader.py +472 -472
- crawlo/event.py +11 -11
- crawlo/exceptions.py +81 -81
- crawlo/extension/__init__.py +37 -37
- crawlo/extension/health_check.py +141 -141
- crawlo/extension/log_interval.py +57 -57
- crawlo/extension/log_stats.py +81 -81
- crawlo/extension/logging_extension.py +43 -43
- crawlo/extension/memory_monitor.py +104 -104
- crawlo/extension/performance_profiler.py +133 -133
- crawlo/extension/request_recorder.py +107 -107
- crawlo/filters/__init__.py +154 -154
- crawlo/filters/aioredis_filter.py +280 -280
- crawlo/filters/memory_filter.py +269 -269
- crawlo/items/__init__.py +23 -23
- crawlo/items/base.py +21 -21
- crawlo/items/fields.py +53 -53
- crawlo/items/items.py +104 -104
- crawlo/middleware/__init__.py +21 -21
- crawlo/middleware/default_header.py +132 -32
- crawlo/middleware/download_delay.py +105 -28
- crawlo/middleware/middleware_manager.py +135 -135
- crawlo/middleware/offsite.py +116 -0
- crawlo/middleware/proxy.py +366 -272
- crawlo/middleware/request_ignore.py +88 -30
- crawlo/middleware/response_code.py +164 -18
- crawlo/middleware/response_filter.py +138 -26
- crawlo/middleware/retry.py +124 -124
- crawlo/mode_manager.py +211 -211
- crawlo/network/__init__.py +21 -21
- crawlo/network/request.py +338 -338
- crawlo/network/response.py +359 -359
- crawlo/pipelines/__init__.py +21 -21
- crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/csv_pipeline.py +316 -316
- crawlo/pipelines/database_dedup_pipeline.py +224 -224
- crawlo/pipelines/json_pipeline.py +218 -218
- crawlo/pipelines/memory_dedup_pipeline.py +115 -115
- crawlo/pipelines/mongo_pipeline.py +131 -131
- crawlo/pipelines/mysql_pipeline.py +316 -316
- crawlo/pipelines/pipeline_manager.py +61 -61
- crawlo/pipelines/redis_dedup_pipeline.py +167 -167
- crawlo/project.py +187 -187
- crawlo/queue/pqueue.py +37 -37
- crawlo/queue/queue_manager.py +337 -334
- crawlo/queue/redis_priority_queue.py +298 -298
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +226 -219
- crawlo/settings/setting_manager.py +122 -122
- crawlo/spider/__init__.py +639 -639
- crawlo/stats_collector.py +59 -59
- crawlo/subscriber.py +130 -130
- crawlo/task_manager.py +30 -30
- crawlo/templates/crawlo.cfg.tmpl +10 -10
- crawlo/templates/project/__init__.py.tmpl +3 -3
- crawlo/templates/project/items.py.tmpl +17 -17
- crawlo/templates/project/middlewares.py.tmpl +118 -109
- crawlo/templates/project/pipelines.py.tmpl +96 -96
- crawlo/templates/project/run.py.tmpl +45 -45
- crawlo/templates/project/settings.py.tmpl +327 -326
- crawlo/templates/project/settings_distributed.py.tmpl +119 -119
- crawlo/templates/project/settings_gentle.py.tmpl +94 -94
- crawlo/templates/project/settings_high_performance.py.tmpl +151 -151
- crawlo/templates/project/settings_simple.py.tmpl +68 -68
- crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
- crawlo/templates/spider/spider.py.tmpl +143 -141
- crawlo/tools/__init__.py +182 -182
- crawlo/tools/anti_crawler.py +268 -268
- crawlo/tools/authenticated_proxy.py +240 -240
- crawlo/tools/data_validator.py +180 -180
- crawlo/tools/date_tools.py +35 -35
- crawlo/tools/distributed_coordinator.py +386 -386
- crawlo/tools/retry_mechanism.py +220 -220
- crawlo/tools/scenario_adapter.py +262 -262
- crawlo/utils/__init__.py +35 -35
- crawlo/utils/batch_processor.py +260 -260
- crawlo/utils/controlled_spider_mixin.py +439 -439
- crawlo/utils/date_tools.py +290 -290
- crawlo/utils/db_helper.py +343 -343
- crawlo/utils/enhanced_error_handler.py +359 -359
- crawlo/utils/env_config.py +105 -105
- crawlo/utils/error_handler.py +125 -125
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/large_scale_config.py +286 -286
- crawlo/utils/large_scale_helper.py +343 -343
- crawlo/utils/log.py +128 -128
- crawlo/utils/performance_monitor.py +284 -284
- crawlo/utils/queue_helper.py +175 -175
- crawlo/utils/redis_connection_pool.py +334 -334
- crawlo/utils/redis_key_validator.py +199 -199
- crawlo/utils/request.py +267 -267
- crawlo/utils/request_serializer.py +219 -219
- crawlo/utils/spider_loader.py +62 -62
- crawlo/utils/system.py +11 -11
- crawlo/utils/tools.py +4 -4
- crawlo/utils/url.py +39 -39
- crawlo-1.2.1.dist-info/METADATA +692 -0
- crawlo-1.2.1.dist-info/RECORD +220 -0
- examples/__init__.py +7 -7
- examples/aiohttp_settings.py +42 -0
- examples/curl_cffi_settings.py +41 -0
- examples/default_header_middleware_example.py +107 -0
- examples/default_header_spider_example.py +129 -0
- examples/download_delay_middleware_example.py +160 -0
- examples/httpx_settings.py +42 -0
- examples/multi_downloader_proxy_example.py +81 -0
- examples/offsite_middleware_example.py +55 -0
- examples/offsite_spider_example.py +107 -0
- examples/proxy_spider_example.py +166 -0
- examples/request_ignore_middleware_example.py +51 -0
- examples/request_ignore_spider_example.py +99 -0
- examples/response_code_middleware_example.py +52 -0
- examples/response_filter_middleware_example.py +67 -0
- examples/tong_hua_shun_settings.py +62 -0
- examples/tong_hua_shun_spider.py +170 -0
- tests/DOUBLE_CRAWLO_PREFIX_FIX_REPORT.md +81 -81
- tests/__init__.py +7 -7
- tests/advanced_tools_example.py +275 -275
- tests/authenticated_proxy_example.py +236 -236
- tests/cleaners_example.py +160 -160
- tests/config_validation_demo.py +102 -102
- tests/controlled_spider_example.py +205 -205
- tests/date_tools_example.py +180 -180
- tests/dynamic_loading_example.py +523 -523
- tests/dynamic_loading_test.py +104 -104
- tests/env_config_example.py +133 -133
- tests/error_handling_example.py +171 -171
- tests/redis_key_validation_demo.py +130 -130
- tests/response_improvements_example.py +144 -144
- tests/test_advanced_tools.py +148 -148
- tests/test_all_redis_key_configs.py +145 -145
- tests/test_authenticated_proxy.py +141 -141
- tests/test_cleaners.py +54 -54
- tests/test_comprehensive.py +146 -146
- tests/test_config_validator.py +193 -193
- tests/test_crawlo_proxy_integration.py +173 -0
- tests/test_date_tools.py +123 -123
- tests/test_default_header_middleware.py +159 -0
- tests/test_double_crawlo_fix.py +207 -207
- tests/test_double_crawlo_fix_simple.py +124 -124
- tests/test_download_delay_middleware.py +222 -0
- tests/test_downloader_proxy_compatibility.py +269 -0
- tests/test_dynamic_downloaders_proxy.py +124 -124
- tests/test_dynamic_proxy.py +92 -92
- tests/test_dynamic_proxy_config.py +146 -146
- tests/test_dynamic_proxy_real.py +109 -109
- tests/test_edge_cases.py +303 -303
- tests/test_enhanced_error_handler.py +270 -270
- tests/test_env_config.py +121 -121
- tests/test_error_handler_compatibility.py +112 -112
- tests/test_final_validation.py +153 -153
- tests/test_framework_env_usage.py +103 -103
- tests/test_integration.py +356 -356
- tests/test_item_dedup_redis_key.py +122 -122
- tests/test_offsite_middleware.py +222 -0
- tests/test_parsel.py +29 -29
- tests/test_performance.py +327 -327
- tests/test_proxy_api.py +265 -0
- tests/test_proxy_health_check.py +32 -32
- tests/test_proxy_middleware.py +122 -0
- tests/test_proxy_middleware_enhanced.py +217 -0
- tests/test_proxy_middleware_integration.py +136 -136
- tests/test_proxy_providers.py +56 -56
- tests/test_proxy_stats.py +19 -19
- tests/test_proxy_strategies.py +59 -59
- tests/test_queue_manager_double_crawlo.py +174 -231
- tests/test_queue_manager_redis_key.py +176 -176
- tests/test_real_scenario_proxy.py +196 -0
- tests/test_redis_config.py +28 -28
- tests/test_redis_connection_pool.py +294 -294
- tests/test_redis_key_naming.py +181 -181
- tests/test_redis_key_validator.py +123 -123
- tests/test_redis_queue.py +224 -224
- tests/test_request_ignore_middleware.py +183 -0
- tests/test_request_serialization.py +70 -70
- tests/test_response_code_middleware.py +350 -0
- tests/test_response_filter_middleware.py +428 -0
- tests/test_response_improvements.py +152 -152
- tests/test_retry_middleware.py +242 -0
- tests/test_scheduler.py +241 -241
- tests/test_simple_response.py +61 -61
- tests/test_telecom_spider_redis_key.py +205 -205
- tests/test_template_content.py +87 -87
- tests/test_template_redis_key.py +134 -134
- tests/test_tools.py +153 -153
- tests/tools_example.py +257 -257
- crawlo-1.1.9.dist-info/METADATA +0 -626
- crawlo-1.1.9.dist-info/RECORD +0 -190
- {crawlo-1.1.9.dist-info → crawlo-1.2.1.dist-info}/WHEEL +0 -0
- {crawlo-1.1.9.dist-info → crawlo-1.2.1.dist-info}/entry_points.txt +0 -0
- {crawlo-1.1.9.dist-info → crawlo-1.2.1.dist-info}/top_level.txt +0 -0
crawlo/cleaners/text_cleaner.py
CHANGED
@@ -1,233 +1,233 @@
The hunk removes lines 1–232 and re-adds them with identical text (a whole-file rewrite; only line 233 is carried over unchanged). The resulting file:

#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
# @Time   : 2025-09-10 22:00
# @Author : crawl-coder
# @Desc   : Text cleaning utilities
"""
import re
import html
from typing import Optional, Union, List
import unicodedata


class TextCleaner:
    """
    Text-cleaning utility class providing a range of cleaning helpers.
    Particularly suited to cleaning page content fetched by crawlers.
    """

    @staticmethod
    def remove_html_tags(text: str) -> str:
        """
        Remove HTML tags.

        :param text: text containing HTML tags
        :return: text with the HTML tags removed
        """
        if not isinstance(text, str):
            return str(text)

        # Strip HTML tags with a regular expression
        clean_text = re.sub(r'<[^>]+>', '', text)
        return clean_text.strip()

    @staticmethod
    def decode_html_entities(text: str) -> str:
        """
        Decode HTML entities.

        :param text: text containing HTML entities
        :return: decoded text
        """
        if not isinstance(text, str):
            return str(text)

        return html.unescape(text)

    @staticmethod
    def remove_extra_whitespace(text: str) -> str:
        """
        Remove redundant whitespace (spaces, tabs, newlines, etc.).

        :param text: input text
        :return: cleaned text
        """
        if not isinstance(text, str):
            return str(text)

        # Collapse runs of consecutive whitespace into a single space
        clean_text = re.sub(r'\s+', ' ', text)
        return clean_text.strip()

    @staticmethod
    def remove_special_chars(text: str, chars: str = '') -> str:
        """
        Remove special characters.

        :param text: input text
        :param chars: additional characters to exempt from removal
        :return: cleaned text
        """
        if not isinstance(text, str):
            return str(text)

        # Remove common special characters (word characters, whitespace and CJK are kept)
        special_chars = r'[^\w\s\u4e00-\u9fff' + chars + r']'
        clean_text = re.sub(special_chars, '', text)
        return clean_text

    @staticmethod
    def normalize_unicode(text: str) -> str:
        """
        Normalize Unicode characters.

        :param text: input text
        :return: normalized text
        """
        if not isinstance(text, str):
            return str(text)

        return unicodedata.normalize('NFKC', text)

    @staticmethod
    def clean_text(text: str,
                   remove_html: bool = True,
                   decode_entities: bool = True,
                   remove_whitespace: bool = True,
                   remove_special: bool = False,
                   normalize: bool = True) -> str:
        """
        Combined text-cleaning entry point.

        :param text: raw text
        :param remove_html: whether to strip HTML tags
        :param decode_entities: whether to decode HTML entities
        :param remove_whitespace: whether to collapse extra whitespace
        :param remove_special: whether to strip special characters
        :param normalize: whether to normalize Unicode
        :return: cleaned text
        """
        if not isinstance(text, str):
            text = str(text)

        if not text:
            return text

        # Apply the cleaning steps in order
        if remove_html:
            text = TextCleaner.remove_html_tags(text)

        if decode_entities:
            text = TextCleaner.decode_html_entities(text)

        if normalize:
            text = TextCleaner.normalize_unicode(text)

        if remove_whitespace:
            text = TextCleaner.remove_extra_whitespace(text)

        if remove_special:
            text = TextCleaner.remove_special_chars(text)

        return text

    @staticmethod
    def extract_numbers(text: str) -> List[str]:
        """
        Extract numbers from text.

        :param text: input text
        :return: list of numbers
        """
        if not isinstance(text, str):
            return []

        # Match integers and decimals
        numbers = re.findall(r'-?\d+\.?\d*', text)
        return numbers

    @staticmethod
    def extract_emails(text: str) -> List[str]:
        """
        Extract email addresses from text.

        :param text: input text
        :return: list of email addresses
        """
        if not isinstance(text, str):
            return []

        # Match email addresses
        emails = re.findall(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b', text)
        return emails

    @staticmethod
    def extract_urls(text: str) -> List[str]:
        """
        Extract URLs from text.

        :param text: input text
        :return: list of URLs
        """
        if not isinstance(text, str):
            return []

        # Match URLs
        urls = re.findall(
            r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
            text
        )
        return urls


# ======================= Public module-level API =======================

def remove_html_tags(text: str) -> str:
    """Remove HTML tags"""
    return TextCleaner.remove_html_tags(text)


def decode_html_entities(text: str) -> str:
    """Decode HTML entities"""
    return TextCleaner.decode_html_entities(text)


def remove_extra_whitespace(text: str) -> str:
    """Remove extra whitespace"""
    return TextCleaner.remove_extra_whitespace(text)


def remove_special_chars(text: str, chars: str = '') -> str:
    """Remove special characters"""
    return TextCleaner.remove_special_chars(text, chars)


def normalize_unicode(text: str) -> str:
    """Normalize Unicode characters"""
    return TextCleaner.normalize_unicode(text)


def clean_text(text: str,
               remove_html: bool = True,
               decode_entities: bool = True,
               remove_whitespace: bool = True,
               remove_special: bool = False,
               normalize: bool = True) -> str:
    """Combined text cleaning"""
    return TextCleaner.clean_text(text, remove_html, decode_entities, remove_whitespace, remove_special, normalize)


def extract_numbers(text: str) -> List[str]:
    """Extract numbers"""
    return TextCleaner.extract_numbers(text)


def extract_emails(text: str) -> List[str]:
    """Extract email addresses"""
    return TextCleaner.extract_emails(text)


def extract_urls(text: str) -> List[str]:
    """Extract URLs"""
    return TextCleaner.extract_urls(text)
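For orientation, a minimal usage sketch of the module-level API above. The import path follows the file location in this diff; the sample HTML fragment and the expected outputs in the comments are illustrative assumptions, not output captured from the package.

from crawlo.cleaners.text_cleaner import clean_text, extract_emails, extract_urls

# Hypothetical scraped fragment, used only to exercise the calls above.
raw = '<p>Contact&nbsp;us at <a href="https://example.com">example</a>  support@example.com</p>'

# Strips tags, decodes &nbsp;, NFKC-normalizes, then collapses whitespace.
print(clean_text(raw))      # expected: 'Contact us at example support@example.com'
print(extract_emails(raw))  # expected: ['support@example.com']
print(extract_urls(raw))    # expected: ['https://example.com']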
crawlo/cli.py
CHANGED
@@ -1,66 +1,66 @@
The hunk removes lines 1–65 and re-adds them with identical text (only line 66, the main() call, is carried over unchanged). The resulting file:

# crawlo/cli.py
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
import argparse
from crawlo.commands import get_commands


def main():
    # Collect all available commands
    commands = get_commands()

    # Build the top-level parser
    parser = argparse.ArgumentParser(
        description="Crawlo: A lightweight web crawler framework.",
        usage="crawlo <command> [options]",
        add_help=False  # disable the default help; we handle it ourselves
    )

    # Add the help flag
    parser.add_argument('-h', '--help', action='store_true', help='show help information')
    parser.add_argument('command', nargs='?', help='available commands: ' + ', '.join(commands.keys()))

    # Parse only the known arguments
    args, unknown = parser.parse_known_args()

    # Handle the help flag
    if args.help or (args.command is None and not unknown):
        # Import and run the help command
        try:
            module = __import__(commands['help'], fromlist=['main'])
            sys.exit(module.main([]))
        except ImportError as e:
            print(f"Failed to load help command: {e}")
            sys.exit(1)
        except Exception as e:
            print(f"Help command failed: {e}")
            sys.exit(1)

    # Check that the command exists
    if args.command not in commands:
        print(f"Unknown command: {args.command}")
        print(f"Available commands: {', '.join(commands.keys())}")
        # Show the help information
        try:
            module = __import__(commands['help'], fromlist=['main'])
            module.main([])
        except Exception:
            pass
        sys.exit(1)

    # Dynamically import and execute the command
    try:
        module = __import__(commands[args.command], fromlist=['main'])
        # Forward the unknown arguments to the subcommand
        sys.exit(module.main(unknown))
    except ImportError as e:
        print(f"Failed to load command '{args.command}': {e}")
        sys.exit(1)
    except Exception as e:
        print(f"Command '{args.command}' failed: {e}")
        sys.exit(1)


if __name__ == '__main__':
    main()
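To make the dispatch concrete, here is a small runnable sketch of how parse_known_args splits the subcommand name from everything that gets forwarded to it. The spider name and the --loglevel flag are hypothetical arguments chosen for illustration.

import argparse

# Rebuild the parser from crawlo/cli.py to show the split performed by parse_known_args.
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-h', '--help', action='store_true')
parser.add_argument('command', nargs='?')

args, unknown = parser.parse_known_args(['run', 'my_spider', '--loglevel', 'DEBUG'])
print(args.command)  # 'run' -> selects crawlo.commands.run
print(unknown)       # ['my_spider', '--loglevel', 'DEBUG'] -> forwarded to that module's main()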
crawlo/commands/__init__.py
CHANGED
@@ -1,15 +1,15 @@
The hunk removes lines 1–14 and re-adds them with identical text (only line 15 is carried over unchanged). The resulting file:

#!/usr/bin/python
# -*- coding:UTF-8 -*-

_commands = {
    'startproject': 'crawlo.commands.startproject',
    'genspider': 'crawlo.commands.genspider',
    'run': 'crawlo.commands.run',
    'check': 'crawlo.commands.check',
    'list': 'crawlo.commands.list',
    'stats': 'crawlo.commands.stats',
    'help': 'crawlo.commands.help'
}

def get_commands():
    return _commands
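This registry maps command names to dotted module paths, which crawlo/cli.py resolves with __import__(path, fromlist=['main']). A minimal sketch of that lookup, assuming the crawlo package is installed (the 'list' command may additionally expect to run inside a generated project):

from crawlo.commands import get_commands

commands = get_commands()

# A non-empty fromlist makes __import__ return the leaf module
# (crawlo.commands.list) rather than the top-level crawlo package.
module = __import__(commands['list'], fromlist=['main'])
exit_code = module.main([])  # each registered command module exposes main(argv)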