crawlo 1.3.3__py3-none-any.whl → 1.3.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of crawlo might be problematic.
- crawlo/__init__.py +87 -63
- crawlo/__version__.py +1 -1
- crawlo/cli.py +75 -75
- crawlo/commands/__init__.py +14 -14
- crawlo/commands/check.py +594 -594
- crawlo/commands/genspider.py +151 -151
- crawlo/commands/help.py +138 -138
- crawlo/commands/list.py +155 -155
- crawlo/commands/run.py +341 -323
- crawlo/commands/startproject.py +436 -436
- crawlo/commands/stats.py +187 -187
- crawlo/commands/utils.py +196 -196
- crawlo/config.py +312 -312
- crawlo/config_validator.py +277 -277
- crawlo/core/__init__.py +46 -2
- crawlo/core/engine.py +439 -365
- crawlo/core/processor.py +40 -40
- crawlo/core/scheduler.py +257 -256
- crawlo/crawler.py +639 -1167
- crawlo/data/__init__.py +5 -5
- crawlo/data/user_agents.py +194 -194
- crawlo/downloader/__init__.py +273 -273
- crawlo/downloader/aiohttp_downloader.py +228 -226
- crawlo/downloader/cffi_downloader.py +245 -245
- crawlo/downloader/httpx_downloader.py +259 -259
- crawlo/downloader/hybrid_downloader.py +212 -212
- crawlo/downloader/playwright_downloader.py +402 -402
- crawlo/downloader/selenium_downloader.py +472 -472
- crawlo/event.py +11 -11
- crawlo/exceptions.py +81 -81
- crawlo/extension/__init__.py +39 -39
- crawlo/extension/health_check.py +141 -141
- crawlo/extension/log_interval.py +57 -57
- crawlo/extension/log_stats.py +81 -81
- crawlo/extension/logging_extension.py +61 -52
- crawlo/extension/memory_monitor.py +104 -104
- crawlo/extension/performance_profiler.py +133 -133
- crawlo/extension/request_recorder.py +107 -107
- crawlo/factories/__init__.py +28 -0
- crawlo/factories/base.py +69 -0
- crawlo/factories/crawler.py +104 -0
- crawlo/factories/registry.py +85 -0
- crawlo/filters/__init__.py +154 -154
- crawlo/filters/aioredis_filter.py +257 -234
- crawlo/filters/memory_filter.py +269 -269
- crawlo/framework.py +292 -0
- crawlo/initialization/__init__.py +40 -0
- crawlo/initialization/built_in.py +426 -0
- crawlo/initialization/context.py +142 -0
- crawlo/initialization/core.py +194 -0
- crawlo/initialization/phases.py +149 -0
- crawlo/initialization/registry.py +146 -0
- crawlo/items/__init__.py +23 -23
- crawlo/items/base.py +23 -22
- crawlo/items/fields.py +52 -52
- crawlo/items/items.py +104 -104
- crawlo/logging/__init__.py +38 -0
- crawlo/logging/config.py +97 -0
- crawlo/logging/factory.py +129 -0
- crawlo/logging/manager.py +112 -0
- crawlo/middleware/__init__.py +21 -21
- crawlo/middleware/default_header.py +132 -132
- crawlo/middleware/download_delay.py +104 -104
- crawlo/middleware/middleware_manager.py +135 -135
- crawlo/middleware/offsite.py +123 -123
- crawlo/middleware/proxy.py +386 -386
- crawlo/middleware/request_ignore.py +86 -86
- crawlo/middleware/response_code.py +163 -163
- crawlo/middleware/response_filter.py +136 -136
- crawlo/middleware/retry.py +124 -124
- crawlo/middleware/simple_proxy.py +65 -65
- crawlo/mode_manager.py +212 -187
- crawlo/network/__init__.py +21 -21
- crawlo/network/request.py +379 -379
- crawlo/network/response.py +359 -359
- crawlo/pipelines/__init__.py +21 -21
- crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/csv_pipeline.py +316 -316
- crawlo/pipelines/database_dedup_pipeline.py +222 -222
- crawlo/pipelines/json_pipeline.py +218 -218
- crawlo/pipelines/memory_dedup_pipeline.py +115 -115
- crawlo/pipelines/mongo_pipeline.py +131 -131
- crawlo/pipelines/mysql_pipeline.py +318 -318
- crawlo/pipelines/pipeline_manager.py +76 -75
- crawlo/pipelines/redis_dedup_pipeline.py +166 -166
- crawlo/project.py +327 -325
- crawlo/queue/pqueue.py +43 -37
- crawlo/queue/queue_manager.py +503 -379
- crawlo/queue/redis_priority_queue.py +326 -306
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +321 -225
- crawlo/settings/setting_manager.py +214 -198
- crawlo/spider/__init__.py +657 -639
- crawlo/stats_collector.py +73 -59
- crawlo/subscriber.py +129 -129
- crawlo/task_manager.py +139 -30
- crawlo/templates/crawlo.cfg.tmpl +10 -10
- crawlo/templates/project/__init__.py.tmpl +3 -3
- crawlo/templates/project/items.py.tmpl +17 -17
- crawlo/templates/project/middlewares.py.tmpl +118 -118
- crawlo/templates/project/pipelines.py.tmpl +96 -96
- crawlo/templates/project/settings.py.tmpl +168 -267
- crawlo/templates/project/settings_distributed.py.tmpl +167 -180
- crawlo/templates/project/settings_gentle.py.tmpl +167 -61
- crawlo/templates/project/settings_high_performance.py.tmpl +168 -131
- crawlo/templates/project/settings_minimal.py.tmpl +66 -35
- crawlo/templates/project/settings_simple.py.tmpl +165 -102
- crawlo/templates/project/spiders/__init__.py.tmpl +10 -6
- crawlo/templates/run.py.tmpl +34 -38
- crawlo/templates/spider/spider.py.tmpl +143 -143
- crawlo/templates/spiders_init.py.tmpl +10 -0
- crawlo/tools/__init__.py +200 -200
- crawlo/tools/anti_crawler.py +268 -268
- crawlo/tools/authenticated_proxy.py +240 -240
- crawlo/tools/data_formatter.py +225 -225
- crawlo/tools/data_validator.py +180 -180
- crawlo/tools/date_tools.py +289 -289
- crawlo/tools/distributed_coordinator.py +388 -388
- crawlo/tools/encoding_converter.py +127 -127
- crawlo/tools/network_diagnostic.py +365 -0
- crawlo/tools/request_tools.py +82 -82
- crawlo/tools/retry_mechanism.py +224 -224
- crawlo/tools/scenario_adapter.py +262 -262
- crawlo/tools/text_cleaner.py +232 -232
- crawlo/utils/__init__.py +34 -34
- crawlo/utils/batch_processor.py +259 -259
- crawlo/utils/class_loader.py +26 -0
- crawlo/utils/controlled_spider_mixin.py +439 -439
- crawlo/utils/db_helper.py +343 -343
- crawlo/utils/enhanced_error_handler.py +356 -356
- crawlo/utils/env_config.py +142 -142
- crawlo/utils/error_handler.py +165 -124
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/large_scale_config.py +286 -286
- crawlo/utils/large_scale_helper.py +344 -344
- crawlo/utils/log.py +44 -200
- crawlo/utils/performance_monitor.py +285 -285
- crawlo/utils/queue_helper.py +175 -175
- crawlo/utils/redis_connection_pool.py +388 -351
- crawlo/utils/redis_key_validator.py +198 -198
- crawlo/utils/request.py +267 -267
- crawlo/utils/request_serializer.py +225 -218
- crawlo/utils/spider_loader.py +61 -61
- crawlo/utils/system.py +11 -11
- crawlo/utils/tools.py +4 -4
- crawlo/utils/url.py +39 -39
- {crawlo-1.3.3.dist-info → crawlo-1.3.4.dist-info}/METADATA +1126 -1020
- crawlo-1.3.4.dist-info/RECORD +278 -0
- examples/__init__.py +7 -7
- tests/__init__.py +7 -7
- tests/advanced_tools_example.py +275 -275
- tests/authenticated_proxy_example.py +107 -107
- tests/baidu_performance_test.py +109 -0
- tests/baidu_test.py +60 -0
- tests/cleaners_example.py +160 -160
- tests/comprehensive_framework_test.py +213 -0
- tests/comprehensive_test.py +82 -0
- tests/comprehensive_testing_summary.md +187 -0
- tests/config_validation_demo.py +142 -142
- tests/controlled_spider_example.py +205 -205
- tests/date_tools_example.py +180 -180
- tests/debug_configure.py +70 -0
- tests/debug_framework_logger.py +85 -0
- tests/debug_log_levels.py +64 -0
- tests/debug_pipelines.py +66 -66
- tests/distributed_test.py +67 -0
- tests/distributed_test_debug.py +77 -0
- tests/dynamic_loading_example.py +523 -523
- tests/dynamic_loading_test.py +104 -104
- tests/env_config_example.py +133 -133
- tests/error_handling_example.py +171 -171
- tests/final_command_test_report.md +0 -0
- tests/final_comprehensive_test.py +152 -0
- tests/final_validation_test.py +183 -0
- tests/framework_performance_test.py +203 -0
- tests/optimized_performance_test.py +212 -0
- tests/performance_comparison.py +246 -0
- tests/queue_blocking_test.py +114 -0
- tests/queue_test.py +90 -0
- tests/redis_key_validation_demo.py +130 -130
- tests/request_params_example.py +150 -150
- tests/response_improvements_example.py +144 -144
- tests/scrapy_comparison/ofweek_scrapy.py +139 -0
- tests/scrapy_comparison/scrapy_test.py +134 -0
- tests/simple_command_test.py +120 -0
- tests/simple_crawlo_test.py +128 -0
- tests/simple_log_test.py +58 -0
- tests/simple_optimization_test.py +129 -0
- tests/simple_spider_test.py +50 -0
- tests/simple_test.py +48 -0
- tests/test_advanced_tools.py +148 -148
- tests/test_all_commands.py +231 -0
- tests/test_all_redis_key_configs.py +145 -145
- tests/test_authenticated_proxy.py +141 -141
- tests/test_batch_processor.py +179 -0
- tests/test_cleaners.py +54 -54
- tests/test_component_factory.py +175 -0
- tests/test_comprehensive.py +146 -146
- tests/test_config_consistency.py +80 -80
- tests/test_config_merge.py +152 -152
- tests/test_config_validator.py +182 -182
- tests/test_controlled_spider_mixin.py +80 -0
- tests/test_crawlo_proxy_integration.py +108 -108
- tests/test_date_tools.py +123 -123
- tests/test_default_header_middleware.py +158 -158
- tests/test_distributed.py +65 -65
- tests/test_double_crawlo_fix.py +207 -207
- tests/test_double_crawlo_fix_simple.py +124 -124
- tests/test_download_delay_middleware.py +221 -221
- tests/test_downloader_proxy_compatibility.py +268 -268
- tests/test_dynamic_downloaders_proxy.py +124 -124
- tests/test_dynamic_proxy.py +92 -92
- tests/test_dynamic_proxy_config.py +146 -146
- tests/test_dynamic_proxy_real.py +109 -109
- tests/test_edge_cases.py +303 -303
- tests/test_enhanced_error_handler.py +270 -270
- tests/test_enhanced_error_handler_comprehensive.py +246 -0
- tests/test_env_config.py +121 -121
- tests/test_error_handler_compatibility.py +112 -112
- tests/test_factories.py +253 -0
- tests/test_final_validation.py +153 -153
- tests/test_framework_env_usage.py +103 -103
- tests/test_framework_logger.py +67 -0
- tests/test_framework_startup.py +65 -0
- tests/test_integration.py +169 -169
- tests/test_item_dedup_redis_key.py +122 -122
- tests/test_large_scale_config.py +113 -0
- tests/test_large_scale_helper.py +236 -0
- tests/test_mode_change.py +73 -0
- tests/test_mode_consistency.py +51 -51
- tests/test_offsite_middleware.py +221 -221
- tests/test_parsel.py +29 -29
- tests/test_performance.py +327 -327
- tests/test_performance_monitor.py +116 -0
- tests/test_proxy_api.py +264 -264
- tests/test_proxy_health_check.py +32 -32
- tests/test_proxy_middleware.py +121 -121
- tests/test_proxy_middleware_enhanced.py +216 -216
- tests/test_proxy_middleware_integration.py +136 -136
- tests/test_proxy_middleware_refactored.py +184 -184
- tests/test_proxy_providers.py +56 -56
- tests/test_proxy_stats.py +19 -19
- tests/test_proxy_strategies.py +59 -59
- tests/test_queue_empty_check.py +42 -0
- tests/test_queue_manager_double_crawlo.py +173 -173
- tests/test_queue_manager_redis_key.py +176 -176
- tests/test_random_user_agent.py +72 -72
- tests/test_real_scenario_proxy.py +195 -195
- tests/test_redis_config.py +28 -28
- tests/test_redis_connection_pool.py +294 -294
- tests/test_redis_key_naming.py +181 -181
- tests/test_redis_key_validator.py +123 -123
- tests/test_redis_queue.py +224 -224
- tests/test_request_ignore_middleware.py +182 -182
- tests/test_request_params.py +111 -111
- tests/test_request_serialization.py +70 -70
- tests/test_response_code_middleware.py +349 -349
- tests/test_response_filter_middleware.py +427 -427
- tests/test_response_improvements.py +152 -152
- tests/test_retry_middleware.py +241 -241
- tests/test_scheduler.py +252 -252
- tests/test_scheduler_config_update.py +133 -133
- tests/test_simple_response.py +61 -61
- tests/test_telecom_spider_redis_key.py +205 -205
- tests/test_template_content.py +87 -87
- tests/test_template_redis_key.py +134 -134
- tests/test_tools.py +159 -159
- tests/test_user_agents.py +96 -96
- tests/tools_example.py +260 -260
- tests/untested_features_report.md +139 -0
- tests/verify_debug.py +52 -0
- tests/verify_distributed.py +117 -117
- tests/verify_log_fix.py +112 -0
- crawlo-1.3.3.dist-info/RECORD +0 -219
- tests/DOUBLE_CRAWLO_PREFIX_FIX_REPORT.md +0 -82
- {crawlo-1.3.3.dist-info → crawlo-1.3.4.dist-info}/WHEEL +0 -0
- {crawlo-1.3.3.dist-info → crawlo-1.3.4.dist-info}/entry_points.txt +0 -0
- {crawlo-1.3.3.dist-info → crawlo-1.3.4.dist-info}/top_level.txt +0 -0
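This release adds three new packages (crawlo/factories/, crawlo/initialization/, and crawlo/logging/) plus a top-level crawlo/framework.py. The new test files reproduced below exercise their entry points. As a minimal sketch, the project name and log path here are illustrative, while the imports and calls mirror tests/comprehensive_framework_test.py:

# Smoke test of the new 1.3.4 initialization and logging surface;
# the calls below are the ones the new comprehensive framework test uses.
from crawlo.initialization import initialize_framework
from crawlo.logging import configure_logging, get_logger

settings = initialize_framework({'PROJECT_NAME': 'smoke_test'})
configure_logging({'LOG_LEVEL': 'INFO', 'LOG_FILE': 'logs/smoke_test.log'})

logger = get_logger('smoke_test')
logger.info(f"RUN_MODE={settings.get('RUN_MODE')}, QUEUE_TYPE={settings.get('QUEUE_TYPE')}")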
tests/cleaners_example.py
CHANGED
@@ -1,161 +1,161 @@
Lines 1-160 are removed and re-added with identical text (no textual difference is visible; only the final call on line 161, demo_in_spider(), appears as unchanged context). The file reads:

#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Usage examples for the Crawlo framework's data-cleaning tools
"""
from crawlo.tools import (
    TextCleaner,
    DataFormatter,
    remove_html_tags,
    decode_html_entities,
    clean_text,
    extract_numbers,
    extract_emails,
    extract_urls,
    format_number,
    format_currency,
    format_phone_number,
    format_chinese_id_card
)


def demo_text_cleaner():
    """Demonstrate the text-cleaning utilities"""
    print("=== Text Cleaning Demo ===\n")

    # 1. Remove HTML tags
    print("1. Remove HTML tags:")
    html_text = "<p>这是一个<b>测试</b>文本</p>"
    clean_text_result = remove_html_tags(html_text)
    print(f" Original text: {html_text}")
    print(f" Cleaned: {clean_text_result}")

    print()

    # 2. Decode HTML entities
    print("2. Decode HTML entities:")
    entity_text = "这是一个&nbsp;<b>测试</b>&amp;文本"
    decoded_text = decode_html_entities(entity_text)
    print(f" Original text: {entity_text}")
    print(f" Decoded: {decoded_text}")

    print()

    # 3. Remove extra whitespace
    print("3. Remove extra whitespace:")
    whitespace_text = "这是 一个\t\t测试\n\n文本"
    clean_whitespace = TextCleaner.remove_extra_whitespace(whitespace_text)
    print(f" Original text: {repr(whitespace_text)}")
    print(f" Cleaned: {repr(clean_whitespace)}")

    print()

    # 4. Combined cleaning
    print("4. Combined cleaning:")
    complex_text = "<p>这是&nbsp;一个<b>测试</b>&amp;文本&nbsp;</p>"
    cleaned = clean_text(complex_text)
    print(f" Original text: {complex_text}")
    print(f" Cleaned: {cleaned}")

    print()

    # 5. Extract information
    print("5. Extract information:")
    info_text = "联系邮箱: test@example.com, 电话: 13812345678, 价格: ¥123.45"
    numbers = extract_numbers(info_text)
    emails = extract_emails(info_text)
    urls = extract_urls(info_text)
    print(f" Original text: {info_text}")
    print(f" Numbers found: {numbers}")
    print(f" Emails found: {emails}")
    print(f" URLs found: {urls}")


def demo_data_formatter():
    """Demonstrate the data-formatting utilities"""
    print("\n=== Data Formatting Demo ===\n")

    # 1. Number formatting
    print("1. Number formatting:")
    number = 1234567.891
    formatted_num1 = format_number(number, precision=2, thousand_separator=False)
    formatted_num2 = format_number(number, precision=2, thousand_separator=True)
    print(f" Original number: {number}")
    print(f" Formatted (no thousands separator): {formatted_num1}")
    print(f" Formatted (with thousands separator): {formatted_num2}")

    print()

    # 2. Currency formatting
    print("2. Currency formatting:")
    price = 1234.567
    formatted_currency1 = format_currency(price, "¥", 2)
    formatted_currency2 = format_currency(price, "$", 2)
    print(f" Original price: {price}")
    print(f" CNY format: {formatted_currency1}")
    print(f" USD format: {formatted_currency2}")

    print()

    # 3. Phone-number formatting
    print("3. Phone-number formatting:")
    phone = "13812345678"
    formatted_phone1 = format_phone_number(phone, "+86", "international")
    formatted_phone2 = format_phone_number(phone, "", "domestic")
    formatted_phone3 = format_phone_number(phone, "", "plain")
    print(f" Original number: {phone}")
    print(f" International format: {formatted_phone1}")
    print(f" Domestic format: {formatted_phone2}")
    print(f" Digits-only format: {formatted_phone3}")

    print()

    # 4. Chinese ID-card formatting
    print("4. Chinese ID-card formatting:")
    id_card = "110101199001011234"
    formatted_id = format_chinese_id_card(id_card)
    print(f" Original number: {id_card}")
    print(f" Formatted: {formatted_id}")


def demo_in_spider():
    """Demonstrate using the cleaning tools inside a spider"""
    print("\n=== Using the Cleaning Tools in a Spider ===\n")
    print("In a spider project, you can use the cleaning tools like this:")
    print("""
from crawlo import Spider, Request, Item, Field
from crawlo.tools import clean_text, format_currency, extract_numbers

class ProductItem(Item):
    name = Field()
    price = Field()
    description = Field()

class ProductSpider(Spider):
    def parse(self, response):
        # Extract data from the page
        name = response.css('.product-name::text').get()
        price_text = response.css('.price::text').get()
        description = response.css('.description::text').get()

        # Clean and format the data
        clean_name = clean_text(name) if name else None
        price_numbers = extract_numbers(price_text) if price_text else []
        clean_price = format_currency(price_numbers[0]) if price_numbers else None
        clean_description = clean_text(description) if description else None

        # Build the item
        item = ProductItem()
        item['name'] = clean_name
        item['price'] = clean_price
        item['description'] = clean_description

        yield item
""")


if __name__ == '__main__':
    # Run the demos
    demo_text_cleaner()
    demo_data_formatter()
    demo_in_spider()
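The demo above prints inputs and cleaned outputs but does not show what clean_text actually does. A plausible reading of the combined-cleaning step, written as a standalone sketch (an assumption for illustration, not crawlo's actual implementation):

import html
import re

def clean_text_sketch(text: str) -> str:
    # Assumed pipeline: decode entities, strip tags, collapse whitespace.
    text = html.unescape(text)           # &nbsp; -> non-breaking space, &amp; -> &
    text = re.sub(r'<[^>]+>', '', text)  # drop HTML tags such as <p> and <b>
    text = re.sub(r'\s+', ' ', text)     # collapse whitespace runs (incl. \xa0)
    return text.strip()

print(clean_text_sketch("<p>这是&nbsp;一个<b>测试</b>&amp;文本&nbsp;</p>"))
# -> 这是 一个测试&文本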
tests/comprehensive_framework_test.py
ADDED
@@ -0,0 +1,213 @@

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Comprehensive framework test script
Exercises all of the framework's core features
"""

import sys
import os
import asyncio
import time

# Add the project root to the Python path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

from crawlo.spider import Spider
from crawlo import Request


class TestSpider(Spider):
    """Test spider"""
    name = 'framework_test_spider'

    def start_requests(self):
        """Issue test requests"""
        # Generate a few test requests
        for i in range(3):
            yield Request(f'https://httpbin.org/get?page={i}', callback=self.parse)

    def parse(self, response):
        """Parse the response"""
        print(f"Successfully fetched: {response.url}")
        print(f"Status code: {response.status_code}")
        return []


async def test_framework_initialization():
    """Test framework initialization"""
    print("Testing framework initialization...")

    from crawlo.initialization import initialize_framework

    # Default settings
    settings = initialize_framework()
    print(f"Defaults - RUN_MODE: {settings.get('RUN_MODE')}")
    print(f"Defaults - QUEUE_TYPE: {settings.get('QUEUE_TYPE')}")

    # Custom settings
    custom_settings = {
        'PROJECT_NAME': 'framework_test',
        'SCHEDULER_MAX_QUEUE_SIZE': 50
    }

    settings = initialize_framework(custom_settings)
    print(f"Custom - PROJECT_NAME: {settings.get('PROJECT_NAME')}")
    print(f"Custom - SCHEDULER_MAX_QUEUE_SIZE: {settings.get('SCHEDULER_MAX_QUEUE_SIZE')}")


async def test_crawler_execution():
    """Test crawler execution"""
    print("Testing crawler execution...")

    from crawlo.initialization import initialize_framework
    from crawlo.crawler import CrawlerProcess

    # Initialize the framework
    settings = initialize_framework({
        'PROJECT_NAME': 'framework_test'
    })

    # Create the crawler process
    process = CrawlerProcess(settings=settings)

    # Run the spider
    await process.crawl(TestSpider)


async def test_queue_system():
    """Test the queue system"""
    print("Testing the queue system...")

    from crawlo.queue.queue_manager import QueueConfig, QueueManager
    from crawlo import Request

    # Small queue configuration for the test
    queue_config = QueueConfig(
        queue_type='memory',
        max_queue_size=5
    )

    # Create the queue manager
    queue_manager = QueueManager(queue_config)
    await queue_manager.initialize()

    # Enqueue requests
    print("Adding requests to the queue...")
    for i in range(3):
        request = Request(f'https://example.com/test{i}')
        await queue_manager.put(request)
        print(f"Added request {i}")

    # Dequeue requests
    print("Fetching requests from the queue...")
    for i in range(3):
        request = await queue_manager.get(timeout=1.0)
        if request:
            print(f"Got request: {request.url}")

    # Close the queue
    await queue_manager.close()


async def test_spider_registry():
    """Test the spider registry"""
    print("Testing the spider registry...")

    from crawlo.spider import get_global_spider_registry, is_spider_registered, get_spider_names

    # Check whether the test spider is registered
    spider_name = TestSpider.name
    is_registered = is_spider_registered(spider_name)
    print(f"Spider '{spider_name}' registered: {is_registered}")

    # List all registered spider names
    spider_names = get_spider_names()
    print(f"All registered spiders: {spider_names}")


async def test_logging_system():
    """Test the logging system"""
    print("Testing the logging system...")

    from crawlo.logging import get_logger, configure_logging

    # Configure logging
    configure_logging({
        'LOG_LEVEL': 'INFO',
        'LOG_FILE': 'logs/test_framework.log'
    })

    # Get a logger and write some records
    logger = get_logger('test_framework')
    logger.info("This is a test info message")
    logger.warning("This is a test warning message")
    logger.error("This is a test error message")


async def test_settings_system():
    """Test the settings system"""
    print("Testing the settings system...")

    from crawlo.settings.setting_manager import SettingManager

    # Create a settings manager
    settings = SettingManager()

    # Plain setting
    settings.set('TEST_KEY', 'test_value')
    test_value = settings.get('TEST_KEY')
    print(f"TEST_KEY value: {test_value}")

    # Typed settings
    settings.set('TEST_INT', 42)
    test_int = settings.get_int('TEST_INT')
    print(f"TEST_INT value: {test_int}")

    settings.set('TEST_BOOL', True)
    test_bool = settings.get_bool('TEST_BOOL')
    print(f"TEST_BOOL value: {test_bool}")


async def main():
    """Entry point"""
    print("Starting the comprehensive framework test...")
    print("=" * 50)

    try:
        # 1. Framework initialization
        await test_framework_initialization()
        print()

        # 2. Settings system
        await test_settings_system()
        print()

        # 3. Logging system
        await test_logging_system()
        print()

        # 4. Queue system
        await test_queue_system()
        print()

        # 5. Spider registry
        await test_spider_registry()
        print()

        # 6. Crawler execution
        await test_crawler_execution()
        print()

        print("=" * 50)
        print("All tests passed! The framework is working correctly.")

    except Exception as e:
        print("=" * 50)
        print(f"Test failed: {e}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    asyncio.run(main())
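The queue test above enqueues three requests into a memory queue capped at five entries and drains them with a one-second timeout. The same put/get-with-timeout pattern can be reproduced with asyncio primitives alone; this is a stand-in for QueueManager's memory backend, not its implementation:

import asyncio

async def demo():
    q = asyncio.Queue(maxsize=5)  # bounded, like max_queue_size=5
    for i in range(3):
        await q.put(f'https://example.com/test{i}')
    while True:
        try:
            # get with a timeout, mirroring queue_manager.get(timeout=1.0)
            url = await asyncio.wait_for(q.get(), timeout=1.0)
        except asyncio.TimeoutError:
            break
        print('got', url)

asyncio.run(demo())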
tests/comprehensive_test.py
ADDED
@@ -0,0 +1,82 @@

#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Comprehensive test script
Tests all of the Crawlo framework's optimizations
"""
import asyncio
import sys
import os

# Add the project root to the Python path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

from crawlo import Spider, Request
from crawlo.crawler import CrawlerProcess


class ComprehensiveSpider(Spider):
    name = 'comprehensive_test'

    def start_requests(self):
        # Test several URLs
        urls = [
            'https://www.baidu.com/',
            'https://www.baidu.com/s?wd=python',
            'https://www.baidu.com/s?wd=爬虫',
            'https://www.baidu.com/s?wd=框架',
            'https://www.baidu.com/s?wd=异步',
        ]

        for i, url in enumerate(urls):
            # Assign different priorities
            priority = -i  # smaller values mean higher priority
            yield Request(url, callback=self.parse, priority=priority)

    def parse(self, response):
        self.logger.info(f"Visited URL: {response.url}")
        self.logger.info(f"Status code: {response.status_code}")
        self.logger.info(f"Page title: {response.xpath('//title/text()').get()}")

        # Extract a few links for follow-up requests
        links = response.xpath('//a/@href').getall()[:3]  # first 3 links only

        # Follow the links, tracking depth
        for link in links:
            if link.startswith('http'):
                # Create a new request with increased depth
                meta = response.meta.copy()
                meta['depth'] = meta.get('depth', 0) + 1
                yield Request(link, callback=self.parse_link, meta=meta)

    def parse_link(self, response):
        self.logger.info(f"Followed link: {response.url}")
        self.logger.info(f"Status code: {response.status_code}")
        self.logger.info(f"Page depth: {response.meta.get('depth', 0)}")


async def main():
    # Create the crawler process
    process = CrawlerProcess(settings={
        'CONCURRENCY': 4,                 # concurrency level
        'DOWNLOAD_DELAY': 0.5,            # download delay
        'LOG_LEVEL': 'INFO',              # log level
        'SCHEDULER_MAX_QUEUE_SIZE': 100,  # maximum queue size
    })

    # Run the spider
    await process.crawl(ComprehensiveSpider)

    # Print statistics
    if hasattr(process, 'get_metrics'):
        metrics = process.get_metrics()
        print(f"\n=== Crawl Statistics ===")
        print(f"Total duration: {metrics.get('total_duration', 0):.2f}s")
        print(f"Total requests: {metrics.get('total_requests', 0)}")
        print(f"Successful requests: {metrics.get('total_success', 0)}")
        print(f"Failed requests: {metrics.get('total_errors', 0)}")
        print(f"Average success rate: {metrics.get('average_success_rate', 0):.2f}%")


if __name__ == '__main__':
    asyncio.run(main())
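In start_requests above, priority = -i gives later URLs in the list smaller values, and the inline comment states that smaller values mean higher priority. That is the ordering a min-heap scheduler produces; a sketch of the effect (crawlo's scheduler internals may differ):

import heapq

urls = ['https://www.baidu.com/', 'https://www.baidu.com/s?wd=python', 'https://www.baidu.com/s?wd=爬虫']
heap = []
for i, url in enumerate(urls):
    heapq.heappush(heap, (-i, url))  # priority = -i, as in ComprehensiveSpider

while heap:
    priority, url = heapq.heappop(heap)
    print(priority, url)  # pops priority -2 first, 0 last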