crawlo 1.1.9-py3-none-any.whl → 1.2.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of crawlo has been flagged as potentially problematic.
- crawlo/__init__.py +61 -61
- crawlo/__version__.py +1 -1
- crawlo/cleaners/__init__.py +60 -60
- crawlo/cleaners/data_formatter.py +225 -225
- crawlo/cleaners/encoding_converter.py +125 -125
- crawlo/cleaners/text_cleaner.py +232 -232
- crawlo/cli.py +65 -65
- crawlo/commands/__init__.py +14 -14
- crawlo/commands/check.py +594 -594
- crawlo/commands/genspider.py +151 -151
- crawlo/commands/help.py +142 -132
- crawlo/commands/list.py +155 -155
- crawlo/commands/run.py +292 -292
- crawlo/commands/startproject.py +418 -418
- crawlo/commands/stats.py +188 -188
- crawlo/commands/utils.py +186 -186
- crawlo/config.py +312 -312
- crawlo/config_validator.py +252 -252
- crawlo/core/__init__.py +2 -2
- crawlo/core/engine.py +354 -345
- crawlo/core/processor.py +40 -40
- crawlo/core/scheduler.py +143 -136
- crawlo/crawler.py +1027 -1027
- crawlo/downloader/__init__.py +266 -266
- crawlo/downloader/aiohttp_downloader.py +220 -220
- crawlo/downloader/cffi_downloader.py +256 -256
- crawlo/downloader/httpx_downloader.py +259 -259
- crawlo/downloader/hybrid_downloader.py +213 -213
- crawlo/downloader/playwright_downloader.py +402 -402
- crawlo/downloader/selenium_downloader.py +472 -472
- crawlo/event.py +11 -11
- crawlo/exceptions.py +81 -81
- crawlo/extension/__init__.py +37 -37
- crawlo/extension/health_check.py +141 -141
- crawlo/extension/log_interval.py +57 -57
- crawlo/extension/log_stats.py +81 -81
- crawlo/extension/logging_extension.py +43 -43
- crawlo/extension/memory_monitor.py +104 -104
- crawlo/extension/performance_profiler.py +133 -133
- crawlo/extension/request_recorder.py +107 -107
- crawlo/filters/__init__.py +154 -154
- crawlo/filters/aioredis_filter.py +280 -280
- crawlo/filters/memory_filter.py +269 -269
- crawlo/items/__init__.py +23 -23
- crawlo/items/base.py +21 -21
- crawlo/items/fields.py +53 -53
- crawlo/items/items.py +104 -104
- crawlo/middleware/__init__.py +21 -21
- crawlo/middleware/default_header.py +132 -32
- crawlo/middleware/download_delay.py +105 -28
- crawlo/middleware/middleware_manager.py +135 -135
- crawlo/middleware/offsite.py +116 -0
- crawlo/middleware/proxy.py +366 -272
- crawlo/middleware/request_ignore.py +88 -30
- crawlo/middleware/response_code.py +164 -18
- crawlo/middleware/response_filter.py +138 -26
- crawlo/middleware/retry.py +124 -124
- crawlo/mode_manager.py +211 -211
- crawlo/network/__init__.py +21 -21
- crawlo/network/request.py +338 -338
- crawlo/network/response.py +359 -359
- crawlo/pipelines/__init__.py +21 -21
- crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/csv_pipeline.py +316 -316
- crawlo/pipelines/database_dedup_pipeline.py +224 -224
- crawlo/pipelines/json_pipeline.py +218 -218
- crawlo/pipelines/memory_dedup_pipeline.py +115 -115
- crawlo/pipelines/mongo_pipeline.py +131 -131
- crawlo/pipelines/mysql_pipeline.py +316 -316
- crawlo/pipelines/pipeline_manager.py +61 -61
- crawlo/pipelines/redis_dedup_pipeline.py +167 -167
- crawlo/project.py +187 -187
- crawlo/queue/pqueue.py +37 -37
- crawlo/queue/queue_manager.py +337 -334
- crawlo/queue/redis_priority_queue.py +298 -298
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +226 -219
- crawlo/settings/setting_manager.py +122 -122
- crawlo/spider/__init__.py +639 -639
- crawlo/stats_collector.py +59 -59
- crawlo/subscriber.py +130 -130
- crawlo/task_manager.py +30 -30
- crawlo/templates/crawlo.cfg.tmpl +10 -10
- crawlo/templates/project/__init__.py.tmpl +3 -3
- crawlo/templates/project/items.py.tmpl +17 -17
- crawlo/templates/project/middlewares.py.tmpl +118 -109
- crawlo/templates/project/pipelines.py.tmpl +96 -96
- crawlo/templates/project/run.py.tmpl +45 -45
- crawlo/templates/project/settings.py.tmpl +327 -326
- crawlo/templates/project/settings_distributed.py.tmpl +119 -119
- crawlo/templates/project/settings_gentle.py.tmpl +94 -94
- crawlo/templates/project/settings_high_performance.py.tmpl +151 -151
- crawlo/templates/project/settings_simple.py.tmpl +68 -68
- crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
- crawlo/templates/spider/spider.py.tmpl +143 -141
- crawlo/tools/__init__.py +182 -182
- crawlo/tools/anti_crawler.py +268 -268
- crawlo/tools/authenticated_proxy.py +240 -240
- crawlo/tools/data_validator.py +180 -180
- crawlo/tools/date_tools.py +35 -35
- crawlo/tools/distributed_coordinator.py +386 -386
- crawlo/tools/retry_mechanism.py +220 -220
- crawlo/tools/scenario_adapter.py +262 -262
- crawlo/utils/__init__.py +35 -35
- crawlo/utils/batch_processor.py +260 -260
- crawlo/utils/controlled_spider_mixin.py +439 -439
- crawlo/utils/date_tools.py +290 -290
- crawlo/utils/db_helper.py +343 -343
- crawlo/utils/enhanced_error_handler.py +359 -359
- crawlo/utils/env_config.py +105 -105
- crawlo/utils/error_handler.py +125 -125
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/large_scale_config.py +286 -286
- crawlo/utils/large_scale_helper.py +343 -343
- crawlo/utils/log.py +128 -128
- crawlo/utils/performance_monitor.py +284 -284
- crawlo/utils/queue_helper.py +175 -175
- crawlo/utils/redis_connection_pool.py +334 -334
- crawlo/utils/redis_key_validator.py +199 -199
- crawlo/utils/request.py +267 -267
- crawlo/utils/request_serializer.py +219 -219
- crawlo/utils/spider_loader.py +62 -62
- crawlo/utils/system.py +11 -11
- crawlo/utils/tools.py +4 -4
- crawlo/utils/url.py +39 -39
- crawlo-1.2.1.dist-info/METADATA +692 -0
- crawlo-1.2.1.dist-info/RECORD +220 -0
- examples/__init__.py +7 -7
- examples/aiohttp_settings.py +42 -0
- examples/curl_cffi_settings.py +41 -0
- examples/default_header_middleware_example.py +107 -0
- examples/default_header_spider_example.py +129 -0
- examples/download_delay_middleware_example.py +160 -0
- examples/httpx_settings.py +42 -0
- examples/multi_downloader_proxy_example.py +81 -0
- examples/offsite_middleware_example.py +55 -0
- examples/offsite_spider_example.py +107 -0
- examples/proxy_spider_example.py +166 -0
- examples/request_ignore_middleware_example.py +51 -0
- examples/request_ignore_spider_example.py +99 -0
- examples/response_code_middleware_example.py +52 -0
- examples/response_filter_middleware_example.py +67 -0
- examples/tong_hua_shun_settings.py +62 -0
- examples/tong_hua_shun_spider.py +170 -0
- tests/DOUBLE_CRAWLO_PREFIX_FIX_REPORT.md +81 -81
- tests/__init__.py +7 -7
- tests/advanced_tools_example.py +275 -275
- tests/authenticated_proxy_example.py +236 -236
- tests/cleaners_example.py +160 -160
- tests/config_validation_demo.py +102 -102
- tests/controlled_spider_example.py +205 -205
- tests/date_tools_example.py +180 -180
- tests/dynamic_loading_example.py +523 -523
- tests/dynamic_loading_test.py +104 -104
- tests/env_config_example.py +133 -133
- tests/error_handling_example.py +171 -171
- tests/redis_key_validation_demo.py +130 -130
- tests/response_improvements_example.py +144 -144
- tests/test_advanced_tools.py +148 -148
- tests/test_all_redis_key_configs.py +145 -145
- tests/test_authenticated_proxy.py +141 -141
- tests/test_cleaners.py +54 -54
- tests/test_comprehensive.py +146 -146
- tests/test_config_validator.py +193 -193
- tests/test_crawlo_proxy_integration.py +173 -0
- tests/test_date_tools.py +123 -123
- tests/test_default_header_middleware.py +159 -0
- tests/test_double_crawlo_fix.py +207 -207
- tests/test_double_crawlo_fix_simple.py +124 -124
- tests/test_download_delay_middleware.py +222 -0
- tests/test_downloader_proxy_compatibility.py +269 -0
- tests/test_dynamic_downloaders_proxy.py +124 -124
- tests/test_dynamic_proxy.py +92 -92
- tests/test_dynamic_proxy_config.py +146 -146
- tests/test_dynamic_proxy_real.py +109 -109
- tests/test_edge_cases.py +303 -303
- tests/test_enhanced_error_handler.py +270 -270
- tests/test_env_config.py +121 -121
- tests/test_error_handler_compatibility.py +112 -112
- tests/test_final_validation.py +153 -153
- tests/test_framework_env_usage.py +103 -103
- tests/test_integration.py +356 -356
- tests/test_item_dedup_redis_key.py +122 -122
- tests/test_offsite_middleware.py +222 -0
- tests/test_parsel.py +29 -29
- tests/test_performance.py +327 -327
- tests/test_proxy_api.py +265 -0
- tests/test_proxy_health_check.py +32 -32
- tests/test_proxy_middleware.py +122 -0
- tests/test_proxy_middleware_enhanced.py +217 -0
- tests/test_proxy_middleware_integration.py +136 -136
- tests/test_proxy_providers.py +56 -56
- tests/test_proxy_stats.py +19 -19
- tests/test_proxy_strategies.py +59 -59
- tests/test_queue_manager_double_crawlo.py +174 -231
- tests/test_queue_manager_redis_key.py +176 -176
- tests/test_real_scenario_proxy.py +196 -0
- tests/test_redis_config.py +28 -28
- tests/test_redis_connection_pool.py +294 -294
- tests/test_redis_key_naming.py +181 -181
- tests/test_redis_key_validator.py +123 -123
- tests/test_redis_queue.py +224 -224
- tests/test_request_ignore_middleware.py +183 -0
- tests/test_request_serialization.py +70 -70
- tests/test_response_code_middleware.py +350 -0
- tests/test_response_filter_middleware.py +428 -0
- tests/test_response_improvements.py +152 -152
- tests/test_retry_middleware.py +242 -0
- tests/test_scheduler.py +241 -241
- tests/test_simple_response.py +61 -61
- tests/test_telecom_spider_redis_key.py +205 -205
- tests/test_template_content.py +87 -87
- tests/test_template_redis_key.py +134 -134
- tests/test_tools.py +153 -153
- tests/tools_example.py +257 -257
- crawlo-1.1.9.dist-info/METADATA +0 -626
- crawlo-1.1.9.dist-info/RECORD +0 -190
- {crawlo-1.1.9.dist-info → crawlo-1.2.1.dist-info}/WHEEL +0 -0
- {crawlo-1.1.9.dist-info → crawlo-1.2.1.dist-info}/entry_points.txt +0 -0
- {crawlo-1.1.9.dist-info → crawlo-1.2.1.dist-info}/top_level.txt +0 -0
@@ -1,142 +1,144 @@
-(141 lines removed: the previous version of the template; its lines 1-35 match the new version below, and the remainder is truncated to blank lines and comment fragments in this diff rendering)
+# -*- coding: UTF-8 -*-
+"""
+{{project_name}}.spiders.{{spider_name}}
+=======================================
+Spider generated by the `crawlo genspider` command.
+Built on the Crawlo framework, with support for asynchronous concurrency, distributed crawling, and more.
+
+Usage example:
+    crawlo crawl {{spider_name}}
+"""
+
+from crawlo.spider import Spider
+from crawlo import Request
+from ..items import ExampleItem
+
+
+class {{class_name}}(Spider):
+    """
+    Spider: {{spider_name}}
+
+    Features:
+    - Concurrent crawling
+    - Automatic deduplication
+    - Error retry mechanism
+    - Data pipeline processing
+    """
+    name = '{{spider_name}}'
+    allowed_domains = ['{{domain}}']
+    start_urls = ['https://{{domain}}/']
+
+    # Advanced configuration (optional)
+    # custom_settings = {
+    #     'DOWNLOAD_DELAY': 2.0,
+    #     'CONCURRENCY': 4,
+    #     'RETRY_HTTP_CODES': [500, 502, 503, 504, 408, 429],
+    #     'ALLOWED_RESPONSE_CODES': [200, 301, 302],  # only allow specific status codes
+    #     'DENIED_RESPONSE_CODES': [403, 404],  # deny specific status codes
+    # }
+
+    def start_requests(self):
+        """
+        Generate the initial requests.
+
+        Supports custom headers, proxies, priorities, and more.
+        """
+        headers = {
+            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
+        }
+
+        for url in self.start_urls:
+            yield Request(
+                url=url,
+                callback=self.parse,
+                headers=headers,
+                # meta={'proxy': 'http://proxy.example.com:8080'},  # custom proxy
+                # priority=10,  # request priority (higher number = higher priority)
+            )
+
+    def parse(self, response):
+        """
+        Main method for parsing responses.
+
+        Args:
+            response: the response object, containing page content and metadata
+
+        Yields:
+            Request: new request objects (for deeper crawling)
+            Item: data item objects (for data storage)
+        """
+        self.logger.info(f'Parsing page: {response.url}')
+
+        # ================== Data extraction example ==================
+
+        # Extract data and create an Item
+        # item = {{item_class}}()
+        # item['title'] = response.xpath('//title/text()').get(default='')
+        # item['url'] = response.url
+        # item['content'] = response.xpath('//div[@class="content"]//text()').getall()
+        # yield item
+
+        # Return a plain dict directly (simple data)
+        yield {
+            'title': response.xpath('//title/text()').get(default=''),
+            'url': response.url,
+            'status_code': response.status_code,
+            # 'description': response.xpath('//meta[@name="description"]/@content').get(),
+            # 'keywords': response.xpath('//meta[@name="keywords"]/@content').get(),
+        }
+
+        # ================== Link extraction example ==================
+
+        # Extract and follow links
+        # links = response.xpath('//a/@href').getall()
+        # for link in links:
+        #     # Filter for valid links
+        #     if link and not link.startswith(('javascript:', 'mailto:', '#')):
+        #         yield response.follow(
+        #             link,
+        #             callback=self.parse_detail,  # or self.parse to keep recursing
+        #             meta={'parent_url': response.url}  # pass parent-page info along
+        #         )
+
+        # Extract links with CSS selectors
+        # for link in response.css('a.item-link::attr(href)').getall():
+        #     yield response.follow(link, callback=self.parse_detail)
+
+        # ================== Pagination example ==================
+
+        # Handle pagination via a "next page" link
+        # next_page = response.xpath('//a[@class="next"]/@href').get()
+        # if next_page:
+        #     yield response.follow(next_page, callback=self.parse)
+
+        # Numbered pagination
+        # current_page = int(response.meta.get('page', 1))
+        # max_pages = 100  # maximum number of pages
+        # if current_page < max_pages:
+        #     next_url = f'https://{{domain}}/page/{current_page + 1}'
+        #     yield Request(
+        #         url=next_url,
+        #         callback=self.parse,
+        #         meta={'page': current_page + 1}
+        #     )
+
+    def parse_detail(self, response):
+        """
+        Method for parsing detail pages (optional).
+
+        Handles detail pages reached from list pages.
+        """
+        self.logger.info(f'Parsing detail page: {response.url}')
+
+        # parent_url = response.meta.get('parent_url', '')
+        #
+        # yield {
+        #     'title': response.xpath('//h1/text()').get(default=''),
+        #     'content': '\n'.join(response.xpath('//div[@class="content"]//text()').getall()),
+        #     'url': response.url,
+        #     'parent_url': parent_url,
+        #     'publish_time': response.xpath('//time/@datetime').get(),
+        # }
+
         pass
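For context, here is a minimal sketch of what a generated spider could look like once the {{spider_name}}, {{domain}}, and {{class_name}} placeholders are filled in. The class layout, the custom_settings keys (including the new ALLOWED_RESPONSE_CODES / DENIED_RESPONSE_CODES options, presumably consumed by the expanded response_code and response_filter middlewares in this release), and the Request / response.follow usage all come from the template above; the concrete spider name, domain, and setting values are illustrative assumptions, not part of the package.

# Hypothetical output of `crawlo genspider news example.com` with the template
# placeholders filled in. Setting values are illustrative only.
from crawlo.spider import Spider
from crawlo import Request


class NewsSpider(Spider):
    name = 'news'
    allowed_domains = ['example.com']
    start_urls = ['https://example.com/']

    # Keys copied from the template above; values are examples.
    custom_settings = {
        'DOWNLOAD_DELAY': 2.0,
        'CONCURRENCY': 4,
        'RETRY_HTTP_CODES': [500, 502, 503, 504, 408, 429],
        'ALLOWED_RESPONSE_CODES': [200, 301, 302],
        'DENIED_RESPONSE_CODES': [403, 404],
    }

    def start_requests(self):
        # Same pattern as the template: one Request per start URL.
        for url in self.start_urls:
            yield Request(url=url, callback=self.parse, priority=10)

    def parse(self, response):
        # Yield a plain dict per page, as in the template's extraction example.
        yield {
            'title': response.xpath('//title/text()').get(default=''),
            'url': response.url,
            'status_code': response.status_code,
        }
        # Follow pagination, as in the template's pagination example.
        next_page = response.xpath('//a[@class="next"]/@href').get()
        if next_page:
            yield response.follow(next_page, callback=self.parse)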
crawlo/tools/__init__.py
CHANGED
@@ -1,183 +1,183 @@
(the hunk removes all 182 lines of the module and re-adds them with identical visible content; the module is shown once below)
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
# @Time   : 2025-09-10 22:00
# @Author : crawl-coder
# @Desc   : Collection of Crawlo framework tools
"""

# Date/time tools
from .date_tools import (
    TimeUtils,
    parse_time,
    format_time,
    time_diff,
    to_timestamp,
    to_datetime,
    now,
    to_timezone,
    to_utc,
    to_local,
    from_timestamp_with_tz
)

# Data-cleaning tools
from ..cleaners import (
    TextCleaner,
    DataFormatter,
    EncodingConverter,
    remove_html_tags,
    decode_html_entities,
    remove_extra_whitespace,
    remove_special_chars,
    normalize_unicode,
    clean_text,
    extract_numbers,
    extract_emails,
    extract_urls,
    format_number,
    format_currency,
    format_percentage,
    format_phone_number,
    format_chinese_id_card,
    capitalize_words,
    detect_encoding,
    to_utf8,
    convert_encoding
)

# Data-validation tools
from .data_validator import (
    DataValidator,
    validate_email,
    validate_phone,
    validate_url,
    validate_chinese_id_card,
    validate_date,
    validate_number_range,
    check_data_integrity
)

# Retry mechanism
from .retry_mechanism import (
    RetryMechanism,
    retry,
    should_retry,
    exponential_backoff
)

# Anti-crawler tools
from .anti_crawler import (
    ProxyPoolManager,
    CaptchaHandler,
    AntiCrawler,
    get_random_user_agent,
    rotate_proxy,
    handle_captcha,
    detect_rate_limiting
)

# Authenticated-proxy tools
from .authenticated_proxy import (
    AuthenticatedProxy,
    create_proxy_config,
    format_proxy_for_request,
    parse_proxy_url,
    validate_proxy_url,
    get_proxy_info
)

# Distributed-coordination tools
from .distributed_coordinator import (
    TaskDistributor,
    DeduplicationTool,
    DistributedCoordinator,
    generate_task_id,
    claim_task,
    report_task_status,
    get_cluster_info,
    generate_pagination_tasks,
    distribute_tasks
)

__all__ = [
    # Date/time tools
    "TimeUtils",
    "parse_time",
    "format_time",
    "time_diff",
    "to_timestamp",
    "to_datetime",
    "now",
    "to_timezone",
    "to_utc",
    "to_local",
    "from_timestamp_with_tz",

    # Data-cleaning tools
    "TextCleaner",
    "DataFormatter",
    "EncodingConverter",
    "remove_html_tags",
    "decode_html_entities",
    "remove_extra_whitespace",
    "remove_special_chars",
    "normalize_unicode",
    "clean_text",
    "extract_numbers",
    "extract_emails",
    "extract_urls",
    "format_number",
    "format_currency",
    "format_percentage",
    "format_phone_number",
    "format_chinese_id_card",
    "capitalize_words",
    "detect_encoding",
    "to_utf8",
    "convert_encoding",

    # Data-validation tools
    "DataValidator",
    "validate_email",
    "validate_phone",
    "validate_url",
    "validate_chinese_id_card",
    "validate_date",
    "validate_number_range",
    "check_data_integrity",

    # Retry mechanism
    "RetryMechanism",
    "retry",
    "should_retry",
    "exponential_backoff",

    # Anti-crawler tools
    "ProxyPoolManager",
    "CaptchaHandler",
    "AntiCrawler",
    "get_random_user_agent",
    "rotate_proxy",
    "handle_captcha",
    "detect_rate_limiting",

    # Authenticated-proxy tools
    "AuthenticatedProxy",
    "create_proxy_config",
    "format_proxy_for_request",
    "parse_proxy_url",
    "validate_proxy_url",
    "get_proxy_info",

    # Distributed-coordination tools
    "TaskDistributor",
    "DeduplicationTool",
    "DistributedCoordinator",
    "generate_task_id",
    "claim_task",
    "report_task_status",
    "get_cluster_info",
    "generate_pagination_tasks",
    "distribute_tasks"
]
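Because everything in this module is re-exported at package level, downstream code can import the helpers directly from crawlo.tools. A minimal, hypothetical usage sketch follows; the names are taken from __all__ above, but the signatures and return values are assumptions and are not verified against 1.2.1.

# Hypothetical usage of helpers re-exported by crawlo.tools.
# Only the names come from __all__ above; signatures and return types are assumed.
from crawlo.tools import clean_text, validate_email, validate_url, now

if __name__ == '__main__':
    print(clean_text('<p>Hello&nbsp;world</p>'))   # assumed: returns cleaned plain text
    print(validate_email('user@example.com'))      # assumed: returns a bool
    print(validate_url('https://example.com/'))    # assumed: returns a bool
    print(now())                                   # assumed: returns the current datetime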