crawlo 1.1.4-py3-none-any.whl → 1.1.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (190)
  1. crawlo/__init__.py +61 -34
  2. crawlo/__version__.py +1 -1
  3. crawlo/cleaners/__init__.py +61 -0
  4. crawlo/cleaners/data_formatter.py +226 -0
  5. crawlo/cleaners/encoding_converter.py +126 -0
  6. crawlo/cleaners/text_cleaner.py +233 -0
  7. crawlo/cli.py +40 -40
  8. crawlo/commands/__init__.py +13 -13
  9. crawlo/commands/check.py +594 -594
  10. crawlo/commands/genspider.py +151 -151
  11. crawlo/commands/list.py +155 -155
  12. crawlo/commands/run.py +292 -285
  13. crawlo/commands/startproject.py +419 -196
  14. crawlo/commands/stats.py +188 -188
  15. crawlo/commands/utils.py +186 -186
  16. crawlo/config.py +312 -279
  17. crawlo/config_validator.py +253 -0
  18. crawlo/core/__init__.py +2 -2
  19. crawlo/core/engine.py +346 -172
  20. crawlo/core/processor.py +40 -40
  21. crawlo/core/scheduler.py +137 -166
  22. crawlo/crawler.py +1027 -1027
  23. crawlo/downloader/__init__.py +266 -242
  24. crawlo/downloader/aiohttp_downloader.py +220 -212
  25. crawlo/downloader/cffi_downloader.py +256 -251
  26. crawlo/downloader/httpx_downloader.py +259 -259
  27. crawlo/downloader/hybrid_downloader.py +214 -0
  28. crawlo/downloader/playwright_downloader.py +403 -0
  29. crawlo/downloader/selenium_downloader.py +473 -0
  30. crawlo/event.py +11 -11
  31. crawlo/exceptions.py +81 -81
  32. crawlo/extension/__init__.py +37 -37
  33. crawlo/extension/health_check.py +141 -141
  34. crawlo/extension/log_interval.py +57 -57
  35. crawlo/extension/log_stats.py +81 -81
  36. crawlo/extension/logging_extension.py +43 -43
  37. crawlo/extension/memory_monitor.py +104 -88
  38. crawlo/extension/performance_profiler.py +133 -117
  39. crawlo/extension/request_recorder.py +107 -107
  40. crawlo/filters/__init__.py +154 -154
  41. crawlo/filters/aioredis_filter.py +281 -242
  42. crawlo/filters/memory_filter.py +269 -269
  43. crawlo/items/__init__.py +23 -23
  44. crawlo/items/base.py +21 -21
  45. crawlo/items/fields.py +53 -53
  46. crawlo/items/items.py +104 -104
  47. crawlo/middleware/__init__.py +21 -21
  48. crawlo/middleware/default_header.py +32 -32
  49. crawlo/middleware/download_delay.py +28 -28
  50. crawlo/middleware/middleware_manager.py +135 -135
  51. crawlo/middleware/proxy.py +272 -248
  52. crawlo/middleware/request_ignore.py +30 -30
  53. crawlo/middleware/response_code.py +18 -18
  54. crawlo/middleware/response_filter.py +26 -26
  55. crawlo/middleware/retry.py +124 -124
  56. crawlo/mode_manager.py +212 -201
  57. crawlo/network/__init__.py +21 -21
  58. crawlo/network/request.py +338 -311
  59. crawlo/network/response.py +360 -271
  60. crawlo/pipelines/__init__.py +21 -21
  61. crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
  62. crawlo/pipelines/console_pipeline.py +39 -39
  63. crawlo/pipelines/csv_pipeline.py +316 -316
  64. crawlo/pipelines/database_dedup_pipeline.py +224 -224
  65. crawlo/pipelines/json_pipeline.py +218 -218
  66. crawlo/pipelines/memory_dedup_pipeline.py +115 -115
  67. crawlo/pipelines/mongo_pipeline.py +131 -131
  68. crawlo/pipelines/mysql_pipeline.py +316 -316
  69. crawlo/pipelines/pipeline_manager.py +61 -56
  70. crawlo/pipelines/redis_dedup_pipeline.py +167 -162
  71. crawlo/project.py +188 -153
  72. crawlo/queue/pqueue.py +37 -37
  73. crawlo/queue/queue_manager.py +334 -307
  74. crawlo/queue/redis_priority_queue.py +299 -209
  75. crawlo/settings/__init__.py +7 -7
  76. crawlo/settings/default_settings.py +219 -278
  77. crawlo/settings/setting_manager.py +123 -100
  78. crawlo/spider/__init__.py +639 -639
  79. crawlo/stats_collector.py +59 -59
  80. crawlo/subscriber.py +130 -130
  81. crawlo/task_manager.py +30 -30
  82. crawlo/templates/crawlo.cfg.tmpl +10 -10
  83. crawlo/templates/project/__init__.py.tmpl +3 -3
  84. crawlo/templates/project/items.py.tmpl +17 -17
  85. crawlo/templates/project/middlewares.py.tmpl +110 -110
  86. crawlo/templates/project/pipelines.py.tmpl +97 -97
  87. crawlo/templates/project/run.py.tmpl +251 -251
  88. crawlo/templates/project/settings.py.tmpl +326 -279
  89. crawlo/templates/project/settings_distributed.py.tmpl +120 -0
  90. crawlo/templates/project/settings_gentle.py.tmpl +95 -0
  91. crawlo/templates/project/settings_high_performance.py.tmpl +152 -0
  92. crawlo/templates/project/settings_simple.py.tmpl +69 -0
  93. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  94. crawlo/templates/spider/spider.py.tmpl +141 -141
  95. crawlo/tools/__init__.py +183 -0
  96. crawlo/tools/anti_crawler.py +269 -0
  97. crawlo/tools/authenticated_proxy.py +241 -0
  98. crawlo/tools/data_validator.py +181 -0
  99. crawlo/tools/date_tools.py +36 -0
  100. crawlo/tools/distributed_coordinator.py +387 -0
  101. crawlo/tools/retry_mechanism.py +221 -0
  102. crawlo/tools/scenario_adapter.py +263 -0
  103. crawlo/utils/__init__.py +35 -7
  104. crawlo/utils/batch_processor.py +261 -0
  105. crawlo/utils/controlled_spider_mixin.py +439 -439
  106. crawlo/utils/date_tools.py +290 -233
  107. crawlo/utils/db_helper.py +343 -343
  108. crawlo/utils/enhanced_error_handler.py +360 -0
  109. crawlo/utils/env_config.py +106 -0
  110. crawlo/utils/error_handler.py +126 -0
  111. crawlo/utils/func_tools.py +82 -82
  112. crawlo/utils/large_scale_config.py +286 -286
  113. crawlo/utils/large_scale_helper.py +343 -343
  114. crawlo/utils/log.py +128 -128
  115. crawlo/utils/performance_monitor.py +285 -0
  116. crawlo/utils/queue_helper.py +175 -175
  117. crawlo/utils/redis_connection_pool.py +335 -0
  118. crawlo/utils/redis_key_validator.py +200 -0
  119. crawlo/utils/request.py +267 -267
  120. crawlo/utils/request_serializer.py +219 -219
  121. crawlo/utils/spider_loader.py +62 -62
  122. crawlo/utils/system.py +11 -11
  123. crawlo/utils/tools.py +4 -4
  124. crawlo/utils/url.py +39 -39
  125. {crawlo-1.1.4.dist-info → crawlo-1.1.6.dist-info}/METADATA +401 -403
  126. crawlo-1.1.6.dist-info/RECORD +189 -0
  127. examples/__init__.py +7 -7
  128. tests/DOUBLE_CRAWLO_PREFIX_FIX_REPORT.md +82 -0
  129. tests/__init__.py +7 -7
  130. tests/advanced_tools_example.py +276 -0
  131. tests/authenticated_proxy_example.py +237 -0
  132. tests/cleaners_example.py +161 -0
  133. tests/config_validation_demo.py +103 -0
  134. {examples → tests}/controlled_spider_example.py +205 -205
  135. tests/date_tools_example.py +181 -0
  136. tests/dynamic_loading_example.py +524 -0
  137. tests/dynamic_loading_test.py +105 -0
  138. tests/env_config_example.py +134 -0
  139. tests/error_handling_example.py +172 -0
  140. tests/redis_key_validation_demo.py +131 -0
  141. tests/response_improvements_example.py +145 -0
  142. tests/test_advanced_tools.py +149 -0
  143. tests/test_all_redis_key_configs.py +146 -0
  144. tests/test_authenticated_proxy.py +142 -0
  145. tests/test_cleaners.py +55 -0
  146. tests/test_comprehensive.py +147 -0
  147. tests/test_config_validator.py +194 -0
  148. tests/test_date_tools.py +124 -0
  149. tests/test_double_crawlo_fix.py +208 -0
  150. tests/test_double_crawlo_fix_simple.py +125 -0
  151. tests/test_dynamic_downloaders_proxy.py +125 -0
  152. tests/test_dynamic_proxy.py +93 -0
  153. tests/test_dynamic_proxy_config.py +147 -0
  154. tests/test_dynamic_proxy_real.py +110 -0
  155. tests/test_edge_cases.py +304 -0
  156. tests/test_enhanced_error_handler.py +271 -0
  157. tests/test_env_config.py +122 -0
  158. tests/test_error_handler_compatibility.py +113 -0
  159. tests/test_final_validation.py +153 -153
  160. tests/test_framework_env_usage.py +104 -0
  161. tests/test_integration.py +357 -0
  162. tests/test_item_dedup_redis_key.py +123 -0
  163. tests/test_parsel.py +30 -0
  164. tests/test_performance.py +328 -0
  165. tests/test_proxy_health_check.py +32 -32
  166. tests/test_proxy_middleware_integration.py +136 -136
  167. tests/test_proxy_providers.py +56 -56
  168. tests/test_proxy_stats.py +19 -19
  169. tests/test_proxy_strategies.py +59 -59
  170. tests/test_queue_manager_double_crawlo.py +231 -0
  171. tests/test_queue_manager_redis_key.py +177 -0
  172. tests/test_redis_config.py +28 -28
  173. tests/test_redis_connection_pool.py +295 -0
  174. tests/test_redis_key_naming.py +182 -0
  175. tests/test_redis_key_validator.py +124 -0
  176. tests/test_redis_queue.py +224 -224
  177. tests/test_request_serialization.py +70 -70
  178. tests/test_response_improvements.py +153 -0
  179. tests/test_scheduler.py +241 -241
  180. tests/test_simple_response.py +62 -0
  181. tests/test_telecom_spider_redis_key.py +206 -0
  182. tests/test_template_content.py +88 -0
  183. tests/test_template_redis_key.py +135 -0
  184. tests/test_tools.py +154 -0
  185. tests/tools_example.py +258 -0
  186. crawlo/core/enhanced_engine.py +0 -190
  187. crawlo-1.1.4.dist-info/RECORD +0 -117
  188. {crawlo-1.1.4.dist-info → crawlo-1.1.6.dist-info}/WHEEL +0 -0
  189. {crawlo-1.1.4.dist-info → crawlo-1.1.6.dist-info}/entry_points.txt +0 -0
  190. {crawlo-1.1.4.dist-info → crawlo-1.1.6.dist-info}/top_level.txt +0 -0
tests/authenticated_proxy_example.py
@@ -0,0 +1,237 @@
+ #!/usr/bin/python
+ # -*- coding: UTF-8 -*-
+ """
+ Authenticated proxy usage example
+ =================================
+ Shows how to use proxies with and without username/password authentication in the Crawlo framework.
+
+ Supported proxy types:
+ 1. HTTP/HTTPS proxies with authentication
+ 2. HTTP/HTTPS proxies without authentication
+ 3. SOCKS proxies (with or without authentication)
+ """
+
+ from crawlo import Spider, Request
+ from crawlo.tools import (
+     AuthenticatedProxy,
+     create_proxy_config,
+     format_proxy_for_request,
+     get_proxy_info,
+     validate_proxy_url
+ )
+
+
+ def demo_proxy_parsing():
+     """Demonstrate proxy URL parsing."""
+     print("=== Proxy URL parsing demo ===\n")
+
+     # Proxy URLs of different types
+     proxy_urls = [
+         # HTTP proxy with authentication
+         "http://username:password@proxy.example.com:8080",
+         # HTTP proxy without authentication
+         "http://proxy.example.com:8080",
+         # HTTPS proxy with authentication
+         "https://user123:pass456@secure-proxy.com:443",
+         # SOCKS5 proxy
+         "socks5://socks-user:socks-pass@socks-proxy.com:1080",
+         # SOCKS4 proxy without authentication
+         "socks4://socks4-proxy.com:1080"
+     ]
+
+     for proxy_url in proxy_urls:
+         print(f"Parsing proxy URL: {proxy_url}")
+
+         # Validate the proxy URL
+         is_valid = validate_proxy_url(proxy_url)
+         print(f"  Valid: {'yes' if is_valid else 'no'}")
+
+         if is_valid:
+             # Get detailed proxy information
+             proxy_info = get_proxy_info(proxy_url)
+             print(f"  Scheme: {proxy_info['scheme']}")
+             print(f"  Host: {proxy_info['hostname']}")
+             print(f"  Port: {proxy_info['port']}")
+             print(f"  Authenticated: {'yes' if proxy_info['has_auth'] else 'no'}")
+             if proxy_info['has_auth']:
+                 print(f"  Username: {proxy_info['username']}")
+
+             # Build a proxy configuration
+             proxy_config = create_proxy_config(proxy_url)
+             print(f"  Proxy config: {proxy_config}")
+
+             # Format the configuration for different downloaders
+             for downloader in ["aiohttp", "httpx", "curl_cffi"]:
+                 formatted = format_proxy_for_request(proxy_config, downloader)
+                 print(f"  {downloader} format: {formatted}")
+
+         print()
+
+
+ def demo_authenticated_proxy_class():
+     """Demonstrate the AuthenticatedProxy class."""
+     print("=== AuthenticatedProxy class demo ===\n")
+
+     # Proxy with authentication
+     auth_proxy = AuthenticatedProxy("http://myuser:mypass@proxy.company.com:8080")
+     print(f"Proxy URL: {auth_proxy}")
+     print(f"Clean URL: {auth_proxy.clean_url}")
+     print(f"Username: {auth_proxy.username}")
+     print(f"Password: {auth_proxy.password}")
+     print(f"Proxy dict: {auth_proxy.proxy_dict}")
+     print(f"Auth credentials: {auth_proxy.get_auth_credentials()}")
+     print(f"Auth header: {auth_proxy.get_auth_header()}")
+     print(f"Is valid: {auth_proxy.is_valid()}")
+     print()
+
+     # Proxy without authentication
+     no_auth_proxy = AuthenticatedProxy("http://public.proxy.com:8080")
+     print(f"Proxy URL: {no_auth_proxy}")
+     print(f"Clean URL: {no_auth_proxy.clean_url}")
+     print(f"Username: {no_auth_proxy.username}")
+     print(f"Password: {no_auth_proxy.password}")
+     print(f"Proxy dict: {no_auth_proxy.proxy_dict}")
+     print(f"Auth credentials: {no_auth_proxy.get_auth_credentials()}")
+     print(f"Auth header: {no_auth_proxy.get_auth_header()}")
+     print(f"Is valid: {no_auth_proxy.is_valid()}")
+     print()
+
+
+ class ProxySpider(Spider):
+     """Example spider that rotates through a list of proxies."""
+     name = 'proxy_spider'
+
+     def __init__(self):
+         super().__init__()
+         # Proxy list
+         self.proxies = [
+             "http://user1:pass1@proxy1.example.com:8080",
+             "http://user2:pass2@proxy2.example.com:8080",
+             "http://proxy3.example.com:8080",  # no authentication
+             "https://secureuser:securepass@secure.proxy.com:443"
+         ]
+         self.current_proxy_index = 0
+
+     def get_next_proxy(self):
+         """Return the next proxy in round-robin order."""
+         proxy_url = self.proxies[self.current_proxy_index]
+         self.current_proxy_index = (self.current_proxy_index + 1) % len(self.proxies)
+         return proxy_url
+
+     def start_requests(self):
+         urls = [
+             'https://httpbin.org/ip',       # shows the outgoing IP address
+             'https://httpbin.org/headers',  # shows the request headers
+             'https://example.com',          # an ordinary site
+         ]
+
+         for url in urls:
+             # Pick a proxy
+             proxy_url = self.get_next_proxy()
+             proxy = AuthenticatedProxy(proxy_url)
+
+             # Build the request
+             request = Request(url=url, callback=self.parse)
+
+             # Configure the proxy per downloader type; aiohttp is used as the example here
+             if self.crawler.settings.get("DOWNLOADER_TYPE") == "aiohttp":
+                 request.proxy = proxy.clean_url
+                 auth = proxy.get_auth_credentials()
+                 if auth:
+                     # aiohttp handles the credentials inside the downloader
+                     request.meta["proxy_auth"] = auth
+             else:
+                 # Other downloaders
+                 request.proxy = proxy.proxy_dict
+
+             yield request
+
+     def parse(self, response):
+         """Parse the response."""
+         print(f"Fetched: {response.url}")
+         print(f"Status code: {response.status_code}")
+         # Show the first 200 characters
+         print(f"Body: {response.text[:200]}...\n")
+         yield {"url": response.url, "status": response.status_code}
+
+
+ def demo_in_spider():
+     """Demonstrate using proxies inside a spider."""
+     print("=== Using proxies in a spider ===\n")
+     print("In a spider project you can use authenticated proxies like this:")
+     print("""
+     from crawlo import Spider, Request
+     from crawlo.tools import AuthenticatedProxy
+
+     class MySpider(Spider):
+         name = 'my_spider'
+
+         def __init__(self):
+             super().__init__()
+             self.proxy_urls = [
+                 "http://username:password@proxy1.example.com:8080",
+                 "http://user:pass@proxy2.example.com:8080",
+                 "http://proxy3.example.com:8080"  # no authentication
+             ]
+
+         def start_requests(self):
+             urls = ['https://httpbin.org/ip', 'https://example.com']
+
+             for i, url in enumerate(urls):
+                 # Pick a proxy
+                 proxy_url = self.proxy_urls[i % len(self.proxy_urls)]
+                 proxy = AuthenticatedProxy(proxy_url)
+
+                 # Build the request
+                 request = Request(url=url, callback=self.parse)
+
+                 # Configure the proxy (per downloader type)
+                 downloader_type = self.crawler.settings.get("DOWNLOADER_TYPE", "aiohttp")
+
+                 if downloader_type == "aiohttp":
+                     # aiohttp downloader
+                     request.proxy = proxy.clean_url
+                     auth = proxy.get_auth_credentials()
+                     if auth:
+                         request.meta["proxy_auth"] = auth
+                 elif downloader_type == "httpx":
+                     # httpx downloader
+                     request.proxy = proxy.clean_url
+                 elif downloader_type == "curl_cffi":
+                     # curl_cffi downloader
+                     request.proxy = proxy.proxy_dict
+                     # Credentials are carried in the URL or passed via headers
+                     auth_header = proxy.get_auth_header()
+                     if auth_header:
+                         request.headers["Proxy-Authorization"] = auth_header
+
+                 yield request
+
+         def parse(self, response):
+             # Handle the response
+             yield {"url": response.url, "title": response.css('title::text').get()}
+     """)
+
+
+ if __name__ == '__main__':
+     # Run the demos
+     demo_proxy_parsing()
+     demo_authenticated_proxy_class()
+     demo_in_spider()
+
+     print("\n=== Configuration notes ===")
+     print("Configure the proxy in settings.py:")
+     print("""
+     # Enable the proxy middleware
+     MIDDLEWARES = [
+         # ... other middlewares ...
+         'crawlo.middleware.proxy.ProxyMiddleware',
+     ]
+
+     # Proxy settings
+     PROXY_ENABLED = True
+     PROXY_API_URL = "https://api.proxyprovider.com/get"  # proxy API endpoint
+     PROXY_EXTRACTOR = "proxy"  # field path used to extract proxies from the API response
+     PROXY_REFRESH_INTERVAL = 60  # proxy refresh interval in seconds
+     """)
tests/cleaners_example.py
@@ -0,0 +1,161 @@
+ #!/usr/bin/python
+ # -*- coding: UTF-8 -*-
+ """
+ Data cleaning tools usage example for the Crawlo framework
+ """
+ from crawlo.cleaners import (
+     TextCleaner,
+     DataFormatter,
+     remove_html_tags,
+     decode_html_entities,
+     clean_text,
+     extract_numbers,
+     extract_emails,
+     extract_urls,
+     format_number,
+     format_currency,
+     format_phone_number,
+     format_chinese_id_card
+ )
+
+
+ def demo_text_cleaner():
+     """Demonstrate the text cleaning helpers."""
+     print("=== Text cleaning demo ===\n")
+
+     # 1. Remove HTML tags
+     print("1. Remove HTML tags:")
+     html_text = "<p>这是一个<b>测试</b>文本</p>"
+     clean_text_result = remove_html_tags(html_text)
+     print(f"  Original: {html_text}")
+     print(f"  Cleaned: {clean_text_result}")
+
+     print()
+
+     # 2. Decode HTML entities
+     print("2. Decode HTML entities:")
+     entity_text = "这是一个&nbsp;<b>测试</b>&amp;文本"
+     decoded_text = decode_html_entities(entity_text)
+     print(f"  Original: {entity_text}")
+     print(f"  Decoded: {decoded_text}")
+
+     print()
+
+     # 3. Remove extra whitespace
+     print("3. Remove extra whitespace:")
+     whitespace_text = "这是 一个\t\t测试\n\n文本"
+     clean_whitespace = TextCleaner.remove_extra_whitespace(whitespace_text)
+     print(f"  Original: {repr(whitespace_text)}")
+     print(f"  Cleaned: {repr(clean_whitespace)}")
+
+     print()
+
+     # 4. Combined cleaning
+     print("4. Combined cleaning:")
+     complex_text = "<p>这是&nbsp;一个<b>测试</b>&amp;文本&nbsp;&nbsp;</p>"
+     cleaned = clean_text(complex_text)
+     print(f"  Original: {complex_text}")
+     print(f"  Cleaned: {cleaned}")
+
+     print()
+
+     # 5. Extract information
+     print("5. Extract information:")
+     info_text = "联系邮箱: test@example.com, 电话: 13812345678, 价格: ¥123.45"
+     numbers = extract_numbers(info_text)
+     emails = extract_emails(info_text)
+     urls = extract_urls(info_text)
+     print(f"  Original: {info_text}")
+     print(f"  Extracted numbers: {numbers}")
+     print(f"  Extracted emails: {emails}")
+     print(f"  Extracted URLs: {urls}")
+
+
+ def demo_data_formatter():
+     """Demonstrate the data formatting helpers."""
+     print("\n=== Data formatting demo ===\n")
+
+     # 1. Number formatting
+     print("1. Number formatting:")
+     number = 1234567.891
+     formatted_num1 = format_number(number, precision=2, thousand_separator=False)
+     formatted_num2 = format_number(number, precision=2, thousand_separator=True)
+     print(f"  Original: {number}")
+     print(f"  Without thousands separator: {formatted_num1}")
+     print(f"  With thousands separator: {formatted_num2}")
+
+     print()
+
+     # 2. Currency formatting
+     print("2. Currency formatting:")
+     price = 1234.567
+     formatted_currency1 = format_currency(price, "¥", 2)
+     formatted_currency2 = format_currency(price, "$", 2)
+     print(f"  Original: {price}")
+     print(f"  CNY format: {formatted_currency1}")
+     print(f"  USD format: {formatted_currency2}")
+
+     print()
+
+     # 3. Phone number formatting
+     print("3. Phone number formatting:")
+     phone = "13812345678"
+     formatted_phone1 = format_phone_number(phone, "+86", "international")
+     formatted_phone2 = format_phone_number(phone, "", "domestic")
+     formatted_phone3 = format_phone_number(phone, "", "plain")
+     print(f"  Original: {phone}")
+     print(f"  International format: {formatted_phone1}")
+     print(f"  Domestic format: {formatted_phone2}")
+     print(f"  Digits only: {formatted_phone3}")
+
+     print()
+
+     # 4. Chinese ID card formatting
+     print("4. Chinese ID card formatting:")
+     id_card = "110101199001011234"
+     formatted_id = format_chinese_id_card(id_card)
+     print(f"  Original: {id_card}")
+     print(f"  Formatted: {formatted_id}")
+
+
+ def demo_in_spider():
+     """Demonstrate using the cleaning tools inside a spider."""
+     print("\n=== Using the cleaning tools in a spider ===\n")
+     print("In a spider project you can use the cleaning tools like this:")
+     print("""
+     from crawlo import Spider, Request, Item, Field
+     from crawlo.cleaners import clean_text, format_currency, extract_numbers
+
+     class ProductItem(Item):
+         name = Field()
+         price = Field()
+         description = Field()
+
+     class ProductSpider(Spider):
+         def parse(self, response):
+             # Extract data from the page
+             name = response.css('.product-name::text').get()
+             price_text = response.css('.price::text').get()
+             description = response.css('.description::text').get()
+
+             # Clean and format the data
+             clean_name = clean_text(name) if name else None
+             price_numbers = extract_numbers(price_text) if price_text else []
+             clean_price = format_currency(price_numbers[0]) if price_numbers else None
+             clean_description = clean_text(description) if description else None
+
+             # Build the item
+             item = ProductItem()
+             item['name'] = clean_name
+             item['price'] = clean_price
+             item['description'] = clean_description
+
+             yield item
+     """)
+
+
+ if __name__ == '__main__':
+     # Run the demos
+     demo_text_cleaner()
+     demo_data_formatter()
+     demo_in_spider()
tests/config_validation_demo.py
@@ -0,0 +1,103 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ """
+ Configuration validation demo script.
+ Shows how to use the config validator to validate a Crawlo configuration.
+ """
+ import sys
+ import os
+
+ # Add the project root to the path
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+
+ from crawlo.config import CrawloConfig
+ from crawlo.config_validator import print_validation_report
+
+
+ def demonstrate_config_validation():
+     """Demonstrate the configuration validation features."""
+     print("=== Configuration validation demo ===\n")
+
+     # 1. A valid standalone configuration
+     print("1. Valid standalone configuration:")
+     valid_standalone_config = {
+         'PROJECT_NAME': 'demo_project',
+         'QUEUE_TYPE': 'memory',
+         'CONCURRENCY': 8,
+         'DOWNLOAD_DELAY': 1.0,
+         'LOG_LEVEL': 'INFO',
+         'MIDDLEWARES': [
+             'crawlo.middleware.request_ignore.RequestIgnoreMiddleware',
+             'crawlo.middleware.download_delay.DownloadDelayMiddleware'
+         ],
+         'PIPELINES': [
+             'crawlo.pipelines.console_pipeline.ConsolePipeline'
+         ]
+     }
+
+     print_validation_report(valid_standalone_config)
+     print()
+
+     # 2. A valid distributed configuration
+     print("2. Valid distributed configuration:")
+     valid_distributed_config = {
+         'PROJECT_NAME': 'demo_project',
+         'QUEUE_TYPE': 'redis',
+         'CONCURRENCY': 16,
+         'DOWNLOAD_DELAY': 1.0,
+         'SCHEDULER_QUEUE_NAME': 'crawlo:demo_project:queue:requests',
+         'REDIS_HOST': '127.0.0.1',
+         'REDIS_PORT': 6379,
+         'LOG_LEVEL': 'INFO',
+         'MIDDLEWARES': [
+             'crawlo.middleware.request_ignore.RequestIgnoreMiddleware',
+             'crawlo.middleware.download_delay.DownloadDelayMiddleware'
+         ],
+         'PIPELINES': [
+             'crawlo.pipelines.console_pipeline.ConsolePipeline'
+         ]
+     }
+
+     print_validation_report(valid_distributed_config)
+     print()
+
+     # 3. An invalid configuration
+     print("3. Invalid configuration example:")
+     invalid_config = {
+         'PROJECT_NAME': '',            # empty project name
+         'QUEUE_TYPE': 'invalid_type',  # invalid queue type
+         'CONCURRENCY': -1,             # negative concurrency
+         'REDIS_PORT': 99999,           # invalid port
+         'LOG_LEVEL': 'INVALID_LEVEL'   # invalid log level
+     }
+
+     print_validation_report(invalid_config)
+     print()
+
+     # 4. Create a configuration via the config factory and validate it
+     print("4. Creating and validating a configuration via the config factory:")
+     try:
+         # Create a valid configuration
+         config = CrawloConfig.standalone(
+             concurrency=8,
+             download_delay=1.0
+         )
+         print("✅ Standalone configuration created successfully")
+         config.print_summary()
+         print()
+
+         # Try to create an invalid configuration (raises an exception)
+         try:
+             invalid_config_dict = {
+                 'CONCURRENCY': -1  # negative concurrency
+             }
+             invalid_config_obj = CrawloConfig.custom(invalid_config_dict)
+         except ValueError as e:
+             print(f"✅ Caught configuration validation error: {e}")
+
+     except Exception as e:
+         print(f"❌ Failed to create configuration: {e}")
+
+
+ if __name__ == "__main__":
+     demonstrate_config_validation()