crawlo 1.1.4__py3-none-any.whl → 1.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (190)
  1. crawlo/__init__.py +61 -34
  2. crawlo/__version__.py +1 -1
  3. crawlo/cleaners/__init__.py +61 -0
  4. crawlo/cleaners/data_formatter.py +226 -0
  5. crawlo/cleaners/encoding_converter.py +126 -0
  6. crawlo/cleaners/text_cleaner.py +233 -0
  7. crawlo/cli.py +40 -40
  8. crawlo/commands/__init__.py +13 -13
  9. crawlo/commands/check.py +594 -594
  10. crawlo/commands/genspider.py +151 -151
  11. crawlo/commands/list.py +155 -155
  12. crawlo/commands/run.py +292 -285
  13. crawlo/commands/startproject.py +419 -196
  14. crawlo/commands/stats.py +188 -188
  15. crawlo/commands/utils.py +186 -186
  16. crawlo/config.py +312 -279
  17. crawlo/config_validator.py +253 -0
  18. crawlo/core/__init__.py +2 -2
  19. crawlo/core/engine.py +346 -172
  20. crawlo/core/processor.py +40 -40
  21. crawlo/core/scheduler.py +137 -166
  22. crawlo/crawler.py +1027 -1027
  23. crawlo/downloader/__init__.py +266 -242
  24. crawlo/downloader/aiohttp_downloader.py +220 -212
  25. crawlo/downloader/cffi_downloader.py +256 -251
  26. crawlo/downloader/httpx_downloader.py +259 -259
  27. crawlo/downloader/hybrid_downloader.py +214 -0
  28. crawlo/downloader/playwright_downloader.py +403 -0
  29. crawlo/downloader/selenium_downloader.py +473 -0
  30. crawlo/event.py +11 -11
  31. crawlo/exceptions.py +81 -81
  32. crawlo/extension/__init__.py +37 -37
  33. crawlo/extension/health_check.py +141 -141
  34. crawlo/extension/log_interval.py +57 -57
  35. crawlo/extension/log_stats.py +81 -81
  36. crawlo/extension/logging_extension.py +43 -43
  37. crawlo/extension/memory_monitor.py +104 -88
  38. crawlo/extension/performance_profiler.py +133 -117
  39. crawlo/extension/request_recorder.py +107 -107
  40. crawlo/filters/__init__.py +154 -154
  41. crawlo/filters/aioredis_filter.py +281 -242
  42. crawlo/filters/memory_filter.py +269 -269
  43. crawlo/items/__init__.py +23 -23
  44. crawlo/items/base.py +21 -21
  45. crawlo/items/fields.py +53 -53
  46. crawlo/items/items.py +104 -104
  47. crawlo/middleware/__init__.py +21 -21
  48. crawlo/middleware/default_header.py +32 -32
  49. crawlo/middleware/download_delay.py +28 -28
  50. crawlo/middleware/middleware_manager.py +135 -135
  51. crawlo/middleware/proxy.py +272 -248
  52. crawlo/middleware/request_ignore.py +30 -30
  53. crawlo/middleware/response_code.py +18 -18
  54. crawlo/middleware/response_filter.py +26 -26
  55. crawlo/middleware/retry.py +124 -124
  56. crawlo/mode_manager.py +212 -201
  57. crawlo/network/__init__.py +21 -21
  58. crawlo/network/request.py +338 -311
  59. crawlo/network/response.py +360 -271
  60. crawlo/pipelines/__init__.py +21 -21
  61. crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
  62. crawlo/pipelines/console_pipeline.py +39 -39
  63. crawlo/pipelines/csv_pipeline.py +316 -316
  64. crawlo/pipelines/database_dedup_pipeline.py +224 -224
  65. crawlo/pipelines/json_pipeline.py +218 -218
  66. crawlo/pipelines/memory_dedup_pipeline.py +115 -115
  67. crawlo/pipelines/mongo_pipeline.py +131 -131
  68. crawlo/pipelines/mysql_pipeline.py +316 -316
  69. crawlo/pipelines/pipeline_manager.py +61 -56
  70. crawlo/pipelines/redis_dedup_pipeline.py +167 -162
  71. crawlo/project.py +188 -153
  72. crawlo/queue/pqueue.py +37 -37
  73. crawlo/queue/queue_manager.py +334 -307
  74. crawlo/queue/redis_priority_queue.py +299 -209
  75. crawlo/settings/__init__.py +7 -7
  76. crawlo/settings/default_settings.py +219 -278
  77. crawlo/settings/setting_manager.py +123 -100
  78. crawlo/spider/__init__.py +639 -639
  79. crawlo/stats_collector.py +59 -59
  80. crawlo/subscriber.py +130 -130
  81. crawlo/task_manager.py +30 -30
  82. crawlo/templates/crawlo.cfg.tmpl +10 -10
  83. crawlo/templates/project/__init__.py.tmpl +3 -3
  84. crawlo/templates/project/items.py.tmpl +17 -17
  85. crawlo/templates/project/middlewares.py.tmpl +110 -110
  86. crawlo/templates/project/pipelines.py.tmpl +97 -97
  87. crawlo/templates/project/run.py.tmpl +251 -251
  88. crawlo/templates/project/settings.py.tmpl +326 -279
  89. crawlo/templates/project/settings_distributed.py.tmpl +120 -0
  90. crawlo/templates/project/settings_gentle.py.tmpl +95 -0
  91. crawlo/templates/project/settings_high_performance.py.tmpl +152 -0
  92. crawlo/templates/project/settings_simple.py.tmpl +69 -0
  93. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  94. crawlo/templates/spider/spider.py.tmpl +141 -141
  95. crawlo/tools/__init__.py +183 -0
  96. crawlo/tools/anti_crawler.py +269 -0
  97. crawlo/tools/authenticated_proxy.py +241 -0
  98. crawlo/tools/data_validator.py +181 -0
  99. crawlo/tools/date_tools.py +36 -0
  100. crawlo/tools/distributed_coordinator.py +387 -0
  101. crawlo/tools/retry_mechanism.py +221 -0
  102. crawlo/tools/scenario_adapter.py +263 -0
  103. crawlo/utils/__init__.py +35 -7
  104. crawlo/utils/batch_processor.py +261 -0
  105. crawlo/utils/controlled_spider_mixin.py +439 -439
  106. crawlo/utils/date_tools.py +290 -233
  107. crawlo/utils/db_helper.py +343 -343
  108. crawlo/utils/enhanced_error_handler.py +360 -0
  109. crawlo/utils/env_config.py +106 -0
  110. crawlo/utils/error_handler.py +126 -0
  111. crawlo/utils/func_tools.py +82 -82
  112. crawlo/utils/large_scale_config.py +286 -286
  113. crawlo/utils/large_scale_helper.py +343 -343
  114. crawlo/utils/log.py +128 -128
  115. crawlo/utils/performance_monitor.py +285 -0
  116. crawlo/utils/queue_helper.py +175 -175
  117. crawlo/utils/redis_connection_pool.py +335 -0
  118. crawlo/utils/redis_key_validator.py +200 -0
  119. crawlo/utils/request.py +267 -267
  120. crawlo/utils/request_serializer.py +219 -219
  121. crawlo/utils/spider_loader.py +62 -62
  122. crawlo/utils/system.py +11 -11
  123. crawlo/utils/tools.py +4 -4
  124. crawlo/utils/url.py +39 -39
  125. {crawlo-1.1.4.dist-info → crawlo-1.1.6.dist-info}/METADATA +401 -403
  126. crawlo-1.1.6.dist-info/RECORD +189 -0
  127. examples/__init__.py +7 -7
  128. tests/DOUBLE_CRAWLO_PREFIX_FIX_REPORT.md +82 -0
  129. tests/__init__.py +7 -7
  130. tests/advanced_tools_example.py +276 -0
  131. tests/authenticated_proxy_example.py +237 -0
  132. tests/cleaners_example.py +161 -0
  133. tests/config_validation_demo.py +103 -0
  134. {examples → tests}/controlled_spider_example.py +205 -205
  135. tests/date_tools_example.py +181 -0
  136. tests/dynamic_loading_example.py +524 -0
  137. tests/dynamic_loading_test.py +105 -0
  138. tests/env_config_example.py +134 -0
  139. tests/error_handling_example.py +172 -0
  140. tests/redis_key_validation_demo.py +131 -0
  141. tests/response_improvements_example.py +145 -0
  142. tests/test_advanced_tools.py +149 -0
  143. tests/test_all_redis_key_configs.py +146 -0
  144. tests/test_authenticated_proxy.py +142 -0
  145. tests/test_cleaners.py +55 -0
  146. tests/test_comprehensive.py +147 -0
  147. tests/test_config_validator.py +194 -0
  148. tests/test_date_tools.py +124 -0
  149. tests/test_double_crawlo_fix.py +208 -0
  150. tests/test_double_crawlo_fix_simple.py +125 -0
  151. tests/test_dynamic_downloaders_proxy.py +125 -0
  152. tests/test_dynamic_proxy.py +93 -0
  153. tests/test_dynamic_proxy_config.py +147 -0
  154. tests/test_dynamic_proxy_real.py +110 -0
  155. tests/test_edge_cases.py +304 -0
  156. tests/test_enhanced_error_handler.py +271 -0
  157. tests/test_env_config.py +122 -0
  158. tests/test_error_handler_compatibility.py +113 -0
  159. tests/test_final_validation.py +153 -153
  160. tests/test_framework_env_usage.py +104 -0
  161. tests/test_integration.py +357 -0
  162. tests/test_item_dedup_redis_key.py +123 -0
  163. tests/test_parsel.py +30 -0
  164. tests/test_performance.py +328 -0
  165. tests/test_proxy_health_check.py +32 -32
  166. tests/test_proxy_middleware_integration.py +136 -136
  167. tests/test_proxy_providers.py +56 -56
  168. tests/test_proxy_stats.py +19 -19
  169. tests/test_proxy_strategies.py +59 -59
  170. tests/test_queue_manager_double_crawlo.py +231 -0
  171. tests/test_queue_manager_redis_key.py +177 -0
  172. tests/test_redis_config.py +28 -28
  173. tests/test_redis_connection_pool.py +295 -0
  174. tests/test_redis_key_naming.py +182 -0
  175. tests/test_redis_key_validator.py +124 -0
  176. tests/test_redis_queue.py +224 -224
  177. tests/test_request_serialization.py +70 -70
  178. tests/test_response_improvements.py +153 -0
  179. tests/test_scheduler.py +241 -241
  180. tests/test_simple_response.py +62 -0
  181. tests/test_telecom_spider_redis_key.py +206 -0
  182. tests/test_template_content.py +88 -0
  183. tests/test_template_redis_key.py +135 -0
  184. tests/test_tools.py +154 -0
  185. tests/tools_example.py +258 -0
  186. crawlo/core/enhanced_engine.py +0 -190
  187. crawlo-1.1.4.dist-info/RECORD +0 -117
  188. {crawlo-1.1.4.dist-info → crawlo-1.1.6.dist-info}/WHEEL +0 -0
  189. {crawlo-1.1.4.dist-info → crawlo-1.1.6.dist-info}/entry_points.txt +0 -0
  190. {crawlo-1.1.4.dist-info → crawlo-1.1.6.dist-info}/top_level.txt +0 -0
crawlo/cleaners/text_cleaner.py ADDED
@@ -0,0 +1,233 @@
+ #!/usr/bin/python
+ # -*- coding: UTF-8 -*-
+ """
+ # @Time   : 2025-09-10 22:00
+ # @Author : crawl-coder
+ # @Desc   : Text cleaning utilities
+ """
+ import re
+ import html
+ import unicodedata
+ from typing import List
+
+
+ class TextCleaner:
+     """
+     Text-cleaning utility class offering assorted cleaning helpers,
+     geared toward scrubbing page content fetched by crawlers.
+     """
+
+     @staticmethod
+     def remove_html_tags(text: str) -> str:
+         """
+         Strip HTML tags.
+
+         :param text: text containing HTML tags
+         :return: text with the tags removed
+         """
+         if not isinstance(text, str):
+             return str(text)
+
+         # Strip HTML tags with a regular expression
+         clean_text = re.sub(r'<[^>]+>', '', text)
+         return clean_text.strip()
+
+     @staticmethod
+     def decode_html_entities(text: str) -> str:
+         """
+         Decode HTML entities.
+
+         :param text: text containing HTML entities
+         :return: decoded text
+         """
+         if not isinstance(text, str):
+             return str(text)
+
+         return html.unescape(text)
+
+     @staticmethod
+     def remove_extra_whitespace(text: str) -> str:
+         """
+         Collapse redundant whitespace (spaces, tabs, newlines, etc.).
+
+         :param text: text
+         :return: cleaned text
+         """
+         if not isinstance(text, str):
+             return str(text)
+
+         # Replace each run of consecutive whitespace with a single space
+         clean_text = re.sub(r'\s+', ' ', text)
+         return clean_text.strip()
+
+     @staticmethod
+     def remove_special_chars(text: str, chars: str = '') -> str:
+         """
+         Remove special characters.
+
+         :param text: text
+         :param chars: additional characters to preserve
+         :return: cleaned text
+         """
+         if not isinstance(text, str):
+             return str(text)
+
+         # Keep word characters, whitespace, CJK characters, and any
+         # caller-supplied extras; drop everything else
+         special_chars = r'[^\w\s\u4e00-\u9fff' + re.escape(chars) + r']'
+         clean_text = re.sub(special_chars, '', text)
+         return clean_text
+
+     @staticmethod
+     def normalize_unicode(text: str) -> str:
+         """
+         Normalize Unicode characters.
+
+         :param text: text
+         :return: normalized text
+         """
+         if not isinstance(text, str):
+             return str(text)
+
+         return unicodedata.normalize('NFKC', text)
+
+     @staticmethod
+     def clean_text(text: str,
+                    remove_html: bool = True,
+                    decode_entities: bool = True,
+                    remove_whitespace: bool = True,
+                    remove_special: bool = False,
+                    normalize: bool = True) -> str:
+         """
+         One-stop text cleaning.
+
+         :param text: raw text
+         :param remove_html: strip HTML tags
+         :param decode_entities: decode HTML entities
+         :param remove_whitespace: collapse redundant whitespace
+         :param remove_special: remove special characters
+         :param normalize: normalize Unicode characters
+         :return: cleaned text
+         """
+         if not isinstance(text, str):
+             text = str(text)
+
+         if not text:
+             return text
+
+         # Apply the cleaning steps in order
+         if remove_html:
+             text = TextCleaner.remove_html_tags(text)
+
+         if decode_entities:
+             text = TextCleaner.decode_html_entities(text)
+
+         if normalize:
+             text = TextCleaner.normalize_unicode(text)
+
+         if remove_whitespace:
+             text = TextCleaner.remove_extra_whitespace(text)
+
+         if remove_special:
+             text = TextCleaner.remove_special_chars(text)
+
+         return text
+
+     @staticmethod
+     def extract_numbers(text: str) -> List[str]:
+         """
+         Extract numbers from text.
+
+         :param text: text
+         :return: list of number strings
+         """
+         if not isinstance(text, str):
+             return []
+
+         # Match integers and decimals, optionally signed
+         numbers = re.findall(r'-?\d+\.?\d*', text)
+         return numbers
+
+     @staticmethod
+     def extract_emails(text: str) -> List[str]:
+         """
+         Extract e-mail addresses from text.
+
+         :param text: text
+         :return: list of e-mail addresses
+         """
+         if not isinstance(text, str):
+             return []
+
+         # Match e-mail addresses
+         emails = re.findall(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b', text)
+         return emails
+
+     @staticmethod
+     def extract_urls(text: str) -> List[str]:
+         """
+         Extract URLs from text.
+
+         :param text: text
+         :return: list of URLs
+         """
+         if not isinstance(text, str):
+             return []
+
+         # Match http/https URLs
+         urls = re.findall(
+             r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
+             text
+         )
+         return urls
+
+
+ # ======================= Public interface =======================
+
+ def remove_html_tags(text: str) -> str:
+     """Strip HTML tags."""
+     return TextCleaner.remove_html_tags(text)
+
+
+ def decode_html_entities(text: str) -> str:
+     """Decode HTML entities."""
+     return TextCleaner.decode_html_entities(text)
+
+
+ def remove_extra_whitespace(text: str) -> str:
+     """Collapse redundant whitespace."""
+     return TextCleaner.remove_extra_whitespace(text)
+
+
+ def remove_special_chars(text: str, chars: str = '') -> str:
+     """Remove special characters."""
+     return TextCleaner.remove_special_chars(text, chars)
+
+
+ def normalize_unicode(text: str) -> str:
+     """Normalize Unicode characters."""
+     return TextCleaner.normalize_unicode(text)
+
+
+ def clean_text(text: str,
+                remove_html: bool = True,
+                decode_entities: bool = True,
+                remove_whitespace: bool = True,
+                remove_special: bool = False,
+                normalize: bool = True) -> str:
+     """One-stop text cleaning."""
+     return TextCleaner.clean_text(text, remove_html, decode_entities, remove_whitespace, remove_special, normalize)
+
+
+ def extract_numbers(text: str) -> List[str]:
+     """Extract numbers."""
+     return TextCleaner.extract_numbers(text)
+
+
+ def extract_emails(text: str) -> List[str]:
+     """Extract e-mail addresses."""
+     return TextCleaner.extract_emails(text)
+
+
+ def extract_urls(text: str) -> List[str]:
+     """Extract URLs."""
+     return TextCleaner.extract_urls(text)
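
The new cleaners module exposes both the TextCleaner class and flat module-level wrappers. A minimal usage sketch, importing the wrappers straight from crawlo.cleaners.text_cleaner as added above (the new crawlo/cleaners/__init__.py may also re-export them, but its contents are not shown here):

    # Usage sketch for the cleaners added in 1.1.6 (names per the diff above)
    from crawlo.cleaners.text_cleaner import clean_text, extract_emails, extract_urls

    raw = '<p>Contact:&nbsp;  support@example.com </p>'

    # strip tags -> decode &nbsp; -> NFKC-normalize -> collapse whitespace
    print(clean_text(raw))        # 'Contact: support@example.com'
    print(extract_emails(raw))    # ['support@example.com']
    print(extract_urls('see https://example.com/docs now'))
                                  # ['https://example.com/docs']
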
crawlo/cli.py CHANGED
@@ -1,41 +1,41 @@
(the 40 removed lines and the 40 re-added lines are identical as rendered, a whitespace- or line-ending-only rewrite; line 41, main(), is unchanged)
  # crawlo/cli.py
  #!/usr/bin/python
  # -*- coding: UTF-8 -*-
  import sys
  import argparse
  from crawlo.commands import get_commands


  def main():
      # Gather all available commands
      commands = get_commands()

      parser = argparse.ArgumentParser(
          description="Crawlo: A lightweight web crawler framework.",
          usage="crawlo <command> [options]"
      )
      parser.add_argument('command', help='Available commands: ' + ', '.join(commands.keys()))
      # Note: no concrete options are defined here; subcommands parse their own

      # Parse only the command name
      args, unknown = parser.parse_known_args()

      if args.command not in commands:
          print(f"Unknown command: {args.command}")
          print(f"Available commands: {', '.join(commands.keys())}")
          sys.exit(1)

      # Dynamically import and execute the command
      try:
          module = __import__(commands[args.command], fromlist=['main'])
          sys.exit(module.main(unknown))
      except ImportError as e:
          print(f"Failed to load command '{args.command}': {e}")
          sys.exit(1)
      except Exception as e:
          print(f"Command '{args.command}' failed: {e}")
          sys.exit(1)


  if __name__ == '__main__':
      main()
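
cli.py resolves the registered dotted path with __import__(..., fromlist=['main']) and calls the module's main() with the arguments it did not consume, so each command module owns its own argument parsing. A hypothetical command module honoring that contract (the 'hello' command and its --name option are invented for illustration):

    # Hypothetical subcommand: must expose main(argv) and return an exit code
    import argparse

    def main(argv):
        parser = argparse.ArgumentParser(prog='crawlo hello')
        parser.add_argument('--name', default='world')
        args = parser.parse_args(argv)  # argv = remainder from parse_known_args()
        print(f'hello, {args.name}')
        return 0                        # passed to sys.exit() by cli.py
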
crawlo/commands/__init__.py CHANGED
@@ -1,14 +1,14 @@
(likewise, the removed and re-added lines are identical as rendered, a whitespace- or line-ending-only rewrite; line 14, return _commands, is unchanged)
  #!/usr/bin/python
  # -*- coding:UTF-8 -*-

  _commands = {
      'startproject': 'crawlo.commands.startproject',
      'genspider': 'crawlo.commands.genspider',
      'run': 'crawlo.commands.run',
      'check': 'crawlo.commands.check',
      'list': 'crawlo.commands.list',
      'stats': 'crawlo.commands.stats'
  }

  def get_commands():
      return _commands
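
get_commands() returns the name-to-dotted-path registry that cli.py consumes. A short sketch of how the two files above fit together, using only the calls they define:

    from crawlo.commands import get_commands

    commands = get_commands()   # {'run': 'crawlo.commands.run', ...}
    module = __import__(commands['run'], fromlist=['main'])
    # module is crawlo.commands.run; cli.py would now call module.main(unknown)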