crawlo 1.2.0__py3-none-any.whl → 1.2.1__py3-none-any.whl

This diff shows the content of publicly released package versions as they appear in their public registry. It is provided for informational purposes only and reflects the changes between the two versions.

Potentially problematic release: this version of crawlo has been flagged as possibly problematic.

Files changed (220)
  1. crawlo/__init__.py +61 -61
  2. crawlo/__version__.py +1 -1
  3. crawlo/cleaners/__init__.py +60 -60
  4. crawlo/cleaners/data_formatter.py +225 -225
  5. crawlo/cleaners/encoding_converter.py +125 -125
  6. crawlo/cleaners/text_cleaner.py +232 -232
  7. crawlo/cli.py +65 -65
  8. crawlo/commands/__init__.py +14 -14
  9. crawlo/commands/check.py +594 -594
  10. crawlo/commands/genspider.py +151 -151
  11. crawlo/commands/help.py +142 -132
  12. crawlo/commands/list.py +155 -155
  13. crawlo/commands/run.py +292 -292
  14. crawlo/commands/startproject.py +418 -418
  15. crawlo/commands/stats.py +188 -188
  16. crawlo/commands/utils.py +186 -186
  17. crawlo/config.py +312 -312
  18. crawlo/config_validator.py +252 -252
  19. crawlo/core/__init__.py +2 -2
  20. crawlo/core/engine.py +354 -354
  21. crawlo/core/processor.py +40 -40
  22. crawlo/core/scheduler.py +143 -143
  23. crawlo/crawler.py +1027 -1027
  24. crawlo/downloader/__init__.py +266 -266
  25. crawlo/downloader/aiohttp_downloader.py +220 -220
  26. crawlo/downloader/cffi_downloader.py +256 -256
  27. crawlo/downloader/httpx_downloader.py +259 -259
  28. crawlo/downloader/hybrid_downloader.py +213 -213
  29. crawlo/downloader/playwright_downloader.py +402 -402
  30. crawlo/downloader/selenium_downloader.py +472 -472
  31. crawlo/event.py +11 -11
  32. crawlo/exceptions.py +81 -81
  33. crawlo/extension/__init__.py +37 -37
  34. crawlo/extension/health_check.py +141 -141
  35. crawlo/extension/log_interval.py +57 -57
  36. crawlo/extension/log_stats.py +81 -81
  37. crawlo/extension/logging_extension.py +43 -43
  38. crawlo/extension/memory_monitor.py +104 -104
  39. crawlo/extension/performance_profiler.py +133 -133
  40. crawlo/extension/request_recorder.py +107 -107
  41. crawlo/filters/__init__.py +154 -154
  42. crawlo/filters/aioredis_filter.py +280 -280
  43. crawlo/filters/memory_filter.py +269 -269
  44. crawlo/items/__init__.py +23 -23
  45. crawlo/items/base.py +21 -21
  46. crawlo/items/fields.py +53 -53
  47. crawlo/items/items.py +104 -104
  48. crawlo/middleware/__init__.py +21 -21
  49. crawlo/middleware/default_header.py +132 -32
  50. crawlo/middleware/download_delay.py +105 -28
  51. crawlo/middleware/middleware_manager.py +135 -135
  52. crawlo/middleware/offsite.py +116 -0
  53. crawlo/middleware/proxy.py +366 -272
  54. crawlo/middleware/request_ignore.py +88 -30
  55. crawlo/middleware/response_code.py +164 -18
  56. crawlo/middleware/response_filter.py +138 -26
  57. crawlo/middleware/retry.py +124 -124
  58. crawlo/mode_manager.py +211 -211
  59. crawlo/network/__init__.py +21 -21
  60. crawlo/network/request.py +338 -338
  61. crawlo/network/response.py +359 -359
  62. crawlo/pipelines/__init__.py +21 -21
  63. crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
  64. crawlo/pipelines/console_pipeline.py +39 -39
  65. crawlo/pipelines/csv_pipeline.py +316 -316
  66. crawlo/pipelines/database_dedup_pipeline.py +224 -224
  67. crawlo/pipelines/json_pipeline.py +218 -218
  68. crawlo/pipelines/memory_dedup_pipeline.py +115 -115
  69. crawlo/pipelines/mongo_pipeline.py +131 -131
  70. crawlo/pipelines/mysql_pipeline.py +316 -316
  71. crawlo/pipelines/pipeline_manager.py +61 -61
  72. crawlo/pipelines/redis_dedup_pipeline.py +167 -167
  73. crawlo/project.py +187 -187
  74. crawlo/queue/pqueue.py +37 -37
  75. crawlo/queue/queue_manager.py +337 -337
  76. crawlo/queue/redis_priority_queue.py +298 -298
  77. crawlo/settings/__init__.py +7 -7
  78. crawlo/settings/default_settings.py +226 -219
  79. crawlo/settings/setting_manager.py +122 -122
  80. crawlo/spider/__init__.py +639 -639
  81. crawlo/stats_collector.py +59 -59
  82. crawlo/subscriber.py +130 -130
  83. crawlo/task_manager.py +30 -30
  84. crawlo/templates/crawlo.cfg.tmpl +10 -10
  85. crawlo/templates/project/__init__.py.tmpl +3 -3
  86. crawlo/templates/project/items.py.tmpl +17 -17
  87. crawlo/templates/project/middlewares.py.tmpl +118 -109
  88. crawlo/templates/project/pipelines.py.tmpl +96 -96
  89. crawlo/templates/project/run.py.tmpl +45 -45
  90. crawlo/templates/project/settings.py.tmpl +327 -326
  91. crawlo/templates/project/settings_distributed.py.tmpl +119 -119
  92. crawlo/templates/project/settings_gentle.py.tmpl +94 -94
  93. crawlo/templates/project/settings_high_performance.py.tmpl +151 -151
  94. crawlo/templates/project/settings_simple.py.tmpl +68 -68
  95. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  96. crawlo/templates/spider/spider.py.tmpl +143 -141
  97. crawlo/tools/__init__.py +182 -182
  98. crawlo/tools/anti_crawler.py +268 -268
  99. crawlo/tools/authenticated_proxy.py +240 -240
  100. crawlo/tools/data_validator.py +180 -180
  101. crawlo/tools/date_tools.py +35 -35
  102. crawlo/tools/distributed_coordinator.py +386 -386
  103. crawlo/tools/retry_mechanism.py +220 -220
  104. crawlo/tools/scenario_adapter.py +262 -262
  105. crawlo/utils/__init__.py +35 -35
  106. crawlo/utils/batch_processor.py +260 -260
  107. crawlo/utils/controlled_spider_mixin.py +439 -439
  108. crawlo/utils/date_tools.py +290 -290
  109. crawlo/utils/db_helper.py +343 -343
  110. crawlo/utils/enhanced_error_handler.py +359 -359
  111. crawlo/utils/env_config.py +105 -105
  112. crawlo/utils/error_handler.py +125 -125
  113. crawlo/utils/func_tools.py +82 -82
  114. crawlo/utils/large_scale_config.py +286 -286
  115. crawlo/utils/large_scale_helper.py +343 -343
  116. crawlo/utils/log.py +128 -128
  117. crawlo/utils/performance_monitor.py +284 -284
  118. crawlo/utils/queue_helper.py +175 -175
  119. crawlo/utils/redis_connection_pool.py +334 -334
  120. crawlo/utils/redis_key_validator.py +199 -199
  121. crawlo/utils/request.py +267 -267
  122. crawlo/utils/request_serializer.py +219 -219
  123. crawlo/utils/spider_loader.py +62 -62
  124. crawlo/utils/system.py +11 -11
  125. crawlo/utils/tools.py +4 -4
  126. crawlo/utils/url.py +39 -39
  127. {crawlo-1.2.0.dist-info → crawlo-1.2.1.dist-info}/METADATA +692 -697
  128. crawlo-1.2.1.dist-info/RECORD +220 -0
  129. examples/__init__.py +7 -7
  130. examples/aiohttp_settings.py +42 -0
  131. examples/curl_cffi_settings.py +41 -0
  132. examples/default_header_middleware_example.py +107 -0
  133. examples/default_header_spider_example.py +129 -0
  134. examples/download_delay_middleware_example.py +160 -0
  135. examples/httpx_settings.py +42 -0
  136. examples/multi_downloader_proxy_example.py +81 -0
  137. examples/offsite_middleware_example.py +55 -0
  138. examples/offsite_spider_example.py +107 -0
  139. examples/proxy_spider_example.py +166 -0
  140. examples/request_ignore_middleware_example.py +51 -0
  141. examples/request_ignore_spider_example.py +99 -0
  142. examples/response_code_middleware_example.py +52 -0
  143. examples/response_filter_middleware_example.py +67 -0
  144. examples/tong_hua_shun_settings.py +62 -0
  145. examples/tong_hua_shun_spider.py +170 -0
  146. tests/DOUBLE_CRAWLO_PREFIX_FIX_REPORT.md +81 -81
  147. tests/__init__.py +7 -7
  148. tests/advanced_tools_example.py +275 -275
  149. tests/authenticated_proxy_example.py +236 -236
  150. tests/cleaners_example.py +160 -160
  151. tests/config_validation_demo.py +102 -102
  152. tests/controlled_spider_example.py +205 -205
  153. tests/date_tools_example.py +180 -180
  154. tests/dynamic_loading_example.py +523 -523
  155. tests/dynamic_loading_test.py +104 -104
  156. tests/env_config_example.py +133 -133
  157. tests/error_handling_example.py +171 -171
  158. tests/redis_key_validation_demo.py +130 -130
  159. tests/response_improvements_example.py +144 -144
  160. tests/test_advanced_tools.py +148 -148
  161. tests/test_all_redis_key_configs.py +145 -145
  162. tests/test_authenticated_proxy.py +141 -141
  163. tests/test_cleaners.py +54 -54
  164. tests/test_comprehensive.py +146 -146
  165. tests/test_config_validator.py +193 -193
  166. tests/test_crawlo_proxy_integration.py +173 -0
  167. tests/test_date_tools.py +123 -123
  168. tests/test_default_header_middleware.py +159 -0
  169. tests/test_double_crawlo_fix.py +207 -207
  170. tests/test_double_crawlo_fix_simple.py +124 -124
  171. tests/test_download_delay_middleware.py +222 -0
  172. tests/test_downloader_proxy_compatibility.py +269 -0
  173. tests/test_dynamic_downloaders_proxy.py +124 -124
  174. tests/test_dynamic_proxy.py +92 -92
  175. tests/test_dynamic_proxy_config.py +146 -146
  176. tests/test_dynamic_proxy_real.py +109 -109
  177. tests/test_edge_cases.py +303 -303
  178. tests/test_enhanced_error_handler.py +270 -270
  179. tests/test_env_config.py +121 -121
  180. tests/test_error_handler_compatibility.py +112 -112
  181. tests/test_final_validation.py +153 -153
  182. tests/test_framework_env_usage.py +103 -103
  183. tests/test_integration.py +356 -356
  184. tests/test_item_dedup_redis_key.py +122 -122
  185. tests/test_offsite_middleware.py +222 -0
  186. tests/test_parsel.py +29 -29
  187. tests/test_performance.py +327 -327
  188. tests/test_proxy_api.py +265 -0
  189. tests/test_proxy_health_check.py +32 -32
  190. tests/test_proxy_middleware.py +122 -0
  191. tests/test_proxy_middleware_enhanced.py +217 -0
  192. tests/test_proxy_middleware_integration.py +136 -136
  193. tests/test_proxy_providers.py +56 -56
  194. tests/test_proxy_stats.py +19 -19
  195. tests/test_proxy_strategies.py +59 -59
  196. tests/test_queue_manager_double_crawlo.py +173 -173
  197. tests/test_queue_manager_redis_key.py +176 -176
  198. tests/test_real_scenario_proxy.py +196 -0
  199. tests/test_redis_config.py +28 -28
  200. tests/test_redis_connection_pool.py +294 -294
  201. tests/test_redis_key_naming.py +181 -181
  202. tests/test_redis_key_validator.py +123 -123
  203. tests/test_redis_queue.py +224 -224
  204. tests/test_request_ignore_middleware.py +183 -0
  205. tests/test_request_serialization.py +70 -70
  206. tests/test_response_code_middleware.py +350 -0
  207. tests/test_response_filter_middleware.py +428 -0
  208. tests/test_response_improvements.py +152 -152
  209. tests/test_retry_middleware.py +242 -0
  210. tests/test_scheduler.py +241 -241
  211. tests/test_simple_response.py +61 -61
  212. tests/test_telecom_spider_redis_key.py +205 -205
  213. tests/test_template_content.py +87 -87
  214. tests/test_template_redis_key.py +134 -134
  215. tests/test_tools.py +153 -153
  216. tests/tools_example.py +257 -257
  217. crawlo-1.2.0.dist-info/RECORD +0 -190
  218. {crawlo-1.2.0.dist-info → crawlo-1.2.1.dist-info}/WHEEL +0 -0
  219. {crawlo-1.2.0.dist-info → crawlo-1.2.1.dist-info}/entry_points.txt +0 -0
  220. {crawlo-1.2.0.dist-info → crawlo-1.2.1.dist-info}/top_level.txt +0 -0
examples/request_ignore_spider_example.py ADDED
@@ -0,0 +1,99 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+ """
+ Example spider using RequestIgnoreMiddleware
+ Shows how to handle ignored requests with RequestIgnoreMiddleware in a real spider
+ """
+
+ from crawlo.spider import Spider
+ from crawlo.network.request import Request
+ from crawlo.exceptions import IgnoreRequestError
+
+
+ class IgnoreExampleSpider(Spider):
+     """
+     Example spider demonstrating the use of RequestIgnoreMiddleware
+     """
+
+     # Spider name
+     name = "ignore_example_spider"
+
+     # Custom settings
+     custom_settings = {
+         # Download delay (seconds)
+         'DOWNLOAD_DELAY': 1,
+
+         # Concurrency
+         'CONCURRENCY': 4,
+
+         # Log level
+         'LOG_LEVEL': 'INFO',
+     }
+
+     def start_requests(self):
+         """
+         Generate the initial requests
+         """
+         urls = [
+             'https://httpbin.org/status/200',  # normal request
+             'https://httpbin.org/status/404',  # 404 request
+             'https://httpbin.org/status/500',  # 500 request
+         ]
+
+         for url in urls:
+             yield Request(url=url, callback=self.parse_response)
+
+         # Generate some requests that will be ignored
+         yield Request(url='https://example.com/ignore1', callback=self.parse_response)
+         yield Request(url='https://example.com/ignore2', callback=self.parse_response)
+
+     async def parse_response(self, response):
+         """
+         Handle the response
+         """
+         self.logger.info(f"Received response: {response.url} - status code: {response.status_code}")
+
+         # Simulate raising IgnoreRequestError under certain conditions
+         if "ignore" in response.url:
+             self.logger.info(f"Simulating an ignored request: {response.url}")
+             # Raise IgnoreRequestError to ignore this request
+             raise IgnoreRequestError(f"Simulated ignored request: {response.url}")
+
+         # Process the response normally
+         return None
+
+     def handle_ignore_request(self, request, reason):
+         """
+         Handle an ignored request.
+         This is a custom method that can implement spider-specific ignore logic
+         """
+         self.logger.info(f"Handling ignored request: {request.url} - reason: {reason}")
+
+
+ # Example code for running the spider
+ if __name__ == "__main__":
+     """
+     How to run:
+
+     1. Make sure crawlo is installed from the project root:
+        pip install -e .
+
+     2. Run the spider:
+        crawlo run ignore_example_spider
+
+     3. Watch the log output for:
+        - normal requests being processed
+        - ignored requests being recorded
+        - the ignore counts in the statistics
+
+     Advantages of RequestIgnoreMiddleware:
+     ✓ Automatically records every ignored request
+     ✓ Provides detailed statistics for analyzing spider behaviour
+     ✓ Supports per-reason and per-domain breakdowns
+     ✓ Handles IgnoreRequestError automatically, no extra code required
+     """
+     print("RequestIgnoreSpider example")
+     print("=" * 30)
+     print("This spider demonstrates how to use RequestIgnoreMiddleware")
+     print("Run it with:")
+     print("  crawlo run ignore_example_spider")
examples/response_code_middleware_example.py ADDED
@@ -0,0 +1,52 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+ """
+ ResponseCodeMiddleware usage example
+ Shows how to use ResponseCodeMiddleware to handle HTTP response status codes
+ """
+
+ # ResponseCodeMiddleware is enabled by default and needs no special configuration;
+ # it automatically records status-code statistics for every response.
+
+ # Middleware configuration (ResponseCodeMiddleware is already enabled by default)
+ SETTINGS = {
+     'MIDDLEWARES': [
+         # === Request preprocessing stage ===
+         'crawlo.middleware.request_ignore.RequestIgnoreMiddleware',    # 1. ignore invalid requests
+         'crawlo.middleware.download_delay.DownloadDelayMiddleware',    # 2. throttle request rate
+         'crawlo.middleware.default_header.DefaultHeaderMiddleware',    # 3. add default headers
+         'crawlo.middleware.proxy.ProxyMiddleware',                     # 4. set proxies
+         'crawlo.middleware.offsite.OffsiteMiddleware',                 # 5. filter offsite requests
+
+         # === Response processing stage ===
+         'crawlo.middleware.retry.RetryMiddleware',                     # 6. retry failed requests
+         'crawlo.middleware.response_code.ResponseCodeMiddleware',      # 7. handle special status codes
+         'crawlo.middleware.response_filter.ResponseFilterMiddleware',  # 8. filter response content
+     ],
+
+     # Other common settings
+     'DOWNLOAD_DELAY': 1,
+     'CONCURRENCY': 8,
+     'LOG_LEVEL': 'INFO',
+ }
+
+ def get_settings():
+     """Return the settings"""
+     return SETTINGS
+
+ if __name__ == "__main__":
+     print("ResponseCodeMiddleware configuration example:")
+     print("=" * 40)
+     print("Middleware list:")
+     for i, middleware in enumerate(SETTINGS['MIDDLEWARES'], 1):
+         print(f"  {i}. {middleware}")
+
+     print("\n" + "=" * 40)
+     print("ResponseCodeMiddleware features:")
+     print("✓ Automatically records all HTTP response status codes")
+     print("✓ Groups statistics by status class (2xx, 3xx, 4xx, 5xx)")
+     print("✓ Counts successful and error responses")
+     print("✓ Tracks the status-code distribution per domain")
+     print("✓ Provides detailed log messages")
+     print("✓ Enabled by default, no special configuration required")
examples/response_filter_middleware_example.py ADDED
@@ -0,0 +1,67 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+ """
+ ResponseFilterMiddleware usage example
+ Shows how to use ResponseFilterMiddleware to filter HTTP responses
+ """
+
+ # ResponseFilterMiddleware allows 2xx status codes by default.
+ # Custom filtering rules can be set via ALLOWED_RESPONSE_CODES and DENIED_RESPONSE_CODES.
+
+ # Example middleware configuration
+ SETTINGS = {
+     # Allowed response status codes (in addition to the default 2xx)
+     'ALLOWED_RESPONSE_CODES': [
+         301,  # permanent redirect
+         302,  # temporary redirect
+         404,  # page not found (may need special handling)
+     ],
+
+     # Denied response status codes (take precedence over ALLOWED_RESPONSE_CODES)
+     'DENIED_RESPONSE_CODES': [
+         200,  # explicitly deny normal responses (for demonstration only)
+         403,  # forbidden
+     ],
+
+     # Middleware configuration (ResponseFilterMiddleware is already enabled by default)
+     'MIDDLEWARES': [
+         # === Request preprocessing stage ===
+         'crawlo.middleware.request_ignore.RequestIgnoreMiddleware',    # 1. ignore invalid requests
+         'crawlo.middleware.download_delay.DownloadDelayMiddleware',    # 2. throttle request rate
+         'crawlo.middleware.default_header.DefaultHeaderMiddleware',    # 3. add default headers
+         'crawlo.middleware.proxy.ProxyMiddleware',                     # 4. set proxies
+         'crawlo.middleware.offsite.OffsiteMiddleware',                 # 5. filter offsite requests
+
+         # === Response processing stage ===
+         'crawlo.middleware.retry.RetryMiddleware',                     # 6. retry failed requests
+         'crawlo.middleware.response_code.ResponseCodeMiddleware',      # 7. handle special status codes
+         'crawlo.middleware.response_filter.ResponseFilterMiddleware',  # 8. filter response content
+     ],
+
+     # Other common settings
+     'DOWNLOAD_DELAY': 1,
+     'CONCURRENCY': 8,
+     'LOG_LEVEL': 'INFO',
+ }
+
+ def get_settings():
+     """Return the settings"""
+     return SETTINGS
+
+ if __name__ == "__main__":
+     print("ResponseFilterMiddleware configuration example:")
+     print("=" * 40)
+     print(f"Allowed status codes: {SETTINGS['ALLOWED_RESPONSE_CODES']}")
+     print(f"Denied status codes: {SETTINGS['DENIED_RESPONSE_CODES']}")
+     print("\nMiddleware list:")
+     for i, middleware in enumerate(SETTINGS['MIDDLEWARES'], 1):
+         print(f"  {i}. {middleware}")
+
+     print("\n" + "=" * 40)
+     print("ResponseFilterMiddleware features:")
+     print("✓ Allows 2xx status codes by default")
+     print("✓ Supports a custom list of allowed status codes")
+     print("✓ Supports a custom list of denied status codes")
+     print("✓ The deny list takes precedence over the allow list")
+     print("✓ Automatically filters out non-conforming responses")
+     print("✓ Provides detailed log messages")
examples/tong_hua_shun_settings.py ADDED
@@ -0,0 +1,62 @@
+ # Tong Hua Shun (10jqka) crawler settings example
+ # ===============================================
+
+ # Basic project information
+ PROJECT_NAME = 'tong_hua_shun_crawler'
+
+ # Concurrency
+ CONCURRENCY = 1
+
+ # Logging
+ LOG_LEVEL = 'INFO'
+ LOG_FILE = 'logs/tong_hua_shun.log'
+
+ # Download delay
+ DOWNLOAD_DELAY = 2
+ RANDOMNESS = True
+
+ # Default request headers
+ DEFAULT_REQUEST_HEADERS = {
+     "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+     "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
+     "cache-control": "no-cache",
+     "pragma": "no-cache",
+     "priority": "u=0, i",
+     "sec-ch-ua": '"Chromium";v="140", "Not=A?Brand";v="24", "Google Chrome";v="140"',
+     "sec-ch-ua-mobile": "?0",
+     "sec-ch-ua-platform": '"Windows"',
+     "sec-fetch-dest": "document",
+     "sec-fetch-mode": "navigate",
+     "sec-fetch-site": "none",
+     "sec-fetch-user": "?1",
+     "upgrade-insecure-requests": "1",
+     "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36"
+ }
+
+ # Proxy settings
+ PROXY_ENABLED = True
+ PROXY_API_URL = 'http://test.proxy.api:8080/proxy/getitem/'
+ PROXY_EXTRACTOR = 'proxy'
+ PROXY_REFRESH_INTERVAL = 60
+ PROXY_API_TIMEOUT = 10
+ PROXY_POOL_SIZE = 3
+ PROXY_HEALTH_CHECK_THRESHOLD = 0.5
+
+ # Middleware
+ MIDDLEWARES = [
+     'crawlo.middleware.request_ignore.RequestIgnoreMiddleware',
+     'crawlo.middleware.download_delay.DownloadDelayMiddleware',
+     'crawlo.middleware.default_header.DefaultHeaderMiddleware',
+     'crawlo.middleware.proxy.ProxyMiddleware',
+     'crawlo.middleware.retry.RetryMiddleware',
+     'crawlo.middleware.response_code.ResponseCodeMiddleware',
+     'crawlo.middleware.response_filter.ResponseFilterMiddleware',
+ ]
+
+ # Pipelines
+ PIPELINES = [
+     'crawlo.pipelines.console_pipeline.ConsolePipeline',
+ ]
+
+ # Other settings
+ DOWNLOAD_TIMEOUT = 30
examples/tong_hua_shun_spider.py ADDED
@@ -0,0 +1,170 @@
+ #!/usr/bin/python
+ # -*- coding: UTF-8 -*-
+ """
+ Tong Hua Shun (10jqka) spider example
+ =====================================
+ Crawls the Tong Hua Shun site using user-supplied headers and cookies
+ """
+
+ import sys
+ import os
+
+ # Add the project root to the Python path
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+
+ from crawlo import Spider, Request
+
+
+ class TongHuaShunSpider(Spider):
+     """Spider for the Tong Hua Shun site"""
+     name = 'tong_hua_shun_spider'
+
+     # User-supplied request headers
+     custom_headers = {
+         "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+         "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
+         "cache-control": "no-cache",
+         "pragma": "no-cache",
+         "priority": "u=0, i",
+         "sec-ch-ua": "\"Chromium\";v=\"140\", \"Not=A?Brand\";v=\"24\", \"Google Chrome\";v=\"140\"",
+         "sec-ch-ua-mobile": "?0",
+         "sec-ch-ua-platform": "\"Windows\"",
+         "sec-fetch-dest": "document",
+         "sec-fetch-mode": "navigate",
+         "sec-fetch-site": "none",
+         "sec-fetch-user": "?1",
+         "upgrade-insecure-requests": "1",
+         "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36"
+     }
+
+     # User-supplied cookies
+     custom_cookies = {
+         "Hm_lvt_722143063e4892925903024537075d0d": "1758071793",
+         "Hm_lvt_929f8b362150b1f77b477230541dbbc2": "1758071793",
+         "historystock": "600699",
+         "spversion": "20130314",
+         "cid": "f9bc812da2c3a7ddf6d5df1fa2d497091758076438",
+         "u_ukey": "A10702B8689642C6BE607730E11E6E4A",
+         "u_uver": "1.0.0",
+         "u_dpass": "Qk3U07X7SHGKa0AcRUg1R1DVWbPioD9Eg270bdikvlwWWXexbsXnRsQNt%2B04iXwdHi80LrSsTFH9a%2B6rtRvqGg%3D%3D",
+         "u_did": "E3ED337393E1429DA56E380DD00B3CCD",
+         "u_ttype": "WEB",
+         "user_status": "0",
+         "ttype": "WEB",
+         "log": "",
+         "Hm_lvt_69929b9dce4c22a060bd22d703b2a280": "1758079404,1758113068,1758157144",
+         "HMACCOUNT": "08DF0D235A291EAA",
+         "Hm_lvt_78c58f01938e4d85eaf619eae71b4ed1": "1758071793,1758113068,1758157144",
+         "user": "MDpteF9lNXRkY3RpdHo6Ok5vbmU6NTAwOjgxNzYyOTAwNDo3LDExMTExMTExMTExLDQwOzQ0LDExLDQwOzYsMSw0MDs1LDEsNDA7MSwxMDEsNDA7MiwxLDQwOzMsMSw0MDs1LDEsNDA7OCwwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMSw0MDsxMDIsMSw0MDoxNjo6OjgwNzYyOTAwNDoxNzU4MTYxNTE0Ojo6MTc1ODA3MjA2MDo2MDQ4MDA6MDoxYTQ0NmFlNDY4M2VmZWY3YmNjYTczY2U3ODZmZTNiODg6ZGVmYXVsdF81OjA%3D",
+         "userid": "807629004",
+         "u_name": "mx_e5tdctitz",
+         "escapename": "mx_e5tdctitz",
+         "ticket": "85eea709becdd924d7eb975351de629e",
+         "utk": "8959c4c6b6f5fb7628864feab15473f4",
+         "sess_tk": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6InNlc3NfdGtfMSIsImJ0eSI6InNlc3NfdGsifQ.eyJqdGkiOiI4ODNiZmU4NmU3M2NhN2NjN2JlZmVmODM0NmFlNDZhNDEiLCJpYXQiOjE3NTgxNjE1MTQsImV4cCI6MTc1ODc2NjMxNCwic3ViIjoiODA3NjI5MDA0IiwiaXNzIjoidXBhc3MuaXdlbmNhaS5jb20iLCJhdWQiOiIyMDIwMTExODUyODg5MDcyIiwiYWN0Ijoib2ZjIiwiY3VocyI6ImIwNTcyZDVjOWNlNDg0MGFlOWYxYTlhYjU3NGZkNjQyYjgzNmExN2E3Y2NhZjk4ZWRiNzI5ZmJkOWFjOGVkYmYifQ.UBNIzxGvQQtXSiIcB_1JJl-EuAc1S9j2LcTLXjwy4ImhDDbh1oJvyRdDUrXdUpwBpIyx5zgYqgt_3FEhY_iayw",
+         "cuc": "ap2eap3gg99g",
+         "Hm_lvt_f79b64788a4e377c608617fba4c736e2": "1758161692",
+         "v": "A1glI4rWhPCQGqh0MvA0ioufKY3vQbzLHqWQT5JJpBNGLfazOlGMW261YNrh",
+         "Hm_lpvt_78c58f01938e4d85eaf619eae71b4ed1": "1758163145",
+         "Hm_lpvt_f79b64788a4e377c608617fba4c736e2": "1758163145",
+         "Hm_lpvt_69929b9dce4c22a060bd22d703b2a280": "1758163145"
+     }
+
+     def start_requests(self):
+         """Generate the initial request"""
+         # User-supplied URL
+         url = "https://stock.10jqka.com.cn/20240315/c655957791.shtml"
+
+         # Create the request with the custom headers and cookies
+         request = Request(
+             url=url,
+             callback=self.parse,
+             headers=self.custom_headers,
+             cookies=self.custom_cookies
+         )
+         yield request
+
+     def parse(self, response):
+         """Parse the response"""
+         print(f"\nFetched page: {response.url}")
+         print(f"Status code: {response.status_code}")
+
+         # Extract the page title
+         title = response.css('title::text').get()
+         if title:
+             print(f"Page title: {title}")
+
+         # Extract key information from the page,
+         # e.g. the article title and publication time
+         article_title = response.css('h1.main-title::text').get()
+         if article_title:
+             print(f"Article title: {article_title}")
+
+         publish_time = response.css('.time::text').get()
+         if publish_time:
+             print(f"Published at: {publish_time}")
+
+         # Return the extracted data
+         return {
+             'url': response.url,
+             'status_code': response.status_code,
+             'title': title,
+             'article_title': article_title,
+             'publish_time': publish_time
+         }
+
+
+ # Settings reference
+ SETTINGS = {
+     # Basic settings
+     'LOG_LEVEL': 'INFO',
+     'CONCURRENCY': 1,
+
+     # Proxy settings
+     'PROXY_ENABLED': True,
+     'PROXY_API_URL': 'http://test.proxy.api:8080/proxy/getitem/',
+     'PROXY_EXTRACTOR': 'proxy',
+     'PROXY_REFRESH_INTERVAL': 60,
+     'PROXY_API_TIMEOUT': 10,
+     'PROXY_POOL_SIZE': 3,
+     'PROXY_HEALTH_CHECK_THRESHOLD': 0.5,
+
+     # Download delay
+     'DOWNLOAD_DELAY': 2,
+     'RANDOMNESS': True,
+
+     # Middleware
+     'MIDDLEWARES': [
+         'crawlo.middleware.request_ignore.RequestIgnoreMiddleware',
+         'crawlo.middleware.download_delay.DownloadDelayMiddleware',
+         'crawlo.middleware.default_header.DefaultHeaderMiddleware',
+         'crawlo.middleware.proxy.ProxyMiddleware',
+         'crawlo.middleware.retry.RetryMiddleware',
+         'crawlo.middleware.response_code.ResponseCodeMiddleware',
+         'crawlo.middleware.response_filter.ResponseFilterMiddleware',
+     ],
+
+     # Pipelines
+     'PIPELINES': [
+         'crawlo.pipelines.console_pipeline.ConsolePipeline',
+     ],
+ }
+
+
+ def main():
+     """Entry point"""
+     print("Tong Hua Shun spider example")
+     print("=" * 30)
+     print("This example shows how to, within the Crawlo framework:")
+     print("1. use custom headers and cookies")
+     print("2. integrate proxy support")
+     print("3. crawl content from the Tong Hua Shun site")
+     print("=" * 30)
+
+     print("\nUsage:")
+     print("1. Configure the proxy parameters in the project's settings.py")
+     print("2. Run the spider: crawlo run tong_hua_shun_spider")
+
+
+ if __name__ == '__main__':
+     main()
tests/DOUBLE_CRAWLO_PREFIX_FIX_REPORT.md CHANGED
@@ -1,82 +1,82 @@
+ # Fix report: double "crawlo" prefix
+
+ ## Problem
+ When running distributed spiders, users found Redis keys with a double `crawlo` prefix, e.g. `crawlo:crawlo:queue:processing:data`. This led to inconsistent Redis key naming and potential confusion.
+
+ ## Analysis
+ Code analysis traced the problem to two places:
+ 1. RedisPriorityQueue silently rewrote the queue name supplied by the user
+ 2. QueueManager did not handle the double `crawlo` prefix correctly when extracting the project name
+
+ ## Fix
+
+ ### 1. RedisPriorityQueue
+ File: `crawlo/queue/redis_priority_queue.py`
+
+ **Before**:
+ ```python
+ # If a queue_name is provided, make it conform to the naming convention
+ # and handle a possibly duplicated prefix
+ if queue_name.startswith("crawlo:crawlo:"):
+     # fix the double crawlo prefix
+     self.queue_name = queue_name.replace("crawlo:crawlo:", "crawlo:", 1)
+ elif not queue_name.startswith("crawlo:"):
+     # no crawlo prefix yet, so add one
+     self.queue_name = f"crawlo:{module_name}:queue:requests"
+ else:
+     # already has the correct crawlo prefix
+     self.queue_name = queue_name
+ ```
+
+ **After**:
+ ```python
+ # Keep the user-supplied queue name unchanged
+ self.queue_name = queue_name
+ ```
+
+ ### 2. QueueManager
+ File: `crawlo/queue/queue_manager.py`
+
+ **After**:
+ ```python
+ # Handle a possible double crawlo prefix
+ if parts[0] == "crawlo" and parts[1] == "crawlo":
+     # double crawlo prefix: use the third segment as the project name
+     if len(parts) >= 3:
+         project_name = parts[2]
+     else:
+         project_name = "default"
+ elif parts[0] == "crawlo":
+     # normal crawlo prefix: use the second segment as the project name
+     project_name = parts[1]
+ else:
+     # no crawlo prefix: use the first segment as the project name
+     project_name = parts[0]
+ ```
+
+ ## Test verification
+
+ ### Test 1: Redis queue naming
+ Verifies that RedisPriorityQueue handles each queue name format without rewriting it:
+ - normal name: `crawlo:test_project:queue:requests` → `crawlo:test_project:queue:requests`
+ - double crawlo prefix: `crawlo:crawlo:queue:requests` → `crawlo:crawlo:queue:requests`
+ - triple crawlo prefix: `crawlo:crawlo:crawlo:queue:requests` → `crawlo:crawlo:crawlo:queue:requests`
+
+ ### Test 2: project name extraction in QueueManager
+ Verifies that QueueManager extracts the project name correctly:
+ - normal name: `crawlo:test_project:queue:requests` → `test_project`
+ - double crawlo prefix: `crawlo:crawlo:queue:requests` → `queue`
+ - triple crawlo prefix: `crawlo:crawlo:crawlo:queue:requests` → `crawlo`
+
+ ### Test 3: queue creation through QueueManager
+ Verifies the end-to-end flow, ensuring queue names stay consistent as they are passed along.
+
+ All tests pass, confirming that the double `crawlo` prefix problem is resolved.
+
+ ## Conclusion
+ With these fixes, Redis keys no longer acquire a double `crawlo` prefix. Redis queue names now stay exactly as configured, and the processing and failed queues keep the same prefix structure.
+
+ ## Recommendations
+ 1. Use the standard queue name format in project configuration, e.g. `crawlo:{project_name}:queue:requests`
+ 2. Use the Redis key validation tool to periodically check and normalize Redis key naming
  3. If a uniform naming convention is required, specify the queue name explicitly at project initialization
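Restating the extraction rule from the report as a standalone function makes its three test cases easy to check. This is a sketch of the logic under a hypothetical name, not the actual QueueManager code:

```python
def extract_project_name(queue_name: str) -> str:
    parts = queue_name.split(":")
    if len(parts) >= 2 and parts[0] == "crawlo" and parts[1] == "crawlo":
        # double crawlo prefix: the third segment is the project name
        return parts[2] if len(parts) >= 3 else "default"
    if len(parts) >= 2 and parts[0] == "crawlo":
        # normal crawlo prefix: the second segment is the project name
        return parts[1]
    # no crawlo prefix: the first segment is the project name
    return parts[0]

assert extract_project_name("crawlo:test_project:queue:requests") == "test_project"
assert extract_project_name("crawlo:crawlo:queue:requests") == "queue"
assert extract_project_name("crawlo:crawlo:crawlo:queue:requests") == "crawlo"
```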
tests/__init__.py CHANGED
@@ -1,7 +1,7 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ # @Time   : 2025-08-24 12:36
+ # @Author : crawl-coder
+ # @Desc   : None
+ """