crawlo 1.4.1__py3-none-any.whl → 1.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (326)
  1. crawlo/__init__.py +93 -93
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +75 -75
  4. crawlo/commands/__init__.py +14 -14
  5. crawlo/commands/check.py +594 -594
  6. crawlo/commands/genspider.py +151 -151
  7. crawlo/commands/help.py +138 -138
  8. crawlo/commands/list.py +155 -155
  9. crawlo/commands/run.py +341 -341
  10. crawlo/commands/startproject.py +436 -436
  11. crawlo/commands/stats.py +187 -187
  12. crawlo/commands/utils.py +196 -196
  13. crawlo/config.py +312 -312
  14. crawlo/config_validator.py +277 -277
  15. crawlo/core/__init__.py +52 -52
  16. crawlo/core/engine.py +438 -439
  17. crawlo/core/processor.py +47 -47
  18. crawlo/core/scheduler.py +291 -257
  19. crawlo/crawler.py +650 -650
  20. crawlo/data/__init__.py +5 -5
  21. crawlo/data/user_agents.py +194 -194
  22. crawlo/downloader/__init__.py +273 -273
  23. crawlo/downloader/aiohttp_downloader.py +233 -228
  24. crawlo/downloader/cffi_downloader.py +245 -245
  25. crawlo/downloader/httpx_downloader.py +259 -259
  26. crawlo/downloader/hybrid_downloader.py +212 -212
  27. crawlo/downloader/playwright_downloader.py +402 -402
  28. crawlo/downloader/selenium_downloader.py +472 -472
  29. crawlo/event.py +11 -11
  30. crawlo/exceptions.py +81 -81
  31. crawlo/extension/__init__.py +63 -63
  32. crawlo/extension/health_check.py +141 -141
  33. crawlo/extension/log_interval.py +94 -94
  34. crawlo/extension/log_stats.py +70 -70
  35. crawlo/extension/logging_extension.py +61 -61
  36. crawlo/extension/memory_monitor.py +104 -104
  37. crawlo/extension/performance_profiler.py +133 -133
  38. crawlo/extension/request_recorder.py +107 -107
  39. crawlo/factories/__init__.py +27 -27
  40. crawlo/factories/base.py +68 -68
  41. crawlo/factories/crawler.py +103 -103
  42. crawlo/factories/registry.py +84 -84
  43. crawlo/filters/__init__.py +154 -154
  44. crawlo/filters/aioredis_filter.py +257 -257
  45. crawlo/filters/memory_filter.py +269 -269
  46. crawlo/framework.py +292 -292
  47. crawlo/initialization/__init__.py +44 -44
  48. crawlo/initialization/built_in.py +425 -425
  49. crawlo/initialization/context.py +141 -141
  50. crawlo/initialization/core.py +193 -193
  51. crawlo/initialization/phases.py +148 -148
  52. crawlo/initialization/registry.py +145 -145
  53. crawlo/items/__init__.py +23 -23
  54. crawlo/items/base.py +23 -23
  55. crawlo/items/fields.py +52 -52
  56. crawlo/items/items.py +104 -104
  57. crawlo/logging/__init__.py +45 -37
  58. crawlo/logging/async_handler.py +181 -0
  59. crawlo/logging/config.py +196 -96
  60. crawlo/logging/factory.py +171 -128
  61. crawlo/logging/manager.py +111 -111
  62. crawlo/logging/monitor.py +153 -0
  63. crawlo/logging/sampler.py +167 -0
  64. crawlo/middleware/__init__.py +21 -21
  65. crawlo/middleware/default_header.py +132 -132
  66. crawlo/middleware/download_delay.py +104 -104
  67. crawlo/middleware/middleware_manager.py +135 -135
  68. crawlo/middleware/offsite.py +123 -123
  69. crawlo/middleware/proxy.py +386 -386
  70. crawlo/middleware/request_ignore.py +86 -86
  71. crawlo/middleware/response_code.py +150 -150
  72. crawlo/middleware/response_filter.py +136 -136
  73. crawlo/middleware/retry.py +124 -124
  74. crawlo/middleware/simple_proxy.py +65 -65
  75. crawlo/mode_manager.py +219 -219
  76. crawlo/network/__init__.py +21 -21
  77. crawlo/network/request.py +379 -379
  78. crawlo/network/response.py +359 -359
  79. crawlo/pipelines/__init__.py +21 -21
  80. crawlo/pipelines/bloom_dedup_pipeline.py +146 -146
  81. crawlo/pipelines/console_pipeline.py +39 -39
  82. crawlo/pipelines/csv_pipeline.py +316 -316
  83. crawlo/pipelines/database_dedup_pipeline.py +197 -197
  84. crawlo/pipelines/json_pipeline.py +218 -218
  85. crawlo/pipelines/memory_dedup_pipeline.py +105 -105
  86. crawlo/pipelines/mongo_pipeline.py +131 -131
  87. crawlo/pipelines/mysql_pipeline.py +325 -325
  88. crawlo/pipelines/pipeline_manager.py +100 -84
  89. crawlo/pipelines/redis_dedup_pipeline.py +156 -156
  90. crawlo/project.py +349 -338
  91. crawlo/queue/pqueue.py +42 -42
  92. crawlo/queue/queue_manager.py +526 -522
  93. crawlo/queue/redis_priority_queue.py +370 -367
  94. crawlo/settings/__init__.py +7 -7
  95. crawlo/settings/default_settings.py +284 -284
  96. crawlo/settings/setting_manager.py +219 -219
  97. crawlo/spider/__init__.py +657 -657
  98. crawlo/stats_collector.py +73 -73
  99. crawlo/subscriber.py +129 -129
  100. crawlo/task_manager.py +138 -138
  101. crawlo/templates/crawlo.cfg.tmpl +10 -10
  102. crawlo/templates/project/__init__.py.tmpl +3 -3
  103. crawlo/templates/project/items.py.tmpl +17 -17
  104. crawlo/templates/project/middlewares.py.tmpl +118 -118
  105. crawlo/templates/project/pipelines.py.tmpl +96 -96
  106. crawlo/templates/project/settings.py.tmpl +170 -170
  107. crawlo/templates/project/settings_distributed.py.tmpl +169 -169
  108. crawlo/templates/project/settings_gentle.py.tmpl +166 -166
  109. crawlo/templates/project/settings_high_performance.py.tmpl +167 -167
  110. crawlo/templates/project/settings_minimal.py.tmpl +65 -65
  111. crawlo/templates/project/settings_simple.py.tmpl +164 -164
  112. crawlo/templates/project/spiders/__init__.py.tmpl +9 -9
  113. crawlo/templates/run.py.tmpl +34 -34
  114. crawlo/templates/spider/spider.py.tmpl +143 -143
  115. crawlo/templates/spiders_init.py.tmpl +9 -9
  116. crawlo/tools/__init__.py +200 -200
  117. crawlo/tools/anti_crawler.py +268 -268
  118. crawlo/tools/authenticated_proxy.py +240 -240
  119. crawlo/tools/data_formatter.py +225 -225
  120. crawlo/tools/data_validator.py +180 -180
  121. crawlo/tools/date_tools.py +289 -289
  122. crawlo/tools/distributed_coordinator.py +384 -384
  123. crawlo/tools/encoding_converter.py +127 -127
  124. crawlo/tools/network_diagnostic.py +364 -364
  125. crawlo/tools/request_tools.py +82 -82
  126. crawlo/tools/retry_mechanism.py +224 -224
  127. crawlo/tools/scenario_adapter.py +262 -262
  128. crawlo/tools/text_cleaner.py +232 -232
  129. crawlo/utils/__init__.py +34 -34
  130. crawlo/utils/batch_processor.py +259 -259
  131. crawlo/utils/class_loader.py +25 -25
  132. crawlo/utils/controlled_spider_mixin.py +439 -439
  133. crawlo/utils/db_helper.py +343 -343
  134. crawlo/utils/enhanced_error_handler.py +356 -356
  135. crawlo/utils/env_config.py +142 -142
  136. crawlo/utils/error_handler.py +165 -165
  137. crawlo/utils/fingerprint.py +122 -122
  138. crawlo/utils/func_tools.py +82 -82
  139. crawlo/utils/large_scale_config.py +286 -286
  140. crawlo/utils/large_scale_helper.py +344 -344
  141. crawlo/utils/log.py +79 -79
  142. crawlo/utils/performance_monitor.py +285 -285
  143. crawlo/utils/queue_helper.py +175 -175
  144. crawlo/utils/redis_connection_pool.py +388 -388
  145. crawlo/utils/redis_key_validator.py +198 -198
  146. crawlo/utils/request.py +267 -267
  147. crawlo/utils/request_serializer.py +225 -225
  148. crawlo/utils/spider_loader.py +61 -61
  149. crawlo/utils/system.py +11 -11
  150. crawlo/utils/tools.py +4 -4
  151. crawlo/utils/url.py +39 -39
  152. crawlo-1.4.3.dist-info/METADATA +190 -0
  153. crawlo-1.4.3.dist-info/RECORD +326 -0
  154. examples/__init__.py +7 -7
  155. examples/test_project/__init__.py +7 -7
  156. examples/test_project/run.py +34 -34
  157. examples/test_project/test_project/__init__.py +3 -3
  158. examples/test_project/test_project/items.py +17 -17
  159. examples/test_project/test_project/middlewares.py +118 -118
  160. examples/test_project/test_project/pipelines.py +96 -96
  161. examples/test_project/test_project/settings.py +169 -169
  162. examples/test_project/test_project/spiders/__init__.py +9 -9
  163. examples/test_project/test_project/spiders/of_week_dis.py +143 -143
  164. tests/__init__.py +7 -7
  165. tests/advanced_tools_example.py +275 -275
  166. tests/authenticated_proxy_example.py +106 -106
  167. tests/baidu_performance_test.py +108 -108
  168. tests/baidu_test.py +59 -59
  169. tests/cleaners_example.py +160 -160
  170. tests/comprehensive_framework_test.py +212 -212
  171. tests/comprehensive_test.py +81 -81
  172. tests/comprehensive_testing_summary.md +186 -186
  173. tests/config_validation_demo.py +142 -142
  174. tests/controlled_spider_example.py +205 -205
  175. tests/date_tools_example.py +180 -180
  176. tests/debug_configure.py +69 -69
  177. tests/debug_framework_logger.py +84 -84
  178. tests/debug_log_config.py +126 -126
  179. tests/debug_log_levels.py +63 -63
  180. tests/debug_pipelines.py +66 -66
  181. tests/detailed_log_test.py +233 -233
  182. tests/distributed_test.py +66 -66
  183. tests/distributed_test_debug.py +76 -76
  184. tests/dynamic_loading_example.py +523 -523
  185. tests/dynamic_loading_test.py +104 -104
  186. tests/env_config_example.py +133 -133
  187. tests/error_handling_example.py +171 -171
  188. tests/final_comprehensive_test.py +151 -151
  189. tests/final_log_test.py +260 -260
  190. tests/final_validation_test.py +182 -182
  191. tests/fix_log_test.py +142 -142
  192. tests/framework_performance_test.py +202 -202
  193. tests/log_buffering_test.py +111 -111
  194. tests/log_generation_timing_test.py +153 -153
  195. tests/optimized_performance_test.py +211 -211
  196. tests/performance_comparison.py +245 -245
  197. tests/queue_blocking_test.py +113 -113
  198. tests/queue_test.py +89 -89
  199. tests/redis_key_validation_demo.py +130 -130
  200. tests/request_params_example.py +150 -150
  201. tests/response_improvements_example.py +144 -144
  202. tests/scrapy_comparison/ofweek_scrapy.py +138 -138
  203. tests/scrapy_comparison/scrapy_test.py +133 -133
  204. tests/simple_command_test.py +119 -119
  205. tests/simple_crawlo_test.py +127 -127
  206. tests/simple_log_test.py +57 -57
  207. tests/simple_log_test2.py +137 -137
  208. tests/simple_optimization_test.py +128 -128
  209. tests/simple_queue_type_test.py +41 -41
  210. tests/simple_spider_test.py +49 -49
  211. tests/simple_test.py +47 -47
  212. tests/spider_log_timing_test.py +177 -177
  213. tests/test_advanced_tools.py +148 -148
  214. tests/test_all_commands.py +230 -230
  215. tests/test_all_pipeline_fingerprints.py +133 -133
  216. tests/test_all_redis_key_configs.py +145 -145
  217. tests/test_authenticated_proxy.py +141 -141
  218. tests/test_batch_processor.py +178 -178
  219. tests/test_cleaners.py +54 -54
  220. tests/test_component_factory.py +174 -174
  221. tests/test_comprehensive.py +146 -146
  222. tests/test_config_consistency.py +80 -80
  223. tests/test_config_merge.py +152 -152
  224. tests/test_config_validator.py +182 -182
  225. tests/test_controlled_spider_mixin.py +79 -79
  226. tests/test_crawlo_proxy_integration.py +108 -108
  227. tests/test_date_tools.py +123 -123
  228. tests/test_dedup_fix.py +220 -220
  229. tests/test_dedup_pipeline_consistency.py +125 -0
  230. tests/test_default_header_middleware.py +313 -313
  231. tests/test_distributed.py +65 -65
  232. tests/test_double_crawlo_fix.py +204 -204
  233. tests/test_double_crawlo_fix_simple.py +124 -124
  234. tests/test_download_delay_middleware.py +221 -221
  235. tests/test_downloader_proxy_compatibility.py +268 -268
  236. tests/test_dynamic_downloaders_proxy.py +124 -124
  237. tests/test_dynamic_proxy.py +92 -92
  238. tests/test_dynamic_proxy_config.py +146 -146
  239. tests/test_dynamic_proxy_real.py +109 -109
  240. tests/test_edge_cases.py +303 -303
  241. tests/test_enhanced_error_handler.py +270 -270
  242. tests/test_enhanced_error_handler_comprehensive.py +245 -245
  243. tests/test_env_config.py +121 -121
  244. tests/test_error_handler_compatibility.py +112 -112
  245. tests/test_factories.py +252 -252
  246. tests/test_final_validation.py +153 -153
  247. tests/test_fingerprint_consistency.py +135 -135
  248. tests/test_fingerprint_simple.py +51 -51
  249. tests/test_framework_env_usage.py +103 -103
  250. tests/test_framework_logger.py +66 -66
  251. tests/test_framework_startup.py +64 -64
  252. tests/test_get_component_logger.py +83 -83
  253. tests/test_hash_performance.py +99 -99
  254. tests/test_integration.py +169 -169
  255. tests/test_item_dedup_redis_key.py +122 -122
  256. tests/test_large_scale_config.py +112 -112
  257. tests/test_large_scale_helper.py +235 -235
  258. tests/test_logging_enhancements.py +375 -0
  259. tests/test_logging_final.py +185 -0
  260. tests/test_logging_integration.py +313 -0
  261. tests/test_logging_system.py +282 -282
  262. tests/test_middleware_debug.py +142 -0
  263. tests/test_mode_change.py +72 -72
  264. tests/test_mode_consistency.py +51 -51
  265. tests/test_offsite_middleware.py +244 -244
  266. tests/test_offsite_middleware_simple.py +203 -203
  267. tests/test_parsel.py +29 -29
  268. tests/test_performance.py +327 -327
  269. tests/test_performance_monitor.py +115 -115
  270. tests/test_pipeline_fingerprint_consistency.py +86 -86
  271. tests/test_priority_behavior.py +212 -0
  272. tests/test_priority_consistency.py +152 -0
  273. tests/test_priority_consistency_fixed.py +250 -0
  274. tests/test_proxy_api.py +264 -264
  275. tests/test_proxy_health_check.py +32 -32
  276. tests/test_proxy_middleware.py +121 -121
  277. tests/test_proxy_middleware_enhanced.py +216 -216
  278. tests/test_proxy_middleware_integration.py +136 -136
  279. tests/test_proxy_middleware_refactored.py +184 -184
  280. tests/test_proxy_providers.py +56 -56
  281. tests/test_proxy_stats.py +19 -19
  282. tests/test_proxy_strategies.py +59 -59
  283. tests/test_queue_empty_check.py +41 -41
  284. tests/test_queue_manager_double_crawlo.py +173 -173
  285. tests/test_queue_manager_redis_key.py +179 -179
  286. tests/test_queue_naming.py +154 -154
  287. tests/test_queue_type.py +106 -106
  288. tests/test_queue_type_redis_config_consistency.py +131 -0
  289. tests/test_random_headers_default.py +323 -0
  290. tests/test_random_headers_necessity.py +309 -0
  291. tests/test_random_user_agent.py +72 -72
  292. tests/test_real_scenario_proxy.py +195 -195
  293. tests/test_redis_config.py +28 -28
  294. tests/test_redis_connection_pool.py +294 -294
  295. tests/test_redis_key_naming.py +181 -181
  296. tests/test_redis_key_validator.py +123 -123
  297. tests/test_redis_queue.py +224 -224
  298. tests/test_redis_queue_name_fix.py +175 -175
  299. tests/test_redis_queue_type_fallback.py +130 -0
  300. tests/test_request_ignore_middleware.py +182 -182
  301. tests/test_request_params.py +111 -111
  302. tests/test_request_serialization.py +70 -70
  303. tests/test_response_code_middleware.py +349 -349
  304. tests/test_response_filter_middleware.py +427 -427
  305. tests/test_response_improvements.py +152 -152
  306. tests/test_retry_middleware.py +334 -242
  307. tests/test_retry_middleware_realistic.py +274 -0
  308. tests/test_scheduler.py +252 -252
  309. tests/test_scheduler_config_update.py +133 -133
  310. tests/test_simple_response.py +61 -61
  311. tests/test_telecom_spider_redis_key.py +205 -205
  312. tests/test_template_content.py +87 -87
  313. tests/test_template_redis_key.py +134 -134
  314. tests/test_tools.py +159 -159
  315. tests/test_user_agent_randomness.py +177 -0
  316. tests/test_user_agents.py +96 -96
  317. tests/tools_example.py +260 -260
  318. tests/untested_features_report.md +138 -138
  319. tests/verify_debug.py +51 -51
  320. tests/verify_distributed.py +117 -117
  321. tests/verify_log_fix.py +111 -111
  322. crawlo-1.4.1.dist-info/METADATA +0 -1199
  323. crawlo-1.4.1.dist-info/RECORD +0 -309
  324. {crawlo-1.4.1.dist-info → crawlo-1.4.3.dist-info}/WHEEL +0 -0
  325. {crawlo-1.4.1.dist-info → crawlo-1.4.3.dist-info}/entry_points.txt +0 -0
  326. {crawlo-1.4.1.dist-info → crawlo-1.4.3.dist-info}/top_level.txt +0 -0
@@ -1,170 +1,170 @@
(Both sides of this hunk are textually identical in this view; the file content is shown once below.)

# -*- coding: UTF-8 -*-
"""
test_project project settings (distributed edition)
=============================
Distributed crawler configuration based on the Crawlo framework.
Suited to large-scale data collection and multi-node deployment.
"""

import os

# ============================== Project basics ==============================
PROJECT_NAME = 'test_project'

# ============================== Run mode ==============================
RUN_MODE = 'distributed'

# ============================== Concurrency ==============================
CONCURRENCY = 16
MAX_RUNNING_SPIDERS = 5
DOWNLOAD_DELAY = 1.0

# ============================== Downloader ==============================
# Available downloaders:
# DOWNLOADER = 'crawlo.downloader.aiohttp_downloader.AioHttpDownloader'
# DOWNLOADER = 'crawlo.downloader.httpx_downloader.HttpXDownloader'
# DOWNLOADER = 'crawlo.downloader.cffi_downloader.CurlCffiDownloader'
DOWNLOADER = 'crawlo.downloader.aiohttp_downloader.AioHttpDownloader'

# ============================== Queue ==============================
QUEUE_TYPE = 'redis'
# When the Redis queue is used, the queue name can be customized.
# Queue names follow the unified naming convention: crawlo:{PROJECT_NAME}:queue:requests
# SCHEDULER_QUEUE_NAME = f'crawlo:{PROJECT_NAME}:queue:requests'

# ============================== Deduplication filter ==============================
FILTER_CLASS = 'crawlo.filters.aioredis_filter.AioRedisFilter'

# ============================== Default dedup pipeline ==============================
DEFAULT_DEDUP_PIPELINE = 'crawlo.pipelines.redis_dedup_pipeline.RedisDedupPipeline'

# ============================== Spider modules ==============================
SPIDER_MODULES = ['test_project.spiders']

# ============================== Middleware ==============================
# MIDDLEWARES = [
#     'crawlo.middleware.simple_proxy.SimpleProxyMiddleware',
# ]

# ============================== Default request headers ==============================
# Default headers applied by DefaultHeaderMiddleware
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
}

# ============================== Allowed domains ==============================
# Domains permitted by OffsiteMiddleware
# ALLOWED_DOMAINS = ['example.com']

# ============================== Item pipelines ==============================
# PIPELINES = [
#     'crawlo.pipelines.mysql_pipeline.AsyncmyMySQLPipeline',  # MySQL storage (asyncmy async driver)
# ]

# ============================== Extensions ==============================
# EXTENSIONS = [
#     'crawlo.extension.log_interval.LogIntervalExtension',
#     'crawlo.extension.log_stats.LogStats',
#     'crawlo.extension.logging_extension.CustomLoggerExtension',
# ]

# ============================== Logging ==============================
LOG_LEVEL = 'INFO'
LOG_FILE = 'logs/test_project.log'
LOG_ENCODING = 'utf-8'  # Explicit encoding for the log file
STATS_DUMP = True

# ============================== Output ==============================
OUTPUT_DIR = 'output'

# ============================== Redis ==============================
REDIS_HOST = os.getenv('REDIS_HOST', '127.0.0.1')
REDIS_PORT = int(os.getenv('REDIS_PORT', 6379))
REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', '')
REDIS_DB = int(os.getenv('REDIS_DB', 0))

# Build the URL depending on whether a password is set
if REDIS_PASSWORD:
    REDIS_URL = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'
else:
    REDIS_URL = f'redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'

# ============================== MySQL ==============================
MYSQL_HOST = os.getenv('MYSQL_HOST', '127.0.0.1')
MYSQL_PORT = int(os.getenv('MYSQL_PORT', 3306))
MYSQL_USER = os.getenv('MYSQL_USER', 'root')
MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD', '123456')
MYSQL_DB = os.getenv('MYSQL_DB', 'test_project')
MYSQL_TABLE = 'test_project_data'
MYSQL_BATCH_SIZE = 100
MYSQL_USE_BATCH = True  # Enable batched inserts

# ============================== MongoDB ==============================
MONGO_URI = os.getenv('MONGO_URI', 'mongodb://localhost:27017')
MONGO_DATABASE = 'test_project_db'
MONGO_COLLECTION = 'test_project_items'
MONGO_MAX_POOL_SIZE = 200
MONGO_MIN_POOL_SIZE = 20
MONGO_BATCH_SIZE = 100  # Rows per batch insert
MONGO_USE_BATCH = True  # Enable batched inserts

# ============================== Proxy ==============================
# Proxy support is disabled by default; enable and configure it in the project settings if needed.
PROXY_ENABLED = False  # Enable proxy support

# Simple proxy configuration (for SimpleProxyMiddleware)
PROXY_LIST = []  # Proxy list, e.g. ["http://proxy1:8080", "http://proxy2:8080"]

# Advanced proxy configuration (for ProxyMiddleware)
PROXY_API_URL = ""  # Proxy API endpoint (replace with a real address)

# Proxy extraction rule (a field path or a callable)
# Example: "proxy" matches {"proxy": "http://1.1.1.1:8080"}
# Example: "data.proxy" matches {"data": {"proxy": "http://1.1.1.1:8080"}}
PROXY_EXTRACTOR = "proxy"

# Proxy refresh control
PROXY_REFRESH_INTERVAL = 60  # Proxy refresh interval (seconds)
PROXY_API_TIMEOUT = 10       # Timeout for proxy API requests (seconds)

# ============================== Curl-Cffi specific settings ==============================
# Browser fingerprint impersonation (only effective with the CurlCffi downloader)
CURL_BROWSER_TYPE = "chrome"  # Options: chrome, edge, safari, firefox, or a pinned version such as chrome136

# Custom browser version map (overrides the defaults)
CURL_BROWSER_VERSION_MAP = {
    "chrome": "chrome136",
    "edge": "edge101",
    "safari": "safari184",
    "firefox": "firefox135",
}

# ============================== Downloader tuning ==============================
# Downloader health checks
DOWNLOADER_HEALTH_CHECK = True  # Enable downloader health checks
HEALTH_CHECK_INTERVAL = 60      # Health check interval (seconds)

# Request statistics
REQUEST_STATS_ENABLED = True   # Enable request statistics
STATS_RESET_ON_START = False   # Reset statistics on startup

# HttpX downloader settings
HTTPX_HTTP2 = True             # Enable HTTP/2 support
HTTPX_FOLLOW_REDIRECTS = True  # Follow redirects automatically

# AioHttp downloader settings
AIOHTTP_AUTO_DECOMPRESS = True  # Automatically decompress responses
AIOHTTP_FORCE_CLOSE = False     # Force connections to close after each request

# General tuning
CONNECTION_TTL_DNS_CACHE = 300     # DNS cache TTL (seconds)
CONNECTION_KEEPALIVE_TIMEOUT = 15  # Keep-Alive timeout (seconds)

# ============================== Memory monitoring ==============================
# The memory monitor extension is disabled by default; enable it in the project settings if needed.
MEMORY_MONITOR_ENABLED = False    # Enable memory monitoring
MEMORY_MONITOR_INTERVAL = 60      # Memory check interval (seconds)
MEMORY_WARNING_THRESHOLD = 80.0   # Memory usage warning threshold (percent)
MEMORY_CRITICAL_THRESHOLD = 90.0  # Memory usage critical threshold (percent)
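
The PROXY_EXTRACTOR comment above describes a field path ("proxy" or "data.proxy") used to pull the proxy URL out of the JSON returned by PROXY_API_URL. The actual ProxyMiddleware implementation is not shown in this diff, so the following is only a minimal sketch of how such a dotted field path could be resolved; extract_proxy() is a hypothetical helper, not part of the crawlo API.

# Illustrative sketch only: resolve a dotted field path such as "data.proxy"
# against a decoded JSON payload. Not crawlo's implementation.
from typing import Any, Callable, Union

def extract_proxy(payload: dict, extractor: Union[str, Callable[[dict], Any]]) -> Any:
    """Return the proxy value located by a field path or a callable."""
    if callable(extractor):           # PROXY_EXTRACTOR may also be a function
        return extractor(payload)
    value: Any = payload
    for key in extractor.split('.'):  # walk nested dicts, e.g. "data.proxy"
        value = value[key]
    return value

# Matches the examples in the comments above:
assert extract_proxy({"proxy": "http://1.1.1.1:8080"}, "proxy") == "http://1.1.1.1:8080"
assert extract_proxy({"data": {"proxy": "http://1.1.1.1:8080"}}, "data.proxy") == "http://1.1.1.1:8080"
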
@@ -1,10 +1,10 @@
(Both sides of this hunk are likewise identical; the file content is shown once.)

# -*- coding: UTF-8 -*-
"""
test_project.spiders
========================
Holds all of the project's spiders.
"""

# Import every spider automatically so that each one gets registered.
# Example:
# from .YourSpider import YourSpider
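
The package docstring says spiders should be imported here so they get registered; the template shows one manual import per spider. A generic alternative, sketched below with only the standard library, is to walk the package and import every submodule. This is an assumption about how a project might fill in this file, not code shipped with crawlo.

# Sketch: auto-import every module in this package so spider classes
# register on import. Standard library only.
import importlib
import pkgutil

for _finder, _name, _is_pkg in pkgutil.iter_modules(__path__):
    importlib.import_module(f'{__name__}.{_name}')
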
@@ -1,144 +1,144 @@
(Both sides of this hunk are likewise identical; the file content is shown once.)

# -*- coding: UTF-8 -*-
"""
test_project.spiders.of_week_dis
=======================================
Spider generated by the `crawlo genspider` command.
Built on the Crawlo framework; supports asynchronous concurrency, distributed crawling, and more.

Usage:
    crawlo crawl of_week_dis
"""

from crawlo.spider import Spider
from crawlo import Request
from ..items import ExampleItem


class OfweekdisSpider(Spider):
    """
    Spider: of_week_dis

    Features:
    - Concurrent crawling
    - Automatic request deduplication
    - Retry on errors
    - Item pipeline processing
    """
    name = 'of_week_dis'
    allowed_domains = ['ee.ofweek.com']
    start_urls = ['https://ee.ofweek.com/']

    # Advanced settings (optional)
    # custom_settings = {
    #     'DOWNLOAD_DELAY': 2.0,
    #     'CONCURRENCY': 4,
    #     'RETRY_HTTP_CODES': [500, 502, 503, 504, 408, 429],
    #     'ALLOWED_RESPONSE_CODES': [200, 301, 302],  # only accept these status codes
    #     'DENIED_RESPONSE_CODES': [403, 404],        # reject these status codes
    # }

    def start_requests(self):
        """
        Generate the initial requests.

        Custom headers, proxies, priorities, and so on are supported.
        """
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
        }

        for url in self.start_urls:
            yield Request(
                url=url,
                callback=self.parse,
                headers=headers,
                # meta={'proxy': 'http://proxy.example.com:8080'},  # custom proxy
                # priority=10,  # request priority (higher numbers run first)
            )

    def parse(self, response):
        """
        Main response-parsing method.

        Args:
            response: the response object, containing the page body and metadata

        Yields:
            Request: new request objects (for deeper crawling)
            Item: item objects (for storage)
        """
        self.logger.info(f'Parsing page: {response.url}')

        # ================== Data extraction examples ==================

        # Extract data into an Item
        # item = Item()
        # item['title'] = response.xpath('//title/text()').get(default='')
        # item['url'] = response.url
        # item['content'] = response.xpath('//div[@class="content"]//text()').getall()
        # yield item

        # Or yield a plain dict (for simple data)
        yield {
            'title': response.xpath('//title/text()').get(default=''),
            'url': response.url,
            'status_code': response.status_code,
            # 'description': response.xpath('//meta[@name="description"]/@content').get(),
            # 'keywords': response.xpath('//meta[@name="keywords"]/@content').get(),
        }

        # ================== Link extraction examples ==================

        # Extract and follow links
        # links = response.xpath('//a/@href').getall()
        # for link in links:
        #     # Keep only usable links
        #     if link and not link.startswith(('javascript:', 'mailto:', '#')):
        #         yield response.follow(
        #             link,
        #             callback=self.parse_detail,  # or self.parse to keep recursing
        #             meta={'parent_url': response.url}  # pass along the parent page
        #         )

        # Extract links with CSS selectors
        # for link in response.css('a.item-link::attr(href)').getall():
        #     yield response.follow(link, callback=self.parse_detail)

        # ================== Pagination examples ==================

        # Follow a "next page" link
        # next_page = response.xpath('//a[@class="next"]/@href').get()
        # if next_page:
        #     yield response.follow(next_page, callback=self.parse)

        # Numeric pagination
        # current_page = int(response.meta.get('page', 1))
        # max_pages = 100  # maximum number of pages
        # if current_page < max_pages:
        #     next_url = f'https://ee.ofweek.com/page/{current_page + 1}'
        #     yield Request(
        #         url=next_url,
        #         callback=self.parse,
        #         meta={'page': current_page + 1}
        #     )

    def parse_detail(self, response):
        """
        Parse a detail page (optional).

        Handles detail pages reached from the listing page.
        """
        self.logger.info(f'Parsing detail page: {response.url}')

        # parent_url = response.meta.get('parent_url', '')
        #
        # yield {
        #     'title': response.xpath('//h1/text()').get(default=''),
        #     'content': '\n'.join(response.xpath('//div[@class="content"]//text()').getall()),
        #     'url': response.url,
        #     'parent_url': parent_url,
        #     'publish_time': response.xpath('//time/@datetime').get(),
        # }

        pass
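
The commented custom_settings in this spider describe ALLOWED_RESPONSE_CODES and DENIED_RESPONSE_CODES as an allow/deny filter on HTTP status codes. The middleware that enforces them (crawlo/middleware/response_code.py in the file list above) is not part of this diff, so the sketch below only illustrates that accept/deny idea; the helper name and the exact precedence rules are assumptions, not crawlo's behaviour.

# Illustrative sketch only: accept/deny decision for a response status code.
# should_keep_response() is a hypothetical helper, not crawlo's middleware.
from typing import Iterable, Optional

def should_keep_response(status: int,
                         allowed: Optional[Iterable[int]] = None,
                         denied: Optional[Iterable[int]] = None) -> bool:
    """Return True if a response with this status code should be processed."""
    if denied and status in set(denied):  # explicit denylist wins
        return False
    if allowed:                           # if an allowlist is set, require membership
        return status in set(allowed)
    return True                           # otherwise keep everything

# With the commented custom_settings above:
# should_keep_response(200, [200, 301, 302], [403, 404]) -> True
# should_keep_response(403, [200, 301, 302], [403, 404]) -> False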