crawlo 1.3.3-py3-none-any.whl → 1.3.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crawlo might be problematic.

Files changed (289)
  1. crawlo/__init__.py +87 -63
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +75 -75
  4. crawlo/commands/__init__.py +14 -14
  5. crawlo/commands/check.py +594 -594
  6. crawlo/commands/genspider.py +151 -151
  7. crawlo/commands/help.py +138 -138
  8. crawlo/commands/list.py +155 -155
  9. crawlo/commands/run.py +341 -323
  10. crawlo/commands/startproject.py +436 -436
  11. crawlo/commands/stats.py +187 -187
  12. crawlo/commands/utils.py +196 -196
  13. crawlo/config.py +312 -312
  14. crawlo/config_validator.py +277 -277
  15. crawlo/core/__init__.py +46 -2
  16. crawlo/core/engine.py +439 -365
  17. crawlo/core/processor.py +40 -40
  18. crawlo/core/scheduler.py +257 -256
  19. crawlo/crawler.py +639 -1167
  20. crawlo/data/__init__.py +5 -5
  21. crawlo/data/user_agents.py +194 -194
  22. crawlo/downloader/__init__.py +273 -273
  23. crawlo/downloader/aiohttp_downloader.py +228 -226
  24. crawlo/downloader/cffi_downloader.py +245 -245
  25. crawlo/downloader/httpx_downloader.py +259 -259
  26. crawlo/downloader/hybrid_downloader.py +212 -212
  27. crawlo/downloader/playwright_downloader.py +402 -402
  28. crawlo/downloader/selenium_downloader.py +472 -472
  29. crawlo/event.py +11 -11
  30. crawlo/exceptions.py +81 -81
  31. crawlo/extension/__init__.py +39 -39
  32. crawlo/extension/health_check.py +141 -141
  33. crawlo/extension/log_interval.py +57 -57
  34. crawlo/extension/log_stats.py +81 -81
  35. crawlo/extension/logging_extension.py +61 -52
  36. crawlo/extension/memory_monitor.py +104 -104
  37. crawlo/extension/performance_profiler.py +133 -133
  38. crawlo/extension/request_recorder.py +107 -107
  39. crawlo/factories/__init__.py +28 -0
  40. crawlo/factories/base.py +69 -0
  41. crawlo/factories/crawler.py +104 -0
  42. crawlo/factories/registry.py +85 -0
  43. crawlo/filters/__init__.py +154 -154
  44. crawlo/filters/aioredis_filter.py +257 -234
  45. crawlo/filters/memory_filter.py +269 -269
  46. crawlo/framework.py +292 -0
  47. crawlo/initialization/__init__.py +40 -0
  48. crawlo/initialization/built_in.py +426 -0
  49. crawlo/initialization/context.py +142 -0
  50. crawlo/initialization/core.py +194 -0
  51. crawlo/initialization/phases.py +149 -0
  52. crawlo/initialization/registry.py +146 -0
  53. crawlo/items/__init__.py +23 -23
  54. crawlo/items/base.py +23 -22
  55. crawlo/items/fields.py +52 -52
  56. crawlo/items/items.py +104 -104
  57. crawlo/logging/__init__.py +38 -0
  58. crawlo/logging/config.py +97 -0
  59. crawlo/logging/factory.py +129 -0
  60. crawlo/logging/manager.py +112 -0
  61. crawlo/middleware/__init__.py +21 -21
  62. crawlo/middleware/default_header.py +132 -132
  63. crawlo/middleware/download_delay.py +104 -104
  64. crawlo/middleware/middleware_manager.py +135 -135
  65. crawlo/middleware/offsite.py +123 -123
  66. crawlo/middleware/proxy.py +386 -386
  67. crawlo/middleware/request_ignore.py +86 -86
  68. crawlo/middleware/response_code.py +163 -163
  69. crawlo/middleware/response_filter.py +136 -136
  70. crawlo/middleware/retry.py +124 -124
  71. crawlo/middleware/simple_proxy.py +65 -65
  72. crawlo/mode_manager.py +212 -187
  73. crawlo/network/__init__.py +21 -21
  74. crawlo/network/request.py +379 -379
  75. crawlo/network/response.py +359 -359
  76. crawlo/pipelines/__init__.py +21 -21
  77. crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
  78. crawlo/pipelines/console_pipeline.py +39 -39
  79. crawlo/pipelines/csv_pipeline.py +316 -316
  80. crawlo/pipelines/database_dedup_pipeline.py +222 -222
  81. crawlo/pipelines/json_pipeline.py +218 -218
  82. crawlo/pipelines/memory_dedup_pipeline.py +115 -115
  83. crawlo/pipelines/mongo_pipeline.py +131 -131
  84. crawlo/pipelines/mysql_pipeline.py +325 -318
  85. crawlo/pipelines/pipeline_manager.py +76 -75
  86. crawlo/pipelines/redis_dedup_pipeline.py +166 -166
  87. crawlo/project.py +327 -325
  88. crawlo/queue/pqueue.py +43 -37
  89. crawlo/queue/queue_manager.py +503 -379
  90. crawlo/queue/redis_priority_queue.py +326 -306
  91. crawlo/settings/__init__.py +7 -7
  92. crawlo/settings/default_settings.py +321 -225
  93. crawlo/settings/setting_manager.py +214 -198
  94. crawlo/spider/__init__.py +657 -639
  95. crawlo/stats_collector.py +73 -59
  96. crawlo/subscriber.py +129 -129
  97. crawlo/task_manager.py +139 -30
  98. crawlo/templates/crawlo.cfg.tmpl +10 -10
  99. crawlo/templates/project/__init__.py.tmpl +3 -3
  100. crawlo/templates/project/items.py.tmpl +17 -17
  101. crawlo/templates/project/middlewares.py.tmpl +118 -118
  102. crawlo/templates/project/pipelines.py.tmpl +96 -96
  103. crawlo/templates/project/settings.py.tmpl +168 -267
  104. crawlo/templates/project/settings_distributed.py.tmpl +167 -180
  105. crawlo/templates/project/settings_gentle.py.tmpl +167 -61
  106. crawlo/templates/project/settings_high_performance.py.tmpl +168 -131
  107. crawlo/templates/project/settings_minimal.py.tmpl +66 -35
  108. crawlo/templates/project/settings_simple.py.tmpl +165 -102
  109. crawlo/templates/project/spiders/__init__.py.tmpl +10 -6
  110. crawlo/templates/run.py.tmpl +34 -38
  111. crawlo/templates/spider/spider.py.tmpl +143 -143
  112. crawlo/templates/spiders_init.py.tmpl +10 -0
  113. crawlo/tools/__init__.py +200 -200
  114. crawlo/tools/anti_crawler.py +268 -268
  115. crawlo/tools/authenticated_proxy.py +240 -240
  116. crawlo/tools/data_formatter.py +225 -225
  117. crawlo/tools/data_validator.py +180 -180
  118. crawlo/tools/date_tools.py +289 -289
  119. crawlo/tools/distributed_coordinator.py +388 -388
  120. crawlo/tools/encoding_converter.py +127 -127
  121. crawlo/tools/network_diagnostic.py +365 -0
  122. crawlo/tools/request_tools.py +82 -82
  123. crawlo/tools/retry_mechanism.py +224 -224
  124. crawlo/tools/scenario_adapter.py +262 -262
  125. crawlo/tools/text_cleaner.py +232 -232
  126. crawlo/utils/__init__.py +34 -34
  127. crawlo/utils/batch_processor.py +259 -259
  128. crawlo/utils/class_loader.py +26 -0
  129. crawlo/utils/controlled_spider_mixin.py +439 -439
  130. crawlo/utils/db_helper.py +343 -343
  131. crawlo/utils/enhanced_error_handler.py +356 -356
  132. crawlo/utils/env_config.py +142 -142
  133. crawlo/utils/error_handler.py +165 -124
  134. crawlo/utils/func_tools.py +82 -82
  135. crawlo/utils/large_scale_config.py +286 -286
  136. crawlo/utils/large_scale_helper.py +344 -344
  137. crawlo/utils/log.py +80 -200
  138. crawlo/utils/performance_monitor.py +285 -285
  139. crawlo/utils/queue_helper.py +175 -175
  140. crawlo/utils/redis_connection_pool.py +388 -351
  141. crawlo/utils/redis_key_validator.py +198 -198
  142. crawlo/utils/request.py +267 -267
  143. crawlo/utils/request_serializer.py +225 -218
  144. crawlo/utils/spider_loader.py +61 -61
  145. crawlo/utils/system.py +11 -11
  146. crawlo/utils/tools.py +4 -4
  147. crawlo/utils/url.py +39 -39
  148. {crawlo-1.3.3.dist-info → crawlo-1.3.5.dist-info}/METADATA +1126 -1020
  149. crawlo-1.3.5.dist-info/RECORD +288 -0
  150. examples/__init__.py +7 -7
  151. tests/__init__.py +7 -7
  152. tests/advanced_tools_example.py +275 -275
  153. tests/authenticated_proxy_example.py +107 -107
  154. tests/baidu_performance_test.py +109 -0
  155. tests/baidu_test.py +60 -0
  156. tests/cleaners_example.py +160 -160
  157. tests/comprehensive_framework_test.py +213 -0
  158. tests/comprehensive_test.py +82 -0
  159. tests/comprehensive_testing_summary.md +187 -0
  160. tests/config_validation_demo.py +142 -142
  161. tests/controlled_spider_example.py +205 -205
  162. tests/date_tools_example.py +180 -180
  163. tests/debug_configure.py +70 -0
  164. tests/debug_framework_logger.py +85 -0
  165. tests/debug_log_config.py +127 -0
  166. tests/debug_log_levels.py +64 -0
  167. tests/debug_pipelines.py +66 -66
  168. tests/detailed_log_test.py +234 -0
  169. tests/distributed_test.py +67 -0
  170. tests/distributed_test_debug.py +77 -0
  171. tests/dynamic_loading_example.py +523 -523
  172. tests/dynamic_loading_test.py +104 -104
  173. tests/env_config_example.py +133 -133
  174. tests/error_handling_example.py +171 -171
  175. tests/final_command_test_report.md +0 -0
  176. tests/final_comprehensive_test.py +152 -0
  177. tests/final_log_test.py +261 -0
  178. tests/final_validation_test.py +183 -0
  179. tests/fix_log_test.py +143 -0
  180. tests/framework_performance_test.py +203 -0
  181. tests/log_buffering_test.py +112 -0
  182. tests/log_generation_timing_test.py +154 -0
  183. tests/optimized_performance_test.py +212 -0
  184. tests/performance_comparison.py +246 -0
  185. tests/queue_blocking_test.py +114 -0
  186. tests/queue_test.py +90 -0
  187. tests/redis_key_validation_demo.py +130 -130
  188. tests/request_params_example.py +150 -150
  189. tests/response_improvements_example.py +144 -144
  190. tests/scrapy_comparison/ofweek_scrapy.py +139 -0
  191. tests/scrapy_comparison/scrapy_test.py +134 -0
  192. tests/simple_command_test.py +120 -0
  193. tests/simple_crawlo_test.py +128 -0
  194. tests/simple_log_test.py +58 -0
  195. tests/simple_log_test2.py +138 -0
  196. tests/simple_optimization_test.py +129 -0
  197. tests/simple_spider_test.py +50 -0
  198. tests/simple_test.py +48 -0
  199. tests/spider_log_timing_test.py +178 -0
  200. tests/test_advanced_tools.py +148 -148
  201. tests/test_all_commands.py +231 -0
  202. tests/test_all_redis_key_configs.py +145 -145
  203. tests/test_authenticated_proxy.py +141 -141
  204. tests/test_batch_processor.py +179 -0
  205. tests/test_cleaners.py +54 -54
  206. tests/test_component_factory.py +175 -0
  207. tests/test_comprehensive.py +146 -146
  208. tests/test_config_consistency.py +80 -80
  209. tests/test_config_merge.py +152 -152
  210. tests/test_config_validator.py +182 -182
  211. tests/test_controlled_spider_mixin.py +80 -0
  212. tests/test_crawlo_proxy_integration.py +108 -108
  213. tests/test_date_tools.py +123 -123
  214. tests/test_default_header_middleware.py +158 -158
  215. tests/test_distributed.py +65 -65
  216. tests/test_double_crawlo_fix.py +207 -207
  217. tests/test_double_crawlo_fix_simple.py +124 -124
  218. tests/test_download_delay_middleware.py +221 -221
  219. tests/test_downloader_proxy_compatibility.py +268 -268
  220. tests/test_dynamic_downloaders_proxy.py +124 -124
  221. tests/test_dynamic_proxy.py +92 -92
  222. tests/test_dynamic_proxy_config.py +146 -146
  223. tests/test_dynamic_proxy_real.py +109 -109
  224. tests/test_edge_cases.py +303 -303
  225. tests/test_enhanced_error_handler.py +270 -270
  226. tests/test_enhanced_error_handler_comprehensive.py +246 -0
  227. tests/test_env_config.py +121 -121
  228. tests/test_error_handler_compatibility.py +112 -112
  229. tests/test_factories.py +253 -0
  230. tests/test_final_validation.py +153 -153
  231. tests/test_framework_env_usage.py +103 -103
  232. tests/test_framework_logger.py +67 -0
  233. tests/test_framework_startup.py +65 -0
  234. tests/test_get_component_logger.py +84 -0
  235. tests/test_integration.py +169 -169
  236. tests/test_item_dedup_redis_key.py +122 -122
  237. tests/test_large_scale_config.py +113 -0
  238. tests/test_large_scale_helper.py +236 -0
  239. tests/test_logging_system.py +283 -0
  240. tests/test_mode_change.py +73 -0
  241. tests/test_mode_consistency.py +51 -51
  242. tests/test_offsite_middleware.py +221 -221
  243. tests/test_parsel.py +29 -29
  244. tests/test_performance.py +327 -327
  245. tests/test_performance_monitor.py +116 -0
  246. tests/test_proxy_api.py +264 -264
  247. tests/test_proxy_health_check.py +32 -32
  248. tests/test_proxy_middleware.py +121 -121
  249. tests/test_proxy_middleware_enhanced.py +216 -216
  250. tests/test_proxy_middleware_integration.py +136 -136
  251. tests/test_proxy_middleware_refactored.py +184 -184
  252. tests/test_proxy_providers.py +56 -56
  253. tests/test_proxy_stats.py +19 -19
  254. tests/test_proxy_strategies.py +59 -59
  255. tests/test_queue_empty_check.py +42 -0
  256. tests/test_queue_manager_double_crawlo.py +173 -173
  257. tests/test_queue_manager_redis_key.py +176 -176
  258. tests/test_random_user_agent.py +72 -72
  259. tests/test_real_scenario_proxy.py +195 -195
  260. tests/test_redis_config.py +28 -28
  261. tests/test_redis_connection_pool.py +294 -294
  262. tests/test_redis_key_naming.py +181 -181
  263. tests/test_redis_key_validator.py +123 -123
  264. tests/test_redis_queue.py +224 -224
  265. tests/test_request_ignore_middleware.py +182 -182
  266. tests/test_request_params.py +111 -111
  267. tests/test_request_serialization.py +70 -70
  268. tests/test_response_code_middleware.py +349 -349
  269. tests/test_response_filter_middleware.py +427 -427
  270. tests/test_response_improvements.py +152 -152
  271. tests/test_retry_middleware.py +241 -241
  272. tests/test_scheduler.py +252 -252
  273. tests/test_scheduler_config_update.py +133 -133
  274. tests/test_simple_response.py +61 -61
  275. tests/test_telecom_spider_redis_key.py +205 -205
  276. tests/test_template_content.py +87 -87
  277. tests/test_template_redis_key.py +134 -134
  278. tests/test_tools.py +159 -159
  279. tests/test_user_agents.py +96 -96
  280. tests/tools_example.py +260 -260
  281. tests/untested_features_report.md +139 -0
  282. tests/verify_debug.py +52 -0
  283. tests/verify_distributed.py +117 -117
  284. tests/verify_log_fix.py +112 -0
  285. crawlo-1.3.3.dist-info/RECORD +0 -219
  286. tests/DOUBLE_CRAWLO_PREFIX_FIX_REPORT.md +0 -82
  287. {crawlo-1.3.3.dist-info → crawlo-1.3.5.dist-info}/WHEEL +0 -0
  288. {crawlo-1.3.3.dist-info → crawlo-1.3.5.dist-info}/entry_points.txt +0 -0
  289. {crawlo-1.3.3.dist-info → crawlo-1.3.5.dist-info}/top_level.txt +0 -0
crawlo/templates/project/settings_simple.py.tmpl
@@ -1,102 +1,165 @@
- # -*- coding: UTF-8 -*-
- """
- Simplified-mode configuration template
- Minimal settings, suited to quick starts and simple projects
- """
-
- # ============================== Basic project info ==============================
- PROJECT_NAME = '{{project_name}}'
-
- # ============================== Simplified run mode ==============================
- # Run mode: 'standalone', 'distributed', or 'auto' (auto-detect)
- RUN_MODE = 'standalone'  # Standalone mode - for development and small-scale crawling
-
- # Concurrency settings
- CONCURRENCY = 4  # Low concurrency to keep resource usage down
- DOWNLOAD_DELAY = 1.0  # Longer delay to reduce load on the target site
-
- # ============================== Queue settings ==============================
-
- # Note: the framework ships default queue settings; these rarely need changes
- # To customise, uncomment and adjust the values
-
- # Queue type: 'auto' (auto-select), 'memory' (in-memory), 'redis' (distributed)
- # QUEUE_TYPE = 'auto'  # Auto-detect; uses the Redis queue when Redis is available
- # SCHEDULER_MAX_QUEUE_SIZE = 1000
- # SCHEDULER_QUEUE_NAME = f'crawlo:{{project_name}}:queue:requests'
- # QUEUE_MAX_RETRIES = 3
- # QUEUE_TIMEOUT = 300
-
- # ============================== Deduplication settings ==============================
-
- # Note: the framework ships default dedup settings; these rarely need changes
- # To customise, uncomment and adjust the values
-
- # Simplified mode uses the in-memory dedup pipeline and filter
- # DEFAULT_DEDUP_PIPELINE = 'crawlo.pipelines.memory_dedup_pipeline.MemoryDedupPipeline'
- # FILTER_CLASS = 'crawlo.filters.memory_filter.MemoryFilter'
-
- # --- Redis settings (for distributed dedup and queues) ---
- # REDIS_HOST = '127.0.0.1'
- # REDIS_PORT = 6379
- # REDIS_PASSWORD = ''  # Fill in if a password is required
-
- # Build the URL depending on whether a password is set
- # if REDIS_PASSWORD:
- #     REDIS_URL = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0'
- # else:
- #     REDIS_URL = f'redis://{REDIS_HOST}:{REDIS_PORT}/0'
-
- # Redis key settings now live in the individual components and follow a unified naming scheme
- # crawlo:{project_name}:filter:fingerprint (request dedup)
- # crawlo:{project_name}:item:fingerprint (item dedup)
- # crawlo:{project_name}:queue:requests (request queue)
- # crawlo:{project_name}:queue:processing (in-progress queue)
- # crawlo:{project_name}:queue:failed (failed queue)
-
- # REDIS_TTL = 0
- # CLEANUP_FP = 0
- # FILTER_DEBUG = True
- # DECODE_RESPONSES = True
-
- # ============================== Domain filtering ==============================
- # OffsiteMiddleware settings: restrict the crawler to the listed domains
- # To enable domain filtering, uncomment and list the allowed domains
- # ALLOWED_DOMAINS = ['example.com', 'www.example.com']
-
- # ============================== User-defined middlewares ==============================
- # Note: default middlewares are loaded automatically; add to or override them here
-
- # Middleware list (framework defaults + user-defined middlewares)
- # MIDDLEWARES = [
- #     '{{project_name}}.middlewares.CustomMiddleware',  # Example custom middleware
- # ]
-
- # ============================== User-defined pipelines ==============================
- # Note: default pipelines are loaded automatically; add to or override them here
-
- # Pipeline list (framework defaults + user-defined pipelines)
- # PIPELINES = [
- #     '{{project_name}}.pipelines.DatabasePipeline',  # Custom database pipeline
- #     'crawlo.pipelines.mysql_pipeline.AsyncmyMySQLPipeline',  # MySQL storage
- #     'crawlo.pipelines.mongo_pipeline.MongoPipeline',  # MongoDB storage
- # ]
-
- # ============================== User-defined extensions ==============================
- # Note: default extensions are loaded automatically; add to or override them here
-
- # Extension list (framework defaults + user-defined extensions)
- # EXTENSIONS = [
- #     'crawlo.extension.memory_monitor.MemoryMonitorExtension',  # Memory monitoring
- #     'crawlo.extension.request_recorder.RequestRecorderExtension',  # Request recording
- #     'crawlo.extension.performance_profiler.PerformanceProfilerExtension',  # Performance profiling
- #     'crawlo.extension.health_check.HealthCheckExtension',  # Health checks
- # ]
-
- # ============================== Logging ==============================
-
- LOG_LEVEL = 'INFO'
- STATS_DUMP = True
- LOG_FILE = f'logs/{{project_name}}.log'
- LOG_FORMAT = '%(asctime)s - [%(name)s] - %(levelname)s: %(message)s'
- LOG_ENCODING = 'utf-8'
+ # -*- coding: UTF-8 -*-
+ """
+ {{project_name}} project configuration (simplified)
+ =============================
+ Simplified crawler project settings for the Crawlo framework.
+ Suited to quick starts and simple projects.
+ """
+
+ # ============================== Basic project info ==============================
+ PROJECT_NAME = '{{project_name}}'
+
+ # ============================== Run mode ==============================
+ RUN_MODE = 'standalone'
+
+ # ============================== Concurrency ==============================
+ CONCURRENCY = 4
+ MAX_RUNNING_SPIDERS = 1
+ DOWNLOAD_DELAY = 1.0
+
+ # ============================== Downloader ==============================
+ # Available downloaders:
+ # DOWNLOADER = 'crawlo.downloader.aiohttp_downloader.AioHttpDownloader'
+ # DOWNLOADER = 'crawlo.downloader.httpx_downloader.HttpXDownloader'
+ # DOWNLOADER = 'crawlo.downloader.cffi_downloader.CurlCffiDownloader'
+ DOWNLOADER = 'crawlo.downloader.aiohttp_downloader.AioHttpDownloader'
+
+ # ============================== Queue ==============================
+ QUEUE_TYPE = 'memory'
+
+ # ============================== Dedup filter ==============================
+ FILTER_CLASS = 'crawlo.filters.memory_filter.MemoryFilter'
+
+ # ============================== Default dedup pipeline ==============================
+ DEFAULT_DEDUP_PIPELINE = 'crawlo.pipelines.memory_dedup_pipeline.MemoryDedupPipeline'
+
+ # ============================== Spider modules ==============================
+ SPIDER_MODULES = ['{{project_name}}.spiders']
+
+ # ============================== Middlewares ==============================
+ # MIDDLEWARES = [
+ #     'crawlo.middleware.simple_proxy.SimpleProxyMiddleware',
+ # ]
+
+ # ============================== Default request headers ==============================
+ # Default headers applied by DefaultHeaderMiddleware
+ DEFAULT_REQUEST_HEADERS = {
+     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+     'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
+     'Accept-Encoding': 'gzip, deflate, br',
+ }
+
+ # ============================== Allowed domains ==============================
+ # Allowed domains for OffsiteMiddleware
+ # ALLOWED_DOMAINS = ['example.com']
+
+ # ============================== Pipelines ==============================
+ # PIPELINES = [
+ #     'crawlo.pipelines.mysql_pipeline.AsyncmyMySQLPipeline',  # MySQL storage (async asyncmy driver)
+ # ]
+
+ # ============================== Extensions ==============================
+ # EXTENSIONS = [
+ #     'crawlo.extension.log_interval.LogIntervalExtension',
+ #     'crawlo.extension.log_stats.LogStats',
+ #     'crawlo.extension.logging_extension.CustomLoggerExtension',
+ # ]
+
+ # ============================== Logging ==============================
+ LOG_LEVEL = 'INFO'
+ LOG_FILE = 'logs/{{project_name}}.log'
+ LOG_ENCODING = 'utf-8'  # Explicitly set the log-file encoding
+ STATS_DUMP = True
+
+ # ============================== Output ==============================
+ OUTPUT_DIR = 'output'
+
+ # ============================== Redis ==============================
+ REDIS_HOST = '127.0.0.1'
+ REDIS_PORT = 6379
+ REDIS_PASSWORD = ''
+ REDIS_DB = 0
+
+ # Build the URL depending on whether a password is set
+ if REDIS_PASSWORD:
+     REDIS_URL = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'
+ else:
+     REDIS_URL = f'redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'
+
+ # ============================== MySQL ==============================
+ MYSQL_HOST = '127.0.0.1'
+ MYSQL_PORT = 3306
+ MYSQL_USER = 'root'
+ MYSQL_PASSWORD = '123456'
+ MYSQL_DB = '{{project_name}}'
+ MYSQL_TABLE = '{{project_name}}_data'
+ MYSQL_BATCH_SIZE = 100
+ MYSQL_USE_BATCH = False  # Enable batch inserts
+
+ # ============================== MongoDB ==============================
+ MONGO_URI = 'mongodb://localhost:27017'
+ MONGO_DATABASE = '{{project_name}}_db'
+ MONGO_COLLECTION = '{{project_name}}_items'
+ MONGO_MAX_POOL_SIZE = 200
+ MONGO_MIN_POOL_SIZE = 20
+ MONGO_BATCH_SIZE = 100  # Batch insert size
+ MONGO_USE_BATCH = False  # Enable batch inserts
+
+ # ============================== Proxy ==============================
+ # Proxy support is disabled by default; enable and configure it in the project settings if needed
+ PROXY_ENABLED = False  # Enable proxy support
+
+ # Simple proxy settings (for SimpleProxyMiddleware)
+ PROXY_LIST = []  # Proxy list, e.g. ["http://proxy1:8080", "http://proxy2:8080"]
+
+ # Advanced proxy settings (for ProxyMiddleware)
+ PROXY_API_URL = ""  # Proxy-fetching API (replace with a real endpoint)
+
+ # Proxy extraction method (field path or callable)
+ # Example: "proxy" for {"proxy": "http://1.1.1.1:8080"}
+ # Example: "data.proxy" for {"data": {"proxy": "http://1.1.1.1:8080"}}
+ PROXY_EXTRACTOR = "proxy"
+
+ # Proxy refresh control
+ PROXY_REFRESH_INTERVAL = 60  # Proxy refresh interval (seconds)
+ PROXY_API_TIMEOUT = 10  # Proxy API request timeout (seconds)
+
+ # ============================== Curl-Cffi specific settings ==============================
+ # Browser fingerprint emulation (CurlCffi downloader only)
+ CURL_BROWSER_TYPE = "chrome"  # Options: chrome, edge, safari, firefox, or a version such as chrome136
+
+ # Custom browser version map (overrides the defaults)
+ CURL_BROWSER_VERSION_MAP = {
+     "chrome": "chrome136",
+     "edge": "edge101",
+     "safari": "safari184",
+     "firefox": "firefox135",
+ }
+
+ # ============================== Downloader tuning ==============================
+ # Downloader health checks
+ DOWNLOADER_HEALTH_CHECK = True  # Enable downloader health checks
+ HEALTH_CHECK_INTERVAL = 60  # Health-check interval (seconds)
+
+ # Request statistics
+ REQUEST_STATS_ENABLED = True  # Enable request statistics
+ STATS_RESET_ON_START = False  # Reset statistics on startup
+
+ # HttpX downloader settings
+ HTTPX_HTTP2 = True  # Enable HTTP/2 support
+ HTTPX_FOLLOW_REDIRECTS = True  # Follow redirects automatically
+
+ # AioHttp downloader settings
+ AIOHTTP_AUTO_DECOMPRESS = True  # Decompress responses automatically
+ AIOHTTP_FORCE_CLOSE = False  # Force-close connections
+
+ # General tuning
+ CONNECTION_TTL_DNS_CACHE = 300  # DNS cache TTL (seconds)
+ CONNECTION_KEEPALIVE_TIMEOUT = 15  # Keep-Alive timeout (seconds)
+
+ # ============================== Memory monitoring ==============================
+ # The memory-monitor extension is disabled by default; enable it in the project settings if needed
+ MEMORY_MONITOR_ENABLED = False  # Enable memory monitoring
+ MEMORY_MONITOR_INTERVAL = 60  # Memory-monitor check interval (seconds)
+ MEMORY_WARNING_THRESHOLD = 80.0  # Memory-usage warning threshold (%)
+ MEMORY_CRITICAL_THRESHOLD = 90.0  # Memory-usage critical threshold (%)
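
The PROXY_EXTRACTOR comments in the new template describe pulling the proxy URL out of the proxy API's response by a dotted field path such as "data.proxy". Below is a minimal sketch of how that kind of path lookup can work, assuming the API response has already been parsed into a dict; extract_by_path is a hypothetical helper for illustration, not crawlo's actual ProxyMiddleware code.

from typing import Any

def extract_by_path(data: dict, path: str) -> Any:
    """Walk a dotted path such as 'data.proxy' through nested dicts."""
    value: Any = data
    for key in path.split('.'):
        if not isinstance(value, dict) or key not in value:
            raise KeyError(f"path {path!r} has no segment {key!r}")
        value = value[key]
    return value

# Shapes matching the examples in the template comments:
print(extract_by_path({"proxy": "http://1.1.1.1:8080"}, "proxy"))            # -> http://1.1.1.1:8080
print(extract_by_path({"data": {"proxy": "http://1.1.1.1:8080"}}, "data.proxy"))  # -> http://1.1.1.1:8080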
crawlo/templates/project/spiders/__init__.py.tmpl
@@ -1,6 +1,10 @@
- # -*- coding: UTF-8 -*-
- """
- {{project_name}}.spiders
- ========================
- All of the project's spiders live here.
- """
+ # -*- coding: UTF-8 -*-
+ """
+ {{project_name}}.spiders
+ ========================
+ All of the project's spiders live here.
+ """
+
+ # Auto-import all spiders so that they are registered
+ # Example:
+ # from .YourSpider import YourSpider
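
The added comment relies on the framework scanning SPIDER_MODULES and importing every module it finds so that Spider subclasses register themselves. As a rough illustration of how such package scanning is commonly done with the standard library (crawlo's own loader lives in crawlo/utils/spider_loader.py and may work differently):

import importlib
import pkgutil

def import_all_spiders(package_name: str) -> list:
    """Import every module in a spiders package so its Spider subclasses get registered."""
    package = importlib.import_module(package_name)
    return [
        importlib.import_module(f"{package_name}.{module_info.name}")
        for module_info in pkgutil.iter_modules(package.__path__)
    ]

# e.g. import_all_spiders('myproject.spiders')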
crawlo/templates/run.py.tmpl
@@ -1,39 +1,35 @@
- #!/usr/bin/env python
- # -*- coding: UTF-8 -*-
- """
- {{project_name}} project run script
- ============================
- A simplified spider launcher built on the Crawlo framework.
- """
- import sys
- import asyncio
-
- from crawlo.crawler import CrawlerProcess
-
-
- def main():
-     """Main entry point: run a fixed spider."""
-     print("🚀 Starting the {{project_name}} spider")
-
-     # Create the crawler process (default settings load automatically)
-     try:
-         # Make sure the spider modules are imported correctly
-         spider_modules = ['{{project_name}}.spiders']
-         process = CrawlerProcess(spider_modules=spider_modules)
-         print("✅ Crawler process initialised")
-
-         # Run the fixed spider
-         # TODO: replace 'your_spider_name' with the actual spider name
-         asyncio.run(process.crawl('your_spider_name'))
-
-         print("✅ Crawl finished")
-
-     except Exception as e:
-         print(f"❌ Run failed: {e}")
-         import traceback
-         traceback.print_exc()
-         sys.exit(1)
-
-
- if __name__ == '__main__':
+ #!/usr/bin/env python3
+ # -*- coding: UTF-8 -*-
+ """
+ {{project_name}} project run script
+ ============================
+ A simplified spider launcher built on the Crawlo framework.
+
+ The framework handles importing and registering spider modules automatically; no manual imports are needed.
+ Just set the spider_modules argument and the framework scans and imports every spider.
+ """
+ import sys
+ import asyncio
+
+ from crawlo.crawler import CrawlerProcess
+
+
+ def main():
+     """Main entry point: run the spider."""
+     try:
+         # Point at the spider module path; the framework imports and registers every spider
+         spider_modules = ['{{project_name}}.spiders']
+         process = CrawlerProcess(spider_modules=spider_modules)
+
+         # TODO: run the spider you want
+         asyncio.run(process.crawl('spider_name'))
+
+     except Exception as e:
+         print(f"❌ Run failed: {e}")
+         import traceback
+         traceback.print_exc()
+         sys.exit(1)
+
+
+ if __name__ == '__main__':
      main()
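
The new run template hard-codes a single spider name behind a TODO. A small variation, using only the CrawlerProcess API shown above (and keeping the template's {{project_name}} placeholder), that takes the spider name from the command line instead; this is a sketch, not part of the generated template.

import sys
import asyncio

from crawlo.crawler import CrawlerProcess


def main():
    # Expect the spider name as the first command-line argument
    if len(sys.argv) < 2:
        print("usage: python run.py <spider_name>")
        sys.exit(2)
    process = CrawlerProcess(spider_modules=['{{project_name}}.spiders'])
    asyncio.run(process.crawl(sys.argv[1]))


if __name__ == '__main__':
    main()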
crawlo/templates/spider/spider.py.tmpl
@@ -1,144 +1,144 @@
All 143 changed lines of this template are removed and re-added with identical visible content (an invisible change such as line endings or trailing whitespace); the final context line, `pass`, is unchanged. The template reads:

# -*- coding: UTF-8 -*-
"""
{{project_name}}.spiders.{{spider_name}}
=======================================
Spider generated by the `crawlo genspider` command.
Built on the Crawlo framework, with async concurrency, distributed crawling, and more.

Usage example:
    crawlo crawl {{spider_name}}
"""

from crawlo.spider import Spider
from crawlo import Request
from ..items import ExampleItem


class {{class_name}}(Spider):
    """
    Spider: {{spider_name}}

    Features:
    - Concurrent crawling
    - Automatic deduplication
    - Retry on errors
    - Pipeline-based item processing
    """
    name = '{{spider_name}}'
    allowed_domains = ['{{domain}}']
    start_urls = ['https://{{domain}}/']

    # Advanced settings (optional)
    # custom_settings = {
    #     'DOWNLOAD_DELAY': 2.0,
    #     'CONCURRENCY': 4,
    #     'RETRY_HTTP_CODES': [500, 502, 503, 504, 408, 429],
    #     'ALLOWED_RESPONSE_CODES': [200, 301, 302],  # Only allow specific status codes
    #     'DENIED_RESPONSE_CODES': [403, 404],  # Reject specific status codes
    # }

    def start_requests(self):
        """
        Generate the initial requests.

        Supports custom headers, proxies, priorities, and more.
        """
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
        }

        for url in self.start_urls:
            yield Request(
                url=url,
                callback=self.parse,
                headers=headers,
                # meta={'proxy': 'http://proxy.example.com:8080'},  # Custom proxy
                # priority=10,  # Request priority (higher numbers run first)
            )

    def parse(self, response):
        """
        Main response-parsing method.

        Args:
            response: the response object, carrying page content and metadata

        Yields:
            Request: new request objects (for deeper crawling)
            Item: item objects (for storage)
        """
        self.logger.info(f'Parsing page: {response.url}')

        # ================== Data extraction examples ==================

        # Extract data into an Item
        # item = {{item_class}}()
        # item['title'] = response.xpath('//title/text()').get(default='')
        # item['url'] = response.url
        # item['content'] = response.xpath('//div[@class="content"]//text()').getall()
        # yield item

        # Or yield a plain dict (simple data)
        yield {
            'title': response.xpath('//title/text()').get(default=''),
            'url': response.url,
            'status_code': response.status_code,
            # 'description': response.xpath('//meta[@name="description"]/@content').get(),
            # 'keywords': response.xpath('//meta[@name="keywords"]/@content').get(),
        }

        # ================== Link extraction examples ==================

        # Extract and follow links
        # links = response.xpath('//a/@href').getall()
        # for link in links:
        #     # Keep only valid links
        #     if link and not link.startswith(('javascript:', 'mailto:', '#')):
        #         yield response.follow(
        #             link,
        #             callback=self.parse_detail,  # or self.parse to keep recursing
        #             meta={'parent_url': response.url}  # pass parent-page info along
        #         )

        # Extract links with CSS selectors
        # for link in response.css('a.item-link::attr(href)').getall():
        #     yield response.follow(link, callback=self.parse_detail)

        # ================== Pagination examples ==================

        # Follow a "next page" link
        # next_page = response.xpath('//a[@class="next"]/@href').get()
        # if next_page:
        #     yield response.follow(next_page, callback=self.parse)

        # Numeric pagination
        # current_page = int(response.meta.get('page', 1))
        # max_pages = 100  # Maximum number of pages
        # if current_page < max_pages:
        #     next_url = f'https://{{domain}}/page/{current_page + 1}'
        #     yield Request(
        #         url=next_url,
        #         callback=self.parse,
        #         meta={'page': current_page + 1}
        #     )

    def parse_detail(self, response):
        """
        Parse a detail page (optional).

        Handles detail pages reached from list pages.
        """
        self.logger.info(f'Parsing detail page: {response.url}')

        # parent_url = response.meta.get('parent_url', '')
        #
        # yield {
        #     'title': response.xpath('//h1/text()').get(default=''),
        #     'content': '\n'.join(response.xpath('//div[@class="content"]//text()').getall()),
        #     'url': response.url,
        #     'parent_url': parent_url,
        #     'publish_time': response.xpath('//time/@datetime').get(),
        # }

        pass
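
The extraction lines in parse() rely on XPath selectors with `.get(default='')` and `.getall()`. The same calls can be tried in isolation with parsel, which the test suite exercises (tests/test_parsel.py); this is a standalone sketch, not part of the template.

from parsel import Selector

html = '<html><head><title>Demo page</title></head><body><p>hello</p></body></html>'
sel = Selector(text=html)
print(sel.xpath('//title/text()').get(default=''))  # -> 'Demo page'
print(sel.xpath('//h1/text()').get(default=''))     # -> '' (default used when the node is missing)
print(sel.xpath('//p/text()').getall())             # -> ['hello']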