crawlo 1.3.3__py3-none-any.whl → 1.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (279)
  1. crawlo/__init__.py +87 -63
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +75 -75
  4. crawlo/commands/__init__.py +14 -14
  5. crawlo/commands/check.py +594 -594
  6. crawlo/commands/genspider.py +151 -151
  7. crawlo/commands/help.py +138 -138
  8. crawlo/commands/list.py +155 -155
  9. crawlo/commands/run.py +341 -323
  10. crawlo/commands/startproject.py +436 -436
  11. crawlo/commands/stats.py +187 -187
  12. crawlo/commands/utils.py +196 -196
  13. crawlo/config.py +312 -312
  14. crawlo/config_validator.py +277 -277
  15. crawlo/core/__init__.py +46 -2
  16. crawlo/core/engine.py +439 -365
  17. crawlo/core/processor.py +40 -40
  18. crawlo/core/scheduler.py +257 -256
  19. crawlo/crawler.py +639 -1167
  20. crawlo/data/__init__.py +5 -5
  21. crawlo/data/user_agents.py +194 -194
  22. crawlo/downloader/__init__.py +273 -273
  23. crawlo/downloader/aiohttp_downloader.py +228 -226
  24. crawlo/downloader/cffi_downloader.py +245 -245
  25. crawlo/downloader/httpx_downloader.py +259 -259
  26. crawlo/downloader/hybrid_downloader.py +212 -212
  27. crawlo/downloader/playwright_downloader.py +402 -402
  28. crawlo/downloader/selenium_downloader.py +472 -472
  29. crawlo/event.py +11 -11
  30. crawlo/exceptions.py +81 -81
  31. crawlo/extension/__init__.py +39 -39
  32. crawlo/extension/health_check.py +141 -141
  33. crawlo/extension/log_interval.py +57 -57
  34. crawlo/extension/log_stats.py +81 -81
  35. crawlo/extension/logging_extension.py +61 -52
  36. crawlo/extension/memory_monitor.py +104 -104
  37. crawlo/extension/performance_profiler.py +133 -133
  38. crawlo/extension/request_recorder.py +107 -107
  39. crawlo/factories/__init__.py +28 -0
  40. crawlo/factories/base.py +69 -0
  41. crawlo/factories/crawler.py +104 -0
  42. crawlo/factories/registry.py +85 -0
  43. crawlo/filters/__init__.py +154 -154
  44. crawlo/filters/aioredis_filter.py +257 -234
  45. crawlo/filters/memory_filter.py +269 -269
  46. crawlo/framework.py +292 -0
  47. crawlo/initialization/__init__.py +40 -0
  48. crawlo/initialization/built_in.py +426 -0
  49. crawlo/initialization/context.py +142 -0
  50. crawlo/initialization/core.py +194 -0
  51. crawlo/initialization/phases.py +149 -0
  52. crawlo/initialization/registry.py +146 -0
  53. crawlo/items/__init__.py +23 -23
  54. crawlo/items/base.py +23 -22
  55. crawlo/items/fields.py +52 -52
  56. crawlo/items/items.py +104 -104
  57. crawlo/logging/__init__.py +38 -0
  58. crawlo/logging/config.py +97 -0
  59. crawlo/logging/factory.py +129 -0
  60. crawlo/logging/manager.py +112 -0
  61. crawlo/middleware/__init__.py +21 -21
  62. crawlo/middleware/default_header.py +132 -132
  63. crawlo/middleware/download_delay.py +104 -104
  64. crawlo/middleware/middleware_manager.py +135 -135
  65. crawlo/middleware/offsite.py +123 -123
  66. crawlo/middleware/proxy.py +386 -386
  67. crawlo/middleware/request_ignore.py +86 -86
  68. crawlo/middleware/response_code.py +163 -163
  69. crawlo/middleware/response_filter.py +136 -136
  70. crawlo/middleware/retry.py +124 -124
  71. crawlo/middleware/simple_proxy.py +65 -65
  72. crawlo/mode_manager.py +212 -187
  73. crawlo/network/__init__.py +21 -21
  74. crawlo/network/request.py +379 -379
  75. crawlo/network/response.py +359 -359
  76. crawlo/pipelines/__init__.py +21 -21
  77. crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
  78. crawlo/pipelines/console_pipeline.py +39 -39
  79. crawlo/pipelines/csv_pipeline.py +316 -316
  80. crawlo/pipelines/database_dedup_pipeline.py +222 -222
  81. crawlo/pipelines/json_pipeline.py +218 -218
  82. crawlo/pipelines/memory_dedup_pipeline.py +115 -115
  83. crawlo/pipelines/mongo_pipeline.py +131 -131
  84. crawlo/pipelines/mysql_pipeline.py +318 -318
  85. crawlo/pipelines/pipeline_manager.py +76 -75
  86. crawlo/pipelines/redis_dedup_pipeline.py +166 -166
  87. crawlo/project.py +327 -325
  88. crawlo/queue/pqueue.py +43 -37
  89. crawlo/queue/queue_manager.py +503 -379
  90. crawlo/queue/redis_priority_queue.py +326 -306
  91. crawlo/settings/__init__.py +7 -7
  92. crawlo/settings/default_settings.py +321 -225
  93. crawlo/settings/setting_manager.py +214 -198
  94. crawlo/spider/__init__.py +657 -639
  95. crawlo/stats_collector.py +73 -59
  96. crawlo/subscriber.py +129 -129
  97. crawlo/task_manager.py +139 -30
  98. crawlo/templates/crawlo.cfg.tmpl +10 -10
  99. crawlo/templates/project/__init__.py.tmpl +3 -3
  100. crawlo/templates/project/items.py.tmpl +17 -17
  101. crawlo/templates/project/middlewares.py.tmpl +118 -118
  102. crawlo/templates/project/pipelines.py.tmpl +96 -96
  103. crawlo/templates/project/settings.py.tmpl +168 -267
  104. crawlo/templates/project/settings_distributed.py.tmpl +167 -180
  105. crawlo/templates/project/settings_gentle.py.tmpl +167 -61
  106. crawlo/templates/project/settings_high_performance.py.tmpl +168 -131
  107. crawlo/templates/project/settings_minimal.py.tmpl +66 -35
  108. crawlo/templates/project/settings_simple.py.tmpl +165 -102
  109. crawlo/templates/project/spiders/__init__.py.tmpl +10 -6
  110. crawlo/templates/run.py.tmpl +34 -38
  111. crawlo/templates/spider/spider.py.tmpl +143 -143
  112. crawlo/templates/spiders_init.py.tmpl +10 -0
  113. crawlo/tools/__init__.py +200 -200
  114. crawlo/tools/anti_crawler.py +268 -268
  115. crawlo/tools/authenticated_proxy.py +240 -240
  116. crawlo/tools/data_formatter.py +225 -225
  117. crawlo/tools/data_validator.py +180 -180
  118. crawlo/tools/date_tools.py +289 -289
  119. crawlo/tools/distributed_coordinator.py +388 -388
  120. crawlo/tools/encoding_converter.py +127 -127
  121. crawlo/tools/network_diagnostic.py +365 -0
  122. crawlo/tools/request_tools.py +82 -82
  123. crawlo/tools/retry_mechanism.py +224 -224
  124. crawlo/tools/scenario_adapter.py +262 -262
  125. crawlo/tools/text_cleaner.py +232 -232
  126. crawlo/utils/__init__.py +34 -34
  127. crawlo/utils/batch_processor.py +259 -259
  128. crawlo/utils/class_loader.py +26 -0
  129. crawlo/utils/controlled_spider_mixin.py +439 -439
  130. crawlo/utils/db_helper.py +343 -343
  131. crawlo/utils/enhanced_error_handler.py +356 -356
  132. crawlo/utils/env_config.py +142 -142
  133. crawlo/utils/error_handler.py +165 -124
  134. crawlo/utils/func_tools.py +82 -82
  135. crawlo/utils/large_scale_config.py +286 -286
  136. crawlo/utils/large_scale_helper.py +344 -344
  137. crawlo/utils/log.py +44 -200
  138. crawlo/utils/performance_monitor.py +285 -285
  139. crawlo/utils/queue_helper.py +175 -175
  140. crawlo/utils/redis_connection_pool.py +388 -351
  141. crawlo/utils/redis_key_validator.py +198 -198
  142. crawlo/utils/request.py +267 -267
  143. crawlo/utils/request_serializer.py +225 -218
  144. crawlo/utils/spider_loader.py +61 -61
  145. crawlo/utils/system.py +11 -11
  146. crawlo/utils/tools.py +4 -4
  147. crawlo/utils/url.py +39 -39
  148. {crawlo-1.3.3.dist-info → crawlo-1.3.4.dist-info}/METADATA +1126 -1020
  149. crawlo-1.3.4.dist-info/RECORD +278 -0
  150. examples/__init__.py +7 -7
  151. tests/__init__.py +7 -7
  152. tests/advanced_tools_example.py +275 -275
  153. tests/authenticated_proxy_example.py +107 -107
  154. tests/baidu_performance_test.py +109 -0
  155. tests/baidu_test.py +60 -0
  156. tests/cleaners_example.py +160 -160
  157. tests/comprehensive_framework_test.py +213 -0
  158. tests/comprehensive_test.py +82 -0
  159. tests/comprehensive_testing_summary.md +187 -0
  160. tests/config_validation_demo.py +142 -142
  161. tests/controlled_spider_example.py +205 -205
  162. tests/date_tools_example.py +180 -180
  163. tests/debug_configure.py +70 -0
  164. tests/debug_framework_logger.py +85 -0
  165. tests/debug_log_levels.py +64 -0
  166. tests/debug_pipelines.py +66 -66
  167. tests/distributed_test.py +67 -0
  168. tests/distributed_test_debug.py +77 -0
  169. tests/dynamic_loading_example.py +523 -523
  170. tests/dynamic_loading_test.py +104 -104
  171. tests/env_config_example.py +133 -133
  172. tests/error_handling_example.py +171 -171
  173. tests/final_command_test_report.md +0 -0
  174. tests/final_comprehensive_test.py +152 -0
  175. tests/final_validation_test.py +183 -0
  176. tests/framework_performance_test.py +203 -0
  177. tests/optimized_performance_test.py +212 -0
  178. tests/performance_comparison.py +246 -0
  179. tests/queue_blocking_test.py +114 -0
  180. tests/queue_test.py +90 -0
  181. tests/redis_key_validation_demo.py +130 -130
  182. tests/request_params_example.py +150 -150
  183. tests/response_improvements_example.py +144 -144
  184. tests/scrapy_comparison/ofweek_scrapy.py +139 -0
  185. tests/scrapy_comparison/scrapy_test.py +134 -0
  186. tests/simple_command_test.py +120 -0
  187. tests/simple_crawlo_test.py +128 -0
  188. tests/simple_log_test.py +58 -0
  189. tests/simple_optimization_test.py +129 -0
  190. tests/simple_spider_test.py +50 -0
  191. tests/simple_test.py +48 -0
  192. tests/test_advanced_tools.py +148 -148
  193. tests/test_all_commands.py +231 -0
  194. tests/test_all_redis_key_configs.py +145 -145
  195. tests/test_authenticated_proxy.py +141 -141
  196. tests/test_batch_processor.py +179 -0
  197. tests/test_cleaners.py +54 -54
  198. tests/test_component_factory.py +175 -0
  199. tests/test_comprehensive.py +146 -146
  200. tests/test_config_consistency.py +80 -80
  201. tests/test_config_merge.py +152 -152
  202. tests/test_config_validator.py +182 -182
  203. tests/test_controlled_spider_mixin.py +80 -0
  204. tests/test_crawlo_proxy_integration.py +108 -108
  205. tests/test_date_tools.py +123 -123
  206. tests/test_default_header_middleware.py +158 -158
  207. tests/test_distributed.py +65 -65
  208. tests/test_double_crawlo_fix.py +207 -207
  209. tests/test_double_crawlo_fix_simple.py +124 -124
  210. tests/test_download_delay_middleware.py +221 -221
  211. tests/test_downloader_proxy_compatibility.py +268 -268
  212. tests/test_dynamic_downloaders_proxy.py +124 -124
  213. tests/test_dynamic_proxy.py +92 -92
  214. tests/test_dynamic_proxy_config.py +146 -146
  215. tests/test_dynamic_proxy_real.py +109 -109
  216. tests/test_edge_cases.py +303 -303
  217. tests/test_enhanced_error_handler.py +270 -270
  218. tests/test_enhanced_error_handler_comprehensive.py +246 -0
  219. tests/test_env_config.py +121 -121
  220. tests/test_error_handler_compatibility.py +112 -112
  221. tests/test_factories.py +253 -0
  222. tests/test_final_validation.py +153 -153
  223. tests/test_framework_env_usage.py +103 -103
  224. tests/test_framework_logger.py +67 -0
  225. tests/test_framework_startup.py +65 -0
  226. tests/test_integration.py +169 -169
  227. tests/test_item_dedup_redis_key.py +122 -122
  228. tests/test_large_scale_config.py +113 -0
  229. tests/test_large_scale_helper.py +236 -0
  230. tests/test_mode_change.py +73 -0
  231. tests/test_mode_consistency.py +51 -51
  232. tests/test_offsite_middleware.py +221 -221
  233. tests/test_parsel.py +29 -29
  234. tests/test_performance.py +327 -327
  235. tests/test_performance_monitor.py +116 -0
  236. tests/test_proxy_api.py +264 -264
  237. tests/test_proxy_health_check.py +32 -32
  238. tests/test_proxy_middleware.py +121 -121
  239. tests/test_proxy_middleware_enhanced.py +216 -216
  240. tests/test_proxy_middleware_integration.py +136 -136
  241. tests/test_proxy_middleware_refactored.py +184 -184
  242. tests/test_proxy_providers.py +56 -56
  243. tests/test_proxy_stats.py +19 -19
  244. tests/test_proxy_strategies.py +59 -59
  245. tests/test_queue_empty_check.py +42 -0
  246. tests/test_queue_manager_double_crawlo.py +173 -173
  247. tests/test_queue_manager_redis_key.py +176 -176
  248. tests/test_random_user_agent.py +72 -72
  249. tests/test_real_scenario_proxy.py +195 -195
  250. tests/test_redis_config.py +28 -28
  251. tests/test_redis_connection_pool.py +294 -294
  252. tests/test_redis_key_naming.py +181 -181
  253. tests/test_redis_key_validator.py +123 -123
  254. tests/test_redis_queue.py +224 -224
  255. tests/test_request_ignore_middleware.py +182 -182
  256. tests/test_request_params.py +111 -111
  257. tests/test_request_serialization.py +70 -70
  258. tests/test_response_code_middleware.py +349 -349
  259. tests/test_response_filter_middleware.py +427 -427
  260. tests/test_response_improvements.py +152 -152
  261. tests/test_retry_middleware.py +241 -241
  262. tests/test_scheduler.py +252 -252
  263. tests/test_scheduler_config_update.py +133 -133
  264. tests/test_simple_response.py +61 -61
  265. tests/test_telecom_spider_redis_key.py +205 -205
  266. tests/test_template_content.py +87 -87
  267. tests/test_template_redis_key.py +134 -134
  268. tests/test_tools.py +159 -159
  269. tests/test_user_agents.py +96 -96
  270. tests/tools_example.py +260 -260
  271. tests/untested_features_report.md +139 -0
  272. tests/verify_debug.py +52 -0
  273. tests/verify_distributed.py +117 -117
  274. tests/verify_log_fix.py +112 -0
  275. crawlo-1.3.3.dist-info/RECORD +0 -219
  276. tests/DOUBLE_CRAWLO_PREFIX_FIX_REPORT.md +0 -82
  277. {crawlo-1.3.3.dist-info → crawlo-1.3.4.dist-info}/WHEEL +0 -0
  278. {crawlo-1.3.3.dist-info → crawlo-1.3.4.dist-info}/entry_points.txt +0 -0
  279. {crawlo-1.3.3.dist-info → crawlo-1.3.4.dist-info}/top_level.txt +0 -0
tests/scrapy_comparison/ofweek_scrapy.py (new file)
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+import scrapy
+from urllib.parse import urljoin
+
+
+class NewsItem(scrapy.Item):
+    title = scrapy.Field()
+    publish_time = scrapy.Field()
+    url = scrapy.Field()
+    source = scrapy.Field()
+    content = scrapy.Field()
+
+
+class OfweekScrapySpider(scrapy.Spider):
+    name = 'ofweek_scrapy'
+    allowed_domains = ['ee.ofweek.com']
+
+    def start_requests(self):
+        headers = {
+            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
+            "Cache-Control": "no-cache",
+            "Connection": "keep-alive",
+            "Pragma": "no-cache",
+            "Referer": "https://ee.ofweek.com/CATList-2800-8100-ee-2.html",
+            "Sec-Fetch-Dest": "document",
+            "Sec-Fetch-Mode": "navigate",
+            "Sec-Fetch-Site": "same-origin",
+            "Sec-Fetch-User": "?1",
+            "Upgrade-Insecure-Requests": "1",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
+            "sec-ch-ua": "\"Not;A=Brand\";v=\"99\", \"Google Chrome\";v=\"139\", \"Chromium\";v=\"139\"",
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": "\"Windows\""
+        }
+        cookies = {
+            "__utmz": "57425525.1730117117.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)",
+            "Hm_lvt_abe9900db162c6d089cdbfd107db0f03": "1739244841",
+            "Hm_lvt_af50e2fc51af73da7720fb324b88a975": "1740100727",
+            "JSESSIONID": "FEA96D3B5FC31350B2285E711BF2A541",
+            "Hm_lvt_28a416fcfc17063eb9c4f9bb1a1f5cda": "1757477622",
+            "HMACCOUNT": "08DF0D235A291EAA",
+            "__utma": "57425525.2080994505.1730117117.1747970718.1757477622.50",
+            "__utmc": "57425525",
+            "__utmt": "1",
+            "__utmb": "57425525.2.10.1757477622",
+            "Hm_lpvt_28a416fcfc17063eb9c4f9bb1a1f5cda": "1757477628",
+            "index_burying_point": "c64d6c31e69d560efe319cc9f8be279f"
+        }
+
+        # Use a limited number of pages for testing
+        max_page = 50
+        for page in range(1, max_page + 1):
+            url = f'https://ee.ofweek.com/CATList-2800-8100-ee-{page}.html'
+            yield scrapy.Request(
+                url=url,
+                callback=self.parse,
+                headers=headers,
+                cookies=cookies
+            )
+
+    def parse(self, response):
+        self.logger.info(f'Parsing page: {response.url}')
+
+        rows = response.xpath(
+            '//div[@class="main_left"]/div[@class="list_model"]/div[@class="model_right model_right2"]')
+        self.logger.info(f"Found {len(rows)} entries on page {response.url}")
+
+        for row in rows:
+            try:
+                # Extract the URL and title
+                url = row.xpath('./h3/a/@href').extract_first()
+                title = row.xpath('./h3/a/text()').extract_first()
+
+                # Tolerate missing fields
+                if not url:
+                    self.logger.warning("Entry has no URL, skipping")
+                    continue
+
+                if not title:
+                    self.logger.warning("Entry has no title, skipping")
+                    continue
+
+                # Make sure the URL is absolute
+                absolute_url = urljoin(response.url, url)
+
+                # Validate the URL scheme
+                if not absolute_url.startswith(('http://', 'https://')):
+                    self.logger.warning(f"Invalid URL format, skipping: {absolute_url}")
+                    continue
+
+                self.logger.info(f"Extracted detail page link: {absolute_url}, title: {title}")
+                yield scrapy.Request(
+                    url=absolute_url,
+                    meta={
+                        "title": title.strip() if title else '',
+                        "parent_url": response.url
+                    },
+                    callback=self.parse_detail
+                )
+            except Exception as e:
+                self.logger.error(f"Error while processing entry: {e}")
+                continue
+
+    def parse_detail(self, response):
+        self.logger.info(f'Parsing detail page: {response.url}')
+
+        try:
+            title = response.meta.get('title', '')
+
+            # Extract the content with fallback handling
+            content_elements = response.xpath('//div[@class="TRS_Editor"]|//*[@id="articleC"]')
+            if content_elements:
+                content = content_elements.xpath('.//text()').extract()
+                content = '\n'.join([text.strip() for text in content if text.strip()])
+            else:
+                content = ''
+                self.logger.warning(f"No content area found: {response.url}")
+
+            # Extract the publish time
+            publish_time = response.xpath('//div[@class="time fl"]/text()').extract_first()
+            if publish_time:
+                publish_time = publish_time.strip()
+
+            source = response.xpath('//div[@class="source-name"]/text()').extract_first()
+
+            # Build the item
+            item = NewsItem()
+            item['title'] = title.strip() if title else ''
+            item['publish_time'] = publish_time if publish_time else ''
+            item['url'] = response.url
+            item['source'] = source if source else ''
+            item['content'] = content
+
+            self.logger.info(f"Successfully extracted detail page data: {item['title']}")
+            yield item
+
+        except Exception as e:
+            self.logger.error(f"Error while parsing detail page {response.url}: {e}")
tests/scrapy_comparison/scrapy_test.py (new file)
@@ -0,0 +1,134 @@
+# -*- coding: utf-8 -*-
+import scrapy
+from urllib.parse import urljoin
+
+class NewsItem(scrapy.Item):
+    title = scrapy.Field()
+    publish_time = scrapy.Field()
+    url = scrapy.Field()
+    source = scrapy.Field()
+    content = scrapy.Field()
+
+class OfweekScrapyTestSpider(scrapy.Spider):
+    name = 'ofweek_scrapy_test'
+    allowed_domains = ['ee.ofweek.com']
+
+    def start_requests(self):
+        headers = {
+            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
+            "Cache-Control": "no-cache",
+            "Connection": "keep-alive",
+            "Pragma": "no-cache",
+            "Referer": "https://ee.ofweek.com/CATList-2800-8100-ee-2.html",
+            "Sec-Fetch-Dest": "document",
+            "Sec-Fetch-Mode": "navigate",
+            "Sec-Fetch-Site": "same-origin",
+            "Sec-Fetch-User": "?1",
+            "Upgrade-Insecure-Requests": "1",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
+        }
+        cookies = {
+            "__utmz": "57425525.1730117117.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)",
+            "Hm_lvt_abe9900db162c6d089cdbfd107db0f03": "1739244841",
+            "Hm_lvt_af50e2fc51af73da7720fb324b88a975": "1740100727",
+            "JSESSIONID": "FEA96D3B5FC31350B2285E711BF2A541",
+            "Hm_lvt_28a416fcfc17063eb9c4f9bb1a1f5cda": "1757477622",
+            "HMACCOUNT": "08DF0D235A291EAA",
+            "__utma": "57425525.2080994505.1730117117.1747970718.1757477622.50",
+            "__utmc": "57425525",
+            "__utmt": "1",
+            "__utmb": "57425525.2.10.1757477622",
+            "Hm_lpvt_28a416fcfc17063eb9c4f9bb1a1f5cda": "1757477628",
+            "index_burying_point": "c64d6c31e69d560efe319cc9f8be279f"
+        }
+
+        # Use a limited number of pages for testing
+        max_page = 5
+        for page in range(1, max_page + 1):
+            url = f'https://ee.ofweek.com/CATList-2800-8100-ee-{page}.html'
+            yield scrapy.Request(
+                url=url,
+                callback=self.parse,
+                headers=headers,
+                cookies=cookies
+            )
+
+    def parse(self, response):
+        self.logger.info(f'Parsing page: {response.url}')
+
+        rows = response.xpath(
+            '//div[@class="main_left"]/div[@class="list_model"]/div[@class="model_right model_right2"]')
+        self.logger.info(f"Found {len(rows)} entries on page {response.url}")
+
+        for row in rows:
+            try:
+                # Extract the URL and title
+                url = row.xpath('./h3/a/@href').extract_first()
+                title = row.xpath('./h3/a/text()').extract_first()
+
+                # Tolerate missing fields
+                if not url:
+                    self.logger.warning("Entry has no URL, skipping")
+                    continue
+
+                if not title:
+                    self.logger.warning("Entry has no title, skipping")
+                    continue
+
+                # Make sure the URL is absolute
+                absolute_url = urljoin(response.url, url)
+
+                # Validate the URL scheme
+                if not absolute_url.startswith(('http://', 'https://')):
+                    self.logger.warning(f"Invalid URL format, skipping: {absolute_url}")
+                    continue
+
+                self.logger.info(f"Extracted detail page link: {absolute_url}, title: {title}")
+                yield scrapy.Request(
+                    url=absolute_url,
+                    meta={
+                        "title": title.strip() if title else '',
+                        "parent_url": response.url
+                    },
+                    callback=self.parse_detail
+                )
+            except Exception as e:
+                self.logger.error(f"Error while processing entry: {e}")
+                continue
+
+    def parse_detail(self, response):
+        self.logger.info(f'Parsing detail page: {response.url}')
+
+        try:
+            title = response.meta.get('title', '')
+
+            # Extract the content with fallback handling
+            content_elements = response.xpath('//div[@class="TRS_Editor"]|//*[@id="articleC"]')
+            if content_elements:
+                content = content_elements.xpath('.//text()').extract()
+                content = '\n'.join([text.strip() for text in content if text.strip()])
+            else:
+                content = ''
+                self.logger.warning(f"No content area found: {response.url}")
+
+            # Extract the publish time
+            publish_time = response.xpath('//div[@class="time fl"]/text()').extract_first()
+            if publish_time:
+                publish_time = publish_time.strip()
+
+            source = response.xpath('//div[@class="source-name"]/text()').extract_first()
+
+            # Build the item
+            item = NewsItem()
+            item['title'] = title.strip() if title else ''
+            item['publish_time'] = publish_time if publish_time else ''
+            item['url'] = response.url
+            item['source'] = source if source else ''
+            item['content'] = content
+
+            self.logger.info(f"Successfully extracted detail page data: {item['title']}")
+            yield item
+
+        except Exception as e:
+            self.logger.error(f"Error while parsing detail page {response.url}: {e}")
tests/simple_command_test.py (new file)
@@ -0,0 +1,120 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Simple test of all crawlo commands
+"""
+
+import sys
+import os
+import subprocess
+
+# Add the project root directory to the Python path
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+
+def run_command(cmd, cwd=None):
+    """Run a command and return its result."""
+    try:
+        result = subprocess.run(
+            cmd,
+            shell=True,
+            cwd=cwd,
+            capture_output=True,
+            text=True,
+            timeout=30
+        )
+        return result.returncode, result.stdout, result.stderr
+    except subprocess.TimeoutExpired:
+        return -1, "", "Command timed out"
+    except Exception as e:
+        return -1, "", str(e)
+
+def test_help_command():
+    """Test the help command."""
+    print("Testing the help command...")
+
+    # Test the -h flag
+    code, stdout, stderr = run_command("python -m crawlo.cli -h")
+    assert code == 0, f"help command failed: {stderr}"
+    assert "Crawlo" in stdout, "help output does not contain the framework name"
+
+    # Test the --help flag
+    code, stdout, stderr = run_command("python -m crawlo.cli --help")
+    assert code == 0, f"help command failed: {stderr}"
+    assert "Crawlo" in stdout, "help output does not contain the framework name"
+
+    print("✅ help command test passed")
+
+def test_version_command():
+    """Test the version command."""
+    print("Testing the version command...")
+
+    # Test the -v flag
+    code, stdout, stderr = run_command("python -m crawlo.cli -v")
+    assert code == 0, f"version command failed: {stderr}"
+    assert "Crawlo" in stdout, "version output does not contain the framework name"
+
+    # Test the --version flag
+    code, stdout, stderr = run_command("python -m crawlo.cli --version")
+    assert code == 0, f"version command failed: {stderr}"
+    assert "Crawlo" in stdout, "version output does not contain the framework name"
+
+    print("✅ version command test passed")
+
+def test_command_help():
+    """Test the help output of each command."""
+    print("Testing the help output of each command...")
+
+    commands = ["startproject", "genspider", "run", "check", "list", "stats"]
+
+    for command in commands:
+        code, stdout, stderr = run_command(f"python -m crawlo.cli {command} --help")
+        # Command help often exits with a non-zero status code, so only check the output
+        assert len(stdout) > 0 or len(stderr) > 0, f"{command} command help produced no output"
+        print(f"✅ {command} command help test passed")
+
+def test_invalid_command():
+    """Test an invalid command."""
+    print("Testing an invalid command...")
+
+    code, stdout, stderr = run_command("python -m crawlo.cli invalid_command")
+    assert code != 0, "an invalid command should return a non-zero status code"
+    assert "Unknown command" in stderr or "Unknown command" in stdout, "an unknown-command message should be printed"
+
+    print("✅ invalid command test passed")
+
+def main():
+    """Entry point."""
+    print("Starting the simple test of all crawlo commands...")
+    print("=" * 50)
+
+    try:
+        # Test the help command
+        test_help_command()
+        print()
+
+        # Test the version command
+        test_version_command()
+        print()
+
+        # Test the help output of each command
+        test_command_help()
+        print()
+
+        # Test an invalid command
+        test_invalid_command()
+        print()
+
+        print("=" * 50)
+        print("All command smoke tests passed!")
+
+    except Exception as e:
+        print("=" * 50)
+        print(f"Test failed: {e}")
+        import traceback
+        traceback.print_exc()
+        return 1
+
+    return 0
+
+if __name__ == "__main__":
+    sys.exit(main())
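The run_command helper above invokes the CLI through the shell with shell=True. A variant that avoids shell quoting issues by passing an argument list and reusing the current interpreter might look like the following sketch (an assumption for illustration, keeping the same 30-second timeout and return convention as above).

import subprocess
import sys

def run_cli(args, cwd=None):
    """Sketch of a shell-free variant of run_command: args is a list such as ["list"] or ["run", "--help"]."""
    try:
        result = subprocess.run(
            [sys.executable, "-m", "crawlo.cli", *args],  # argument list instead of a shell string
            cwd=cwd,
            capture_output=True,
            text=True,
            timeout=30,
        )
        return result.returncode, result.stdout, result.stderr
    except subprocess.TimeoutExpired:
        return -1, "", "Command timed out"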
tests/simple_crawlo_test.py (new file)
@@ -0,0 +1,128 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Simple Crawlo performance test
+"""
+import asyncio
+import time
+import sys
+import os
+
+# Add the project root directory to the Python path
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
+
+from crawlo import Spider, Request
+from crawlo.crawler import CrawlerProcess
+from crawlo.items import Item, Field
+
+class NewsItem(Item):
+    title = Field()
+    publish_time = Field()
+    url = Field()
+    source = Field()
+    content = Field()
+
+class OfweekSimpleSpider(Spider):
+    name = "ofweek_simple"
+
+    def start_requests(self):
+        headers = {
+            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
+            "Cache-Control": "no-cache",
+            "Connection": "keep-alive",
+            "Pragma": "no-cache",
+            "Referer": "https://ee.ofweek.com/CATList-2800-8100-ee-2.html",
+            "Sec-Fetch-Dest": "document",
+            "Sec-Fetch-Mode": "navigate",
+            "Sec-Fetch-Site": "same-origin",
+            "Sec-Fetch-User": "?1",
+            "Upgrade-Insecure-Requests": "1",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
+        }
+        cookies = {
+            "__utmz": "57425525.1730117117.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)",
+            "Hm_lvt_abe9900db162c6d089cdbfd107db0f03": "1739244841",
+            "Hm_lvt_af50e2fc51af73da7720fb324b88a975": "1740100727",
+            "JSESSIONID": "FEA96D3B5FC31350B2285E711BF2A541",
+            "Hm_lvt_28a416fcfc17063eb9c4f9bb1a1f5cda": "1757477622",
+            "HMACCOUNT": "08DF0D235A291EAA",
+            "__utma": "57425525.2080994505.1730117117.1747970718.1757477622.50",
+            "__utmc": "57425525",
+            "__utmt": "1",
+            "__utmb": "57425525.2.10.1757477622",
+            "Hm_lpvt_28a416fcfc17063eb9c4f9bb1a1f5cda": "1757477628",
+            "index_burying_point": "c64d6c31e69d560efe319cc9f8be279f"
+        }
+
+        # Use a limited number of pages for testing
+        max_page = 5
+        for page in range(1, max_page + 1):
+            url = f"https://ee.ofweek.com/CATList-2800-8100-ee-{page}.html"
+            yield Request(url=url, callback=self.parse, headers=headers, cookies=cookies)
+
+    def parse(self, response):
+        from urllib.parse import urljoin
+        rows = response.xpath("//div[@class=\"main_left\"]/div[@class=\"list_model\"]/div[@class=\"model_right model_right2\"]")
+
+        for row in rows:
+            try:
+                url = row.xpath("./h3/a/@href").extract_first()
+                title = row.xpath("./h3/a/text()").extract_first()
+
+                if not url or not title:
+                    continue
+
+                absolute_url = urljoin(response.url, url)
+                if not absolute_url.startswith(("http://", "https://")):
+                    continue
+
+                yield Request(
+                    url=absolute_url,
+                    meta={"title": title.strip() if title else "", "parent_url": response.url},
+                    callback=self.parse_detail
+                )
+            except Exception:
+                continue
+
+    def parse_detail(self, response):
+        title = response.meta.get("title", "")
+        content_elements = response.xpath("//div[@class=\"TRS_Editor\"]|//*[@id=\"articleC\"]")
+        if content_elements:
+            content = content_elements.xpath(".//text()").extract()
+            content = "\n".join([text.strip() for text in content if text.strip()])
+        else:
+            content = ""
+
+        publish_time = response.xpath("//div[@class=\"time fl\"]/text()").extract_first()
+        if publish_time:
+            publish_time = publish_time.strip()
+
+        source = response.xpath("//div[@class=\"source-name\"]/text()").extract_first()
+
+        item = NewsItem()
+        item["title"] = title.strip() if title else ""
+        item["publish_time"] = publish_time if publish_time else ""
+        item["url"] = response.url
+        item["source"] = source if source else ""
+        item["content"] = content
+
+        yield item
+
+async def main():
+    start_time = time.time()
+
+    process = CrawlerProcess(settings={
+        "CONCURRENCY": 8,
+        "DOWNLOAD_DELAY": 0.1,
+        "LOG_LEVEL": "ERROR",  # Reduce log output to improve performance
+    })
+    await process.crawl(OfweekSimpleSpider)
+
+    end_time = time.time()
+    execution_time = end_time - start_time
+
+    print(f"Crawlo execution time: {execution_time:.2f} seconds")
+
+if __name__ == "__main__":
+    asyncio.run(main())
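One small note on the timing in main(): time.time() measures wall-clock time and can jump if the system clock changes. A sketch of the same measurement with the monotonic time.perf_counter(), reusing only the CrawlerProcess calls already shown above, follows; the import of OfweekSimpleSpider assumes the script is run from the tests directory.

import asyncio
import time

from crawlo.crawler import CrawlerProcess
from simple_crawlo_test import OfweekSimpleSpider  # spider class defined in the file above

async def timed_crawl():
    start = time.perf_counter()  # monotonic, better suited to elapsed-time measurement
    process = CrawlerProcess(settings={"LOG_LEVEL": "ERROR"})
    await process.crawl(OfweekSimpleSpider)
    print(f"Crawlo execution time: {time.perf_counter() - start:.2f} seconds")

if __name__ == "__main__":
    asyncio.run(timed_crawl())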
tests/simple_log_test.py (new file)
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+"""
+Simple logging system test
+"""
+import sys
+import os
+sys.path.insert(0, '/')
+
+# Make sure the log directory exists
+os.makedirs('/examples/ofweek_standalone/logs', exist_ok=True)
+
+# Exercise the logging system
+from crawlo.utils.log import LoggerManager, get_logger
+
+print("=== Simple logging system test ===")
+
+# 1. Configure the logging system directly
+print("1. Configuring the logging system...")
+LoggerManager.configure(
+    LOG_LEVEL='INFO',
+    LOG_FILE='/Users/oscar/projects/Crawlo/examples/ofweek_standalone/logs/simple_test.log'
+)
+
+# 2. Create a logger
+print("2. Creating a logger...")
+logger = get_logger('test.logger')
+print(f" Logger: {logger}")
+print(f" Handlers: {len(logger.handlers)}")
+
+for i, handler in enumerate(logger.handlers):
+    handler_type = type(handler).__name__
+    print(f" Handler {i}: {handler_type}")
+    if hasattr(handler, 'baseFilename'):
+        print(f" File: {handler.baseFilename}")
+
+# 3. Emit test log records
+print("3. Emitting test log records...")
+logger.info("This is a test INFO message")
+logger.debug("This is a test DEBUG message")
+logger.warning("This is a test WARNING message")
+
+print("4. Checking the log file...")
+log_file = '/Users/oscar/projects/Crawlo/examples/ofweek_standalone/logs/simple_test.log'
+if os.path.exists(log_file):
+    print(f" Log file exists: {log_file}")
+    with open(log_file, 'r', encoding='utf-8') as f:
+        content = f.read()
+        print(f" File content length: {len(content)} characters")
+        if content:
+            print(" File content:")
+            print(content)
+        else:
+            print(" File is empty")
+else:
+    print(f" Log file does not exist: {log_file}")
+
+print("=== Test complete ===")