crawlo 1.1.0__py3-none-any.whl → 1.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- crawlo/__init__.py +34 -24
- crawlo/__version__.py +1 -1
- crawlo/cli.py +40 -40
- crawlo/commands/__init__.py +13 -13
- crawlo/commands/check.py +594 -155
- crawlo/commands/genspider.py +152 -111
- crawlo/commands/list.py +156 -119
- crawlo/commands/run.py +285 -170
- crawlo/commands/startproject.py +196 -101
- crawlo/commands/stats.py +188 -167
- crawlo/commands/utils.py +187 -0
- crawlo/config.py +280 -0
- crawlo/core/__init__.py +2 -2
- crawlo/core/engine.py +171 -158
- crawlo/core/enhanced_engine.py +190 -0
- crawlo/core/processor.py +40 -40
- crawlo/core/scheduler.py +162 -57
- crawlo/crawler.py +1028 -493
- crawlo/downloader/__init__.py +242 -78
- crawlo/downloader/aiohttp_downloader.py +212 -199
- crawlo/downloader/cffi_downloader.py +252 -277
- crawlo/downloader/httpx_downloader.py +257 -246
- crawlo/event.py +11 -11
- crawlo/exceptions.py +78 -78
- crawlo/extension/__init__.py +31 -31
- crawlo/extension/log_interval.py +49 -49
- crawlo/extension/log_stats.py +44 -44
- crawlo/extension/logging_extension.py +34 -34
- crawlo/filters/__init__.py +154 -37
- crawlo/filters/aioredis_filter.py +242 -150
- crawlo/filters/memory_filter.py +269 -202
- crawlo/items/__init__.py +23 -23
- crawlo/items/base.py +21 -21
- crawlo/items/fields.py +53 -53
- crawlo/items/items.py +104 -104
- crawlo/middleware/__init__.py +21 -21
- crawlo/middleware/default_header.py +32 -32
- crawlo/middleware/download_delay.py +28 -28
- crawlo/middleware/middleware_manager.py +135 -135
- crawlo/middleware/proxy.py +248 -245
- crawlo/middleware/request_ignore.py +30 -30
- crawlo/middleware/response_code.py +18 -18
- crawlo/middleware/response_filter.py +26 -26
- crawlo/middleware/retry.py +125 -90
- crawlo/mode_manager.py +201 -0
- crawlo/network/__init__.py +21 -7
- crawlo/network/request.py +311 -203
- crawlo/network/response.py +269 -166
- crawlo/pipelines/__init__.py +13 -13
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/csv_pipeline.py +317 -0
- crawlo/pipelines/json_pipeline.py +219 -0
- crawlo/pipelines/mongo_pipeline.py +116 -116
- crawlo/pipelines/mysql_pipeline.py +195 -195
- crawlo/pipelines/pipeline_manager.py +56 -56
- crawlo/project.py +153 -0
- crawlo/queue/pqueue.py +37 -0
- crawlo/queue/queue_manager.py +304 -0
- crawlo/queue/redis_priority_queue.py +192 -0
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +226 -169
- crawlo/settings/setting_manager.py +99 -99
- crawlo/spider/__init__.py +639 -129
- crawlo/stats_collector.py +59 -59
- crawlo/subscriber.py +106 -106
- crawlo/task_manager.py +30 -27
- crawlo/templates/crawlo.cfg.tmpl +10 -10
- crawlo/templates/project/__init__.py.tmpl +3 -3
- crawlo/templates/project/items.py.tmpl +17 -17
- crawlo/templates/project/middlewares.py.tmpl +87 -76
- crawlo/templates/project/pipelines.py.tmpl +336 -64
- crawlo/templates/project/run.py.tmpl +239 -0
- crawlo/templates/project/settings.py.tmpl +248 -54
- crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
- crawlo/templates/spider/spider.py.tmpl +178 -32
- crawlo/utils/__init__.py +7 -7
- crawlo/utils/controlled_spider_mixin.py +336 -0
- crawlo/utils/date_tools.py +233 -233
- crawlo/utils/db_helper.py +343 -343
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/large_scale_config.py +287 -0
- crawlo/utils/large_scale_helper.py +344 -0
- crawlo/utils/log.py +128 -128
- crawlo/utils/queue_helper.py +176 -0
- crawlo/utils/request.py +267 -267
- crawlo/utils/request_serializer.py +220 -0
- crawlo/utils/spider_loader.py +62 -62
- crawlo/utils/system.py +11 -11
- crawlo/utils/tools.py +4 -4
- crawlo/utils/url.py +39 -39
- crawlo-1.1.2.dist-info/METADATA +567 -0
- crawlo-1.1.2.dist-info/RECORD +108 -0
- examples/__init__.py +7 -0
- tests/__init__.py +7 -7
- tests/test_final_validation.py +154 -0
- tests/test_proxy_health_check.py +32 -32
- tests/test_proxy_middleware_integration.py +136 -136
- tests/test_proxy_providers.py +56 -56
- tests/test_proxy_stats.py +19 -19
- tests/test_proxy_strategies.py +59 -59
- tests/test_redis_config.py +29 -0
- tests/test_redis_queue.py +225 -0
- tests/test_request_serialization.py +71 -0
- tests/test_scheduler.py +242 -0
- crawlo/pipelines/mysql_batch_pipline.py +0 -273
- crawlo/utils/concurrency_manager.py +0 -125
- crawlo/utils/pqueue.py +0 -174
- crawlo/utils/project.py +0 -197
- crawlo-1.1.0.dist-info/METADATA +0 -49
- crawlo-1.1.0.dist-info/RECORD +0 -97
- examples/gxb/items.py +0 -36
- examples/gxb/run.py +0 -16
- examples/gxb/settings.py +0 -72
- examples/gxb/spider/__init__.py +0 -2
- examples/gxb/spider/miit_spider.py +0 -180
- examples/gxb/spider/telecom_device.py +0 -129
- {examples/gxb → crawlo/queue}/__init__.py +0 -0
- {crawlo-1.1.0.dist-info → crawlo-1.1.2.dist-info}/WHEEL +0 -0
- {crawlo-1.1.0.dist-info → crawlo-1.1.2.dist-info}/entry_points.txt +0 -0
- {crawlo-1.1.0.dist-info → crawlo-1.1.2.dist-info}/top_level.txt +0 -0
crawlo/crawler.py
CHANGED
@@ -1,493 +1,1028 @@
The 493 lines of the previous crawler.py were removed and the file was rewritten; the 1028-line replacement follows.
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Crawlo Crawler Module
====================
Provides the core crawler-process management and runtime functionality.

Core components:
- Crawler: a single crawler run instance that manages the lifecycle of a Spider and its engine
- CrawlerProcess: the crawler process manager, supporting concurrent scheduling of multiple spiders and resource management

Features:
- Intelligent concurrency control and resource management
- Graceful shutdown and signal handling
- Statistics monitoring and performance tracking
- Automatic module discovery and registration
- Error recovery and retry mechanisms
- Support for large-scale crawling optimizations

Example usage:
    # Run a single crawler
    crawler = Crawler(MySpider, settings)
    await crawler.crawl()

    # Manage multiple crawlers concurrently
    process = CrawlerProcess()
    await process.crawl([Spider1, Spider2])
"""
from __future__ import annotations
import asyncio
import signal
import time
import threading
from typing import Type, Optional, Set, List, Union, Dict, Any
from .spider import Spider, get_global_spider_registry
from .core.engine import Engine
from .utils.log import get_logger
from .subscriber import Subscriber
from .extension import ExtensionManager
from .stats_collector import StatsCollector
from .event import spider_opened, spider_closed
from .settings.setting_manager import SettingManager
from crawlo.project import merge_settings, get_settings


logger = get_logger(__name__)


class CrawlerContext:
    """
    Crawler context manager.
    Provides shared state and resource management.
    """

    def __init__(self):
        self.start_time = time.time()
        self.total_crawlers = 0
        self.active_crawlers = 0
        self.completed_crawlers = 0
        self.failed_crawlers = 0
        self.error_log = []
        self._lock = threading.RLock()

    def increment_total(self):
        with self._lock:
            self.total_crawlers += 1

    def increment_active(self):
        with self._lock:
            self.active_crawlers += 1

    def decrement_active(self):
        with self._lock:
            self.active_crawlers -= 1

    def increment_completed(self):
        with self._lock:
            self.completed_crawlers += 1

    def increment_failed(self, error: str):
        with self._lock:
            self.failed_crawlers += 1
            self.error_log.append({
                'timestamp': time.time(),
                'error': error
            })

    def get_stats(self) -> Dict[str, Any]:
        with self._lock:
            duration = time.time() - self.start_time
            return {
                'total_crawlers': self.total_crawlers,
                'active_crawlers': self.active_crawlers,
                'completed_crawlers': self.completed_crawlers,
                'failed_crawlers': self.failed_crawlers,
                'success_rate': (self.completed_crawlers / max(1, self.total_crawlers)) * 100,
                'duration_seconds': round(duration, 2),
                'error_count': len(self.error_log)
            }


class Crawler:
    """
    A single crawler run instance that manages the lifecycle of a Spider and its engine.

    Responsibilities:
    - Spider lifecycle management (initialization, running, shutdown)
    - Coordination of engine components
    - Settings merging and validation
    - Statistics collection
    - Extension management
    - Exception handling and cleanup
    """

    def __init__(self, spider_cls: Type[Spider], settings: SettingManager, context: Optional[CrawlerContext] = None):
        self.spider_cls = spider_cls
        self.spider: Optional[Spider] = None
        self.engine: Optional[Engine] = None
        self.stats: Optional[StatsCollector] = None
        self.subscriber: Optional[Subscriber] = None
        self.extension: Optional[ExtensionManager] = None
        self.settings: SettingManager = settings.copy()
        self.context = context or CrawlerContext()

        # State management
        self._closed = False
        self._close_lock = asyncio.Lock()
        self._start_time = None
        self._end_time = None

        # Performance monitoring
        self._performance_metrics = {
            'initialization_time': 0,
            'crawl_duration': 0,
            'memory_peak': 0,
            'request_count': 0,
            'error_count': 0
        }

    async def crawl(self):
        """
        Start the core crawl workflow (enhanced version).

        Phases:
        1. Initialization: create all components
        2. Validation: check configuration and state
        3. Running: start the crawler engine
        4. Cleanup: release resources
        """
        init_start = time.time()
        self._start_time = init_start

        try:
            # Update context state
            self.context.increment_active()

            # Phase 1: initialize components
            self.subscriber = self._create_subscriber()
            self.spider = self._create_spider()
            self.engine = self._create_engine()
            self.stats = self._create_stats()
            self.extension = self._create_extension()

            # Record initialization time
            self._performance_metrics['initialization_time'] = time.time() - init_start

            # Phase 2: validate state
            self._validate_crawler_state()

            # Phase 3: start the crawler
            crawl_start = time.time()
            await self.engine.start_spider(self.spider)

            # Record crawl duration
            self._performance_metrics['crawl_duration'] = time.time() - crawl_start
            self._end_time = time.time()

            # Update context state
            self.context.increment_completed()

            logger.info(f"Crawler {self.spider.name} finished in {self._get_total_duration():.2f}s")

        except Exception as e:
            self._performance_metrics['error_count'] += 1
            self.context.increment_failed(str(e))
            logger.error(f"Crawler {getattr(self.spider, 'name', 'Unknown')} failed: {e}", exc_info=True)
            raise
        finally:
            self.context.decrement_active()
            # Ensure resource cleanup
            await self._ensure_cleanup()

    def _validate_crawler_state(self):
        """
        Validate crawler state and configuration.
        Ensures all required components are initialized.
        """
        if not self.spider:
            raise RuntimeError("Spider instance not initialized")
        if not self.engine:
            raise RuntimeError("Engine not initialized")
        if not self.stats:
            raise RuntimeError("Stats collector not initialized")
        if not self.subscriber:
            raise RuntimeError("Event subscriber not initialized")

        # Check critical configuration
        if not self.spider.name:
            raise ValueError("Spider name must not be empty")

        logger.debug(f"Crawler {self.spider.name} passed state validation")

    def _get_total_duration(self) -> float:
        """Return the total run time in seconds."""
        if self._start_time and self._end_time:
            return self._end_time - self._start_time
        return 0.0

    async def _ensure_cleanup(self):
        """Ensure resources are cleaned up."""
        try:
            if not self._closed:
                await self.close()
        except Exception as e:
            logger.warning(f"Error while cleaning up resources: {e}")

    def get_performance_metrics(self) -> Dict[str, Any]:
        """Return performance metrics."""
        metrics = self._performance_metrics.copy()
        metrics['total_duration'] = self._get_total_duration()
        if self.stats:
            # Merge in collected stats
            stats_data = getattr(self.stats, 'get_stats', lambda: {})()
            metrics.update(stats_data)
        return metrics

    @staticmethod
    def _create_subscriber() -> Subscriber:
        """Create the event subscriber."""
        return Subscriber()

    def _create_spider(self) -> Spider:
        """
        Create and validate the spider instance (enhanced version).

        Performs the following checks:
        - the spider name must exist
        - start_requests must be callable
        - start_urls must not be a string
        - a parse method is recommended
        """
        spider = self.spider_cls.create_instance(self)

        # Required attribute checks
        if not getattr(spider, 'name', None):
            raise AttributeError(
                f"Spider class '{self.spider_cls.__name__}' must define a 'name' attribute.\n"
                f"Example: name = 'my_spider'"
            )

        if not callable(getattr(spider, 'start_requests', None)):
            raise AttributeError(
                f"Spider '{spider.name}' must implement a callable 'start_requests' method.\n"
                f"Example: def start_requests(self): yield Request(url='...')"
            )

        # start_urls type check
        start_urls = getattr(spider, 'start_urls', [])
        if isinstance(start_urls, str):
            raise TypeError(
                f"'start_urls' of spider '{spider.name}' must be a list or tuple, not a string.\n"
                f"Correct: start_urls = ['http://example.com']\n"
                f"Wrong: start_urls = 'http://example.com'"
            )

        # parse method check (warning rather than error)
        if not callable(getattr(spider, 'parse', None)):
            logger.warning(
                f"Spider '{spider.name}' does not define a 'parse' method.\n"
                f"Make sure every Request specifies a callback, otherwise responses will be ignored."
            )

        # Apply spider configuration
        self._set_spider(spider)

        logger.debug(f"Spider '{spider.name}' initialized")
        return spider

    def _create_engine(self) -> Engine:
        """Create and initialize the engine."""
        engine = Engine(self)
        engine.engine_start()
        logger.debug(f"Engine initialized for crawler: {getattr(self.spider, 'name', 'Unknown')}")
        return engine

    def _create_stats(self) -> StatsCollector:
        """Create the stats collector."""
        stats = StatsCollector(self)
        logger.debug(f"Stats collector initialized for crawler: {getattr(self.spider, 'name', 'Unknown')}")
        return stats

    def _create_extension(self) -> ExtensionManager:
        """Create the extension manager."""
        extension = ExtensionManager.create_instance(self)
        logger.debug(f"Extension manager initialized for crawler: {getattr(self.spider, 'name', 'Unknown')}")
        return extension

    def _set_spider(self, spider: Spider):
        """
        Apply spider configuration and event subscriptions.
        Binds the spider's lifecycle events to the subscriber.
        """
        # Subscribe to spider lifecycle events
        self.subscriber.subscribe(spider.spider_opened, event=spider_opened)
        self.subscriber.subscribe(spider.spider_closed, event=spider_closed)

        # Merge spider-specific settings
        merge_settings(spider, self.settings)

        logger.debug(f"Settings merged for spider '{spider.name}'")

    async def close(self, reason='finished') -> None:
        """
        Close the crawler and clean up resources (enhanced version).

        Ensures the crawler is closed only once and handles all cleanup.
        """
        async with self._close_lock:
            if self._closed:
                return

            self._closed = True
            self._end_time = time.time()

            try:
                # Notify the spider-closed event
                if self.subscriber:
                    await self.subscriber.notify(spider_closed)

                # Collect statistics
                if self.stats and self.spider:
                    self.stats.close_spider(spider=self.spider, reason=reason)
                    # Record stats
                    try:
                        from crawlo.commands.stats import record_stats
                        record_stats(self)
                    except ImportError:
                        logger.debug("Stats recording module not available, skipping")

                logger.info(
                    f"Crawler '{getattr(self.spider, 'name', 'Unknown')}' closed, "
                    f"reason: {reason}, duration: {self._get_total_duration():.2f}s"
                )

            except Exception as e:
                logger.error(f"Error while closing crawler: {e}", exc_info=True)
            finally:
                # Ensure resource cleanup
                await self._cleanup_resources()

    async def _cleanup_resources(self):
        """Clean up all resources."""
        cleanup_tasks = []

        # Engine cleanup
        if self.engine:
            try:
                cleanup_tasks.append(self.engine.close())
            except AttributeError:
                pass  # the engine has no close method

        # Extension cleanup
        if self.extension:
            try:
                cleanup_tasks.append(self.extension.close())
            except AttributeError:
                pass

        # Stats collector cleanup
        if self.stats:
            try:
                cleanup_tasks.append(self.stats.close())
            except AttributeError:
                pass

        # Run cleanup tasks concurrently
        if cleanup_tasks:
            await asyncio.gather(*cleanup_tasks, return_exceptions=True)

        logger.debug("Resource cleanup finished")


class CrawlerProcess:
    """
    Crawler process manager (enhanced version).

    Supported features:
    - Concurrent scheduling of multiple spiders and resource management
    - Automatic module discovery and spider registration
    - Intelligent concurrency control and load balancing
    - Graceful shutdown and signal handling
    - Real-time status monitoring and statistics
    - Error recovery and retry mechanisms
    - Support for large-scale crawling optimizations

    Usage examples:
        # Basic usage
        process = CrawlerProcess()
        await process.crawl(MySpider)

        # Multiple spiders concurrently
        await process.crawl([Spider1, Spider2, 'spider_name'])

        # Custom concurrency
        process = CrawlerProcess(max_concurrency=8)
    """

    def __init__(
        self,
        settings: Optional[SettingManager] = None,
        max_concurrency: Optional[int] = None,
        spider_modules: Optional[List[str]] = None,
        enable_monitoring: bool = True
    ):
        # Basic configuration
        self.settings: SettingManager = settings or self._get_default_settings()
        self.crawlers: Set[Crawler] = set()
        self._active_tasks: Set[asyncio.Task] = set()

        # Context manager
        self.context = CrawlerContext()

        # Concurrency control
        self.max_concurrency: int = (
            max_concurrency
            or self.settings.get('MAX_RUNNING_SPIDERS')
            or self.settings.get('CONCURRENCY', 3)
        )
        self.semaphore = asyncio.Semaphore(self.max_concurrency)

        # Monitoring configuration
        self.enable_monitoring = enable_monitoring
        self._monitoring_task = None
        self._shutdown_event = asyncio.Event()

        # Auto-discover and import spider modules
        if spider_modules:
            self.auto_discover(spider_modules)

        # Use a snapshot of the global registry (so later imports do not affect it)
        self._spider_registry: Dict[str, Type[Spider]] = get_global_spider_registry()

        # Performance monitoring
        self._performance_stats = {
            'total_requests': 0,
            'successful_requests': 0,
            'failed_requests': 0,
            'memory_usage_mb': 0,
            'cpu_usage_percent': 0
        }

        # Register signal handlers
        signal.signal(signal.SIGINT, self._shutdown)
        signal.signal(signal.SIGTERM, self._shutdown)

        logger.info(
            f"CrawlerProcess initialized\n"
            f" - max concurrent spiders: {self.max_concurrency}\n"
            f" - registered spiders: {len(self._spider_registry)}\n"
            f" - monitoring enabled: {self.enable_monitoring}"
        )

    async def start_monitoring(self):
        """Start the monitoring task."""
        if not self.enable_monitoring:
            return

        self._monitoring_task = asyncio.create_task(self._monitor_loop())
        logger.debug("Monitoring task started")

    async def stop_monitoring(self):
        """Stop the monitoring task."""
        if self._monitoring_task and not self._monitoring_task.done():
            self._monitoring_task.cancel()
            try:
                await self._monitoring_task
            except asyncio.CancelledError:
                pass
            logger.debug("Monitoring task stopped")

    async def _monitor_loop(self):
        """Monitoring loop that periodically collects and reports status."""
        try:
            while not self._shutdown_event.is_set():
                await self._collect_performance_stats()

                # Report status every 30 seconds
                stats = self.context.get_stats()
                if stats['active_crawlers'] > 0:
                    logger.info(
                        f"Crawler status: active {stats['active_crawlers']}, "
                        f"completed {stats['completed_crawlers']}, "
                        f"failed {stats['failed_crawlers']}, "
                        f"success rate {stats['success_rate']:.1f}%"
                    )

                await asyncio.sleep(30)  # 30-second interval

        except asyncio.CancelledError:
            logger.debug("Monitoring loop cancelled")
        except Exception as e:
            logger.error(f"Monitoring loop error: {e}", exc_info=True)

    async def _collect_performance_stats(self):
        """Collect performance statistics."""
        try:
            import psutil
            import os

            process = psutil.Process(os.getpid())
            memory_info = process.memory_info()

            self._performance_stats.update({
                'memory_usage_mb': round(memory_info.rss / 1024 / 1024, 2),
                'cpu_usage_percent': round(process.cpu_percent(), 2)
            })

        except ImportError:
            # Skip performance monitoring when psutil is not available
            pass
        except Exception as e:
            logger.debug(f"Failed to collect performance stats: {e}")

    @staticmethod
    def auto_discover(modules: List[str]):
        """
        Import modules automatically to trigger Spider class definition and registration (enhanced version).

        Supports recursive scanning and error recovery.
        """
        import importlib
        import pkgutil

        discovered_count = 0
        error_count = 0

        for module_name in modules:
            try:
                module = importlib.import_module(module_name)

                if hasattr(module, '__path__'):
                    # Package module, scan recursively
                    for _, name, _ in pkgutil.walk_packages(module.__path__, module.__name__ + "."):
                        try:
                            importlib.import_module(name)
                            discovered_count += 1
                        except Exception as sub_e:
                            error_count += 1
                            logger.warning(f"Failed to import submodule {name}: {sub_e}")
                else:
                    # Single module
                    importlib.import_module(module_name)
                    discovered_count += 1

                logger.debug(f"Scanned module: {module_name}")

            except Exception as e:
                error_count += 1
                logger.error(f"Failed to scan module {module_name}: {e}", exc_info=True)

        logger.info(
            f"Module discovery finished: {discovered_count} succeeded, {error_count} failed"
        )

    # === Public read-only interface: avoid direct access to _spider_registry ===

    def get_spider_names(self) -> List[str]:
        """Return the names of all registered spiders."""
        return list(self._spider_registry.keys())

    def get_spider_class(self, name: str) -> Optional[Type[Spider]]:
        """Return the spider class registered under the given name."""
        return self._spider_registry.get(name)

    def is_spider_registered(self, name: str) -> bool:
        """Return whether the given name is registered."""
        return name in self._spider_registry

    async def crawl(self, spiders: Union[Type[Spider], str, List[Union[Type[Spider], str]]]):
        """
        Start one or more crawlers (enhanced version).

        Enhancements:
        - intelligent concurrency control
        - real-time monitoring and statistics
        - error recovery and retry
        - graceful shutdown handling
        """
        # Phase 1: preprocessing and validation
        spider_classes_to_run = self._resolve_spiders_to_run(spiders)
        total = len(spider_classes_to_run)

        if total == 0:
            raise ValueError("At least one spider class or name must be provided")

        # Phase 2: initialize context and monitoring
        for _ in range(total):
            self.context.increment_total()

        # Start the monitoring task
        await self.start_monitoring()

        try:
            # Phase 3: sort by class name to keep the startup order predictable
            spider_classes_to_run.sort(key=lambda cls: cls.__name__.lower())

            logger.info(
                f"Starting {total} crawlers\n"
                f" - max concurrency: {self.max_concurrency}\n"
                f" - spiders: {[cls.__name__ for cls in spider_classes_to_run]}"
            )

            # Phase 4: launch all crawler tasks
            tasks = [
                asyncio.create_task(
                    self._run_spider_with_limit(spider_cls, index + 1, total),
                    name=f"spider-{spider_cls.__name__}-{index+1}"
                )
                for index, spider_cls in enumerate(spider_classes_to_run)
            ]

            # Phase 5: wait for all tasks to finish (failures do not abort the run)
            results = await asyncio.gather(*tasks, return_exceptions=True)

            # Phase 6: tally exceptions and results
            failed = [i for i, r in enumerate(results) if isinstance(r, Exception)]
            successful = total - len(failed)

            if failed:
                failed_spiders = [spider_classes_to_run[i].__name__ for i in failed]
                logger.error(
                    f"Crawler results: {successful}/{total} succeeded, {len(failed)}/{total} failed\n"
                    f" - failed spiders: {failed_spiders}"
                )

                # Log detailed error information
                for i in failed:
                    error = results[i]
                    logger.error(f"Crawler {spider_classes_to_run[i].__name__} error details: {error}")
            else:
                logger.info(f"All {total} crawlers completed successfully! 🎉")

            # Return summary statistics
            return {
                'total': total,
                'successful': successful,
                'failed': len(failed),
                'success_rate': (successful / total) * 100 if total > 0 else 0,
                'context_stats': self.context.get_stats()
            }

        finally:
            # Phase 7: cleanup and shutdown
            await self.stop_monitoring()
            await self._cleanup_process()

    async def _cleanup_process(self):
        """Clean up process resources."""
        try:
            # Wait for all active crawlers to finish
            if self.crawlers:
                close_tasks = [crawler.close() for crawler in self.crawlers]
                await asyncio.gather(*close_tasks, return_exceptions=True)
                self.crawlers.clear()

            # Clean up active tasks
            if self._active_tasks:
                for task in list(self._active_tasks):
                    if not task.done():
                        task.cancel()
                await asyncio.gather(*self._active_tasks, return_exceptions=True)
                self._active_tasks.clear()

            logger.debug("Process resource cleanup finished")

        except Exception as e:
            logger.error(f"Error while cleaning up process resources: {e}", exc_info=True)

    def get_process_stats(self) -> Dict[str, Any]:
        """Return process statistics."""
        context_stats = self.context.get_stats()

        return {
            'context': context_stats,
            'performance': self._performance_stats.copy(),
            'crawlers': {
                'total_registered': len(self._spider_registry),
                'active_crawlers': len(self.crawlers),
                'max_concurrency': self.max_concurrency
            },
            'registry': {
                'spider_names': list(self._spider_registry.keys()),
                'spider_classes': [cls.__name__ for cls in self._spider_registry.values()]
            }
        }

    def _resolve_spiders_to_run(
        self,
        spiders_input: Union[Type[Spider], str, List[Union[Type[Spider], str]]]
    ) -> List[Type[Spider]]:
        """
        Resolve the input into a list of spider classes.

        Supports multiple input formats and validates name uniqueness.
        """
        inputs = self._normalize_inputs(spiders_input)
        seen_spider_names: Set[str] = set()
        spider_classes: List[Type[Spider]] = []

        for item in inputs:
            try:
                spider_cls = self._resolve_spider_class(item)
                spider_name = getattr(spider_cls, 'name', None)

                if not spider_name:
                    raise ValueError(f"Spider class {spider_cls.__name__} is missing a 'name' attribute")

                if spider_name in seen_spider_names:
                    raise ValueError(
                        f"Duplicate spider name '{spider_name}' in this run.\n"
                        f"Make sure every spider's name attribute is unique within a run."
                    )

                seen_spider_names.add(spider_name)
                spider_classes.append(spider_cls)

                logger.debug(f"Resolved spider: {item} -> {spider_cls.__name__} (name='{spider_name}')")

            except Exception as e:
                logger.error(f"Failed to resolve spider: {item} - {e}")
                raise

        return spider_classes

    @staticmethod
    def _normalize_inputs(spiders_input) -> List[Union[Type[Spider], str]]:
        """
        Normalize the input into a list.

        Supports a wider range of input types with clearer error messages.
        """
        if isinstance(spiders_input, (type, str)):
            return [spiders_input]
        elif isinstance(spiders_input, (list, tuple, set)):
            spider_list = list(spiders_input)
            if not spider_list:
                raise ValueError("The spider list must not be empty")
            return spider_list
        else:
            raise TypeError(
                f"Unsupported type for 'spiders': {type(spiders_input)}\n"
                f"Supported types: a Spider class, a name string, or a list/tuple/set of these"
            )

    def _resolve_spider_class(self, item: Union[Type[Spider], str]) -> Type[Spider]:
        """
        Resolve a single input item into a spider class.

        Provides clearer errors and debugging information.
        """
        if isinstance(item, type) and issubclass(item, Spider):
            # Already a Spider class
            return item
        elif isinstance(item, str):
            # A name string; look it up in the registry
            spider_cls = self._spider_registry.get(item)
            if not spider_cls:
                available_spiders = list(self._spider_registry.keys())
                raise ValueError(
                    f"No spider named '{item}' was found.\n"
                    f"Registered spiders: {available_spiders}\n"
                    f"Check that the name is correct and that the spider has been imported and registered."
                )
            return spider_cls
        else:
            raise TypeError(
                f"Invalid type {type(item)}: {item}\n"
                f"Must be a Spider class or a name string.\n"
                f"Example: MySpider or 'my_spider'"
            )

    async def _run_spider_with_limit(self, spider_cls: Type[Spider], seq: int, total: int):
        """
        Run a crawler under the semaphore limit.

        Includes enhanced error handling and monitoring.
        """
        task = asyncio.current_task()
        crawler = None

        try:
            # Register the task
            if task:
                self._active_tasks.add(task)

            # Acquire a concurrency slot
            await self.semaphore.acquire()

            start_msg = f"[{seq}/{total}] Starting crawler: {spider_cls.__name__}"
            logger.info(start_msg)

            # Create and run the crawler
            crawler = Crawler(spider_cls, self.settings, self.context)
            self.crawlers.add(crawler)

            # Record the start time
            start_time = time.time()

            # Run the crawler
            await crawler.crawl()

            # Compute the run time
            duration = time.time() - start_time

            end_msg = (
                f"[{seq}/{total}] Crawler finished: {spider_cls.__name__}, "
                f"duration: {duration:.2f}s"
            )
            logger.info(end_msg)

            # Record success statistics
            self._performance_stats['successful_requests'] += 1

        except Exception as e:
            # Record failure statistics
            self._performance_stats['failed_requests'] += 1

            error_msg = f"Crawler {spider_cls.__name__} failed: {e}"
            logger.error(error_msg, exc_info=True)

            # Record the error in the context
            if hasattr(self, 'context'):
                self.context.increment_failed(error_msg)

            raise
        finally:
            # Clean up resources
            try:
                if crawler and crawler in self.crawlers:
                    self.crawlers.remove(crawler)

                if task and task in self._active_tasks:
                    self._active_tasks.remove(task)

                self.semaphore.release()

            except Exception as cleanup_error:
                logger.warning(f"Error while cleaning up resources: {cleanup_error}")

    def _shutdown(self, _signum, _frame):
        """
        Graceful shutdown signal handler (enhanced version).

        Provides a better shutdown experience and resource cleanup.
        """
        signal_name = {signal.SIGINT: 'SIGINT', signal.SIGTERM: 'SIGTERM'}.get(_signum, str(_signum))
        logger.warning(f"Received shutdown signal {signal_name}, stopping all crawlers...")

        # Set the shutdown event
        if hasattr(self, '_shutdown_event'):
            self._shutdown_event.set()

        # Stop all crawler engines
        for crawler in list(self.crawlers):
            if crawler.engine:
                crawler.engine.running = False
                crawler.engine.normal = False
                logger.debug(f"Stopped crawler engine: {getattr(crawler.spider, 'name', 'Unknown')}")

        # Schedule the shutdown task
        asyncio.create_task(self._wait_for_shutdown())

        logger.info("Shutdown requested, waiting for crawlers to finish their current work...")

    async def _wait_for_shutdown(self):
        """
        Wait for all active tasks to finish (enhanced version).

        Provides better shutdown timing control and progress feedback.
        """
        try:
            # Stop the monitoring task
            await self.stop_monitoring()

            # Wait for active tasks to finish
            pending = [t for t in self._active_tasks if not t.done()]

            if pending:
                logger.info(
                    f"Waiting for {len(pending)} active tasks to finish..."
                    f"(maximum wait: 30 seconds)"
                )

                # Apply a timeout
                try:
                    await asyncio.wait_for(
                        asyncio.gather(*pending, return_exceptions=True),
                        timeout=30.0
                    )
                except asyncio.TimeoutError:
                    logger.warning("Some tasks timed out, cancelling them...")

                    # Force-cancel tasks that timed out
                    for task in pending:
                        if not task.done():
                            task.cancel()

                    # Wait for the cancellations to complete
                    await asyncio.gather(*pending, return_exceptions=True)

            # Final cleanup
            await self._cleanup_process()

            # Report final statistics
            final_stats = self.context.get_stats()
            logger.info(
                f"All crawlers shut down gracefully 👋\n"
                f" - total crawlers: {final_stats['total_crawlers']}\n"
                f" - completed: {final_stats['completed_crawlers']}\n"
                f" - failed: {final_stats['failed_crawlers']}\n"
                f" - success rate: {final_stats['success_rate']:.1f}%\n"
                f" - total run time: {final_stats['duration_seconds']}s"
            )

        except Exception as e:
            logger.error(f"Error during shutdown: {e}", exc_info=True)

    @classmethod
    def _get_default_settings(cls) -> SettingManager:
        """
        Load the default settings (enhanced version).

        Provides better error handling and a fallback strategy.
        """
        try:
            settings = get_settings()
            logger.debug("Default settings loaded")
            return settings
        except Exception as e:
            logger.warning(f"Could not load default settings: {e}, using empty settings")
            return SettingManager()


# === Utility functions ===

def create_crawler_with_optimizations(
    spider_cls: Type[Spider],
    settings: Optional[SettingManager] = None,
    **optimization_kwargs
) -> Crawler:
    """
    Create an optimized crawler instance.

    :param spider_cls: spider class
    :param settings: settings manager
    :param optimization_kwargs: optimization settings
    :return: crawler instance
    """
    if settings is None:
        settings = SettingManager()

    # Apply optimization settings
    for key, value in optimization_kwargs.items():
        settings.set(key, value)

    context = CrawlerContext()
    return Crawler(spider_cls, settings, context)


def create_process_with_large_scale_config(
    config_type: str = 'balanced',
    concurrency: int = 16,
    **kwargs
) -> CrawlerProcess:
    """
    Create a process manager with large-scale optimizations.

    :param config_type: configuration type ('conservative', 'balanced', 'aggressive', 'memory_optimized')
    :param concurrency: concurrency level
    :param kwargs: additional parameters
    :return: process manager
    """
    try:
        from crawlo.utils.large_scale_config import LargeScaleConfig

        # Fetch the optimization configuration
        config_methods = {
            'conservative': LargeScaleConfig.conservative_config,
            'balanced': LargeScaleConfig.balanced_config,
            'aggressive': LargeScaleConfig.aggressive_config,
            'memory_optimized': LargeScaleConfig.memory_optimized_config
        }

        if config_type not in config_methods:
            logger.warning(f"Unknown configuration type: {config_type}, using default settings")
            settings = SettingManager()
        else:
            config = config_methods[config_type](concurrency)
            settings = SettingManager()
            settings.update(config)

        return CrawlerProcess(
            settings=settings,
            max_concurrency=concurrency,
            **kwargs
        )

    except ImportError:
        logger.warning("Large-scale configuration module not available, using default settings")
        return CrawlerProcess(max_concurrency=concurrency, **kwargs)


# === Exports ===

__all__ = [
    'Crawler',
    'CrawlerProcess',
    'CrawlerContext',
    'create_crawler_with_optimizations',
    'create_process_with_large_scale_config'
]
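Based on the docstrings and signatures above, a minimal end-to-end usage sketch might look as follows. The `myproject.spiders` module path, the `MySpider` class, and the `'another_spider'` name are assumptions for illustration; only `CrawlerProcess`, its `spider_modules` and `max_concurrency` arguments, the `crawl()` call, and the result keys come from the code shown in this diff.

import asyncio

from crawlo.crawler import CrawlerProcess
from myproject.spiders import MySpider  # hypothetical project spider


async def main():
    # Discover project spiders and run at most 4 concurrently
    process = CrawlerProcess(spider_modules=['myproject.spiders'], max_concurrency=4)
    # Mix a spider class with a registered spider name (hypothetical)
    result = await process.crawl([MySpider, 'another_spider'])
    print(f"success rate: {result['success_rate']:.1f}%")


if __name__ == '__main__':
    asyncio.run(main())

Passing a name string only resolves for spiders that ended up in the global registry (for example via `spider_modules` discovery), as `_resolve_spider_class` above shows.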