crawlo-1.1.2-py3-none-any.whl → crawlo-1.1.3-py3-none-any.whl

This diff shows the content changes between two publicly released versions of the crawlo package, as they appear in their respective public registries. The information is provided for informational purposes only.

Potentially problematic release: this version of crawlo has been flagged as possibly problematic.
- crawlo/__init__.py +34 -34
- crawlo/__version__.py +1 -1
- crawlo/cli.py +40 -40
- crawlo/commands/__init__.py +13 -13
- crawlo/commands/check.py +594 -594
- crawlo/commands/genspider.py +151 -151
- crawlo/commands/list.py +155 -155
- crawlo/commands/run.py +285 -285
- crawlo/commands/startproject.py +196 -196
- crawlo/commands/stats.py +188 -188
- crawlo/commands/utils.py +186 -186
- crawlo/config.py +279 -279
- crawlo/core/__init__.py +2 -2
- crawlo/core/engine.py +171 -171
- crawlo/core/enhanced_engine.py +189 -189
- crawlo/core/processor.py +40 -40
- crawlo/core/scheduler.py +166 -162
- crawlo/crawler.py +1027 -1027
- crawlo/downloader/__init__.py +242 -242
- crawlo/downloader/aiohttp_downloader.py +212 -212
- crawlo/downloader/cffi_downloader.py +251 -251
- crawlo/downloader/httpx_downloader.py +259 -257
- crawlo/event.py +11 -11
- crawlo/exceptions.py +82 -78
- crawlo/extension/__init__.py +31 -31
- crawlo/extension/log_interval.py +49 -49
- crawlo/extension/log_stats.py +44 -44
- crawlo/extension/logging_extension.py +34 -34
- crawlo/filters/__init__.py +154 -154
- crawlo/filters/aioredis_filter.py +242 -242
- crawlo/filters/memory_filter.py +269 -269
- crawlo/items/__init__.py +23 -23
- crawlo/items/base.py +21 -21
- crawlo/items/fields.py +53 -53
- crawlo/items/items.py +104 -104
- crawlo/middleware/__init__.py +21 -21
- crawlo/middleware/default_header.py +32 -32
- crawlo/middleware/download_delay.py +28 -28
- crawlo/middleware/middleware_manager.py +135 -135
- crawlo/middleware/proxy.py +248 -248
- crawlo/middleware/request_ignore.py +30 -30
- crawlo/middleware/response_code.py +18 -18
- crawlo/middleware/response_filter.py +26 -26
- crawlo/middleware/retry.py +125 -125
- crawlo/mode_manager.py +200 -200
- crawlo/network/__init__.py +21 -21
- crawlo/network/request.py +311 -311
- crawlo/network/response.py +271 -269
- crawlo/pipelines/__init__.py +22 -13
- crawlo/pipelines/bloom_dedup_pipeline.py +157 -0
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/csv_pipeline.py +316 -316
- crawlo/pipelines/database_dedup_pipeline.py +225 -0
- crawlo/pipelines/json_pipeline.py +218 -218
- crawlo/pipelines/memory_dedup_pipeline.py +116 -0
- crawlo/pipelines/mongo_pipeline.py +116 -116
- crawlo/pipelines/mysql_pipeline.py +195 -195
- crawlo/pipelines/pipeline_manager.py +56 -56
- crawlo/pipelines/redis_dedup_pipeline.py +163 -0
- crawlo/project.py +153 -153
- crawlo/queue/pqueue.py +37 -37
- crawlo/queue/queue_manager.py +307 -303
- crawlo/queue/redis_priority_queue.py +208 -191
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +245 -226
- crawlo/settings/setting_manager.py +99 -99
- crawlo/spider/__init__.py +639 -639
- crawlo/stats_collector.py +59 -59
- crawlo/subscriber.py +106 -106
- crawlo/task_manager.py +30 -30
- crawlo/templates/crawlo.cfg.tmpl +10 -10
- crawlo/templates/project/__init__.py.tmpl +3 -3
- crawlo/templates/project/items.py.tmpl +17 -17
- crawlo/templates/project/middlewares.py.tmpl +86 -86
- crawlo/templates/project/pipelines.py.tmpl +341 -335
- crawlo/templates/project/run.py.tmpl +251 -238
- crawlo/templates/project/settings.py.tmpl +250 -247
- crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
- crawlo/templates/spider/spider.py.tmpl +177 -177
- crawlo/utils/__init__.py +7 -7
- crawlo/utils/controlled_spider_mixin.py +439 -335
- crawlo/utils/date_tools.py +233 -233
- crawlo/utils/db_helper.py +343 -343
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/large_scale_config.py +286 -286
- crawlo/utils/large_scale_helper.py +343 -343
- crawlo/utils/log.py +128 -128
- crawlo/utils/queue_helper.py +175 -175
- crawlo/utils/request.py +267 -267
- crawlo/utils/request_serializer.py +219 -219
- crawlo/utils/spider_loader.py +62 -62
- crawlo/utils/system.py +11 -11
- crawlo/utils/tools.py +4 -4
- crawlo/utils/url.py +39 -39
- {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/METADATA +635 -567
- crawlo-1.1.3.dist-info/RECORD +113 -0
- examples/__init__.py +7 -7
- examples/controlled_spider_example.py +205 -0
- tests/__init__.py +7 -7
- tests/test_final_validation.py +153 -153
- tests/test_proxy_health_check.py +32 -32
- tests/test_proxy_middleware_integration.py +136 -136
- tests/test_proxy_providers.py +56 -56
- tests/test_proxy_stats.py +19 -19
- tests/test_proxy_strategies.py +59 -59
- tests/test_redis_config.py +28 -28
- tests/test_redis_queue.py +224 -224
- tests/test_request_serialization.py +70 -70
- tests/test_scheduler.py +241 -241
- crawlo-1.1.2.dist-info/RECORD +0 -108
- {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/WHEEL +0 -0
- {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/entry_points.txt +0 -0
- {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/top_level.txt +0 -0
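The most substantive additions in this release are the four new dedup pipelines (bloom_dedup_pipeline.py, database_dedup_pipeline.py, memory_dedup_pipeline.py, and redis_dedup_pipeline.py, each showing +N -0 above) and the expanded controlled_spider_mixin.py. As background only, here is a minimal sketch of the in-memory dedup idea, not crawlo's implementation: an item-dedup pipeline typically fingerprints each item and drops repeats.

import hashlib
import json

class MemoryDedupSketch:
    """Illustrative only: fingerprint items and drop duplicates in memory."""

    def __init__(self):
        self.seen = set()  # fingerprints of items already passed through

    def fingerprint(self, item: dict) -> str:
        # Stable hash over the sorted key/value pairs of the item
        payload = json.dumps(item, sort_keys=True, ensure_ascii=False)
        return hashlib.sha256(payload.encode("utf-8")).hexdigest()

    def process_item(self, item: dict):
        fp = self.fingerprint(item)
        if fp in self.seen:
            return None  # duplicate: drop it
        self.seen.add(fp)
        return item

if __name__ == "__main__":
    pipe = MemoryDedupSketch()
    a = {"url": "https://example.com/1", "title": "t"}
    print(pipe.process_item(a) is not None)  # True (first occurrence kept)
    print(pipe.process_item(a) is not None)  # False (duplicate dropped)

The Bloom and Redis variants trade this exact in-process set for a probabilistic filter or a shared Redis key, respectively, which is what makes dedup workable at larger scales and across processes.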
crawlo-1.1.3.dist-info/RECORD
ADDED

@@ -0,0 +1,113 @@
+crawlo/__init__.py,sha256=-9NQRRYzQvX-sVT-phr3qiJRePbs1G2Gd6h76y8UpZU,859
+crawlo/__version__.py,sha256=MNg8Ih17kliJwlnU9Nip6ZLjE9MDf-ibrAgQJVctAy4,23
+crawlo/cli.py,sha256=hjAJKx9pba375sATvvcy-dtZyBIgXj8fRBq9RFIZHA4,1206
+crawlo/config.py,sha256=ShwUnG3K3JmYeIg3oFzHHRYDs1vJQ-H71CPaq0rNNZ0,8608
+crawlo/crawler.py,sha256=mZknc51PvB_2tGFhxe4DRMvoMH9qO8CydlIRysvLZC4,37488
+crawlo/event.py,sha256=ZhoPW5CglCEuZNFEwviSCBIw0pT5O6jT98bqYrDFd3E,324
+crawlo/exceptions.py,sha256=YVIDnC1bKSMv3fXH_6tinWMuD9HmKHIaUfO4_fkX5sY,1247
+crawlo/mode_manager.py,sha256=n9-NPR7KK7SkoCoI8JdQkVMwL5_9T4MzdSfhTg-BfV4,6902
+crawlo/project.py,sha256=DOf_zzdA_A_nilff6Dp5KJXA6KphHYMalAYv336-cO8,5335
+crawlo/stats_collector.py,sha256=v4jC9BAe-23w93hWzbeMCCgQ9VuFPyxw5JV9ItbGH8w,1636
+crawlo/subscriber.py,sha256=udlHeTR0ymGQhCDxVUGwUzeeeR4TYCEJrJwFnkgr0cU,3836
+crawlo/task_manager.py,sha256=PScfEB03306Txa0l38AeQ_0WVhKzeWOFyT3bnrkbHW0,849
+crawlo/commands/__init__.py,sha256=kZ3qATqDPmMUCNUQSFfBfIA8fp_1dgBwIAWbmFN3_To,355
+crawlo/commands/check.py,sha256=jW8SgfkOS35j4VS7nRZBZdFCBX9CVFez5LR2sfP_H1U,23437
+crawlo/commands/genspider.py,sha256=_3GwFMYK79BuKk__5L0ljuwWwOzN80MeuhRkL4Ql11A,5201
+crawlo/commands/list.py,sha256=octTk0QZhapiyM7WgCPersP2v3MesthbJeG9vMqVFOs,5936
+crawlo/commands/run.py,sha256=m7SFTxmw4mZJ_eS1a9fHG-c6FvQcRHXfW71xenYBYYc,10809
+crawlo/commands/startproject.py,sha256=G88oa5OdK1wCFBCMpOm3P7ZM1Ahv5Lq_HUq-XysxZwU,7334
+crawlo/commands/stats.py,sha256=iEKdxHoqsJuTkn8zAF9ekBVO1--8__BeD7xohYG5NwE,6252
+crawlo/commands/utils.py,sha256=b7yW6UlOLprR3gN9oOdhcl3fsCwWRE3-_gDxWz5xhMo,5292
+crawlo/core/__init__.py,sha256=JYSAn15r8yWgRK_Nc69t_8tZCyb70MiPZKssA8wrYz0,43
+crawlo/core/engine.py,sha256=vQPuk6gMcmJnPfMSg8-RrRpRSXg_i7ZraJfgpvz-NwE,6833
+crawlo/core/enhanced_engine.py,sha256=5cxv8EVszB6yrpW32m4CF73UMuv4tOzVjejVB7pK1u0,6443
+crawlo/core/processor.py,sha256=oHLs-cno0bJGTNc9NGD2S7_2-grI3ruvggO0SY2mf3Q,1180
+crawlo/core/scheduler.py,sha256=hnMcHN8nq904ibfGNZpcFbIXAr0shUKmTYgY5eE2-Pc,5969
+crawlo/downloader/__init__.py,sha256=O7T7MjCfiwfqXQYSF59CPsFYZWmfI21weO41J-SxcRM,7905
+crawlo/downloader/aiohttp_downloader.py,sha256=wzJsOlMx_sPSrQ-OF71EcB1j6p7zTdmYevYdBsPN-34,8217
+crawlo/downloader/cffi_downloader.py,sha256=swia_ZqJbbBCzhkqaG7EEVdvj6S123d0kfA6QscqF5I,10691
+crawlo/downloader/httpx_downloader.py,sha256=MpgDeIdGqNsiSKLOEDBnr5Z0eUbhHnqVEmAuoIfJmFU,12296
+crawlo/extension/__init__.py,sha256=wCyNLL-IdjoITKA_mWjr4PjIsUtWKogj-TFfFhDavf4,1164
+crawlo/extension/log_interval.py,sha256=FOWeTOuWtOpCz2UPV5F_--QIa8yomltSpjxbw3F7bkU,1971
+crawlo/extension/log_stats.py,sha256=JFJsdK7UWkhP4TEAF-H-S7SpQbDpBryS0AT6e6jZCBo,1721
+crawlo/extension/logging_extension.py,sha256=rty2_up53KV05nCazuBuz2ZapHKq0ti7mGVBzMTr0ak,1236
+crawlo/filters/__init__.py,sha256=lX-QOCDTiTRFoiK1qrZ5HABo7LgZfcxScx_lELYEvJk,4395
+crawlo/filters/aioredis_filter.py,sha256=LbUXtfJXyOF9L-Fxztka8djTtEaFDsNNCydaE9aDqUM,8611
+crawlo/filters/memory_filter.py,sha256=FzGJPhVKfZ8P23kP6de-VSfE8oVMjjpfWzKJIdiMtZU,9529
+crawlo/items/__init__.py,sha256=rFpx1qFBo0Ik7bSdnXC8EVTJUOQdoJYGVdhYjaH00nk,409
+crawlo/items/base.py,sha256=hwGJEdFWOdaZfalFX8umRkh_HUWLEbCjvq4j70fplMQ,598
+crawlo/items/fields.py,sha256=fpS0vlRPpZYjTaMDgI9Q8z_YQqruwf6fi4Dgm6R2oEk,1854
+crawlo/items/items.py,sha256=OmVEvMmgofMU95GkaiWkfNQ2fjsH2fY9sw3SKcmUhLs,3478
+crawlo/middleware/__init__.py,sha256=PSwpRLdBUopaQzBp1S0zK_TZbrRagQ4yzvgyLy4tBk8,570
+crawlo/middleware/default_header.py,sha256=OVW4vpRPp3Y6qYXtiEYlGqVjCYcbuv1Iecc7zEgwCsI,1099
+crawlo/middleware/download_delay.py,sha256=P2eyAJXwdLdC4yYuLhvKZVa1b5YQvQD0GpsR8aDW8-8,994
+crawlo/middleware/middleware_manager.py,sha256=G9R9SnDVey_zS2k1zm_358EPHDxRWfP953I4QavL7P0,6348
+crawlo/middleware/proxy.py,sha256=1kdKjKhSkjilJqkBYV8khTK964XbBLxJ4ssh8NPcQSE,10059
+crawlo/middleware/request_ignore.py,sha256=jdybWFVXuA5YsAPfZJFzLTWkYhEAewNgxuhFqczPW9M,1027
+crawlo/middleware/response_code.py,sha256=vgXWv3mMu_v9URvhKA9myIFH4u6L4EwNme80wL4DCGc,677
+crawlo/middleware/response_filter.py,sha256=O2gkV_Yjart8kmmXTGzrtZnb_Uuefap4uL2Cu01iRs4,863
+crawlo/middleware/retry.py,sha256=D_v4-8grofGsLkGJIhLKklMbdHO0gtij7DIS4G52NJc,4248
+crawlo/network/__init__.py,sha256=bvEnpEUBZJ79URfNZbsHhsBKna54hM2-x_BV8eotTA4,418
+crawlo/network/request.py,sha256=qaKIlOg8ETC8vcKQ__M7aD0own4DLi9vqYBg3Getmkg,11386
+crawlo/network/response.py,sha256=3IAmNzV8b-wn4Z-N2mchgn5U7Fcpudl1D-zcY-lH_mw,10009
+crawlo/pipelines/__init__.py,sha256=FDe2Pr5tiHtV8hFlheElRO_O1aVKvSWlkTcAl9BXAKA,637
+crawlo/pipelines/bloom_dedup_pipeline.py,sha256=n0Ay7MtIEJ8L4Otiha4zRvI9toFUSNFugTNubi-Q3aw,5798
+crawlo/pipelines/console_pipeline.py,sha256=bwe5hZgaVSWmh3R8XpOaaeAjJme-Ttrpo6G6f1cnLIg,1287
+crawlo/pipelines/csv_pipeline.py,sha256=qbXZoqq4FIR9QkUGpC0ryWzmqGJSrM2bxmWLM4I1nXM,12490
+crawlo/pipelines/database_dedup_pipeline.py,sha256=6_zKtCNgFBPJyTI3Mk4l75fMQ2UQQGKFfNTpZqGs_zI,8224
+crawlo/pipelines/json_pipeline.py,sha256=wrCsh8YInmcPLAkhPrHObMx89VZfhf-c7qRrYsTixPE,8585
+crawlo/pipelines/memory_dedup_pipeline.py,sha256=oQcBODO-I2p6B7Nm_klXvuhzSMIHP-JWwC4_o6Gkgcc,3954
+crawlo/pipelines/mongo_pipeline.py,sha256=Yr48D0T61-_Y-EpgWXf7BUn9w8e-Pj5P07QDSPZ0pYU,4558
+crawlo/pipelines/mysql_pipeline.py,sha256=bsAFqpxrCijzvX-IusxOtvTvQEUCt5uHNTyYMo_pIq4,8056
+crawlo/pipelines/pipeline_manager.py,sha256=Kw37RC2GESWDnDJ6qIN1MA0qc27Uyhu77ebm1r-FgeU,2168
+crawlo/pipelines/redis_dedup_pipeline.py,sha256=4Mpsscucq2_6UAcKlaa6FIj_KhsX0IeD22cperUcUIo,6165
+crawlo/queue/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+crawlo/queue/pqueue.py,sha256=qTFOuvEXsYEZbm0ULjsOeZo0XtSsZ-SHpx7nFEtmluE,1095
+crawlo/queue/queue_manager.py,sha256=yejs-wasl_gkRZF8qNXV0LX2qIdi0yJrNZa-fkLKnes,11774
+crawlo/queue/redis_priority_queue.py,sha256=N0lD83hmmYi2bRa-e-rnHA8DB5GbdTDavBlHZGcepoQ,8324
+crawlo/settings/__init__.py,sha256=NgYFLfk_Bw7h6KSoepJn_lMBSqVbCHebjKxaE3_eMgw,130
+crawlo/settings/default_settings.py,sha256=Zc1lJwMlPKI6jiG5nX-QORSgg3zqt9jPcCD6HEHnrp8,10104
+crawlo/settings/setting_manager.py,sha256=4xXOzKwZCgAp8ybwvVcs2R--CsOD7c6dBIkj6DJHB3c,2998
+crawlo/spider/__init__.py,sha256=xAH6NfE_6K2aY_VSL9DoGjcmMHJDd5Nxr7TG1Y8vQAE,21091
+crawlo/templates/crawlo.cfg.tmpl,sha256=lwiUVe5sFixJgHFEjn1OtbAeyWsECOrz37uheuVtulk,240
+crawlo/templates/project/__init__.py.tmpl,sha256=aQnHaOjMSkTviOC8COUX0fKymuyf8lx2tGduxkMkXEE,61
+crawlo/templates/project/items.py.tmpl,sha256=bXx-oCldMr2EgBKUAH9LH5gMnbyLiWX-EySAaMzcu2g,318
+crawlo/templates/project/middlewares.py.tmpl,sha256=OgvIfsWyTNbfBwsJqxQiEAWCNjXt8iQVj7Hr26RCEIU,3243
+crawlo/templates/project/pipelines.py.tmpl,sha256=076j3H3dKpmie9VIiIiQiVVlB_tic_hDaN4bJ3VnULc,12775
+crawlo/templates/project/run.py.tmpl,sha256=IBD0F0XEgBR6gR34PhYAjKiZDdvLufZJkABHapTsoYo,8428
+crawlo/templates/project/settings.py.tmpl,sha256=HfsKW1lHG7KFw3lqhHSi3KkhnJ4gO8C9p8zfE5pC30w,8307
+crawlo/templates/project/spiders/__init__.py.tmpl,sha256=zMbePipgLsctQUEnda4WkHz8rDLUX--rc8ruI6zkpWc,111
+crawlo/templates/spider/spider.py.tmpl,sha256=9DEEp8m1zUY022VoNGkauxNXB0UtZlSps1aw3_ywLRs,6344
+crawlo/utils/__init__.py,sha256=XCYumI8wJ1jU_Myn_K0LT-LVygPDUCdETCbXM3EWvlo,130
+crawlo/utils/controlled_spider_mixin.py,sha256=os5v8eFX08Nbqm48W-xWdY04LboqDgOgOL-93IX9zvg,16914
+crawlo/utils/date_tools.py,sha256=lcEFP2Z5b-6pUTHczrzCCuqiHP_4_2zamomMGPZrExo,7194
+crawlo/utils/db_helper.py,sha256=ZqOt1d3mErVv4TOvoWlov0niUxORB9aHByTmMoNFIDw,10902
+crawlo/utils/func_tools.py,sha256=y-TYP9H3X67MS_foWy9Z2LIS6GP7Y4Cy3T168ulq3Jc,2451
+crawlo/utils/large_scale_config.py,sha256=lsraHTAQx3sMPjTnCBY_SVIpkuIBUb3zD9eFvmccOOM,8440
+crawlo/utils/large_scale_helper.py,sha256=ZazAI7KV3V-3hzc4a3BWxTXfEO2XIEBMzxTLM9S1l_Q,12500
+crawlo/utils/log.py,sha256=YD2FfXuuE2MC9ZdQQZ0H7KysE7l_LHZqQepaTPlcApo,4133
+crawlo/utils/queue_helper.py,sha256=gFmkh1jKlIcN1rmo2Jl6vYcLP5ByUWlfHO9eNlZPBLs,4918
+crawlo/utils/request.py,sha256=ejdKpTwc-HE04HQybafhOVywzz57IV3pY0YMkSLyGUo,9065
+crawlo/utils/request_serializer.py,sha256=bZhuonZV2AdB_X0aje7sDljqWAIrEzUYwEaxXytaWsg,8784
+crawlo/utils/spider_loader.py,sha256=V0CBTicJBYBZafhwLfDEfuEc_hJ2mSoiptT6qKufI9U,2249
+crawlo/utils/system.py,sha256=24zGmtHNhDFMGVo7ftMV-Pqg6_5d63zsyNey9udvJJk,248
+crawlo/utils/tools.py,sha256=uy7qw5Z1BIhyEgiHENvtM7WoGCJxlS8EX3PmOA7ouCo,275
+crawlo/utils/url.py,sha256=RKe_iqdjafsNcp-P2GVLYpsL1qbxiuZLiFc-SqOQkcs,1521
+examples/__init__.py,sha256=NkRbV8_S1tb8S2AW6BE2U6P2-eGOPwMR1k0YQAwQpSE,130
+examples/controlled_spider_example.py,sha256=2SAQKoREGHe-OzVaSkGpopCcrou6QXmeW7rLdmsyopw,7981
+tests/__init__.py,sha256=409aRX8hsPffiZCVjOogtxwhACzBp8G2UTJyUQSxhK0,136
+tests/test_final_validation.py,sha256=UNHMOkcOBx9jPdnYuYCF4Cx5GlXakBeHybOP27lpbAg,5078
+tests/test_proxy_health_check.py,sha256=_tDlxa_6TdL3M5RLkHF82roXJ8WIuG5hELBp2GADyKQ,1123
+tests/test_proxy_middleware_integration.py,sha256=mTPK_XvbmLCV_QoVZzA3ybWOOX61493Ew78WfTp-bYQ,4441
+tests/test_proxy_providers.py,sha256=u_R2fhab90vqvQEaOAztpAOe9tJXvUMIdoDxmStmXJ4,1749
+tests/test_proxy_stats.py,sha256=ES00CEoDITYPFBGPk8pecFzD3ItYIv6NSpcqNd8-kvo,526
+tests/test_proxy_strategies.py,sha256=9Z1pXmTNyw-eIhGXlf2abZbJx6igLohYq-_3hldQ5uE,1868
+tests/test_redis_config.py,sha256=Kbl3PURGNM1BUIspakEOA-ZOl2xxTHb_8KbftwjYOsg,921
+tests/test_redis_queue.py,sha256=5LTc86A5qqF5VbmkvkF2OnLAxlJ7ClfJPw0dODxekFk,6846
+tests/test_request_serialization.py,sha256=Jf7Kr7edL0ENwxh8ABa1W_O3dWyqNlvoSfQM1Mykpys,2359
+tests/test_scheduler.py,sha256=elAPFh-Ph49bbJQlTBEsRwzhoX82EdryqQbpc_wsobU,7683
+crawlo-1.1.3.dist-info/METADATA,sha256=o02XZ1pmSz28mIePbTgfDN67y3POps84PFa9b1ImIzY,20601
+crawlo-1.1.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+crawlo-1.1.3.dist-info/entry_points.txt,sha256=5HoVoTSPxI8SCa5B7pQYxLSrkOdiunyO9tqNsLMv52g,43
+crawlo-1.1.3.dist-info/top_level.txt,sha256=keG_67pbZ_wZL2dmDRA9RMaNHTaV_x_oxZ9DKNgwvR0,22
+crawlo-1.1.3.dist-info/RECORD,,
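Each RECORD row has the form path,algorithm=digest,size, where the digest is the urlsafe-base64-encoded SHA-256 of the file with the trailing "=" padding stripped (per the wheel/PEP 376 record format); the RECORD file itself carries empty hash and size fields. A small sketch for checking one entry against a file from an unpacked wheel (paths here are illustrative):

import base64
import hashlib
from pathlib import Path

def record_digest(path: Path) -> str:
    """Wheel-RECORD style digest: urlsafe base64 SHA-256, no '=' padding."""
    digest = hashlib.sha256(path.read_bytes()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

def check_record_entry(root: Path, entry: str) -> bool:
    """Verify one 'path,sha256=digest,size' line against files under root."""
    rel_path, hash_field, size = entry.rsplit(",", 2)
    if hash_field == "":  # the RECORD file itself has empty hash/size fields
        return True
    algo, _, expected = hash_field.partition("=")
    assert algo == "sha256", f"unexpected algorithm: {algo}"
    target = root / rel_path
    return record_digest(target) == expected and target.stat().st_size == int(size)

# Example (assumes the wheel has been unpacked into ./crawlo-1.1.3):
# ok = check_record_entry(Path("crawlo-1.1.3"),
#     "crawlo/__version__.py,sha256=MNg8Ih17kliJwlnU9Nip6ZLjE9MDf-ibrAgQJVctAy4,23")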
examples/__init__.py
CHANGED

@@ -1,7 +1,7 @@
-#!/usr/bin/python
-# -*- coding:UTF-8 -*-
-"""
-# @Time : 2025-02-05 12:36
-# @Author : oscar
-# @Desc : None
-"""
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+"""
+# @Time : 2025-02-05 12:36
+# @Author : oscar
+# @Desc : None
+"""
examples/controlled_spider_example.py
ADDED

@@ -0,0 +1,205 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Usage example for the controlled-spider mixin classes.
+Demonstrates how to use ControlledRequestMixin and AsyncControlledRequestMixin to control large-scale request generation.
+"""
+
+import asyncio
+from crawlo.spider import Spider
+from crawlo.network import Request
+from crawlo.utils.controlled_spider_mixin import ControlledRequestMixin, AsyncControlledRequestMixin
+
+
+class LargeScaleSpider(Spider, ControlledRequestMixin):
+    """
+    Synchronous controlled-spider example.
+    Suited to scenarios that generate a large number of requests but need bounded concurrency.
+    """
+    name = 'large_scale_spider'
+
+    def __init__(self):
+        Spider.__init__(self)
+        ControlledRequestMixin.__init__(self)
+
+        # Controlled-generation parameters
+        self.max_pending_requests = 150    # maximum number of pending requests
+        self.batch_size = 75               # requests generated per batch
+        self.generation_interval = 0.02    # generation interval (seconds)
+        self.backpressure_threshold = 300  # backpressure threshold
+
+    def _original_start_requests(self):
+        """
+        Supply the original bulk of requests.
+        Simulates crawling the product pages of an e-commerce site.
+        """
+        # Simulate crawling 10,000 product pages
+        base_urls = [
+            "https://example-shop.com/products",
+            "https://example-shop.com/electronics",
+            "https://example-shop.com/clothing",
+            "https://example-shop.com/books",
+            "https://example-shop.com/home"
+        ]
+
+        for category in base_urls:
+            # Crawl 2000 pages per category
+            for page in range(1, 2001):
+                yield Request(
+                    url=f"{category}?page={page}",
+                    callback=self.parse_product_list,
+                    meta={'category': category.split('/')[-1], 'page': page}
+                )
+
+    def _process_request_before_yield(self, request):
+        """
+        Process a request before it is yielded.
+        Deduplication, priority, and header logic can be added here.
+        """
+        # Set request headers
+        request.headers.setdefault('User-Agent',
+            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36')
+
+        # Set priority by category
+        category = request.meta.get('category', '')
+        if category == 'electronics':
+            request.priority = 10  # electronics get the highest priority
+        elif category == 'clothing':
+            request.priority = 8
+        else:
+            request.priority = 5
+
+        # Deduplication logic could be added here:
+        # if self.is_duplicate_request(request):
+        #     return None  # skip duplicate requests
+
+        return request
+
+    async def parse_product_list(self, response):
+        """Parse a product-list page"""
+        # Extract product links
+        product_links = response.css('a.product-link::attr(href)').getall()
+
+        for link in product_links:
+            # Generate a detail-page request for each product
+            yield Request(
+                url=response.urljoin(link),
+                callback=self.parse_product_detail,
+                meta={'category': response.meta['category']}
+            )
+
+        # Extract pagination
+        next_page = response.css('a.next-page::attr(href)').get()
+        if next_page:
+            yield Request(
+                url=response.urljoin(next_page),
+                callback=self.parse_product_list,
+                meta=response.meta
+            )
+
+    async def parse_product_detail(self, response):
+        """Parse a product-detail page"""
+        yield {
+            'title': response.css('h1.product-title::text').get(),
+            'price': response.css('.price::text').re_first(r'\d+\.?\d*'),
+            'description': response.css('.product-description::text').get(),
+            'category': response.meta['category'],
+            'url': response.url,
+            'in_stock': 'in-stock' in response.css('.availability::attr(class)').get(''),
+            'rating': response.css('.rating::attr(data-rating)').get(),
+            'reviews_count': response.css('.reviews-count::text').re_first(r'\d+')
+        }
+
+
+class AsyncLargeScaleSpider(Spider, AsyncControlledRequestMixin):
+    """
+    Asynchronous controlled-spider example.
+    Uses asynchronous control for more precise concurrency management.
+    """
+    name = 'async_large_scale_spider'
+
+    def __init__(self):
+        Spider.__init__(self)
+        AsyncControlledRequestMixin.__init__(self)
+
+        # Asynchronous control parameters
+        self.max_concurrent_generations = 15  # maximum number of simultaneous generations
+        self.queue_monitor_interval = 0.5     # queue-monitoring interval
+
+    def _original_start_requests(self):
+        """
+        Supply the original bulk of requests.
+        Simulates crawling articles from news sites.
+        """
+        # Simulate crawling 50,000 news articles
+        news_sites = [
+            "https://news-site-1.com",
+            "https://news-site-2.com",
+            "https://news-site-3.com",
+            "https://tech-news.com",
+            "https://finance-news.com"
+        ]
+
+        categories = ['tech', 'finance', 'sports', 'politics', 'entertainment']
+
+        for site in news_sites:
+            for category in categories:
+                # Crawl 2000 pages per category
+                for page in range(1, 2001):
+                    yield Request(
+                        url=f"{site}/{category}?page={page}",
+                        callback=self.parse_article_list,
+                        meta={'site': site, 'category': category, 'page': page}
+                    )
+
+    def _process_request_before_yield(self, request):
+        """Request pre-processing for the asynchronous variant"""
+        # Set request headers
+        request.headers.setdefault('User-Agent',
+            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36')
+
+        # Set priority by news category
+        category = request.meta.get('category', '')
+        if category in ['tech', 'finance']:
+            request.priority = 10  # tech and finance news get the highest priority
+        elif category in ['sports', 'politics']:
+            request.priority = 8
+        else:
+            request.priority = 5
+
+        # Add a delay (avoid putting too much pressure on the server)
+        site = request.meta.get('site', '')
+        if 'tech-news.com' in site:
+            request.meta['download_delay'] = 0.5  # the tech news site is sensitive; increase the delay
+
+        return request
+
+    async def parse_article_list(self, response):
+        """Parse an article-list page"""
+        # Extract article links
+        article_links = response.css('a.article-link::attr(href)').getall()
+
+        for link in article_links:
+            yield Request(
+                url=response.urljoin(link),
+                callback=self.parse_article_detail,
+                meta={
+                    'site': response.meta['site'],
+                    'category': response.meta['category']
+                }
+            )
+
+    async def parse_article_detail(self, response):
+        """Parse an article-detail page"""
+        yield {
+            'title': response.css('h1.article-title::text').get(),
+            'content': ' '.join(response.css('.article-content p::text').getall()),
+            'author': response.css('.author::text').get(),
+            'publish_date': response.css('.publish-date::attr(datetime)').get(),
+            'category': response.meta['category'],
+            'site': response.meta['site'],
+            'url': response.url,
+            'tags': response.css('.tags a::text').getall(),
+            'views': response.css('.views-count::text').re_first(r'\d+'),
+            'comments': response.css('.comments-count::text').re_first(r'\d+')
+        }
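The parameters in the example above (max_pending_requests, batch_size, generation_interval, backpressure_threshold) configure a producer-side backpressure loop. As a framework-independent illustration of that pattern, here is a minimal asyncio sketch (not crawlo's implementation; names merely mirror the example's parameters): the generator pauses whenever too many requests are still pending.

import asyncio

async def controlled_generate(requests, queue, max_pending=150, batch_size=75, interval=0.02):
    """Feed requests into queue in batches, pausing while too many are pending."""
    count = 0
    for req in requests:
        while queue.qsize() >= max_pending:  # backpressure: wait for the workers
            await asyncio.sleep(interval)
        await queue.put(req)
        count += 1
        if count % batch_size == 0:          # pause briefly between batches
            await asyncio.sleep(interval)
    await queue.put(None)                    # sentinel: no more work

async def worker(queue):
    while (req := await queue.get()) is not None:
        await asyncio.sleep(0.01)            # stand-in for a real download
    await queue.put(None)                    # propagate the sentinel to sibling workers

async def main():
    queue = asyncio.Queue()
    urls = (f"https://example-shop.com/products?page={p}" for p in range(1, 501))
    workers = [asyncio.create_task(worker(queue)) for _ in range(10)]
    await controlled_generate(urls, queue)   # runs alongside the workers
    await asyncio.gather(*workers)

asyncio.run(main())

The point of the pattern is that the producer never materializes millions of Request objects at once; it only ever keeps roughly max_pending in memory, which is what makes the "10,000 product pages" style of start_requests tractable.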
tests/__init__.py
CHANGED

@@ -1,7 +1,7 @@
-#!/usr/bin/python
-# -*- coding:UTF-8 -*-
-"""
-# @Time : 2025-08-24 12:36
-# @Author : crawl-coder
-# @Desc : None
-"""
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+"""
+# @Time : 2025-08-24 12:36
+# @Author : crawl-coder
+# @Desc : None
+"""
tests/test_final_validation.py
CHANGED

@@ -1,154 +1,154 @@
(All 153 removed lines and all 153 re-added lines are textually identical, which points to a line-ending or encoding-only change, so the file body is shown once below. The final line, asyncio.run(main()), is unchanged context.)

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Final validation test: confirm that the distributed queue's logger-serialization problem is fully resolved.
"""
import asyncio
import pickle
import sys
sys.path.insert(0, "..")

from crawlo.network.request import Request
from crawlo.spider import Spider
from crawlo.core.scheduler import Scheduler
from crawlo.queue.redis_priority_queue import RedisPriorityQueue
from crawlo.utils.log import get_logger
from unittest.mock import Mock


class TestSpider(Spider):
    """Test spider"""
    name = "validation_spider"

    def __init__(self):
        super().__init__()
        # Deliberately attach several loggers to exercise the cleanup
        self.custom_logger = get_logger("custom")
        self.debug_logger = get_logger("debug")
        self.nested_data = {
            'logger': get_logger("nested"),
            'sub': {
                'logger_ref': get_logger("sub_logger")
            }
        }

    def parse(self, response):
        # Verify that the main logger is still present
        self.logger.info(f"✅ Main logger working: {response.url}")
        return {"url": response.url, "status": "success"}


def test_scheduler_cleaning():
    """Test the scheduler's logger cleanup"""
    print("🔍 Testing scheduler logger cleanup...")

    spider = TestSpider()
    request = Request(
        url="https://scheduler-test.com",
        callback=spider.parse,
        meta={"logger": get_logger("meta_logger")}
    )

    # Mock crawler and scheduler
    class MockCrawler:
        def __init__(self):
            self.spider = spider

    class MockScheduler(Scheduler):
        def __init__(self):
            self.crawler = MockCrawler()
            self.logger = get_logger("MockScheduler")

    scheduler = MockScheduler()

    # Checks before cleanup
    print(f"  🔧 Before cleanup - spider.logger: {spider.logger is not None}")
    print(f"  🔧 Before cleanup - spider.custom_logger: {spider.custom_logger is not None}")
    print(f"  🔧 Before cleanup - request.callback: {request.callback is not None}")

    # Run the cleanup
    cleaned_request = scheduler._deep_clean_loggers(request)

    # Checks after cleanup
    print(f"  ✅ After cleanup - spider.logger: {spider.logger is not None}")
    print(f"  ✅ After cleanup - spider.custom_logger: {spider.custom_logger is None}")
    print(f"  ✅ After cleanup - request.callback: {cleaned_request.callback is None}")

    # Serialization test
    try:
        serialized = pickle.dumps(cleaned_request)
        print(f"  ✅ Serialization after scheduler cleanup succeeded, size: {len(serialized)} bytes")
        return True
    except Exception as e:
        print(f"  ❌ Serialization after scheduler cleanup failed: {e}")
        return False


async def test_redis_queue_cleaning():
    """Test the Redis queue's logger cleanup"""
    print("\n🔍 Testing Redis queue logger cleanup...")

    spider = TestSpider()
    request = Request(
        url="https://redis-test.com",
        callback=spider.parse,
        meta={"logger": get_logger("meta_logger")}
    )

    try:
        queue = RedisPriorityQueue(redis_url="redis://127.0.0.1:6379/0")
        await queue.connect()

        # Enqueue test
        success = await queue.put(request, priority=0)
        print(f"  ✅ Redis queue enqueue succeeded: {success}")

        if success:
            # Dequeue test
            retrieved = await queue.get(timeout=2.0)
            if retrieved:
                print(f"  ✅ Redis queue dequeue succeeded: {retrieved.url}")
                print(f"  ✅ Callback info preserved: {'_callback_info' in retrieved.meta}")
                await queue.close()
                return True
            else:
                print("  ❌ Dequeue failed")
                await queue.close()
                return False
        else:
            await queue.close()
            return False

    except Exception as e:
        print(f"  ❌ Redis queue test failed: {e}")
        return False


async def main():
    """Main test entry point"""
    print("🚀 Starting final validation tests...")
    print("=" * 60)

    # Test 1: scheduler cleanup
    scheduler_ok = test_scheduler_cleaning()

    # Test 2: Redis queue cleanup
    redis_ok = await test_redis_queue_cleaning()

    print("\n" + "=" * 60)
    print("📊 Test results:")
    print(f"  Scheduler logger cleanup: {'✅ passed' if scheduler_ok else '❌ failed'}")
    print(f"  Redis queue cleanup: {'✅ passed' if redis_ok else '❌ failed'}")

    if scheduler_ok and redis_ok:
        print("\n🎉 All tests passed!")
        print("✅ The distributed queue's logger-serialization problem is fully fixed!")
        print("✅ Crawlo can now use the Redis distributed queue normally!")
        return True
    else:
        print("\n❌ Some tests failed and need further fixes")
        return False


if __name__ == "__main__":
    asyncio.run(main())
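The test above exercises crawlo's internal _deep_clean_loggers, which strips logger references so a Request can be pickled into Redis. As general background, here is a minimal sketch of the underlying technique (not crawlo's code): an object can drop unpicklable attributes such as locks, open handles, or logger wrappers that hold them via __getstate__, and recreate them on load via __setstate__.

import pickle
import threading

class Job:
    """Carries a payload plus an unpicklable lock; __getstate__ drops the lock."""

    def __init__(self, url: str):
        self.url = url
        self._lock = threading.Lock()  # thread locks cannot be pickled

    def __getstate__(self):
        state = self.__dict__.copy()
        state.pop("_lock", None)       # strip the unpicklable attribute
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self._lock = threading.Lock()  # recreate it on the consumer side

job = Job("https://example.com")
data = pickle.dumps(job)               # succeeds because the lock was stripped
restored = pickle.loads(data)
print(restored.url, type(restored._lock).__name__)

Without the __getstate__ hook, pickle.dumps(job) raises "TypeError: cannot pickle '_thread.lock' object", which is the same class of failure the crawlo tests guard against when requests carrying logger references are pushed through the Redis priority queue.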