crawlo-1.1.0-py3-none-any.whl → crawlo-1.1.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of crawlo might be problematic.
- crawlo/__init__.py +34 -24
- crawlo/__version__.py +1 -1
- crawlo/cli.py +40 -40
- crawlo/commands/__init__.py +13 -13
- crawlo/commands/check.py +594 -155
- crawlo/commands/genspider.py +152 -111
- crawlo/commands/list.py +156 -119
- crawlo/commands/run.py +285 -170
- crawlo/commands/startproject.py +196 -101
- crawlo/commands/stats.py +188 -167
- crawlo/commands/utils.py +187 -0
- crawlo/config.py +280 -0
- crawlo/core/__init__.py +2 -2
- crawlo/core/engine.py +171 -158
- crawlo/core/enhanced_engine.py +190 -0
- crawlo/core/processor.py +40 -40
- crawlo/core/scheduler.py +162 -57
- crawlo/crawler.py +1028 -493
- crawlo/downloader/__init__.py +242 -78
- crawlo/downloader/aiohttp_downloader.py +212 -199
- crawlo/downloader/cffi_downloader.py +252 -277
- crawlo/downloader/httpx_downloader.py +257 -246
- crawlo/event.py +11 -11
- crawlo/exceptions.py +78 -78
- crawlo/extension/__init__.py +31 -31
- crawlo/extension/log_interval.py +49 -49
- crawlo/extension/log_stats.py +44 -44
- crawlo/extension/logging_extension.py +34 -34
- crawlo/filters/__init__.py +154 -37
- crawlo/filters/aioredis_filter.py +242 -150
- crawlo/filters/memory_filter.py +269 -202
- crawlo/items/__init__.py +23 -23
- crawlo/items/base.py +21 -21
- crawlo/items/fields.py +53 -53
- crawlo/items/items.py +104 -104
- crawlo/middleware/__init__.py +21 -21
- crawlo/middleware/default_header.py +32 -32
- crawlo/middleware/download_delay.py +28 -28
- crawlo/middleware/middleware_manager.py +135 -135
- crawlo/middleware/proxy.py +248 -245
- crawlo/middleware/request_ignore.py +30 -30
- crawlo/middleware/response_code.py +18 -18
- crawlo/middleware/response_filter.py +26 -26
- crawlo/middleware/retry.py +125 -90
- crawlo/mode_manager.py +201 -0
- crawlo/network/__init__.py +21 -7
- crawlo/network/request.py +311 -203
- crawlo/network/response.py +269 -166
- crawlo/pipelines/__init__.py +13 -13
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/csv_pipeline.py +317 -0
- crawlo/pipelines/json_pipeline.py +219 -0
- crawlo/pipelines/mongo_pipeline.py +116 -116
- crawlo/pipelines/mysql_pipeline.py +195 -195
- crawlo/pipelines/pipeline_manager.py +56 -56
- crawlo/project.py +153 -0
- crawlo/queue/pqueue.py +37 -0
- crawlo/queue/queue_manager.py +304 -0
- crawlo/queue/redis_priority_queue.py +192 -0
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +226 -169
- crawlo/settings/setting_manager.py +99 -99
- crawlo/spider/__init__.py +639 -129
- crawlo/stats_collector.py +59 -59
- crawlo/subscriber.py +106 -106
- crawlo/task_manager.py +30 -27
- crawlo/templates/crawlo.cfg.tmpl +10 -10
- crawlo/templates/project/__init__.py.tmpl +3 -3
- crawlo/templates/project/items.py.tmpl +17 -17
- crawlo/templates/project/middlewares.py.tmpl +87 -76
- crawlo/templates/project/pipelines.py.tmpl +336 -64
- crawlo/templates/project/run.py.tmpl +239 -0
- crawlo/templates/project/settings.py.tmpl +248 -54
- crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
- crawlo/templates/spider/spider.py.tmpl +178 -32
- crawlo/utils/__init__.py +7 -7
- crawlo/utils/controlled_spider_mixin.py +336 -0
- crawlo/utils/date_tools.py +233 -233
- crawlo/utils/db_helper.py +343 -343
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/large_scale_config.py +287 -0
- crawlo/utils/large_scale_helper.py +344 -0
- crawlo/utils/log.py +128 -128
- crawlo/utils/queue_helper.py +176 -0
- crawlo/utils/request.py +267 -267
- crawlo/utils/request_serializer.py +220 -0
- crawlo/utils/spider_loader.py +62 -62
- crawlo/utils/system.py +11 -11
- crawlo/utils/tools.py +4 -4
- crawlo/utils/url.py +39 -39
- crawlo-1.1.2.dist-info/METADATA +567 -0
- crawlo-1.1.2.dist-info/RECORD +108 -0
- examples/__init__.py +7 -0
- tests/__init__.py +7 -7
- tests/test_final_validation.py +154 -0
- tests/test_proxy_health_check.py +32 -32
- tests/test_proxy_middleware_integration.py +136 -136
- tests/test_proxy_providers.py +56 -56
- tests/test_proxy_stats.py +19 -19
- tests/test_proxy_strategies.py +59 -59
- tests/test_redis_config.py +29 -0
- tests/test_redis_queue.py +225 -0
- tests/test_request_serialization.py +71 -0
- tests/test_scheduler.py +242 -0
- crawlo/pipelines/mysql_batch_pipline.py +0 -273
- crawlo/utils/concurrency_manager.py +0 -125
- crawlo/utils/pqueue.py +0 -174
- crawlo/utils/project.py +0 -197
- crawlo-1.1.0.dist-info/METADATA +0 -49
- crawlo-1.1.0.dist-info/RECORD +0 -97
- examples/gxb/items.py +0 -36
- examples/gxb/run.py +0 -16
- examples/gxb/settings.py +0 -72
- examples/gxb/spider/__init__.py +0 -2
- examples/gxb/spider/miit_spider.py +0 -180
- examples/gxb/spider/telecom_device.py +0 -129
- {examples/gxb → crawlo/queue}/__init__.py +0 -0
- {crawlo-1.1.0.dist-info → crawlo-1.1.2.dist-info}/WHEEL +0 -0
- {crawlo-1.1.0.dist-info → crawlo-1.1.2.dist-info}/entry_points.txt +0 -0
- {crawlo-1.1.0.dist-info → crawlo-1.1.2.dist-info}/top_level.txt +0 -0
tests/test_final_validation.py
ADDED

@@ -0,0 +1,154 @@

```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Final validation test: confirm that the distributed queue's logger serialization problem is fully resolved
"""
import asyncio
import pickle
import sys
sys.path.insert(0, "..")

from crawlo.network.request import Request
from crawlo.spider import Spider
from crawlo.core.scheduler import Scheduler
from crawlo.queue.redis_priority_queue import RedisPriorityQueue
from crawlo.utils.log import get_logger
from unittest.mock import Mock


class TestSpider(Spider):
    """Test spider"""
    name = "validation_spider"

    def __init__(self):
        super().__init__()
        # Deliberately attach several loggers to exercise the cleanup
        self.custom_logger = get_logger("custom")
        self.debug_logger = get_logger("debug")
        self.nested_data = {
            'logger': get_logger("nested"),
            'sub': {
                'logger_ref': get_logger("sub_logger")
            }
        }

    def parse(self, response):
        # Verify the main logger is still there
        self.logger.info(f"✅ main logger works: {response.url}")
        return {"url": response.url, "status": "success"}


def test_scheduler_cleaning():
    """Test the scheduler's logger cleanup"""
    print("🔍 Testing scheduler logger cleanup...")

    spider = TestSpider()
    request = Request(
        url="https://scheduler-test.com",
        callback=spider.parse,
        meta={"logger": get_logger("meta_logger")}
    )

    # Mock crawler and scheduler
    class MockCrawler:
        def __init__(self):
            self.spider = spider

    class MockScheduler(Scheduler):
        def __init__(self):
            self.crawler = MockCrawler()
            self.logger = get_logger("MockScheduler")

    scheduler = MockScheduler()

    # Checks before cleanup
    print(f"  🔧 before cleanup - spider.logger: {spider.logger is not None}")
    print(f"  🔧 before cleanup - spider.custom_logger: {spider.custom_logger is not None}")
    print(f"  🔧 before cleanup - request.callback: {request.callback is not None}")

    # Run the cleanup
    cleaned_request = scheduler._deep_clean_loggers(request)

    # Checks after cleanup
    print(f"  ✅ after cleanup - spider.logger: {spider.logger is not None}")
    print(f"  ✅ after cleanup - spider.custom_logger: {spider.custom_logger is None}")
    print(f"  ✅ after cleanup - request.callback: {cleaned_request.callback is None}")

    # Serialization test
    try:
        serialized = pickle.dumps(cleaned_request)
        print(f"  ✅ serialization after scheduler cleanup succeeded, size: {len(serialized)} bytes")
        return True
    except Exception as e:
        print(f"  ❌ serialization after scheduler cleanup failed: {e}")
        return False


async def test_redis_queue_cleaning():
    """Test the Redis queue's logger cleanup"""
    print("\n🔍 Testing Redis queue logger cleanup...")

    spider = TestSpider()
    request = Request(
        url="https://redis-test.com",
        callback=spider.parse,
        meta={"logger": get_logger("meta_logger")}
    )

    try:
        queue = RedisPriorityQueue(redis_url="redis://127.0.0.1:6379/0")
        await queue.connect()

        # Enqueue test
        success = await queue.put(request, priority=0)
        print(f"  ✅ Redis queue enqueue succeeded: {success}")

        if success:
            # Dequeue test
            retrieved = await queue.get(timeout=2.0)
            if retrieved:
                print(f"  ✅ Redis queue dequeue succeeded: {retrieved.url}")
                print(f"  ✅ callback info preserved: {'_callback_info' in retrieved.meta}")
                await queue.close()
                return True
            else:
                print("  ❌ dequeue failed")
                await queue.close()
                return False
        else:
            await queue.close()
            return False

    except Exception as e:
        print(f"  ❌ Redis queue test failed: {e}")
        return False


async def main():
    """Main test entry point"""
    print("🚀 Starting final validation tests...")
    print("=" * 60)

    # Test 1: scheduler cleanup
    scheduler_ok = test_scheduler_cleaning()

    # Test 2: Redis queue cleanup
    redis_ok = await test_redis_queue_cleaning()

    print("\n" + "=" * 60)
    print("📊 Test results:")
    print(f"  scheduler logger cleanup: {'✅ passed' if scheduler_ok else '❌ failed'}")
    print(f"  Redis queue cleanup: {'✅ passed' if redis_ok else '❌ failed'}")

    if scheduler_ok and redis_ok:
        print("\n🎉 All tests passed!")
        print("✅ The distributed queue's logger serialization problem is fully fixed!")
        print("✅ Crawlo can now use the Redis distributed queue normally!")
        return True
    else:
        print("\n❌ Some tests failed and need further fixes")
        return False


if __name__ == "__main__":
    asyncio.run(main())
```
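The script exercises `Scheduler._deep_clean_loggers`, whose apparent job is to null out logger references (and the bound-method callback) on a request so it can be pickled into the Redis queue; a custom logger object holding handlers, locks, or open streams can make `pickle.dumps` fail. As a generic illustration of that pattern, not crawlo's implementation, a recursive cleaner could look like the sketch below; `strip_unpicklable` and `Payload` are hypothetical names:

```python
import logging
import pickle


def strip_unpicklable(value, _seen=None):
    """Recursively replace Logger references with None inside dicts and
    plain objects so the container can be pickled safely."""
    if _seen is None:
        _seen = set()
    if id(value) in _seen:          # guard against reference cycles
        return value
    _seen.add(id(value))

    if isinstance(value, logging.Logger):
        return None
    if isinstance(value, dict):
        return {k: strip_unpicklable(v, _seen) for k, v in value.items()}
    if hasattr(value, "__dict__"):  # plain object: clean its attributes in place
        for name, attr in vars(value).items():
            setattr(value, name, strip_unpicklable(attr, _seen))
    return value


class Payload:
    def __init__(self):
        self.logger = logging.getLogger("payload")
        self.meta = {"logger": logging.getLogger("meta"), "url": "https://example.com"}


p = strip_unpicklable(Payload())
assert p.logger is None and p.meta["logger"] is None
print(len(pickle.dumps(p)), "bytes")  # safe to enqueue now
```

A real implementation would also have to restore or re-resolve the stripped callback on the consumer side, which is presumably what the `_callback_info` key checked above is for.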
tests/test_proxy_health_check.py
CHANGED

@@ -1,33 +1,33 @@
The removed and re-added halves of this hunk are textually identical in this view (the change is likely whitespace or line endings, which the extraction does not preserve). The file reads:

```python
# tests/test_proxy_health_check.py
import pytest
from unittest.mock import AsyncMock, patch
from crawlo.proxy.health_check import check_single_proxy
import httpx


@pytest.mark.asyncio
@patch('httpx.AsyncClient')
async def test_health_check_success(mock_client_class):
    """Health check: success"""
    mock_resp = AsyncMock()
    mock_resp.status_code = 200
    mock_client_class.return_value.__aenter__.return_value.get.return_value = mock_resp

    proxy_info = {'url': 'http://good:8080', 'healthy': False}
    await check_single_proxy(proxy_info)

    assert proxy_info['healthy'] is True
    assert proxy_info['failures'] == 0


@pytest.mark.asyncio
@patch('httpx.AsyncClient')
async def test_health_check_failure(mock_client_class):
    """Health check: failure"""
    mock_client_class.return_value.__aenter__.return_value.get.side_effect = httpx.ConnectError("Failed")

    proxy_info = {'url': 'http://bad:8080', 'healthy': True, 'failures': 0}
    await check_single_proxy(proxy_info)

    assert proxy_info['healthy'] is False
    assert proxy_info['failures'] == 1
```
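The two tests fully determine `check_single_proxy`'s observable behavior: on an HTTP 200 it sets `healthy` to True and resets `failures` to 0; on a connection error it sets `healthy` to False and increments `failures`. A sketch consistent with that contract follows; the probe URL, the timeout, and the `proxy=` keyword (httpx 0.26+) are assumptions, since the real module body is not shown in this diff:

```python
import httpx

CHECK_URL = "https://httpbin.org/ip"  # hypothetical probe target; not shown in this diff


async def check_single_proxy(proxy_info: dict) -> None:
    """Probe one proxy and update its health fields in place."""
    try:
        async with httpx.AsyncClient(proxy=proxy_info["url"], timeout=5.0) as client:
            resp = await client.get(CHECK_URL)
        healthy = resp.status_code == 200
    except httpx.HTTPError:  # ConnectError, timeouts, and other transport errors
        healthy = False

    if healthy:
        proxy_info["healthy"] = True
        proxy_info["failures"] = 0
    else:
        proxy_info["healthy"] = False
        proxy_info["failures"] = proxy_info.get("failures", 0) + 1
```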
tests/test_proxy_middleware_integration.py
CHANGED

@@ -1,137 +1,137 @@
As above, the removed and re-added halves are textually identical in this view. The file reads:

```python
# tests/test_proxy_middleware_integration.py
import pytest
import asyncio
import time
from unittest.mock import Mock, AsyncMock, patch
from crawlo import Request, Response, Spider
from crawlo.proxy.middleware import ProxyMiddleware
from crawlo.proxy.stats import ProxyStats


@pytest.fixture
def crawler():
    class MockSettings:
        def get(self, key, default=None):
            defaults = {
                'PROXY_ENABLED': True,
                'PROXIES': ['http://p1:8080', 'http://p2:8080'],
                'PROXY_SELECTION_STRATEGY': 'random',
                'PROXY_REQUEST_DELAY_ENABLED': False,
                'PROXY_MAX_RETRY_COUNT': 1,
            }
            return defaults.get(key, default)

        def get_bool(self, key, default=None):
            return self.get(key, default)

        def get_int(self, key, default=None):
            return self.get(key, default)

        def get_float(self, key, default=None):
            return self.get(key, default)

        def get_list(self, key, default=None):
            return self.get(key, default)

    class MockCrawler:
        def __init__(self):
            self.settings = MockSettings()

    return MockCrawler()


@pytest.fixture
def middleware(crawler):
    mw = ProxyMiddleware.create_instance(crawler)
    mw._load_providers = Mock()
    mw._update_proxy_pool = AsyncMock()
    mw._health_check = AsyncMock()
    mw.scheduler = None

    mw.proxies = [
        {
            'url': 'http://p1:8080',
            'healthy': True,
            'failures': 0,
            'last_health_check': 0,
            'unhealthy_since': 0
        },
        {
            'url': 'http://p2:8080',
            'healthy': True,
            'failures': 0,
            'last_health_check': 0,
            'unhealthy_since': 0
        },
    ]
    mw.stats = ProxyStats()
    for p in mw.proxies:
        mw.stats.record(p['url'], 'total')

    asyncio.get_event_loop().run_until_complete(mw._initial_setup())
    return mw


@pytest.fixture
def spider():
    return Mock(spec=Spider, logger=Mock())


def test_process_request_sets_proxy(middleware, spider):
    request = Request("https://example.com")
    result = asyncio.get_event_loop().run_until_complete(
        middleware.process_request(request, spider)
    )
    assert result is None
    assert hasattr(request, 'proxy')
    assert request.proxy in ['http://p1:8080', 'http://p2:8080']


def test_process_response_records_success(middleware, spider):
    request = Request("https://example.com")
    request.proxy = 'http://p1:8080'
    response = Response("https://example.com", body=b"ok", headers={})
    middleware.stats.record(request.proxy, 'total')
    middleware.process_response(request, response, spider)
    assert middleware.stats.get(request.proxy)['success'] == 1


def test_process_exception_switches_proxy(middleware, spider):
    request = Request("https://example.com")
    request.proxy = 'http://p1:8080'
    request.meta['proxy_retry_count'] = 0

    result = middleware.process_exception(request, Exception("Timeout"), spider)
    assert result is not None
    assert result.proxy != 'http://p1:8080'
    assert result.meta['proxy_retry_count'] == 1

    final = middleware.process_exception(result, Exception("Timeout"), spider)
    assert final is None


def test_mark_failure_disables_proxy(middleware):
    proxy_url = 'http://p1:8080'
    p = next(p for p in middleware.proxies if p['url'] == proxy_url)
    p['failures'] = 2

    middleware._mark_failure(proxy_url)
    assert p['failures'] == 3
    assert p['healthy'] is False
    assert p['unhealthy_since'] > 0


@pytest.mark.asyncio
async def test_request_delay(middleware, spider):
    """Request-delay test: verify that asyncio.sleep is called"""
    with patch("crawlo.proxy.middleware.asyncio.sleep", new_callable=AsyncMock) as mock_sleep:
        middleware.delay_enabled = True  # note: the attribute is delay_enabled, not request_delay_enabled
        middleware.request_delay = 0.1
        middleware._last_req_time = time.time() - 0.05  # 50 ms ago

        request = Request("https://a.com")
        await middleware.process_request(request, spider)

        mock_sleep.assert_called_once()
        delay = mock_sleep.call_args[0][0]
        assert 0.04 <= delay <= 0.06
```
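One wart in the quoted suite: the `middleware` fixture and `test_process_request_sets_proxy` drive coroutines through `asyncio.get_event_loop().run_until_complete(...)`, which relies on implicit loop creation and is deprecated on newer Python versions. Since the suite already uses pytest-asyncio, an async fixture would avoid this; a minimal sketch reusing the names from the file above, with the mock setup lines elided:

```python
import pytest_asyncio  # part of the pytest-asyncio plugin this suite already uses


@pytest_asyncio.fixture
async def middleware(crawler):
    mw = ProxyMiddleware.create_instance(crawler)
    mw._load_providers = Mock()
    mw._update_proxy_pool = AsyncMock()
    mw._health_check = AsyncMock()
    mw.scheduler = None
    # ... same proxy-pool and stats setup as in the fixture above ...
    await mw._initial_setup()  # awaited directly on the plugin-managed event loop
    return mw
```

Tests that consume the fixture would then be marked `@pytest.mark.asyncio` and `await` the middleware calls instead of calling `run_until_complete`.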
tests/test_proxy_providers.py
CHANGED

@@ -1,57 +1,57 @@
As above, the removed and re-added halves are textually identical in this view. The file reads (note the duplicated `import pytest`, present in the package source):

```python
# tests/test_proxy_providers.py
import pytest
import pytest
import respx
from httpx import Response
from crawlo.proxy.providers import StaticProxyProvider, FileProxyProvider, APIProxyProvider
import tempfile
import os


@pytest.mark.asyncio
async def test_static_provider():
    """Test the static proxy provider"""
    provider = StaticProxyProvider(['http://1.1.1.1:8080', 'http://2.2.2.2:8080'])
    proxies = await provider.fetch_proxies()
    assert len(proxies) == 2
    assert 'http://1.1.1.1:8080' in proxies
    assert 'http://2.2.2.2:8080' in proxies


@pytest.mark.asyncio
async def test_file_provider():
    """Test the file proxy provider"""
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
        f.write("http://a.com:8080\nhttp://b.com:8080\n")
        temp_path = f.name
    try:
        provider = FileProxyProvider(temp_path)
        proxies = await provider.fetch_proxies()
        assert len(proxies) == 2
        assert 'http://a.com:8080' in proxies
        assert 'http://b.com:8080' in proxies
    finally:
        os.unlink(temp_path)


@pytest.mark.asyncio
@respx.mock
async def test_api_provider():
    """Use respx to intercept HTTP requests; simpler and more reliable"""
    # Intercept the GET request
    respx.get("https://api.example.com").mock(
        return_value=Response(
            200,
            json=[
                {"ip": "1.1.1.1", "port": 8080},
                {"ip": "2.2.2.2", "port": 8080}
            ]
        )
    )

    provider = APIProxyProvider(url="https://api.example.com")
    proxies = await provider.fetch_proxies()

    assert len(proxies) == 2
    assert "http://1.1.1.1:8080" in proxies
    assert "http://2.2.2.2:8080" in proxies
```
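These tests pin down the provider contract: each provider exposes an async `fetch_proxies()` that returns a list of proxy URL strings, `FileProxyProvider` reads one URL per line, and `APIProxyProvider` fetches a JSON array of `{"ip", "port"}` records and normalizes them. A minimal sketch of two implementations satisfying that contract (these are assumptions, not crawlo's actual code):

```python
import httpx


class FileProxyProvider:
    """Sketch: read one proxy URL per line from a text file."""

    def __init__(self, path: str):
        self.path = path

    async def fetch_proxies(self) -> list[str]:
        with open(self.path, encoding="utf-8") as f:
            return [line.strip() for line in f if line.strip()]


class APIProxyProvider:
    """Sketch: fetch a JSON list of {"ip", "port"} records and normalize them."""

    def __init__(self, url: str):
        self.url = url

    async def fetch_proxies(self) -> list[str]:
        async with httpx.AsyncClient() as client:
            resp = await client.get(self.url)
        return [f"http://{rec['ip']}:{rec['port']}" for rec in resp.json()]
```

Because `APIProxyProvider` goes through httpx, `respx.mock` in the test above can intercept its request without touching the network.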
tests/test_proxy_stats.py
CHANGED

@@ -1,20 +1,20 @@
As above, the removed and re-added halves are textually identical in this view. The file reads:

```python
# tests/test_proxy_stats.py
from crawlo.proxy.stats import ProxyStats


def test_proxy_stats():
    """Test proxy statistics"""
    stats = ProxyStats()
    url = 'http://proxy1:8080'

    stats.record(url, 'success')
    stats.record(url, 'success')
    stats.record(url, 'failure')

    assert stats.get(url)['success'] == 2
    assert stats.get(url)['failure'] == 1
    assert stats.get(url)['total'] == 3

    all_data = stats.all()
    assert url in all_data
    assert all_data[url]['success'] == 2
```
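This test likewise pins the `ProxyStats` surface: `record(url, event)` counts events per proxy, `get(url)` returns the per-event counts plus a `total`, and `all()` maps every proxy to its counts. A minimal sketch consistent with the assertions (deriving `total` as the sum of recorded events is an assumption about how crawlo computes it):

```python
from collections import Counter, defaultdict


class ProxyStats:
    """Sketch: per-proxy event counters with a derived total."""

    def __init__(self):
        self._counts = defaultdict(Counter)

    def record(self, url: str, event: str) -> None:
        self._counts[url][event] += 1

    def get(self, url: str) -> dict:
        counts = dict(self._counts[url])
        # 'total' is derived from every event recorded for this proxy.
        counts["total"] = sum(self._counts[url].values())
        return counts

    def all(self) -> dict:
        return {url: self.get(url) for url in self._counts}
```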