crawlo-1.1.0-py3-none-any.whl → crawlo-1.1.1-py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of crawlo may be problematic.
- crawlo/__init__.py +33 -24
- crawlo/__version__.py +1 -1
- crawlo/cli.py +40 -40
- crawlo/commands/__init__.py +13 -13
- crawlo/commands/check.py +594 -155
- crawlo/commands/genspider.py +125 -110
- crawlo/commands/list.py +147 -119
- crawlo/commands/run.py +285 -170
- crawlo/commands/startproject.py +111 -101
- crawlo/commands/stats.py +188 -167
- crawlo/core/__init__.py +2 -2
- crawlo/core/engine.py +158 -158
- crawlo/core/processor.py +40 -40
- crawlo/core/scheduler.py +57 -57
- crawlo/crawler.py +494 -492
- crawlo/downloader/__init__.py +78 -78
- crawlo/downloader/aiohttp_downloader.py +199 -199
- crawlo/downloader/cffi_downloader.py +242 -277
- crawlo/downloader/httpx_downloader.py +246 -246
- crawlo/event.py +11 -11
- crawlo/exceptions.py +78 -78
- crawlo/extension/__init__.py +31 -31
- crawlo/extension/log_interval.py +49 -49
- crawlo/extension/log_stats.py +44 -44
- crawlo/extension/logging_extension.py +34 -34
- crawlo/filters/__init__.py +37 -37
- crawlo/filters/aioredis_filter.py +150 -150
- crawlo/filters/memory_filter.py +202 -202
- crawlo/items/__init__.py +23 -23
- crawlo/items/base.py +21 -21
- crawlo/items/fields.py +53 -53
- crawlo/items/items.py +104 -104
- crawlo/middleware/__init__.py +21 -21
- crawlo/middleware/default_header.py +32 -32
- crawlo/middleware/download_delay.py +28 -28
- crawlo/middleware/middleware_manager.py +135 -135
- crawlo/middleware/proxy.py +245 -245
- crawlo/middleware/request_ignore.py +30 -30
- crawlo/middleware/response_code.py +18 -18
- crawlo/middleware/response_filter.py +26 -26
- crawlo/middleware/retry.py +90 -90
- crawlo/network/__init__.py +7 -7
- crawlo/network/request.py +203 -203
- crawlo/network/response.py +166 -166
- crawlo/pipelines/__init__.py +13 -13
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/mongo_pipeline.py +116 -116
- crawlo/pipelines/mysql_batch_pipline.py +272 -272
- crawlo/pipelines/mysql_pipeline.py +195 -195
- crawlo/pipelines/pipeline_manager.py +56 -56
- crawlo/project.py +153 -0
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +166 -168
- crawlo/settings/setting_manager.py +99 -99
- crawlo/spider/__init__.py +129 -129
- crawlo/stats_collector.py +59 -59
- crawlo/subscriber.py +106 -106
- crawlo/task_manager.py +27 -27
- crawlo/templates/crawlo.cfg.tmpl +10 -10
- crawlo/templates/project/__init__.py.tmpl +3 -3
- crawlo/templates/project/items.py.tmpl +17 -17
- crawlo/templates/project/middlewares.py.tmpl +75 -75
- crawlo/templates/project/pipelines.py.tmpl +63 -63
- crawlo/templates/project/settings.py.tmpl +54 -54
- crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
- crawlo/templates/spider/spider.py.tmpl +31 -31
- crawlo/utils/__init__.py +7 -7
- crawlo/utils/date_tools.py +233 -233
- crawlo/utils/db_helper.py +343 -343
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/log.py +128 -128
- crawlo/utils/pqueue.py +173 -173
- crawlo/utils/request.py +267 -267
- crawlo/utils/spider_loader.py +62 -62
- crawlo/utils/system.py +11 -11
- crawlo/utils/tools.py +4 -4
- crawlo/utils/url.py +39 -39
- crawlo-1.1.1.dist-info/METADATA +220 -0
- crawlo-1.1.1.dist-info/RECORD +100 -0
- examples/__init__.py +7 -0
- examples/baidu_spider/__init__.py +7 -0
- examples/baidu_spider/demo.py +94 -0
- examples/baidu_spider/items.py +46 -0
- examples/baidu_spider/middleware.py +49 -0
- examples/baidu_spider/pipeline.py +55 -0
- examples/baidu_spider/run.py +27 -0
- examples/baidu_spider/settings.py +121 -0
- examples/baidu_spider/spiders/__init__.py +7 -0
- examples/baidu_spider/spiders/bai_du.py +61 -0
- examples/baidu_spider/spiders/miit.py +159 -0
- examples/baidu_spider/spiders/sina.py +79 -0
- tests/__init__.py +7 -7
- tests/test_proxy_health_check.py +32 -32
- tests/test_proxy_middleware_integration.py +136 -136
- tests/test_proxy_providers.py +56 -56
- tests/test_proxy_stats.py +19 -19
- tests/test_proxy_strategies.py +59 -59
- crawlo/utils/concurrency_manager.py +0 -125
- crawlo/utils/project.py +0 -197
- crawlo-1.1.0.dist-info/METADATA +0 -49
- crawlo-1.1.0.dist-info/RECORD +0 -97
- examples/gxb/__init__.py +0 -0
- examples/gxb/items.py +0 -36
- examples/gxb/run.py +0 -16
- examples/gxb/settings.py +0 -72
- examples/gxb/spider/__init__.py +0 -2
- examples/gxb/spider/miit_spider.py +0 -180
- examples/gxb/spider/telecom_device.py +0 -129
- {crawlo-1.1.0.dist-info → crawlo-1.1.1.dist-info}/WHEEL +0 -0
- {crawlo-1.1.0.dist-info → crawlo-1.1.1.dist-info}/entry_points.txt +0 -0
- {crawlo-1.1.0.dist-info → crawlo-1.1.1.dist-info}/top_level.txt +0 -0
tests/test_proxy_middleware_integration.py
CHANGED
@@ -1,137 +1,137 @@ (lines 1-136 are removed and re-added with identical content; line 137 is unchanged context, so the file is listed once)

# tests/test_proxy_middleware_integration.py
import pytest
import asyncio
import time
from unittest.mock import Mock, AsyncMock, patch
from crawlo import Request, Response, Spider
from crawlo.proxy.middleware import ProxyMiddleware
from crawlo.proxy.stats import ProxyStats


@pytest.fixture
def crawler():
    class MockSettings:
        def get(self, key, default=None):
            defaults = {
                'PROXY_ENABLED': True,
                'PROXIES': ['http://p1:8080', 'http://p2:8080'],
                'PROXY_SELECTION_STRATEGY': 'random',
                'PROXY_REQUEST_DELAY_ENABLED': False,
                'PROXY_MAX_RETRY_COUNT': 1,
            }
            return defaults.get(key, default)

        def get_bool(self, key, default=None):
            return self.get(key, default)

        def get_int(self, key, default=None):
            return self.get(key, default)

        def get_float(self, key, default=None):
            return self.get(key, default)

        def get_list(self, key, default=None):
            return self.get(key, default)

    class MockCrawler:
        def __init__(self):
            self.settings = MockSettings()

    return MockCrawler()


@pytest.fixture
def middleware(crawler):
    mw = ProxyMiddleware.create_instance(crawler)
    mw._load_providers = Mock()
    mw._update_proxy_pool = AsyncMock()
    mw._health_check = AsyncMock()
    mw.scheduler = None

    mw.proxies = [
        {
            'url': 'http://p1:8080',
            'healthy': True,
            'failures': 0,
            'last_health_check': 0,
            'unhealthy_since': 0
        },
        {
            'url': 'http://p2:8080',
            'healthy': True,
            'failures': 0,
            'last_health_check': 0,
            'unhealthy_since': 0
        },
    ]
    mw.stats = ProxyStats()
    for p in mw.proxies:
        mw.stats.record(p['url'], 'total')

    asyncio.get_event_loop().run_until_complete(mw._initial_setup())
    return mw


@pytest.fixture
def spider():
    return Mock(spec=Spider, logger=Mock())


def test_process_request_sets_proxy(middleware, spider):
    request = Request("https://example.com")
    result = asyncio.get_event_loop().run_until_complete(
        middleware.process_request(request, spider)
    )
    assert result is None
    assert hasattr(request, 'proxy')
    assert request.proxy in ['http://p1:8080', 'http://p2:8080']


def test_process_response_records_success(middleware, spider):
    request = Request("https://example.com")
    request.proxy = 'http://p1:8080'
    response = Response("https://example.com", body=b"ok", headers={})
    middleware.stats.record(request.proxy, 'total')
    middleware.process_response(request, response, spider)
    assert middleware.stats.get(request.proxy)['success'] == 1


def test_process_exception_switches_proxy(middleware, spider):
    request = Request("https://example.com")
    request.proxy = 'http://p1:8080'
    request.meta['proxy_retry_count'] = 0

    result = middleware.process_exception(request, Exception("Timeout"), spider)
    assert result is not None
    assert result.proxy != 'http://p1:8080'
    assert result.meta['proxy_retry_count'] == 1

    final = middleware.process_exception(result, Exception("Timeout"), spider)
    assert final is None


def test_mark_failure_disables_proxy(middleware):
    proxy_url = 'http://p1:8080'
    p = next(p for p in middleware.proxies if p['url'] == proxy_url)
    p['failures'] = 2

    middleware._mark_failure(proxy_url)
    assert p['failures'] == 3
    assert p['healthy'] is False
    assert p['unhealthy_since'] > 0


@pytest.mark.asyncio
async def test_request_delay(middleware, spider):
    """Test the request-delay feature: verify that asyncio.sleep is called."""
    with patch("crawlo.proxy.middleware.asyncio.sleep", new_callable=AsyncMock) as mock_sleep:
        middleware.delay_enabled = True  # note: the attribute is delay_enabled, not request_delay_enabled
        middleware.request_delay = 0.1
        middleware._last_req_time = time.time() - 0.05  # 50 ms ago

        request = Request("https://a.com")
        await middleware.process_request(request, spider)

        mock_sleep.assert_called_once()
        delay = mock_sleep.call_args[0][0]
        assert 0.04 <= delay <= 0.06
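A side note on the fixture above: the synchronous tests drive coroutines with asyncio.get_event_loop().run_until_complete, a pattern that Python 3.12 flags with a DeprecationWarning when no current event loop exists. Below is a minimal sketch of an alternative helper based on asyncio.run; it is an assumption about how the suite could be adapted, not part of the released package.

# Hypothetical helper, not part of crawlo: runs a coroutine from synchronous test code
# by letting asyncio.run() create and close a fresh event loop per call.
import asyncio

def run_sync(coro):
    """Run a coroutine to completion from a synchronous test or fixture."""
    return asyncio.run(coro)

# Assumed usage, mirroring the calls in the fixture and tests above:
#     run_sync(mw._initial_setup())
#     result = run_sync(middleware.process_request(request, spider))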
tests/test_proxy_providers.py
CHANGED
@@ -1,57 +1,57 @@ (lines 1-56 are removed and re-added with identical content; line 57 is unchanged context, so the file is listed once)

# tests/test_proxy_providers.py
import pytest
import pytest
import respx
from httpx import Response
from crawlo.proxy.providers import StaticProxyProvider, FileProxyProvider, APIProxyProvider
import tempfile
import os


@pytest.mark.asyncio
async def test_static_provider():
    """Test the static proxy provider."""
    provider = StaticProxyProvider(['http://1.1.1.1:8080', 'http://2.2.2.2:8080'])
    proxies = await provider.fetch_proxies()
    assert len(proxies) == 2
    assert 'http://1.1.1.1:8080' in proxies
    assert 'http://2.2.2.2:8080' in proxies


@pytest.mark.asyncio
async def test_file_provider():
    """Test the file-based proxy provider."""
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
        f.write("http://a.com:8080\nhttp://b.com:8080\n")
        temp_path = f.name
    try:
        provider = FileProxyProvider(temp_path)
        proxies = await provider.fetch_proxies()
        assert len(proxies) == 2
        assert 'http://a.com:8080' in proxies
        assert 'http://b.com:8080' in proxies
    finally:
        os.unlink(temp_path)


@pytest.mark.asyncio
@respx.mock
async def test_api_provider():
    """Use respx to intercept the HTTP request: simpler and more reliable."""
    # Intercept the GET request
    respx.get("https://api.example.com").mock(
        return_value=Response(
            200,
            json=[
                {"ip": "1.1.1.1", "port": 8080},
                {"ip": "2.2.2.2", "port": 8080}
            ]
        )
    )

    provider = APIProxyProvider(url="https://api.example.com")
    proxies = await provider.fetch_proxies()

    assert len(proxies) == 2
    assert "http://1.1.1.1:8080" in proxies
    assert "http://2.2.2.2:8080" in proxies
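The three provider tests above pin down a small contract: a provider is constructed with its source (a list, a file path, or an API URL) and exposes an async fetch_proxies() that returns a list of proxy URLs. The sketch below mirrors that contract for the static and file cases; the class names are hypothetical and this is not the crawlo.proxy.providers implementation.

# Illustrative sketches of the provider contract exercised by the tests above.
from typing import List


class StaticProviderSketch:
    def __init__(self, proxies: List[str]):
        self._proxies = list(proxies)

    async def fetch_proxies(self) -> List[str]:
        # Nothing to fetch for a static list; return a copy.
        return list(self._proxies)


class FileProviderSketch:
    def __init__(self, path: str):
        self._path = path

    async def fetch_proxies(self) -> List[str]:
        # One proxy URL per line; blank lines are ignored.
        with open(self._path, encoding="utf-8") as f:
            return [line.strip() for line in f if line.strip()]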
tests/test_proxy_stats.py
CHANGED
@@ -1,20 +1,20 @@ (lines 1-19 are removed and re-added with identical content; line 20 is unchanged context, so the file is listed once)

# tests/test_proxy_stats.py
from crawlo.proxy.stats import ProxyStats


def test_proxy_stats():
    """Test proxy statistics collection."""
    stats = ProxyStats()
    url = 'http://proxy1:8080'

    stats.record(url, 'success')
    stats.record(url, 'success')
    stats.record(url, 'failure')

    assert stats.get(url)['success'] == 2
    assert stats.get(url)['failure'] == 1
    assert stats.get(url)['total'] == 3

    all_data = stats.all()
    assert url in all_data
    assert all_data[url]['success'] == 2
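test_proxy_stats fixes the observable behaviour of ProxyStats: record(url, event) increments a per-proxy counter (and a running 'total'), get(url) returns the counters for one proxy, and all() returns the whole mapping. Below is a minimal interface-compatible sketch, assuming 'total' is advanced alongside 'success' and 'failure'; it is not the shipped crawlo.proxy.stats class.

# Sketch of a stats collector with the behaviour asserted in test_proxy_stats above.
from collections import defaultdict


class ProxyStatsSketch:
    def __init__(self):
        self._counts = defaultdict(lambda: defaultdict(int))

    def record(self, url: str, event: str) -> None:
        self._counts[url][event] += 1
        if event != 'total':  # 'success'/'failure' also advance the running total
            self._counts[url]['total'] += 1

    def get(self, url: str) -> dict:
        return dict(self._counts[url])

    def all(self) -> dict:
        return {url: dict(counters) for url, counters in self._counts.items()}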
tests/test_proxy_strategies.py
CHANGED
@@ -1,60 +1,60 @@ (lines 1-59 are removed and re-added with identical content; line 60 is unchanged context, so the file is listed once)

# tests/test_proxy_strategies.py
import pytest
from crawlo import Request
from crawlo.proxy.strategies import STRATEGIES


@pytest.fixture
def mock_proxies():
    """Proxy list used by the tests."""
    return [
        {'url': 'http://p1:8080'},
        {'url': 'http://p2:8080'},
        {'url': 'http://p3:8080'},
    ]


@pytest.fixture
def mock_stats():
    """Usage statistics used by the tests."""
    return {
        'http://p1:8080': {'total': 10},
        'http://p2:8080': {'total': 5},
        'http://p3:8080': {'total': 1},
    }


@pytest.fixture
def mock_request():
    """Request object used by the tests."""
    return Request("https://example.com")


def test_random_strategy(mock_proxies, mock_request, mock_stats):
    """Test the random selection strategy."""
    strategy = STRATEGIES['random']
    chosen = strategy(mock_proxies, mock_request, mock_stats)
    assert chosen in [p['url'] for p in mock_proxies]


def test_least_used_strategy(mock_proxies, mock_request, mock_stats):
    """Test the least-used selection strategy."""
    strategy = STRATEGIES['least_used']
    chosen = strategy(mock_proxies, mock_request, mock_stats)
    assert chosen == 'http://p3:8080'  # total=1


def test_domain_rule_strategy(mock_proxies, mock_request, mock_stats):
    """Test the domain-rule selection strategy."""
    from crawlo.proxy.strategies.domain_rule import domain_rule_strategy
    request = Request("https://taobao.com/item/123")
    rules = {'taobao.com': 'http://special:8080'}

    # Monkey-patch to guarantee a fallback strategy exists
    old_strategy = STRATEGIES['least_used']
    try:
        STRATEGIES['least_used'] = lambda p, r, s: 'http://fallback:8080'
        chosen = domain_rule_strategy(mock_proxies, request, mock_stats, rules)
        assert chosen == 'http://special:8080'
    finally:
        STRATEGIES['least_used'] = old_strategy
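The strategy tests treat every entry in STRATEGIES as a callable with the signature (proxies, request, stats) -> proxy_url. A least-used strategy consistent with test_least_used_strategy could look like the sketch below; it illustrates the contract only and is not crawlo's implementation.

# Sketch of a least-used selection strategy matching the call signature used above:
# pick the proxy URL whose recorded 'total' usage count is lowest.
def least_used_sketch(proxies, request, stats):
    return min(
        (p['url'] for p in proxies),
        key=lambda url: stats.get(url, {}).get('total', 0),
    )

# With the fixtures above this returns 'http://p3:8080' (total=1).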
crawlo/utils/concurrency_manager.py
REMOVED
@@ -1,125 +0,0 @@ (the module is deleted in 1.1.1; its former content is listed once)

import os
import platform
import logging
from typing import Optional

try:
    import psutil  # third-party library used to read system resource information
except ImportError:
    psutil = None  # fall back to None when psutil is unavailable

logger = logging.getLogger(__name__)


def calculate_optimal_concurrency(user_specified: Optional[int] = None, use_logical_cores: bool = True) -> int:
    """
    Compute an optimal concurrency level from system resources, or use the user-specified value.

    Args:
        user_specified: concurrency chosen by the user (takes precedence)
        use_logical_cores: whether to count logical CPU cores (hyper-threading); defaults to True

    Returns:
        The computed optimal concurrency.

    Notes:
        1. A user-specified value always wins.
        2. The calculation strategy depends on the operating system:
           - Windows: conservative, to avoid memory pressure
           - macOS: balanced resource usage
           - Linux: makes full use of server resources
           - other systems: a sensible default
        3. Available memory and CPU core count drive the calculation.
        4. A fallback is provided when psutil is unavailable.
    """
    # A user-specified concurrency takes precedence
    if user_specified is not None:
        logger.info(f"Using user-specified concurrency: {user_specified}")
        return user_specified

    try:
        current_os = platform.system()  # detect the operating system
        logger.debug(f"Detected operating system: {current_os}")

        # CPU core count (logical or physical, depending on the flag)
        cpu_count = psutil.cpu_count(logical=use_logical_cores) or 1 if psutil else os.cpu_count() or 1

        # Pick the calculation for the current operating system
        if current_os == "Windows":
            concurrency = _get_concurrency_for_windows(cpu_count, use_logical_cores)
        elif current_os == "Darwin":  # macOS
            concurrency = _get_concurrency_for_macos(cpu_count, use_logical_cores)
        elif current_os == "Linux":
            concurrency = _get_concurrency_for_linux(cpu_count, use_logical_cores)
        else:  # any other operating system
            concurrency = _get_concurrency_default(cpu_count)

        logger.info(f"Computed maximum concurrency: {concurrency}")
        return concurrency

    except Exception as e:
        logger.warning(f"Dynamic concurrency calculation failed: {str(e)}, using default of 50")
        return 50  # safe default when the calculation fails


def _get_concurrency_for_windows(cpu_count: int, use_logical_cores: bool) -> int:
    """Concurrency calculation for Windows."""
    if psutil:
        # Available memory in GB
        available_memory = psutil.virtual_memory().available / (1024 ** 3)
        # Memory-based limit: 10 concurrent tasks per 4 GB of available memory
        mem_based = int((available_memory / 4) * 10)
        # CPU-based limit: larger multiplier when logical cores are counted
        cpu_based = cpu_count * (5 if use_logical_cores else 3)
        # Clamp to 5-100, taking the smaller of the memory and CPU limits
        return max(5, min(100, mem_based, cpu_based))
    else:
        # Fallback when psutil is unavailable
        return min(50, cpu_count * 5)


def _get_concurrency_for_macos(cpu_count: int, use_logical_cores: bool) -> int:
    """Concurrency calculation for macOS."""
    if psutil:
        available_memory = psutil.virtual_memory().available / (1024 ** 3)
        # Memory-based limit: 10 concurrent tasks per 3 GB of available memory
        mem_based = int((available_memory / 3) * 10)
        # CPU-based limit: larger multiplier when logical cores are counted
        cpu_based = cpu_count * (6 if use_logical_cores else 4)
        # Clamp to 5-120
        return max(5, min(120, mem_based, cpu_based))
    else:
        try:
            # macOS fallback: ask sysctl for the physical CPU core count
            import subprocess
            output = subprocess.check_output(["sysctl", "hw.physicalcpu"])
            cpu_count = int(output.split()[1])
            return min(60, cpu_count * 5)
        except:
            return 40  # reasonable default for a Mac


def _get_concurrency_for_linux(cpu_count: int, use_logical_cores: bool) -> int:
    """Concurrency calculation for Linux (more aggressive)."""
    if psutil:
        available_memory = psutil.virtual_memory().available / (1024 ** 3)
        # Memory-based limit: 10 concurrent tasks per 1.5 GB of available memory
        mem_based = int((available_memory / 1.5) * 10)
        # CPU-based limit: server environments get a larger multiplier
        cpu_based = cpu_count * (8 if use_logical_cores else 5)
        # Clamp to 5-200
        return max(5, min(200, mem_based, cpu_based))
    else:
        try:
            # Linux fallback: parse /proc/cpuinfo
            with open("/proc/cpuinfo") as f:
                cpu_count = f.read().count("processor\t:")
            if cpu_count > 0:
                return min(200, cpu_count * 8)
        except:
            return 50  # reasonable default for a Linux server


def _get_concurrency_default(cpu_count: int) -> int:
    """Default calculation for unknown operating systems."""
    return min(50, cpu_count * 5)  # conservative default