crawlo 1.4.4__py3-none-any.whl → 1.4.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- crawlo/__init__.py +11 -15
- crawlo/__version__.py +1 -1
- crawlo/commands/startproject.py +24 -0
- crawlo/core/engine.py +2 -2
- crawlo/core/scheduler.py +4 -4
- crawlo/crawler.py +8 -7
- crawlo/downloader/__init__.py +5 -2
- crawlo/downloader/cffi_downloader.py +3 -1
- crawlo/extension/__init__.py +2 -2
- crawlo/filters/aioredis_filter.py +8 -1
- crawlo/filters/memory_filter.py +8 -1
- crawlo/initialization/built_in.py +13 -4
- crawlo/initialization/core.py +5 -4
- crawlo/interfaces.py +24 -0
- crawlo/middleware/__init__.py +7 -4
- crawlo/middleware/middleware_manager.py +15 -8
- crawlo/middleware/proxy.py +171 -348
- crawlo/mode_manager.py +45 -11
- crawlo/network/response.py +374 -69
- crawlo/pipelines/mysql_pipeline.py +340 -189
- crawlo/pipelines/pipeline_manager.py +2 -2
- crawlo/project.py +2 -4
- crawlo/settings/default_settings.py +42 -30
- crawlo/stats_collector.py +10 -1
- crawlo/task_manager.py +2 -2
- crawlo/templates/project/items.py.tmpl +2 -2
- crawlo/templates/project/middlewares.py.tmpl +9 -89
- crawlo/templates/project/pipelines.py.tmpl +8 -68
- crawlo/templates/project/settings.py.tmpl +10 -55
- crawlo/templates/project/settings_distributed.py.tmpl +20 -22
- crawlo/templates/project/settings_gentle.py.tmpl +5 -0
- crawlo/templates/project/settings_high_performance.py.tmpl +5 -0
- crawlo/templates/project/settings_minimal.py.tmpl +25 -1
- crawlo/templates/project/settings_simple.py.tmpl +5 -0
- crawlo/templates/run.py.tmpl +1 -8
- crawlo/templates/spider/spider.py.tmpl +5 -108
- crawlo/tools/__init__.py +0 -11
- crawlo/utils/__init__.py +17 -1
- crawlo/utils/db_helper.py +226 -319
- crawlo/utils/error_handler.py +313 -67
- crawlo/utils/fingerprint.py +3 -4
- crawlo/utils/misc.py +82 -0
- crawlo/utils/request.py +55 -66
- crawlo/utils/selector_helper.py +138 -0
- crawlo/utils/spider_loader.py +185 -45
- crawlo/utils/text_helper.py +95 -0
- crawlo-1.4.6.dist-info/METADATA +329 -0
- {crawlo-1.4.4.dist-info → crawlo-1.4.6.dist-info}/RECORD +110 -69
- tests/authenticated_proxy_example.py +10 -6
- tests/bug_check_test.py +251 -0
- tests/direct_selector_helper_test.py +97 -0
- tests/explain_mysql_update_behavior.py +77 -0
- tests/ofweek_scrapy/ofweek_scrapy/items.py +12 -0
- tests/ofweek_scrapy/ofweek_scrapy/middlewares.py +100 -0
- tests/ofweek_scrapy/ofweek_scrapy/pipelines.py +13 -0
- tests/ofweek_scrapy/ofweek_scrapy/settings.py +85 -0
- tests/ofweek_scrapy/ofweek_scrapy/spiders/__init__.py +4 -0
- tests/ofweek_scrapy/ofweek_scrapy/spiders/ofweek_spider.py +162 -0
- tests/ofweek_scrapy/scrapy.cfg +11 -0
- tests/performance_comparison.py +4 -5
- tests/simple_crawlo_test.py +1 -2
- tests/simple_follow_test.py +39 -0
- tests/simple_response_selector_test.py +95 -0
- tests/simple_selector_helper_test.py +155 -0
- tests/simple_selector_test.py +208 -0
- tests/simple_url_test.py +74 -0
- tests/simulate_mysql_update_test.py +140 -0
- tests/test_asyncmy_usage.py +57 -0
- tests/test_crawler_process_import.py +39 -0
- tests/test_crawler_process_spider_modules.py +48 -0
- tests/test_crawlo_proxy_integration.py +8 -2
- tests/test_downloader_proxy_compatibility.py +24 -20
- tests/test_edge_cases.py +7 -5
- tests/test_encoding_core.py +57 -0
- tests/test_encoding_detection.py +127 -0
- tests/test_factory_compatibility.py +197 -0
- tests/test_mysql_pipeline_config.py +165 -0
- tests/test_mysql_pipeline_error.py +99 -0
- tests/test_mysql_pipeline_init_log.py +83 -0
- tests/test_mysql_pipeline_integration.py +133 -0
- tests/test_mysql_pipeline_refactor.py +144 -0
- tests/test_mysql_pipeline_refactor_simple.py +86 -0
- tests/test_mysql_pipeline_robustness.py +196 -0
- tests/test_mysql_pipeline_types.py +89 -0
- tests/test_mysql_update_columns.py +94 -0
- tests/test_optimized_selector_naming.py +101 -0
- tests/test_priority_behavior.py +18 -18
- tests/test_proxy_middleware.py +104 -8
- tests/test_proxy_middleware_enhanced.py +1 -5
- tests/test_proxy_middleware_integration.py +7 -2
- tests/test_proxy_middleware_refactored.py +25 -2
- tests/test_proxy_only.py +84 -0
- tests/test_proxy_with_downloader.py +153 -0
- tests/test_real_scenario_proxy.py +17 -17
- tests/test_response_follow.py +105 -0
- tests/test_response_selector_methods.py +93 -0
- tests/test_response_url_methods.py +71 -0
- tests/test_response_urljoin.py +87 -0
- tests/test_scrapy_style_encoding.py +113 -0
- tests/test_selector_helper.py +101 -0
- tests/test_selector_optimizations.py +147 -0
- tests/test_spider_loader.py +50 -0
- tests/test_spider_loader_comprehensive.py +70 -0
- tests/test_spiders/__init__.py +1 -0
- tests/test_spiders/test_spider.py +10 -0
- tests/verify_mysql_warnings.py +110 -0
- crawlo/middleware/simple_proxy.py +0 -65
- crawlo/tools/anti_crawler.py +0 -269
- crawlo/utils/class_loader.py +0 -26
- crawlo/utils/enhanced_error_handler.py +0 -357
- crawlo-1.4.4.dist-info/METADATA +0 -190
- tests/simple_log_test.py +0 -58
- tests/simple_test.py +0 -48
- tests/test_framework_logger.py +0 -67
- tests/test_framework_startup.py +0 -65
- tests/test_mode_change.py +0 -73
- {crawlo-1.4.4.dist-info → crawlo-1.4.6.dist-info}/WHEEL +0 -0
- {crawlo-1.4.4.dist-info → crawlo-1.4.6.dist-info}/entry_points.txt +0 -0
- {crawlo-1.4.4.dist-info → crawlo-1.4.6.dist-info}/top_level.txt +0 -0
- /tests/{final_command_test_report.md → ofweek_scrapy/ofweek_scrapy/__init__.py} +0 -0
crawlo/middleware/proxy.py
CHANGED
@@ -1,386 +1,209 @@
-[removed: the optional httpx/aiohttp/curl_cffi imports, the NETWORK_EXCEPTIONS tuple built from them, and the ProxyExtractor callable type alias]
-[removed: the Proxy helper class that tracked success/failure counts, last-used time, a success_rate property and an is_healthy flag]
-[removed: the pool-based ProxyMiddleware internals — the PROXY_ENABLED, PROXY_REFRESH_INTERVAL, PROXY_API_TIMEOUT, PROXY_POOL_SIZE and PROXY_HEALTH_CHECK_THRESHOLD settings, aiohttp session management with retries on network errors, dotted-path extractor compilation, proxy-pool refresh with round-robin selection of healthy proxies, and per-downloader handling of authenticated proxy URLs (credentials moved into request.meta["proxy_auth"] for the aiohttp downloader)]
-[removed: the async close() hook that closed the aiohttp session]
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+"""
+Generic proxy middleware.
+Supports two modes: a static proxy list and a dynamic proxy API.
+"""
+import random
+from urllib.parse import urlparse
+from typing import Optional, List
+
+from crawlo.network import Request, Response
+from crawlo.utils.log import get_logger
+
+
+class ProxyMiddleware:
+    """Generic proxy middleware."""
+
+    def __init__(self, settings, log_level):
+        self.logger = get_logger(self.__class__.__name__, log_level)
+
+        # Read the proxy list and the proxy API URL
+        self.proxies: List[str] = settings.get("PROXY_LIST", [])
+        self.api_url = settings.get("PROXY_API_URL")
+        # How to extract the proxy from an API response
+        self.proxy_extractor = settings.get("PROXY_EXTRACTOR", "proxy")  # defaults to the "proxy" field
+
+        # Track failed proxies so they are not reused
+        self.failed_proxies = set()
+        # Proxies failing more than this many times are marked as dead
+        self.max_failed_attempts = settings.get("PROXY_MAX_FAILED_ATTEMPTS", 3)
+        # Per-proxy failure counters
+        self.proxy_failure_count = {}
+
+        # Decide which mode to enable based on the configuration
+        if self.proxies:
+            self.mode = "static"
+            self.enabled = True
+            self.logger.info(f"ProxyMiddleware enabled (static mode) with {len(self.proxies)} proxies")
+        elif self.api_url:
+            self.mode = "dynamic"
+            self.enabled = True
+            self.logger.info(f"ProxyMiddleware enabled (dynamic mode) | API: {self.api_url}")
+        else:
+            self.mode = None
+            self.enabled = False
+            self.logger.info("ProxyMiddleware disabled (no proxy configuration)")
+
+    @classmethod
+    def create_instance(cls, crawler):
+        return cls(settings=crawler.settings, log_level=crawler.settings.get("LOG_LEVEL"))
+
+    async def _fetch_proxy_from_api(self) -> Optional[str]:
+        """Fetch a proxy from the proxy API."""
+        try:
+            import aiohttp
+            async with aiohttp.ClientSession() as session:
+                async with session.get(self.api_url) as resp:
+                    if resp.status == 200:
+                        data = await resp.json()
+                        # Several extraction strategies are supported
+                        proxy = self._extract_proxy_from_data(data)
+                        if proxy and isinstance(proxy, str) and (proxy.startswith("http://") or proxy.startswith("https://")):
+                            return proxy
+                    else:
+                        self.logger.warning(f"Proxy API returned status {resp.status}")
+        except Exception as e:
+            self.logger.warning(f"Failed to fetch proxy from API: {repr(e)}")
+        return None
+
+    def _extract_proxy_from_data(self, data) -> Optional[str]:
+        """
+        Extract a proxy from the API response data.
+
+        Supported extractor forms:
+        1. str: used directly as a field name
+        2. dict: with "type" and "value" keys, where type is "field" or "jsonpath"
+        3. callable: a user-supplied extraction function
+        """
+        if isinstance(self.proxy_extractor, str):
+            # Simple field-name extraction (backwards compatible)
+            if self.proxy_extractor in data:
+                proxy_value = data[self.proxy_extractor]
+                # If the value is a dict, try its "http" or "https" entry
+                if isinstance(proxy_value, dict):
+                    if "http" in proxy_value:
+                        return proxy_value["http"]
+                    elif "https" in proxy_value:
+                        return proxy_value["https"]
+                return proxy_value
+        elif isinstance(self.proxy_extractor, dict):
+            # Structured extraction rule
+            extractor_type = self.proxy_extractor.get("type", "field")
+            extractor_value = self.proxy_extractor.get("value", "proxy")
+
+            if extractor_type == "field":
+                if extractor_value in data:
+                    proxy_value = data[extractor_value]
+                    if isinstance(proxy_value, dict):
+                        if "http" in proxy_value:
+                            return proxy_value["http"]
+                        elif "https" in proxy_value:
+                            return proxy_value["https"]
+                    return proxy_value
+            elif extractor_type == "jsonpath":
+                # JSONPath extraction (requires the jsonpath library)
+                try:
+                    import jsonpath
+                    matches = jsonpath.jsonpath(data, extractor_value)
+                    if matches:
+                        return matches[0]
+                except ImportError:
+                    self.logger.warning("jsonpath library not installed, falling back to default extraction")
+                    if "proxy" in data:
+                        proxy_value = data["proxy"]
+                        if isinstance(proxy_value, dict):
+                            if "http" in proxy_value:
+                                return proxy_value["http"]
+                            elif "https" in proxy_value:
+                                return proxy_value["https"]
+                        return proxy_value
+            elif extractor_type == "custom":
+                # Custom extraction function supplied by the user
+                custom_func = self.proxy_extractor.get("function")
+                if callable(custom_func):
+                    return custom_func(data)
+        elif callable(self.proxy_extractor):
+            # Call the user-supplied callable directly
+            return self.proxy_extractor(data)
+
+        # Default extraction (backwards compatible)
+        if "proxy" in data:
+            proxy_value = data["proxy"]
+            if isinstance(proxy_value, dict):
+                if "http" in proxy_value:
+                    return proxy_value["http"]
+                elif "https" in proxy_value:
+                    return proxy_value["https"]
+            return proxy_value
+
+        return None
+
+    async def process_request(self, request: Request, spider) -> Optional[Request]:
+        """Assign a proxy to the request."""
+        if not self.enabled:
+            return None
+
+        if request.proxy:
+            # The request already carries a proxy; do not override it
+            return None
+
+        proxy = None
+        if self.mode == "static" and self.proxies:
+            # Static mode: pick a random proxy, skipping known-failed ones
+            available_proxies = [p for p in self.proxies if p not in self.failed_proxies]
+            if available_proxies:
+                proxy = random.choice(available_proxies)
+            else:
+                self.logger.warning("所有静态代理都已失效,将使用直连")
+        elif self.mode == "dynamic" and self.api_url:
+            # Dynamic mode: fetch a proxy from the API
+            proxy = await self._fetch_proxy_from_api()
+
+        if proxy:
+            # Warn when the proxy is already on the failed list
+            if proxy in self.failed_proxies:
+                self.logger.warning(f"尝试使用已知失败的代理: {proxy},但仍会尝试")
+
+            request.proxy = proxy
+            self.logger.debug(f"Assigned proxy {proxy} to {request.url}")
+        else:
+            self.logger.warning(f"No proxy available, request connecting directly: {request.url}")
+
+        return None
+
+    def process_response(self, request: Request, response: Response, spider) -> Response:
+        """Handle the response."""
+        if request.proxy:
+            self.logger.debug(f"Proxy request successful: {request.proxy} | {request.url}")
+            # The proxied request succeeded; drop it from the failed set if present
+            self.failed_proxies.discard(request.proxy)
+            # Reset the failure counter
+            if request.proxy in self.proxy_failure_count:
+                del self.proxy_failure_count[request.proxy]
+        return response
+
+    def process_exception(self, request: Request, exception: Exception, spider) -> Optional[Request]:
+        """Handle a download exception."""
+        if request.proxy:
+            error_msg = f"Proxy request failed: {request.proxy} | {request.url} | {repr(exception)}"
+            self.logger.warning(error_msg)
+
+            # Record the failure for this proxy
+            if request.proxy not in self.proxy_failure_count:
+                self.proxy_failure_count[request.proxy] = 0
+            self.proxy_failure_count[request.proxy] += 1
+
+            # Mark the proxy as dead once the failure threshold is reached
+            if self.proxy_failure_count[request.proxy] >= self.max_failed_attempts:
+                self.failed_proxies.add(request.proxy)
+                self.logger.warning(f"代理 {request.proxy} 已失败 {self.max_failed_attempts} 次,标记为失效")
+
+            # Remove it from the static proxy list (static mode only)
+            if self.mode == "static" and request.proxy in self.proxies:
+                self.proxies.remove(request.proxy)
+                self.logger.info(f"已从静态代理列表中移除失效代理: {request.proxy}")
+        return None
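The rewritten middleware is driven entirely by settings. Below is a minimal configuration sketch for the two modes, using the keys read in __init__ above; the proxy addresses, API URL and JSONPath expression are illustrative placeholders, not values shipped with the package.

# settings.py (sketch) — illustrative values only

# Static mode: ProxyMiddleware picks one proxy per request at random from this
# list and stops using entries that keep failing.
PROXY_LIST = [
    "http://127.0.0.1:8001",
    "http://127.0.0.1:8002",
]

# Dynamic mode — only consulted when PROXY_LIST is empty, since a non-empty
# list enables static mode first. One proxy is fetched per request from an API
# returning JSON such as {"proxy": "http://1.2.3.4:8000"}.
PROXY_API_URL = "http://proxy-provider.example/get"

# How to pull the proxy out of the API response: a plain string is a field
# name; a dict selects a strategy ("field", "jsonpath" or "custom").
PROXY_EXTRACTOR = {"type": "jsonpath", "value": "$.data.proxy"}

# A proxy failing this many times is marked as dead and no longer rotated in.
PROXY_MAX_FAILED_ATTEMPTS = 3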
crawlo/mode_manager.py
CHANGED
@@ -51,9 +51,11 @@ class ModeManager:
     def get_standalone_settings() -> Dict[str, Any]:
         """Get the standalone-mode settings."""
         return {
+            'RUN_MODE': 'standalone',
             'QUEUE_TYPE': 'memory',
             'FILTER_CLASS': 'crawlo.filters.memory_filter.MemoryFilter',
             'DEFAULT_DEDUP_PIPELINE': 'crawlo.pipelines.memory_dedup_pipeline.MemoryDedupPipeline',
+            'PROJECT_NAME': 'crawlo',
             'CONCURRENCY': 8,
             'MAX_RUNNING_SPIDERS': 1,
             'DOWNLOAD_DELAY': 1.0,
@@ -75,6 +77,7 @@ class ModeManager:
         redis_url = f'redis://{redis_host}:{redis_port}/{redis_db}'

         return {
+            'RUN_MODE': 'distributed',
             'QUEUE_TYPE': 'redis',
             'FILTER_CLASS': 'crawlo.filters.aioredis_filter.AioRedisFilter',
             'DEFAULT_DEDUP_PIPELINE': 'crawlo.pipelines.redis_dedup_pipeline.RedisDedupPipeline',
@@ -95,6 +98,7 @@ class ModeManager:
         """Get the auto-detect mode settings."""
         # Default to the in-memory queue and filter
         settings = ModeManager.get_standalone_settings()
+        settings['RUN_MODE'] = 'auto'
         settings['QUEUE_TYPE'] = 'auto'
         return settings

@@ -143,13 +147,29 @@ class ModeManager:
             raise ValueError(f"不支持的运行模式: {mode}")

         # Merge user-supplied settings
-        [7 lines of the previous merge logic, not shown in the diff view]
+        # In distributed mode, filter out the mode-specific parameters
+        if mode == RunMode.DISTRIBUTED:
+            user_settings = {
+                k.upper(): v for k, v in kwargs.items()
+                if k not in ['redis_host', 'redis_port', 'redis_password', 'project_name']}
+            # Handle project_name explicitly
+            if 'project_name' in kwargs:
+                settings['PROJECT_NAME'] = kwargs['project_name']
+        else:
+            # In standalone and auto mode, filter out only the Redis parameters
+            user_settings = {
+                k.upper(): v for k, v in kwargs.items()
+                if k not in ['redis_host', 'redis_port', 'redis_password']}
+            # Handle project_name explicitly
+            if 'project_name' in kwargs:
+                settings['PROJECT_NAME'] = kwargs['project_name']
         settings.update(user_settings)
         self._debug(f"合并用户自定义配置: {list(user_settings.keys())}")

@@ -182,9 +202,16 @@ class ModeManager:


 # Convenience functions
-def standalone_mode(
+def standalone_mode(
+        project_name: str = 'crawlo',
+        **kwargs
+) -> Dict[str, Any]:
     """Quickly build a standalone-mode configuration."""
-    return ModeManager().resolve_mode_settings(
+    return ModeManager().resolve_mode_settings(
+        'standalone',
+        project_name=project_name,
+        **kwargs
+    )


 def distributed_mode(
@@ -207,9 +234,16 @@ def distributed_mode(
     )


-def auto_mode(
+def auto_mode(
+        project_name: str = 'crawlo',
+        **kwargs
+) -> Dict[str, Any]:
     """Quickly build an auto-detect configuration."""
-    return ModeManager().resolve_mode_settings(
+    return ModeManager().resolve_mode_settings(
+        'auto',
+        project_name=project_name,
+        **kwargs
+    )


 # Environment variable support
|
# 环境变量支持
|