crawlo-1.1.2-py3-none-any.whl → crawlo-1.1.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of crawlo might be problematic.
- crawlo/__init__.py +34 -34
- crawlo/__version__.py +1 -1
- crawlo/cli.py +40 -40
- crawlo/commands/__init__.py +13 -13
- crawlo/commands/check.py +594 -594
- crawlo/commands/genspider.py +151 -151
- crawlo/commands/list.py +155 -155
- crawlo/commands/run.py +285 -285
- crawlo/commands/startproject.py +196 -196
- crawlo/commands/stats.py +188 -188
- crawlo/commands/utils.py +186 -186
- crawlo/config.py +279 -279
- crawlo/core/__init__.py +2 -2
- crawlo/core/engine.py +171 -171
- crawlo/core/enhanced_engine.py +189 -189
- crawlo/core/processor.py +40 -40
- crawlo/core/scheduler.py +166 -162
- crawlo/crawler.py +1027 -1027
- crawlo/downloader/__init__.py +242 -242
- crawlo/downloader/aiohttp_downloader.py +212 -212
- crawlo/downloader/cffi_downloader.py +251 -251
- crawlo/downloader/httpx_downloader.py +259 -257
- crawlo/event.py +11 -11
- crawlo/exceptions.py +82 -78
- crawlo/extension/__init__.py +31 -31
- crawlo/extension/log_interval.py +49 -49
- crawlo/extension/log_stats.py +44 -44
- crawlo/extension/logging_extension.py +34 -34
- crawlo/filters/__init__.py +154 -154
- crawlo/filters/aioredis_filter.py +242 -242
- crawlo/filters/memory_filter.py +269 -269
- crawlo/items/__init__.py +23 -23
- crawlo/items/base.py +21 -21
- crawlo/items/fields.py +53 -53
- crawlo/items/items.py +104 -104
- crawlo/middleware/__init__.py +21 -21
- crawlo/middleware/default_header.py +32 -32
- crawlo/middleware/download_delay.py +28 -28
- crawlo/middleware/middleware_manager.py +135 -135
- crawlo/middleware/proxy.py +248 -248
- crawlo/middleware/request_ignore.py +30 -30
- crawlo/middleware/response_code.py +18 -18
- crawlo/middleware/response_filter.py +26 -26
- crawlo/middleware/retry.py +125 -125
- crawlo/mode_manager.py +200 -200
- crawlo/network/__init__.py +21 -21
- crawlo/network/request.py +311 -311
- crawlo/network/response.py +271 -269
- crawlo/pipelines/__init__.py +22 -13
- crawlo/pipelines/bloom_dedup_pipeline.py +157 -0
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/csv_pipeline.py +316 -316
- crawlo/pipelines/database_dedup_pipeline.py +225 -0
- crawlo/pipelines/json_pipeline.py +218 -218
- crawlo/pipelines/memory_dedup_pipeline.py +116 -0
- crawlo/pipelines/mongo_pipeline.py +116 -116
- crawlo/pipelines/mysql_pipeline.py +195 -195
- crawlo/pipelines/pipeline_manager.py +56 -56
- crawlo/pipelines/redis_dedup_pipeline.py +163 -0
- crawlo/project.py +153 -153
- crawlo/queue/pqueue.py +37 -37
- crawlo/queue/queue_manager.py +307 -303
- crawlo/queue/redis_priority_queue.py +208 -191
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +245 -226
- crawlo/settings/setting_manager.py +99 -99
- crawlo/spider/__init__.py +639 -639
- crawlo/stats_collector.py +59 -59
- crawlo/subscriber.py +106 -106
- crawlo/task_manager.py +30 -30
- crawlo/templates/crawlo.cfg.tmpl +10 -10
- crawlo/templates/project/__init__.py.tmpl +3 -3
- crawlo/templates/project/items.py.tmpl +17 -17
- crawlo/templates/project/middlewares.py.tmpl +86 -86
- crawlo/templates/project/pipelines.py.tmpl +341 -335
- crawlo/templates/project/run.py.tmpl +251 -238
- crawlo/templates/project/settings.py.tmpl +250 -247
- crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
- crawlo/templates/spider/spider.py.tmpl +177 -177
- crawlo/utils/__init__.py +7 -7
- crawlo/utils/controlled_spider_mixin.py +439 -335
- crawlo/utils/date_tools.py +233 -233
- crawlo/utils/db_helper.py +343 -343
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/large_scale_config.py +286 -286
- crawlo/utils/large_scale_helper.py +343 -343
- crawlo/utils/log.py +128 -128
- crawlo/utils/queue_helper.py +175 -175
- crawlo/utils/request.py +267 -267
- crawlo/utils/request_serializer.py +219 -219
- crawlo/utils/spider_loader.py +62 -62
- crawlo/utils/system.py +11 -11
- crawlo/utils/tools.py +4 -4
- crawlo/utils/url.py +39 -39
- {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/METADATA +635 -567
- crawlo-1.1.3.dist-info/RECORD +113 -0
- examples/__init__.py +7 -7
- examples/controlled_spider_example.py +205 -0
- tests/__init__.py +7 -7
- tests/test_final_validation.py +153 -153
- tests/test_proxy_health_check.py +32 -32
- tests/test_proxy_middleware_integration.py +136 -136
- tests/test_proxy_providers.py +56 -56
- tests/test_proxy_stats.py +19 -19
- tests/test_proxy_strategies.py +59 -59
- tests/test_redis_config.py +28 -28
- tests/test_redis_queue.py +224 -224
- tests/test_request_serialization.py +70 -70
- tests/test_scheduler.py +241 -241
- crawlo-1.1.2.dist-info/RECORD +0 -108
- {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/WHEEL +0 -0
- {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/entry_points.txt +0 -0
- {crawlo-1.1.2.dist-info → crawlo-1.1.3.dist-info}/top_level.txt +0 -0
crawlo/middleware/proxy.py
CHANGED
@@ -1,249 +1,249 @@

Lines 1-248 are removed and re-added with identical content (line 249 is unchanged context), so the file is shown once below.

#!/usr/bin/python
# -*- coding: UTF-8 -*-
import asyncio
import socket
from typing import Optional, Dict, Any, Callable, Union, TYPE_CHECKING
from urllib.parse import urlparse

from crawlo import Request, Response
from crawlo.exceptions import NotConfiguredError
from crawlo.utils.log import get_logger

if TYPE_CHECKING:
    import aiohttp

try:
    import httpx
    HTTPX_EXCEPTIONS = (httpx.NetworkError, httpx.TimeoutException, httpx.ReadError, httpx.ConnectError)
except ImportError:
    HTTPX_EXCEPTIONS = ()
    httpx = None

try:
    import aiohttp
    AIOHTTP_EXCEPTIONS = (
        aiohttp.ClientError, aiohttp.ClientConnectorError, aiohttp.ClientResponseError, aiohttp.ServerTimeoutError,
        aiohttp.ServerDisconnectedError)
except ImportError:
    AIOHTTP_EXCEPTIONS = ()
    aiohttp = None

try:
    from curl_cffi import requests as cffi_requests
    CURL_CFFI_EXCEPTIONS = (cffi_requests.RequestsError,)
except (ImportError, AttributeError):
    CURL_CFFI_EXCEPTIONS = ()
    cffi_requests = None

NETWORK_EXCEPTIONS = (
    asyncio.TimeoutError,
    socket.gaierror,
    ConnectionError,
    TimeoutError,
) + HTTPX_EXCEPTIONS + AIOHTTP_EXCEPTIONS + CURL_CFFI_EXCEPTIONS

ProxyExtractor = Callable[[Dict[str, Any]], Union[None, str, Dict[str, str]]]


class ProxyMiddleware:
    def __init__(self, settings, log_level):
        self.logger = get_logger(self.__class__.__name__, log_level)

        self._session: Optional[Any] = None  # aiohttp.ClientSession when aiohttp is available
        self._current_proxy: Optional[Union[str, Dict[str, str]]] = None
        self._last_fetch_time: float = 0

        self.proxy_extractor = settings.get("PROXY_EXTRACTOR", "proxy")
        self.refresh_interval = settings.get_float("PROXY_REFRESH_INTERVAL", 60)
        self.timeout = settings.get_float("PROXY_API_TIMEOUT", 10)

        self.enabled = settings.get_bool("PROXY_ENABLED", True)

        if not self.enabled:
            self.logger.info("ProxyMiddleware 已被禁用 (PROXY_ENABLED=False)")
            return

        self.api_url = settings.get("PROXY_API_URL")
        if not self.api_url:
            raise NotConfiguredError("PROXY_API_URL 未配置,ProxyMiddleware 已禁用")

        self.logger.info(f"代理中间件已启用 | API: {self.api_url} | 刷新间隔: {self.refresh_interval}s")

    @classmethod
    def create_instance(cls, crawler):
        return cls(settings=crawler.settings, log_level=crawler.settings.get("LOG_LEVEL"))

    def _compile_extractor(self) -> ProxyExtractor:
        if callable(self.proxy_extractor):
            return self.proxy_extractor

        if isinstance(self.proxy_extractor, str):
            keys = self.proxy_extractor.split(".")

            def extract(data: Dict[str, Any]) -> Union[None, str, Dict[str, str]]:
                for k in keys:
                    if isinstance(data, dict):
                        data = data.get(k)
                    else:
                        return None
                    if data is None:
                        break
                return data

            return extract

        raise ValueError(f"PROXY_EXTRACTOR 必须是 str 或 callable,当前类型: {type(self.proxy_extractor)}")

    async def _close_session(self):
        if self._session:
            try:
                await self._session.close()
                self.logger.debug("已关闭 aiohttp session.")
            except Exception as e:
                self.logger.warning(f"关闭 aiohttp session 时出错: {e}")
            finally:
                self._session = None

    async def _get_session(self) -> Any:  # returns aiohttp.ClientSession when aiohttp is available
        if aiohttp is None:
            raise RuntimeError("aiohttp 未安装,无法使用 ProxyMiddleware")

        if self._session is None or self._session.closed:
            if self._session and self._session.closed:
                self.logger.debug("现有 session 已关闭,正在创建新 session...")
            timeout = aiohttp.ClientTimeout(total=self.timeout)
            self._session = aiohttp.ClientSession(timeout=timeout)
            self.logger.debug("已创建新的 aiohttp session.")
        return self._session

    async def _fetch_raw_data(self) -> Optional[Dict[str, Any]]:
        max_retries = 2
        retry_count = 0

        while retry_count <= max_retries:
            session = await self._get_session()
            try:
                async with session.get(self.api_url) as resp:
                    content_type = resp.content_type.lower()
                    if 'application/json' not in content_type:
                        self.logger.warning(f"代理 API 返回非 JSON 内容类型: {content_type} (URL: {self.api_url})")
                        try:
                            text = await resp.text()
                            return {"__raw_text__": text.strip(), "__content_type__": content_type}
                        except Exception as e:
                            self.logger.error(f"读取非 JSON 响应体失败: {repr(e)}")
                            return None

                    if resp.status != 200:
                        try:
                            error_text = await resp.text()
                        except:
                            error_text = "<无法读取响应体>"
                        self.logger.error(f"代理 API 状态码异常: {resp.status}, 响应体: {error_text}")
                        if 400 <= resp.status < 500:
                            return None
                        return None

                    return await resp.json()

            except NETWORK_EXCEPTIONS as e:
                retry_count += 1
                self.logger.warning(f"请求代理 API 失败 (尝试 {retry_count}/{max_retries + 1}): {repr(e)}")
                if retry_count <= max_retries:
                    self.logger.info("正在关闭并重建 session 以进行重试...")
                    await self._close_session()
                else:
                    self.logger.error(f"请求代理 API 失败,已达到最大重试次数 ({max_retries + 1}): {repr(e)}")
                    return None

            except aiohttp.ContentTypeError as e:
                self.logger.error(f"代理 API 响应内容类型错误: {repr(e)}")
                return None

            except Exception as e:
                self.logger.critical(f"请求代理 API 时发生未预期错误: {repr(e)}", exc_info=True)
                return None

        return None

    async def _extract_proxy(self, data: Dict[str, Any]) -> Optional[Union[str, Dict[str, str]]]:
        extractor = self._compile_extractor()
        try:
            result = extractor(data)
            if isinstance(result, str) and result.strip():
                return result.strip()
            elif isinstance(result, dict):
                cleaned = {k: v.strip() for k, v in result.items() if v and isinstance(v, str)}
                return cleaned if cleaned else None
            return None
        except Exception as e:
            self.logger.error(f"执行 PROXY_EXTRACTOR 时出错: {repr(e)}")
            return None

    async def _get_proxy_from_api(self) -> Optional[Union[str, Dict[str, str]]]:
        raw_data = await self._fetch_raw_data()
        if not raw_data:
            return None

        if "__raw_text__" in raw_data:
            text = raw_data["__raw_text__"]
            if text.startswith("http://") or text.startswith("https://"):
                return text

        return await self._extract_proxy(raw_data)

    async def _get_cached_proxy(self) -> Optional[str]:
        if not self.enabled:
            self.logger.debug("ProxyMiddleware 已禁用,跳过代理获取。")
            return None

        now = asyncio.get_event_loop().time()
        if self._current_proxy and (now - self._last_fetch_time) < self.refresh_interval:
            pass
        else:
            proxy = await self._get_proxy_from_api()
            if proxy:
                self._current_proxy = proxy
                self._last_fetch_time = now
                self.logger.debug(f"更新代理缓存: {proxy}")
            else:
                self.logger.warning("无法获取新代理,请求将直连。")

        return self._current_proxy

    @staticmethod
    def _is_https(request: Request) -> bool:
        return urlparse(request.url).scheme == "https"

    async def process_request(self, request: Request, spider) -> Optional[Request]:
        if not self.enabled:
            self.logger.debug(f"ProxyMiddleware 已禁用,请求将直连: {request.url}")
            return None

        if request.proxy:
            return None

        proxy = await self._get_cached_proxy()
        if proxy:
            request.proxy = proxy
            self.logger.info(f"分配代理 → {proxy} | {request.url}")
        else:
            self.logger.warning(f"未获取到代理,请求直连: {request.url}")

        return None

    def process_response(self, request: Request, response: Response, spider) -> Response:
        proxy = request.proxy
        if proxy:
            status_code = getattr(response, 'status_code', 'N/A')
            self.logger.debug(f"代理成功: {proxy} | {request.url} | Status: {status_code}")
        return response

    def process_exception(self, request: Request, exception: Exception, spider) -> Optional[Request]:
        proxy = request.proxy
        if proxy:
            self.logger.warning(f"代理请求失败: {proxy} | {request.url} | {repr(exception)}")
        return None

    async def close(self):
        await self._close_session()
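For reference, a minimal configuration sketch follows. The setting names and defaults are taken directly from the settings.get(...) calls in ProxyMiddleware.__init__ above; the endpoint URL, the JSON payload shape, and placing these values in a project settings module are illustrative assumptions, and how the middleware itself is registered is not shown in this diff.

# Hypothetical settings entries for ProxyMiddleware (names taken from the code above).
PROXY_ENABLED = True                              # settings.get_bool("PROXY_ENABLED", True)
PROXY_API_URL = "http://proxy.example.com/get"    # hypothetical endpoint returning JSON
PROXY_API_TIMEOUT = 10                            # seconds; settings.get_float("PROXY_API_TIMEOUT", 10)
PROXY_REFRESH_INTERVAL = 60                       # seconds between proxy refreshes

# PROXY_EXTRACTOR may be a dotted key path walked over the JSON payload,
# e.g. {"data": {"proxy": "http://1.2.3.4:8000"}} is read with "data.proxy".
PROXY_EXTRACTOR = "data.proxy"

# A callable is also accepted (see _compile_extractor); the payload shape here is hypothetical:
# PROXY_EXTRACTOR = lambda payload: payload.get("data", {}).get("proxy")

With values like these, process_request assigns the cached proxy to any request that does not already carry one and refreshes it from PROXY_API_URL at most once per PROXY_REFRESH_INTERVAL seconds; if no proxy can be fetched, the request is sent without one.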
crawlo/middleware/request_ignore.py
CHANGED

@@ -1,30 +1,30 @@

All 30 lines are removed and re-added with identical content, so the file is shown once below.

#!/usr/bin/python
# -*- coding:UTF-8 -*-
from crawlo.utils.log import get_logger
from crawlo.exceptions import IgnoreRequestError
from crawlo.event import ignore_request


class RequestIgnoreMiddleware(object):

    def __init__(self, stats, log_level):
        self.logger = get_logger(self.__class__.__name__, log_level)
        self.stats = stats

    @classmethod
    def create_instance(cls, crawler):
        o = cls(stats=crawler.stats, log_level=crawler.settings.get('LOG_LEVEL'))
        crawler.subscriber.subscribe(o.request_ignore, event=ignore_request)
        return o

    async def request_ignore(self, exc, request, _spider):
        self.logger.info(f'{request} ignored.')
        self.stats.inc_value('request_ignore_count')
        reason = exc.msg
        if reason:
            self.stats.inc_value(f'request_ignore_count/{reason}')

    @staticmethod
    def process_exception(_request, exc, _spider):
        if isinstance(exc, IgnoreRequestError):
            return True
crawlo/middleware/response_code.py
CHANGED

@@ -1,19 +1,19 @@

Lines 1-18 are removed and re-added with identical content (line 19 is unchanged context), so the file is shown once below.

#!/usr/bin/python
# -*- coding:UTF-8 -*-
from crawlo.utils.log import get_logger


class ResponseCodeMiddleware(object):
    def __init__(self, stats, log_level):
        self.logger = get_logger(self.__class__.__name__, log_level)
        self.stats = stats

    @classmethod
    def create_instance(cls, crawler):
        o = cls(stats=crawler.stats, log_level=crawler.settings.get('LOG_LEVEL'))
        return o

    def process_response(self, request, response, spider):
        self.stats.inc_value(f'stats_code/count/{response.status_code}')
        self.logger.debug(f'Got response from <{response.status_code} {response.url}>')
        return response
crawlo/middleware/response_filter.py
CHANGED

@@ -1,26 +1,26 @@

All 26 lines are removed and re-added with identical content, so the file is shown once below.

#!/usr/bin/python
# -*- coding:UTF-8 -*-
from crawlo.utils.log import get_logger
from crawlo.exceptions import IgnoreRequestError


class ResponseFilterMiddleware:

    def __init__(self, allowed_codes, log_level):
        self.allowed_codes = allowed_codes
        self.logger = get_logger(self.__class__.__name__, log_level)

    @classmethod
    def create_instance(cls, crawler):
        o = cls(
            allowed_codes=crawler.settings.get_list('ALLOWED_CODES'),
            log_level=crawler.settings.get('LOG_LEVEL')
        )
        return o

    def process_response(self, request, response, spider):
        if 200 <= response.status_code < 300:
            return response
        if response.status_code in self.allowed_codes:
            return response
        raise IgnoreRequestError(f"response status_code/non-200")
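To make the filtering rule above concrete, here is a small self-contained sketch that mirrors the decision in ResponseFilterMiddleware.process_response: 2xx responses always pass, status codes listed in ALLOWED_CODES pass, and anything else is ignored via IgnoreRequestError. The helper function and the sample ALLOWED_CODES values are illustrative and not part of the package.

# Stand-in for settings.get_list('ALLOWED_CODES'); the values are hypothetical.
ALLOWED_CODES = [301, 302, 404]


def passes_filter(status_code: int) -> bool:
    """Mirror of the middleware's keep/ignore decision."""
    if 200 <= status_code < 300:      # 2xx responses are always kept
        return True
    return status_code in ALLOWED_CODES


assert passes_filter(200) is True     # normal success
assert passes_filter(404) is True     # explicitly allowed above
assert passes_filter(500) is False    # the middleware would raise IgnoreRequestError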