crawlo-1.4.5-py3-none-any.whl → crawlo-1.4.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (44)
  1. crawlo/__version__.py +1 -1
  2. crawlo/downloader/cffi_downloader.py +3 -1
  3. crawlo/middleware/proxy.py +171 -348
  4. crawlo/pipelines/mysql_pipeline.py +339 -188
  5. crawlo/settings/default_settings.py +38 -30
  6. crawlo/stats_collector.py +10 -1
  7. crawlo/templates/project/settings.py.tmpl +10 -55
  8. crawlo/templates/project/settings_distributed.py.tmpl +20 -22
  9. crawlo/templates/project/settings_gentle.py.tmpl +5 -0
  10. crawlo/templates/project/settings_high_performance.py.tmpl +5 -0
  11. crawlo/templates/project/settings_minimal.py.tmpl +25 -1
  12. crawlo/templates/project/settings_simple.py.tmpl +5 -0
  13. crawlo/templates/run.py.tmpl +1 -8
  14. crawlo/templates/spider/spider.py.tmpl +5 -108
  15. crawlo/utils/db_helper.py +11 -5
  16. {crawlo-1.4.5.dist-info → crawlo-1.4.6.dist-info}/METADATA +1 -1
  17. {crawlo-1.4.5.dist-info → crawlo-1.4.6.dist-info}/RECORD +43 -29
  18. tests/authenticated_proxy_example.py +10 -6
  19. tests/explain_mysql_update_behavior.py +77 -0
  20. tests/simulate_mysql_update_test.py +140 -0
  21. tests/test_asyncmy_usage.py +57 -0
  22. tests/test_crawlo_proxy_integration.py +8 -2
  23. tests/test_downloader_proxy_compatibility.py +24 -20
  24. tests/test_mysql_pipeline_config.py +165 -0
  25. tests/test_mysql_pipeline_error.py +99 -0
  26. tests/test_mysql_pipeline_init_log.py +83 -0
  27. tests/test_mysql_pipeline_integration.py +133 -0
  28. tests/test_mysql_pipeline_refactor.py +144 -0
  29. tests/test_mysql_pipeline_refactor_simple.py +86 -0
  30. tests/test_mysql_pipeline_robustness.py +196 -0
  31. tests/test_mysql_pipeline_types.py +89 -0
  32. tests/test_mysql_update_columns.py +94 -0
  33. tests/test_proxy_middleware.py +104 -8
  34. tests/test_proxy_middleware_enhanced.py +1 -5
  35. tests/test_proxy_middleware_integration.py +7 -2
  36. tests/test_proxy_middleware_refactored.py +25 -2
  37. tests/test_proxy_only.py +84 -0
  38. tests/test_proxy_with_downloader.py +153 -0
  39. tests/test_real_scenario_proxy.py +17 -17
  40. tests/verify_mysql_warnings.py +110 -0
  41. crawlo/middleware/simple_proxy.py +0 -65
  42. {crawlo-1.4.5.dist-info → crawlo-1.4.6.dist-info}/WHEEL +0 -0
  43. {crawlo-1.4.5.dist-info → crawlo-1.4.6.dist-info}/entry_points.txt +0 -0
  44. {crawlo-1.4.5.dist-info → crawlo-1.4.6.dist-info}/top_level.txt +0 -0
crawlo/__version__.py CHANGED
@@ -1 +1 @@
- __version__ = '1.4.5'
+ __version__ = '1.4.6'
crawlo/downloader/cffi_downloader.py CHANGED
@@ -22,7 +22,9 @@ class CurlCffiDownloader(DownloaderBase):
  """

  def __init__(self, crawler):
- self.crawler = crawler
+ # Call the parent class initializer so that attributes such as _closed are initialized correctly
+ super().__init__(crawler)
+
  self.logger = get_logger(self.__class__.__name__, crawler.settings.get('LOG_LEVEL'))
  self._active_requests = set()

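The cffi_downloader change above is a one-line fix: CurlCffiDownloader previously replaced the base-class initializer instead of extending it, so state set up by the base class (such as the _closed flag) was never initialized. A minimal sketch of the pattern, with an illustrative base class rather than crawlo's actual DownloaderBase:

# Hypothetical base class, used only to illustrate why super().__init__() matters here.
class DownloaderBase:
    def __init__(self, crawler):
        self.crawler = crawler
        self._closed = False  # state that base-class methods check later

class CurlCffiDownloader(DownloaderBase):
    def __init__(self, crawler):
        # Without this call, self._closed is never set and any base-class
        # method that reads it raises AttributeError.
        super().__init__(crawler)
        self._active_requests = set()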
crawlo/middleware/proxy.py CHANGED
@@ -1,386 +1,209 @@
  #!/usr/bin/python
  # -*- coding: UTF-8 -*-
- import time
- import asyncio
- import socket
+ """
+ Generic proxy middleware.
+ Supports both a static proxy list and a dynamic proxy API.
+ """
+ import random
  from urllib.parse import urlparse
- from typing import Optional, Dict, Any, Callable, Union, TYPE_CHECKING, List
+ from typing import Optional, List

- from crawlo import Request, Response
- from crawlo.exceptions import NotConfiguredError
+ from crawlo.network import Request, Response
  from crawlo.utils.log import get_logger

- if TYPE_CHECKING:
- import aiohttp
-
- try:
- import httpx
-
- HTTPX_EXCEPTIONS = (httpx.NetworkError, httpx.TimeoutException, httpx.ReadError, httpx.ConnectError)
- except ImportError:
- HTTPX_EXCEPTIONS = ()
- httpx = None
-
- try:
- import aiohttp
-
- AIOHTTP_EXCEPTIONS = (
- aiohttp.ClientError, aiohttp.ClientConnectorError, aiohttp.ClientResponseError, aiohttp.ServerTimeoutError,
- aiohttp.ServerDisconnectedError)
- except ImportError:
- AIOHTTP_EXCEPTIONS = ()
- aiohttp = None
-
- try:
- from curl_cffi import requests as cffi_requests
-
- CURL_CFFI_EXCEPTIONS = (cffi_requests.RequestsError,)
- except (ImportError, AttributeError):
- CURL_CFFI_EXCEPTIONS = ()
- cffi_requests = None
-
- NETWORK_EXCEPTIONS = (
- asyncio.TimeoutError,
- socket.gaierror,
- ConnectionError,
- TimeoutError,
- ) + HTTPX_EXCEPTIONS + AIOHTTP_EXCEPTIONS + CURL_CFFI_EXCEPTIONS
-
- ProxyExtractor = Callable[[Dict[str, Any]], Union[None, str, Dict[str, str]]]
-
-
- class Proxy:
- """Proxy object holding proxy details and usage statistics"""
-
- def __init__(self, proxy_str: str):
- self.proxy_str = proxy_str
- self.success_count = 0
- self.failure_count = 0
- self.last_used_time = 0.0
- self.is_healthy = True
-
- @property
- def success_rate(self) -> float:
- """Compute the proxy success rate"""
- total = self.success_count + self.failure_count
- if total == 0:
- return 1.0
- return self.success_count / total
-
- def mark_success(self):
- """Mark a successful use of the proxy"""
- self.success_count += 1
- self.last_used_time = time.time()
- self.is_healthy = True
-
- def mark_failure(self):
- """Mark a failed use of the proxy"""
- self.failure_count += 1
- self.last_used_time = time.time()
- # Mark the proxy unhealthy if its failure rate is too high
- if self.failure_count > 3 and self.success_rate < 0.5:
- self.is_healthy = False
-

  class ProxyMiddleware:
+ """Generic proxy middleware"""
+
  def __init__(self, settings, log_level):
  self.logger = get_logger(self.__class__.__name__, log_level)

- self._session: Optional[Any] = None # aiohttp.ClientSession when aiohttp is available
- # Replace the single proxy with a proxy pool
- self._proxy_pool: List[Proxy] = []
- self._current_proxy_index: int = 0
- self._last_fetch_time: float = 0
-
- self.proxy_extractor = settings.get("PROXY_EXTRACTOR", "proxy")
- self.refresh_interval = settings.get_float("PROXY_REFRESH_INTERVAL", 60)
- self.timeout = settings.get_float("PROXY_API_TIMEOUT", 10)
- # New setting: proxy pool size
- self.proxy_pool_size = settings.get_int("PROXY_POOL_SIZE", 5)
- # New setting: health-check threshold
- self.health_check_threshold = settings.get_float("PROXY_HEALTH_CHECK_THRESHOLD", 0.5)
-
- self.enabled = settings.get_bool("PROXY_ENABLED", True)
-
- if not self.enabled:
- self.logger.info("ProxyMiddleware disabled")
- return
-
+ # Read the proxy list and API URL
+ self.proxies: List[str] = settings.get("PROXY_LIST", [])
  self.api_url = settings.get("PROXY_API_URL")
- if not self.api_url:
- raise NotConfiguredError("PROXY_API_URL not configured, ProxyMiddleware disabled")
-
- self.logger.info(
- f"Proxy middleware enabled | API: {self.api_url} | Refresh interval: {self.refresh_interval}s | Proxy pool size: {self.proxy_pool_size}")
+ # Read the proxy extraction configuration
+ self.proxy_extractor = settings.get("PROXY_EXTRACTOR", "proxy") # extract from the "proxy" field by default
+
+ # Track failed proxies so they are not reused
+ self.failed_proxies = set()
+ # Maximum number of failures before a proxy is marked as dead
+ self.max_failed_attempts = settings.get("PROXY_MAX_FAILED_ATTEMPTS", 3)
+ # Per-proxy failure counters
+ self.proxy_failure_count = {}
+
+ # Pick the mode based on configuration
+ if self.proxies:
+ self.mode = "static" # static proxy mode
+ self.enabled = True
+ self.logger.info(f"ProxyMiddleware enabled (static mode) with {len(self.proxies)} proxies")
+ elif self.api_url:
+ self.mode = "dynamic" # dynamic proxy mode
+ self.enabled = True
+ self.logger.info(f"ProxyMiddleware enabled (dynamic mode) | API: {self.api_url}")
+ else:
+ self.mode = None
+ self.enabled = False
+ self.logger.info("ProxyMiddleware disabled (no proxy configuration)")

  @classmethod
  def create_instance(cls, crawler):
  return cls(settings=crawler.settings, log_level=crawler.settings.get("LOG_LEVEL"))

- def _compile_extractor(self) -> ProxyExtractor:
- if callable(self.proxy_extractor):
- return self.proxy_extractor
-
- if isinstance(self.proxy_extractor, str):
- keys = self.proxy_extractor.split(".")
-
- def extract(data: Dict[str, Any]) -> Union[None, str, Dict[str, str]]:
- for k in keys:
- if isinstance(data, dict):
- data = data.get(k)
- else:
- return None
- if data is None:
- break
- return data
-
- return extract
-
- raise ValueError(f"PROXY_EXTRACTOR must be a str or callable, got: {type(self.proxy_extractor)}")
-
- async def _close_session(self):
- if self._session:
- try:
- await self._session.close()
- self.logger.debug("aiohttp session closed.")
- except Exception as e:
- self.logger.warning(f"Error closing aiohttp session: {e}")
- finally:
- self._session = None
-
- async def _get_session(self) -> Any: # returns aiohttp.ClientSession when aiohttp is available
- if aiohttp is None:
- raise RuntimeError("aiohttp not installed, cannot use ProxyMiddleware")
-
- if self._session is None or self._session.closed:
- if self._session and self._session.closed:
- self.logger.debug("Existing session closed, creating new session...")
- timeout = aiohttp.ClientTimeout(total=self.timeout)
- self._session = aiohttp.ClientSession(timeout=timeout)
- self.logger.debug("New aiohttp session created.")
- return self._session
-
- async def _fetch_raw_data(self) -> Optional[Dict[str, Any]]:
- max_retries = 2
- retry_count = 0
-
- while retry_count <= max_retries:
- session = await self._get_session()
- try:
- async with session.get(self.api_url) as resp:
- content_type = resp.content_type.lower()
- if 'application/json' not in content_type:
- self.logger.warning(
- f"Proxy API returned non-JSON content type: {content_type} (URL: {self.api_url})")
- try:
- text = await resp.text()
- return {"__raw_text__": text.strip(), "__content_type__": content_type}
- except Exception as e:
- self.logger.error(f"Failed to read non-JSON response body: {repr(e)}")
- return None
-
- if resp.status != 200:
- try:
- error_text = await resp.text()
- except:
- error_text = "<Unable to read response body>"
- self.logger.error(f"Proxy API status code error: {resp.status}, Response body: {error_text}")
- if 400 <= resp.status < 500:
- return None
- return None
-
- return await resp.json()
-
- except NETWORK_EXCEPTIONS as e:
- retry_count += 1
- self.logger.warning(f"Failed to request proxy API (attempt {retry_count}/{max_retries + 1}): {repr(e)}")
- if retry_count <= max_retries:
- self.logger.info("Closing and rebuilding session for retry...")
- await self._close_session()
- else:
- self.logger.error(
- f"Failed to request proxy API, maximum retry attempts reached ({max_retries + 1}): {repr(e)}")
- return None
-
- except aiohttp.ContentTypeError as e:
- self.logger.error(f"Proxy API response content type error: {repr(e)}")
- return None
-
- except Exception as e:
- self.logger.critical(f"Unexpected error occurred while requesting proxy API: {repr(e)}", exc_info=True)
- return None
-
- return None
-
- async def _extract_proxy(self, data: Dict[str, Any]) -> Optional[Union[str, Dict[str, str]]]:
- extractor = self._compile_extractor()
+ async def _fetch_proxy_from_api(self) -> Optional[str]:
+ """Fetch a proxy from the proxy API"""
  try:
- result = extractor(data)
- if isinstance(result, str) and result.strip():
- return result.strip()
- elif isinstance(result, dict):
- cleaned = {k: v.strip() if isinstance(v, str) else v for k, v in result.items()}
- return cleaned if cleaned else None
- return None
+ import aiohttp
+ async with aiohttp.ClientSession() as session:
+ async with session.get(self.api_url) as resp:
+ if resp.status == 200:
+ data = await resp.json()
+ # Supports several proxy extraction strategies
+ proxy = self._extract_proxy_from_data(data)
+ if proxy and isinstance(proxy, str) and (proxy.startswith("http://") or proxy.startswith("https://")):
+ return proxy
+ else:
+ self.logger.warning(f"Proxy API returned status {resp.status}")
  except Exception as e:
- self.logger.error(f"Error executing PROXY_EXTRACTOR: {repr(e)}")
- return None
-
- async def _get_proxy_from_api(self) -> Optional[Union[str, Dict[str, str]]]:
- raw_data = await self._fetch_raw_data()
- if not raw_data:
- return None
-
- if "__raw_text__" in raw_data:
- text = raw_data["__raw_text__"]
- if text.startswith("http://") or text.startswith("https://"):
- return text
-
- return await self._extract_proxy(raw_data)
-
- def _parse_proxy_data(self, proxy_data: Union[str, Dict[str, Any]]) -> List[str]:
- """Parse proxy data and extract a list of proxy URLs"""
- new_proxies = []
- if isinstance(proxy_data, str):
- # A single proxy
- if proxy_data.startswith("http://") or proxy_data.startswith("https://"):
- new_proxies = [proxy_data]
- elif isinstance(proxy_data, dict):
- # If it is a dict, try to extract a list of proxies
- for key, value in proxy_data.items():
- if isinstance(value, str) and (value.startswith("http://") or value.startswith("https://")):
- new_proxies.append(value)
- elif isinstance(value, list):
- # If the value is a list, add every valid proxy
- for item in value:
- if isinstance(item, str) and (item.startswith("http://") or item.startswith("https://")):
- new_proxies.append(item)
- return new_proxies
-
- def _get_healthy_proxies(self) -> List[Proxy]:
- """Return all healthy proxies"""
- return [p for p in self._proxy_pool if p.is_healthy and p.success_rate >= self.health_check_threshold]
-
- async def _update_proxy_pool(self):
- """Refresh the proxy pool"""
- if not self.enabled:
- self.logger.debug("ProxyMiddleware disabled, skipping proxy fetch.")
- return
-
- now = asyncio.get_event_loop().time()
- if (now - self._last_fetch_time) < self.refresh_interval:
- return
-
- # Fetch a new proxy list
- proxy_data = await self._get_proxy_from_api()
- if not proxy_data:
- self.logger.warning("Failed to get new proxies, proxy pool will remain unchanged.")
- return
-
- # Parse the proxy data
- new_proxies = self._parse_proxy_data(proxy_data)
-
- # Build the new proxy pool
- if new_proxies:
- self._proxy_pool = [Proxy(proxy_str) for proxy_str in new_proxies[:self.proxy_pool_size]]
- self._current_proxy_index = 0
- self._last_fetch_time = now
- self.logger.info(f"Updated proxy pool, added {len(self._proxy_pool)} proxies")
- else:
- self.logger.warning("No valid proxies parsed, proxy pool will remain unchanged.")
-
- async def _get_healthy_proxy(self) -> Optional[Proxy]:
- """Pick a healthy proxy from the pool"""
- if not self._proxy_pool:
- await self._update_proxy_pool()
-
- if not self._proxy_pool:
- return None
-
- # Look for healthy proxies
- healthy_proxies = self._get_healthy_proxies()
-
- if not healthy_proxies:
- # No healthy proxies; try refreshing the pool
- await self._update_proxy_pool()
- healthy_proxies = self._get_healthy_proxies()
-
- if not healthy_proxies:
- return None
-
- # Select a proxy round-robin
- self._current_proxy_index = (self._current_proxy_index + 1) % len(healthy_proxies)
- selected_proxy = healthy_proxies[self._current_proxy_index]
- return selected_proxy
+ self.logger.warning(f"Failed to fetch proxy from API: {repr(e)}")
+ return None

- @staticmethod
- def _is_https(request: Request) -> bool:
- return urlparse(request.url).scheme == "https"
+ def _extract_proxy_from_data(self, data) -> Optional[str]:
+ """
+ Extract a proxy from the data returned by the API.
+
+ Supported extraction strategies:
+ 1. str: used directly as a field name
+ 2. dict: with "type" and "value" keys; "type" supports "field" and "jsonpath"
+ 3. callable: a user-supplied extraction function
+ """
+ if isinstance(self.proxy_extractor, str):
+ # Simple field-name extraction (backwards compatible)
+ if self.proxy_extractor in data:
+ proxy_value = data[self.proxy_extractor]
+ # If the value is a dict, try the "http" or "https" entry
+ if isinstance(proxy_value, dict):
+ if "http" in proxy_value:
+ return proxy_value["http"]
+ elif "https" in proxy_value:
+ return proxy_value["https"]
+ return proxy_value
+ elif isinstance(self.proxy_extractor, dict):
+ # Structured extraction rule
+ extractor_type = self.proxy_extractor.get("type", "field")
+ extractor_value = self.proxy_extractor.get("value", "proxy")
+
+ if extractor_type == "field":
+ # Field extraction
+ if extractor_value in data:
+ proxy_value = data[extractor_value]
+ # If the value is a dict, try the "http" or "https" entry
+ if isinstance(proxy_value, dict):
+ if "http" in proxy_value:
+ return proxy_value["http"]
+ elif "https" in proxy_value:
+ return proxy_value["https"]
+ return proxy_value
+ elif extractor_type == "jsonpath":
+ # JSONPath extraction (requires the jsonpath library)
+ try:
+ import jsonpath
+ matches = jsonpath.jsonpath(data, extractor_value)
+ if matches:
+ return matches[0]
+ except ImportError:
+ self.logger.warning("jsonpath library not installed, falling back to default extraction")
+ if "proxy" in data:
+ proxy_value = data["proxy"]
+ if isinstance(proxy_value, dict):
+ if "http" in proxy_value:
+ return proxy_value["http"]
+ elif "https" in proxy_value:
+ return proxy_value["https"]
+ return proxy_value
+ elif extractor_type == "custom":
+ # Custom extraction function supplied by the user
+ custom_func = self.proxy_extractor.get("function")
+ if callable(custom_func):
+ return custom_func(data)
+ elif callable(self.proxy_extractor):
+ # Call the user-supplied callable directly
+ return self.proxy_extractor(data)
+
+ # Default extraction (backwards compatible)
+ if "proxy" in data:
+ proxy_value = data["proxy"]
+ # If the value is a dict, try the "http" or "https" entry
+ if isinstance(proxy_value, dict):
+ if "http" in proxy_value:
+ return proxy_value["http"]
+ elif "https" in proxy_value:
+ return proxy_value["https"]
+ return proxy_value
+
+ return None

  async def process_request(self, request: Request, spider) -> Optional[Request]:
+ """Assign a proxy to the request"""
  if not self.enabled:
- self.logger.debug(f"ProxyMiddleware disabled, request will connect directly: {request.url}")
  return None

  if request.proxy:
+ # The request already carries a proxy; do not override it
  return None

- proxy_obj = await self._get_healthy_proxy()
- if proxy_obj:
- proxy = proxy_obj.proxy_str
- # Handle proxy URLs that carry credentials
- if isinstance(proxy, str) and "@" in proxy and "://" in proxy:
- # Parse the authenticated proxy URL
- parsed = urlparse(proxy)
- if parsed.username and parsed.password:
- # The aiohttp downloader needs the credentials handled separately
- downloader_type = spider.crawler.settings.get("DOWNLOADER_TYPE", "aiohttp")
- if downloader_type == "aiohttp":
- # Store the credentials in meta so the downloader can apply them
- request.meta["proxy_auth"] = {
- "username": parsed.username,
- "password": parsed.password
- }
- # Strip the credentials from the URL
- clean_proxy = f"{parsed.scheme}://{parsed.hostname}"
- if parsed.port:
- clean_proxy += f":{parsed.port}"
- request.proxy = clean_proxy
- else:
- # Other downloaders can use the authenticated URL directly
- request.proxy = proxy
- else:
- request.proxy = proxy
+ proxy = None
+ if self.mode == "static" and self.proxies:
+ # Static mode: pick a random proxy, excluding known failures
+ available_proxies = [p for p in self.proxies if p not in self.failed_proxies]
+ if available_proxies:
+ proxy = random.choice(available_proxies)
  else:
- request.proxy = proxy
-
- # Record the proxy that was used
- request.meta["_used_proxy"] = proxy_obj
- self.logger.info(f"Assigned proxy → {proxy} | {request.url}")
+ self.logger.warning("All static proxies have failed; falling back to a direct connection")
+ elif self.mode == "dynamic" and self.api_url:
+ # Dynamic mode: fetch a proxy from the API
+ proxy = await self._fetch_proxy_from_api()
+
+ if proxy:
+ # Check whether the proxy is on the failure list
+ if proxy in self.failed_proxies:
+ self.logger.warning(f"Proxy {proxy} previously failed; trying it anyway")
+
+ request.proxy = proxy
+ self.logger.debug(f"Assigned proxy {proxy} to {request.url}")
  else:
- self.logger.warning(f"No proxy obtained, request connecting directly: {request.url}")
+ self.logger.warning(f"No proxy available, request connecting directly: {request.url}")

  return None

  def process_response(self, request: Request, response: Response, spider) -> Response:
- proxy_obj = request.meta.get("_used_proxy")
- if proxy_obj and isinstance(proxy_obj, Proxy):
- proxy_obj.mark_success()
- status_code = getattr(response, 'status_code', 'N/A')
- self.logger.debug(f"Proxy success: {proxy_obj.proxy_str} | {request.url} | Status: {status_code}")
- elif request.proxy:
- status_code = getattr(response, 'status_code', 'N/A')
- self.logger.debug(f"Proxy success: {request.proxy} | {request.url} | Status: {status_code}")
+ """Handle the response"""
+ if request.proxy:
+ self.logger.debug(f"Proxy request successful: {request.proxy} | {request.url}")
+ # The proxy request succeeded; drop it from the failure list if present
+ self.failed_proxies.discard(request.proxy)
+ # Reset its failure counter
+ if request.proxy in self.proxy_failure_count:
+ del self.proxy_failure_count[request.proxy]
  return response

  def process_exception(self, request: Request, exception: Exception, spider) -> Optional[Request]:
- proxy_obj = request.meta.get("_used_proxy")
- if proxy_obj and isinstance(proxy_obj, Proxy):
- proxy_obj.mark_failure()
- self.logger.warning(f"Proxy request failed: {proxy_obj.proxy_str} | {request.url} | {repr(exception)}")
- elif request.proxy:
- self.logger.warning(f"Proxy request failed: {request.proxy} | {request.url} | {repr(exception)}")
+ """Handle an exception"""
+ if request.proxy:
+ error_msg = f"Proxy request failed: {request.proxy} | {request.url} | {repr(exception)}"
+ self.logger.warning(error_msg)
+
+ # Record the proxy failure
+ if request.proxy not in self.proxy_failure_count:
+ self.proxy_failure_count[request.proxy] = 0
+ self.proxy_failure_count[request.proxy] += 1
+
+ # Mark the proxy as dead once it reaches the failure threshold
+ if self.proxy_failure_count[request.proxy] >= self.max_failed_attempts:
+ self.failed_proxies.add(request.proxy)
+ self.logger.warning(f"Proxy {request.proxy} has failed {self.max_failed_attempts} times; marking it as dead")
+
+ # Remove it from the proxy list (static mode only)
+ if self.mode == "static" and request.proxy in self.proxies:
+ self.proxies.remove(request.proxy)
+ self.logger.info(f"Removed dead proxy from the static proxy list: {request.proxy}")
  return None
-
- async def close(self):
- await self._close_session()
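
Taken together, the refactored middleware above selects its mode from two settings: a non-empty PROXY_LIST enables the static mode, otherwise a PROXY_API_URL enables the dynamic mode, while PROXY_EXTRACTOR and PROXY_MAX_FAILED_ATTEMPTS tune extraction and failure handling. A sketch of how a project's settings.py might configure either mode, based only on the keys read in __init__ above (the proxy URLs and API endpoint are placeholders, and setting names should be checked against the 1.4.6 templates):

# Static mode: a fixed proxy list; one entry is chosen at random per request.
PROXY_LIST = [
    "http://user:pass@10.0.0.1:8080",
    "http://10.0.0.2:8080",
]
PROXY_MAX_FAILED_ATTEMPTS = 3  # failures before a proxy is marked as dead

# Dynamic mode (used only when PROXY_LIST is empty): fetch a proxy from an API per request.
# PROXY_API_URL = "https://proxy-provider.example.com/get"
# PROXY_EXTRACTOR options mirroring _extract_proxy_from_data:
# PROXY_EXTRACTOR = "proxy"                                        # field name (default)
# PROXY_EXTRACTOR = {"type": "field", "value": "data"}             # explicit field rule
# PROXY_EXTRACTOR = {"type": "jsonpath", "value": "$.data.proxy"}  # needs the jsonpath package
# PROXY_EXTRACTOR = lambda data: data["proxies"][0]                # custom callable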
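
In the dynamic mode, the extracted value depends on the shape of the API's JSON. With the default PROXY_EXTRACTOR = "proxy", either of these illustrative payloads resolves to a proxy URL that _fetch_proxy_from_api will accept:

# Illustrative payloads and what _extract_proxy_from_data returns for each.
payload_a = {"proxy": "http://10.0.0.3:8080"}
# -> "http://10.0.0.3:8080" (a plain string under the configured field)

payload_b = {"proxy": {"http": "http://10.0.0.4:8080", "https": "http://10.0.0.4:8443"}}
# -> "http://10.0.0.4:8080" (the "http" entry is preferred when the field holds a dict)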