crawlo 1.0.4__py3-none-any.whl → 1.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- crawlo/__init__.py +25 -9
- crawlo/__version__.py +1 -1
- crawlo/core/__init__.py +2 -2
- crawlo/core/engine.py +158 -158
- crawlo/core/processor.py +40 -40
- crawlo/core/scheduler.py +57 -57
- crawlo/crawler.py +424 -242
- crawlo/downloader/__init__.py +78 -78
- crawlo/downloader/aiohttp_downloader.py +200 -259
- crawlo/downloader/cffi_downloader.py +277 -0
- crawlo/downloader/httpx_downloader.py +246 -187
- crawlo/event.py +11 -11
- crawlo/exceptions.py +73 -64
- crawlo/extension/__init__.py +31 -31
- crawlo/extension/log_interval.py +49 -49
- crawlo/extension/log_stats.py +44 -44
- crawlo/extension/logging_extension.py +35 -0
- crawlo/filters/__init__.py +37 -37
- crawlo/filters/aioredis_filter.py +150 -150
- crawlo/filters/memory_filter.py +202 -202
- crawlo/items/__init__.py +62 -62
- crawlo/items/items.py +115 -119
- crawlo/middleware/__init__.py +21 -21
- crawlo/middleware/default_header.py +32 -32
- crawlo/middleware/download_delay.py +28 -28
- crawlo/middleware/middleware_manager.py +135 -140
- crawlo/middleware/proxy.py +246 -0
- crawlo/middleware/request_ignore.py +30 -30
- crawlo/middleware/response_code.py +18 -18
- crawlo/middleware/response_filter.py +26 -26
- crawlo/middleware/retry.py +90 -90
- crawlo/network/__init__.py +7 -7
- crawlo/network/request.py +203 -204
- crawlo/network/response.py +166 -166
- crawlo/pipelines/__init__.py +13 -13
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/mongo_pipeline.py +116 -116
- crawlo/pipelines/mysql_batch_pipline.py +273 -134
- crawlo/pipelines/mysql_pipeline.py +195 -195
- crawlo/pipelines/pipeline_manager.py +56 -56
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +169 -94
- crawlo/settings/setting_manager.py +99 -99
- crawlo/spider/__init__.py +41 -36
- crawlo/stats_collector.py +59 -59
- crawlo/subscriber.py +106 -106
- crawlo/task_manager.py +27 -27
- crawlo/templates/item_template.tmpl +21 -21
- crawlo/templates/project_template/main.py +32 -32
- crawlo/templates/project_template/setting.py +189 -189
- crawlo/templates/spider_template.tmpl +30 -30
- crawlo/utils/__init__.py +7 -7
- crawlo/utils/concurrency_manager.py +124 -124
- crawlo/utils/date_tools.py +233 -177
- crawlo/utils/db_helper.py +344 -0
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/log.py +129 -39
- crawlo/utils/pqueue.py +173 -173
- crawlo/utils/project.py +59 -59
- crawlo/utils/request.py +267 -122
- crawlo/utils/system.py +11 -11
- crawlo/utils/tools.py +5 -303
- crawlo/utils/url.py +39 -39
- {crawlo-1.0.4.dist-info → crawlo-1.0.5.dist-info}/METADATA +49 -48
- crawlo-1.0.5.dist-info/RECORD +84 -0
- {crawlo-1.0.4.dist-info → crawlo-1.0.5.dist-info}/top_level.txt +1 -0
- examples/__init__.py +0 -0
- examples/gxb/__init__.py +0 -0
- examples/gxb/items.py +36 -0
- examples/gxb/run.py +15 -0
- examples/gxb/settings.py +71 -0
- examples/gxb/spider/__init__.py +0 -0
- examples/gxb/spider/miit_spider.py +180 -0
- examples/gxb/spider/telecom_device_licenses.py +129 -0
- tests/__init__.py +7 -7
- tests/test_proxy_health_check.py +33 -0
- tests/test_proxy_middleware_integration.py +137 -0
- tests/test_proxy_providers.py +57 -0
- tests/test_proxy_stats.py +20 -0
- tests/test_proxy_strategies.py +60 -0
- crawlo/downloader/playwright_downloader.py +0 -161
- crawlo-1.0.4.dist-info/RECORD +0 -79
- tests/baidu_spider/__init__.py +0 -7
- tests/baidu_spider/demo.py +0 -94
- tests/baidu_spider/items.py +0 -25
- tests/baidu_spider/middleware.py +0 -49
- tests/baidu_spider/pipeline.py +0 -55
- tests/baidu_spider/request_fingerprints.txt +0 -9
- tests/baidu_spider/run.py +0 -27
- tests/baidu_spider/settings.py +0 -80
- tests/baidu_spider/spiders/__init__.py +0 -7
- tests/baidu_spider/spiders/bai_du.py +0 -61
- tests/baidu_spider/spiders/sina.py +0 -79
- {crawlo-1.0.4.dist-info → crawlo-1.0.5.dist-info}/WHEEL +0 -0
- {crawlo-1.0.4.dist-info → crawlo-1.0.5.dist-info}/entry_points.txt +0 -0
crawlo/downloader/cffi_downloader.py

@@ -0,0 +1,277 @@

#!/usr/bin/python
# -*- coding: UTF-8 -*-
import asyncio
import random
import time
from typing import Optional, Dict, Any
from curl_cffi import CurlError
from curl_cffi.requests import AsyncSession

from crawlo import Response
from crawlo.downloader import DownloaderBase


class CurlCffiDownloader(DownloaderBase):
    """
    High-performance asynchronous downloader built on curl-cffi.
    - Impersonates real browser fingerprints to get past anti-bot checks such as Cloudflare
    - High-performance async HTTP client based on libcurl
    - Memory-safe response handling
    - Automatic proxy and cookie management
    - Supports request delays, retries, size warnings, and other advanced features
    """

    def __init__(self, crawler):
        super().__init__(crawler)
        self.session: Optional[AsyncSession] = None
        self.max_download_size: int = 0
        self.download_warn_size: int = 0
        self.download_delay: float = 0
        self.randomize_delay: bool = False
        self.default_headers: dict = {}
        # Defaults; overridden from settings in open()
        self.browser_type_str: str = "chrome136"
        self._last_request_time: float = 0
        self._active_requests: set = set()  # tracks in-flight requests

    def open(self):
        super().open()
        self.logger.info("Opening CurlCffiDownloader")

        # Read configuration
        timeout_secs = self.crawler.settings.get_int("DOWNLOAD_TIMEOUT", 30)
        verify_ssl = self.crawler.settings.get_bool("VERIFY_SSL", True)
        pool_size = self.crawler.settings.get_int("CONNECTION_POOL_LIMIT", 100)
        self.max_download_size = self.crawler.settings.get_int("DOWNLOAD_MAXSIZE", 10 * 1024 * 1024)  # 10 MB
        self.download_warn_size = self.crawler.settings.get_int("DOWNLOAD_WARN_SIZE", 1024 * 1024)  # 1 MB
        self.download_delay = self.crawler.settings.get_float("DOWNLOAD_DELAY", 0)
        # Backwards compatible with the legacy RANDOMNESS setting
        self.randomize_delay = self.crawler.settings.get_bool("RANDOMIZE_DOWNLOAD_DELAY",
                                                              self.crawler.settings.get_bool("RANDOMNESS", False))
        self.default_headers = self.crawler.settings.get_dict("DEFAULT_REQUEST_HEADERS", {})

        # --- Browser fingerprint impersonation ---
        # 1. Read the user-defined browser version map
        user_browser_map = self.crawler.settings.get_dict("CURL_BROWSER_VERSION_MAP", {})
        # 2. Hard-coded default browser version map
        default_browser_map = self._get_default_browser_map()
        # 3. Merge the two; user configuration takes precedence
        effective_browser_map = {**default_browser_map, **user_browser_map}

        # 4. Read the browser type (map key) chosen by the user
        raw_browser_type_str = self.crawler.settings.get("CURL_BROWSER_TYPE", "chrome")

        # 5. Normalize via the merged map:
        #    if raw_browser_type_str is a known key, use the mapped value;
        #    otherwise (e.g. a concrete version such as "chrome136") use it verbatim
        self.browser_type_str = effective_browser_map.get(raw_browser_type_str.lower(), raw_browser_type_str)

        # Session configuration
        session_config = {
            "timeout": timeout_secs,
            "verify": verify_ssl,
            "max_clients": pool_size,  # use max_clients for the pool size
            "impersonate": self.browser_type_str,  # browser fingerprint to impersonate
        }

        # Create the shared session
        self.session = AsyncSession(**session_config)

        self.logger.debug(f"CurlCffiDownloader initialized, impersonating browser: {self.browser_type_str}")

    @staticmethod
    def _get_default_browser_map() -> Dict[str, str]:
        """Return the hard-coded default browser-to-version map."""
        return {
            "chrome": "chrome136",
            "edge": "edge101",
            "safari": "safari184",
            "firefox": "firefox135",
            # Extend with further versions supported by curl-cffi as needed
        }

    async def download(self, request) -> Optional[Response]:
        if not self.session:
            raise RuntimeError("CurlCffiDownloader session is not open")

        # Throttle between requests
        await self._apply_download_delay()

        # Local retry loop; falls back to the existing MAX_RETRY_TIMES setting
        max_retries = self.crawler.settings.get_int("DOWNLOAD_RETRY_TIMES",
                                                    self.crawler.settings.get_int("MAX_RETRY_TIMES", 1))
        last_exception = None
        request_id = id(request)

        for attempt in range(max_retries + 1):
            self._active_requests.add(request_id)
            try:
                # Attempt the request; return the response on success
                return await self._execute_request(request)

            except (CurlError, asyncio.TimeoutError) as e:
                last_exception = e
                if attempt < max_retries:
                    retry_delay = 2 ** attempt  # exponential backoff
                    self.logger.warning(
                        f"Retry {attempt + 1}/{max_retries} for {request.url}, "
                        f"waiting {retry_delay}s, reason: {type(e).__name__}")
                    await asyncio.sleep(retry_delay)
                else:
                    self.logger.error(
                        f"Request {request.url} failed after {max_retries} retries: {type(e).__name__}: {e}")
            except Exception as e:
                last_exception = e
                self.logger.critical(f"Unexpected error for request {request.url}: {e}", exc_info=True)
                # Unexpected errors are not retried
                break
            finally:
                self._active_requests.discard(request_id)

        # Reaching this point means every retry failed or a non-retriable error occurred
        if last_exception:
            raise last_exception
        # This line should be unreachable if exceptions are handled correctly above
        raise RuntimeError(f"Download of {request.url} failed after retries or a non-retriable error")

    async def _apply_download_delay(self):
        """Apply the configured download delay."""
        if self.download_delay > 0:
            # __init__ sets _last_request_time to 0, so the first request is never delayed
            elapsed = time.time() - self._last_request_time

            if elapsed < self.download_delay:
                delay = self.download_delay - elapsed
                if self.randomize_delay:
                    # Backwards compatible with the legacy RANDOM_RANGE setting
                    range_tuple = self.crawler.settings.get("RANDOM_RANGE", (0.75, 1.25))
                    if isinstance(range_tuple, (list, tuple)) and len(range_tuple) == 2:
                        delay *= random.uniform(range_tuple[0], range_tuple[1])
                    else:
                        delay *= random.uniform(0.5, 1.5)  # fallback range
                # Keep the fractional part: truncating with int() would turn sub-second delays into 0
                await asyncio.sleep(max(0.0, delay))
            self._last_request_time = time.time()

    async def _execute_request(self, request) -> Response:
        """Execute a single request."""
        if not self.session:
            raise RuntimeError("Session is not initialized")

        # Build the request arguments
        kwargs = self._build_request_kwargs(request)

        # Dispatch via the matching session method
        method = request.method.lower()
        if not hasattr(self.session, method):
            raise ValueError(f"Unsupported HTTP method: {request.method}")

        method_func = getattr(self.session, method)

        # Core fix: await the session method directly; exceptions propagate
        # to download(), which handles retries
        response = await method_func(request.url, **kwargs)

        # Check the declared Content-Length first
        content_length = response.headers.get("Content-Length")
        if content_length:
            try:
                cl = int(content_length)
                if cl > self.max_download_size:
                    raise OverflowError(
                        f"Response too large (per Content-Length): {cl} > {self.max_download_size}")
            except ValueError:
                self.logger.warning(f"Invalid Content-Length header value: {content_length}")

        # Core fix: response.content is already bytes in curl-cffi; use it directly
        body = response.content

        # Re-check the actual size in case Content-Length was missing or inaccurate
        actual_size = len(body)
        if actual_size > self.max_download_size:
            raise OverflowError(f"Response body too large: {actual_size} > {self.max_download_size}")

        # Warn on large bodies
        if actual_size > self.download_warn_size:
            self.logger.warning(f"Large response body: {actual_size} bytes from {request.url}")

        return self._structure_response(request, response, body)

    def _build_request_kwargs(self, request) -> Dict[str, Any]:
        """Build the keyword arguments for a curl-cffi request."""
        # Merge default headers with per-request headers (request wins);
        # request.headers may be absent or None
        request_headers = getattr(request, 'headers', {}) or {}
        headers = {**self.default_headers, **request_headers}

        kwargs = {
            "headers": headers,
            "cookies": getattr(request, 'cookies', {}) or {},  # safely get cookies
            "allow_redirects": getattr(request, 'allow_redirects', True),  # safely get allow_redirects
        }

        # Proxy settings: curl-cffi usually takes a proxies dict of the form
        # {'http': '...', 'https': '...'}
        if hasattr(request, 'proxy') and request.proxy:
            # Simplification: assume the proxy URL serves both http and https;
            # a scheme-specific choice based on request.url could be added
            if request.proxy.startswith(('http://', 'https://')):
                kwargs["proxies"] = {"http": request.proxy, "https": request.proxy}
            else:
                # Other proxy schemes (e.g. socks) are passed through as-is
                self.logger.warning(
                    f"Proxy format may need adjusting for curl-cffi: {request.proxy}. Passing it through unchanged.")
                kwargs["proxy"] = request.proxy  # fall back to the simpler 'proxy' kwarg

        # Pick the right body encoding:
        # prefer _json_body if the framework sets it
        if hasattr(request, "_json_body") and request._json_body is not None:
            kwargs["json"] = request._json_body
        # next, treat a dict/list body as JSON (legacy body=dict usage)
        elif isinstance(getattr(request, 'body', None), (dict, list)):
            kwargs["json"] = request.body
        # finally, pass any other body (str, bytes, ...) as raw data
        elif getattr(request, 'body', None) is not None:
            kwargs["data"] = request.body

        return kwargs

    @staticmethod
    def _structure_response(request, response, body: bytes) -> Response:
        """Build the framework Response object."""
        return Response(
            url=str(response.url),
            headers=dict(response.headers),
            status_code=response.status_code,
            body=body,
            request=request,
        )

    async def close(self) -> None:
        """Release session resources."""
        if self.session:
            self.logger.info("Closing CurlCffiDownloader session...")
            try:
                await self.session.close()
            except Exception as e:
                self.logger.warning(f"Error while closing curl-cffi session: {e}")
            finally:
                self.session = None
                self.logger.debug("CurlCffiDownloader closed")

    def idle(self) -> bool:
        """Report whether the downloader is idle."""
        return len(self._active_requests) == 0

    def __len__(self) -> int:
        """Return the number of active requests."""
        return len(self._active_requests)