crawlo-1.1.3-py3-none-any.whl → crawlo-1.1.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of crawlo might be problematic.
- crawlo/__init__.py +28 -1
- crawlo/__version__.py +1 -1
- crawlo/cleaners/__init__.py +61 -0
- crawlo/cleaners/data_formatter.py +226 -0
- crawlo/cleaners/encoding_converter.py +126 -0
- crawlo/cleaners/text_cleaner.py +233 -0
- crawlo/commands/startproject.py +117 -13
- crawlo/config.py +30 -0
- crawlo/config_validator.py +253 -0
- crawlo/core/engine.py +185 -11
- crawlo/core/scheduler.py +49 -78
- crawlo/crawler.py +6 -6
- crawlo/downloader/__init__.py +24 -0
- crawlo/downloader/aiohttp_downloader.py +8 -0
- crawlo/downloader/cffi_downloader.py +5 -0
- crawlo/downloader/hybrid_downloader.py +214 -0
- crawlo/downloader/playwright_downloader.py +403 -0
- crawlo/downloader/selenium_downloader.py +473 -0
- crawlo/extension/__init__.py +17 -10
- crawlo/extension/health_check.py +142 -0
- crawlo/extension/log_interval.py +27 -18
- crawlo/extension/log_stats.py +62 -24
- crawlo/extension/logging_extension.py +18 -9
- crawlo/extension/memory_monitor.py +105 -0
- crawlo/extension/performance_profiler.py +134 -0
- crawlo/extension/request_recorder.py +108 -0
- crawlo/filters/aioredis_filter.py +50 -12
- crawlo/middleware/proxy.py +26 -2
- crawlo/mode_manager.py +24 -19
- crawlo/network/request.py +30 -3
- crawlo/network/response.py +114 -25
- crawlo/pipelines/mongo_pipeline.py +81 -66
- crawlo/pipelines/mysql_pipeline.py +165 -43
- crawlo/pipelines/redis_dedup_pipeline.py +7 -3
- crawlo/queue/queue_manager.py +15 -2
- crawlo/queue/redis_priority_queue.py +144 -76
- crawlo/settings/default_settings.py +93 -121
- crawlo/subscriber.py +62 -37
- crawlo/templates/project/items.py.tmpl +1 -1
- crawlo/templates/project/middlewares.py.tmpl +73 -49
- crawlo/templates/project/pipelines.py.tmpl +51 -295
- crawlo/templates/project/settings.py.tmpl +93 -17
- crawlo/templates/project/settings_distributed.py.tmpl +120 -0
- crawlo/templates/project/settings_gentle.py.tmpl +95 -0
- crawlo/templates/project/settings_high_performance.py.tmpl +152 -0
- crawlo/templates/project/settings_simple.py.tmpl +69 -0
- crawlo/templates/spider/spider.py.tmpl +2 -38
- crawlo/tools/__init__.py +183 -0
- crawlo/tools/anti_crawler.py +269 -0
- crawlo/tools/authenticated_proxy.py +241 -0
- crawlo/tools/data_validator.py +181 -0
- crawlo/tools/date_tools.py +36 -0
- crawlo/tools/distributed_coordinator.py +387 -0
- crawlo/tools/retry_mechanism.py +221 -0
- crawlo/tools/scenario_adapter.py +263 -0
- crawlo/utils/__init__.py +29 -1
- crawlo/utils/batch_processor.py +261 -0
- crawlo/utils/date_tools.py +58 -1
- crawlo/utils/enhanced_error_handler.py +360 -0
- crawlo/utils/env_config.py +106 -0
- crawlo/utils/error_handler.py +126 -0
- crawlo/utils/performance_monitor.py +285 -0
- crawlo/utils/redis_connection_pool.py +335 -0
- crawlo/utils/redis_key_validator.py +200 -0
- crawlo-1.1.5.dist-info/METADATA +401 -0
- crawlo-1.1.5.dist-info/RECORD +185 -0
- tests/advanced_tools_example.py +276 -0
- tests/authenticated_proxy_example.py +237 -0
- tests/cleaners_example.py +161 -0
- tests/config_validation_demo.py +103 -0
- tests/date_tools_example.py +181 -0
- tests/dynamic_loading_example.py +524 -0
- tests/dynamic_loading_test.py +105 -0
- tests/env_config_example.py +134 -0
- tests/error_handling_example.py +172 -0
- tests/redis_key_validation_demo.py +131 -0
- tests/response_improvements_example.py +145 -0
- tests/test_advanced_tools.py +149 -0
- tests/test_all_redis_key_configs.py +146 -0
- tests/test_authenticated_proxy.py +142 -0
- tests/test_cleaners.py +55 -0
- tests/test_comprehensive.py +147 -0
- tests/test_config_validator.py +194 -0
- tests/test_date_tools.py +124 -0
- tests/test_dynamic_downloaders_proxy.py +125 -0
- tests/test_dynamic_proxy.py +93 -0
- tests/test_dynamic_proxy_config.py +147 -0
- tests/test_dynamic_proxy_real.py +110 -0
- tests/test_edge_cases.py +304 -0
- tests/test_enhanced_error_handler.py +271 -0
- tests/test_env_config.py +122 -0
- tests/test_error_handler_compatibility.py +113 -0
- tests/test_framework_env_usage.py +104 -0
- tests/test_integration.py +357 -0
- tests/test_item_dedup_redis_key.py +123 -0
- tests/test_parsel.py +30 -0
- tests/test_performance.py +328 -0
- tests/test_queue_manager_redis_key.py +177 -0
- tests/test_redis_connection_pool.py +295 -0
- tests/test_redis_key_naming.py +182 -0
- tests/test_redis_key_validator.py +124 -0
- tests/test_response_improvements.py +153 -0
- tests/test_simple_response.py +62 -0
- tests/test_telecom_spider_redis_key.py +206 -0
- tests/test_template_content.py +88 -0
- tests/test_template_redis_key.py +135 -0
- tests/test_tools.py +154 -0
- tests/tools_example.py +258 -0
- crawlo/core/enhanced_engine.py +0 -190
- crawlo-1.1.3.dist-info/METADATA +0 -635
- crawlo-1.1.3.dist-info/RECORD +0 -113
- {crawlo-1.1.3.dist-info → crawlo-1.1.5.dist-info}/WHEEL +0 -0
- {crawlo-1.1.3.dist-info → crawlo-1.1.5.dist-info}/entry_points.txt +0 -0
- {crawlo-1.1.3.dist-info → crawlo-1.1.5.dist-info}/top_level.txt +0 -0
- {examples → tests}/controlled_spider_example.py +0 -0
crawlo/downloader/hybrid_downloader.py
@@ -0,0 +1,214 @@
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Hybrid Downloader
=================
Intelligently selects the appropriate downloader for each request, supporting
both protocol (plain HTTP) requests and dynamically loaded content.

Supported scenarios:
1. Both listing and detail pages require dynamic loading
2. Listing pages use protocol requests, detail pages use dynamic loading
3. Listing pages use dynamic loading, detail pages use protocol requests

Features:
1. Detects content type and selects a suitable downloader
2. Downloader selection based on URL patterns
3. Downloader selection based on request flags
4. Unified interface and response format
5. Automatic resource management and optimization
"""
import asyncio
from typing import Optional, Dict, Type
from urllib.parse import urlparse

from crawlo.downloader import DownloaderBase
from crawlo.network.request import Request
from crawlo.network.response import Response
from crawlo.utils.log import get_logger

# Import downloaders lazily (avoids circular imports)
try:
    from .aiohttp_downloader import AioHttpDownloader
except ImportError:
    AioHttpDownloader = None

try:
    from .httpx_downloader import HttpXDownloader
except ImportError:
    HttpXDownloader = None

try:
    from .cffi_downloader import CurlCffiDownloader
except ImportError:
    CurlCffiDownloader = None

try:
    from .selenium_downloader import SeleniumDownloader
except ImportError:
    SeleniumDownloader = None

try:
    from .playwright_downloader import PlaywrightDownloader
except ImportError:
    PlaywrightDownloader = None


class HybridDownloader(DownloaderBase):
    """
    Hybrid downloader - picks the appropriate downloader from request characteristics.
    """

    def __init__(self, crawler):
        super().__init__(crawler)
        self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))

        # Cache of downloader instances
        self._downloaders: Dict[str, DownloaderBase] = {}

        # Configuration options
        self.default_protocol_downloader = crawler.settings.get("HYBRID_DEFAULT_PROTOCOL_DOWNLOADER", "aiohttp")
        self.default_dynamic_downloader = crawler.settings.get("HYBRID_DEFAULT_DYNAMIC_DOWNLOADER", "playwright")

        # URL pattern configuration
        self.dynamic_url_patterns = set(crawler.settings.get_list("HYBRID_DYNAMIC_URL_PATTERNS", []))
        self.protocol_url_patterns = set(crawler.settings.get_list("HYBRID_PROTOCOL_URL_PATTERNS", []))

        # Domain configuration
        self.dynamic_domains = set(crawler.settings.get_list("HYBRID_DYNAMIC_DOMAINS", []))
        self.protocol_domains = set(crawler.settings.get_list("HYBRID_PROTOCOL_DOMAINS", []))

    def open(self):
        super().open()
        self.logger.info("Opening HybridDownloader")

        # Initialize the default downloaders
        self._initialize_default_downloaders()

    def _initialize_default_downloaders(self):
        """Initialize the default downloaders."""
        # Protocol downloader
        protocol_downloader_cls = self._get_downloader_class(self.default_protocol_downloader)
        if protocol_downloader_cls:
            self._downloaders["protocol"] = protocol_downloader_cls(self.crawler)
            self._downloaders["protocol"].open()

        # Dynamic downloader
        dynamic_downloader_cls = self._get_downloader_class(self.default_dynamic_downloader)
        if dynamic_downloader_cls:
            self._downloaders["dynamic"] = dynamic_downloader_cls(self.crawler)
            # Initialize via the standard open() method
            self._downloaders["dynamic"].open()

        self.logger.debug("Default downloaders initialized")

    def _get_downloader_class(self, downloader_type: str) -> Optional[Type[DownloaderBase]]:
        """Map a downloader type name to its class."""
        downloader_map = {
            "aiohttp": AioHttpDownloader,
            "httpx": HttpXDownloader,
            "curl_cffi": CurlCffiDownloader,
            "selenium": SeleniumDownloader,
            "playwright": PlaywrightDownloader
        }
        return downloader_map.get(downloader_type.lower())

    async def download(self, request: Request) -> Optional[Response]:
        """Select the appropriate downloader for the request and download with it."""
        # Decide which downloader type to use
        downloader_type = self._determine_downloader_type(request)

        # Fetch the matching downloader
        downloader = self._get_or_create_downloader(downloader_type)
        if not downloader:
            raise RuntimeError(f"No downloader available for type: {downloader_type}")

        self.logger.debug(f"Using {downloader_type} downloader for {request.url}")

        # Perform the download
        return await downloader.download(request)

    def _determine_downloader_type(self, request: Request) -> str:
        """Determine the downloader type from request characteristics."""
        url = request.url
        parsed_url = urlparse(url)
        domain = parsed_url.netloc.lower()

        # 1. Explicit request flags
        if request.meta.get("use_dynamic_loader"):
            return "dynamic"
        elif request.meta.get("use_protocol_loader"):
            return "protocol"

        # 2. URL patterns
        for pattern in self.dynamic_url_patterns:
            if pattern in url:
                return "dynamic"

        for pattern in self.protocol_url_patterns:
            if pattern in url:
                return "protocol"

        # 3. Domains
        if domain in self.dynamic_domains:
            return "dynamic"

        if domain in self.protocol_domains:
            return "protocol"

        # 4. File extension (dynamic content usually has no static extension)
        path = parsed_url.path.lower()
        static_extensions = {'.js', '.css', '.jpg', '.jpeg', '.png', '.gif', '.ico', '.pdf', '.zip', '.doc', '.docx'}
        if any(path.endswith(ext) for ext in static_extensions):
            return "protocol"

        # 5. Request method (POST requests are more likely to need dynamic loading)
        if request.method.upper() == "POST":
            return "dynamic"

        # 6. Fallback heuristic: typical dynamic-content markers in the URL
        dynamic_indicators = ['ajax', 'api', 'dynamic', 'spa', 'react', 'vue', 'angular']
        if any(indicator in url.lower() for indicator in dynamic_indicators):
            return "dynamic"

        # Default to the protocol downloader
        return "protocol"

    def _get_or_create_downloader(self, downloader_type: str) -> Optional[DownloaderBase]:
        """Get a cached downloader instance or create a new one."""
        # Return a cached instance if available
        if downloader_type in self._downloaders:
            return self._downloaders[downloader_type]

        # Create a new downloader instance
        if downloader_type == "protocol":
            downloader_cls = self._get_downloader_class(self.default_protocol_downloader)
        elif downloader_type == "dynamic":
            downloader_cls = self._get_downloader_class(self.default_dynamic_downloader)
        else:
            return None

        if not downloader_cls:
            return None

        downloader = downloader_cls(self.crawler)
        # Initialize via the standard open() method
        downloader.open()

        self._downloaders[downloader_type] = downloader
        return downloader

    async def close(self) -> None:
        """Close all downloaders."""
        for name, downloader in self._downloaders.items():
            try:
                if hasattr(downloader, 'close_async'):
                    await downloader.close_async()
                else:
                    await downloader.close()
                self.logger.debug(f"Closed {name} downloader")
            except Exception as e:
                self.logger.warning(f"Error closing {name} downloader: {e}")

        self._downloaders.clear()
        self.logger.info("HybridDownloader closed.")
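A minimal usage sketch for the selection hooks above, assuming a project settings module and a spider callback. The HYBRID_* keys and the meta flags come from the code itself; the DOWNLOADER key name and the Request(url, meta=...) constructor signature are assumptions for illustration:

# settings.py - route requests between the two downloader types
DOWNLOADER = "crawlo.downloader.hybrid_downloader.HybridDownloader"  # assumed key name
HYBRID_DEFAULT_PROTOCOL_DOWNLOADER = "aiohttp"
HYBRID_DEFAULT_DYNAMIC_DOWNLOADER = "playwright"
HYBRID_DYNAMIC_URL_PATTERNS = ["/detail/"]      # substring match (see _determine_downloader_type)
HYBRID_PROTOCOL_DOMAINS = ["api.example.com"]   # exact netloc match

# in a spider - an explicit meta flag (check #1) overrides patterns and domains
from crawlo.network.request import Request

def start_requests(self):
    yield Request("https://example.com/list", meta={"use_protocol_loader": True})
    yield Request("https://example.com/detail/42", meta={"use_dynamic_loader": True})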
crawlo/downloader/playwright_downloader.py
@@ -0,0 +1,403 @@
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Playwright Downloader
=====================
Downloader for dynamically loaded content, built on Playwright.

Features:
- Chromium/Firefox/WebKit browser engines
- Asynchronous, non-blocking operation
- Smart waiting for page load completion
- Custom browser contexts and options
- Memory-safe resource management
- Automatic cookie and local-storage handling
- Pagination support (mouse scrolling and click-to-paginate)
- Single-browser, multi-tab mode
"""
import time
from typing import Optional, Dict, List
from urllib.parse import urlparse

from playwright.async_api import async_playwright, Playwright, Browser, Page, BrowserContext

from crawlo.downloader import DownloaderBase
from crawlo.network.response import Response
from crawlo.utils.log import get_logger


class PlaywrightDownloader(DownloaderBase):
    """
    Playwright-based downloader for dynamic content.
    Handles JavaScript-rendered pages, with better performance than Selenium.
    """

    def __init__(self, crawler):
        super().__init__(crawler)
        self.playwright: Optional[Playwright] = None
        self.browser: Optional[Browser] = None
        self.context: Optional[BrowserContext] = None
        self.logger = get_logger(self.__class__.__name__, crawler.settings.get("LOG_LEVEL"))
        self.default_timeout = crawler.settings.get_int("PLAYWRIGHT_TIMEOUT", 30000)  # milliseconds
        self.load_timeout = crawler.settings.get_int("PLAYWRIGHT_LOAD_TIMEOUT", 10000)  # milliseconds
        self.browser_type = crawler.settings.get("PLAYWRIGHT_BROWSER_TYPE", "chromium").lower()
        self.headless = crawler.settings.get_bool("PLAYWRIGHT_HEADLESS", True)
        self.wait_for_element = crawler.settings.get("PLAYWRIGHT_WAIT_FOR_ELEMENT", None)
        self.viewport_width = crawler.settings.get_int("PLAYWRIGHT_VIEWPORT_WIDTH", 1920)
        self.viewport_height = crawler.settings.get_int("PLAYWRIGHT_VIEWPORT_HEIGHT", 1080)

        # Single-browser, multi-tab mode
        self.single_browser_mode = crawler.settings.get_bool("PLAYWRIGHT_SINGLE_BROWSER_MODE", True)
        self.max_pages_per_browser = crawler.settings.get_int("PLAYWRIGHT_MAX_PAGES_PER_BROWSER", 10)
        self._page_pool: List[Page] = []
        self._used_pages: set = set()

    def open(self):
        super().open()
        self.logger.info("Opening PlaywrightDownloader")

    async def download(self, request) -> Optional[Response]:
        """Download dynamic content."""
        if not self.playwright or not self.browser or not self.context:
            await self._initialize_playwright()

        start_time = None
        if self.crawler.settings.get_bool("DOWNLOAD_STATS", True):
            start_time = time.time()

        page: Optional[Page] = None
        try:
            # Acquire a page (supports single-browser multi-tab mode)
            page = await self._get_page()

            # Timeouts
            page.set_default_timeout(self.default_timeout)
            page.set_default_navigation_timeout(self.load_timeout)

            # Viewport
            await page.set_viewport_size({
                "width": self.viewport_width,
                "height": self.viewport_height
            })

            # Apply request-specific settings
            await self._apply_request_settings(page, request)

            # Navigate to the page
            response = await page.goto(request.url, wait_until="networkidle")

            # Wait for the page to finish loading
            await self._wait_for_page_load(page)

            # Run custom actions, if any
            await self._execute_custom_actions(page, request)

            # Run pagination actions, if any
            await self._execute_pagination_actions(page, request)

            # Grab the page content
            page_content = await page.content()
            page_url = page.url

            # Response metadata
            status_code = response.status if response else 200
            headers = dict(response.headers) if response else {}

            # Cookies
            cookies = await self._get_cookies()

            # Build the response object
            crawlo_response = Response(
                url=page_url,
                headers=headers,
                status_code=status_code,
                body=page_content.encode('utf-8'),
                request=request
            )

            # Attach cookies to the response
            crawlo_response.cookies = cookies

            # Download statistics
            if start_time:
                download_time = time.time() - start_time
                self.logger.debug(f"Downloaded {request.url} in {download_time:.3f}s")

            return crawlo_response

        except Exception as e:
            self.logger.error(f"Error downloading {request.url}: {e}")
            raise
        finally:
            # Return the page to the pool
            if page:
                await self._release_page(page)

    async def _initialize_playwright(self):
        """Initialize Playwright."""
        try:
            self.playwright = await async_playwright().start()

            # Proxy configuration
            proxy_config = self.crawler.settings.get("PLAYWRIGHT_PROXY")
            launch_kwargs = {
                "headless": self.headless
            }

            # Add proxy parameters if configured
            if proxy_config:
                if isinstance(proxy_config, str):
                    # Plain proxy URL
                    launch_kwargs["proxy"] = {
                        "server": proxy_config
                    }
                elif isinstance(proxy_config, dict):
                    # Full proxy configuration
                    launch_kwargs["proxy"] = proxy_config

            # Pick the browser engine from settings
            if self.browser_type == "chromium":
                self.browser = await self.playwright.chromium.launch(**launch_kwargs)
            elif self.browser_type == "firefox":
                self.browser = await self.playwright.firefox.launch(**launch_kwargs)
            elif self.browser_type == "webkit":
                self.browser = await self.playwright.webkit.launch(**launch_kwargs)
            else:
                raise ValueError(f"Unsupported browser type: {self.browser_type}")

            # Create the browser context
            self.context = await self.browser.new_context()

            # Apply global settings
            await self._apply_global_settings()

            self.logger.debug(f"PlaywrightDownloader initialized with {self.browser_type}")

        except Exception as e:
            self.logger.error(f"Failed to initialize Playwright: {e}")
            raise

    async def _apply_global_settings(self):
        """Apply global browser settings."""
        if not self.context:
            return

        # User agent
        user_agent = self.crawler.settings.get("USER_AGENT")
        if user_agent:
            await self.context.set_extra_http_headers({"User-Agent": user_agent})

        # Proxy: in Playwright this is configured at browser launch, nothing to do here
        proxy = self.crawler.settings.get("PLAYWRIGHT_PROXY")
        if proxy:
            pass

    async def _apply_request_settings(self, page: Page, request):
        """Apply request-specific settings."""
        # Request headers
        if request.headers:
            await page.set_extra_http_headers(request.headers)

        # Cookies
        if request.cookies:
            cookies = []
            for name, value in request.cookies.items():
                # Domain and path must be supplied
                parsed_url = urlparse(request.url)
                cookies.append({
                    "name": name,
                    "value": value,
                    "domain": parsed_url.netloc,
                    "path": "/"
                })
            await page.context.add_cookies(cookies)

    async def _wait_for_page_load(self, page: Page):
        """Wait for the page to finish loading."""
        try:
            # Wait for network idle
            await page.wait_for_load_state("networkidle")

            # If a specific element is configured, wait for it to appear
            if self.wait_for_element:
                await page.wait_for_selector(self.wait_for_element, timeout=self.load_timeout)

        except Exception as e:
            self.logger.warning(f"Page load wait timeout, continuing with current content: {e}")

    async def _execute_custom_actions(self, page: Page, request):
        """Execute custom actions."""
        # Custom actions come from the request's meta
        custom_actions = request.meta.get("playwright_actions", [])

        for action in custom_actions:
            try:
                if isinstance(action, dict):
                    action_type = action.get("type")
                    action_params = action.get("params", {})

                    if action_type == "click":
                        selector = action_params.get("selector")
                        if selector:
                            await page.click(selector)
                    elif action_type == "fill":
                        selector = action_params.get("selector")
                        value = action_params.get("value")
                        if selector and value is not None:
                            await page.fill(selector, value)
                    elif action_type == "wait":
                        timeout = action_params.get("timeout", 1000)
                        await page.wait_for_timeout(timeout)
                    elif action_type == "evaluate":
                        script = action_params.get("script")
                        if script:
                            await page.evaluate(script)
                    elif action_type == "scroll":
                        position = action_params.get("position", "bottom")
                        if position == "bottom":
                            await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
                        elif position == "top":
                            await page.evaluate("window.scrollTo(0, 0)")

            except Exception as e:
                self.logger.warning(f"Failed to execute custom action: {e}")

    async def _execute_pagination_actions(self, page: Page, request):
        """Execute pagination actions."""
        # Pagination actions come from the request's meta
        pagination_actions = request.meta.get("pagination_actions", [])

        for action in pagination_actions:
            try:
                if isinstance(action, dict):
                    action_type = action.get("type")
                    action_params = action.get("params", {})

                    if action_type == "scroll":
                        # Paginate by scrolling
                        scroll_count = action_params.get("count", 1)
                        scroll_delay = action_params.get("delay", 1000)
                        scroll_distance = action_params.get("distance", 500)

                        for _ in range(scroll_count):
                            await page.mouse.wheel(0, scroll_distance)
                            await page.wait_for_timeout(scroll_delay)

                    elif action_type == "click":
                        # Paginate by clicking
                        selector = action_params.get("selector")
                        click_count = action_params.get("count", 1)
                        click_delay = action_params.get("delay", 1000)

                        if selector:
                            for _ in range(click_count):
                                await page.click(selector)
                                await page.wait_for_timeout(click_delay)

                    elif action_type == "evaluate":
                        # Paginate via a custom script
                        script = action_params.get("script")
                        if script:
                            await page.evaluate(script)

            except Exception as e:
                self.logger.warning(f"Failed to execute pagination action: {e}")

    async def _get_cookies(self) -> Dict[str, str]:
        """Collect cookies from the browser context."""
        try:
            if self.context:
                playwright_cookies = await self.context.cookies()
                return {cookie['name']: cookie['value'] for cookie in playwright_cookies}
            return {}
        except Exception as e:
            self.logger.warning(f"Failed to get cookies: {e}")
            return {}

    async def close(self) -> None:
        """Close Playwright resources."""
        try:
            # Close all pages
            for page in self._page_pool:
                try:
                    await page.close()
                except Exception:
                    pass
            self._page_pool.clear()
            self._used_pages.clear()

            if self.context:
                await self.context.close()
            if self.browser:
                await self.browser.close()
            if self.playwright:
                await self.playwright.stop()

            self.logger.info("PlaywrightDownloader closed.")
        except Exception as e:
            self.logger.warning(f"Error closing Playwright resources: {e}")
        finally:
            self.context = None
            self.browser = None
            self.playwright = None

    async def _get_page(self) -> Page:
        """Get a page instance (supports single-browser multi-tab mode)."""
        # Single-browser mode with an existing page pool
        if self.single_browser_mode and self._page_pool:
            # Room left in the pool: create a fresh page
            if len(self._page_pool) < self.max_pages_per_browser:
                if not self.context:
                    raise RuntimeError("Browser context not initialized")
                new_page = await self.context.new_page()
                self._page_pool.append(new_page)
                self._used_pages.add(id(new_page))
                return new_page

            # Try to reuse an idle page from the pool
            for page in self._page_pool:
                if id(page) not in self._used_pages:
                    self._used_pages.add(id(page))
                    return page

        # Create a new page
        if not self.context:
            raise RuntimeError("Browser context not initialized")

        page = await self.context.new_page()

        # In single-browser mode, track the page in the pool
        if self.single_browser_mode:
            self._page_pool.append(page)
            self._used_pages.add(id(page))

            # Evict the oldest page if the pool is over capacity
            if len(self._page_pool) > self.max_pages_per_browser:
                old_page = self._page_pool.pop(0)
                self._used_pages.discard(id(old_page))
                try:
                    await old_page.close()
                except Exception:
                    pass

        return page

    async def _release_page(self, page: Page):
        """Return a page to the pool."""
        if self.single_browser_mode:
            page_id = id(page)
            if page_id in self._used_pages:
                self._used_pages.discard(page_id)
                # Blank the page so it is ready for reuse
                try:
                    await page.goto("about:blank")
                except Exception:
                    pass
        else:
            # Not in single-browser mode: just close the page
            try:
                await page.close()
            except Exception:
                pass
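The playwright_actions and pagination_actions lists consumed by _execute_custom_actions and _execute_pagination_actions above are plain dictionaries attached to a request's meta. A minimal sketch, again assuming the Request(url, meta=...) constructor signature and a spider callback context:

from crawlo.network.request import Request

def start_requests(self):
    # Fill a search box, submit, wait, then scroll three times to trigger lazy loading.
    yield Request(
        "https://example.com/search",
        meta={
            "playwright_actions": [
                {"type": "fill", "params": {"selector": "#q", "value": "crawlo"}},
                {"type": "click", "params": {"selector": "button[type=submit]"}},
                {"type": "wait", "params": {"timeout": 2000}},  # milliseconds
            ],
            "pagination_actions": [
                {"type": "scroll", "params": {"count": 3, "distance": 500, "delay": 1000}},
            ],
        },
    )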