crawlo 1.4.2__py3-none-any.whl → 1.4.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crawlo might be problematic.
- crawlo/__init__.py +93 -93
- crawlo/__version__.py +1 -1
- crawlo/cli.py +75 -75
- crawlo/commands/__init__.py +14 -14
- crawlo/commands/check.py +594 -594
- crawlo/commands/genspider.py +151 -151
- crawlo/commands/help.py +138 -138
- crawlo/commands/list.py +155 -155
- crawlo/commands/run.py +341 -341
- crawlo/commands/startproject.py +436 -436
- crawlo/commands/stats.py +187 -187
- crawlo/commands/utils.py +196 -196
- crawlo/config.py +312 -312
- crawlo/config_validator.py +277 -277
- crawlo/core/__init__.py +52 -52
- crawlo/core/engine.py +438 -439
- crawlo/core/processor.py +47 -47
- crawlo/core/scheduler.py +291 -257
- crawlo/crawler.py +650 -650
- crawlo/data/__init__.py +5 -5
- crawlo/data/user_agents.py +194 -194
- crawlo/downloader/__init__.py +273 -273
- crawlo/downloader/aiohttp_downloader.py +233 -233
- crawlo/downloader/cffi_downloader.py +245 -245
- crawlo/downloader/httpx_downloader.py +259 -259
- crawlo/downloader/hybrid_downloader.py +212 -212
- crawlo/downloader/playwright_downloader.py +402 -402
- crawlo/downloader/selenium_downloader.py +472 -472
- crawlo/event.py +11 -11
- crawlo/exceptions.py +81 -81
- crawlo/extension/__init__.py +63 -63
- crawlo/extension/health_check.py +141 -141
- crawlo/extension/log_interval.py +94 -94
- crawlo/extension/log_stats.py +70 -70
- crawlo/extension/logging_extension.py +61 -61
- crawlo/extension/memory_monitor.py +104 -104
- crawlo/extension/performance_profiler.py +133 -133
- crawlo/extension/request_recorder.py +107 -107
- crawlo/factories/__init__.py +27 -27
- crawlo/factories/base.py +68 -68
- crawlo/factories/crawler.py +103 -103
- crawlo/factories/registry.py +84 -84
- crawlo/filters/__init__.py +154 -154
- crawlo/filters/aioredis_filter.py +257 -257
- crawlo/filters/memory_filter.py +269 -269
- crawlo/framework.py +292 -292
- crawlo/initialization/__init__.py +44 -44
- crawlo/initialization/built_in.py +425 -425
- crawlo/initialization/context.py +141 -141
- crawlo/initialization/core.py +193 -193
- crawlo/initialization/phases.py +148 -148
- crawlo/initialization/registry.py +145 -145
- crawlo/items/__init__.py +23 -23
- crawlo/items/base.py +23 -23
- crawlo/items/fields.py +52 -52
- crawlo/items/items.py +104 -104
- crawlo/logging/__init__.py +45 -37
- crawlo/logging/async_handler.py +181 -0
- crawlo/logging/config.py +196 -96
- crawlo/logging/factory.py +171 -128
- crawlo/logging/manager.py +111 -111
- crawlo/logging/monitor.py +153 -0
- crawlo/logging/sampler.py +167 -0
- crawlo/middleware/__init__.py +21 -21
- crawlo/middleware/default_header.py +132 -132
- crawlo/middleware/download_delay.py +104 -104
- crawlo/middleware/middleware_manager.py +135 -135
- crawlo/middleware/offsite.py +123 -123
- crawlo/middleware/proxy.py +386 -386
- crawlo/middleware/request_ignore.py +86 -86
- crawlo/middleware/response_code.py +150 -150
- crawlo/middleware/response_filter.py +136 -136
- crawlo/middleware/retry.py +124 -124
- crawlo/middleware/simple_proxy.py +65 -65
- crawlo/mode_manager.py +219 -219
- crawlo/network/__init__.py +21 -21
- crawlo/network/request.py +379 -379
- crawlo/network/response.py +359 -359
- crawlo/pipelines/__init__.py +21 -21
- crawlo/pipelines/bloom_dedup_pipeline.py +146 -146
- crawlo/pipelines/console_pipeline.py +39 -39
- crawlo/pipelines/csv_pipeline.py +316 -316
- crawlo/pipelines/database_dedup_pipeline.py +197 -197
- crawlo/pipelines/json_pipeline.py +218 -218
- crawlo/pipelines/memory_dedup_pipeline.py +105 -105
- crawlo/pipelines/mongo_pipeline.py +131 -131
- crawlo/pipelines/mysql_pipeline.py +325 -325
- crawlo/pipelines/pipeline_manager.py +100 -84
- crawlo/pipelines/redis_dedup_pipeline.py +156 -156
- crawlo/project.py +349 -338
- crawlo/queue/pqueue.py +42 -42
- crawlo/queue/queue_manager.py +526 -522
- crawlo/queue/redis_priority_queue.py +370 -367
- crawlo/settings/__init__.py +7 -7
- crawlo/settings/default_settings.py +284 -284
- crawlo/settings/setting_manager.py +219 -219
- crawlo/spider/__init__.py +657 -657
- crawlo/stats_collector.py +73 -73
- crawlo/subscriber.py +129 -129
- crawlo/task_manager.py +138 -138
- crawlo/templates/crawlo.cfg.tmpl +10 -10
- crawlo/templates/project/__init__.py.tmpl +3 -3
- crawlo/templates/project/items.py.tmpl +17 -17
- crawlo/templates/project/middlewares.py.tmpl +118 -118
- crawlo/templates/project/pipelines.py.tmpl +96 -96
- crawlo/templates/project/settings.py.tmpl +170 -170
- crawlo/templates/project/settings_distributed.py.tmpl +169 -169
- crawlo/templates/project/settings_gentle.py.tmpl +166 -166
- crawlo/templates/project/settings_high_performance.py.tmpl +167 -167
- crawlo/templates/project/settings_minimal.py.tmpl +65 -65
- crawlo/templates/project/settings_simple.py.tmpl +164 -164
- crawlo/templates/project/spiders/__init__.py.tmpl +9 -9
- crawlo/templates/run.py.tmpl +34 -34
- crawlo/templates/spider/spider.py.tmpl +143 -143
- crawlo/templates/spiders_init.py.tmpl +9 -9
- crawlo/tools/__init__.py +200 -200
- crawlo/tools/anti_crawler.py +268 -268
- crawlo/tools/authenticated_proxy.py +240 -240
- crawlo/tools/data_formatter.py +225 -225
- crawlo/tools/data_validator.py +180 -180
- crawlo/tools/date_tools.py +289 -289
- crawlo/tools/distributed_coordinator.py +384 -384
- crawlo/tools/encoding_converter.py +127 -127
- crawlo/tools/network_diagnostic.py +364 -364
- crawlo/tools/request_tools.py +82 -82
- crawlo/tools/retry_mechanism.py +224 -224
- crawlo/tools/scenario_adapter.py +262 -262
- crawlo/tools/text_cleaner.py +232 -232
- crawlo/utils/__init__.py +34 -34
- crawlo/utils/batch_processor.py +259 -259
- crawlo/utils/class_loader.py +25 -25
- crawlo/utils/controlled_spider_mixin.py +439 -439
- crawlo/utils/db_helper.py +343 -343
- crawlo/utils/enhanced_error_handler.py +356 -356
- crawlo/utils/env_config.py +142 -142
- crawlo/utils/error_handler.py +165 -165
- crawlo/utils/fingerprint.py +122 -122
- crawlo/utils/func_tools.py +82 -82
- crawlo/utils/large_scale_config.py +286 -286
- crawlo/utils/large_scale_helper.py +344 -344
- crawlo/utils/log.py +79 -79
- crawlo/utils/performance_monitor.py +285 -285
- crawlo/utils/queue_helper.py +175 -175
- crawlo/utils/redis_connection_pool.py +388 -388
- crawlo/utils/redis_key_validator.py +198 -198
- crawlo/utils/request.py +267 -267
- crawlo/utils/request_serializer.py +225 -225
- crawlo/utils/spider_loader.py +61 -61
- crawlo/utils/system.py +11 -11
- crawlo/utils/tools.py +4 -4
- crawlo/utils/url.py +39 -39
- crawlo-1.4.3.dist-info/METADATA +190 -0
- crawlo-1.4.3.dist-info/RECORD +326 -0
- examples/__init__.py +7 -7
- examples/test_project/__init__.py +7 -7
- examples/test_project/run.py +34 -34
- examples/test_project/test_project/__init__.py +3 -3
- examples/test_project/test_project/items.py +17 -17
- examples/test_project/test_project/middlewares.py +118 -118
- examples/test_project/test_project/pipelines.py +96 -96
- examples/test_project/test_project/settings.py +169 -169
- examples/test_project/test_project/spiders/__init__.py +9 -9
- examples/test_project/test_project/spiders/of_week_dis.py +143 -143
- tests/__init__.py +7 -7
- tests/advanced_tools_example.py +275 -275
- tests/authenticated_proxy_example.py +106 -106
- tests/baidu_performance_test.py +108 -108
- tests/baidu_test.py +59 -59
- tests/cleaners_example.py +160 -160
- tests/comprehensive_framework_test.py +212 -212
- tests/comprehensive_test.py +81 -81
- tests/comprehensive_testing_summary.md +186 -186
- tests/config_validation_demo.py +142 -142
- tests/controlled_spider_example.py +205 -205
- tests/date_tools_example.py +180 -180
- tests/debug_configure.py +69 -69
- tests/debug_framework_logger.py +84 -84
- tests/debug_log_config.py +126 -126
- tests/debug_log_levels.py +63 -63
- tests/debug_pipelines.py +66 -66
- tests/detailed_log_test.py +233 -233
- tests/distributed_test.py +66 -66
- tests/distributed_test_debug.py +76 -76
- tests/dynamic_loading_example.py +523 -523
- tests/dynamic_loading_test.py +104 -104
- tests/env_config_example.py +133 -133
- tests/error_handling_example.py +171 -171
- tests/final_comprehensive_test.py +151 -151
- tests/final_log_test.py +260 -260
- tests/final_validation_test.py +182 -182
- tests/fix_log_test.py +142 -142
- tests/framework_performance_test.py +202 -202
- tests/log_buffering_test.py +111 -111
- tests/log_generation_timing_test.py +153 -153
- tests/optimized_performance_test.py +211 -211
- tests/performance_comparison.py +245 -245
- tests/queue_blocking_test.py +113 -113
- tests/queue_test.py +89 -89
- tests/redis_key_validation_demo.py +130 -130
- tests/request_params_example.py +150 -150
- tests/response_improvements_example.py +144 -144
- tests/scrapy_comparison/ofweek_scrapy.py +138 -138
- tests/scrapy_comparison/scrapy_test.py +133 -133
- tests/simple_command_test.py +119 -119
- tests/simple_crawlo_test.py +127 -127
- tests/simple_log_test.py +57 -57
- tests/simple_log_test2.py +137 -137
- tests/simple_optimization_test.py +128 -128
- tests/simple_queue_type_test.py +41 -41
- tests/simple_spider_test.py +49 -49
- tests/simple_test.py +47 -47
- tests/spider_log_timing_test.py +177 -177
- tests/test_advanced_tools.py +148 -148
- tests/test_all_commands.py +230 -230
- tests/test_all_pipeline_fingerprints.py +133 -133
- tests/test_all_redis_key_configs.py +145 -145
- tests/test_authenticated_proxy.py +141 -141
- tests/test_batch_processor.py +178 -178
- tests/test_cleaners.py +54 -54
- tests/test_component_factory.py +174 -174
- tests/test_comprehensive.py +146 -146
- tests/test_config_consistency.py +80 -80
- tests/test_config_merge.py +152 -152
- tests/test_config_validator.py +182 -182
- tests/test_controlled_spider_mixin.py +79 -79
- tests/test_crawlo_proxy_integration.py +108 -108
- tests/test_date_tools.py +123 -123
- tests/test_dedup_fix.py +220 -220
- tests/test_dedup_pipeline_consistency.py +125 -0
- tests/test_default_header_middleware.py +313 -313
- tests/test_distributed.py +65 -65
- tests/test_double_crawlo_fix.py +204 -204
- tests/test_double_crawlo_fix_simple.py +124 -124
- tests/test_download_delay_middleware.py +221 -221
- tests/test_downloader_proxy_compatibility.py +268 -268
- tests/test_dynamic_downloaders_proxy.py +124 -124
- tests/test_dynamic_proxy.py +92 -92
- tests/test_dynamic_proxy_config.py +146 -146
- tests/test_dynamic_proxy_real.py +109 -109
- tests/test_edge_cases.py +303 -303
- tests/test_enhanced_error_handler.py +270 -270
- tests/test_enhanced_error_handler_comprehensive.py +245 -245
- tests/test_env_config.py +121 -121
- tests/test_error_handler_compatibility.py +112 -112
- tests/test_factories.py +252 -252
- tests/test_final_validation.py +153 -153
- tests/test_fingerprint_consistency.py +135 -135
- tests/test_fingerprint_simple.py +51 -51
- tests/test_framework_env_usage.py +103 -103
- tests/test_framework_logger.py +66 -66
- tests/test_framework_startup.py +64 -64
- tests/test_get_component_logger.py +83 -83
- tests/test_hash_performance.py +99 -99
- tests/test_integration.py +169 -169
- tests/test_item_dedup_redis_key.py +122 -122
- tests/test_large_scale_config.py +112 -112
- tests/test_large_scale_helper.py +235 -235
- tests/test_logging_enhancements.py +375 -0
- tests/test_logging_final.py +185 -0
- tests/test_logging_integration.py +313 -0
- tests/test_logging_system.py +282 -282
- tests/test_middleware_debug.py +142 -0
- tests/test_mode_change.py +72 -72
- tests/test_mode_consistency.py +51 -51
- tests/test_offsite_middleware.py +244 -244
- tests/test_offsite_middleware_simple.py +203 -203
- tests/test_parsel.py +29 -29
- tests/test_performance.py +327 -327
- tests/test_performance_monitor.py +115 -115
- tests/test_pipeline_fingerprint_consistency.py +86 -86
- tests/test_priority_behavior.py +212 -0
- tests/test_priority_consistency.py +152 -0
- tests/test_priority_consistency_fixed.py +250 -0
- tests/test_proxy_api.py +264 -264
- tests/test_proxy_health_check.py +32 -32
- tests/test_proxy_middleware.py +121 -121
- tests/test_proxy_middleware_enhanced.py +216 -216
- tests/test_proxy_middleware_integration.py +136 -136
- tests/test_proxy_middleware_refactored.py +184 -184
- tests/test_proxy_providers.py +56 -56
- tests/test_proxy_stats.py +19 -19
- tests/test_proxy_strategies.py +59 -59
- tests/test_queue_empty_check.py +41 -41
- tests/test_queue_manager_double_crawlo.py +173 -173
- tests/test_queue_manager_redis_key.py +179 -179
- tests/test_queue_naming.py +154 -154
- tests/test_queue_type.py +106 -106
- tests/test_queue_type_redis_config_consistency.py +131 -0
- tests/test_random_headers_default.py +323 -0
- tests/test_random_headers_necessity.py +309 -0
- tests/test_random_user_agent.py +72 -72
- tests/test_real_scenario_proxy.py +195 -195
- tests/test_redis_config.py +28 -28
- tests/test_redis_connection_pool.py +294 -294
- tests/test_redis_key_naming.py +181 -181
- tests/test_redis_key_validator.py +123 -123
- tests/test_redis_queue.py +224 -224
- tests/test_redis_queue_name_fix.py +175 -175
- tests/test_redis_queue_type_fallback.py +130 -0
- tests/test_request_ignore_middleware.py +182 -182
- tests/test_request_params.py +111 -111
- tests/test_request_serialization.py +70 -70
- tests/test_response_code_middleware.py +349 -349
- tests/test_response_filter_middleware.py +427 -427
- tests/test_response_improvements.py +152 -152
- tests/test_retry_middleware.py +334 -242
- tests/test_retry_middleware_realistic.py +274 -0
- tests/test_scheduler.py +252 -252
- tests/test_scheduler_config_update.py +133 -133
- tests/test_simple_response.py +61 -61
- tests/test_telecom_spider_redis_key.py +205 -205
- tests/test_template_content.py +87 -87
- tests/test_template_redis_key.py +134 -134
- tests/test_tools.py +159 -159
- tests/test_user_agent_randomness.py +177 -0
- tests/test_user_agents.py +96 -96
- tests/tools_example.py +260 -260
- tests/untested_features_report.md +138 -138
- tests/verify_debug.py +51 -51
- tests/verify_distributed.py +117 -117
- tests/verify_log_fix.py +111 -111
- crawlo-1.4.2.dist-info/METADATA +0 -1199
- crawlo-1.4.2.dist-info/RECORD +0 -309
- {crawlo-1.4.2.dist-info → crawlo-1.4.3.dist-info}/WHEEL +0 -0
- {crawlo-1.4.2.dist-info → crawlo-1.4.3.dist-info}/entry_points.txt +0 -0
- {crawlo-1.4.2.dist-info → crawlo-1.4.3.dist-info}/top_level.txt +0 -0
crawlo/network/response.py
CHANGED
@@ -1,360 +1,360 @@
(The viewer shows all 360 lines as removed and re-added with identical rendered content — likely a whitespace-only rewrite — so the file appears once below.)

#!/usr/bin/python
# -*- coding:UTF-8 -*-
"""
HTTP Response wrapper module
============================
A feature-rich wrapper around HTTP responses, supporting:
- intelligent encoding detection and decoding
- XPath/CSS selectors
- JSON parsing with caching
- regular-expression helpers
- cookie handling
"""
import re
import ujson
from http.cookies import SimpleCookie
from parsel import Selector, SelectorList
from typing import Dict, Any, List, Optional, Union
from urllib.parse import urljoin as _urljoin

from crawlo.exceptions import DecodeError


class Response:
    """
    Wrapper around an HTTP response, providing convenient parsing helpers.

    Features:
    - intelligent encoding detection and caching
    - lazily created Selector instance
    - JSON parsing with caching
    - extraction helpers for multiple data types
    """

    def __init__(
        self,
        url: str,
        *,
        headers: Dict[str, Any] = None,
        body: bytes = b"",
        method: str = 'GET',
        request: 'Request' = None,  # string annotation avoids a circular import
        status_code: int = 200,
    ):
        # Basic attributes
        self.url = url
        self.headers = headers or {}
        self.body = body
        self.method = method.upper()
        self.request = request
        self.status_code = status_code

        # Encoding handling
        self.encoding = self._determine_encoding()

        # Cached values
        self._text_cache = None
        self._json_cache = None
        self._selector_instance = None

        # Status flags
        self._is_success = 200 <= status_code < 300
        self._is_redirect = 300 <= status_code < 400
        self._is_client_error = 400 <= status_code < 500
        self._is_server_error = status_code >= 500

    def _determine_encoding(self) -> Optional[str]:
        """Intelligently detect the response encoding."""
        # 1. Prefer the encoding declared on the request
        if self.request and self.request.encoding:
            return self.request.encoding

        # 2. Detect from the Content-Type header
        content_type = self.headers.get("content-type", "") or self.headers.get("Content-Type", "")
        if content_type:
            charset_match = re.search(r"charset=([\w-]+)", content_type, re.I)
            if charset_match:
                return charset_match.group(1).lower()

        # 3. Detect from HTML meta tags (HTML content only)
        if b'<html' in self.body[:1024].lower():
            # Look for <meta charset="xxx"> or <meta http-equiv="Content-Type" content="...charset=xxx">
            html_start = self.body[:4096]  # inspect only the first 4 KB
            try:
                html_text = html_start.decode('ascii', errors='ignore')
                # <meta charset="utf-8">
                charset_match = re.search(r'<meta[^>]+charset=["\']?([\w-]+)', html_text, re.I)
                if charset_match:
                    return charset_match.group(1).lower()

                # <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
                content_match = re.search(r'<meta[^>]+content=["\'][^"\'>]*charset=([\w-]+)', html_text, re.I)
                if content_match:
                    return content_match.group(1).lower()
            except Exception:
                pass

        # 4. Fall back to utf-8
        return 'utf-8'

    @property
    def text(self) -> str:
        """Decode the body to a string using the detected encoding, caching the result."""
        if self._text_cache is not None:
            return self._text_cache

        if not self.body:
            self._text_cache = ""
            return self._text_cache

        # Try several encodings in order
        encodings_to_try = [self.encoding]
        if self.encoding != 'utf-8':
            encodings_to_try.append('utf-8')
        if 'gbk' not in encodings_to_try:
            encodings_to_try.append('gbk')
        if 'gb2312' not in encodings_to_try:
            encodings_to_try.append('gb2312')
        encodings_to_try.append('latin1')  # last-resort fallback

        for encoding in encodings_to_try:
            if not encoding:
                continue
            try:
                self._text_cache = self.body.decode(encoding)
                return self._text_cache
            except (UnicodeDecodeError, LookupError):
                continue

        # All encodings failed; decode with replacement characters
        try:
            self._text_cache = self.body.decode('utf-8', errors='replace')
            return self._text_cache
        except Exception as e:
            raise DecodeError(f"Failed to decode response from {self.url}: {e}")

    @property
    def is_success(self) -> bool:
        """Whether the response is a success (2xx)."""
        return self._is_success

    @property
    def is_redirect(self) -> bool:
        """Whether the response is a redirect (3xx)."""
        return self._is_redirect

    @property
    def is_client_error(self) -> bool:
        """Whether the response is a client error (4xx)."""
        return self._is_client_error

    @property
    def is_server_error(self) -> bool:
        """Whether the response is a server error (5xx)."""
        return self._is_server_error

    @property
    def content_type(self) -> str:
        """The response Content-Type."""
        return self.headers.get('content-type', '') or self.headers.get('Content-Type', '')

    @property
    def content_length(self) -> Optional[int]:
        """The response Content-Length."""
        length = self.headers.get('content-length') or self.headers.get('Content-Length')
        return int(length) if length else None

    def json(self, default: Any = None) -> Any:
        """Parse the response text as a JSON object."""
        if self._json_cache is not None:
            return self._json_cache

        try:
            self._json_cache = ujson.loads(self.text)
            return self._json_cache
        except (ujson.JSONDecodeError, ValueError) as e:
            if default is not None:
                return default
            raise DecodeError(f"Failed to parse JSON from {self.url}: {e}")

    def urljoin(self, url: str) -> str:
        """Join a URL against the response URL, resolving relative paths."""
        return _urljoin(self.url, url)

    @property
    def _selector(self) -> Selector:
        """Lazily create the Selector instance."""
        if self._selector_instance is None:
            self._selector_instance = Selector(self.text)
        return self._selector_instance

    def xpath(self, query: str) -> SelectorList:
        """Query the document with an XPath selector."""
        return self._selector.xpath(query)

    def css(self, query: str) -> SelectorList:
        """Query the document with a CSS selector."""
        return self._selector.css(query)

    def _is_xpath(self, query: str) -> bool:
        """Heuristically decide whether a query string is XPath."""
        return query.startswith(('/', '//', './'))

    def _extract_text_from_elements(self, elements: SelectorList, join_str: str = " ") -> str:
        """
        Extract and join the text of a list of elements.

        :param elements: SelectorList of elements
        :param join_str: separator used when joining text fragments
        :return: the joined text
        """
        texts = []
        for element in elements:
            # Collect all text nodes of the element
            if hasattr(element, 'xpath'):
                element_texts = element.xpath('.//text()').getall()
            else:
                element_texts = [str(element)]
            # Strip and keep non-empty fragments
            for text in element_texts:
                cleaned = text.strip()
                if cleaned:
                    texts.append(cleaned)
        return join_str.join(texts)

    def extract_text(self, xpath_or_css: str, join_str: str = " ", default: str = '') -> str:
        """
        Extract the text content of a single element; accepts CSS or XPath selectors.

        Parameters:
            xpath_or_css: XPath or CSS selector
            join_str: separator used when joining text fragments (default: space)
            default: value returned when no element matches

        Returns:
            the joined plain-text string
        """
        try:
            elements = self.xpath(xpath_or_css) if self._is_xpath(xpath_or_css) else self.css(xpath_or_css)
            if not elements:
                return default
            return self._extract_text_from_elements(elements, join_str)
        except Exception:
            return default

    def extract_texts(self, xpath_or_css: str, join_str: str = " ", default: List[str] = None) -> List[str]:
        """
        Extract the text content of multiple elements as a list; accepts CSS or XPath selectors.

        Parameters:
            xpath_or_css: XPath or CSS selector
            join_str: separator used when joining text fragments within a single node
            default: value returned when no element matches

        Returns:
            a list of plain-text strings (one entry per matched node)
        """
        if default is None:
            default = []

        try:
            elements = self.xpath(xpath_or_css) if self._is_xpath(xpath_or_css) else self.css(xpath_or_css)
            if not elements:
                return default

            result = []
            for element in elements:
                # Extract text from each element
                if hasattr(element, 'xpath'):
                    texts = element.xpath('.//text()').getall()
                else:
                    texts = [str(element)]

                # Strip fragments and join them
                clean_texts = [text.strip() for text in texts if text.strip()]
                if clean_texts:
                    result.append(join_str.join(clean_texts))

            return result if result else default
        except Exception:
            return default

    def extract_attr(self, xpath_or_css: str, attr_name: str, default: Any = None) -> Any:
        """
        Extract an attribute value from a single element; accepts CSS or XPath selectors.

        Parameters:
            xpath_or_css: XPath or CSS selector
            attr_name: attribute name
            default: value returned when no element matches

        Returns:
            the attribute value, or the default
        """
        try:
            elements = self.xpath(xpath_or_css) if self._is_xpath(xpath_or_css) else self.css(xpath_or_css)
            if not elements:
                return default
            return elements.attrib.get(attr_name, default)
        except Exception:
            return default

    def extract_attrs(self, xpath_or_css: str, attr_name: str, default: List[Any] = None) -> List[Any]:
        """
        Extract attribute values from multiple elements as a list; accepts CSS or XPath selectors.

        Parameters:
            xpath_or_css: XPath or CSS selector
            attr_name: attribute name
            default: value returned when no element matches

        Returns:
            a list of attribute values
        """
        if default is None:
            default = []

        try:
            elements = self.xpath(xpath_or_css) if self._is_xpath(xpath_or_css) else self.css(xpath_or_css)
            if not elements:
                return default

            result = []
            for element in elements:
                if hasattr(element, 'attrib'):
                    attr_value = element.attrib.get(attr_name)
                    if attr_value is not None:
                        result.append(attr_value)

            return result if result else default
        except Exception:
            return default

    def re_search(self, pattern: str, flags: int = re.DOTALL) -> Optional[re.Match]:
        """Run a regular-expression search over the response text."""
        if not isinstance(pattern, str):
            raise TypeError("Pattern must be a string")
        return re.search(pattern, self.text, flags=flags)

    def re_findall(self, pattern: str, flags: int = re.DOTALL) -> List[Any]:
        """Run a regular-expression findall over the response text."""
        if not isinstance(pattern, str):
            raise TypeError("Pattern must be a string")
        return re.findall(pattern, self.text, flags=flags)

    def get_cookies(self) -> Dict[str, str]:
        """Parse and return cookies from the response headers."""
        cookie_header = self.headers.get("Set-Cookie", "")
        if isinstance(cookie_header, list):
            cookie_header = ", ".join(cookie_header)
        cookies = SimpleCookie()
        cookies.load(cookie_header)
        return {key: morsel.value for key, morsel in cookies.items()}

    @property
    def meta(self) -> Dict:
        """The meta dict of the associated Request object."""
        return self.request.meta if self.request else {}

    def __str__(self):
        return f"<{self.status_code} {self.url}>"
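
As a quick orientation to the API diffed above, here is a minimal usage sketch. The URL, HTML body, and headers are illustrative values, not part of the package; only the import path and method names come from the file itself.

# Minimal sketch: construct a Response directly and exercise its helpers.
from crawlo.network.response import Response

html = b'<html><head><meta charset="utf-8"></head><body><a href="/next" class="link">Next page</a></body></html>'
resp = Response(
    "https://example.com/list",                          # illustrative URL
    headers={"Content-Type": "text/html; charset=utf-8"},
    body=html,
    status_code=200,
)

assert resp.is_success                       # 200 <= status_code < 300
print(resp.encoding)                         # "utf-8", detected from the Content-Type header
print(resp.extract_text("a.link"))           # "Next page" (CSS selector)
print(resp.extract_attr("//a", "href"))      # "/next" (leading "/" routes to XPath)
print(resp.urljoin("/next"))                 # "https://example.com/next"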