crawlo 1.2.5-py3-none-any.whl → 1.2.7-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.

Files changed (209)
  1. crawlo/__init__.py +61 -61
  2. crawlo/__version__.py +1 -1
  3. crawlo/cleaners/__init__.py +60 -60
  4. crawlo/cleaners/data_formatter.py +225 -225
  5. crawlo/cleaners/encoding_converter.py +125 -125
  6. crawlo/cleaners/text_cleaner.py +232 -232
  7. crawlo/cli.py +75 -88
  8. crawlo/commands/__init__.py +14 -14
  9. crawlo/commands/check.py +594 -594
  10. crawlo/commands/genspider.py +151 -151
  11. crawlo/commands/help.py +138 -144
  12. crawlo/commands/list.py +155 -155
  13. crawlo/commands/run.py +323 -323
  14. crawlo/commands/startproject.py +436 -436
  15. crawlo/commands/stats.py +187 -187
  16. crawlo/commands/utils.py +186 -186
  17. crawlo/config.py +312 -312
  18. crawlo/config_validator.py +251 -251
  19. crawlo/core/__init__.py +2 -2
  20. crawlo/core/engine.py +365 -354
  21. crawlo/core/processor.py +40 -40
  22. crawlo/core/scheduler.py +251 -143
  23. crawlo/crawler.py +1099 -1110
  24. crawlo/data/__init__.py +5 -5
  25. crawlo/data/user_agents.py +107 -107
  26. crawlo/downloader/__init__.py +266 -266
  27. crawlo/downloader/aiohttp_downloader.py +228 -221
  28. crawlo/downloader/cffi_downloader.py +256 -256
  29. crawlo/downloader/httpx_downloader.py +259 -259
  30. crawlo/downloader/hybrid_downloader.py +212 -212
  31. crawlo/downloader/playwright_downloader.py +402 -402
  32. crawlo/downloader/selenium_downloader.py +472 -472
  33. crawlo/event.py +11 -11
  34. crawlo/exceptions.py +81 -81
  35. crawlo/extension/__init__.py +39 -38
  36. crawlo/extension/health_check.py +141 -141
  37. crawlo/extension/log_interval.py +57 -57
  38. crawlo/extension/log_stats.py +81 -81
  39. crawlo/extension/logging_extension.py +43 -43
  40. crawlo/extension/memory_monitor.py +104 -104
  41. crawlo/extension/performance_profiler.py +133 -133
  42. crawlo/extension/request_recorder.py +107 -107
  43. crawlo/filters/__init__.py +154 -154
  44. crawlo/filters/aioredis_filter.py +234 -281
  45. crawlo/filters/memory_filter.py +269 -269
  46. crawlo/items/__init__.py +23 -23
  47. crawlo/items/base.py +21 -21
  48. crawlo/items/fields.py +52 -52
  49. crawlo/items/items.py +104 -104
  50. crawlo/middleware/__init__.py +21 -21
  51. crawlo/middleware/default_header.py +131 -131
  52. crawlo/middleware/download_delay.py +104 -104
  53. crawlo/middleware/middleware_manager.py +136 -135
  54. crawlo/middleware/offsite.py +114 -114
  55. crawlo/middleware/proxy.py +367 -367
  56. crawlo/middleware/request_ignore.py +86 -86
  57. crawlo/middleware/response_code.py +163 -163
  58. crawlo/middleware/response_filter.py +136 -136
  59. crawlo/middleware/retry.py +124 -124
  60. crawlo/mode_manager.py +211 -211
  61. crawlo/network/__init__.py +21 -21
  62. crawlo/network/request.py +338 -338
  63. crawlo/network/response.py +359 -359
  64. crawlo/pipelines/__init__.py +21 -21
  65. crawlo/pipelines/bloom_dedup_pipeline.py +156 -156
  66. crawlo/pipelines/console_pipeline.py +39 -39
  67. crawlo/pipelines/csv_pipeline.py +316 -316
  68. crawlo/pipelines/database_dedup_pipeline.py +222 -222
  69. crawlo/pipelines/json_pipeline.py +218 -218
  70. crawlo/pipelines/memory_dedup_pipeline.py +115 -115
  71. crawlo/pipelines/mongo_pipeline.py +131 -131
  72. crawlo/pipelines/mysql_pipeline.py +317 -317
  73. crawlo/pipelines/pipeline_manager.py +62 -61
  74. crawlo/pipelines/redis_dedup_pipeline.py +166 -165
  75. crawlo/project.py +314 -279
  76. crawlo/queue/pqueue.py +37 -37
  77. crawlo/queue/queue_manager.py +377 -337
  78. crawlo/queue/redis_priority_queue.py +306 -299
  79. crawlo/settings/__init__.py +7 -7
  80. crawlo/settings/default_settings.py +219 -217
  81. crawlo/settings/setting_manager.py +122 -122
  82. crawlo/spider/__init__.py +639 -639
  83. crawlo/stats_collector.py +59 -59
  84. crawlo/subscriber.py +129 -129
  85. crawlo/task_manager.py +30 -30
  86. crawlo/templates/crawlo.cfg.tmpl +10 -10
  87. crawlo/templates/project/__init__.py.tmpl +3 -3
  88. crawlo/templates/project/items.py.tmpl +17 -17
  89. crawlo/templates/project/middlewares.py.tmpl +118 -118
  90. crawlo/templates/project/pipelines.py.tmpl +96 -96
  91. crawlo/templates/project/settings.py.tmpl +288 -324
  92. crawlo/templates/project/settings_distributed.py.tmpl +157 -154
  93. crawlo/templates/project/settings_gentle.py.tmpl +101 -128
  94. crawlo/templates/project/settings_high_performance.py.tmpl +135 -150
  95. crawlo/templates/project/settings_simple.py.tmpl +99 -103
  96. crawlo/templates/project/spiders/__init__.py.tmpl +5 -5
  97. crawlo/templates/run.py.tmpl +45 -47
  98. crawlo/templates/spider/spider.py.tmpl +143 -143
  99. crawlo/tools/__init__.py +182 -182
  100. crawlo/tools/anti_crawler.py +268 -268
  101. crawlo/tools/authenticated_proxy.py +240 -240
  102. crawlo/tools/data_validator.py +180 -180
  103. crawlo/tools/date_tools.py +35 -35
  104. crawlo/tools/distributed_coordinator.py +386 -386
  105. crawlo/tools/retry_mechanism.py +220 -220
  106. crawlo/tools/scenario_adapter.py +262 -262
  107. crawlo/utils/__init__.py +35 -35
  108. crawlo/utils/batch_processor.py +259 -259
  109. crawlo/utils/controlled_spider_mixin.py +439 -439
  110. crawlo/utils/date_tools.py +290 -290
  111. crawlo/utils/db_helper.py +343 -343
  112. crawlo/utils/enhanced_error_handler.py +356 -356
  113. crawlo/utils/env_config.py +143 -106
  114. crawlo/utils/error_handler.py +123 -123
  115. crawlo/utils/func_tools.py +82 -82
  116. crawlo/utils/large_scale_config.py +286 -286
  117. crawlo/utils/large_scale_helper.py +344 -344
  118. crawlo/utils/log.py +128 -128
  119. crawlo/utils/performance_monitor.py +285 -285
  120. crawlo/utils/queue_helper.py +175 -175
  121. crawlo/utils/redis_connection_pool.py +351 -334
  122. crawlo/utils/redis_key_validator.py +198 -198
  123. crawlo/utils/request.py +267 -267
  124. crawlo/utils/request_serializer.py +218 -218
  125. crawlo/utils/spider_loader.py +61 -61
  126. crawlo/utils/system.py +11 -11
  127. crawlo/utils/tools.py +4 -4
  128. crawlo/utils/url.py +39 -39
  129. {crawlo-1.2.5.dist-info → crawlo-1.2.7.dist-info}/METADATA +764 -764
  130. crawlo-1.2.7.dist-info/RECORD +209 -0
  131. examples/__init__.py +7 -7
  132. tests/DOUBLE_CRAWLO_PREFIX_FIX_REPORT.md +81 -81
  133. tests/__init__.py +7 -7
  134. tests/advanced_tools_example.py +275 -275
  135. tests/authenticated_proxy_example.py +236 -236
  136. tests/cleaners_example.py +160 -160
  137. tests/config_validation_demo.py +102 -102
  138. tests/controlled_spider_example.py +205 -205
  139. tests/date_tools_example.py +180 -180
  140. tests/dynamic_loading_example.py +523 -523
  141. tests/dynamic_loading_test.py +104 -104
  142. tests/env_config_example.py +133 -133
  143. tests/error_handling_example.py +171 -171
  144. tests/redis_key_validation_demo.py +130 -130
  145. tests/response_improvements_example.py +144 -144
  146. tests/test_advanced_tools.py +148 -148
  147. tests/test_all_redis_key_configs.py +145 -145
  148. tests/test_authenticated_proxy.py +141 -141
  149. tests/test_cleaners.py +54 -54
  150. tests/test_comprehensive.py +146 -146
  151. tests/test_config_consistency.py +81 -0
  152. tests/test_config_validator.py +193 -193
  153. tests/test_crawlo_proxy_integration.py +172 -172
  154. tests/test_date_tools.py +123 -123
  155. tests/test_default_header_middleware.py +158 -158
  156. tests/test_double_crawlo_fix.py +207 -207
  157. tests/test_double_crawlo_fix_simple.py +124 -124
  158. tests/test_download_delay_middleware.py +221 -221
  159. tests/test_downloader_proxy_compatibility.py +268 -268
  160. tests/test_dynamic_downloaders_proxy.py +124 -124
  161. tests/test_dynamic_proxy.py +92 -92
  162. tests/test_dynamic_proxy_config.py +146 -146
  163. tests/test_dynamic_proxy_real.py +109 -109
  164. tests/test_edge_cases.py +303 -303
  165. tests/test_enhanced_error_handler.py +270 -270
  166. tests/test_env_config.py +121 -121
  167. tests/test_error_handler_compatibility.py +112 -112
  168. tests/test_final_validation.py +153 -153
  169. tests/test_framework_env_usage.py +103 -103
  170. tests/test_integration.py +356 -356
  171. tests/test_item_dedup_redis_key.py +122 -122
  172. tests/test_mode_consistency.py +52 -0
  173. tests/test_offsite_middleware.py +221 -221
  174. tests/test_parsel.py +29 -29
  175. tests/test_performance.py +327 -327
  176. tests/test_proxy_api.py +264 -264
  177. tests/test_proxy_health_check.py +32 -32
  178. tests/test_proxy_middleware.py +121 -121
  179. tests/test_proxy_middleware_enhanced.py +216 -216
  180. tests/test_proxy_middleware_integration.py +136 -136
  181. tests/test_proxy_providers.py +56 -56
  182. tests/test_proxy_stats.py +19 -19
  183. tests/test_proxy_strategies.py +59 -59
  184. tests/test_queue_manager_double_crawlo.py +173 -173
  185. tests/test_queue_manager_redis_key.py +176 -176
  186. tests/test_real_scenario_proxy.py +195 -195
  187. tests/test_redis_config.py +28 -28
  188. tests/test_redis_connection_pool.py +294 -294
  189. tests/test_redis_key_naming.py +181 -181
  190. tests/test_redis_key_validator.py +123 -123
  191. tests/test_redis_queue.py +224 -224
  192. tests/test_request_ignore_middleware.py +182 -182
  193. tests/test_request_serialization.py +70 -70
  194. tests/test_response_code_middleware.py +349 -349
  195. tests/test_response_filter_middleware.py +427 -427
  196. tests/test_response_improvements.py +152 -152
  197. tests/test_retry_middleware.py +241 -241
  198. tests/test_scheduler.py +252 -241
  199. tests/test_scheduler_config_update.py +134 -0
  200. tests/test_simple_response.py +61 -61
  201. tests/test_telecom_spider_redis_key.py +205 -205
  202. tests/test_template_content.py +87 -87
  203. tests/test_template_redis_key.py +134 -134
  204. tests/test_tools.py +153 -153
  205. tests/tools_example.py +257 -257
  206. crawlo-1.2.5.dist-info/RECORD +0 -206
  207. {crawlo-1.2.5.dist-info → crawlo-1.2.7.dist-info}/WHEEL +0 -0
  208. {crawlo-1.2.5.dist-info → crawlo-1.2.7.dist-info}/entry_points.txt +0 -0
  209. {crawlo-1.2.5.dist-info → crawlo-1.2.7.dist-info}/top_level.txt +0 -0
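
The counts above summarize 209 changed paths between the two wheels. Since a wheel is just a zip archive, a comparison like this can be reproduced locally with Python's standard library. A minimal sketch, assuming both .whl files have already been downloaded into the working directory (the file names and the `pip download` hint are assumptions):

# Minimal sketch of a wheel-to-wheel comparison using only the standard library.
# The wheel file names are assumptions: adjust them to the files you actually
# downloaded (e.g. via `pip download crawlo==1.2.5 --no-deps`).
import difflib
import zipfile

OLD_WHEEL = "crawlo-1.2.5-py3-none-any.whl"
NEW_WHEEL = "crawlo-1.2.7-py3-none-any.whl"


def read_members(path):
    """Return {archive member name: decoded text} for a wheel (a zip archive)."""
    with zipfile.ZipFile(path) as zf:
        return {
            name: zf.read(name).decode("utf-8", errors="replace")
            for name in zf.namelist()
        }


old, new = read_members(OLD_WHEEL), read_members(NEW_WHEEL)
for name in sorted(set(old) | set(new)):
    diff = difflib.unified_diff(
        old.get(name, "").splitlines(keepends=True),
        new.get(name, "").splitlines(keepends=True),
        fromfile=f"1.2.5/{name}",
        tofile=f"1.2.7/{name}",
    )
    text = "".join(diff)
    if text:
        print(text)
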
tests/test_proxy_api.py CHANGED
@@ -1,265 +1,265 @@
Lines 1-264 are removed and re-added with identical text (presumably a whitespace or line-ending-only change); the file body is shown once below.

#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Proxy API test script
=====================
Tests whether the specified proxy API endpoint works correctly.
"""

import asyncio
import aiohttp
import sys
import os
from urllib.parse import urlparse

# Add the project root directory to the Python path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

from crawlo.middleware.proxy import ProxyMiddleware
from crawlo.network.request import Request
from crawlo.settings.setting_manager import SettingManager


async def test_proxy_api(proxy_api_url):
    """Test the proxy API endpoint."""
    print("=== Testing the proxy API endpoint ===")
    print(f"API address: {proxy_api_url}")

    try:
        timeout = aiohttp.ClientTimeout(total=10)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.get(proxy_api_url) as response:
                print(f"Status code: {response.status}")
                print(f"Response headers: {response.headers.get('content-type', 'Unknown')}")

                # Try to parse the response as JSON
                try:
                    data = await response.json()
                    print(f"Response data: {data}")
                    return data
                except Exception:
                    # Not JSON; fall back to plain text
                    try:
                        text = await response.text()
                        print(f"Response text: {text[:200]}{'...' if len(text) > 200 else ''}")
                        return text
                    except Exception as e2:
                        print(f"Unable to parse the response content: {e2}")
                        return None

    except asyncio.TimeoutError:
        print("Request timed out")
        return None
    except Exception as e:
        print(f"Request failed: {e}")
        return None


def extract_proxy_url(proxy_data):
    """Extract a proxy URL from the API response."""
    proxy_url = None

    if isinstance(proxy_data, dict):
        # Check for a status field indicating success
        if proxy_data.get('status') == 0:
            # Read the proxy field
            proxy_info = proxy_data.get('proxy', {})
            if isinstance(proxy_info, dict):
                # Prefer the https proxy, fall back to http
                proxy_url = proxy_info.get('https') or proxy_info.get('http')
            elif isinstance(proxy_info, str):
                proxy_url = proxy_info
        else:
            # Try common field names directly
            for key in ['proxy', 'data', 'url', 'http', 'https']:
                if key in proxy_data:
                    value = proxy_data[key]
                    if isinstance(value, str):
                        proxy_url = value
                        break
                    elif isinstance(value, dict):
                        proxy_url = value.get('https') or value.get('http')
                        break

        # If still not found, look one level deeper
        if not proxy_url:
            for key, value in proxy_data.items():
                if isinstance(value, str) and (value.startswith('http://') or value.startswith('https://')):
                    proxy_url = value
                    break
                elif isinstance(value, dict):
                    # Search the nested dict
                    for sub_key, sub_value in value.items():
                        if isinstance(sub_value, str) and (sub_value.startswith('http://') or sub_value.startswith('https://')):
                            proxy_url = sub_value
                            break
                    if proxy_url:
                        break

    elif isinstance(proxy_data, str):
        # If the response is a plain string, use it directly
        if proxy_data.startswith('http://') or proxy_data.startswith('https://'):
            proxy_url = proxy_data

    return proxy_url


async def test_target_url_without_proxy(target_url):
    """Access the target URL directly, without a proxy."""
    print("\n=== Accessing the target URL directly (no proxy) ===")
    print(f"Target URL: {target_url}")

    try:
        timeout = aiohttp.ClientTimeout(total=15)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            # Set a User-Agent header to avoid basic anti-bot blocking
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36'
            }
            async with session.get(target_url, headers=headers) as response:
                print(f"Status code: {response.status}")
                print(f"Response headers: {response.headers.get('content-type', 'Unknown')}")

                # Only check the status; do not try to decode the body
                return response.status == 200

    except asyncio.TimeoutError:
        print("Request timed out")
        return False
    except Exception as e:
        print(f"Request failed: {e}")
        return False


async def test_target_url_with_proxy(proxy_url, target_url, max_retries=3):
    """Access the target URL through the proxy."""
    print("\n=== Accessing the target URL through the proxy ===")
    print(f"Proxy address: {proxy_url}")
    print(f"Target URL: {target_url}")

    # Set a User-Agent header to avoid basic anti-bot blocking
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36'
    }

    for attempt in range(max_retries):
        if attempt > 0:
            print(f"\nRetry attempt {attempt + 1}...")

        try:
            # Create the aiohttp client session
            timeout = aiohttp.ClientTimeout(total=15)
            async with aiohttp.ClientSession(timeout=timeout, headers=headers) as session:
                # Handle proxy URLs that carry authentication credentials
                if isinstance(proxy_url, str) and "@" in proxy_url and "://" in proxy_url:
                    parsed = urlparse(proxy_url)
                    if parsed.username and parsed.password:
                        # Extract the credentials
                        auth = aiohttp.BasicAuth(parsed.username, parsed.password)
                        # Strip the credentials from the proxy URL
                        clean_proxy = f"{parsed.scheme}://{parsed.hostname}"
                        if parsed.port:
                            clean_proxy += f":{parsed.port}"

                        print(f"Using authenticated proxy: {clean_proxy}")
                        async with session.get(target_url, proxy=clean_proxy, proxy_auth=auth) as response:
                            print(f"Status code: {response.status}")
                            print(f"Response headers: {response.headers.get('content-type', 'Unknown')}")
                            return response.status == 200
                    else:
                        # Proxy URL without credentials
                        print(f"Using plain proxy: {proxy_url}")
                        async with session.get(target_url, proxy=proxy_url) as response:
                            print(f"Status code: {response.status}")
                            print(f"Response headers: {response.headers.get('content-type', 'Unknown')}")
                            return response.status == 200
                else:
                    # Use the proxy URL as-is
                    print(f"Using proxy: {proxy_url}")
                    async with session.get(target_url, proxy=proxy_url) as response:
                        print(f"Status code: {response.status}")
                        print(f"Response headers: {response.headers.get('content-type', 'Unknown')}")
                        return response.status == 200

        except asyncio.TimeoutError:
            print("Request timed out")
            if attempt < max_retries - 1:
                await asyncio.sleep(2)  # Wait 2 seconds before retrying
                continue
        except aiohttp.ClientConnectorError as e:
            print(f"Connection error: {e}")
            if attempt < max_retries - 1:
                await asyncio.sleep(2)  # Wait 2 seconds before retrying
                continue
        except aiohttp.ClientHttpProxyError as e:
            print(f"Proxy HTTP error: {e}")
            if attempt < max_retries - 1:
                await asyncio.sleep(2)  # Wait 2 seconds before retrying
                continue
        except aiohttp.ServerDisconnectedError as e:
            print(f"Server disconnected: {e}")
            if attempt < max_retries - 1:
                await asyncio.sleep(2)  # Wait 2 seconds before retrying
                continue
        except Exception as e:
            print(f"Request failed: {e}")
            if attempt < max_retries - 1:
                await asyncio.sleep(2)  # Wait 2 seconds before retrying
                continue

    return False


async def main():
    """Main test entry point."""
    # Proxy API and target URL under test
    proxy_api = 'http://test.proxy.api:8080/proxy/getitem/'
    target_url = 'https://stock.10jqka.com.cn/20240315/c655957791.shtml'

    print("Starting proxy API and target URL tests...\n")

    # 1. Test the proxy API endpoint
    proxy_data = await test_proxy_api(proxy_api)

    if not proxy_data:
        print("Proxy API test failed; no proxy information available")
        return

    # 2. Extract the proxy URL from the API response
    proxy_url = extract_proxy_url(proxy_data)

    if not proxy_url:
        print("Unable to extract a proxy URL from the API response")
        print(f"API response content: {proxy_data}")
        return

    print(f"\nExtracted proxy URL: {proxy_url}")

    # 3. Try direct access first to confirm the target URL is reachable
    print("\n=== Testing direct access to the target URL ===")
    direct_success = await test_target_url_without_proxy(target_url)

    if direct_success:
        print("✅ Direct access to the target URL succeeded")
    else:
        print("❌ Direct access to the target URL failed")

    # 4. Access the target URL through the proxy
    print("\n=== Testing access to the target URL through the proxy ===")
    proxy_success = await test_target_url_with_proxy(proxy_url, target_url)

    if proxy_success:
        print(f"✅ Proxy test passed: proxy {proxy_url} can reach the target URL")
    else:
        print(f"❌ Proxy test failed: proxy {proxy_url} cannot reach the target URL")

    # 5. Summary
    print("\n=== Test summary ===")
    print(f"Proxy API access: {'ok' if proxy_data else 'failed'}")
    print(f"Proxy extraction: {'ok' if proxy_url else 'failed'}")
    print(f"Direct access: {'ok' if direct_success else 'failed'}")
    print(f"Proxy access: {'ok' if proxy_success else 'failed'}")


if __name__ == "__main__":
    asyncio.run(main())
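
The extract_proxy_url helper above accepts several response shapes: a bare URL string, a status/proxy envelope, a flat dict with a common field name, or a dict that nests the URL one level deeper. A few illustrative calls against the helper as shown above; every payload is an invented example, not a response from a real proxy provider:

# Illustrative inputs for extract_proxy_url; all payloads below are invented.
from test_proxy_api import extract_proxy_url  # assumes the tests directory is on sys.path

assert extract_proxy_url("http://1.2.3.4:8000") == "http://1.2.3.4:8000"  # bare string response
assert extract_proxy_url(
    {"status": 0, "proxy": {"https": "https://u:p@1.2.3.4:8000"}}
) == "https://u:p@1.2.3.4:8000"  # status/proxy envelope, https preferred over http
assert extract_proxy_url({"data": "http://1.2.3.4:8000"}) == "http://1.2.3.4:8000"  # common field name
assert extract_proxy_url({"result": {"ip": "http://1.2.3.4:8000"}}) == "http://1.2.3.4:8000"  # nested fallback
assert extract_proxy_url({"status": 1, "message": "no proxy available"}) is None  # nothing extractable
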
tests/test_proxy_health_check.py CHANGED
@@ -1,33 +1,33 @@
Lines 1-32 are removed and re-added with identical text (presumably a whitespace or line-ending-only change); the file body is shown once below.

# tests/test_proxy_health_check.py
import pytest
from unittest.mock import AsyncMock, patch
from crawlo.proxy.health_check import check_single_proxy
import httpx


@pytest.mark.asyncio
@patch('httpx.AsyncClient')
async def test_health_check_success(mock_client_class):
    """Health check: success."""
    mock_resp = AsyncMock()
    mock_resp.status_code = 200
    mock_client_class.return_value.__aenter__.return_value.get.return_value = mock_resp

    proxy_info = {'url': 'http://good:8080', 'healthy': False}
    await check_single_proxy(proxy_info)

    assert proxy_info['healthy'] is True
    assert proxy_info['failures'] == 0


@pytest.mark.asyncio
@patch('httpx.AsyncClient')
async def test_health_check_failure(mock_client_class):
    """Health check: failure."""
    mock_client_class.return_value.__aenter__.return_value.get.side_effect = httpx.ConnectError("Failed")

    proxy_info = {'url': 'http://bad:8080', 'healthy': True, 'failures': 0}
    await check_single_proxy(proxy_info)

    assert proxy_info['healthy'] is False
    assert proxy_info['failures'] == 1
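
crawlo.proxy.health_check itself is not part of this diff, so the behaviour these two tests pin down is only visible through their assertions: a successful probe sets healthy to True and resets failures to 0, while a failed probe sets healthy to False and increments failures. Purely as an illustration of that contract, a hypothetical check_single_proxy might look like the sketch below; the probe URL, timeout, and httpx constructor arguments are assumptions, not the library's actual implementation.

# Hypothetical sketch of the contract asserted above; NOT the actual
# crawlo.proxy.health_check implementation.
import httpx

PROBE_URL = "https://httpbin.org/ip"  # invented probe target
PROBE_TIMEOUT = 5.0                   # invented timeout, in seconds


async def check_single_proxy(proxy_info: dict) -> None:
    """Probe one proxy and update its health bookkeeping in place."""
    try:
        # The proxy= keyword assumes a recent httpx release.
        async with httpx.AsyncClient(proxy=proxy_info["url"], timeout=PROBE_TIMEOUT) as client:
            resp = await client.get(PROBE_URL)
        ok = resp.status_code == 200
    except httpx.HTTPError:
        ok = False

    if ok:
        proxy_info["healthy"] = True
        proxy_info["failures"] = 0                                  # as asserted in test_health_check_success
    else:
        proxy_info["healthy"] = False
        proxy_info["failures"] = proxy_info.get("failures", 0) + 1  # as asserted in test_health_check_failure
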