crawlo 1.4.6__py3-none-any.whl → 1.4.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.
Files changed (162)
  1. crawlo/__init__.py +2 -1
  2. crawlo/__version__.py +1 -1
  3. crawlo/cli.py +2 -2
  4. crawlo/commands/check.py +1 -1
  5. crawlo/commands/help.py +5 -3
  6. crawlo/commands/list.py +1 -1
  7. crawlo/commands/run.py +49 -11
  8. crawlo/commands/stats.py +1 -1
  9. crawlo/config.py +12 -4
  10. crawlo/config_validator.py +1 -1
  11. crawlo/core/engine.py +20 -7
  12. crawlo/core/processor.py +1 -1
  13. crawlo/core/scheduler.py +4 -5
  14. crawlo/crawler.py +51 -10
  15. crawlo/downloader/__init__.py +7 -3
  16. crawlo/downloader/aiohttp_downloader.py +18 -18
  17. crawlo/downloader/cffi_downloader.py +5 -2
  18. crawlo/downloader/httpx_downloader.py +9 -3
  19. crawlo/downloader/hybrid_downloader.py +2 -2
  20. crawlo/downloader/playwright_downloader.py +38 -15
  21. crawlo/downloader/selenium_downloader.py +16 -2
  22. crawlo/event.py +42 -8
  23. crawlo/exceptions.py +157 -24
  24. crawlo/extension/__init__.py +10 -9
  25. crawlo/extension/health_check.py +7 -7
  26. crawlo/extension/log_interval.py +6 -6
  27. crawlo/extension/log_stats.py +2 -2
  28. crawlo/extension/logging_extension.py +4 -12
  29. crawlo/extension/memory_monitor.py +5 -5
  30. crawlo/extension/performance_profiler.py +5 -5
  31. crawlo/extension/request_recorder.py +6 -6
  32. crawlo/factories/base.py +1 -1
  33. crawlo/factories/crawler.py +61 -60
  34. crawlo/factories/utils.py +135 -0
  35. crawlo/filters/__init__.py +19 -2
  36. crawlo/filters/aioredis_filter.py +133 -49
  37. crawlo/filters/memory_filter.py +6 -21
  38. crawlo/framework.py +22 -8
  39. crawlo/initialization/built_in.py +24 -67
  40. crawlo/initialization/core.py +65 -19
  41. crawlo/initialization/phases.py +83 -2
  42. crawlo/initialization/registry.py +5 -7
  43. crawlo/initialization/utils.py +49 -0
  44. crawlo/logging/__init__.py +6 -10
  45. crawlo/logging/config.py +106 -22
  46. crawlo/logging/factory.py +12 -8
  47. crawlo/logging/manager.py +19 -27
  48. crawlo/middleware/__init__.py +72 -9
  49. crawlo/middleware/default_header.py +2 -2
  50. crawlo/middleware/download_delay.py +2 -2
  51. crawlo/middleware/middleware_manager.py +6 -6
  52. crawlo/middleware/offsite.py +2 -2
  53. crawlo/middleware/proxy.py +2 -2
  54. crawlo/middleware/request_ignore.py +4 -4
  55. crawlo/middleware/response_code.py +2 -2
  56. crawlo/middleware/response_filter.py +2 -2
  57. crawlo/middleware/retry.py +1 -1
  58. crawlo/mode_manager.py +38 -4
  59. crawlo/network/request.py +54 -26
  60. crawlo/network/response.py +69 -135
  61. crawlo/pipelines/__init__.py +40 -9
  62. crawlo/pipelines/base_pipeline.py +452 -0
  63. crawlo/pipelines/bloom_dedup_pipeline.py +4 -5
  64. crawlo/pipelines/console_pipeline.py +2 -2
  65. crawlo/pipelines/csv_pipeline.py +4 -4
  66. crawlo/pipelines/database_dedup_pipeline.py +4 -5
  67. crawlo/pipelines/json_pipeline.py +4 -4
  68. crawlo/pipelines/memory_dedup_pipeline.py +4 -5
  69. crawlo/pipelines/mongo_pipeline.py +23 -14
  70. crawlo/pipelines/mysql_pipeline.py +31 -39
  71. crawlo/pipelines/pipeline_manager.py +8 -8
  72. crawlo/pipelines/redis_dedup_pipeline.py +13 -14
  73. crawlo/project.py +1 -1
  74. crawlo/queue/__init__.py +10 -0
  75. crawlo/queue/queue_manager.py +79 -13
  76. crawlo/queue/redis_priority_queue.py +196 -47
  77. crawlo/settings/default_settings.py +16 -6
  78. crawlo/spider/__init__.py +6 -5
  79. crawlo/stats_collector.py +2 -2
  80. crawlo/task_manager.py +1 -1
  81. crawlo/templates/crawlo.cfg.tmpl +3 -3
  82. crawlo/templates/project/__init__.py.tmpl +1 -3
  83. crawlo/templates/project/items.py.tmpl +2 -6
  84. crawlo/templates/project/middlewares.py.tmpl +1 -1
  85. crawlo/templates/project/pipelines.py.tmpl +1 -2
  86. crawlo/templates/project/settings.py.tmpl +12 -10
  87. crawlo/templates/project/settings_distributed.py.tmpl +14 -13
  88. crawlo/templates/project/settings_gentle.py.tmpl +21 -23
  89. crawlo/templates/project/settings_high_performance.py.tmpl +21 -23
  90. crawlo/templates/project/settings_minimal.py.tmpl +10 -8
  91. crawlo/templates/project/settings_simple.py.tmpl +21 -23
  92. crawlo/templates/run.py.tmpl +1 -1
  93. crawlo/templates/spider/spider.py.tmpl +4 -12
  94. crawlo/templates/spiders_init.py.tmpl +3 -8
  95. crawlo/tools/__init__.py +0 -103
  96. crawlo/tools/scenario_adapter.py +1 -1
  97. crawlo/utils/__init__.py +25 -1
  98. crawlo/utils/batch_processor.py +23 -6
  99. crawlo/utils/config_manager.py +442 -0
  100. crawlo/utils/controlled_spider_mixin.py +1 -1
  101. crawlo/utils/db_helper.py +1 -1
  102. crawlo/utils/encoding_helper.py +190 -0
  103. crawlo/utils/error_handler.py +2 -2
  104. crawlo/utils/large_scale_helper.py +1 -1
  105. crawlo/utils/leak_detector.py +335 -0
  106. crawlo/utils/mongo_connection_pool.py +157 -0
  107. crawlo/utils/mysql_connection_pool.py +197 -0
  108. crawlo/utils/performance_monitor.py +1 -1
  109. crawlo/utils/redis_checker.py +91 -0
  110. crawlo/utils/redis_connection_pool.py +260 -70
  111. crawlo/utils/redis_key_validator.py +1 -1
  112. crawlo/utils/request.py +24 -2
  113. crawlo/utils/request_serializer.py +1 -1
  114. crawlo/utils/resource_manager.py +337 -0
  115. crawlo/utils/response_helper.py +113 -0
  116. crawlo/utils/selector_helper.py +3 -2
  117. crawlo/utils/singleton.py +70 -0
  118. crawlo/utils/spider_loader.py +1 -1
  119. crawlo/utils/text_helper.py +1 -1
  120. crawlo-1.4.8.dist-info/METADATA +831 -0
  121. {crawlo-1.4.6.dist-info → crawlo-1.4.8.dist-info}/RECORD +131 -145
  122. tests/advanced_tools_example.py +10 -68
  123. tests/distributed_dedup_test.py +467 -0
  124. tests/monitor_redis_dedup.sh +72 -0
  125. tests/ofweek_scrapy/ofweek_scrapy/spiders/__init__.py +4 -4
  126. tests/simple_cli_test.py +55 -0
  127. tests/test_cli_arguments.py +119 -0
  128. tests/test_dedup_fix.py +10 -10
  129. crawlo/logging/async_handler.py +0 -181
  130. crawlo/logging/monitor.py +0 -153
  131. crawlo/logging/sampler.py +0 -167
  132. crawlo/tools/authenticated_proxy.py +0 -241
  133. crawlo/tools/data_formatter.py +0 -226
  134. crawlo/tools/data_validator.py +0 -181
  135. crawlo/tools/encoding_converter.py +0 -127
  136. crawlo/tools/network_diagnostic.py +0 -365
  137. crawlo/tools/request_tools.py +0 -83
  138. crawlo/tools/retry_mechanism.py +0 -224
  139. crawlo/utils/env_config.py +0 -143
  140. crawlo/utils/large_scale_config.py +0 -287
  141. crawlo/utils/log.py +0 -80
  142. crawlo/utils/system.py +0 -11
  143. crawlo/utils/tools.py +0 -5
  144. crawlo/utils/url.py +0 -40
  145. crawlo-1.4.6.dist-info/METADATA +0 -329
  146. tests/env_config_example.py +0 -134
  147. tests/ofweek_scrapy/ofweek_scrapy/spiders/ofweek_spider.py +0 -162
  148. tests/test_authenticated_proxy.py +0 -142
  149. tests/test_comprehensive.py +0 -147
  150. tests/test_dynamic_downloaders_proxy.py +0 -125
  151. tests/test_dynamic_proxy.py +0 -93
  152. tests/test_dynamic_proxy_config.py +0 -147
  153. tests/test_dynamic_proxy_real.py +0 -110
  154. tests/test_env_config.py +0 -122
  155. tests/test_framework_env_usage.py +0 -104
  156. tests/test_large_scale_config.py +0 -113
  157. tests/test_proxy_api.py +0 -265
  158. tests/test_real_scenario_proxy.py +0 -196
  159. tests/tools_example.py +0 -261
  160. {crawlo-1.4.6.dist-info → crawlo-1.4.8.dist-info}/WHEEL +0 -0
  161. {crawlo-1.4.6.dist-info → crawlo-1.4.8.dist-info}/entry_points.txt +0 -0
  162. {crawlo-1.4.6.dist-info → crawlo-1.4.8.dist-info}/top_level.txt +0 -0
tests/test_proxy_api.py DELETED
@@ -1,265 +0,0 @@
- #!/usr/bin/python
- # -*- coding: UTF-8 -*-
- """
- Proxy API test script
- =====================
- Checks whether the specified proxy API endpoint works correctly
- """
-
- import asyncio
- import aiohttp
- import sys
- import os
- from urllib.parse import urlparse
-
- # Add the project root directory to the Python path
- sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
-
- from crawlo.middleware.proxy import ProxyMiddleware
- from crawlo.network.request import Request
- from crawlo.settings.setting_manager import SettingManager
-
-
- async def test_proxy_api(proxy_api_url):
-     """Test the proxy API endpoint"""
-     print(f"=== Testing proxy API endpoint ===")
-     print(f"API address: {proxy_api_url}")
-
-     try:
-         timeout = aiohttp.ClientTimeout(total=10)
-         async with aiohttp.ClientSession(timeout=timeout) as session:
-             async with session.get(proxy_api_url) as response:
-                 print(f"Status code: {response.status}")
-                 print(f"Response headers: {response.headers.get('content-type', 'Unknown')}")
-
-                 # Try to parse the response as JSON
-                 try:
-                     data = await response.json()
-                     print(f"Response data: {data}")
-                     return data
-                 except Exception as e:
-                     # Not JSON; fall back to reading the body as text
-                     try:
-                         text = await response.text()
-                         print(f"Response text: {text[:200]}{'...' if len(text) > 200 else ''}")
-                         return text
-                     except Exception as e2:
-                         print(f"Unable to parse the response content: {e2}")
-                         return None
-
-     except asyncio.TimeoutError:
-         print("Request timed out")
-         return None
-     except Exception as e:
-         print(f"Request failed: {e}")
-         return None
-
-
- def extract_proxy_url(proxy_data):
-     """Extract the proxy URL from the API response"""
-     proxy_url = None
-
-     if isinstance(proxy_data, dict):
-         # Check whether the status field indicates success
-         if proxy_data.get('status') == 0:
-             # Get the proxy field
-             proxy_info = proxy_data.get('proxy', {})
-             if isinstance(proxy_info, dict):
-                 # Prefer the https proxy, otherwise fall back to http
-                 proxy_url = proxy_info.get('https') or proxy_info.get('http')
-             elif isinstance(proxy_info, str):
-                 proxy_url = proxy_info
-         else:
-             # Try common field names directly
-             for key in ['proxy', 'data', 'url', 'http', 'https']:
-                 if key in proxy_data:
-                     value = proxy_data[key]
-                     if isinstance(value, str):
-                         proxy_url = value
-                         break
-                     elif isinstance(value, dict):
-                         proxy_url = value.get('https') or value.get('http')
-                         break
-
-             # If still not found, look into deeper nesting
-             if not proxy_url:
-                 for key, value in proxy_data.items():
-                     if isinstance(value, str) and (value.startswith('http://') or value.startswith('https://')):
-                         proxy_url = value
-                         break
-                     elif isinstance(value, dict):
-                         # Search recursively
-                         for sub_key, sub_value in value.items():
-                             if isinstance(sub_value, str) and (sub_value.startswith('http://') or sub_value.startswith('https://')):
-                                 proxy_url = sub_value
-                                 break
-                         if proxy_url:
-                             break
-
-     elif isinstance(proxy_data, str):
-         # If the response is a string, use it directly
-         if proxy_data.startswith('http://') or proxy_data.startswith('https://'):
-             proxy_url = proxy_data
-
-     return proxy_url
-
-
- async def test_target_url_without_proxy(target_url):
-     """Access the target URL directly without a proxy"""
-     print(f"\n=== Accessing the target URL directly (no proxy) ===")
-     print(f"Target URL: {target_url}")
-
-     try:
-         timeout = aiohttp.ClientTimeout(total=15)
-         async with aiohttp.ClientSession(timeout=timeout) as session:
-             # Add a User-Agent header to avoid anti-bot blocking
-             headers = {
-                 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36'
-             }
-             async with session.get(target_url, headers=headers) as response:
-                 print(f"Status code: {response.status}")
-                 print(f"Response headers: {response.headers.get('content-type', 'Unknown')}")
-
-                 # Only check the response status; do not try to decode the body
-                 return response.status == 200
-
-     except asyncio.TimeoutError:
-         print("Request timed out")
-         return False
-     except Exception as e:
-         print(f"Request failed: {e}")
-         return False
-
-
- async def test_target_url_with_proxy(proxy_url, target_url, max_retries=3):
-     """Access the target URL through the proxy"""
-     print(f"\n=== Accessing the target URL through the proxy ===")
-     print(f"Proxy address: {proxy_url}")
-     print(f"Target URL: {target_url}")
-
-     # Add a User-Agent header to avoid anti-bot blocking
-     headers = {
-         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36'
-     }
-
-     for attempt in range(max_retries):
-         if attempt > 0:
-             print(f"\nRetry attempt {attempt + 1}...")
-
-         try:
-             # Create an aiohttp client session
-             timeout = aiohttp.ClientTimeout(total=15)
-             async with aiohttp.ClientSession(timeout=timeout, headers=headers) as session:
-                 # Handle the proxy URL, supporting proxies with authentication
-                 if isinstance(proxy_url, str) and "@" in proxy_url and "://" in proxy_url:
-                     parsed = urlparse(proxy_url)
-                     if parsed.username and parsed.password:
-                         # Extract the credentials
-                         auth = aiohttp.BasicAuth(parsed.username, parsed.password)
-                         # Clean the proxy URL by removing the credentials
-                         clean_proxy = f"{parsed.scheme}://{parsed.hostname}"
-                         if parsed.port:
-                             clean_proxy += f":{parsed.port}"
-
-                         print(f"Using authenticated proxy: {clean_proxy}")
-                         async with session.get(target_url, proxy=clean_proxy, proxy_auth=auth) as response:
-                             print(f"Status code: {response.status}")
-                             print(f"Response headers: {response.headers.get('content-type', 'Unknown')}")
-                             return response.status == 200
-                     else:
-                         # Proxy without credentials
-                         print(f"Using plain proxy: {proxy_url}")
-                         async with session.get(target_url, proxy=proxy_url) as response:
-                             print(f"Status code: {response.status}")
-                             print(f"Response headers: {response.headers.get('content-type', 'Unknown')}")
-                             return response.status == 200
-                 else:
-                     # Use the proxy URL as-is
-                     print(f"Using proxy: {proxy_url}")
-                     async with session.get(target_url, proxy=proxy_url) as response:
-                         print(f"Status code: {response.status}")
-                         print(f"Response headers: {response.headers.get('content-type', 'Unknown')}")
-                         return response.status == 200
-
-         except asyncio.TimeoutError:
-             print("Request timed out")
-             if attempt < max_retries - 1:
-                 await asyncio.sleep(2)  # Wait 2 seconds before retrying
-                 continue
-         except aiohttp.ClientConnectorError as e:
-             print(f"Connection error: {e}")
-             if attempt < max_retries - 1:
-                 await asyncio.sleep(2)  # Wait 2 seconds before retrying
-                 continue
-         except aiohttp.ClientHttpProxyError as e:
-             print(f"Proxy HTTP error: {e}")
-             if attempt < max_retries - 1:
-                 await asyncio.sleep(2)  # Wait 2 seconds before retrying
-                 continue
-         except aiohttp.ServerDisconnectedError as e:
-             print(f"Server disconnected: {e}")
-             if attempt < max_retries - 1:
-                 await asyncio.sleep(2)  # Wait 2 seconds before retrying
-                 continue
-         except Exception as e:
-             print(f"Request failed: {e}")
-             if attempt < max_retries - 1:
-                 await asyncio.sleep(2)  # Wait 2 seconds before retrying
-                 continue
-
-     return False
-
-
- async def main():
-     """Main test routine"""
-     # The proxy API and target URL under test
-     proxy_api = 'http://test.proxy.api:8080/proxy/getitem/'
-     target_url = 'https://stock.10jqka.com.cn/20240315/c655957791.shtml'
-
-     print("Starting tests of the proxy API and target URL access...\n")
-
-     # 1. Test the proxy API endpoint
-     proxy_data = await test_proxy_api(proxy_api)
-
-     if not proxy_data:
-         print("Proxy API test failed; unable to obtain proxy information")
-         return
-
-     # 2. Extract the proxy URL from the API response
-     proxy_url = extract_proxy_url(proxy_data)
-
-     if not proxy_url:
-         print("Unable to extract a proxy URL from the API response")
-         print(f"API response content: {proxy_data}")
-         return
-
-     print(f"\nExtracted proxy URL: {proxy_url}")
-
-     # 3. Try direct access first to confirm the target URL is reachable
-     print("\n=== Testing direct access to the target URL ===")
-     direct_success = await test_target_url_without_proxy(target_url)
-
-     if direct_success:
-         print("Direct access to the target URL succeeded")
-     else:
-         print("Direct access to the target URL failed")
-
-     # 4. Access the target URL through the proxy
-     print("\n=== Testing proxy access to the target URL ===")
-     proxy_success = await test_target_url_with_proxy(proxy_url, target_url)
-
-     if proxy_success:
-         print(f"Proxy test succeeded! Proxy {proxy_url} can access the target URL")
-     else:
-         print(f"Proxy test failed! Proxy {proxy_url} cannot access the target URL")
-
-     # 5. Summary
-     print(f"\n=== Test summary ===")
-     print(f"Proxy API access: {'success' if proxy_data else 'failure'}")
-     print(f"Proxy extraction: {'success' if proxy_url else 'failure'}")
-     print(f"Direct access: {'success' if direct_success else 'failure'}")
-     print(f"Proxy access: {'success' if proxy_success else 'failure'}")
-
-
- if __name__ == "__main__":
-     asyncio.run(main())
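
The extract_proxy_url helper in the deleted test above tolerates several response shapes. The following is a condensed, illustrative sketch of its happy path only (the full helper also falls back to scanning common field names and nested dicts); the sample responses are hypothetical:

from typing import Any, Optional


def pick_proxy(data: Any) -> Optional[str]:
    """Simplified sketch of the extraction order used by extract_proxy_url."""
    if isinstance(data, str) and data.startswith(("http://", "https://")):
        return data  # a bare URL string is used directly
    if isinstance(data, dict) and data.get("status") == 0:
        proxy = data.get("proxy", {})
        if isinstance(proxy, dict):
            return proxy.get("https") or proxy.get("http")  # prefer https
        if isinstance(proxy, str):
            return proxy
    return None


# Hypothetical sample responses:
print(pick_proxy({"status": 0, "proxy": {"https": "https://1.2.3.4:8000"}}))  # https://1.2.3.4:8000
print(pick_proxy("http://5.6.7.8:3128"))                                      # http://5.6.7.8:3128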
tests/test_real_scenario_proxy.py DELETED
@@ -1,196 +0,0 @@
- #!/usr/bin/python
- # -*- coding: UTF-8 -*-
- """
- Real-world proxy scenario test
- ==============================
- Tests the proxy functionality with user-supplied headers, cookies, and URL
- """
-
- import asyncio
- import aiohttp
- import sys
- import os
- from urllib.parse import urlparse
-
- # Add the project root directory to the Python path
- sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
-
- # Request headers supplied by the user
- HEADERS = {
-     "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
-     "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
-     "cache-control": "no-cache",
-     "pragma": "no-cache",
-     "priority": "u=0, i",
-     "sec-ch-ua": "\"Chromium\";v=\"140\", \"Not=A?Brand\";v=\"24\", \"Google Chrome\";v=\"140\"",
-     "sec-ch-ua-mobile": "?0",
-     "sec-ch-ua-platform": "\"Windows\"",
-     "sec-fetch-dest": "document",
-     "sec-fetch-mode": "navigate",
-     "sec-fetch-site": "none",
-     "sec-fetch-user": "?1",
-     "upgrade-insecure-requests": "1",
-     "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36"
- }
-
- # Cookies supplied by the user
- COOKIES = {
-     "Hm_lvt_722143063e4892925903024537075d0d": "1758071793",
-     "Hm_lvt_929f8b362150b1f77b477230541dbbc2": "1758071793",
-     "historystock": "600699",
-     "spversion": "20130314",
-     "cid": "example_cid_value",
-     "u_ukey": "example_u_ukey_value",
-     "u_uver": "1.0.0",
-     "u_dpass": "example_u_dpass_value",
-     "u_did": "example_u_did_value",
-     "u_ttype": "WEB",
-     "user_status": "0",
-     "ttype": "WEB",
-     "log": "",
-     "Hm_lvt_69929b9dce4c22a060bd22d703b2a280": "example_Hm_lvt_value",
-     "HMACCOUNT": "example_HMACCOUNT_value",
-     "Hm_lvt_78c58f01938e4d85eaf619eae71b4ed1": "example_Hm_lvt_value",
-     "user": "example_user_value",
-     "userid": "example_userid_value",
-     "u_name": "example_u_name_value",
-     "escapename": "example_escapename_value",
-     "ticket": "example_ticket_value",
-     "utk": "example_utk_value",
-     "sess_tk": "example_sess_tk_value",
-     "cuc": "example_cuc_value",
-     "Hm_lvt_f79b64788a4e377c608617fba4c736e2": "example_Hm_lvt_value",
-     "v": "example_v_value",
-     "Hm_lpvt_78c58f01938e4d85eaf619eae71b4ed1": "1758163145",
-     "Hm_lpvt_f79b64788a4e377c608617fba4c736e2": "1758163145",
-     "Hm_lpvt_69929b9dce4c22a060bd22d703b2a280": "1758163145"
- }
-
- # URL supplied by the user
- URL = "https://stock.10jqka.com.cn/20240315/c655957791.shtml"
-
-
- async def test_without_proxy():
-     """Access the URL directly without a proxy"""
-     print("=== Direct access without a proxy ===")
-     print(f"URL: {URL}")
-
-     try:
-         timeout = aiohttp.ClientTimeout(total=15)
-         async with aiohttp.ClientSession(timeout=timeout, headers=HEADERS, cookies=COOKIES) as session:
-             async with session.get(URL) as response:
-                 print(f"Status code: {response.status}")
-                 if response.status == 200:
-                     print("Direct access succeeded")
-                     return True
-                 else:
-                     print(f"Direct access failed, status code: {response.status}")
-                     return False
-     except Exception as e:
-         print(f"Direct access error: {e}")
-         return False
-
-
- async def test_with_proxy(proxy_url):
-     """Access the URL through the proxy"""
-     print(f"\n=== Access through a proxy ===")
-     print(f"Proxy address: {proxy_url}")
-     print(f"URL: {URL}")
-
-     try:
-         timeout = aiohttp.ClientTimeout(total=15)
-         async with aiohttp.ClientSession(timeout=timeout, headers=HEADERS, cookies=COOKIES) as session:
-             # Handle proxies with authentication
-             if "@" in proxy_url and "://" in proxy_url:
-                 parsed = urlparse(proxy_url)
-                 if parsed.username and parsed.password:
-                     # Extract the credentials
-                     auth = aiohttp.BasicAuth(parsed.username, parsed.password)
-                     # Clean the proxy URL
-                     clean_proxy = f"{parsed.scheme}://{parsed.hostname}"
-                     if parsed.port:
-                         clean_proxy += f":{parsed.port}"
-
-                     print(f"Using authenticated proxy: {clean_proxy}")
-                     async with session.get(URL, proxy=clean_proxy, proxy_auth=auth) as response:
-                         print(f"Status code: {response.status}")
-                         if response.status == 200:
-                             print("Proxy access succeeded")
-                             return True
-                         else:
-                             print(f"Proxy access failed, status code: {response.status}")
-                             return False
-             else:
-                 # Use the proxy URL as-is
-                 print(f"Using proxy: {proxy_url}")
-                 async with session.get(URL, proxy=proxy_url) as response:
-                     print(f"Status code: {response.status}")
-                     if response.status == 200:
-                         print("Proxy access succeeded")
-                         return True
-                     else:
-                         print(f"Proxy access failed, status code: {response.status}")
-                         return False
-     except Exception as e:
-         print(f"Proxy access error: {e}")
-         return False
-
-
- async def get_proxy_from_api():
-     """Fetch a proxy from the proxy API"""
-     proxy_api = 'http://test.proxy.api:8080/proxy/getitem/'
-     print(f"\n=== Fetching a proxy from the proxy API ===")
-     print(f"API address: {proxy_api}")
-
-     try:
-         timeout = aiohttp.ClientTimeout(total=10)
-         async with aiohttp.ClientSession(timeout=timeout) as session:
-             async with session.get(proxy_api) as response:
-                 print(f"Status code: {response.status}")
-
-                 if response.status == 200:
-                     data = await response.json()
-                     print(f"Response data: {data}")
-
-                     # Extract the proxy URL
-                     if isinstance(data, dict) and data.get('status') == 0:
-                         proxy_info = data.get('proxy', {})
-                         if isinstance(proxy_info, dict):
-                             proxy_url = proxy_info.get('https') or proxy_info.get('http')
-                             if proxy_url:
-                                 print(f"Extracted proxy URL: {proxy_url}")
-                                 return proxy_url
-                 print("Unable to obtain a proxy URL")
-                 return None
-     except Exception as e:
-         print(f"API request error: {e}")
-         return None
-
-
- async def main():
-     """Main test routine"""
-     print("Starting the real-world proxy scenario test...")
-     print("=" * 50)
-
-     # 1. First test direct access without a proxy
-     direct_success = await test_without_proxy()
-
-     # 2. Fetch a proxy from the proxy API
-     proxy_url = await get_proxy_from_api()
-
-     if not proxy_url:
-         print("\nUnable to obtain a proxy; ending the test")
-         return
-
-     # 3. Access through the proxy
-     proxy_success = await test_with_proxy(proxy_url)
-
-     # 4. Summary of test results
-     print(f"\n{'='*30}")
-     print("Test results:")
-     print(f"Direct access: {'success' if direct_success else 'failure'}")
-     print(f"Proxy access: {'success' if proxy_success else 'failure'}")
-
-
- if __name__ == "__main__":
-     asyncio.run(main())
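
Both deleted tests repeat the same credential-splitting pattern for authenticated proxies: aiohttp takes the username and password as a separate BasicAuth object rather than embedded in the proxy URL. A minimal, self-contained sketch of that pattern (the proxy endpoint in the usage comment is hypothetical):

import asyncio
from urllib.parse import urlparse

import aiohttp


async def fetch_status_via_proxy(url: str, proxy_url: str) -> int:
    """Return the HTTP status of `url` fetched through `proxy_url`, which may embed user:pass."""
    parsed = urlparse(proxy_url)
    proxy_auth = None
    if parsed.username and parsed.password:
        # Strip the credentials from the URL and pass them separately.
        proxy_auth = aiohttp.BasicAuth(parsed.username, parsed.password)
        proxy_url = f"{parsed.scheme}://{parsed.hostname}"
        if parsed.port:
            proxy_url += f":{parsed.port}"
    timeout = aiohttp.ClientTimeout(total=15)
    async with aiohttp.ClientSession(timeout=timeout) as session:
        async with session.get(url, proxy=proxy_url, proxy_auth=proxy_auth) as response:
            return response.status


# Usage (hypothetical proxy endpoint):
# asyncio.run(fetch_status_via_proxy("https://example.com", "http://user:pass@10.0.0.1:8000"))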