crawlo-1.3.2-py3-none-any.whl → crawlo-1.3.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (105)
  1. crawlo/__init__.py +24 -0
  2. crawlo/__version__.py +1 -1
  3. crawlo/commands/run.py +58 -32
  4. crawlo/core/__init__.py +44 -0
  5. crawlo/core/engine.py +119 -45
  6. crawlo/core/scheduler.py +4 -3
  7. crawlo/crawler.py +603 -1133
  8. crawlo/downloader/aiohttp_downloader.py +4 -2
  9. crawlo/extension/__init__.py +1 -1
  10. crawlo/extension/logging_extension.py +23 -7
  11. crawlo/factories/__init__.py +28 -0
  12. crawlo/factories/base.py +69 -0
  13. crawlo/factories/crawler.py +104 -0
  14. crawlo/factories/registry.py +85 -0
  15. crawlo/filters/aioredis_filter.py +25 -2
  16. crawlo/framework.py +292 -0
  17. crawlo/initialization/__init__.py +40 -0
  18. crawlo/initialization/built_in.py +426 -0
  19. crawlo/initialization/context.py +142 -0
  20. crawlo/initialization/core.py +194 -0
  21. crawlo/initialization/phases.py +149 -0
  22. crawlo/initialization/registry.py +146 -0
  23. crawlo/items/base.py +2 -1
  24. crawlo/logging/__init__.py +38 -0
  25. crawlo/logging/config.py +97 -0
  26. crawlo/logging/factory.py +129 -0
  27. crawlo/logging/manager.py +112 -0
  28. crawlo/middleware/middleware_manager.py +1 -1
  29. crawlo/middleware/offsite.py +1 -1
  30. crawlo/mode_manager.py +26 -1
  31. crawlo/pipelines/pipeline_manager.py +2 -1
  32. crawlo/project.py +76 -46
  33. crawlo/queue/pqueue.py +11 -5
  34. crawlo/queue/queue_manager.py +143 -19
  35. crawlo/queue/redis_priority_queue.py +69 -49
  36. crawlo/settings/default_settings.py +110 -14
  37. crawlo/settings/setting_manager.py +29 -13
  38. crawlo/spider/__init__.py +34 -16
  39. crawlo/stats_collector.py +17 -3
  40. crawlo/task_manager.py +112 -3
  41. crawlo/templates/project/settings.py.tmpl +103 -202
  42. crawlo/templates/project/settings_distributed.py.tmpl +122 -135
  43. crawlo/templates/project/settings_gentle.py.tmpl +149 -43
  44. crawlo/templates/project/settings_high_performance.py.tmpl +127 -90
  45. crawlo/templates/project/settings_minimal.py.tmpl +46 -15
  46. crawlo/templates/project/settings_simple.py.tmpl +138 -75
  47. crawlo/templates/project/spiders/__init__.py.tmpl +5 -1
  48. crawlo/templates/run.py.tmpl +10 -14
  49. crawlo/templates/spiders_init.py.tmpl +10 -0
  50. crawlo/tools/network_diagnostic.py +365 -0
  51. crawlo/utils/class_loader.py +26 -0
  52. crawlo/utils/error_handler.py +76 -35
  53. crawlo/utils/log.py +41 -144
  54. crawlo/utils/redis_connection_pool.py +43 -6
  55. crawlo/utils/request_serializer.py +8 -1
  56. {crawlo-1.3.2.dist-info → crawlo-1.3.4.dist-info}/METADATA +120 -14
  57. {crawlo-1.3.2.dist-info → crawlo-1.3.4.dist-info}/RECORD +104 -45
  58. tests/authenticated_proxy_example.py +2 -2
  59. tests/baidu_performance_test.py +109 -0
  60. tests/baidu_test.py +60 -0
  61. tests/comprehensive_framework_test.py +213 -0
  62. tests/comprehensive_test.py +82 -0
  63. tests/comprehensive_testing_summary.md +187 -0
  64. tests/debug_configure.py +70 -0
  65. tests/debug_framework_logger.py +85 -0
  66. tests/debug_log_levels.py +64 -0
  67. tests/distributed_test.py +67 -0
  68. tests/distributed_test_debug.py +77 -0
  69. tests/final_command_test_report.md +0 -0
  70. tests/final_comprehensive_test.py +152 -0
  71. tests/final_validation_test.py +183 -0
  72. tests/framework_performance_test.py +203 -0
  73. tests/optimized_performance_test.py +212 -0
  74. tests/performance_comparison.py +246 -0
  75. tests/queue_blocking_test.py +114 -0
  76. tests/queue_test.py +90 -0
  77. tests/scrapy_comparison/ofweek_scrapy.py +139 -0
  78. tests/scrapy_comparison/scrapy_test.py +134 -0
  79. tests/simple_command_test.py +120 -0
  80. tests/simple_crawlo_test.py +128 -0
  81. tests/simple_log_test.py +58 -0
  82. tests/simple_optimization_test.py +129 -0
  83. tests/simple_spider_test.py +50 -0
  84. tests/simple_test.py +48 -0
  85. tests/test_all_commands.py +231 -0
  86. tests/test_batch_processor.py +179 -0
  87. tests/test_component_factory.py +175 -0
  88. tests/test_controlled_spider_mixin.py +80 -0
  89. tests/test_enhanced_error_handler_comprehensive.py +246 -0
  90. tests/test_factories.py +253 -0
  91. tests/test_framework_logger.py +67 -0
  92. tests/test_framework_startup.py +65 -0
  93. tests/test_large_scale_config.py +113 -0
  94. tests/test_large_scale_helper.py +236 -0
  95. tests/test_mode_change.py +73 -0
  96. tests/test_mode_consistency.py +1 -1
  97. tests/test_performance_monitor.py +116 -0
  98. tests/test_queue_empty_check.py +42 -0
  99. tests/untested_features_report.md +139 -0
  100. tests/verify_debug.py +52 -0
  101. tests/verify_log_fix.py +112 -0
  102. tests/DOUBLE_CRAWLO_PREFIX_FIX_REPORT.md +0 -82
  103. {crawlo-1.3.2.dist-info → crawlo-1.3.4.dist-info}/WHEEL +0 -0
  104. {crawlo-1.3.2.dist-info → crawlo-1.3.4.dist-info}/entry_points.txt +0 -0
  105. {crawlo-1.3.2.dist-info → crawlo-1.3.4.dist-info}/top_level.txt +0 -0
crawlo/templates/project/settings_simple.py.tmpl
@@ -1,102 +1,165 @@
  # -*- coding: UTF-8 -*-
  """
- Simplified-mode configuration template
- Minimal configuration, suitable for quick starts and simple projects
+ {{project_name}} project configuration (simplified)
+ =============================
+ Simplified crawler project configuration based on the Crawlo framework.
+ Suitable for quick starts and simple projects.
  """

  # ============================== Project basics ==============================
  PROJECT_NAME = '{{project_name}}'

- # ============================== Simplified run mode ==============================
- # Run mode: 'standalone', 'distributed', or 'auto' (auto-detect)
- RUN_MODE = 'standalone'  # standalone mode - for development and small-scale crawling
+ # ============================== Run mode ==============================
+ RUN_MODE = 'standalone'

- # Concurrency
- CONCURRENCY = 4  # low concurrency to reduce resource usage
- DOWNLOAD_DELAY = 1.0  # longer delay to reduce load on the target site
+ # ============================== Concurrency ==============================
+ CONCURRENCY = 4
+ MAX_RUNNING_SPIDERS = 1
+ DOWNLOAD_DELAY = 1.0

- # ============================== Queue ==============================
-
- # Note: the framework ships default queue settings; they usually need no changes.
- # To customize, uncomment and edit the values below.
-
- # Queue type: 'auto', 'memory', or 'redis' (distributed)
- # QUEUE_TYPE = 'auto'  # auto-detect; uses the Redis queue when Redis is available
- # SCHEDULER_MAX_QUEUE_SIZE = 1000
- # SCHEDULER_QUEUE_NAME = f'crawlo:{{project_name}}:queue:requests'
- # QUEUE_MAX_RETRIES = 3
- # QUEUE_TIMEOUT = 300
+ # ============================== Downloader ==============================
+ # Available downloaders:
+ # DOWNLOADER = 'crawlo.downloader.aiohttp_downloader.AioHttpDownloader'
+ # DOWNLOADER = 'crawlo.downloader.httpx_downloader.HttpXDownloader'
+ # DOWNLOADER = 'crawlo.downloader.cffi_downloader.CurlCffiDownloader'
+ DOWNLOADER = 'crawlo.downloader.aiohttp_downloader.AioHttpDownloader'

- # ============================== Dedup filtering ==============================
+ # ============================== Queue ==============================
+ QUEUE_TYPE = 'memory'

- # Note: the framework ships default dedup settings; they usually need no changes.
- # To customize, uncomment and edit the values below.
+ # ============================== Dedup filter ==============================
+ FILTER_CLASS = 'crawlo.filters.memory_filter.MemoryFilter'

- # Simplified mode uses the in-memory dedup pipeline and filter
- # DEFAULT_DEDUP_PIPELINE = 'crawlo.pipelines.memory_dedup_pipeline.MemoryDedupPipeline'
- # FILTER_CLASS = 'crawlo.filters.memory_filter.MemoryFilter'
+ # ============================== Default dedup pipeline ==============================
+ DEFAULT_DEDUP_PIPELINE = 'crawlo.pipelines.memory_dedup_pipeline.MemoryDedupPipeline'

- # --- Redis settings (for distributed dedup and queues) ---
- # REDIS_HOST = '127.0.0.1'
- # REDIS_PORT = 6379
- # REDIS_PASSWORD = ''  # fill in if a password is set
+ # ============================== Spider modules ==============================
+ SPIDER_MODULES = ['{{project_name}}.spiders']

- # Build the URL depending on whether a password is set
- # if REDIS_PASSWORD:
- #     REDIS_URL = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/0'
- # else:
- #     REDIS_URL = f'redis://{REDIS_HOST}:{REDIS_PORT}/0'
-
- # Redis key settings have moved into the individual components and follow a unified naming scheme:
- # crawlo:{project_name}:filter:fingerprint (request dedup)
- # crawlo:{project_name}:item:fingerprint (item dedup)
- # crawlo:{project_name}:queue:requests (request queue)
- # crawlo:{project_name}:queue:processing (in-progress queue)
- # crawlo:{project_name}:queue:failed (failed queue)
-
- # REDIS_TTL = 0
- # CLEANUP_FP = 0
- # FILTER_DEBUG = True
- # DECODE_RESPONSES = True
-
- # ============================== Domain filtering ==============================
- # OffsiteMiddleware settings that restrict crawling to the listed domains.
- # To enable domain filtering, uncomment and list the allowed domains.
- # ALLOWED_DOMAINS = ['example.com', 'www.example.com']
-
- # ============================== User-defined middlewares ==============================
- # Note: the framework's default middlewares are loaded automatically; add or override them here.
-
- # Middleware list (framework defaults + user-defined)
+ # ============================== Middlewares ==============================
  # MIDDLEWARES = [
- #     '{{project_name}}.middlewares.CustomMiddleware',  # example custom middleware
+ #     'crawlo.middleware.simple_proxy.SimpleProxyMiddleware',
  # ]

- # ============================== User-defined item pipelines ==============================
- # Note: the framework's default pipelines are loaded automatically; add or override them here.
+ # ============================== Default request headers ==============================
+ # Default headers for DefaultHeaderMiddleware
+ DEFAULT_REQUEST_HEADERS = {
+     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+     'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
+     'Accept-Encoding': 'gzip, deflate, br',
+ }
+
+ # ============================== Allowed domains ==============================
+ # Allowed domains for OffsiteMiddleware
+ # ALLOWED_DOMAINS = ['example.com']

- # Pipeline list (framework defaults + user-defined)
+ # ============================== Item pipelines ==============================
  # PIPELINES = [
- #     '{{project_name}}.pipelines.DatabasePipeline',  # custom database pipeline
- #     'crawlo.pipelines.mysql_pipeline.AsyncmyMySQLPipeline',  # MySQL storage
- #     'crawlo.pipelines.mongo_pipeline.MongoPipeline',  # MongoDB storage
+ #     'crawlo.pipelines.mysql_pipeline.AsyncmyMySQLPipeline',  # MySQL storage (async, via asyncmy)
  # ]

- # ============================== User-defined extensions ==============================
- # Note: the framework's default extensions are loaded automatically; add or override them here.
-
- # Extension list (framework defaults + user-defined)
+ # ============================== Extensions ==============================
  # EXTENSIONS = [
- #     'crawlo.extension.memory_monitor.MemoryMonitorExtension',  # memory monitoring
- #     'crawlo.extension.request_recorder.RequestRecorderExtension',  # request recording
- #     'crawlo.extension.performance_profiler.PerformanceProfilerExtension',  # performance profiling
- #     'crawlo.extension.health_check.HealthCheckExtension',  # health check
+ #     'crawlo.extension.log_interval.LogIntervalExtension',
+ #     'crawlo.extension.log_stats.LogStats',
+ #     'crawlo.extension.logging_extension.CustomLoggerExtension',
  # ]

  # ============================== Logging ==============================
-
  LOG_LEVEL = 'INFO'
+ LOG_FILE = 'logs/{{project_name}}.log'
+ LOG_ENCODING = 'utf-8'  # explicit log-file encoding
  STATS_DUMP = True
- LOG_FILE = f'logs/{{project_name}}.log'
- LOG_FORMAT = '%(asctime)s - [%(name)s] - %(levelname)s: %(message)s'
- LOG_ENCODING = 'utf-8'
+
+ # ============================== Output ==============================
+ OUTPUT_DIR = 'output'
+
+ # ============================== Redis ==============================
+ REDIS_HOST = '127.0.0.1'
+ REDIS_PORT = 6379
+ REDIS_PASSWORD = ''
+ REDIS_DB = 0
+
+ # Build the URL depending on whether a password is set
+ if REDIS_PASSWORD:
+     REDIS_URL = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'
+ else:
+     REDIS_URL = f'redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'
+
+ # ============================== MySQL ==============================
+ MYSQL_HOST = '127.0.0.1'
+ MYSQL_PORT = 3306
+ MYSQL_USER = 'root'
+ MYSQL_PASSWORD = '123456'
+ MYSQL_DB = '{{project_name}}'
+ MYSQL_TABLE = '{{project_name}}_data'
+ MYSQL_BATCH_SIZE = 100
+ MYSQL_USE_BATCH = False  # enable batch inserts
+
+ # ============================== MongoDB ==============================
+ MONGO_URI = 'mongodb://localhost:27017'
+ MONGO_DATABASE = '{{project_name}}_db'
+ MONGO_COLLECTION = '{{project_name}}_items'
+ MONGO_MAX_POOL_SIZE = 200
+ MONGO_MIN_POOL_SIZE = 20
+ MONGO_BATCH_SIZE = 100  # batch insert size
+ MONGO_USE_BATCH = False  # enable batch inserts
+
+ # ============================== Proxy ==============================
+ # Proxy support is disabled by default; enable and configure it in the project settings when needed.
+ PROXY_ENABLED = False  # enable proxying
+
+ # Simple proxy settings (for SimpleProxyMiddleware)
+ PROXY_LIST = []  # proxy list, e.g. ["http://proxy1:8080", "http://proxy2:8080"]
+
+ # Advanced proxy settings (for ProxyMiddleware)
+ PROXY_API_URL = ""  # proxy provisioning API (replace with a real endpoint)
+
+ # Proxy extraction (field path or callable)
+ # e.g. "proxy" for {"proxy": "http://1.1.1.1:8080"}
+ # e.g. "data.proxy" for {"data": {"proxy": "http://1.1.1.1:8080"}}
+ PROXY_EXTRACTOR = "proxy"
+
+ # Proxy refresh control
+ PROXY_REFRESH_INTERVAL = 60  # proxy refresh interval (seconds)
+ PROXY_API_TIMEOUT = 10  # proxy API request timeout
+
+ # ============================== Curl-Cffi specific settings ==============================
+ # Browser fingerprint emulation (CurlCffi downloader only)
+ CURL_BROWSER_TYPE = "chrome"  # options: chrome, edge, safari, firefox, or a version such as chrome136
+
+ # Custom browser version map (overrides the default behaviour)
+ CURL_BROWSER_VERSION_MAP = {
+     "chrome": "chrome136",
+     "edge": "edge101",
+     "safari": "safari184",
+     "firefox": "firefox135",
+ }
+
+ # ============================== Downloader tuning ==============================
+ # Downloader health check
+ DOWNLOADER_HEALTH_CHECK = True  # enable downloader health checks
+ HEALTH_CHECK_INTERVAL = 60  # health-check interval (seconds)
+
+ # Request statistics
+ REQUEST_STATS_ENABLED = True  # enable request statistics
+ STATS_RESET_ON_START = False  # reset statistics at startup
+
+ # HttpX downloader settings
+ HTTPX_HTTP2 = True  # enable HTTP/2
+ HTTPX_FOLLOW_REDIRECTS = True  # follow redirects automatically
+
+ # AioHttp downloader settings
+ AIOHTTP_AUTO_DECOMPRESS = True  # decompress responses automatically
+ AIOHTTP_FORCE_CLOSE = False  # force-close connections
+
+ # General tuning
+ CONNECTION_TTL_DNS_CACHE = 300  # DNS cache TTL (seconds)
+ CONNECTION_KEEPALIVE_TIMEOUT = 15  # keep-alive timeout (seconds)
+
+ # ============================== Memory monitoring ==============================
+ # The memory-monitor extension is disabled by default; enable it in the project settings when needed.
+ MEMORY_MONITOR_ENABLED = False  # enable memory monitoring
+ MEMORY_MONITOR_INTERVAL = 60  # monitoring interval (seconds)
+ MEMORY_WARNING_THRESHOLD = 80.0  # memory-usage warning threshold (percent)
+ MEMORY_CRITICAL_THRESHOLD = 90.0  # memory-usage critical threshold (percent)
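
For orientation, here is a minimal sketch of how the rewritten template resolves once rendered, assuming a hypothetical project named news_crawler (the name is illustrative; crawlo's project generator fills in the {{project_name}} placeholder):

# Rendered excerpt of settings_simple.py.tmpl for a hypothetical project
# named "news_crawler" (illustrative values only).
PROJECT_NAME = 'news_crawler'
SPIDER_MODULES = ['news_crawler.spiders']
LOG_FILE = 'logs/news_crawler.log'

REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
REDIS_PASSWORD = ''
REDIS_DB = 0

# With no password set, the template's if/else produces the short URL form:
# REDIS_URL == 'redis://127.0.0.1:6379/0'
if REDIS_PASSWORD:
    REDIS_URL = f'redis://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'
else:
    REDIS_URL = f'redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'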
crawlo/templates/project/spiders/__init__.py.tmpl
@@ -3,4 +3,8 @@
  {{project_name}}.spiders
  ========================
  Holds all spiders.
- """
+ """
+
+ # Automatically import all spiders so that they get registered
+ # Example:
+ # from .YourSpider import YourSpider
crawlo/templates/run.py.tmpl
@@ -1,9 +1,12 @@
- #!/usr/bin/env python
+ #!/usr/bin/env python3
  # -*- coding: UTF-8 -*-
  """
  {{project_name}} project run script
  ============================
  A simplified crawler launcher based on the Crawlo framework.
+
+ The framework imports and registers spider modules automatically; no manual imports are needed.
+ Just pass the spider_modules parameter and the framework will scan and import every spider.
  """
  import sys
  import asyncio
@@ -12,22 +15,15 @@ from crawlo.crawler import CrawlerProcess


  def main():
-     """Main entry point: run a fixed spider."""
-     print("🚀 Starting the {{project_name}} crawler")
-
-     # Create the crawler process (default settings load automatically)
+     """Main entry point: run the spider."""
      try:
-         # Make sure the spider modules are imported correctly
+         # Point at the spider module path; the framework imports and registers every spider automatically
          spider_modules = ['{{project_name}}.spiders']
          process = CrawlerProcess(spider_modules=spider_modules)
-         print("✅ Crawler process initialised")
-
-         # Run the fixed spider
-         # TODO: replace 'your_spider_name' with the actual spider name
-         asyncio.run(process.crawl('your_spider_name'))
-
-         print("✅ Crawl finished")
-
+
+         # Run the specified spider
+         asyncio.run(process.crawl('{{spider_name}}'))
+
      except Exception as e:
          print(f"❌ Run failed: {e}")
          import traceback
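
For reference, a rendered run.py along these lines is what the template produces for a hypothetical project news_crawler with a spider named news (names are illustrative; the __main__ guard is assumed, since it sits outside the hunk shown above):

#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import asyncio

from crawlo.crawler import CrawlerProcess


def main():
    try:
        # The framework scans and registers every spider in these modules.
        process = CrawlerProcess(spider_modules=['news_crawler.spiders'])
        asyncio.run(process.crawl('news'))
    except Exception as e:
        print(f"❌ Run failed: {e}")


if __name__ == '__main__':
    main()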
crawlo/templates/spiders_init.py.tmpl
@@ -0,0 +1,10 @@
+ # -*- coding: UTF-8 -*-
+ """
+ {{project_name}}.spiders
+ ========================
+ Holds all spiders.
+
+ This file supports automatic import of all spider modules so that spiders get registered correctly.
+ The framework scans this directory and imports every Python file except __init__.py.
+ """
+ # The framework imports spider modules automatically; no manual imports are needed
crawlo/tools/network_diagnostic.py
@@ -0,0 +1,365 @@
+ #!/usr/bin/python
+ # -*- coding: UTF-8 -*-
+ """
+ Network diagnostic tool
+ Diagnoses network connectivity problems and suggests fixes.
+ """
+
+ import asyncio
+ import socket
+ import time
+ from typing import Any, Dict, List, Optional, Tuple
+ from urllib.parse import urlparse
+
+ import aiohttp
+ from crawlo.utils.log import get_logger
+
+
+ class NetworkDiagnostic:
+     """Network diagnostic helper."""
+
+     def __init__(self):
+         self.logger = get_logger(self.__class__.__name__)
+         self._dns_cache: Dict[str, str] = {}
+
+     async def diagnose_url(self, url: str) -> Dict[str, Any]:
+         """
+         Diagnose network connectivity problems for a URL.
+
+         Args:
+             url: the URL to diagnose
+
+         Returns:
+             A dict with the diagnostic results.
+         """
+         parsed = urlparse(url)
+         hostname = parsed.hostname
+         port = parsed.port or (443 if parsed.scheme == 'https' else 80)
+
+         result = {
+             'url': url,
+             'hostname': hostname,
+             'port': port,
+             'dns_resolution': None,
+             'tcp_connection': None,
+             'http_response': None,
+             'recommendations': []
+         }
+
+         # DNS resolution test
+         dns_result = await self._test_dns_resolution(hostname)
+         result['dns_resolution'] = dns_result
+
+         if dns_result['success']:
+             # TCP connection test
+             tcp_result = await self._test_tcp_connection(hostname, port)
+             result['tcp_connection'] = tcp_result
+
+             if tcp_result['success']:
+                 # HTTP response test
+                 http_result = await self._test_http_response(url)
+                 result['http_response'] = http_result
+
+         # Generate recommendations
+         result['recommendations'] = self._generate_recommendations(result)
+
+         return result
+
+     async def _test_dns_resolution(self, hostname: str) -> Dict[str, Any]:
+         """Test DNS resolution."""
+         try:
+             start_time = time.time()
+
+             # Resolve via asyncio's DNS lookup
+             loop = asyncio.get_event_loop()
+             addr_info = await loop.getaddrinfo(hostname, None)
+
+             resolution_time = time.time() - start_time
+             ip_addresses = list(set([addr[4][0] for addr in addr_info]))
+
+             # Cache the DNS result
+             if ip_addresses:
+                 self._dns_cache[hostname] = ip_addresses[0]
+
+             return {
+                 'success': True,
+                 'ip_addresses': ip_addresses,
+                 'resolution_time': resolution_time,
+                 'error': None
+             }
+
+         except socket.gaierror as e:
+             return {
+                 'success': False,
+                 'ip_addresses': [],
+                 'resolution_time': None,
+                 'error': {
+                     'type': 'DNSError',
+                     'code': e.errno,
+                     'message': str(e)
+                 }
+             }
+         except Exception as e:
+             return {
+                 'success': False,
+                 'ip_addresses': [],
+                 'resolution_time': None,
+                 'error': {
+                     'type': type(e).__name__,
+                     'message': str(e)
+                 }
+             }
+
+     async def _test_tcp_connection(self, hostname: str, port: int) -> Dict[str, Any]:
+         """Test the TCP connection."""
+         try:
+             start_time = time.time()
+
+             # Attempt a TCP connection
+             reader, writer = await asyncio.wait_for(
+                 asyncio.open_connection(hostname, port),
+                 timeout=10.0
+             )
+
+             connection_time = time.time() - start_time
+
+             # Close the connection
+             writer.close()
+             await writer.wait_closed()
+
+             return {
+                 'success': True,
+                 'connection_time': connection_time,
+                 'error': None
+             }
+
+         except asyncio.TimeoutError:
+             return {
+                 'success': False,
+                 'connection_time': None,
+                 'error': {
+                     'type': 'TimeoutError',
+                     'message': 'Connection timeout'
+                 }
+             }
+         except Exception as e:
+             return {
+                 'success': False,
+                 'connection_time': None,
+                 'error': {
+                     'type': type(e).__name__,
+                     'message': str(e)
+                 }
+             }
+
+     async def _test_http_response(self, url: str) -> Dict[str, Any]:
+         """Test the HTTP response."""
+         try:
+             start_time = time.time()
+
+             timeout = aiohttp.ClientTimeout(total=30, connect=10)
+             async with aiohttp.ClientSession(timeout=timeout) as session:
+                 async with session.get(url) as response:
+                     body = await response.read()
+                     response_time = time.time() - start_time
+
+                     return {
+                         'success': True,
+                         'url': url,
+                         'status_code': response.status,  # aiohttp exposes the HTTP status as `status`
+                         'response_time': response_time,
+                         'content_length': len(body),
+                         'headers': dict(response.headers)
+                     }
+
+         except aiohttp.ClientError as e:
+             return {
+                 'success': False,
+                 'status_code': None,
+                 'response_time': None,
+                 'headers': {},
+                 'error': {
+                     'type': type(e).__name__,
+                     'message': str(e)
+                 }
+             }
+         except Exception as e:
+             return {
+                 'success': False,
+                 'status_code': None,
+                 'response_time': None,
+                 'headers': {},
+                 'error': {
+                     'type': type(e).__name__,
+                     'message': str(e)
+                 }
+             }
+
+     def _generate_recommendations(self, result: Dict[str, Any]) -> List[str]:
+         """Generate recommendations from the diagnostic results."""
+         recommendations = []
+
+         dns_result = result.get('dns_resolution') or {}
+         tcp_result = result.get('tcp_connection') or {}
+         http_result = result.get('http_response') or {}
+
+         # DNS recommendations
+         if not dns_result.get('success'):
+             error = dns_result.get('error', {})
+             if error.get('code') == 8:  # nodename nor servname provided, or not known
+                 recommendations.extend([
+                     "DNS resolution failed - check that the domain name is correct",
+                     "Check that the network connection is working",
+                     "Try a different DNS server (e.g. 8.8.8.8 or 1.1.1.1)",
+                     "Check the local hosts file for related entries",
+                     "Confirm that the domain is reachable from outside"
+                 ])
+             elif error.get('code') == 2:  # Name or service not known
+                 recommendations.extend([
+                     "The domain does not exist or cannot be resolved",
+                     "Check the domain spelling",
+                     "Confirm the domain is registered and has DNS records"
+                 ])
+
+         # TCP recommendations
+         elif not tcp_result.get('success'):
+             error = tcp_result.get('error', {})
+             if error.get('type') == 'TimeoutError':
+                 recommendations.extend([
+                     "TCP connection timed out - the server may be unresponsive",
+                     "Check whether a firewall is blocking the connection",
+                     "Try increasing the connection timeout",
+                     "Check the proxy settings"
+                 ])
+
+         # HTTP recommendations
+         elif not http_result.get('success'):
+             error = http_result.get('error', {})
+             recommendations.extend([
+                 f"HTTP request failed: {error.get('message', 'Unknown error')}",
+                 "Check that the URL is correct",
+                 "Confirm the server is up and running"
+             ])
+
+         # Performance recommendations
+         if dns_result.get('success') and dns_result.get('resolution_time', 0) > 1.0:
+             recommendations.append("DNS resolution is slow; consider DNS caching or a faster DNS server")
+
+         if tcp_result.get('success') and tcp_result.get('connection_time', 0) > 2.0:
+             recommendations.append("TCP connection is slow; there may be network latency issues")
+
+         if http_result.get('success') and http_result.get('response_time', 0) > 5.0:
+             recommendations.append("HTTP response is slow; the server may be under heavy load")
+
+         return recommendations
+
+     async def batch_diagnose(self, urls: List[str]) -> Dict[str, Dict[str, Any]]:
+         """Diagnose several URLs in one batch."""
+         tasks = []
+         for url in urls:
+             task = asyncio.create_task(self.diagnose_url(url))
+             tasks.append((url, task))
+
+         results = {}
+         for url, task in tasks:
+             try:
+                 result = await task
+                 results[url] = result
+             except Exception as e:
+                 results[url] = {
+                     'url': url,
+                     'error': f"Diagnosis failed: {e}",
+                     'recommendations': ["The diagnostic tool itself failed; check the network environment"]
+                 }
+
+         return results
+
+     def format_diagnostic_report(self, result: Dict[str, Any]) -> str:
+         """Format a diagnostic report."""
+         lines = [
+             "=== Network Diagnostic Report ===",
+             f"URL: {result['url']}",
+             f"Host: {result['hostname']}:{result['port']}",
+             ""
+         ]
+
+         # DNS resolution result
+         dns = result.get('dns_resolution') or {}
+         if dns.get('success'):
+             lines.extend([
+                 "✅ DNS resolution: OK",
+                 f"   IP addresses: {', '.join(dns['ip_addresses'])}",
+                 f"   Resolution time: {dns['resolution_time']:.3f}s"
+             ])
+         else:
+             error = dns.get('error', {})
+             lines.extend([
+                 "❌ DNS resolution: failed",
+                 f"   Error type: {error.get('type', 'Unknown')}",
+                 f"   Error message: {error.get('message', 'Unknown error')}"
+             ])
+
+         lines.append("")
+
+         # TCP connection result
+         tcp = result.get('tcp_connection') or {}
+         if tcp and tcp.get('success'):
+             lines.extend([
+                 "✅ TCP connection: OK",
+                 f"   Connection time: {tcp['connection_time']:.3f}s"
+             ])
+         elif tcp:
+             error = tcp.get('error', {})
+             lines.extend([
+                 "❌ TCP connection: failed",
+                 f"   Error type: {error.get('type', 'Unknown')}",
+                 f"   Error message: {error.get('message', 'Unknown error')}"
+             ])
+
+         lines.append("")
+
+         # HTTP response result
+         http = result.get('http_response') or {}
+         if http and http.get('success'):
+             lines.extend([
+                 "✅ HTTP response: OK",
+                 f"   Status code: {http['status_code']}",
+                 f"   Response time: {http['response_time']:.3f}s"
+             ])
+         elif http:
+             error = http.get('error', {})
+             lines.extend([
+                 "❌ HTTP response: failed",
+                 f"   Error type: {error.get('type', 'Unknown')}",
+                 f"   Error message: {error.get('message', 'Unknown error')}"
+             ])
+
+         # Recommendations
+         recommendations = result.get('recommendations', [])
+         if recommendations:
+             lines.extend([
+                 "",
+                 "🔧 Recommendations:",
+             ])
+             for i, rec in enumerate(recommendations, 1):
+                 lines.append(f"   {i}. {rec}")
+
+         return "\n".join(lines)
+
+
+ # Convenience functions
+ async def diagnose_url(url: str) -> Dict[str, Any]:
+     """Diagnose network problems for a single URL."""
+     diagnostic = NetworkDiagnostic()
+     return await diagnostic.diagnose_url(url)
+
+
+ async def diagnose_urls(urls: List[str]) -> Dict[str, Dict[str, Any]]:
+     """Diagnose network problems for a batch of URLs."""
+     diagnostic = NetworkDiagnostic()
+     return await diagnostic.batch_diagnose(urls)
+
+
+ def format_report(result: Dict[str, Any]) -> str:
+     """Format a diagnostic report."""
+     diagnostic = NetworkDiagnostic()
+     return diagnostic.format_diagnostic_report(result)
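
The module-level convenience functions make the new tool usable outside the framework. A minimal sketch of calling it from a script (the URL is arbitrary; the import path follows the file's location, crawlo/tools/network_diagnostic.py):

import asyncio

from crawlo.tools.network_diagnostic import diagnose_url, format_report


async def main():
    # Runs the DNS, TCP, and HTTP checks for one URL and prints the report.
    result = await diagnose_url("https://example.com")
    print(format_report(result))


asyncio.run(main())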