gitinstall-1.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. gitinstall/__init__.py +61 -0
  2. gitinstall/_sdk.py +541 -0
  3. gitinstall/academic.py +831 -0
  4. gitinstall/admin.html +327 -0
  5. gitinstall/auto_update.py +384 -0
  6. gitinstall/autopilot.py +349 -0
  7. gitinstall/badge.py +476 -0
  8. gitinstall/checkpoint.py +330 -0
  9. gitinstall/cicd.py +499 -0
  10. gitinstall/clawhub.html +718 -0
  11. gitinstall/config_schema.py +353 -0
  12. gitinstall/db.py +984 -0
  13. gitinstall/db_backend.py +445 -0
  14. gitinstall/dep_chain.py +337 -0
  15. gitinstall/dependency_audit.py +1153 -0
  16. gitinstall/detector.py +542 -0
  17. gitinstall/doctor.py +493 -0
  18. gitinstall/education.py +869 -0
  19. gitinstall/enterprise.py +802 -0
  20. gitinstall/error_fixer.py +953 -0
  21. gitinstall/event_bus.py +251 -0
  22. gitinstall/executor.py +577 -0
  23. gitinstall/feature_flags.py +138 -0
  24. gitinstall/fetcher.py +921 -0
  25. gitinstall/huggingface.py +922 -0
  26. gitinstall/hw_detect.py +988 -0
  27. gitinstall/i18n.py +664 -0
  28. gitinstall/installer_registry.py +362 -0
  29. gitinstall/knowledge_base.py +379 -0
  30. gitinstall/license_check.py +605 -0
  31. gitinstall/llm.py +569 -0
  32. gitinstall/log.py +236 -0
  33. gitinstall/main.py +1408 -0
  34. gitinstall/mcp_agent.py +841 -0
  35. gitinstall/mcp_server.py +386 -0
  36. gitinstall/monorepo.py +810 -0
  37. gitinstall/multi_source.py +425 -0
  38. gitinstall/onboard.py +276 -0
  39. gitinstall/planner.py +222 -0
  40. gitinstall/planner_helpers.py +323 -0
  41. gitinstall/planner_known_projects.py +1010 -0
  42. gitinstall/planner_templates.py +996 -0
  43. gitinstall/remote_gpu.py +633 -0
  44. gitinstall/resilience.py +608 -0
  45. gitinstall/run_tests.py +572 -0
  46. gitinstall/skills.py +476 -0
  47. gitinstall/tool_schemas.py +324 -0
  48. gitinstall/trending.py +279 -0
  49. gitinstall/uninstaller.py +415 -0
  50. gitinstall/validate_top100.py +607 -0
  51. gitinstall/watchdog.py +180 -0
  52. gitinstall/web.py +1277 -0
  53. gitinstall/web_ui.html +2277 -0
  54. gitinstall-1.1.0.dist-info/METADATA +275 -0
  55. gitinstall-1.1.0.dist-info/RECORD +59 -0
  56. gitinstall-1.1.0.dist-info/WHEEL +5 -0
  57. gitinstall-1.1.0.dist-info/entry_points.txt +3 -0
  58. gitinstall-1.1.0.dist-info/licenses/LICENSE +21 -0
  59. gitinstall-1.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,988 @@
+ """
+ hw_detect.py - AI hardware detection and recommendation engine
+ ==============================================================
+
+ Core technical-moat module: domain knowledge that competitors cannot easily replicate.
+
+ Features:
+ 1. Deep GPU analysis: model, VRAM, driver version, compute capability
+ 2. AI framework compatibility matrix: PyTorch/TF version ↔ CUDA ↔ GPU driver
+ 3. Smart VRAM recommendations: pick a quantization scheme (Q4/Q8/FP16) for the available memory
+ 4. Model adapter: given a model's parameter count → recommend the best way to run it
+ 5. Install success prediction: hardware + project traits → estimated success probability
+
+ Design principles:
+ - Zero external dependencies (pure standard library)
+ - Cross-platform (macOS/Linux/Windows)
+ - Detection results are cacheable (120-second TTL)
+ """
+
+ from __future__ import annotations
+
+ import os
+ import platform
+ import re
+ import shutil
+ import subprocess
+ import time
+ from pathlib import Path
+ from typing import Optional
+
+ # ─────────────────────────────────────────────
+ # GPU VRAM database (core domain knowledge)
+ # ─────────────────────────────────────────────
+
+ # NVIDIA GPU → VRAM (GB) + compute capability
+ # Sources: official NVIDIA specs + hands-on verification
+ _NVIDIA_VRAM_DB: dict[str, dict] = {
+     # RTX 50 series (Blackwell)
+     "RTX 5090": {"vram_gb": 32, "compute": "10.0", "gen": "blackwell"},
+     "RTX 5080": {"vram_gb": 16, "compute": "10.0", "gen": "blackwell"},
+     "RTX 5070 Ti": {"vram_gb": 16, "compute": "10.0", "gen": "blackwell"},
+     "RTX 5070": {"vram_gb": 12, "compute": "10.0", "gen": "blackwell"},
+     "RTX 5060 Ti": {"vram_gb": 16, "compute": "10.0", "gen": "blackwell"},
+     "RTX 5060": {"vram_gb": 8, "compute": "10.0", "gen": "blackwell"},
+     # RTX 40 series (Ada Lovelace)
+     "RTX 4090": {"vram_gb": 24, "compute": "8.9", "gen": "ada"},
+     "RTX 4080 SUPER": {"vram_gb": 16, "compute": "8.9", "gen": "ada"},
+     "RTX 4080": {"vram_gb": 16, "compute": "8.9", "gen": "ada"},
+     "RTX 4070 Ti SUPER": {"vram_gb": 16, "compute": "8.9", "gen": "ada"},
+     "RTX 4070 Ti": {"vram_gb": 12, "compute": "8.9", "gen": "ada"},
+     "RTX 4070 SUPER": {"vram_gb": 12, "compute": "8.9", "gen": "ada"},
+     "RTX 4070": {"vram_gb": 12, "compute": "8.9", "gen": "ada"},
+     "RTX 4060 Ti": {"vram_gb": 8, "compute": "8.9", "gen": "ada"},
+     "RTX 4060": {"vram_gb": 8, "compute": "8.9", "gen": "ada"},
+     # RTX 30 series (Ampere)
+     "RTX 3090 Ti": {"vram_gb": 24, "compute": "8.6", "gen": "ampere"},
+     "RTX 3090": {"vram_gb": 24, "compute": "8.6", "gen": "ampere"},
+     "RTX 3080 Ti": {"vram_gb": 12, "compute": "8.6", "gen": "ampere"},
+     "RTX 3080": {"vram_gb": 10, "compute": "8.6", "gen": "ampere"},
+     "RTX 3070 Ti": {"vram_gb": 8, "compute": "8.6", "gen": "ampere"},
+     "RTX 3070": {"vram_gb": 8, "compute": "8.6", "gen": "ampere"},
+     "RTX 3060 Ti": {"vram_gb": 8, "compute": "8.6", "gen": "ampere"},
+     "RTX 3060": {"vram_gb": 12, "compute": "8.6", "gen": "ampere"},
+     "RTX 3050": {"vram_gb": 8, "compute": "8.6", "gen": "ampere"},
+     # RTX 20 series (Turing)
+     "RTX 2080 Ti": {"vram_gb": 11, "compute": "7.5", "gen": "turing"},
+     "RTX 2080 SUPER": {"vram_gb": 8, "compute": "7.5", "gen": "turing"},
+     "RTX 2080": {"vram_gb": 8, "compute": "7.5", "gen": "turing"},
+     "RTX 2070 SUPER": {"vram_gb": 8, "compute": "7.5", "gen": "turing"},
+     "RTX 2070": {"vram_gb": 8, "compute": "7.5", "gen": "turing"},
+     "RTX 2060 SUPER": {"vram_gb": 8, "compute": "7.5", "gen": "turing"},
+     "RTX 2060": {"vram_gb": 6, "compute": "7.5", "gen": "turing"},
+     # GTX 16 series (Turing, no RT)
+     "GTX 1660 Ti": {"vram_gb": 6, "compute": "7.5", "gen": "turing"},
+     "GTX 1660 SUPER": {"vram_gb": 6, "compute": "7.5", "gen": "turing"},
+     "GTX 1660": {"vram_gb": 6, "compute": "7.5", "gen": "turing"},
+     "GTX 1650 SUPER": {"vram_gb": 4, "compute": "7.5", "gen": "turing"},
+     "GTX 1650": {"vram_gb": 4, "compute": "7.5", "gen": "turing"},
+     # GTX 10 series (Pascal)
+     "GTX 1080 Ti": {"vram_gb": 11, "compute": "6.1", "gen": "pascal"},
+     "GTX 1080": {"vram_gb": 8, "compute": "6.1", "gen": "pascal"},
+     "GTX 1070 Ti": {"vram_gb": 8, "compute": "6.1", "gen": "pascal"},
+     "GTX 1070": {"vram_gb": 8, "compute": "6.1", "gen": "pascal"},
+     "GTX 1060": {"vram_gb": 6, "compute": "6.1", "gen": "pascal"},
+     "GTX 1050 Ti": {"vram_gb": 4, "compute": "6.1", "gen": "pascal"},
+     "GTX 1050": {"vram_gb": 2, "compute": "6.1", "gen": "pascal"},
+     # Professional / data-center cards
+     "A100": {"vram_gb": 80, "compute": "8.0", "gen": "ampere"},
+     "A100 40GB": {"vram_gb": 40, "compute": "8.0", "gen": "ampere"},
+     "A6000": {"vram_gb": 48, "compute": "8.6", "gen": "ampere"},
+     "A5000": {"vram_gb": 24, "compute": "8.6", "gen": "ampere"},
+     "A4000": {"vram_gb": 16, "compute": "8.6", "gen": "ampere"},
+     "H100": {"vram_gb": 80, "compute": "9.0", "gen": "hopper"},
+     "H200": {"vram_gb": 141, "compute": "9.0", "gen": "hopper"},
+     "L40S": {"vram_gb": 48, "compute": "8.9", "gen": "ada"},
+     "L4": {"vram_gb": 24, "compute": "8.9", "gen": "ada"},
+     "T4": {"vram_gb": 16, "compute": "7.5", "gen": "turing"},
+     "V100": {"vram_gb": 16, "compute": "7.0", "gen": "volta"},
+     "V100 32GB": {"vram_gb": 32, "compute": "7.0", "gen": "volta"},
+     "P100": {"vram_gb": 16, "compute": "6.0", "gen": "pascal"},
+     # Laptop variants (mobile parts usually carry less VRAM)
+     "RTX 4090 Laptop": {"vram_gb": 16, "compute": "8.9", "gen": "ada"},
+     "RTX 4080 Laptop": {"vram_gb": 12, "compute": "8.9", "gen": "ada"},
+     "RTX 4070 Laptop": {"vram_gb": 8, "compute": "8.9", "gen": "ada"},
+     "RTX 4060 Laptop": {"vram_gb": 8, "compute": "8.9", "gen": "ada"},
+     "RTX 3080 Laptop": {"vram_gb": 8, "compute": "8.6", "gen": "ampere"},  # some SKUs have 16GB
+     "RTX 3070 Laptop": {"vram_gb": 8, "compute": "8.6", "gen": "ampere"},
+     "RTX 3060 Laptop": {"vram_gb": 6, "compute": "8.6", "gen": "ampere"},
+ }
+
+ # Apple Silicon unified memory (shared between CPU and GPU)
+ _APPLE_SILICON_DB: dict[str, dict] = {
+     # M4 series
+     "M4 Ultra": {"base_ram_gb": 192, "gpu_cores": 80, "gen": "m4"},
+     "M4 Max": {"base_ram_gb": 36, "gpu_cores": 40, "gen": "m4"},
+     "M4 Pro": {"base_ram_gb": 24, "gpu_cores": 20, "gen": "m4"},
+     "M4": {"base_ram_gb": 16, "gpu_cores": 10, "gen": "m4"},
+     # M3 series
+     "M3 Ultra": {"base_ram_gb": 192, "gpu_cores": 76, "gen": "m3"},
+     "M3 Max": {"base_ram_gb": 36, "gpu_cores": 40, "gen": "m3"},
+     "M3 Pro": {"base_ram_gb": 18, "gpu_cores": 14, "gen": "m3"},
+     "M3": {"base_ram_gb": 8, "gpu_cores": 10, "gen": "m3"},
+     # M2 series
+     "M2 Ultra": {"base_ram_gb": 192, "gpu_cores": 76, "gen": "m2"},
+     "M2 Max": {"base_ram_gb": 32, "gpu_cores": 38, "gen": "m2"},
+     "M2 Pro": {"base_ram_gb": 16, "gpu_cores": 19, "gen": "m2"},
+     "M2": {"base_ram_gb": 8, "gpu_cores": 8, "gen": "m2"},
+     # M1 series
+     "M1 Ultra": {"base_ram_gb": 128, "gpu_cores": 64, "gen": "m1"},
+     "M1 Max": {"base_ram_gb": 32, "gpu_cores": 32, "gen": "m1"},
+     "M1 Pro": {"base_ram_gb": 16, "gpu_cores": 16, "gen": "m1"},
+     "M1": {"base_ram_gb": 8, "gpu_cores": 8, "gen": "m1"},
+ }
+
+
+ # ─────────────────────────────────────────────
+ # CUDA ↔ PyTorch ↔ driver compatibility matrix
+ # ─────────────────────────────────────────────
+
+ # PyTorch version → recommended CUDA versions → minimum driver version
+ _PYTORCH_CUDA_MATRIX: list[dict] = [
+     {"pytorch": "2.6", "cuda": ["12.6", "12.4", "11.8"], "min_driver": "525.60"},
+     {"pytorch": "2.5", "cuda": ["12.4", "12.1", "11.8"], "min_driver": "525.60"},
+     {"pytorch": "2.4", "cuda": ["12.4", "12.1", "11.8"], "min_driver": "525.60"},
+     {"pytorch": "2.3", "cuda": ["12.1", "11.8"], "min_driver": "520.61"},
+     {"pytorch": "2.2", "cuda": ["12.1", "11.8"], "min_driver": "520.61"},
+     {"pytorch": "2.1", "cuda": ["12.1", "11.8"], "min_driver": "515.43"},
+     {"pytorch": "2.0", "cuda": ["11.8", "11.7"], "min_driver": "515.43"},
+     {"pytorch": "1.13", "cuda": ["11.7", "11.6"], "min_driver": "510.39"},
+ ]
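+
+ # How the matrix is read (hypothetical host): a detected CUDA 12.4 first
+ # appears in the PyTorch 2.6 row; the per-CUDA driver floor is checked
+ # separately against _CUDA_DRIVER_MAP below:
+ #
+ #     >>> row = next(e for e in _PYTORCH_CUDA_MATRIX if "12.4" in e["cuda"])
+ #     >>> row["pytorch"], row["min_driver"]
+ #     ('2.6', '525.60')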
+
+ # CUDA version → minimum NVIDIA driver
+ _CUDA_DRIVER_MAP: dict[str, str] = {
+     "12.6": "560.28",
+     "12.4": "550.54",
+     "12.3": "545.23",
+     "12.2": "535.54",
+     "12.1": "530.30",
+     "12.0": "525.60",
+     "11.8": "520.61",
+     "11.7": "515.43",
+     "11.6": "510.39",
+     "11.5": "495.29",
+ }
+
+
+ # ─────────────────────────────────────────────
+ # AI model VRAM requirement database
+ # ─────────────────────────────────────────────
+
+ # Model parameter count (billions) → VRAM needed (GB) per quantization
+ # Base formulas: FP16 ≈ params × 2, Q8 ≈ params × 1.1, Q4 ≈ params × 0.6,
+ # plus 20% runtime overhead
+ _MODEL_VRAM_FORMULA = {
+     "fp32": lambda params_b: params_b * 4 * 1.2,
+     "fp16": lambda params_b: params_b * 2 * 1.2,
+     "q8": lambda params_b: params_b * 1.1 * 1.2,
+     "q6_k": lambda params_b: params_b * 0.85 * 1.2,
+     "q5_k": lambda params_b: params_b * 0.72 * 1.2,
+     "q4_k": lambda params_b: params_b * 0.63 * 1.2,
+     "q4_0": lambda params_b: params_b * 0.6 * 1.2,
+     "q3_k": lambda params_b: params_b * 0.52 * 1.2,
+     "q2_k": lambda params_b: params_b * 0.42 * 1.2,
+ }
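+
+ # Worked example of the formulas for a hypothetical 8B-parameter model:
+ # FP16 needs 8 × 2 × 1.2 = 19.2 GB, while Q4_K needs 8 × 0.63 × 1.2 ≈ 6.0 GB,
+ # which is why 8B models are usually served quantized on consumer cards:
+ #
+ #     >>> round(_MODEL_VRAM_FORMULA["fp16"](8), 1)
+ #     19.2
+ #     >>> round(_MODEL_VRAM_FORMULA["q4_k"](8), 1)
+ #     6.0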
+
+ # Quick-reference parameter counts for common models
+ _KNOWN_MODEL_PARAMS: dict[str, float] = {
+     # LLaMA family
+     "llama-3.3-70b": 70, "llama-3.2-90b": 90,
+     "llama-3.1-405b": 405, "llama-3.1-70b": 70,
+     "llama-3.1-8b": 8, "llama-3-70b": 70, "llama-3-8b": 8,
+     "llama-2-70b": 70, "llama-2-13b": 13, "llama-2-7b": 7,
+     # Qwen family
+     "qwen3-235b": 235, "qwen3-32b": 32, "qwen3-14b": 14,
+     "qwen3-8b": 8, "qwen3-4b": 4, "qwen3-1.7b": 1.7, "qwen3-0.6b": 0.6,
+     "qwen2.5-72b": 72, "qwen2.5-32b": 32, "qwen2.5-14b": 14,
+     "qwen2.5-7b": 7, "qwen2.5-3b": 3,
+     # DeepSeek
+     "deepseek-r1": 671, "deepseek-r1-distill-qwen-32b": 32,
+     "deepseek-r1-distill-llama-70b": 70, "deepseek-r1-distill-llama-8b": 8,
+     "deepseek-v3": 671, "deepseek-v2.5": 236,
+     # Mistral / Mixtral
+     "mixtral-8x22b": 141, "mixtral-8x7b": 47,
+     "mistral-large": 123, "mistral-7b": 7,
+     # Gemma
+     "gemma-2-27b": 27, "gemma-2-9b": 9, "gemma-2-2b": 2,
+     # Phi
+     "phi-4": 14, "phi-3-14b": 14, "phi-3-7b": 7, "phi-3-mini": 3.8,
+     # Stable Diffusion (VRAM for inference; a different formula applies)
+     "sdxl": 6.5, "sd-1.5": 2.0, "sd-3": 8.0, "flux": 12.0,
+     # Whisper
+     "whisper-large-v3": 1.55, "whisper-medium": 0.77, "whisper-small": 0.24,
+ }
+
+
+ # ─────────────────────────────────────────────
+ # Low-level detection helpers
+ # ─────────────────────────────────────────────
+
+ def _run(cmd: list[str], timeout: int = 5) -> Optional[str]:
+     """Run a command and return its stdout, or None on failure."""
+     try:
+         result = subprocess.run(
+             cmd, capture_output=True, text=True, timeout=timeout,
+         )
+         return result.stdout.strip() if result.returncode == 0 else None
+     except (subprocess.TimeoutExpired, FileNotFoundError, PermissionError):
+         return None
+
+
+ def _run_any(cmd: list[str], timeout: int = 5) -> Optional[str]:
+     """Run a command and return combined stdout/stderr regardless of exit code."""
+     try:
+         result = subprocess.run(
+             cmd, capture_output=True, text=True, timeout=timeout,
+         )
+         return (result.stdout + result.stderr).strip() or None
+     except (subprocess.TimeoutExpired, FileNotFoundError, PermissionError):
+         return None
+
+
+ # ─────────────────────────────────────────────
+ # Deep GPU detection
+ # ─────────────────────────────────────────────
+
+ def detect_gpu_deep() -> dict:
+     """
+     Deep GPU detection; returns detailed hardware information.
+
+     Returns:
+         {
+             "type": "nvidia" | "apple_mps" | "amd_rocm" | "cpu_only",
+             "name": str,
+             "vram_gb": float | None,
+             "driver_version": str | None,
+             "cuda_version": str | None,
+             "compute_capability": str | None,
+             "gpu_gen": str | None,
+             "unified_memory": bool,
+             "total_ram_gb": float | None,
+             "mps_available": bool,
+         }
+     """
+     system = platform.system()
+     arch = platform.machine()
+
+     # Apple Silicon → MPS (unified memory)
+     if system == "Darwin" and arch == "arm64":
+         return _detect_apple_deep()
+
+     # NVIDIA
+     if shutil.which("nvidia-smi"):
+         result = _detect_nvidia_deep()
+         if result:
+             return result
+
+     # AMD ROCm (Linux)
+     if system == "Linux" and (shutil.which("rocm-smi") or Path("/opt/rocm").exists()):
+         result = _detect_amd_deep()
+         if result:
+             return result
+
+     return {
+         "type": "cpu_only",
+         "name": "No dedicated GPU",
+         "vram_gb": None,
+         "driver_version": None,
+         "cuda_version": None,
+         "compute_capability": None,
+         "gpu_gen": None,
+         "unified_memory": False,
+         "total_ram_gb": _get_ram_gb(),
+         "mps_available": False,
+     }
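+
+ # A sketch of a possible return value on a hypothetical RTX 4090 desktop
+ # (actual values vary by host; "cpu_only" is returned when no GPU path matches):
+ #
+ #     info = detect_gpu_deep()
+ #     # → {"type": "nvidia", "name": "NVIDIA GeForce RTX 4090",
+ #     #    "vram_gb": 24.0, "cuda_version": "12.4", ...}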
+
+
+ def _detect_apple_deep() -> dict:
+     """Deep detection for Apple Silicon."""
+     chip_info = _run(["sysctl", "-n", "machdep.cpu.brand_string"]) or ""
+     ram_gb = _get_ram_gb() or 8.0
+
+     # Identify the exact chip model
+     chip_name = "Apple Silicon"
+     chip_data = None
+     for key in _APPLE_SILICON_DB:
+         if key.lower().replace(" ", "") in chip_info.lower().replace(" ", ""):
+             chip_name = f"Apple {key}"
+             chip_data = _APPLE_SILICON_DB[key]
+             break
+
+     # Unified memory → roughly 75% of total RAM is usable by the GPU
+     gpu_mem = round(ram_gb * 0.75, 1)
+
+     return {
+         "type": "apple_mps",
+         "name": chip_name,
+         "vram_gb": gpu_mem,  # the share of unified memory usable by the GPU
+         "driver_version": None,
+         "cuda_version": None,
+         "compute_capability": None,
+         "gpu_gen": chip_data["gen"] if chip_data else None,
+         "gpu_cores": chip_data["gpu_cores"] if chip_data else None,
+         "unified_memory": True,
+         "total_ram_gb": ram_gb,
+         "mps_available": True,
+     }
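+
+ # The 75% heuristic in concrete numbers: on a hypothetical 32 GB M2 Max,
+ # vram_gb = round(32 * 0.75, 1) = 24.0, with unified_memory=True and
+ # total_ram_gb=32.0, so downstream sizing treats it like a 24 GB GPU.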
+
+
+ def _detect_nvidia_deep() -> Optional[dict]:
+     """Deep detection for NVIDIA GPUs."""
+     # GPU name
+     gpu_name = _run([
+         "nvidia-smi", "--query-gpu=name", "--format=csv,noheader,nounits"
+     ])
+     if not gpu_name:
+         return None
+     gpu_name = gpu_name.split("\n")[0].strip()
+
+     # VRAM (prefer a live nvidia-smi query)
+     vram_mb = None
+     vram_output = _run([
+         "nvidia-smi", "--query-gpu=memory.total", "--format=csv,noheader,nounits"
+     ])
+     if vram_output:
+         try:
+             vram_mb = int(vram_output.split("\n")[0].strip())
+         except ValueError:
+             pass
+
+     # Driver version
+     driver_ver = _run([
+         "nvidia-smi", "--query-gpu=driver_version", "--format=csv,noheader"
+     ])
+     if driver_ver:
+         driver_ver = driver_ver.split("\n")[0].strip()
+
+     # CUDA version
+     cuda_ver = None
+     nvcc_out = _run(["nvcc", "--version"])
+     if nvcc_out:
+         m = re.search(r'release (\d+\.\d+)', nvcc_out)
+         if m:
+             cuda_ver = m.group(1)
+     if not cuda_ver:
+         smi_out = _run(["nvidia-smi"])
+         if smi_out:
+             m = re.search(r'CUDA Version:\s*([\d.]+)', smi_out)
+             if m:
+                 cuda_ver = m.group(1)
+
+     # Look up details in the database
+     gpu_data = _lookup_nvidia_gpu(gpu_name)
+     vram_gb = round(vram_mb / 1024, 1) if vram_mb else (gpu_data["vram_gb"] if gpu_data else None)
+
+     return {
+         "type": "nvidia",
+         "name": gpu_name,
+         "vram_gb": vram_gb,
+         "driver_version": driver_ver,
+         "cuda_version": cuda_ver,
+         "compute_capability": gpu_data["compute"] if gpu_data else None,
+         "gpu_gen": gpu_data["gen"] if gpu_data else None,
+         "unified_memory": False,
+         "total_ram_gb": _get_ram_gb(),
+         "mps_available": False,
+     }
+
+
+ def _detect_amd_deep() -> Optional[dict]:
+     """Deep detection for AMD ROCm GPUs."""
+     rocm_ver = None
+     gpu_name = "AMD GPU"
+
+     # Try rocm-smi
+     smi_output = _run_any(["rocm-smi", "--showproductname"])
+     if smi_output:
+         for line in smi_output.split("\n"):
+             if "card" in line.lower() or "gpu" in line.lower():
+                 # Extract the GPU name
+                 parts = line.split(":")
+                 if len(parts) >= 2:
+                     gpu_name = parts[-1].strip()
+                 break
+
+     rocm_output = _run(["cat", "/opt/rocm/.info/version"])
+     if rocm_output:
+         rocm_ver = rocm_output.strip()
+
+     # Try to read VRAM
+     vram_gb = None
+     mem_output = _run_any(["rocm-smi", "--showmeminfo", "vram"])
+     if mem_output:
+         m = re.search(r'Total.*?:\s*(\d+)', mem_output)
+         if m:
+             vram_gb = round(int(m.group(1)) / (1024 * 1024), 1)
+
+     return {
+         "type": "amd_rocm",
+         "name": gpu_name,
+         "vram_gb": vram_gb,
+         "driver_version": None,
+         "cuda_version": None,
+         "compute_capability": None,
+         "gpu_gen": None,
+         "rocm_version": rocm_ver,
+         "unified_memory": False,
+         "total_ram_gb": _get_ram_gb(),
+         "mps_available": False,
+     }
+
+
+ def _lookup_nvidia_gpu(gpu_name: str) -> Optional[dict]:
+     """Look up a GPU in the VRAM database (longest match wins)."""
+     name_upper = gpu_name.upper()
+     best_match = None
+     best_len = 0
+     for key, data in _NVIDIA_VRAM_DB.items():
+         key_upper = key.upper()
+         if key_upper in name_upper and len(key_upper) > best_len:
+             best_match = data
+             best_len = len(key_upper)
+     if best_match:
+         return best_match
+     # Fuzzy match: strip vendor prefixes
+     for key, data in _NVIDIA_VRAM_DB.items():
+         key_clean = key.upper().replace("GEFORCE ", "").replace("NVIDIA ", "")
+         if key_clean in name_upper and len(key_clean) > best_len:
+             best_match = data
+             best_len = len(key_clean)
+     return best_match
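+
+ # Why longest-match matters: "NVIDIA GeForce RTX 4070 Ti SUPER" contains the
+ # keys "RTX 4070", "RTX 4070 Ti" and "RTX 4070 Ti SUPER"; the longest match
+ # wins, so the 16 GB entry is returned rather than the 12 GB "RTX 4070" one:
+ #
+ #     >>> _lookup_nvidia_gpu("NVIDIA GeForce RTX 4070 Ti SUPER")["vram_gb"]
+ #     16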
+
+
+ def _get_ram_gb() -> Optional[float]:
+     """Total system RAM in GB."""
+     system = platform.system()
+     try:
+         if system == "Darwin":
+             output = _run(["sysctl", "-n", "hw.memsize"])
+             if output:
+                 return round(int(output) / (1024 ** 3), 1)
+         elif system == "Linux":
+             with open("/proc/meminfo") as f:
+                 for line in f:
+                     if line.startswith("MemTotal:"):
+                         return round(int(line.split()[1]) / (1024 ** 2), 1)
+         elif system == "Windows":
+             output = _run(["wmic", "ComputerSystem", "get", "TotalPhysicalMemory"])
+             if output:
+                 for line in output.split("\n"):
+                     if line.strip().isdigit():
+                         return round(int(line.strip()) / (1024 ** 3), 1)
+     except Exception:
+         pass
+     return None
475
+
476
+
477
+ # ─────────────────────────────────────────────
478
+ # AI 框架兼容性分析
479
+ # ─────────────────────────────────────────────
480
+
481
+ def check_pytorch_compatibility(gpu_info: dict) -> dict:
482
+ """
483
+ 检查当前 GPU 与 PyTorch 的兼容性。
484
+
485
+ Returns:
486
+ {
487
+ "compatible": bool,
488
+ "recommended_pytorch": str, # 推荐的 PyTorch 版本
489
+ "recommended_cuda": str | None,
490
+ "install_cmd": str, # 推荐的安装命令
491
+ "backend": "cuda" | "mps" | "rocm" | "cpu",
492
+ "warnings": [str],
493
+ }
494
+ """
495
+ gpu_type = gpu_info.get("type", "cpu_only")
496
+ warnings = []
497
+
498
+ if gpu_type == "apple_mps":
499
+ return {
500
+ "compatible": True,
501
+ "recommended_pytorch": "2.6",
502
+ "recommended_cuda": None,
503
+ "install_cmd": "pip3 install torch torchvision torchaudio",
504
+ "backend": "mps",
505
+ "warnings": [],
506
+ }
507
+
508
+ if gpu_type == "amd_rocm":
509
+ rocm_ver = gpu_info.get("rocm_version", "")
510
+ return {
511
+ "compatible": bool(rocm_ver),
512
+ "recommended_pytorch": "2.5",
513
+ "recommended_cuda": None,
514
+ "install_cmd": "pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.2",
515
+ "backend": "rocm",
516
+ "warnings": ["AMD ROCm 支持相比 CUDA 可能存在部分算子不兼容"] if rocm_ver else ["未检测到 ROCm"],
517
+ }
518
+
519
+ if gpu_type == "nvidia":
520
+ cuda_ver = gpu_info.get("cuda_version")
521
+ driver_ver = gpu_info.get("driver_version")
522
+ compute = gpu_info.get("compute_capability")
523
+
524
+ if not cuda_ver:
525
+ warnings.append("未检测到 CUDA,需要先安装 CUDA Toolkit")
526
+
527
+ # 选择最佳 PyTorch + CUDA 组合
528
+ best_pt = None
529
+ best_cuda = None
530
+ for entry in _PYTORCH_CUDA_MATRIX:
531
+ for cuda_opt in entry["cuda"]:
532
+ if cuda_ver and cuda_opt == cuda_ver:
533
+ best_pt = entry["pytorch"]
534
+ best_cuda = cuda_opt
535
+ break
536
+ if not best_pt:
537
+ best_pt = entry["pytorch"]
538
+ best_cuda = entry["cuda"][0]
539
+ if best_pt and best_cuda == cuda_ver:
540
+ break
541
+
542
+ if not best_pt:
543
+ best_pt = "2.6"
544
+ best_cuda = "12.4"
545
+
546
+ # 检查 compute capability
547
+ if compute:
548
+ cc_float = float(compute)
549
+ if cc_float < 3.5:
550
+ warnings.append(f"GPU compute capability {compute} 过低,PyTorch 2.x 不再支持")
551
+ elif cc_float < 7.0:
552
+ warnings.append(f"GPU compute capability {compute} 较旧,部分新特性(如 BF16/Flash Attention)不可用")
553
+
554
+ # 检查驱动版本
555
+ if driver_ver and best_cuda:
556
+ min_driver = _CUDA_DRIVER_MAP.get(best_cuda)
557
+ if min_driver and _ver_lt(driver_ver, min_driver):
558
+ warnings.append(f"NVIDIA 驱动 {driver_ver} 低于 CUDA {best_cuda} 要求的最低版本 {min_driver},请升级驱动")
559
+
560
+ cuda_suffix = best_cuda.replace(".", "") if best_cuda else "124"
561
+ install_url = f"https://download.pytorch.org/whl/cu{cuda_suffix[:3]}"
562
+
563
+ return {
564
+ "compatible": True,
565
+ "recommended_pytorch": best_pt,
566
+ "recommended_cuda": best_cuda,
567
+ "install_cmd": f"pip3 install torch torchvision torchaudio --index-url {install_url}",
568
+ "backend": "cuda",
569
+ "warnings": warnings,
570
+ }
571
+
572
+ # CPU only
573
+ return {
574
+ "compatible": True,
575
+ "recommended_pytorch": "2.6",
576
+ "recommended_cuda": None,
577
+ "install_cmd": "pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu",
578
+ "backend": "cpu",
579
+ "warnings": ["无 GPU,将使用 CPU 模式(推理速度较慢)"],
580
+ }
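+
+ # Sketch of the NVIDIA path for a hypothetical RTX 4090 with CUDA 12.4 and
+ # driver 550.54: the matrix resolves to PyTorch 2.6 on the cu124 wheel index,
+ # with no warnings (compute 8.9 ≥ 7.0 and the driver meets the minimum):
+ #
+ #     >>> compat = check_pytorch_compatibility({
+ #     ...     "type": "nvidia", "cuda_version": "12.4",
+ #     ...     "driver_version": "550.54", "compute_capability": "8.9",
+ #     ... })
+ #     >>> compat["backend"], compat["recommended_pytorch"], compat["recommended_cuda"]
+ #     ('cuda', '2.6', '12.4')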
+
+
+ # ─────────────────────────────────────────────
+ # Smart VRAM recommendations
+ # ─────────────────────────────────────────────
+
+ def recommend_quantization(
+     model_params_b: float,
+     available_vram_gb: float,
+ ) -> dict:
+     """
+     Recommend the best quantization scheme for a model size and the available VRAM.
+
+     Args:
+         model_params_b: model size in billions of parameters
+         available_vram_gb: available GPU memory (GB)
+
+     Returns:
+         {
+             "can_run": bool,
+             "recommended_quant": str | None,  # "fp16" / "q8" / "q4_k" / ...
+             "vram_needed_gb": float,
+             "all_options": [
+                 {"quant": str, "vram_gb": float, "fits": bool, "quality": str},
+                 ...
+             ],
+             "advice": str,
+         }
+     """
+     quality_labels = {
+         "fp32": "perfect (lossless)",
+         "fp16": "excellent (near-lossless)",
+         "q8": "great (recommended)",
+         "q6_k": "very good",
+         "q5_k": "good",
+         "q4_k": "good (best quality-per-GB trade-off)",
+         "q4_0": "usable",
+         "q3_k": "fair (noticeable degradation)",
+         "q2_k": "poor (only when VRAM is extremely tight)",
+     }
+
+     options = []
+     recommended = None
+
+     for quant, formula in _MODEL_VRAM_FORMULA.items():
+         vram_needed = round(formula(model_params_b), 1)
+         fits = vram_needed <= available_vram_gb
+         options.append({
+             "quant": quant,
+             "vram_gb": vram_needed,
+             "fits": fits,
+             "quality": quality_labels.get(quant, ""),
+         })
+         if fits and recommended is None:
+             recommended = quant
+
+     # Ordered from highest to lowest quality (options follow the dict order of _MODEL_VRAM_FORMULA)
+     can_run = recommended is not None
+
+     if not can_run:
+         # Even the smallest quantization does not fit
+         min_vram = options[-1]["vram_gb"] if options else 0
+         advice = f"{available_vram_gb}GB of VRAM is not enough to run a {model_params_b}B model (at least {min_vram}GB needed). Use a smaller model or add memory."
+     elif recommended in ("fp32", "fp16"):
+         advice = f"Plenty of VRAM! The {model_params_b}B model can run at full {recommended.upper()} precision for the best quality."
+     elif recommended in ("q8", "q6_k"):
+         advice = f"Recommended: run the {model_params_b}B model with {recommended.upper()} quantization; quality loss is minimal."
+     elif recommended in ("q4_k", "q5_k"):
+         advice = f"Recommended: {recommended.upper()} quantization, the best balance between VRAM use and quality."
+     else:
+         advice = f"VRAM is tight; use {recommended.upper()} quantization. For better quality, consider a smaller model."
+
+     return {
+         "can_run": can_run,
+         "recommended_quant": recommended,
+         "vram_needed_gb": options[0]["vram_gb"] if options else 0,  # FP32 requirement
+         "all_options": options,
+         "advice": advice,
+     }
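+
+ # Worked example: an 8B model on a hypothetical 8 GB card. FP16 (19.2 GB),
+ # Q8 (10.6 GB) and Q6_K (8.2 GB) all overflow, so the first scheme that fits
+ # is Q5_K at 8 × 0.72 × 1.2 ≈ 6.9 GB:
+ #
+ #     >>> rec = recommend_quantization(8, 8.0)
+ #     >>> rec["can_run"], rec["recommended_quant"]
+ #     (True, 'q5_k')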
+
+
+ def recommend_for_model(
+     model_name: str,
+     gpu_info: dict,
+ ) -> dict:
+     """
+     Given a model name and GPU info, return a complete recommendation.
+
+     Args:
+         model_name: model name (e.g. "llama-3.1-8b", "qwen2.5-72b")
+         gpu_info: return value of detect_gpu_deep()
+
+     Returns:
+         {
+             "model": str,
+             "params_b": float | None,
+             "gpu": str,
+             "vram_gb": float,
+             "recommendation": dict,  # result of recommend_quantization()
+             "pytorch_compat": dict,  # result of check_pytorch_compatibility()
+             "ollama_tag": str | None,  # recommended ollama model tag
+         }
+     """
+     # Look up the model's parameter count
+     params_b = _lookup_model_params(model_name)
+     vram_gb = gpu_info.get("vram_gb") or 0
+
+     recommendation = recommend_quantization(params_b, vram_gb) if params_b else None
+     pytorch_compat = check_pytorch_compatibility(gpu_info)
+
+     # Build the recommended ollama tag
+     ollama_tag = None
+     if params_b and recommendation and recommendation.get("can_run"):
+         quant = recommendation["recommended_quant"]
+         if quant in ("fp16", "fp32"):
+             ollama_tag = model_name
+         else:
+             ollama_tag = f"{model_name}:{quant}"
+
+     return {
+         "model": model_name,
+         "params_b": params_b,
+         "gpu": gpu_info.get("name", "Unknown"),
+         "vram_gb": vram_gb,
+         "recommendation": recommendation,
+         "pytorch_compat": pytorch_compat,
+         "ollama_tag": ollama_tag,
+     }
+
+
+ def _lookup_model_params(model_name: str) -> Optional[float]:
+     """Look up a parameter count in the known-model database."""
+     name_lower = model_name.lower().strip()
+     # Exact match
+     if name_lower in _KNOWN_MODEL_PARAMS:
+         return _KNOWN_MODEL_PARAMS[name_lower]
+     # Fuzzy match (tolerate prefixes)
+     for key, params in _KNOWN_MODEL_PARAMS.items():
+         if key in name_lower or name_lower in key:
+             return params
+     # Extract the parameter count from the name (e.g. "xxx-7b", "xxx-70B")
+     m = re.search(r'(\d+(?:\.\d+)?)\s*[bB]', model_name)
+     if m:
+         return float(m.group(1))
+     return None
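+
+ # Resolution order in practice: exact table hits win, then substring matches,
+ # then the "<digits>b" suffix fallback for names not in the table:
+ #
+ #     >>> _lookup_model_params("llama-3.1-8b")   # exact match
+ #     8
+ #     >>> _lookup_model_params("my-custom-13B")  # hypothetical name, regex fallback
+ #     13.0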
+
+
+ # ─────────────────────────────────────────────
+ # Install success prediction
+ # ─────────────────────────────────────────────
+
+ def predict_install_success(
+     project_key: str,
+     gpu_info: dict,
+     env: dict,
+     strategy: str = "unknown",
+ ) -> dict:
+     """
+     Predict the probability of a successful install from hardware and project traits.
+
+     Args:
+         project_key: "owner/repo"
+         gpu_info: result of detect_gpu_deep()
+         env: result of EnvironmentDetector.detect()
+         strategy: the install strategy in use
+
+     Returns:
+         {
+             "success_probability": float,  # 0.0 ~ 1.0
+             "risk_factors": [str],
+             "recommendations": [str],
+             "confidence_level": "high" | "medium" | "low",
+         }
+     """
+     probability = 0.9  # baseline
+     risk_factors = []
+     recommendations = []
+
+     # Strategy factor
+     strategy_scores = {
+         "known_project": 0.0,  # known project, highly reliable
+         "type_template_python": -0.05,
+         "type_template_python_ml": -0.10,
+         "type_template_node": -0.05,
+         "type_template_docker": -0.03,
+         "type_template_rust": -0.05,
+         "type_template_go": -0.03,
+         "type_template_cmake": -0.15,
+         "type_template_make": -0.15,
+         "readme_extract": -0.25,  # README extraction is the least reliable
+     }
+     adjustment = strategy_scores.get(strategy, -0.15)
+     probability += adjustment
+     if adjustment < -0.10:
+         risk_factors.append(f"install strategy '{strategy}' is relatively unreliable")
+
+     # GPU-related risks
+     os_info = env.get("os", {})
+     gpu_type = gpu_info.get("type", "cpu_only")
+
+     if gpu_type == "nvidia":
+         if not gpu_info.get("cuda_version"):
+             probability -= 0.15
+             risk_factors.append("NVIDIA GPU present but CUDA is not installed")
+             recommendations.append("Install the CUDA Toolkit: https://developer.nvidia.com/cuda-toolkit")
+         elif gpu_info.get("compute_capability"):
+             cc = float(gpu_info["compute_capability"])
+             if cc < 6.0:
+                 probability -= 0.20
+                 risk_factors.append(f"GPU compute capability {cc} is too low")
+
+     elif gpu_type == "amd_rocm":
+         if not gpu_info.get("rocm_version"):
+             probability -= 0.20
+             risk_factors.append("AMD GPU present but ROCm is not installed")
+
+     # Operating system factor
+     os_type = os_info.get("type", "unknown")
+     if os_type == "windows":
+         probability -= 0.10
+         risk_factors.append("configuring a build toolchain on Windows is relatively complex")
+         recommendations.append("Consider WSL2 for better compatibility")
+     elif os_type == "linux" and os_info.get("is_wsl"):
+         probability -= 0.03
+         risk_factors.append("WSL2 environments may have GPU passthrough limitations")
+
+     # Runtime factor
+     runtimes = env.get("runtimes", {})
+     if not runtimes.get("git", {}).get("available"):
+         probability -= 0.30
+         risk_factors.append("Git is not installed")
+         recommendations.append("Install Git: https://git-scm.com/")
+
+     # Disk space
+     disk = env.get("disk", {})
+     free_gb = disk.get("free_gb", 999)
+     if free_gb < 5:
+         probability -= 0.20
+         risk_factors.append(f"insufficient disk space ({free_gb}GB)")
+     elif free_gb < 20:
+         probability -= 0.05
+         risk_factors.append(f"low disk space ({free_gb}GB); may be too little for large AI projects")
+
+     # RAM factor
+     hw = env.get("hardware", {})
+     ram_gb = hw.get("ram_gb", 0)
+     if ram_gb and ram_gb < 8:
+         probability -= 0.15
+         risk_factors.append(f"only {ram_gb}GB of RAM; may be insufficient to build large projects")
+
+     # Clamp the range
+     probability = max(0.05, min(0.98, probability))
+
+     # Confidence tiers
+     if strategy == "known_project":
+         confidence = "high"
+     elif probability >= 0.75:
+         confidence = "medium"
+     else:
+         confidence = "low"
+
+     return {
+         "success_probability": round(probability, 2),
+         "risk_factors": risk_factors,
+         "recommendations": recommendations,
+         "confidence_level": confidence,
+     }
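+
+ # Worked example of the scoring: a hypothetical README-extract install
+ # (adjustment -0.25) on Windows (-0.10) with Git available and ample
+ # disk/RAM/GPU ends at 0.9 - 0.25 - 0.10 = 0.55, i.e. the "low" confidence
+ # tier (below the 0.75 cutoff and not a known project).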
+
+
+ # ─────────────────────────────────────────────
+ # Version comparison helper
+ # ─────────────────────────────────────────────
+
+ def _ver_lt(v1: str, v2: str) -> bool:
+     """Version comparison: is v1 < v2?"""
+     def _parts(v):
+         return [int(x) for x in re.findall(r'\d+', v)]
+     return _parts(v1) < _parts(v2)
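+
+ # The comparison is purely numeric on the digit groups, so it handles
+ # driver-style strings directly:
+ #
+ #     >>> _ver_lt("525.60", "560.28")
+ #     True
+ #     >>> _ver_lt("560.28", "525.60")
+ #     False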
+
+
+ # ─────────────────────────────────────────────
+ # Cache layer
+ # ─────────────────────────────────────────────
+
+ _cache: dict[str, tuple[float, dict]] = {}
+ _CACHE_TTL = 120  # 2 minutes
+
+
+ def get_gpu_info(force_refresh: bool = False) -> dict:
+     """Get GPU info (cached)."""
+     now = time.time()
+     if not force_refresh and "gpu" in _cache:
+         ts, data = _cache["gpu"]
+         if now - ts < _CACHE_TTL:
+             return data
+     data = detect_gpu_deep()
+     _cache["gpu"] = (now, data)
+     return data
+
+
+ def get_full_ai_hardware_report(env: dict | None = None) -> dict:
+     """
+     Generate a complete AI hardware report.
+
+     Returns:
+         {
+             "gpu": dict,  # deep GPU detection
+             "pytorch": dict,  # PyTorch compatibility
+             "vram_gb": float,  # usable GPU memory
+             "recommended_models": list,  # model sizes this hardware can run
+             "summary": str,  # human-readable summary
+         }
+     """
+     gpu = get_gpu_info()
+     pytorch = check_pytorch_compatibility(gpu)
+     vram = gpu.get("vram_gb") or 0
+
+     # Model tiers this hardware can run
+     model_tiers = []
+     for label, params_b in [
+         ("Small (1-3B)", 3), ("Medium (7-8B)", 8), ("Large (13-14B)", 14),
+         ("Extra-large (32-34B)", 32), ("Huge (70B)", 70), ("Flagship (405B)", 405),
+     ]:
+         rec = recommend_quantization(params_b, vram)
+         if rec["can_run"]:
+             model_tiers.append({
+                 "tier": label,
+                 "params_b": params_b,
+                 "best_quant": rec["recommended_quant"],
+                 "vram_needed": rec["all_options"][0]["vram_gb"] if rec["all_options"] else 0,
+             })
+
+     # Build the summary
+     gpu_name = gpu.get("name", "Unknown")
+     backend = pytorch.get("backend", "cpu")
+     summary_parts = [f"GPU: {gpu_name}"]
+
+     if vram:
+         summary_parts.append(f"Usable GPU memory: {vram}GB")
+     summary_parts.append(f"Inference backend: {backend.upper()}")
+
+     if model_tiers:
+         max_tier = model_tiers[-1]["tier"]
+         summary_parts.append(f"Largest runnable tier: {max_tier}")
+     else:
+         summary_parts.append("Not enough VRAM to run any model")
+
+     if pytorch.get("warnings"):
+         summary_parts.extend(pytorch["warnings"])
+
+     return {
+         "gpu": gpu,
+         "pytorch": pytorch,
+         "vram_gb": vram,
+         "recommended_models": model_tiers,
+         "summary": " | ".join(summary_parts),
+     }
+
+
+ # ─────────────────────────────────────────────
+ # Formatted output
+ # ─────────────────────────────────────────────
+
+ def format_ai_hardware_report(report: dict) -> str:
+     """Format an AI hardware report as human-readable text."""
+     lines = []
+     gpu = report.get("gpu", {})
+     pytorch = report.get("pytorch", {})
+     vram = report.get("vram_gb", 0)
+
+     lines.append("🎮 AI Hardware Report")
+     lines.append("=" * 40)
+
+     # GPU info
+     gpu_type = gpu.get("type", "cpu_only")
+     lines.append(f" GPU: {gpu.get('name', 'N/A')}")
+     if vram:
+         lines.append(f" Usable GPU memory: {vram} GB")
+     if gpu.get("cuda_version"):
+         lines.append(f" CUDA: {gpu['cuda_version']}")
+     if gpu.get("driver_version"):
+         lines.append(f" Driver: {gpu['driver_version']}")
+     if gpu.get("compute_capability"):
+         lines.append(f" Compute capability: {gpu['compute_capability']}")
+     if gpu.get("unified_memory"):
+         lines.append(f" Unified memory: ✅ (total RAM: {gpu.get('total_ram_gb', '?')} GB)")
+
+     # PyTorch
+     lines.append("")
+     lines.append("🔥 PyTorch recommendation")
+     lines.append(f" Backend: {pytorch.get('backend', 'N/A').upper()}")
+     lines.append(f" Recommended version: {pytorch.get('recommended_pytorch', 'N/A')}")
+     lines.append(f" Install command: {pytorch.get('install_cmd', 'N/A')}")
+     for w in pytorch.get("warnings", []):
+         lines.append(f" ⚠️ {w}")
+
+     # Runnable model sizes
+     models = report.get("recommended_models", [])
+     if models:
+         lines.append("")
+         lines.append("🤖 Runnable model sizes")
+         for m in models:
+             lines.append(f" ✅ {m['tier']} → {m['best_quant'].upper()}")
+     else:
+         lines.append("")
+         lines.append("❌ Current hardware lacks the VRAM to run AI models")
+
+     return "\n".join(lines)
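+
+
+ # A minimal end-to-end sketch of how the pieces compose (detection here is
+ # read-only, so running the module directly is safe; the model name is just
+ # an illustrative choice):
+ if __name__ == "__main__":
+     hw_report = get_full_ai_hardware_report()
+     print(format_ai_hardware_report(hw_report))
+     # Per-model recommendation for one sample model from _KNOWN_MODEL_PARAMS
+     print(recommend_for_model("llama-3.1-8b", get_gpu_info()))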