gitinstall-1.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gitinstall/__init__.py +61 -0
- gitinstall/_sdk.py +541 -0
- gitinstall/academic.py +831 -0
- gitinstall/admin.html +327 -0
- gitinstall/auto_update.py +384 -0
- gitinstall/autopilot.py +349 -0
- gitinstall/badge.py +476 -0
- gitinstall/checkpoint.py +330 -0
- gitinstall/cicd.py +499 -0
- gitinstall/clawhub.html +718 -0
- gitinstall/config_schema.py +353 -0
- gitinstall/db.py +984 -0
- gitinstall/db_backend.py +445 -0
- gitinstall/dep_chain.py +337 -0
- gitinstall/dependency_audit.py +1153 -0
- gitinstall/detector.py +542 -0
- gitinstall/doctor.py +493 -0
- gitinstall/education.py +869 -0
- gitinstall/enterprise.py +802 -0
- gitinstall/error_fixer.py +953 -0
- gitinstall/event_bus.py +251 -0
- gitinstall/executor.py +577 -0
- gitinstall/feature_flags.py +138 -0
- gitinstall/fetcher.py +921 -0
- gitinstall/huggingface.py +922 -0
- gitinstall/hw_detect.py +988 -0
- gitinstall/i18n.py +664 -0
- gitinstall/installer_registry.py +362 -0
- gitinstall/knowledge_base.py +379 -0
- gitinstall/license_check.py +605 -0
- gitinstall/llm.py +569 -0
- gitinstall/log.py +236 -0
- gitinstall/main.py +1408 -0
- gitinstall/mcp_agent.py +841 -0
- gitinstall/mcp_server.py +386 -0
- gitinstall/monorepo.py +810 -0
- gitinstall/multi_source.py +425 -0
- gitinstall/onboard.py +276 -0
- gitinstall/planner.py +222 -0
- gitinstall/planner_helpers.py +323 -0
- gitinstall/planner_known_projects.py +1010 -0
- gitinstall/planner_templates.py +996 -0
- gitinstall/remote_gpu.py +633 -0
- gitinstall/resilience.py +608 -0
- gitinstall/run_tests.py +572 -0
- gitinstall/skills.py +476 -0
- gitinstall/tool_schemas.py +324 -0
- gitinstall/trending.py +279 -0
- gitinstall/uninstaller.py +415 -0
- gitinstall/validate_top100.py +607 -0
- gitinstall/watchdog.py +180 -0
- gitinstall/web.py +1277 -0
- gitinstall/web_ui.html +2277 -0
- gitinstall-1.1.0.dist-info/METADATA +275 -0
- gitinstall-1.1.0.dist-info/RECORD +59 -0
- gitinstall-1.1.0.dist-info/WHEEL +5 -0
- gitinstall-1.1.0.dist-info/entry_points.txt +3 -0
- gitinstall-1.1.0.dist-info/licenses/LICENSE +21 -0
- gitinstall-1.1.0.dist-info/top_level.txt +1 -0
gitinstall/llm.py
ADDED
@@ -0,0 +1,569 @@
"""
llm.py - multi-LLM adapter
=====================================

15-level fallback strategy, never fails:
1. Anthropic Claude (ANTHROPIC_API_KEY)
2. OpenAI GPT-4o (OPENAI_API_KEY)
3. OpenRouter (OPENROUTER_API_KEY)
4. Google Gemini (GEMINI_API_KEY)
5. Groq Llama 3.3 (GROQ_API_KEY)
6. DeepSeek (DEEPSEEK_API_KEY)
7. Tongyi Qianwen Qwen (DASHSCOPE_API_KEY)
8. Zhipu GLM (ZHIPU_API_KEY)
9. Moonshot Kimi (MOONSHOT_API_KEY)
10. Baichuan (BAICHUAN_API_KEY)
11. 01.AI Yi (YI_API_KEY)
12. StepFun Step (STEPFUN_API_KEY)
13. LM Studio (localhost:1234; just needs to be running locally)
14. Ollama (localhost:11434; just needs to be running locally)
15. LLM-free rule mode (always available, zero configuration)

Mass-market principles (minimal requirements):
- Recommended local model size: 1.5B-3B (runs on an ordinary laptop)
- Default recommendation: qwen2.5:1.5b (bilingual Chinese/English, ~1 GB VRAM/RAM)
- The GITINSTALL_LLM_MODEL environment variable overrides the model name
"""

from __future__ import annotations

import json
import os
import re
import socket
import sys
import urllib.error
import urllib.request
from abc import ABC, abstractmethod
from typing import Optional

from log import get_logger
from i18n import t

logger = get_logger(__name__)

# LLM request timeout (seconds): past this limit we fall back to rule mode.
# Override via the GITINSTALL_LLM_TIMEOUT environment variable.
LLM_TIMEOUT = int(os.getenv("GITINSTALL_LLM_TIMEOUT", "30"))
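# Example: give a slow local model more headroom before the fallback fires
# (the variable name comes from the line above):
#   export GITINSTALL_LLM_TIMEOUT=120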


# ─────────────────────────────────────────────
# Abstract base class
# ─────────────────────────────────────────────

class BaseLLMProvider(ABC):
    """Uniform interface implemented by every LLM provider"""

    @abstractmethod
    def complete(self, system: str, user: str, max_tokens: int = 2048) -> str:
        """Send a chat request and return the assistant's reply text"""

    @property
    @abstractmethod
    def name(self) -> str:
        """Provider name, used in logs and user-facing messages"""

    def is_available(self) -> bool:
        """Whether this provider is usable (subclasses may override)"""
        return True


# ─────────────────────────────────────────────
# Generic OpenAI-compatible provider
# Supports: OpenAI / OpenRouter / Groq / DeepSeek /
# Gemini / LM Studio / Ollama / any OpenAI-compatible API
# ─────────────────────────────────────────────

class OpenAICompatibleProvider(BaseLLMProvider):
    """
    Adapter for any OpenAI Chat Completions-compatible endpoint.
    Uses only the standard-library urllib; no third-party packages required.
    """

    def __init__(self, api_key: str, base_url: str, model: str, provider_name: str):
        self.api_key = api_key
        self.base_url = base_url.rstrip("/")
        self.model = model
        self._name = provider_name

    @property
    def name(self) -> str:
        return self._name

    def complete(self, system: str, user: str, max_tokens: int = 2048) -> str:
        payload = json.dumps({
            "model": self.model,
            "messages": [
                {"role": "system", "content": system},
                {"role": "user", "content": user},
            ],
            "max_tokens": max_tokens,
            "temperature": 0.1,  # install tasks need determinism, not randomness
        }).encode("utf-8")

        req = urllib.request.Request(
            f"{self.base_url}/chat/completions",
            data=payload,
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.api_key}",
                "User-Agent": "gitinstall/1.0",
            },
            method="POST",
        )

        try:
            with urllib.request.urlopen(req, timeout=LLM_TIMEOUT) as resp:
                data = json.loads(resp.read().decode("utf-8"))
                return data["choices"][0]["message"]["content"]
        except urllib.error.HTTPError as e:
            body = e.read().decode("utf-8", errors="replace")
            raise RuntimeError(f"{self._name} API error {e.code}: {body[:300]}") from e
        except (socket.timeout, TimeoutError):
            raise RuntimeError(f"{self._name} request timed out ({LLM_TIMEOUT}s); falling back to rule mode")
        except urllib.error.URLError as e:
            if isinstance(e.reason, (socket.timeout, TimeoutError)):
                raise RuntimeError(f"{self._name} request timed out ({LLM_TIMEOUT}s); falling back to rule mode")
            raise RuntimeError(f"{self._name} connection failed: {e.reason}") from e


# ─────────────────────────────────────────────
# Native Anthropic provider (not the OpenAI-compatible format)
# ─────────────────────────────────────────────

class AnthropicProvider(BaseLLMProvider):
    """Native Anthropic Messages API client"""

    DEFAULT_MODEL = "claude-opus-4-5"

    def __init__(self, api_key: str, model: Optional[str] = None):
        self.api_key = api_key
        self.model = model or self.DEFAULT_MODEL

    @property
    def name(self) -> str:
        return f"Anthropic {self.model}"

    def complete(self, system: str, user: str, max_tokens: int = 2048) -> str:
        payload = json.dumps({
            "model": self.model,
            "max_tokens": max_tokens,
            "system": system,
            "messages": [{"role": "user", "content": user}],
        }).encode("utf-8")

        req = urllib.request.Request(
            "https://api.anthropic.com/v1/messages",
            data=payload,
            headers={
                "Content-Type": "application/json",
                "x-api-key": self.api_key,
                "anthropic-version": "2023-06-01",
                "User-Agent": "gitinstall/1.0",
            },
            method="POST",
        )

        try:
            with urllib.request.urlopen(req, timeout=LLM_TIMEOUT) as resp:
                data = json.loads(resp.read().decode("utf-8"))
                return data["content"][0]["text"]
        except urllib.error.HTTPError as e:
            body = e.read().decode("utf-8", errors="replace")
            raise RuntimeError(f"Anthropic API error {e.code}: {body[:300]}") from e
        except (socket.timeout, TimeoutError):
            raise RuntimeError(f"Anthropic request timed out ({LLM_TIMEOUT}s); falling back to rule mode")
        except urllib.error.URLError as e:
            if isinstance(e.reason, (socket.timeout, TimeoutError)):
                raise RuntimeError(f"Anthropic request timed out ({LLM_TIMEOUT}s); falling back to rule mode")
            raise RuntimeError(f"Anthropic connection failed: {e.reason}") from e


# ─────────────────────────────────────────────
# LLM-free rule mode (always available)
# ─────────────────────────────────────────────

class HeuristicProvider(BaseLLMProvider):
    """
    Needs no API key and no local model.
    Parses the README with regular expressions and a rule table to extract
    install commands.

    Covers roughly 90% of mainstream open-source projects
    (pip/npm/docker/brew/cargo style).
    """

    @property
    def name(self) -> str:
        return "rule engine (no LLM)"

    def complete(self, system: str, user: str, max_tokens: int = 2048) -> str:
        """Parse the README content in the prompt and extract install steps"""
        return self._extract_install_plan(user)

    def _extract_install_plan(self, content: str) -> str:
        """Extract install commands from README text"""
        steps = []

        # Extract every fenced code block
        code_blocks = re.findall(
            r'```(?:bash|shell|sh|zsh|powershell|cmd|console|text)?\n(.*?)```',
            content,
            re.DOTALL | re.IGNORECASE,
        )

        # Install-command patterns, most specific first so the generic
        # pip pattern does not emit truncated duplicates of the matches below
        patterns = [
            # Git clone (step one for almost every project)
            (r'git\s+clone\s+(?:--depth[= ]\S+\s+)?(?:https?://|git@)\S+', "git_clone"),
            # Python installs
            (r'pip(?:3)?\s+install\s+-r\s+requirements[^\n]*\.txt', "pip_req"),
            (r'python(?:3)?\s+-m\s+pip\s+install[^\n]+', "pip_m"),
            (r'(?<!-m\s)pip(?:3)?\s+install[^\n]+', "pip"),
            (r'conda\s+(?:install|env\s+create)[^\n]+', "conda"),
            (r'python(?:3)?\s+setup\.py\s+install', "setup_py"),
            # Node.js
            (r'npm\s+install[^\n]*', "npm"),
            (r'pnpm\s+install[^\n]*', "pnpm"),
            (r'yarn(?:\s+install)?[^\n]*', "yarn"),
            # System package managers
            (r'brew\s+install[^\n]+', "brew"),
            (r'apt(?:-get)?\s+install[^\n]+', "apt"),
            (r'dnf\s+install[^\n]+', "dnf"),
            (r'pacman\s+-S[^\n]+', "pacman"),
            (r'winget\s+install[^\n]+', "winget"),
            (r'choco\s+install[^\n]+', "choco"),
            # Other languages
            (r'cargo\s+install[^\n]+', "cargo"),
            (r'go\s+install[^\n]+', "go"),
            # Docker
            (r'docker\s+(?:pull|run)[^\n]+', "docker"),
            (r'docker-compose\s+up[^\n]*', "docker_compose"),
            # Install scripts
            (r'curl\s+[^\n]+\s*\|[^\n]+(?:bash|sh)', "curl_pipe"),
            (r'bash\s+(?:install|setup)\.sh[^\n]*', "bash_script"),
            (r'make(?:\s+install)?[^\n]*', "make"),
        ]

        seen = set()
        for block in code_blocks:
            for pattern, kind in patterns:
                for match in re.finditer(pattern, block, re.IGNORECASE):
                    cmd = match.group(0).strip()
                    # Safety filter
                    if self._is_dangerous(cmd):
                        continue
                    if cmd not in seen:
                        seen.add(cmd)
                        steps.append({
                            "command": cmd,
                            "type": kind,
                            "description": self._describe(kind),
                        })

        if not steps:
            return json.dumps({
                "mode": "heuristic",
                "status": "insufficient_data",
                "steps": [],
                "message": (
                    "Rule mode could not extract any install commands from the README.\n"
                    "Suggestion: configure any LLM (even the free Groq tier) for better results,\n"
                    "or consult the project README manually."
                ),
            }, ensure_ascii=False, indent=2)

        return json.dumps({
            "mode": "heuristic",
            "status": "ok",
            "steps": steps,
            "warning": "Rule mode: review the steps manually before executing",
        }, ensure_ascii=False, indent=2)

    @staticmethod
    def _is_dangerous(cmd: str) -> bool:
        """Filter out dangerous commands"""
        dangerous = [
            r'rm\s+-rf\s+/',
            r'rm\s+-rf\s+~',
            r':\(\)\{',  # fork bomb
            r'format\s+[cC]:',
            r'mkfs\.',
            r'dd\s+if=',
            r'wget[^\n]+\|\s*(?:sudo\s+)?(?:bash|sh)',
        ]
        return any(re.search(p, cmd, re.IGNORECASE) for p in dangerous)

    @staticmethod
    def _describe(kind: str) -> str:
        descriptions = {
            "git_clone": "Clone the repository",
            "pip": "Install Python package",
            "pip_req": "Install Python dependencies (requirements.txt)",
            "pip_m": "Install Python package (python -m pip)",
            "conda": "Install Conda package",
            "setup_py": "Build and install Python package",
            "npm": "Install Node.js packages",
            "pnpm": "Install Node.js packages (pnpm)",
            "yarn": "Install Node.js packages (yarn)",
            "brew": "Install via Homebrew (macOS)",
            "apt": "Install via apt (Debian/Ubuntu)",
            "dnf": "Install via dnf (Fedora/RHEL)",
            "pacman": "Install via pacman (Arch Linux)",
            "winget": "Install via winget (Windows)",
            "choco": "Install via Chocolatey (Windows)",
            "cargo": "Install via Cargo (Rust)",
            "go": "Install via go install",
            "docker": "Run via Docker",
            "docker_compose": "Start via Docker Compose",
            "curl_pipe": "Download and run install script",
            "bash_script": "Run install script",
            "make": "Build and install",
        }
        return descriptions.get(kind, "Run install command")
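
# Sketch of the rule engine on a minimal README (illustrative input; the
# output shape matches _extract_install_plan above):
#
#   readme = "```bash\ngit clone https://github.com/example/proj\npip install -r requirements.txt\n```"
#   print(HeuristicProvider().complete(system="", user=readme))
#   # -> {"mode": "heuristic", "status": "ok", "steps": [...], ...}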


# ─────────────────────────────────────────────
# Auto-detection + factory functions
# ─────────────────────────────────────────────

def _is_port_open(host: str, port: int, timeout: float = 1.0) -> bool:
    """Check whether a local port is listening"""
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except (socket.timeout, ConnectionRefusedError, OSError):
        return False


# Users can pin a local model via an environment variable, e.g.
#   export GITINSTALL_LLM_MODEL=qwen2.5:1.5b
_DEFAULT_SMALL_MODEL = "qwen2.5:1.5b"   # ~1 GB; runs on an ordinary laptop
_DEFAULT_MEDIUM_MODEL = "qwen2.5:3b"    # ~2 GB; recommended with a discrete GPU

# Preferred small models, ordered by quality/size trade-off.
# If any of these is installed in Ollama, the smallest match wins.
_PREFERRED_SMALL_MODELS = [
    "qwen2.5:1.5b",        # 1.5B, best bilingual Chinese/English (mass-market default)
    "deepseek-r1:1.5b",    # 1.5B, with reasoning traces
    "smollm2:1.7b",        # 1.7B, English-focused
    "qwen2.5-coder:1.5b",  # 1.5B, strong at code (base variant listed above)
    "gemma3:1b",           # 1B, tiny
    "qwen2.5:3b",          # 3B, better quality
    "llama3.2:3b",         # 3B, Meta official
    "llama3.2:1b",         # 1B, Meta's smallest
]


def _get_local_model(base_url: str, endpoint: str = "/v1/models",
                     fallback: str = _DEFAULT_SMALL_MODEL) -> str:
    """
    Return the name of a locally running model.

    Priority:
    1. GITINSTALL_LLM_MODEL environment variable (explicit user choice)
    2. An installed small model (in _PREFERRED_SMALL_MODELS order)
    3. The first locally available model
    4. The fallback default
    """
    # Explicit user choice (highest priority)
    user_model = os.getenv("GITINSTALL_LLM_MODEL", "").strip()
    if user_model:
        return user_model

    try:
        req = urllib.request.Request(
            f"{base_url}{endpoint}",
            headers={"Authorization": "Bearer local"},
        )
        with urllib.request.urlopen(req, timeout=3) as resp:
            data = json.loads(resp.read())
            models = data.get("data", [])
            if not models:
                return fallback
            installed_ids = [m.get("id", "") for m in models]
            # Prefer a known small model
            for preferred in _PREFERRED_SMALL_MODELS:
                for installed in installed_ids:
                    if preferred in installed or installed.startswith(preferred.split(":")[0] + ":1"):
                        return installed
            # Otherwise return the first one
            return installed_ids[0]
    except Exception:
        pass
    return fallback
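
# e.g. with a local Ollama install, _get_local_model("http://localhost:11434")
# returns "qwen2.5:1.5b" when that tag is pulled, otherwise the first model id
# the server reports (or the fallback when nothing is listed).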


def create_provider(force: Optional[str] = None) -> BaseLLMProvider:
    """
    Auto-detect and create the best available LLM provider.

    Args:
        force: force a specific provider; accepted values include:
               "anthropic" | "openai" | "openrouter" | "gemini" |
               "groq" | "deepseek" | "lmstudio" | "ollama" | "none"
               (the Chinese providers match their env-var prefixes, e.g.
               "dashscope" | "zhipu" | "moonshot" | "baichuan" | "yi" | "stepfun");
               None means auto-detect

    Returns:
        A usable LLM provider instance; never returns None
    """

    # ── Forced modes ──
    if force == "none":
        logger.info(t("llm.using_heuristic"))
        return HeuristicProvider()

    if force == "lmstudio":
        model = _get_local_model("http://localhost:1234", fallback=_DEFAULT_SMALL_MODEL)
        logger.info(t("llm.using_with_model", name="LM Studio", model=model))
        return OpenAICompatibleProvider("lm-studio", "http://localhost:1234/v1", model, "LM Studio")

    if force == "ollama":
        model = _get_local_model("http://localhost:11434", fallback=_DEFAULT_SMALL_MODEL)
        logger.info(t("llm.using_with_model", name="Ollama", model=model))
        logger.info(t("llm.ollama_hint", model=model))
        return OpenAICompatibleProvider("ollama", "http://localhost:11434/v1", model, f"Ollama ({model})")

    # ── Auto-detection (ordered by quality/cost) ──
    # Format: (environment variable name, provider factory)
    cloud_providers = [
        (
            "ANTHROPIC_API_KEY",
            lambda k: AnthropicProvider(k),
        ),
        (
            "OPENAI_API_KEY",
            lambda k: OpenAICompatibleProvider(k, "https://api.openai.com/v1", "gpt-4o", "OpenAI GPT-4o"),
        ),
        (
            "OPENROUTER_API_KEY",
            lambda k: OpenAICompatibleProvider(k, "https://openrouter.ai/api/v1", "anthropic/claude-opus-4-5", "OpenRouter"),
        ),
        (
            "GEMINI_API_KEY",
            lambda k: OpenAICompatibleProvider(
                k,
                "https://generativelanguage.googleapis.com/v1beta/openai",
                "gemini-2.0-flash",
                "Google Gemini",
            ),
        ),
        (
            "GROQ_API_KEY",
            lambda k: OpenAICompatibleProvider(k, "https://api.groq.com/openai/v1", "llama-3.3-70b-versatile", "Groq Llama"),
        ),
        (
            "DEEPSEEK_API_KEY",
            lambda k: OpenAICompatibleProvider(k, "https://api.deepseek.com/v1", "deepseek-chat", "DeepSeek"),
        ),
        # ── Chinese LLM providers ──
        (
            "DASHSCOPE_API_KEY",
            lambda k: OpenAICompatibleProvider(
                k, "https://dashscope.aliyuncs.com/compatible-mode/v1",
                "qwen-plus", "Tongyi Qianwen Qwen",
            ),
        ),
        (
            "ZHIPU_API_KEY",
            lambda k: OpenAICompatibleProvider(
                k, "https://open.bigmodel.cn/api/paas/v4",
                "glm-4-flash", "Zhipu GLM",
            ),
        ),
        (
            "MOONSHOT_API_KEY",
            lambda k: OpenAICompatibleProvider(
                k, "https://api.moonshot.cn/v1",
                "moonshot-v1-8k", "Moonshot Kimi",
            ),
        ),
        (
            "BAICHUAN_API_KEY",
            lambda k: OpenAICompatibleProvider(
                k, "https://api.baichuan-ai.com/v1",
                "Baichuan4", "Baichuan",
            ),
        ),
        (
            "YI_API_KEY",
            lambda k: OpenAICompatibleProvider(
                k, "https://api.lingyiwanwu.com/v1",
                "yi-lightning", "01.AI Yi",
            ),
        ),
        (
            "STEPFUN_API_KEY",
            lambda k: OpenAICompatibleProvider(
                k, "https://api.stepfun.com/v1",
                "step-2-16k", "StepFun Step",
            ),
        ),
    ]

    # Walk the environment variables; force can pin a specific provider
    for env_var, factory in cloud_providers:
        provider_name = env_var.replace("_API_KEY", "").lower()
        if force and force != provider_name:
            continue
        key = os.getenv(env_var, "").strip()
        if key:
            provider = factory(key)
            logger.info(t("llm.using_named", name=provider.name))
            return provider

    # Check local servers
    if not force or force == "lmstudio":
        if _is_port_open("localhost", 1234):
            model = _get_local_model("http://localhost:1234", fallback="local-model")
            logger.info(t("llm.detected_local", name="LM Studio", model=model))
            return OpenAICompatibleProvider("lm-studio", "http://localhost:1234/v1", model, "LM Studio")

    if not force or force == "ollama":
        if _is_port_open("localhost", 11434):
            model = _get_local_model("http://localhost:11434", fallback=_DEFAULT_SMALL_MODEL)
            logger.info(t("llm.detected_local", name="Ollama", model=model))
            return OpenAICompatibleProvider("ollama", "http://localhost:11434/v1", model, f"Ollama ({model})")

    # Final fallback: rule mode
    logger.warning(t("llm.no_provider"))
    logger.info(t("llm.hint_ollama"))
    logger.info(t("llm.hint_groq"))
    return HeuristicProvider()
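
# Usage sketch: the force values mirror the docstring above.
#   provider = create_provider()                # auto-detect, never fails
#   offline  = create_provider(force="none")    # always the rule engine
#   local    = create_provider(force="ollama")  # pin the local Ollama server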


# ─────────────────────────────────────────────
# Standardized system prompts (shared across modules)
# ─────────────────────────────────────────────

# The system prompt comes in two versions:
# - INSTALL_SYSTEM_PROMPT: full version (large models / cloud APIs)
# - INSTALL_SYSTEM_PROMPT_SMALL: compact version (1.5B-3B local models, to stay within context)

INSTALL_SYSTEM_PROMPT = """\
You are an open-source software installation expert. Based on the project information and environment the user provides, output the installation steps (pure JSON, no code fences).

JSON format:
{"project_name":"name","steps":[{"id":1,"description":"what","command":"command"}],"launch_command":"launch command","notes":"caveats"}

Rules: use venv for Python projects; on Apple Silicon use pip install torch (no --index-url); for NVIDIA CUDA 12 use --index-url .../cu121; output JSON only.
"""

# Compact prompt for 1.5B-3B local models.
# Key idea: the shorter the prompt, the clearer the instructions and the simpler the JSON schema, the better.
INSTALL_SYSTEM_PROMPT_SMALL = """\
Output JSON only. No explanation. Format:
{"steps":[{"description":"step","command":"shell cmd"}],"launch_command":"start cmd"}
Rules: use venv for Python; Apple MPS: pip install torch (no --index-url); CUDA12: add --index-url https://download.pytorch.org/whl/cu121
"""

ERROR_FIX_SYSTEM_PROMPT = """\
You are an expert at fixing errors encountered while installing open-source software.
The user hit an error while installing a GitHub project; analyze the root cause and provide a fix.

Output JSON format:
{
  "root_cause": "root cause of the error (one sentence)",
  "fix_commands": ["fix command 1", "fix command 2"],
  "explanation": "detailed explanation",
  "prevention": "how to avoid it next time"
}
"""