gitinstall 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gitinstall/__init__.py +61 -0
- gitinstall/_sdk.py +541 -0
- gitinstall/academic.py +831 -0
- gitinstall/admin.html +327 -0
- gitinstall/auto_update.py +384 -0
- gitinstall/autopilot.py +349 -0
- gitinstall/badge.py +476 -0
- gitinstall/checkpoint.py +330 -0
- gitinstall/cicd.py +499 -0
- gitinstall/clawhub.html +718 -0
- gitinstall/config_schema.py +353 -0
- gitinstall/db.py +984 -0
- gitinstall/db_backend.py +445 -0
- gitinstall/dep_chain.py +337 -0
- gitinstall/dependency_audit.py +1153 -0
- gitinstall/detector.py +542 -0
- gitinstall/doctor.py +493 -0
- gitinstall/education.py +869 -0
- gitinstall/enterprise.py +802 -0
- gitinstall/error_fixer.py +953 -0
- gitinstall/event_bus.py +251 -0
- gitinstall/executor.py +577 -0
- gitinstall/feature_flags.py +138 -0
- gitinstall/fetcher.py +921 -0
- gitinstall/huggingface.py +922 -0
- gitinstall/hw_detect.py +988 -0
- gitinstall/i18n.py +664 -0
- gitinstall/installer_registry.py +362 -0
- gitinstall/knowledge_base.py +379 -0
- gitinstall/license_check.py +605 -0
- gitinstall/llm.py +569 -0
- gitinstall/log.py +236 -0
- gitinstall/main.py +1408 -0
- gitinstall/mcp_agent.py +841 -0
- gitinstall/mcp_server.py +386 -0
- gitinstall/monorepo.py +810 -0
- gitinstall/multi_source.py +425 -0
- gitinstall/onboard.py +276 -0
- gitinstall/planner.py +222 -0
- gitinstall/planner_helpers.py +323 -0
- gitinstall/planner_known_projects.py +1010 -0
- gitinstall/planner_templates.py +996 -0
- gitinstall/remote_gpu.py +633 -0
- gitinstall/resilience.py +608 -0
- gitinstall/run_tests.py +572 -0
- gitinstall/skills.py +476 -0
- gitinstall/tool_schemas.py +324 -0
- gitinstall/trending.py +279 -0
- gitinstall/uninstaller.py +415 -0
- gitinstall/validate_top100.py +607 -0
- gitinstall/watchdog.py +180 -0
- gitinstall/web.py +1277 -0
- gitinstall/web_ui.html +2277 -0
- gitinstall-1.1.0.dist-info/METADATA +275 -0
- gitinstall-1.1.0.dist-info/RECORD +59 -0
- gitinstall-1.1.0.dist-info/WHEEL +5 -0
- gitinstall-1.1.0.dist-info/entry_points.txt +3 -0
- gitinstall-1.1.0.dist-info/licenses/LICENSE +21 -0
- gitinstall-1.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1010 @@
|
|
|
1
|
+
"""
|
|
2
|
+
planner_known_projects.py - 已知热门项目安装数据库
|
|
3
|
+
=====================================================
|
|
4
|
+
|
|
5
|
+
从 planner.py 拆分出来的已知项目数据库。
|
|
6
|
+
新增项目时只需编辑此文件,不影响核心逻辑。
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
# ─────────────────────────────────────────────
# AI project hardware-requirement database (core technical-moat data).
#
# Each AI project entry is annotated with:
#     category     → project category (key into _AI_CATEGORIES below)
#     min_vram_gb  → minimum GPU VRAM required (0 = runs without a GPU)
#     rec_vram_gb  → recommended GPU VRAM
#     gpu_required → whether a dedicated GPU is mandatory
#     gpu_backends → list of supported GPU backends
#     disk_gb      → estimated disk-space requirement
#     ram_gb       → minimum system RAM
# ─────────────────────────────────────────────

# Keys are lowercase "owner/repo" strings; get_hardware_req() lowercases
# its argument before lookup, so new entries must use lowercase keys too.
_AI_HARDWARE_REQS: dict[str, dict] = {
    "ollama/ollama": {
        "category": "llm_inference",
        "min_vram_gb": 0,  # small models run on CPU
        "rec_vram_gb": 8,
        "gpu_required": False,
        "gpu_backends": ["cuda", "mps", "rocm", "cpu"],
        "disk_gb": 5,  # base install
        "ram_gb": 8,
    },
    "ggerganov/llama.cpp": {
        "category": "llm_inference",
        "min_vram_gb": 0,
        "rec_vram_gb": 8,
        "gpu_required": False,
        "gpu_backends": ["cuda", "metal", "rocm", "vulkan", "cpu"],
        "disk_gb": 2,
        "ram_gb": 8,
    },
    "comfyanonymous/comfyui": {
        "category": "image_gen",
        "min_vram_gb": 4,
        "rec_vram_gb": 8,
        "gpu_required": True,
        "gpu_backends": ["cuda", "mps", "rocm"],
        "disk_gb": 15,
        "ram_gb": 16,
    },
    "automatic1111/stable-diffusion-webui": {
        "category": "image_gen",
        "min_vram_gb": 4,
        "rec_vram_gb": 8,
        "gpu_required": True,
        "gpu_backends": ["cuda", "mps"],
        "disk_gb": 15,
        "ram_gb": 16,
    },
    "lllyasviel/stable-diffusion-webui-forge": {
        "category": "image_gen",
        "min_vram_gb": 4,
        "rec_vram_gb": 8,
        "gpu_required": True,
        "gpu_backends": ["cuda", "mps"],
        "disk_gb": 15,
        "ram_gb": 16,
    },
    "open-webui/open-webui": {
        "category": "llm_ui",
        "min_vram_gb": 0,
        "rec_vram_gb": 0,
        "gpu_required": False,
        "gpu_backends": ["cpu"],  # the UI itself needs no GPU
        "disk_gb": 2,
        "ram_gb": 4,
    },
    "oobabooga/text-generation-webui": {
        "category": "llm_ui",
        "min_vram_gb": 4,
        "rec_vram_gb": 8,
        "gpu_required": False,
        "gpu_backends": ["cuda", "mps", "rocm", "cpu"],
        "disk_gb": 10,
        "ram_gb": 16,
    },
    "lobehub/lobe-chat": {
        "category": "llm_ui",
        "min_vram_gb": 0,
        "rec_vram_gb": 0,
        "gpu_required": False,
        "gpu_backends": ["cpu"],
        "disk_gb": 2,
        "ram_gb": 4,
    },
    "nomic-ai/gpt4all": {
        "category": "llm_inference",
        "min_vram_gb": 0,
        "rec_vram_gb": 8,
        "gpu_required": False,
        "gpu_backends": ["cuda", "metal", "cpu"],
        "disk_gb": 5,
        "ram_gb": 8,
    },
    "mudler/localai": {
        "category": "llm_inference",
        "min_vram_gb": 0,
        "rec_vram_gb": 8,
        "gpu_required": False,
        "gpu_backends": ["cuda", "metal", "cpu"],
        "disk_gb": 5,
        "ram_gb": 8,
    },
    "hiyouga/llama-factory": {
        "category": "llm_training",
        "min_vram_gb": 8,
        "rec_vram_gb": 24,
        "gpu_required": True,
        "gpu_backends": ["cuda"],
        "disk_gb": 30,
        "ram_gb": 32,
    },
    "vllm-project/vllm": {
        "category": "llm_inference",
        "min_vram_gb": 8,
        "rec_vram_gb": 24,
        "gpu_required": True,
        "gpu_backends": ["cuda", "rocm"],
        "disk_gb": 10,
        "ram_gb": 16,
    },
    "lm-sys/fastchat": {
        "category": "llm_inference",
        "min_vram_gb": 8,
        "rec_vram_gb": 16,
        "gpu_required": False,
        "gpu_backends": ["cuda", "mps", "cpu"],
        "disk_gb": 5,
        "ram_gb": 16,
    },
    "mckaywrigley/chatbot-ui": {
        "category": "llm_ui",
        "min_vram_gb": 0,
        "rec_vram_gb": 0,
        "gpu_required": False,
        "gpu_backends": ["cpu"],
        "disk_gb": 1,
        "ram_gb": 4,
    },
    "microsoft/autogen": {
        "category": "ai_agent",
        "min_vram_gb": 0,
        "rec_vram_gb": 0,
        "gpu_required": False,
        "gpu_backends": ["cpu"],
        "disk_gb": 2,
        "ram_gb": 8,
    },
    "zhayujie/chatgpt-on-wechat": {
        "category": "llm_app",
        "min_vram_gb": 0,
        "rec_vram_gb": 0,
        "gpu_required": False,
        "gpu_backends": ["cpu"],
        "disk_gb": 1,
        "ram_gb": 4,
    },
    "continuedev/continue": {
        "category": "ai_dev_tool",
        "min_vram_gb": 0,
        "rec_vram_gb": 0,
        "gpu_required": False,
        "gpu_backends": ["cpu"],
        "disk_gb": 1,
        "ram_gb": 4,
    },
    "invoke-ai/invokeai": {
        "category": "image_gen",
        "min_vram_gb": 4,
        "rec_vram_gb": 8,
        "gpu_required": True,
        "gpu_backends": ["cuda", "mps"],
        "disk_gb": 15,
        "ram_gb": 16,
    },
    "bmaltais/kohya_ss": {
        "category": "image_training",
        "min_vram_gb": 8,
        "rec_vram_gb": 12,
        "gpu_required": True,
        "gpu_backends": ["cuda"],
        "disk_gb": 20,
        "ram_gb": 16,
    },
    "huggingface/transformers": {
        "category": "ml_framework",
        "min_vram_gb": 0,
        "rec_vram_gb": 8,
        "gpu_required": False,
        "gpu_backends": ["cuda", "mps", "rocm", "cpu"],
        "disk_gb": 5,
        "ram_gb": 8,
    },
    "ultralytics/ultralytics": {
        "category": "cv_inference",
        "min_vram_gb": 2,
        "rec_vram_gb": 8,
        "gpu_required": False,
        "gpu_backends": ["cuda", "mps", "cpu"],
        "disk_gb": 3,
        "ram_gb": 8,
    },
    "facebookresearch/detectron2": {
        "category": "cv_training",
        "min_vram_gb": 4,
        "rec_vram_gb": 8,
        "gpu_required": True,
        "gpu_backends": ["cuda"],
        "disk_gb": 10,
        "ram_gb": 16,
    },
    "gradio-app/gradio": {
        "category": "ml_framework",
        "min_vram_gb": 0,
        "rec_vram_gb": 0,
        "gpu_required": False,
        "gpu_backends": ["cpu"],
        "disk_gb": 1,
        "ram_gb": 4,
    },
    "streamlit/streamlit": {
        "category": "ml_framework",
        "min_vram_gb": 0,
        "rec_vram_gb": 0,
        "gpu_required": False,
        "gpu_backends": ["cpu"],
        "disk_gb": 1,
        "ram_gb": 4,
    },
}
|
|
242
|
+
|
|
243
|
+
# Project category descriptions.
# NOTE: the values are user-facing strings (Chinese UI text) looked up by
# check_hardware_compatibility(); do not change them without updating the UI.
_AI_CATEGORIES = {
    "llm_inference": "LLM 推理引擎",
    "llm_training": "LLM 训练/微调",
    "llm_ui": "LLM 交互界面",
    "llm_app": "LLM 应用",
    "image_gen": "AI 图像生成",
    "image_training": "AI 图像训练",
    "cv_inference": "计算机视觉推理",
    "cv_training": "计算机视觉训练",
    "ml_framework": "ML 框架/库",
    "ai_agent": "AI Agent 框架",
    "ai_dev_tool": "AI 开发工具",
}
|
|
257
|
+
|
|
258
|
+
|
|
259
|
+
def get_hardware_req(project_key: str) -> dict | None:
    """Return the hardware-requirement record for *project_key*.

    The lookup is case-insensitive: the key is lowercased before probing
    the database (all database keys are lowercase ``owner/repo`` strings).
    Returns ``None`` when the project is not in the database.
    """
    normalized_key = project_key.lower()
    return _AI_HARDWARE_REQS.get(normalized_key)
|
|
262
|
+
|
|
263
|
+
|
|
264
|
+
def check_hardware_compatibility(project_key: str, gpu_info: dict, env: dict) -> dict:
    """Check whether the user's hardware satisfies a project's requirements.

    Fix: the "GPU required but none detected" warning was an f-string with
    no placeholders (Ruff F541); the extraneous ``f`` prefix is dropped —
    the runtime string is unchanged.

    Args:
        project_key: "owner/repo" identifier; looked up (case-insensitively)
            in the hardware-requirement database via get_hardware_req().
        gpu_info: detected GPU info; reads "vram_gb" and "type"
            (e.g. "nvidia" / "apple_mps" / "amd_rocm" / "cpu_only").
        env: detected environment; reads env["hardware"]["ram_gb"] and
            env["disk"]["free_gb"].

    Returns:
        {
            "compatible": bool,
            "warnings": [str],
            "recommendations": [str],
            "category": str,
        }
        Unknown projects are treated as compatible with category "unknown".
    """
    req = get_hardware_req(project_key)
    if not req:
        # Not in the database — assume compatible rather than block the user.
        return {"compatible": True, "warnings": [], "recommendations": [], "category": "unknown"}

    warnings = []
    recommendations = []
    compatible = True

    # "or 0" normalizes both missing keys and explicit None values to 0,
    # which the checks below treat as "unknown, skip the check".
    vram = gpu_info.get("vram_gb") or 0
    gpu_type = gpu_info.get("type", "cpu_only")
    ram = env.get("hardware", {}).get("ram_gb") or 0
    disk = env.get("disk", {}).get("free_gb") or 0

    # GPU presence check — only a hard requirement marks the setup incompatible.
    if req["gpu_required"] and gpu_type == "cpu_only":
        compatible = False
        warnings.append("此项目需要 GPU,但未检测到独立显卡")
        recommendations.append("安装 NVIDIA GPU + CUDA 或使用 Apple Silicon Mac")

    # GPU backend compatibility: map detector GPU type → backend name used
    # in the requirement database. CPU is never warned about here; the
    # gpu_required check above already covers the CPU-only case.
    backend_map = {"nvidia": "cuda", "apple_mps": "mps", "amd_rocm": "rocm", "cpu_only": "cpu"}
    user_backend = backend_map.get(gpu_type, "cpu")
    if user_backend not in req["gpu_backends"] and user_backend != "cpu":
        warnings.append(f"你的 GPU 后端 ({user_backend}) 可能不完全支持此项目")

    # VRAM check — vram == 0 means "unknown / no dedicated GPU" and is skipped.
    if vram and vram < req["min_vram_gb"] and req["min_vram_gb"] > 0:
        compatible = False
        warnings.append(f"GPU 显存 {vram}GB 低于最低要求 {req['min_vram_gb']}GB")
    elif vram and vram < req["rec_vram_gb"]:
        warnings.append(f"GPU 显存 {vram}GB 低于推荐值 {req['rec_vram_gb']}GB,可能影响性能")

    # RAM check (advisory only — never flips `compatible`).
    if ram and ram < req["ram_gb"]:
        warnings.append(f"系统内存 {ram}GB 低于推荐值 {req['ram_gb']}GB")

    # Disk-space check (advisory only).
    if disk and disk < req["disk_gb"]:
        warnings.append(f"磁盘剩余 {disk}GB 可能不足(推荐 {req['disk_gb']}GB+)")

    # Translate the machine category to its display string; fall back to the
    # raw category (or "unknown" when absent) for unmapped values.
    category = _AI_CATEGORIES.get(req.get("category", ""), req.get("category", "unknown"))

    return {
        "compatible": compatible,
        "warnings": warnings,
        "recommendations": recommendations,
        "category": category,
    }
|
|
324
|
+
|
|
325
|
+
|
|
326
|
+
# ─────────────────────────────────────────────
|
|
327
|
+
# 已知热门项目数据库
|
|
328
|
+
#
|
|
329
|
+
# 为什么需要这个:
|
|
330
|
+
# - 这些项目的安装步骤有特殊性,正则提取 README 拿不对
|
|
331
|
+
# - 部分项目需要 GPU 选择,必须结合 env 动态生成
|
|
332
|
+
# - 给用户直接正确的命令,避免踩坑
|
|
333
|
+
# ─────────────────────────────────────────────
|
|
334
|
+
|
|
335
|
+
# 格式说明:
|
|
336
|
+
# "steps" → 所有平台通用步骤
|
|
337
|
+
# "by_os" → 平台差异步骤(macos / linux / windows)
|
|
338
|
+
# "by_platform" → 特殊分发策略(docker_preferred 等)
|
|
339
|
+
# "steps_docker" → Docker 分发路径
|
|
340
|
+
# "steps_pip" → pip 分发路径
|
|
341
|
+
# 支持模板占位符:{python} {pip} {venv_activate} {torch_install}
|
|
342
|
+
|
|
343
|
+
_KNOWN_PROJECTS: dict[str, dict] = {
|
|
344
|
+
|
|
345
|
+
# ── 本地 LLM 推理 ──────────────────────────────
|
|
346
|
+
"ollama/ollama": {
|
|
347
|
+
"desc": "最简单的本地 LLM 运行工具",
|
|
348
|
+
"by_os": {
|
|
349
|
+
"macos": [
|
|
350
|
+
{"cmd": "brew install ollama", "desc": "安装 Ollama"},
|
|
351
|
+
{"cmd": "brew services start ollama", "desc": "设置后台自动启动"},
|
|
352
|
+
{"cmd": "ollama pull qwen2.5:1.5b", "desc": "下载推荐小模型(~1GB,普通电脑可跑)"},
|
|
353
|
+
],
|
|
354
|
+
"linux": [
|
|
355
|
+
{"cmd": "curl -fsSL https://ollama.com/install.sh | sh", "desc": "一键安装 Ollama", "warn": True},
|
|
356
|
+
{"cmd": "sudo systemctl enable --now ollama", "desc": "设置为系统服务"},
|
|
357
|
+
{"cmd": "ollama pull qwen2.5:1.5b", "desc": "下载推荐小模型(~1GB)"},
|
|
358
|
+
],
|
|
359
|
+
"windows": [
|
|
360
|
+
{"cmd": "winget install Ollama.Ollama", "desc": "安装 Ollama(winget)"},
|
|
361
|
+
{"cmd": "ollama pull qwen2.5:1.5b", "desc": "下载推荐小模型(~1GB)"},
|
|
362
|
+
],
|
|
363
|
+
},
|
|
364
|
+
"launch": "ollama serve",
|
|
365
|
+
"notes": (
|
|
366
|
+
"Ollama API:http://localhost:11434\n"
|
|
367
|
+
"推荐模型(按配置从低到高):\n"
|
|
368
|
+
" qwen2.5:1.5b (~1GB 显存/内存,普通笔记本可跑 ← 推荐新手)\n"
|
|
369
|
+
" qwen2.5:3b (~2GB 显存/内存,流畅度更好)\n"
|
|
370
|
+
" qwen2.5:7b (~4GB 显存/内存,质量最佳)\n"
|
|
371
|
+
"中文支持:qwen2.5 系列最佳"
|
|
372
|
+
),
|
|
373
|
+
},
|
|
374
|
+
|
|
375
|
+
"ggerganov/llama.cpp": {
|
|
376
|
+
"desc": "高性能本地 LLM 推理引擎(GGUF 格式)",
|
|
377
|
+
"steps": [
|
|
378
|
+
{"cmd": "git clone --depth 1 https://github.com/ggerganov/llama.cpp.git", "desc": "克隆代码"},
|
|
379
|
+
{"cmd": "cd llama.cpp", "desc": "进入目录"},
|
|
380
|
+
],
|
|
381
|
+
"by_os": {
|
|
382
|
+
"macos": [
|
|
383
|
+
{"cmd": "cmake -B build -DGGML_METAL=ON && cmake --build build --config Release -j$(sysctl -n hw.ncpu)",
|
|
384
|
+
"desc": "编译(Apple Metal 加速)"},
|
|
385
|
+
],
|
|
386
|
+
"linux": [
|
|
387
|
+
{"cmd": "cmake -B build && cmake --build build --config Release -j$(nproc)",
|
|
388
|
+
"desc": "编译(CPU)"},
|
|
389
|
+
],
|
|
390
|
+
"windows": [
|
|
391
|
+
{"cmd": "cmake -B build && cmake --build build --config Release",
|
|
392
|
+
"desc": "编译"},
|
|
393
|
+
],
|
|
394
|
+
},
|
|
395
|
+
"launch": "./build/bin/llama-cli -m model.gguf -p '你好' -n 128",
|
|
396
|
+
"notes": "下载 GGUF 模型后放入项目目录即可运行",
|
|
397
|
+
},
|
|
398
|
+
|
|
399
|
+
# ── Stable Diffusion 图像生成 ──────────────────
|
|
400
|
+
"comfyanonymous/comfyui": {
|
|
401
|
+
"desc": "最强大的 Stable Diffusion 节点式工作流 UI",
|
|
402
|
+
"steps": [
|
|
403
|
+
{"cmd": "git clone --depth 1 https://github.com/comfyanonymous/ComfyUI.git", "desc": "克隆代码"},
|
|
404
|
+
{"cmd": "cd ComfyUI", "desc": "进入目录"},
|
|
405
|
+
{"cmd": "{python} -m venv venv", "desc": "创建虚拟环境(隔离依赖)"},
|
|
406
|
+
{"cmd": "{venv_activate}", "desc": "激活虚拟环境"},
|
|
407
|
+
{"cmd": "{torch_install}", "desc": "安装 PyTorch(已自动适配你的 GPU)"},
|
|
408
|
+
{"cmd": "{pip} install -r requirements.txt", "desc": "安装其余依赖"},
|
|
409
|
+
],
|
|
410
|
+
"launch": "{python} main.py --listen",
|
|
411
|
+
"notes": "浏览器打开 http://127.0.0.1:8188\n模型(.safetensors)放入 models/checkpoints/ 目录",
|
|
412
|
+
},
|
|
413
|
+
|
|
414
|
+
"automatic1111/stable-diffusion-webui": {
|
|
415
|
+
"desc": "最流行的 Stable Diffusion Web UI(A1111)",
|
|
416
|
+
"steps": [
|
|
417
|
+
{"cmd": "git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git", "desc": "克隆代码"},
|
|
418
|
+
{"cmd": "cd stable-diffusion-webui", "desc": "进入目录"},
|
|
419
|
+
],
|
|
420
|
+
"by_os": {
|
|
421
|
+
"macos": [{"cmd": "bash webui.sh", "desc": "首次运行(自动配置环境,约30分钟)"}],
|
|
422
|
+
"linux": [{"cmd": "bash webui.sh", "desc": "首次运行(自动配置环境,约30分钟)"}],
|
|
423
|
+
"windows": [{"cmd": ".\\webui-user.bat", "desc": "首次运行(自动配置环境,约30分钟)"}],
|
|
424
|
+
},
|
|
425
|
+
"launch": "bash webui.sh",
|
|
426
|
+
"notes": "浏览器打开 http://127.0.0.1:7860\n首次运行会自动下载 Python 环境,需要稳定网络",
|
|
427
|
+
},
|
|
428
|
+
|
|
429
|
+
"lllyasviel/stable-diffusion-webui-forge": {
|
|
430
|
+
"desc": "A1111 高性能优化版(Forge),速度提升 30%+",
|
|
431
|
+
"steps": [
|
|
432
|
+
{"cmd": "git clone https://github.com/lllyasviel/stable-diffusion-webui-forge.git", "desc": "克隆代码"},
|
|
433
|
+
{"cmd": "cd stable-diffusion-webui-forge", "desc": "进入目录"},
|
|
434
|
+
],
|
|
435
|
+
"by_os": {
|
|
436
|
+
"macos": [{"cmd": "bash webui.sh", "desc": "首次运行(自动配置)"}],
|
|
437
|
+
"linux": [{"cmd": "bash webui.sh", "desc": "首次运行(自动配置)"}],
|
|
438
|
+
"windows": [{"cmd": ".\\webui-user.bat", "desc": "首次运行(自动配置)"}],
|
|
439
|
+
},
|
|
440
|
+
"launch": "bash webui.sh",
|
|
441
|
+
"notes": "浏览器打开 http://127.0.0.1:7860",
|
|
442
|
+
},
|
|
443
|
+
|
|
444
|
+
# ── 文字/聊天 UI ───────────────────────────────
|
|
445
|
+
"open-webui/open-webui": {
|
|
446
|
+
"desc": "功能最完备的本地 ChatGPT 风格 Web UI,深度整合 Ollama",
|
|
447
|
+
"by_platform": "docker_preferred",
|
|
448
|
+
"steps_docker": [
|
|
449
|
+
{"cmd": (
|
|
450
|
+
"docker run -d -p 3000:8080 "
|
|
451
|
+
"--add-host=host.docker.internal:host-gateway "
|
|
452
|
+
"-v open-webui:/app/backend/data "
|
|
453
|
+
"--name open-webui --restart always "
|
|
454
|
+
"ghcr.io/open-webui/open-webui:main"
|
|
455
|
+
), "desc": "Docker 一键启动(推荐)"},
|
|
456
|
+
],
|
|
457
|
+
"steps_pip": [
|
|
458
|
+
{"cmd": "{pip} install open-webui", "desc": "pip 安装"},
|
|
459
|
+
{"cmd": "open-webui serve", "desc": "启动服务"},
|
|
460
|
+
],
|
|
461
|
+
"launch": "open-webui serve",
|
|
462
|
+
"notes": "浏览器打开 http://localhost:3000\n需先启动 Ollama(ollama serve)",
|
|
463
|
+
},
|
|
464
|
+
|
|
465
|
+
"oobabooga/text-generation-webui": {
|
|
466
|
+
"desc": "最全面的本地 LLM Web 界面,支持 GGUF/GPTQ/AWQ/EXL2 多格式",
|
|
467
|
+
"steps": [
|
|
468
|
+
{"cmd": "git clone https://github.com/oobabooga/text-generation-webui.git", "desc": "克隆代码"},
|
|
469
|
+
{"cmd": "cd text-generation-webui", "desc": "进入目录"},
|
|
470
|
+
],
|
|
471
|
+
"by_os": {
|
|
472
|
+
"macos": [{"cmd": "bash start_macos.sh", "desc": "一键安装并启动(macOS)"}],
|
|
473
|
+
"linux": [{"cmd": "bash start_linux.sh", "desc": "一键安装并启动(Linux)"}],
|
|
474
|
+
"windows": [{"cmd": ".\\start_windows.bat", "desc": "一键安装并启动(Windows)"}],
|
|
475
|
+
},
|
|
476
|
+
"launch": "bash start_linux.sh",
|
|
477
|
+
"notes": "浏览器打开 http://127.0.0.1:7860\nmodels/ 目录放 GGUF 文件即可直接加载",
|
|
478
|
+
},
|
|
479
|
+
|
|
480
|
+
"lobehub/lobe-chat": {
|
|
481
|
+
"desc": "现代化 AI 对话界面,支持多 LLM 和插件",
|
|
482
|
+
"steps": [
|
|
483
|
+
{"cmd": "git clone --depth 1 https://github.com/lobehub/lobe-chat.git", "desc": "克隆代码"},
|
|
484
|
+
{"cmd": "cd lobe-chat", "desc": "进入目录"},
|
|
485
|
+
{"cmd": "{node_install}", "desc": "安装依赖"},
|
|
486
|
+
{"cmd": "cp .env.example .env", "desc": "创建配置文件"},
|
|
487
|
+
],
|
|
488
|
+
"launch": "{node_dev}",
|
|
489
|
+
"notes": "编辑 .env 文件填写 API Key\n浏览器打开 http://localhost:3010",
|
|
490
|
+
},
|
|
491
|
+
|
|
492
|
+
"mckaywrigley/chatbot-ui": {
|
|
493
|
+
"desc": "开源 ChatGPT UI 克隆(Next.js)",
|
|
494
|
+
"steps": [
|
|
495
|
+
{"cmd": "git clone --depth 1 https://github.com/mckaywrigley/chatbot-ui.git", "desc": "克隆代码"},
|
|
496
|
+
{"cmd": "cd chatbot-ui", "desc": "进入目录"},
|
|
497
|
+
{"cmd": "{node_install}", "desc": "安装依赖"},
|
|
498
|
+
{"cmd": "cp .env.local.example .env.local", "desc": "创建配置文件"},
|
|
499
|
+
],
|
|
500
|
+
"launch": "{node_dev}",
|
|
501
|
+
"notes": "编辑 .env.local 设置 OPENAI_API_KEY\n浏览器打开 http://localhost:3000",
|
|
502
|
+
},
|
|
503
|
+
|
|
504
|
+
# ── 训练 / 微调 ────────────────────────────────
|
|
505
|
+
"hiyouga/llama-factory": {
|
|
506
|
+
"desc": "最受欢迎的 LLM 微调框架,支持 LoRA/QLoRA/全量微调",
|
|
507
|
+
"steps": [
|
|
508
|
+
{"cmd": "git clone --depth 1 https://github.com/hiyouga/LLaMA-Factory.git", "desc": "克隆代码"},
|
|
509
|
+
{"cmd": "cd LLaMA-Factory", "desc": "进入目录"},
|
|
510
|
+
{"cmd": "{python} -m venv venv", "desc": "创建虚拟环境"},
|
|
511
|
+
{"cmd": "{venv_activate}", "desc": "激活虚拟环境"},
|
|
512
|
+
{"cmd": "{torch_install}", "desc": "安装 PyTorch(已自动适配 GPU)"},
|
|
513
|
+
{"cmd": "{pip} install -e '.[torch,metrics]'", "desc": "安装 LLaMA-Factory"},
|
|
514
|
+
],
|
|
515
|
+
"launch": "llamafactory-cli webui",
|
|
516
|
+
"notes": "浏览器打开 http://localhost:7860",
|
|
517
|
+
},
|
|
518
|
+
|
|
519
|
+
"bmaltais/kohya_ss": {
|
|
520
|
+
"desc": "LoRA / DreamBooth 训练工具(Stable Diffusion)",
|
|
521
|
+
"steps": [
|
|
522
|
+
{"cmd": "git clone --depth 1 https://github.com/bmaltais/kohya_ss.git", "desc": "克隆代码"},
|
|
523
|
+
{"cmd": "cd kohya_ss", "desc": "进入目录"},
|
|
524
|
+
],
|
|
525
|
+
"by_os": {
|
|
526
|
+
"macos": [{"cmd": "bash setup.sh", "desc": "一键安装(macOS)"}],
|
|
527
|
+
"linux": [{"cmd": "bash setup.sh", "desc": "一键安装(Linux)"}],
|
|
528
|
+
"windows": [{"cmd": ".\\setup.bat", "desc": "一键安装(Windows)"}],
|
|
529
|
+
},
|
|
530
|
+
"launch": "bash gui.sh",
|
|
531
|
+
"notes": "浏览器打开 http://127.0.0.1:7860",
|
|
532
|
+
},
|
|
533
|
+
|
|
534
|
+
"invoke-ai/invokeai": {
|
|
535
|
+
"desc": "专业级 AI 绘图工具,界面精美",
|
|
536
|
+
"steps": [
|
|
537
|
+
{"cmd": "{python} -m venv invokeai_env", "desc": "创建虚拟环境"},
|
|
538
|
+
{"cmd": "{venv_activate}", "desc": "激活虚拟环境(invokeai_env)"},
|
|
539
|
+
{"cmd": "{torch_install}", "desc": "安装 PyTorch(GPU 适配)"},
|
|
540
|
+
{"cmd": "{pip} install InvokeAI", "desc": "安装 InvokeAI"},
|
|
541
|
+
{"cmd": "invokeai-configure", "desc": "交互式初始化配置"},
|
|
542
|
+
],
|
|
543
|
+
"launch": "invokeai --web",
|
|
544
|
+
"notes": "浏览器打开 http://localhost:9090",
|
|
545
|
+
},
|
|
546
|
+
|
|
547
|
+
# ── 中文场景 ───────────────────────────────────
|
|
548
|
+
"zhayujie/chatgpt-on-wechat": {
|
|
549
|
+
"desc": "将 ChatGPT 接入微信/企业微信/飞书/钉钉",
|
|
550
|
+
"steps": [
|
|
551
|
+
{"cmd": "git clone --depth 1 https://github.com/zhayujie/chatgpt-on-wechat.git", "desc": "克隆代码"},
|
|
552
|
+
{"cmd": "cd chatgpt-on-wechat", "desc": "进入目录"},
|
|
553
|
+
{"cmd": "{python} -m venv venv", "desc": "创建虚拟环境"},
|
|
554
|
+
{"cmd": "{venv_activate}", "desc": "激活虚拟环境"},
|
|
555
|
+
{"cmd": "{pip} install -r requirements.txt", "desc": "安装依赖"},
|
|
556
|
+
{"cmd": "cp config-template.json config.json", "desc": "创建配置文件"},
|
|
557
|
+
],
|
|
558
|
+
"launch": "{python} app.py",
|
|
559
|
+
"notes": "编辑 config.json:填写 model(gpt-3.5-turbo)和 open_ai_api_key",
|
|
560
|
+
},
|
|
561
|
+
|
|
562
|
+
# ── 本地 API 服务 ──────────────────────────────
|
|
563
|
+
"mudler/localai": {
|
|
564
|
+
"desc": "本地 OpenAI 兼容 API,可替换所有 ChatGPT 调用",
|
|
565
|
+
"by_platform": "docker_preferred",
|
|
566
|
+
"steps_docker": [
|
|
567
|
+
{"cmd": (
|
|
568
|
+
"docker run -p 8080:8080 "
|
|
569
|
+
"-v $PWD/models:/build/models:cached "
|
|
570
|
+
"localai/localai:latest-aio-cpu"
|
|
571
|
+
), "desc": "Docker 一键启动(CPU 模式)"},
|
|
572
|
+
],
|
|
573
|
+
"steps_pip": [
|
|
574
|
+
{"cmd": "# LocalAI 暂不支持 pip 安装,请使用 Docker 或查看官方文档", "desc": "提示"},
|
|
575
|
+
],
|
|
576
|
+
"launch": None,
|
|
577
|
+
"notes": "API 地址:http://localhost:8080\n完全兼容 OpenAI API,只需修改 base_url 即可",
|
|
578
|
+
},
|
|
579
|
+
|
|
580
|
+
# ── 开发工具 ───────────────────────────────────
|
|
581
|
+
"continuedev/continue": {
|
|
582
|
+
"desc": "VS Code / JetBrains AI 编程助手插件",
|
|
583
|
+
"by_os": {
|
|
584
|
+
"macos": [
|
|
585
|
+
{"cmd": "code --install-extension continue.continue", "desc": "VS Code 安装 Continue 插件"},
|
|
586
|
+
],
|
|
587
|
+
"linux": [
|
|
588
|
+
{"cmd": "code --install-extension continue.continue", "desc": "VS Code 安装 Continue 插件"},
|
|
589
|
+
],
|
|
590
|
+
"windows": [
|
|
591
|
+
{"cmd": "code --install-extension continue.continue", "desc": "VS Code 安装 Continue 插件"},
|
|
592
|
+
],
|
|
593
|
+
},
|
|
594
|
+
"launch": None,
|
|
595
|
+
"notes": "重启 VS Code 后,侧边栏出现 Continue 图标\n连接本地 Ollama:base_url = http://localhost:11434",
|
|
596
|
+
},
|
|
597
|
+
|
|
598
|
+
"huggingface/transformers": {
|
|
599
|
+
"desc": "Hugging Face Transformers 深度学习库",
|
|
600
|
+
"steps": [
|
|
601
|
+
{"cmd": "{python} -m venv venv", "desc": "创建虚拟环境"},
|
|
602
|
+
{"cmd": "{venv_activate}", "desc": "激活虚拟环境"},
|
|
603
|
+
{"cmd": "{torch_install}", "desc": "安装 PyTorch(GPU 适配)"},
|
|
604
|
+
{"cmd": "{pip} install transformers[torch]", "desc": "安装 Transformers"},
|
|
605
|
+
{"cmd": "{pip} install accelerate datasets", "desc": "安装常用配套库"},
|
|
606
|
+
],
|
|
607
|
+
"launch": "{python} -c \"from transformers import pipeline; print(pipeline('text-generation')('Hello'))\"",
|
|
608
|
+
"notes": "Transformers 文档:https://huggingface.co/docs/transformers",
|
|
609
|
+
},
|
|
610
|
+
|
|
611
|
+
# ── 目标检测 / 计算机视觉 ──────────────────────
|
|
612
|
+
"ultralytics/ultralytics": {
|
|
613
|
+
"desc": "YOLOv8/YOLOv11 目标检测,pip 一键安装",
|
|
614
|
+
"steps": [
|
|
615
|
+
{"cmd": "{python} -m venv venv", "desc": "创建虚拟环境"},
|
|
616
|
+
{"cmd": "{venv_activate}", "desc": "激活虚拟环境"},
|
|
617
|
+
{"cmd": "{torch_install}", "desc": "安装 PyTorch(GPU 适配)"},
|
|
618
|
+
{"cmd": "{pip} install ultralytics", "desc": "安装 Ultralytics YOLO"},
|
|
619
|
+
],
|
|
620
|
+
"launch": "yolo predict model=yolo11n.pt source=0",
|
|
621
|
+
"notes": "快速测试:yolo detect predict model=yolo11n.pt source='https://ultralytics.com/images/bus.jpg'",
|
|
622
|
+
},
|
|
623
|
+
|
|
624
|
+
"facebookresearch/detectron2": {
|
|
625
|
+
"desc": "Facebook Detectron2 目标检测/分割框架",
|
|
626
|
+
"steps": [
|
|
627
|
+
{"cmd": "git clone --depth 1 https://github.com/facebookresearch/detectron2.git", "desc": "克隆代码"},
|
|
628
|
+
{"cmd": "cd detectron2", "desc": "进入目录"},
|
|
629
|
+
{"cmd": "{python} -m venv venv", "desc": "创建虚拟环境"},
|
|
630
|
+
{"cmd": "{venv_activate}", "desc": "激活虚拟环境"},
|
|
631
|
+
{"cmd": "{torch_install}", "desc": "安装 PyTorch(GPU 适配)"},
|
|
632
|
+
{"cmd": "{pip} install -e .", "desc": "安装 Detectron2(开发模式)"},
|
|
633
|
+
],
|
|
634
|
+
"launch": "{python} demo/demo.py --help",
|
|
635
|
+
"notes": "需要 OpenCV:pip install opencv-python",
|
|
636
|
+
},
|
|
637
|
+
|
|
638
|
+
# ── LLM 推理服务 ───────────────────────────────
|
|
639
|
+
"vllm-project/vllm": {
|
|
640
|
+
"desc": "高吞吐量 LLM 推理引擎(生产级 GPU 服务)",
|
|
641
|
+
"steps": [
|
|
642
|
+
{"cmd": "{python} -m venv venv", "desc": "创建虚拟环境"},
|
|
643
|
+
{"cmd": "{venv_activate}", "desc": "激活虚拟环境"},
|
|
644
|
+
{"cmd": "{pip} install vllm", "desc": "安装 vLLM(需要 NVIDIA GPU)"},
|
|
645
|
+
],
|
|
646
|
+
"launch": "python -m vllm.entrypoints.openai.api_server --model Qwen/Qwen2.5-7B-Instruct",
|
|
647
|
+
"notes": "⚠️ 需要 NVIDIA GPU,不支持 Apple Silicon\nAPI 兼容 OpenAI 格式:http://localhost:8000",
|
|
648
|
+
},
|
|
649
|
+
|
|
650
|
+
"nomic-ai/gpt4all": {
|
|
651
|
+
"desc": "GPT4All 本地 LLM 桌面客户端(图形界面)",
|
|
652
|
+
"by_os": {
|
|
653
|
+
"macos": [
|
|
654
|
+
{"cmd": "brew install --cask gpt4all", "desc": "安装 GPT4All 桌面版(macOS)"},
|
|
655
|
+
],
|
|
656
|
+
"linux": [
|
|
657
|
+
{"cmd": "# Linux 请从官网下载 AppImage:https://gpt4all.io/", "desc": "下载 Linux 版本"},
|
|
658
|
+
{"cmd": "chmod +x gpt4all-*.AppImage && ./gpt4all-*.AppImage", "desc": "直接运行 AppImage"},
|
|
659
|
+
],
|
|
660
|
+
"windows": [
|
|
661
|
+
{"cmd": "winget install nomic-ai.gpt4all", "desc": "安装 GPT4All(winget)"},
|
|
662
|
+
],
|
|
663
|
+
},
|
|
664
|
+
"launch": "gpt4all",
|
|
665
|
+
"notes": "图形界面本地 LLM,支持 llama/mistral/qwen 等,无需命令行",
|
|
666
|
+
},
|
|
667
|
+
|
|
668
|
+
"lm-sys/fastchat": {
|
|
669
|
+
"desc": "FastChat - LLM 对话服务平台(支持 Vicuna/LLaMA 等)",
|
|
670
|
+
"steps": [
|
|
671
|
+
{"cmd": "{python} -m venv venv", "desc": "创建虚拟环境"},
|
|
672
|
+
{"cmd": "{venv_activate}", "desc": "激活虚拟环境"},
|
|
673
|
+
{"cmd": "{pip} install fschat", "desc": "安装 FastChat"},
|
|
674
|
+
],
|
|
675
|
+
"launch": "python -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5",
|
|
676
|
+
"notes": "Web UI 启动:python -m fastchat.serve.gradio_web_server\nAPI 服务:python -m fastchat.serve.openai_api_server --host 0.0.0.0",
|
|
677
|
+
},
|
|
678
|
+
|
|
679
|
+
# ── Gradio / Streamlit 开发框架 ────────────────
|
|
680
|
+
"gradio-app/gradio": {
|
|
681
|
+
"desc": "快速构建 AI Demo Web UI(pip 一键安装)",
|
|
682
|
+
"steps": [
|
|
683
|
+
{"cmd": "{python} -m venv venv", "desc": "创建虚拟环境"},
|
|
684
|
+
{"cmd": "{venv_activate}", "desc": "激活虚拟环境"},
|
|
685
|
+
{"cmd": "{pip} install gradio", "desc": "安装 Gradio"},
|
|
686
|
+
],
|
|
687
|
+
"launch": "{python} -c \"import gradio as gr; gr.Interface(lambda x: x, 'text', 'text').launch()\"",
|
|
688
|
+
"notes": "浏览器自动打开 http://127.0.0.1:7860",
|
|
689
|
+
},
|
|
690
|
+
|
|
691
|
+
"streamlit/streamlit": {
|
|
692
|
+
"desc": "数据应用快速构建框架(pip 一键安装)",
|
|
693
|
+
"steps": [
|
|
694
|
+
{"cmd": "{python} -m venv venv", "desc": "创建虚拟环境"},
|
|
695
|
+
{"cmd": "{venv_activate}", "desc": "激活虚拟环境"},
|
|
696
|
+
{"cmd": "{pip} install streamlit", "desc": "安装 Streamlit"},
|
|
697
|
+
],
|
|
698
|
+
"launch": "streamlit hello",
|
|
699
|
+
"notes": "启动自己的 app:streamlit run app.py",
|
|
700
|
+
},
|
|
701
|
+
|
|
702
|
+
# ── 工作流自动化 ───────────────────────────────
|
|
703
|
+
"n8n-io/n8n": {
|
|
704
|
+
"desc": "n8n 工作流自动化(npm 或 Docker)",
|
|
705
|
+
"by_platform": "docker_preferred",
|
|
706
|
+
"steps_docker": [
|
|
707
|
+
{"cmd": (
|
|
708
|
+
"docker run -it --rm "
|
|
709
|
+
"-p 5678:5678 "
|
|
710
|
+
"-v n8n_data:/home/node/.n8n "
|
|
711
|
+
"docker.n8n.io/n8nio/n8n"
|
|
712
|
+
), "desc": "Docker 一键启动 n8n"},
|
|
713
|
+
],
|
|
714
|
+
"steps_pip": [
|
|
715
|
+
{"cmd": "npm install -g n8n", "desc": "npm 全局安装 n8n"},
|
|
716
|
+
{"cmd": "n8n start", "desc": "启动 n8n"},
|
|
717
|
+
],
|
|
718
|
+
"launch": "n8n start",
|
|
719
|
+
"notes": "浏览器打开 http://localhost:5678\n⚠️ 首次启动需要创建账号",
|
|
720
|
+
},
|
|
721
|
+
|
|
722
|
+
# ── 搜索引擎 ───────────────────────────────────
|
|
723
|
+
"searxng/searxng": {
|
|
724
|
+
"desc": "SearXNG 隐私自托管搜索引擎(Docker 推荐)",
|
|
725
|
+
"by_platform": "docker_preferred",
|
|
726
|
+
"steps_docker": [
|
|
727
|
+
{"cmd": "git clone --depth 1 https://github.com/searxng/searxng-docker.git && cd searxng-docker",
|
|
728
|
+
"desc": "克隆配置文件"},
|
|
729
|
+
{"cmd": "docker compose up -d", "desc": "Docker 启动 SearXNG"},
|
|
730
|
+
],
|
|
731
|
+
"steps_pip": [
|
|
732
|
+
{"cmd": "git clone --depth 1 https://github.com/searxng/searxng.git && cd searxng", "desc": "克隆代码"},
|
|
733
|
+
{"cmd": "{python} -m venv venv && {venv_activate}", "desc": "创建虚拟环境"},
|
|
734
|
+
{"cmd": "{pip} install -e '.[client]'", "desc": "安装依赖"},
|
|
735
|
+
],
|
|
736
|
+
"launch": None,
|
|
737
|
+
"notes": "浏览器打开 http://localhost:8080",
|
|
738
|
+
},
|
|
739
|
+
|
|
740
|
+
# ── 命令行工具(Rust)─────────────────────────
|
|
741
|
+
"burntsushi/ripgrep": {
|
|
742
|
+
"desc": "ripgrep — 极速文本搜索工具(rust 实现,速度比 grep 快10x)",
|
|
743
|
+
"by_os": {
|
|
744
|
+
"macos": [
|
|
745
|
+
{"cmd": "brew install ripgrep", "desc": "安装 ripgrep(推荐方式)"},
|
|
746
|
+
],
|
|
747
|
+
"linux": [
|
|
748
|
+
{"cmd": "cargo install ripgrep", "desc": "Cargo 安装(需要 Rust 工具链)"},
|
|
749
|
+
],
|
|
750
|
+
"windows": [
|
|
751
|
+
{"cmd": "winget install BurntSushi.ripgrep.MSVC", "desc": "winget 安装"},
|
|
752
|
+
],
|
|
753
|
+
},
|
|
754
|
+
"launch": "rg --version",
|
|
755
|
+
"notes": "使用:rg '搜索词' 目录/\nmacOS 推荐 brew,无需编译,30秒安装完",
|
|
756
|
+
},
|
|
757
|
+
|
|
758
|
+
"BurntSushi/ripgrep": { # 大写别名
|
|
759
|
+
"desc": "ripgrep — 极速文本搜索(Rust 编写)",
|
|
760
|
+
"by_os": {
|
|
761
|
+
"macos": [{"cmd": "brew install ripgrep", "desc": "安装 ripgrep(Homebrew)"}],
|
|
762
|
+
"linux": [{"cmd": "cargo install ripgrep", "desc": "Cargo 安装"}],
|
|
763
|
+
"windows": [{"cmd": "winget install BurntSushi.ripgrep.MSVC", "desc": "winget 安装"}],
|
|
764
|
+
},
|
|
765
|
+
"launch": "rg --version",
|
|
766
|
+
"notes": "使用:rg '关键词' 目录/\n安装 Rust:curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh",
|
|
767
|
+
},
|
|
768
|
+
|
|
769
|
+
"sharkdp/bat": {
|
|
770
|
+
"desc": "bat — 带语法高亮的 cat 替代品(Rust 编写)",
|
|
771
|
+
"by_os": {
|
|
772
|
+
"macos": [{"cmd": "brew install bat", "desc": "安装 bat(Homebrew)"}],
|
|
773
|
+
"linux": [{"cmd": "sudo apt-get install -y bat || cargo install bat", "desc": "安装 bat"}],
|
|
774
|
+
"windows": [{"cmd": "winget install sharkdp.bat", "desc": "winget 安装"}],
|
|
775
|
+
},
|
|
776
|
+
"launch": "bat --version",
|
|
777
|
+
"notes": "使用:bat 文件名\nUbuntu/Debian 上命令可能叫 batcat",
|
|
778
|
+
},
|
|
779
|
+
|
|
780
|
+
"sharkdp/fd": {
|
|
781
|
+
"desc": "fd — 更快更友好的 find 替代品(Rust 编写)",
|
|
782
|
+
"by_os": {
|
|
783
|
+
"macos": [{"cmd": "brew install fd", "desc": "安装 fd(Homebrew)"}],
|
|
784
|
+
"linux": [{"cmd": "sudo apt-get install -y fd-find || cargo install fd-find", "desc": "安装 fd"}],
|
|
785
|
+
"windows": [{"cmd": "winget install sharkdp.fd", "desc": "winget 安装"}],
|
|
786
|
+
},
|
|
787
|
+
"launch": "fd --version",
|
|
788
|
+
"notes": "使用:fd '模式' 目录/\nUbuntu 上命令可能叫 fdfind",
|
|
789
|
+
},
|
|
790
|
+
|
|
791
|
+
"sharkdp/hyperfine": {
|
|
792
|
+
"desc": "hyperfine — 命令行基准测试工具(Rust 编写)",
|
|
793
|
+
"by_os": {
|
|
794
|
+
"macos": [{"cmd": "brew install hyperfine", "desc": "安装 hyperfine(Homebrew)"}],
|
|
795
|
+
"linux": [{"cmd": "cargo install hyperfine", "desc": "Cargo 安装"}],
|
|
796
|
+
"windows": [{"cmd": "winget install sharkdp.hyperfine", "desc": "winget 安装"}],
|
|
797
|
+
},
|
|
798
|
+
"launch": "hyperfine --version",
|
|
799
|
+
"notes": "使用:hyperfine 'sleep 0.5' '命令2'",
|
|
800
|
+
},
|
|
801
|
+
|
|
802
|
+
"junegunn/fzf": {
|
|
803
|
+
"desc": "fzf — 通用命令行模糊查找器(Go 编写)",
|
|
804
|
+
"by_os": {
|
|
805
|
+
"macos": [{"cmd": "brew install fzf", "desc": "安装 fzf(Homebrew)"}],
|
|
806
|
+
"linux": [{"cmd": "sudo apt-get install -y fzf || go install github.com/junegunn/fzf@latest", "desc": "安装 fzf"}],
|
|
807
|
+
"windows": [{"cmd": "winget install junegunn.fzf", "desc": "winget 安装"}],
|
|
808
|
+
},
|
|
809
|
+
"launch": "fzf --version",
|
|
810
|
+
"notes": "使用:fzf 启动交互搜索\nCtrl+R 搜索历史(需 shell 集成)",
|
|
811
|
+
},
|
|
812
|
+
|
|
813
|
+
"jqlang/jq": {
|
|
814
|
+
"desc": "jq — 轻量级命令行 JSON 处理器(C 编写)",
|
|
815
|
+
"by_os": {
|
|
816
|
+
"macos": [{"cmd": "brew install jq", "desc": "安装 jq(Homebrew)"}],
|
|
817
|
+
"linux": [{"cmd": "sudo apt-get install -y jq", "desc": "安装 jq(apt)"}],
|
|
818
|
+
"windows": [{"cmd": "winget install jqlang.jq", "desc": "winget 安装"}],
|
|
819
|
+
},
|
|
820
|
+
"launch": "jq --version",
|
|
821
|
+
"notes": "使用:echo '{\"a\":1}' | jq '.a'",
|
|
822
|
+
},
|
|
823
|
+
|
|
824
|
+
"stedolan/jq": { # 旧 owner 别名
|
|
825
|
+
"desc": "jq — 轻量级命令行 JSON 处理器",
|
|
826
|
+
"by_os": {
|
|
827
|
+
"macos": [{"cmd": "brew install jq", "desc": "安装 jq(Homebrew)"}],
|
|
828
|
+
"linux": [{"cmd": "sudo apt-get install -y jq", "desc": "安装 jq(apt)"}],
|
|
829
|
+
"windows": [{"cmd": "winget install jqlang.jq", "desc": "winget 安装"}],
|
|
830
|
+
},
|
|
831
|
+
"launch": "jq --version",
|
|
832
|
+
"notes": "使用:echo '{\"a\":1}' | jq '.a'",
|
|
833
|
+
},
|
|
834
|
+
|
|
835
|
+
"jekyll/jekyll": {
|
|
836
|
+
"desc": "Jekyll — 静态网站生成器(Ruby 编写,GitHub Pages 官方引擎)",
|
|
837
|
+
"by_os": {
|
|
838
|
+
"macos": [
|
|
839
|
+
{"cmd": "gem install jekyll bundler", "desc": "安装 Jekyll + Bundler"},
|
|
840
|
+
],
|
|
841
|
+
"linux": [
|
|
842
|
+
{"cmd": "sudo apt-get install -y ruby-full build-essential zlib1g-dev", "desc": "安装 Ruby 及编译依赖"},
|
|
843
|
+
{"cmd": "gem install jekyll bundler", "desc": "安装 Jekyll + Bundler"},
|
|
844
|
+
],
|
|
845
|
+
"windows": [
|
|
846
|
+
{"cmd": "winget install RubyInstallerTeam.Ruby.3.2", "desc": "安装 Ruby"},
|
|
847
|
+
{"cmd": "gem install jekyll bundler", "desc": "安装 Jekyll + Bundler"},
|
|
848
|
+
],
|
|
849
|
+
},
|
|
850
|
+
"launch": "jekyll --version",
|
|
851
|
+
"notes": "创建新站点:jekyll new my-site && cd my-site && bundle exec jekyll serve",
|
|
852
|
+
},
|
|
853
|
+
|
|
854
|
+
"dandavison/delta": {
|
|
855
|
+
"desc": "delta — 美化 git diff 输出",
|
|
856
|
+
"by_os": {
|
|
857
|
+
"macos": [{"cmd": "brew install git-delta", "desc": "安装 delta(Homebrew)"}],
|
|
858
|
+
"linux": [{"cmd": "cargo install git-delta", "desc": "Cargo 安装"}],
|
|
859
|
+
"windows": [{"cmd": "winget install dandavison.delta", "desc": "winget 安装"}],
|
|
860
|
+
},
|
|
861
|
+
"launch": "delta --version",
|
|
862
|
+
"notes": "配置 git 使用 delta:git config --global core.pager delta",
|
|
863
|
+
},
|
|
864
|
+
|
|
865
|
+
"ajeetdsouza/zoxide": {
|
|
866
|
+
"desc": "zoxide — 更智能的 cd 命令替代品",
|
|
867
|
+
"by_os": {
|
|
868
|
+
"macos": [{"cmd": "brew install zoxide", "desc": "安装 zoxide(Homebrew)"}],
|
|
869
|
+
"linux": [{"cmd": "cargo install zoxide --locked", "desc": "Cargo 安装"}],
|
|
870
|
+
"windows": [{"cmd": "winget install ajeetdsouza.zoxide", "desc": "winget 安装"}],
|
|
871
|
+
},
|
|
872
|
+
"launch": "zoxide --version",
|
|
873
|
+
"notes": "需要在 shell 配置中添加 eval \"$(zoxide init zsh)\"",
|
|
874
|
+
},
|
|
875
|
+
|
|
876
|
+
"starship/starship": {
|
|
877
|
+
"desc": "starship — 极速跨 shell 提示符",
|
|
878
|
+
"by_os": {
|
|
879
|
+
"macos": [{"cmd": "brew install starship", "desc": "安装 starship(Homebrew)"}],
|
|
880
|
+
"linux": [{"cmd": "cargo install starship --locked", "desc": "Cargo 安装"}],
|
|
881
|
+
"windows": [{"cmd": "winget install Starship.Starship", "desc": "winget 安装"}],
|
|
882
|
+
},
|
|
883
|
+
"launch": "starship --version",
|
|
884
|
+
"notes": "需要在 shell 配置中添加 eval \"$(starship init zsh)\"",
|
|
885
|
+
},
|
|
886
|
+
|
|
887
|
+
"eza-community/eza": {
|
|
888
|
+
"desc": "eza — 现代化的 ls 替代品(exa 的维护版)",
|
|
889
|
+
"by_os": {
|
|
890
|
+
"macos": [{"cmd": "brew install eza", "desc": "安装 eza(Homebrew)"}],
|
|
891
|
+
"linux": [{"cmd": "cargo install eza", "desc": "Cargo 安装"}],
|
|
892
|
+
"windows": [{"cmd": "winget install eza-community.eza", "desc": "winget 安装"}],
|
|
893
|
+
},
|
|
894
|
+
"launch": "eza --version",
|
|
895
|
+
"notes": "使用:eza -la --icons",
|
|
896
|
+
},
|
|
897
|
+
|
|
898
|
+
"gohugoio/hugo": {
|
|
899
|
+
"desc": "Hugo — 最快的静态网站生成器(Go 编写)",
|
|
900
|
+
"by_os": {
|
|
901
|
+
"macos": [{"cmd": "brew install hugo", "desc": "安装 Hugo(Homebrew)"}],
|
|
902
|
+
"linux": [{"cmd": "sudo apt-get install -y hugo || go install github.com/gohugoio/hugo@latest", "desc": "安装 Hugo"}],
|
|
903
|
+
"windows": [{"cmd": "winget install Hugo.Hugo.Extended", "desc": "winget 安装"}],
|
|
904
|
+
},
|
|
905
|
+
"launch": "hugo version",
|
|
906
|
+
"notes": "创建新站点:hugo new site my-site && cd my-site && hugo server",
|
|
907
|
+
},
|
|
908
|
+
|
|
909
|
+
# ── 命令行工具(Go)───────────────────────────
|
|
910
|
+
"cli/cli": {
|
|
911
|
+
"desc": "GitHub 官方命令行工具 (gh),管理 PR/Issue/Release",
|
|
912
|
+
"by_os": {
|
|
913
|
+
"macos": [
|
|
914
|
+
{"cmd": "brew install gh", "desc": "安装 GitHub CLI(Homebrew)"},
|
|
915
|
+
{"cmd": "gh auth login", "desc": "登录 GitHub 账号"},
|
|
916
|
+
],
|
|
917
|
+
"linux": [
|
|
918
|
+
{"cmd": (
|
|
919
|
+
"type -p curl >/dev/null || (sudo apt update && sudo apt install curl -y) && "
|
|
920
|
+
"curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg && "
|
|
921
|
+
"echo \"deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main\" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null && "
|
|
922
|
+
"sudo apt update && sudo apt install gh -y"
|
|
923
|
+
), "desc": "安装 GitHub CLI(Ubuntu/Debian)", "warn": True},
|
|
924
|
+
{"cmd": "gh auth login", "desc": "登录 GitHub 账号"},
|
|
925
|
+
],
|
|
926
|
+
"windows": [
|
|
927
|
+
{"cmd": "winget install --id GitHub.cli", "desc": "安装 GitHub CLI(winget)"},
|
|
928
|
+
{"cmd": "gh auth login", "desc": "登录 GitHub 账号"},
|
|
929
|
+
],
|
|
930
|
+
},
|
|
931
|
+
"launch": "gh status",
|
|
932
|
+
"notes": "常用命令:gh repo clone / gh pr list / gh issue create",
|
|
933
|
+
},
|
|
934
|
+
|
|
935
|
+
# ── 容器管理 ───────────────────────────────────
|
|
936
|
+
"portainer/portainer": {
|
|
937
|
+
"desc": "Portainer — Docker/K8s 可视化管理 Web UI",
|
|
938
|
+
"by_platform": "docker_preferred",
|
|
939
|
+
"steps_docker": [
|
|
940
|
+
{"cmd": "docker volume create portainer_data", "desc": "创建数据卷"},
|
|
941
|
+
{"cmd": (
|
|
942
|
+
"docker run -d -p 8000:8000 -p 9443:9443 "
|
|
943
|
+
"--name portainer --restart=always "
|
|
944
|
+
"-v /var/run/docker.sock:/var/run/docker.sock "
|
|
945
|
+
"-v portainer_data:/data "
|
|
946
|
+
"portainer/portainer-ce:latest"
|
|
947
|
+
), "desc": "启动 Portainer"},
|
|
948
|
+
],
|
|
949
|
+
"steps_pip": [
|
|
950
|
+
{"cmd": "# Portainer 必须通过 Docker 安装,请先安装 Docker", "desc": "提示"},
|
|
951
|
+
],
|
|
952
|
+
"launch": None,
|
|
953
|
+
"notes": "浏览器打开 https://localhost:9443\n首次启动需要创建管理员账号",
|
|
954
|
+
},
|
|
955
|
+
|
|
956
|
+
# ── AI Agent 框架 ─────────────────────────────
|
|
957
|
+
"microsoft/autogen": {
|
|
958
|
+
"desc": "Microsoft AutoGen — 多 Agent 协作 AI 框架",
|
|
959
|
+
"steps": [
|
|
960
|
+
{"cmd": "{python} -m venv venv", "desc": "创建虚拟环境"},
|
|
961
|
+
{"cmd": "{venv_activate}", "desc": "激活虚拟环境"},
|
|
962
|
+
{"cmd": "{pip} install pyautogen", "desc": "安装 AutoGen"},
|
|
963
|
+
],
|
|
964
|
+
"launch": "{python} -c \"import autogen; print('AutoGen', autogen.__version__, 'ready')\"",
|
|
965
|
+
"notes": "需配置 OAI_CONFIG_LIST(OpenAI/Azure/Ollama API)\n示例:https://github.com/microsoft/autogen/tree/main/samples",
|
|
966
|
+
},
|
|
967
|
+
|
|
968
|
+
# ── 智能家居 ───────────────────────────────────
|
|
969
|
+
"home-assistant/core": {
|
|
970
|
+
"desc": "Home Assistant — 最强开源智能家居平台",
|
|
971
|
+
"by_platform": "docker_preferred",
|
|
972
|
+
"steps_docker": [
|
|
973
|
+
{"cmd": (
|
|
974
|
+
"docker run -d "
|
|
975
|
+
"--name homeassistant "
|
|
976
|
+
"--privileged "
|
|
977
|
+
"--restart=unless-stopped "
|
|
978
|
+
"-e TZ=Asia/Shanghai "
|
|
979
|
+
"-v /PATH_TO_YOUR_CONFIG:/config "
|
|
980
|
+
"--network=host "
|
|
981
|
+
"ghcr.io/home-assistant/home-assistant:stable"
|
|
982
|
+
), "desc": "Docker 启动 Home Assistant(推荐)"},
|
|
983
|
+
],
|
|
984
|
+
"steps_pip": [
|
|
985
|
+
{"cmd": "{python} -m venv hass_env", "desc": "创建虚拟环境"},
|
|
986
|
+
{"cmd": "source hass_env/bin/activate", "desc": "激活虚拟环境"},
|
|
987
|
+
{"cmd": "{pip} install homeassistant", "desc": "安装 Home Assistant"},
|
|
988
|
+
{"cmd": "hass --open-ui", "desc": "启动并打开界面"},
|
|
989
|
+
],
|
|
990
|
+
"launch": "hass --open-ui",
|
|
991
|
+
"notes": "浏览器打开 http://localhost:8123\n⚠️ 将 /PATH_TO_YOUR_CONFIG 改为实际路径",
|
|
992
|
+
},
|
|
993
|
+
|
|
994
|
+
# ── 图片管理 ───────────────────────────────────
|
|
995
|
+
"immich-app/immich": {
|
|
996
|
+
"desc": "Immich — 自托管 Google Photos 替代品(Docker)",
|
|
997
|
+
"by_platform": "docker_preferred",
|
|
998
|
+
"steps_docker": [
|
|
999
|
+
{"cmd": "git clone --depth 1 https://github.com/immich-app/immich.git && cd immich/docker",
|
|
1000
|
+
"desc": "克隆配置文件"},
|
|
1001
|
+
{"cmd": "cp .env.example .env", "desc": "创建环境变量文件"},
|
|
1002
|
+
{"cmd": "docker compose up -d", "desc": "一键启动 Immich"},
|
|
1003
|
+
],
|
|
1004
|
+
"steps_pip": [
|
|
1005
|
+
{"cmd": "# Immich 需要 Docker 和 Docker Compose,请先安装 Docker", "desc": "提示"},
|
|
1006
|
+
],
|
|
1007
|
+
"launch": None,
|
|
1008
|
+
"notes": "浏览器打开 http://localhost:2283\n⚠️ 修改 .env 设置 UPLOAD_LOCATION(照片存储路径)",
|
|
1009
|
+
},
|
|
1010
|
+
}
|