maque-0.2.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- maque/__init__.py +30 -0
- maque/__main__.py +926 -0
- maque/ai_platform/__init__.py +0 -0
- maque/ai_platform/crawl.py +45 -0
- maque/ai_platform/metrics.py +258 -0
- maque/ai_platform/nlp_preprocess.py +67 -0
- maque/ai_platform/webpage_screen_shot.py +195 -0
- maque/algorithms/__init__.py +78 -0
- maque/algorithms/bezier.py +15 -0
- maque/algorithms/bktree.py +117 -0
- maque/algorithms/core.py +104 -0
- maque/algorithms/hilbert.py +16 -0
- maque/algorithms/rate_function.py +92 -0
- maque/algorithms/transform.py +27 -0
- maque/algorithms/trie.py +272 -0
- maque/algorithms/utils.py +63 -0
- maque/algorithms/video.py +587 -0
- maque/api/__init__.py +1 -0
- maque/api/common.py +110 -0
- maque/api/fetch.py +26 -0
- maque/api/static/icon.png +0 -0
- maque/api/static/redoc.standalone.js +1782 -0
- maque/api/static/swagger-ui-bundle.js +3 -0
- maque/api/static/swagger-ui.css +3 -0
- maque/cli/__init__.py +1 -0
- maque/cli/clean_invisible_chars.py +324 -0
- maque/cli/core.py +34 -0
- maque/cli/groups/__init__.py +26 -0
- maque/cli/groups/config.py +205 -0
- maque/cli/groups/data.py +615 -0
- maque/cli/groups/doctor.py +259 -0
- maque/cli/groups/embedding.py +222 -0
- maque/cli/groups/git.py +29 -0
- maque/cli/groups/help.py +410 -0
- maque/cli/groups/llm.py +223 -0
- maque/cli/groups/mcp.py +241 -0
- maque/cli/groups/mllm.py +1795 -0
- maque/cli/groups/mllm_simple.py +60 -0
- maque/cli/groups/quant.py +210 -0
- maque/cli/groups/service.py +490 -0
- maque/cli/groups/system.py +570 -0
- maque/cli/mllm_run.py +1451 -0
- maque/cli/script.py +52 -0
- maque/cli/tree.py +49 -0
- maque/clustering/__init__.py +52 -0
- maque/clustering/analyzer.py +347 -0
- maque/clustering/clusterers.py +464 -0
- maque/clustering/sampler.py +134 -0
- maque/clustering/visualizer.py +205 -0
- maque/constant.py +13 -0
- maque/core.py +133 -0
- maque/cv/__init__.py +1 -0
- maque/cv/image.py +219 -0
- maque/cv/utils.py +68 -0
- maque/cv/video/__init__.py +3 -0
- maque/cv/video/keyframe_extractor.py +368 -0
- maque/embedding/__init__.py +43 -0
- maque/embedding/base.py +56 -0
- maque/embedding/multimodal.py +308 -0
- maque/embedding/server.py +523 -0
- maque/embedding/text.py +311 -0
- maque/git/__init__.py +24 -0
- maque/git/pure_git.py +912 -0
- maque/io/__init__.py +29 -0
- maque/io/core.py +38 -0
- maque/io/ops.py +194 -0
- maque/llm/__init__.py +111 -0
- maque/llm/backend.py +416 -0
- maque/llm/base.py +411 -0
- maque/llm/server.py +366 -0
- maque/mcp_server.py +1096 -0
- maque/mllm_data_processor_pipeline/__init__.py +17 -0
- maque/mllm_data_processor_pipeline/core.py +341 -0
- maque/mllm_data_processor_pipeline/example.py +291 -0
- maque/mllm_data_processor_pipeline/steps/__init__.py +56 -0
- maque/mllm_data_processor_pipeline/steps/data_alignment.py +267 -0
- maque/mllm_data_processor_pipeline/steps/data_loader.py +172 -0
- maque/mllm_data_processor_pipeline/steps/data_validation.py +304 -0
- maque/mllm_data_processor_pipeline/steps/format_conversion.py +411 -0
- maque/mllm_data_processor_pipeline/steps/mllm_annotation.py +331 -0
- maque/mllm_data_processor_pipeline/steps/mllm_refinement.py +446 -0
- maque/mllm_data_processor_pipeline/steps/result_validation.py +501 -0
- maque/mllm_data_processor_pipeline/web_app.py +317 -0
- maque/nlp/__init__.py +14 -0
- maque/nlp/ngram.py +9 -0
- maque/nlp/parser.py +63 -0
- maque/nlp/risk_matcher.py +543 -0
- maque/nlp/sentence_splitter.py +202 -0
- maque/nlp/simple_tradition_cvt.py +31 -0
- maque/performance/__init__.py +21 -0
- maque/performance/_measure_time.py +70 -0
- maque/performance/_profiler.py +367 -0
- maque/performance/_stat_memory.py +51 -0
- maque/pipelines/__init__.py +15 -0
- maque/pipelines/clustering.py +252 -0
- maque/quantization/__init__.py +42 -0
- maque/quantization/auto_round.py +120 -0
- maque/quantization/base.py +145 -0
- maque/quantization/bitsandbytes.py +127 -0
- maque/quantization/llm_compressor.py +102 -0
- maque/retriever/__init__.py +35 -0
- maque/retriever/chroma.py +654 -0
- maque/retriever/document.py +140 -0
- maque/retriever/milvus.py +1140 -0
- maque/table_ops/__init__.py +1 -0
- maque/table_ops/core.py +133 -0
- maque/table_viewer/__init__.py +4 -0
- maque/table_viewer/download_assets.py +57 -0
- maque/table_viewer/server.py +698 -0
- maque/table_viewer/static/element-plus-icons.js +5791 -0
- maque/table_viewer/static/element-plus.css +1 -0
- maque/table_viewer/static/element-plus.js +65236 -0
- maque/table_viewer/static/main.css +268 -0
- maque/table_viewer/static/main.js +669 -0
- maque/table_viewer/static/vue.global.js +18227 -0
- maque/table_viewer/templates/index.html +401 -0
- maque/utils/__init__.py +56 -0
- maque/utils/color.py +68 -0
- maque/utils/color_string.py +45 -0
- maque/utils/compress.py +66 -0
- maque/utils/constant.py +183 -0
- maque/utils/core.py +261 -0
- maque/utils/cursor.py +143 -0
- maque/utils/distance.py +58 -0
- maque/utils/docker.py +96 -0
- maque/utils/downloads.py +51 -0
- maque/utils/excel_helper.py +542 -0
- maque/utils/helper_metrics.py +121 -0
- maque/utils/helper_parser.py +168 -0
- maque/utils/net.py +64 -0
- maque/utils/nvidia_stat.py +140 -0
- maque/utils/ops.py +53 -0
- maque/utils/packages.py +31 -0
- maque/utils/path.py +57 -0
- maque/utils/tar.py +260 -0
- maque/utils/untar.py +129 -0
- maque/web/__init__.py +0 -0
- maque/web/image_downloader.py +1410 -0
- maque-0.2.1.dist-info/METADATA +450 -0
- maque-0.2.1.dist-info/RECORD +143 -0
- maque-0.2.1.dist-info/WHEEL +4 -0
- maque-0.2.1.dist-info/entry_points.txt +3 -0
- maque-0.2.1.dist-info/licenses/LICENSE +21 -0
maque/cli/groups/help.py
ADDED
@@ -0,0 +1,410 @@
+"""Help and documentation system"""
+import inspect
+from rich.console import Console
+from rich.table import Table
+from rich.panel import Panel
+from rich import print
+
+
+class HelpGroup:
+    """Help and documentation command group"""
+
+    def __init__(self, cli_instance):
+        self.cli = cli_instance
+        self.console = Console()
+
+    def examples(self, command: str = None):
+        """Show command usage examples
+
+        Args:
+            command: command name; if omitted, show examples for all commands
+        """
+        examples_data = {
+            "config": {
+                "description": "Configuration management",
+                "examples": [
+                    {
+                        "desc": "Show the current configuration",
+                        "cmd": "maque config show"
+                    },
+                    {
+                        "desc": "Set the MLLM model",
+                        "cmd": "maque config set mllm.model gpt-4o-mini"
+                    },
+                    {
+                        "desc": "Edit the configuration file",
+                        "cmd": "maque config edit"
+                    },
+                    {
+                        "desc": "Validate the configuration file",
+                        "cmd": "maque config validate"
+                    }
+                ]
+            },
+            "mllm": {
+                "description": "Multimodal large language models",
+                "examples": [
+                    {
+                        "desc": "Batch-process images listed in a table",
+                        "cmd": "maque mllm call-table images.xlsx --output_file=results.csv"
+                    },
+                    {
+                        "desc": "Batch-process images in a folder",
+                        "cmd": "maque mllm call-images ./photos --max_num=100"
+                    },
+                    {
+                        "desc": "Interactive chat",
+                        "cmd": "maque mllm chat"
+                    },
+                    {
+                        "desc": "Single image analysis",
+                        "cmd": "maque mllm chat \"Describe this image\" --image=photo.jpg"
+                    },
+                    {
+                        "desc": "List available models",
+                        "cmd": "maque mllm models"
+                    }
+                ]
+            },
+            "doctor": {
+                "description": "Environment diagnostics",
+                "examples": [
+                    {
+                        "desc": "Check the environment and dependencies",
+                        "cmd": "maque doctor check"
+                    },
+                    {
+                        "desc": "Verbose environment check",
+                        "cmd": "maque doctor check --verbose"
+                    },
+                    {
+                        "desc": "Automatically fix problems",
+                        "cmd": "maque doctor fix"
+                    },
+                    {
+                        "desc": "Show detailed version information",
+                        "cmd": "maque doctor version --full"
+                    }
+                ]
+            },
+            "data": {
+                "description": "Data processing",
+                "examples": [
+                    {
+                        "desc": "Launch the table viewer",
+                        "cmd": "maque data table-viewer data.xlsx"
+                    },
+                    {
+                        "desc": "Convert data formats",
+                        "cmd": "maque data convert input.xlsx output.csv"
+                    },
+                    {
+                        "desc": "Statistical analysis of data",
+                        "cmd": "maque data stats data.csv"
+                    }
+                ]
+            },
+            "video": {
+                "description": "Video processing",
+                "examples": [
+                    {
+                        "desc": "Video deduplication (extract unique frames)",
+                        "cmd": "maque video-dedup video.mp4 --method=phash"
+                    },
+                    {
+                        "desc": "Assemble frames into a video",
+                        "cmd": "maque frames-to-video ./frames --fps=30"
+                    },
+                    {
+                        "desc": "Deduplicate and assemble in one step",
+                        "cmd": "maque dedup-and-create-video input.mp4 --video_fps=15"
+                    }
+                ]
+            }
+        }
+
+        if command:
+            if command in examples_data:
+                self._show_command_examples(command, examples_data[command])
+            else:
+                print(f"[red]No examples found for command '{command}'[/red]")
+                self._list_available_commands(examples_data)
+        else:
+            # Show all examples
+            print("[bold blue]Sparrow command examples[/bold blue]\n")
+            for cmd, data in examples_data.items():
+                self._show_command_examples(cmd, data, compact=True)
+
+    def _show_command_examples(self, command, data, compact=False):
+        """Show the examples of a single command"""
+        if compact:
+            print(f"[bold cyan]{command}[/bold cyan] - {data['description']}")
+            for example in data['examples'][:2]:  # show only the first 2 examples
+                print(f" [green]${example['cmd']}[/green]")
+                if not compact:
+                    print(f" {example['desc']}")
+            if len(data['examples']) > 2:
+                print(f" ... for more examples run: maque help examples {command}")
+            print()
+        else:
+            print(f"[bold blue]{command} command examples[/bold blue]")
+            print(f"{data['description']}\n")
+
+            for i, example in enumerate(data['examples'], 1):
+                print(f"[bold]{i}. {example['desc']}[/bold]")
+                print(f" [green]$ {example['cmd']}[/green]\n")
+
+    def _list_available_commands(self, examples_data):
+        """List the available commands"""
+        print("\n[bold]Available commands:[/bold]")
+        for cmd in examples_data.keys():
+            print(f" [cyan]{cmd}[/cyan]")
+
+    def cheatsheet(self):
+        """Show the quick reference sheet"""
+        print(Panel.fit(
+            """[bold blue]Sparrow quick reference[/bold blue]
+
+[bold cyan]Configuration[/bold cyan]
+maque config show               # Show configuration
+maque config set key value      # Set a config value
+maque config edit               # Edit configuration
+
+[bold cyan]Environment diagnostics[/bold cyan]
+maque doctor check              # Check environment
+maque doctor fix                # Auto-fix problems
+
+[bold cyan]Multimodal AI[/bold cyan]
+maque mllm chat                 # Interactive chat
+maque mllm call-images ./imgs   # Batch image analysis
+maque mllm models               # List models
+
+[bold cyan]Data processing[/bold cyan]
+maque data table-viewer file    # Table viewer
+maque data convert a.xlsx b.csv # Format conversion
+
+[bold cyan]Video processing[/bold cyan]
+maque video-dedup video.mp4     # Video deduplication
+maque frames-to-video ./frames  # Assemble video from frames
+
+[bold cyan]Developer tools[/bold cyan]
+maque create myproject          # Create a project
+maque download model-name       # Download a model
+
+Get more help: maque help examples <command>""",
+            title="Quick reference",
+            border_style="blue"
+        ))
+
+    def topics(self, topic: str = None):
+        """Show topic help
+
+        Args:
+            topic: topic name
+        """
+        topics_data = {
+            "configuration": {
+                "title": "Configuration file guide",
+                "content": """
+[bold]Configuration file locations (in priority order):[/bold]
+1. Current directory: ./maque_config.yaml
+2. Project root: <project>/maque_config.yaml
+3. User directory: ~/.maque/config.yaml
+
+[bold]Configuration file structure:[/bold]
+```yaml
+# Multimodal LLM settings
+mllm:
+  model: "gpt-4o-mini"
+  base_url: "https://api.openai.com/v1"
+  api_key: "your-api-key"
+
+# Other settings...
+```
+
+[bold]Common configuration commands:[/bold]
+maque config show                   # Show the current configuration
+maque config set mllm.model gpt-4o  # Set the model
+maque config validate               # Validate the configuration file
+"""
+            },
+            "models": {
+                "title": "Supported models",
+                "content": """
+[bold]Supported model types:[/bold]
+• OpenAI-compatible APIs (GPT-4, GPT-4o, Claude, etc.)
+• Local Ollama models (Llama, Gemma, etc.)
+• Models served by a vLLM deployment
+• Other OpenAI-API-compatible services
+
+[bold]Model configuration examples:[/bold]
+# Official OpenAI
+maque config set mllm.model gpt-4o-mini
+maque config set mllm.base_url https://api.openai.com/v1
+maque config set mllm.api_key your-openai-key
+
+# Local Ollama
+maque config set mllm.model llama3:latest
+maque config set mllm.base_url http://localhost:11434/v1
+maque config set mllm.api_key EMPTY
+
+# Custom service
+maque config set mllm.base_url http://your-server:8000/v1
+
+[bold]List available models:[/bold]
+maque mllm models
+"""
+            },
+            "troubleshooting": {
+                "title": "Troubleshooting",
+                "content": """
+[bold]Common problems and fixes:[/bold]
+
+[bold cyan]1. Environment check[/bold cyan]
+maque doctor check --verbose    # Detailed diagnostics
+maque doctor fix                # Auto-fix problems
+
+[bold cyan]2. Dependency issues[/bold cyan]
+pip install maque[dev]          # Install development dependencies
+pip install maque[torch]        # Install deep-learning dependencies
+pip install maque[video]        # Install video-processing dependencies
+
+[bold cyan]3. Configuration issues[/bold cyan]
+maque config validate           # Validate the configuration
+maque config reset              # Reset to the default configuration
+
+[bold cyan]4. Model connection issues[/bold cyan]
+maque mllm models               # Check the model connection
+curl http://localhost:11434     # Test the Ollama connection
+
+[bold cyan]5. Permission issues[/bold cyan]
+# Windows: run as administrator
+# Linux/macOS: check file permissions
+ls -la ~/.maque/config.yaml
+
+Get more help: https://github.com/your-repo/issues
+"""
+            }
+        }
+
+        if topic:
+            if topic in topics_data:
+                topic_info = topics_data[topic]
+                print(Panel(
+                    topic_info["content"].strip(),
+                    title=topic_info["title"],
+                    border_style="cyan"
+                ))
+            else:
+                print(f"[red]Topic '{topic}' not found[/red]")
+                self._list_available_topics(topics_data)
+        else:
+            print("[bold blue]Available help topics:[/bold blue]\n")
+            for topic_key, topic_info in topics_data.items():
+                print(f"[cyan]{topic_key:<15}[/cyan] {topic_info['title']}")
+            print(f"\nUsage: maque help topics <topic>")
+
+    def _list_available_topics(self, topics_data):
+        """List the available topics"""
+        print("\n[bold]Available topics:[/bold]")
+        for topic in topics_data.keys():
+            print(f" [cyan]{topic}[/cyan]")
+
+    def commands(self, group: str = None):
+        """List all commands, or the commands of a given group
+
+        Args:
+            group: command group name
+        """
+        # All available commands
+        command_groups = {
+            "config": ["show", "edit", "validate", "set", "get", "reset"],
+            "mllm": ["call-table", "call-images", "chat", "models", "benchmark"],
+            "doctor": ["check", "fix", "version"],
+            "data": ["table-viewer", "convert", "stats", "validate"],
+            "service": ["list", "status", "start", "stop", "logs"],
+            "video": ["dedup", "frames-to-video", "dedup-and-create-video"],
+            "legacy": ["legacy single-level commands", "e.g. download, create, split"]
+        }
+
+        if group:
+            if group in command_groups:
+                print(f"[bold blue]{group} command group[/bold blue]\n")
+                if group == "legacy":
+                    # Explain the legacy commands
+                    print("[yellow]Note: these are legacy single-level commands; the new grouped commands are recommended[/yellow]\n")
+                    legacy_commands = [
+                        "download", "create", "split", "merge", "pack", "unpack",
+                        "clone", "gen-key", "send", "recv", "kill", "auto-commit"
+                    ]
+                    for cmd in legacy_commands:
+                        print(f" [dim cyan]{cmd}[/dim cyan]")
+                else:
+                    for cmd in command_groups[group]:
+                        print(f" [cyan]maque {group} {cmd}[/cyan]")
+
+                print(f"\nFor detailed help: maque help examples {group}")
+            else:
+                print(f"[red]Command group '{group}' not found[/red]")
+                self._list_command_groups(command_groups)
+        else:
+            print("[bold blue]Sparrow command list[/bold blue]\n")
+
+            for group_name, commands in command_groups.items():
+                if group_name == "legacy":
+                    continue
+
+                print(f"[bold cyan]{group_name}[/bold cyan]")
+                for cmd in commands[:3]:  # show only the first 3 commands
+                    print(f" maque {group_name} {cmd}")
+                if len(commands) > 3:
+                    print(f" ... {len(commands)} commands in total")
+                print()
+
+            print("[dim]Use 'maque help commands <group>' to see all commands in a group[/dim]")
+
+    def _list_command_groups(self, command_groups):
+        """List the available command groups"""
+        print("\n[bold]Available command groups:[/bold]")
+        for group in command_groups.keys():
+            if group != "legacy":
+                print(f" [cyan]{group}[/cyan]")
+
+    def getting_started(self):
+        """Show the getting-started guide"""
+        guide = """[bold blue]Sparrow quick start[/bold blue]
+
+[bold]1. Check your environment[/bold]
+First check that your environment is configured correctly:
+[green]$ maque doctor check[/green]
+
+[bold]2. Initialize the configuration[/bold]
+Create a configuration file and set your preferences:
+[green]$ maque config show[/green]
+[green]$ maque config set mllm.model your-preferred-model[/green]
+
+[bold]3. Try the core features[/bold]
+• Multimodal AI analysis:
+  [green]$ maque mllm chat "Hello!"[/green]
+
+• Data processing:
+  [green]$ maque data table-viewer your-data.xlsx[/green]
+
+• Video processing:
+  [green]$ maque video-dedup your-video.mp4[/green]
+
+[bold]4. Explore more features[/bold]
+See all available commands:
+[green]$ maque help commands[/green]
+
+See concrete examples:
+[green]$ maque help examples mllm[/green]
+
+[bold]5. Get help[/bold]
+• Quick reference: [green]maque help cheatsheet[/green]
+• Topic help: [green]maque help topics configuration[/green]
+• Troubleshooting: [green]maque help topics troubleshooting[/green]
+
+Ready to get started? Try: [green]maque doctor check[/green]
+"""
+        print(Panel(guide.strip(), title="Getting started guide", border_style="green"))
maque/cli/groups/llm.py
ADDED
@@ -0,0 +1,223 @@
+"""LLM service command group"""
+from rich import print
+from rich.console import Console
+
+
+class LlmGroup:
+    """LLM service command group
+
+    Provides start-up and management of LLM/MLLM model services,
+    supporting text-only LLMs and multimodal VL models.
+    """
+
+    def __init__(self, cli_instance):
+        self.cli = cli_instance
+        self.console = Console()
+
+    def serve(
+        self,
+        model: str,
+        host: str = "0.0.0.0",
+        port: int = 8000,
+        device: str = None,
+        local_dir: str = None,
+        dtype: str = None,
+        attn: str = None,
+        model_class: str = None,
+        processor_class: str = None,
+        vision_processor: str = None,
+        enable_thinking: bool = False,
+    ):
+        """Start an LLM/MLLM API service
+
+        Starts an OpenAI-compatible Chat Completions API service,
+        auto-detecting multimodal VL models and enabling image handling.
+
+        Args:
+            model: model name or path (e.g. Qwen/Qwen2.5-7B-Instruct)
+            host: listen address, default 0.0.0.0
+            port: listen port, default 8000
+            device: device type (cuda/cpu), auto-detected by default
+            local_dir: local model directory
+            dtype: data type (float16/bfloat16/float32), auto-selected by default
+            attn: attention implementation (eager/sdpa/flash_attention_2), auto-selected by default
+            model_class: model class name (e.g. HunYuanVLForConditionalGeneration)
+            processor_class: processor class name (e.g. AutoProcessor)
+            vision_processor: vision processor type (qwen_vl/general)
+            enable_thinking: enable Qwen3 thinking mode
+
+        Examples:
+            # Standard LLM
+            maque llm serve Qwen/Qwen2.5-7B-Instruct
+
+            # Multimodal VL model
+            maque llm serve Qwen/Qwen2.5-VL-3B-Instruct --port=8001
+
+            # Custom precision and attention
+            maque llm serve model --dtype=float32 --attn=eager
+
+            # HunyuanOCR (custom model class)
+            maque llm serve tencent/HunyuanOCR --model_class=HunYuanVLForConditionalGeneration --vision_processor=general
+
+            # Qwen3 with thinking mode
+            maque llm serve Qwen/Qwen3-0.6B --enable_thinking
+        """
+        try:
+            from maque.llm.server import create_server
+        except ImportError as e:
+            print(f"[red]Failed to import the LLM server module: {e}[/red]")
+            print("Make sure the dependencies are installed: pip install transformers torch fastapi uvicorn")
+            return
+
+        print(f"[bold blue]Starting LLM service[/bold blue]")
+        print(f" Model: [cyan]{model}[/cyan]")
+        print(f" Address: [green]http://{host}:{port}[/green]")
+        print(f" Device: [yellow]{device or 'auto'}[/yellow]")
+        print(f" Dtype: [yellow]{dtype or 'auto'}[/yellow]")
+        print(f" Attention: [yellow]{attn or 'auto'}[/yellow]")
+        if local_dir:
+            print(f" Local dir: [magenta]{local_dir}[/magenta]")
+        if model_class:
+            print(f" Model class: [cyan]{model_class}[/cyan]")
+        if processor_class:
+            print(f" Processor class: [cyan]{processor_class}[/cyan]")
+        if vision_processor:
+            print(f" Vision processor: [cyan]{vision_processor}[/cyan]")
+        if enable_thinking:
+            print(f" Thinking mode: [green]enabled[/green]")
+        print()
+
+        # Build chat_template_kwargs
+        chat_template_kwargs = {}
+        if enable_thinking:
+            chat_template_kwargs["enable_thinking"] = True
+
+        server = create_server(
+            model=model,
+            device=device,
+            local_dir=local_dir,
+            dtype=dtype,
+            attn=attn,
+            model_class=model_class,
+            processor_class=processor_class,
+            vision_processor=vision_processor,
+            chat_template_kwargs=chat_template_kwargs if chat_template_kwargs else None,
+        )
+        server.run(host=host, port=port)
+
+    def test(
+        self,
+        url: str = "http://localhost:8000",
+        model: str = None,
+        prompt: str = "Hello, please introduce yourself.",
+        stream: bool = False,
+    ):
+        """Test the LLM service
+
+        Args:
+            url: service URL
+            model: model name (optional; auto-detected if omitted)
+            prompt: test prompt
+            stream: whether to use streaming output
+
+        Examples:
+            maque llm test
+            maque llm test --stream
+            maque llm test --url=http://localhost:8000
+        """
+        import requests
+
+        # Resolve the model name
+        if not model:
+            try:
+                resp = requests.get(f"{url.rstrip('/')}/v1/models", timeout=5)
+                data = resp.json()
+                if data.get("data"):
+                    model = data["data"][0]["id"]
+            except Exception:
+                pass
+
+        if not model:
+            print("[red]Could not determine the model name; specify it with --model[/red]")
+            return
+
+        endpoint = f"{url.rstrip('/')}/v1/chat/completions"
+
+        payload = {
+            "model": model,
+            "messages": [{"role": "user", "content": prompt}],
+            "stream": stream,
+            "max_tokens": 256,
+        }
+
+        print(f"[blue]Testing LLM service[/blue]")
+        print(f" URL: {endpoint}")
+        print(f" Model: {model}")
+        print(f" Prompt: {prompt[:50]}{'...' if len(prompt) > 50 else ''}")
+        print(f" Stream: {stream}")
+        print()
+
+        try:
+            if stream:
+                self._test_stream(endpoint, payload)
+            else:
+                self._test_normal(endpoint, payload)
+
+        except requests.exceptions.ConnectionError:
+            print(f"[red]Could not connect to the service: {endpoint}[/red]")
+            print("Make sure the service is running")
+        except Exception as e:
+            print(f"[red]Test failed: {e}[/red]")
+
+    def _test_normal(self, endpoint: str, payload: dict):
+        """Non-streaming request test"""
+        import requests
+
+        response = requests.post(
+            endpoint,
+            json=payload,
+            headers={"Content-Type": "application/json"},
+            timeout=120,
+        )
+        response.raise_for_status()
+        data = response.json()
+
+        if "choices" in data and len(data["choices"]) > 0:
+            content = data["choices"][0]["message"]["content"]
+            print(f"[green]Response:[/green]")
+            print(content)
+            print()
+            print(f"[dim]Token usage: {data.get('usage', {})}[/dim]")
+        else:
+            print(f"[yellow]Unexpected response: {data}[/yellow]")
+
+    def _test_stream(self, endpoint: str, payload: dict):
+        """Streaming request test"""
+        import requests
+
+        print("[green]Response:[/green]", end=" ")
+
+        with requests.post(
+            endpoint,
+            json=payload,
+            headers={"Content-Type": "application/json"},
+            stream=True,
+            timeout=120,
+        ) as response:
+            response.raise_for_status()
+            for line in response.iter_lines():
+                if line:
+                    line = line.decode("utf-8")
+                    if line.startswith("data: "):
+                        data = line[6:]
+                        if data == "[DONE]":
+                            break
+                        try:
+                            import json
+                            chunk = json.loads(data)
+                            if chunk["choices"][0]["delta"].get("content"):
+                                print(chunk["choices"][0]["delta"]["content"], end="", flush=True)
+                        except Exception:
+                            pass
+
+        print("\n")
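For context, the server started by `maque llm serve` exposes OpenAI-style routes, and the `LlmGroup.test` helper above shows the request shape it expects (`/v1/models`, `/v1/chat/completions`, a `messages` list, `stream`, `max_tokens`). The snippet below is a minimal client sketch against those documented routes; the base URL and port are assumptions matching the `serve` defaults, and it is not part of the package itself.

```python
# Minimal client sketch for the OpenAI-compatible service started by
# `maque llm serve` (assumed to be running on the default localhost:8000).
import requests

BASE_URL = "http://localhost:8000"  # assumption: default host/port from `serve`

# Discover the served model id, mirroring what LlmGroup.test does.
models = requests.get(f"{BASE_URL}/v1/models", timeout=5).json()
model_id = models["data"][0]["id"]

# Send one non-streaming chat completion request with the payload shape
# used by LlmGroup._test_normal.
resp = requests.post(
    f"{BASE_URL}/v1/chat/completions",
    json={
        "model": model_id,
        "messages": [{"role": "user", "content": "Hello, please introduce yourself."}],
        "stream": False,
        "max_tokens": 256,
    },
    timeout=120,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
```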