auto-coder 0.1.347__py3-none-any.whl → 0.1.349__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of auto-coder might be problematic.
- {auto_coder-0.1.347.dist-info → auto_coder-0.1.349.dist-info}/METADATA +1 -1
- {auto_coder-0.1.347.dist-info → auto_coder-0.1.349.dist-info}/RECORD +37 -27
- autocoder/auto_coder_runner.py +19 -14
- autocoder/chat_auto_coder_lang.py +5 -3
- autocoder/common/auto_coder_lang.py +3 -3
- autocoder/common/model_speed_tester.py +392 -0
- autocoder/common/printer.py +7 -8
- autocoder/common/run_cmd.py +247 -0
- autocoder/common/test_run_cmd.py +110 -0
- autocoder/common/v2/agent/agentic_edit.py +82 -29
- autocoder/common/v2/agent/agentic_edit_conversation.py +9 -0
- autocoder/common/v2/agent/agentic_edit_tools/execute_command_tool_resolver.py +21 -36
- autocoder/common/v2/agent/agentic_edit_tools/list_files_tool_resolver.py +4 -7
- autocoder/common/v2/agent/agentic_edit_tools/search_files_tool_resolver.py +2 -5
- autocoder/helper/rag_doc_creator.py +141 -0
- autocoder/ignorefiles/__init__.py +4 -0
- autocoder/ignorefiles/ignore_file_utils.py +63 -0
- autocoder/ignorefiles/test_ignore_file_utils.py +91 -0
- autocoder/models.py +49 -9
- autocoder/plugins/__init__.py +20 -0
- autocoder/rag/cache/byzer_storage_cache.py +10 -4
- autocoder/rag/cache/file_monitor_cache.py +27 -24
- autocoder/rag/cache/local_byzer_storage_cache.py +11 -5
- autocoder/rag/cache/local_duckdb_storage_cache.py +203 -128
- autocoder/rag/cache/simple_cache.py +56 -37
- autocoder/rag/loaders/filter_utils.py +106 -0
- autocoder/rag/loaders/image_loader.py +573 -0
- autocoder/rag/loaders/pdf_loader.py +3 -3
- autocoder/rag/loaders/test_image_loader.py +209 -0
- autocoder/rag/qa_conversation_strategy.py +3 -5
- autocoder/rag/utils.py +20 -9
- autocoder/utils/_markitdown.py +35 -0
- autocoder/version.py +1 -1
- {auto_coder-0.1.347.dist-info → auto_coder-0.1.349.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.347.dist-info → auto_coder-0.1.349.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.347.dist-info → auto_coder-0.1.349.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.347.dist-info → auto_coder-0.1.349.dist-info}/top_level.txt +0 -0
autocoder/common/model_speed_tester.py
NEW
@@ -0,0 +1,392 @@
+import time
+import byzerllm
+from typing import Dict, Any, List, Optional
+from rich.console import Console
+from rich.table import Table
+from rich.panel import Panel
+from autocoder.common.printer import Printer
+from autocoder import models as models_module
+from autocoder.utils.llms import get_single_llm
+import byzerllm
+import pkg_resources
+from concurrent.futures import ThreadPoolExecutor
+from typing import Dict, List, Tuple
+from pydantic import BaseModel
+
+class ModelSpeedTestResult(BaseModel):
+    model_name: str
+    tokens_per_second: float
+    first_token_time: float
+    input_tokens_count: float
+    generated_tokens_count: float
+    input_tokens_cost: float
+    generated_tokens_cost: float
+    status: str
+    error: Optional[str] = None
+
+class SpeedTestResults(BaseModel):
+    results: List[ModelSpeedTestResult]
+
+byzerllm_content = ""
+try:
+    byzerllm_conten_path = pkg_resources.resource_filename(
+        "autocoder", "data/byzerllm.md"
+    )
+    with open(byzerllm_conten_path, "r", encoding="utf-8") as f:
+        byzerllm_content = f.read()
+except FileNotFoundError:
+    pass
+
+@byzerllm.prompt()
+def long_context_prompt() -> str:
+    '''
+    下面是我们提供的一份文档:
+    <document>
+    {{ content }}
+    </document>
+
+    请根据上述文档,实现用户的需求:
+
+    <query>
+    我想开发一个翻译程序,使用prompt 函数实现。
+    </query>
+    '''
+    return {
+        "content": byzerllm_content
+    }
+
+@byzerllm.prompt()
+def short_context_prompt() -> str:
+    '''
+    Hello, can you help me test the response speed?
+    '''
+    return {}
+
+def test_model_speed(model_name: str,
+                     product_mode: str,
+                     test_rounds: int = 3,
+                     enable_long_context: bool = False
+                     ) -> Dict[str, Any]:
+    from autocoder.models import get_model_by_name
+    """
+    Test the speed of a single model.
+
+    Args:
+        model_name: model name
+        product_mode: product mode (lite/pro)
+        test_rounds: number of test rounds
+
+    Returns:
+        Dict with the test results:
+        - avg_time: average response time
+        - min_time: minimum response time
+        - max_time: maximum response time
+        - first_token_time: time to first token
+        - success: whether the test succeeded
+        - error: error message (if any)
+    """
+    try:
+        llm = get_single_llm(model_name, product_mode)
+        model_info = get_model_by_name(model_name)
+
+        times = []
+        first_token_times = []
+        tokens_per_seconds = []
+        input_tokens_counts = []
+        generated_tokens_counts = []
+
+        input_tokens_costs = []
+        generated_tokens_costs = []
+
+        input_tokens_cost_per_m = model_info.get("input_price", 0.0) / 1000000
+        output_tokens_cost_per_m = model_info.get("output_price", 0.0) / 1000000
+
+        test_query = short_context_prompt.prompt()
+        if enable_long_context:
+            test_query = long_context_prompt.prompt()
+
+        content = ""
+        for _ in range(test_rounds):
+            start_time = time.time()
+            first_token_received = False
+            first_token_time = None
+            last_meta = None
+            input_tokens_count = 0
+            generated_tokens_count = 0
+            input_tokens_cost = 0
+            generated_tokens_cost = 0
+            for chunk, meta in llm.stream_chat_oai(conversations=[{
+                "role": "user",
+                "content": test_query
+            }], delta_mode=True):
+                content += chunk
+                last_meta = meta
+                current_time = time.time()
+                if not first_token_received:
+                    first_token_time = current_time - start_time
+                    first_token_received = True
+                    first_token_times.append(first_token_time)
+
+            end_time = time.time()
+            generated_tokens_count = 0
+            if last_meta:
+                generated_tokens_count = last_meta.generated_tokens_count
+                input_tokens_count = last_meta.input_tokens_count
+                input_tokens_cost = input_tokens_count * input_tokens_cost_per_m
+                generated_tokens_cost = generated_tokens_count * output_tokens_cost_per_m
+
+            input_tokens_costs.append(input_tokens_cost)
+            generated_tokens_costs.append(generated_tokens_cost)
+            generated_tokens_counts.append(generated_tokens_count)
+            input_tokens_counts.append(input_tokens_count)
+
+            tokens_per_seconds.append(generated_tokens_count / (end_time - start_time))
+            times.append(end_time - start_time)
+
+
+        avg_time = sum(times) / len(times)
+        return {
+            "tokens_per_second": sum(tokens_per_seconds) / len(tokens_per_seconds),
+            "avg_time": avg_time,
+            "min_time": min(times),
+            "max_time": max(times),
+            "first_token_time": sum(first_token_times) / len(first_token_times),
+            "input_tokens_count": sum(input_tokens_counts) / len(input_tokens_counts),
+            "generated_tokens_count": sum(generated_tokens_counts) / len(generated_tokens_counts),
+            "success": True,
+            "error": None,
+            "input_tokens_cost": sum(input_tokens_costs) / len(input_tokens_costs),
+            "generated_tokens_cost": sum(generated_tokens_costs) / len(generated_tokens_costs)
+        }
+    except Exception as e:
+        return {
+            "tokens_per_second": 0,
+            "avg_time": 0,
+            "min_time": 0,
+            "max_time": 0,
+            "first_token_time": 0,
+            "input_tokens_count": 0,
+            "generated_tokens_count": 0,
+            "success": False,
+            "error": str(e),
+            "input_tokens_cost": 0.0,
+            "generated_tokens_cost": 0.0
+        }
+
+def test_model_speed_wrapper(args: Tuple[str, str, int, bool]) -> Tuple[str, Dict[str, Any]]:
+    """
+    Wrap the test function for thread-pool invocation.
+
+    Args:
+        args: a (model_name, product_mode, test_rounds) tuple
+
+    Returns:
+        a (model_name, test_results) tuple
+    """
+    model_name, product_mode, test_rounds, enable_long_context = args
+    results = test_model_speed(model_name, product_mode, test_rounds, enable_long_context)
+    return (model_name, results)
+
+
+def run_speed_test(product_mode: str, test_rounds: int = 3, max_workers: Optional[int] = None, enable_long_context: bool = False) -> SpeedTestResults:
+    """
+    Run the speed test for all active models.
+
+    Args:
+        product_mode: product mode (lite/pro)
+        test_rounds: number of test rounds per model
+        max_workers: maximum number of worker threads; defaults to None (ThreadPoolExecutor chooses automatically)
+        enable_long_context: whether to enable the long-context test
+
+    Returns:
+        SpeedTestResults: a pydantic model containing the results for all models
+    """
+    # Fetch all models
+    models_data = models_module.load_models()
+    active_models = [m for m in models_data if "api_key" in m] if product_mode == "lite" else models_data
+
+    if not active_models:
+        return SpeedTestResults(results=[])
+
+    # Prepare the test arguments
+    test_args = [(model["name"], product_mode, test_rounds, enable_long_context) for model in active_models]
+
+    # Store results for sorting
+    results_list = []
+
+    # Run the tests concurrently with a thread pool
+    with ThreadPoolExecutor(max_workers=max_workers) as executor:
+        # Submit all test tasks and collect the future objects
+        future_to_model = {executor.submit(test_model_speed_wrapper, args): args[0]
+                           for args in test_args}
+
+        # Collect the results
+        for future in future_to_model:
+            model_name = future_to_model[future]
+
+            try:
+                _, results = future.result()
+
+                if results["success"]:
+                    status = "✓"
+                    results_list.append((
+                        results['tokens_per_second'],
+                        ModelSpeedTestResult(
+                            model_name=model_name,
+                            tokens_per_second=results['tokens_per_second'],
+                            first_token_time=results['first_token_time'],
+                            input_tokens_count=results['input_tokens_count'],
+                            generated_tokens_count=results['generated_tokens_count'],
+                            status=status,
+                            input_tokens_cost=results['input_tokens_cost'],
+                            generated_tokens_cost=results['generated_tokens_cost'],
+                        )
+                    ))
+                    try:
+                        # Update the model's average speed
+                        models_module.update_model_speed(model_name, results['tokens_per_second'])
+                    except Exception:
+                        pass
+                else:
+                    results_list.append((
+                        0,
+                        ModelSpeedTestResult(
+                            model_name=model_name,
+                            tokens_per_second=0,
+                            first_token_time=0,
+                            input_tokens_count=0,
+                            generated_tokens_count=0,
+                            status=f"✗ {results['error']}",
+                            error=results['error'],
+                            input_tokens_cost=0.0,
+                            generated_tokens_cost=0.0
+                        )
+                    ))
+            except Exception as e:
+                results_list.append((
+                    0,
+                    ModelSpeedTestResult(
+                        model_name=model_name,
+                        tokens_per_second=0,
+                        first_token_time=0,
+                        input_tokens_count=0,
+                        generated_tokens_count=0,
+                        status=f"✗ {str(e)}",
+                        error=str(e),
+                        input_tokens_cost=0.0,
+                        generated_tokens_cost=0.0
+                    )
+                ))
+
+    # Sort by speed
+    results_list.sort(key=lambda x: x[0], reverse=True)
+
+    return SpeedTestResults(results=[result[1] for result in results_list])
+
+def render_speed_test_in_terminal(product_mode: str, test_rounds: int = 3, max_workers: Optional[int] = None, enable_long_context: bool = False) -> None:
+    """
+    Run the speed test for all active models and render the results in the terminal.
+
+    Args:
+        product_mode: product mode (lite/pro)
+        test_rounds: number of test rounds per model
+        max_workers: maximum number of worker threads; defaults to None (ThreadPoolExecutor chooses automatically)
+    """
+    printer = Printer()
+    console = Console()
+
+    # Fetch all models
+    models_data = models_module.load_models()
+    active_models = [m for m in models_data if "api_key" in m] if product_mode == "lite" else models_data
+
+    if not active_models:
+        printer.print_in_terminal("models_no_active", style="yellow")
+        return
+
+    # Create the results table
+    table = Table(
+        title=printer.get_message_from_key("models_speed_test_results"),
+        show_header=True,
+        header_style="bold magenta",
+        show_lines=True
+    )
+
+    table.add_column("Model", style="cyan", width=30)
+    table.add_column("Tokens/s", style="green", width=15)
+    table.add_column("First Token(s)", style="magenta", width=15)
+    table.add_column("Input Tokens", style="magenta", width=15)
+    table.add_column("Generated Tokens", style="magenta", width=15)
+    table.add_column("Input Tokens Cost", style="yellow", width=15)
+    table.add_column("Generated Tokens Cost", style="yellow", width=15)
+    table.add_column("Status", style="red", width=20)
+
+    # Prepare the test arguments
+    test_args = [(model["name"], product_mode, test_rounds, enable_long_context) for model in active_models]
+
+    # Store results for sorting
+    results_list = []
+
+    # Run the tests concurrently with a thread pool
+    with ThreadPoolExecutor(max_workers=max_workers) as executor:
+        printer.print_in_terminal("models_testing_start", style="yellow")
+
+        # Submit all test tasks and collect the future objects
+        future_to_model = {executor.submit(test_model_speed_wrapper, args): args[0]
+                           for args in test_args}
+
+        # Collect the results
+        completed = 0
+        total = len(future_to_model)
+        for future in future_to_model:
+            completed += 1
+            printer.print_in_terminal("models_testing_progress", style="yellow", completed=completed, total=total)
+            model_name = future_to_model[future]
+            printer.print_in_terminal("models_testing", style="yellow", name=model_name)
+
+            try:
+                _, results = future.result()
+
+                if results["success"]:
+                    status = "✓"
+                    results['status'] = status
+                    results_list.append((
+                        results['tokens_per_second'],
+                        model_name,
+                        results
+                    ))
+                    try:
+                        # Update the model's average speed
+                        models_module.update_model_speed(model_name, results['tokens_per_second'])
+                    except Exception as e:
+                        pass
+                else:
+                    status = f"✗ ({results['error']})"
+                    results_list.append((
+                        0,
+                        model_name,
+                        {"tokens_per_second": 0, "avg_time": 0, "input_tokens_count": 0, "generated_tokens_count": 0, "min_time": 0, "max_time": 0, "first_token_time": 0, "input_tokens_cost": 0.0, "generated_tokens_cost": 0.0, "status": status}
+                    ))
+            except Exception as e:
+                results_list.append((
+                    0,
+                    model_name,
+                    {"tokens_per_second": 0, "avg_time": 0, "input_tokens_count": 0, "generated_tokens_count": 0, "min_time": 0, "max_time": 0, "first_token_time": 0, "input_tokens_cost": 0.0, "generated_tokens_cost": 0.0, "status": f"✗ ({str(e)})"}
+                ))
+
+    # Sort by speed
+    results_list.sort(key=lambda x: x[0], reverse=True)
+
+    # Add the sorted results to the table
+    for tokens_per_second, model_name, results in results_list:
+        table.add_row(
+            model_name,
+            f"{tokens_per_second:.2f}",
+            f"{results['first_token_time']:.2f}",
+            f"{results['input_tokens_count']}",
+            f"{results['generated_tokens_count']}",
+            f"{results['input_tokens_cost']:.4f}",
+            f"{results['generated_tokens_cost']:.4f}",
+            results['status']
+        )
+
+    console.print(Panel(table, border_style="blue"))
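
For orientation, here is a minimal usage sketch of the new module (not part of the diff; the "lite" product mode and the call site are assumptions):

from autocoder.common.model_speed_tester import run_speed_test, render_speed_test_in_terminal

# Programmatic use: returns a SpeedTestResults pydantic model, sorted fastest-first.
results = run_speed_test(product_mode="lite", test_rounds=3)
for r in results.results:
    print(r.model_name, f"{r.tokens_per_second:.2f} tok/s", r.status)

# Interactive use: renders the same measurements as a rich table in the terminal.
render_speed_test_in_terminal(product_mode="lite", test_rounds=3, enable_long_context=False)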
autocoder/common/printer.py
CHANGED
@@ -15,19 +15,18 @@ class Printer:
 
     def get_message_from_key(self, msg_key: str):
         try:
-            return get_message(msg_key)
+            v = get_message(msg_key)
+            if not v:
+                return get_chat_message(msg_key)
         except Exception as e:
             return get_chat_message(msg_key)
 
-    def get_message_from_key_with_format(self, msg_key: str, **kwargs):
-        try:
-            return format_str_jinja2(self.get_message_from_key(msg_key), **kwargs)
-        except Exception as e:
-            return format_str_jinja2(self.get_chat_message_from_key(msg_key), **kwargs)
-
+    def get_message_from_key_with_format(self, msg_key: str, **kwargs):
+        return format_str_jinja2(self.get_message_from_key(msg_key), **kwargs)
+
     def print_in_terminal(self, msg_key: str, style: str = None,**kwargs):
         try:
-           if style:
+            if style:
                 self.console.print(format_str_jinja2(self.get_message_from_key(msg_key),**kwargs), style=style)
             else:
                 self.console.print(format_str_jinja2(self.get_message_from_key(msg_key),**kwargs))
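
The net effect of this change: get_message_from_key now falls back to the chat-language table both when get_message raises and when it returns an empty value, and get_message_from_key_with_format simply formats whatever that lookup yields instead of wrapping the lookup in its own try/except. A hypothetical illustration (the message key is an assumption):

printer = Printer()
# Tries get_message("models_testing_start") first; falls back to
# get_chat_message("models_testing_start") if the lookup is empty or raises.
msg = printer.get_message_from_key_with_format("models_testing_start")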
autocoder/common/run_cmd.py
NEW
@@ -0,0 +1,247 @@
+import os
+import platform
+import subprocess
+import sys
+from io import BytesIO
+
+import pexpect
+import psutil
+
+
+def run_cmd(command, verbose=False, error_print=None, cwd=None):
+    """
+    Run a command, choosing the most suitable execution mode (interactive or
+    non-interactive) for the current system and environment.
+
+    Use cases:
+    - Running commands cross-platform, automatically deciding between pexpect
+      (interactive) and subprocess.
+    - Getting both the exit status and the output of a command.
+    - Executing shell commands from CLI tools, automation scripts, or a REPL.
+
+    Args:
+    - command (str): the command string to execute.
+    - verbose (bool): whether to print verbose debug information; defaults to False.
+    - error_print (callable|None): custom error-printing function; defaults to None (uses print).
+    - cwd (str|None): working directory for the command; defaults to None.
+
+    Returns:
+    - tuple: (exit_code, output), where exit_code is the integer exit status
+      and output is the command's output.
+
+    Exceptions:
+    - OSError is caught and an error message is returned.
+    """
+    try:
+        if sys.stdin.isatty() and hasattr(pexpect, "spawn") and platform.system() != "Windows":
+            return run_cmd_pexpect(command, verbose, cwd)
+
+        return run_cmd_subprocess(command, verbose, cwd)
+    except OSError as e:
+        error_message = f"Error occurred while running command '{command}': {str(e)}"
+        if error_print is None:
+            print(error_message)
+        else:
+            error_print(error_message)
+        return 1, error_message
+
+
+def get_windows_parent_process_name():
+    """
+    Get the name of the current process's parent process (only meaningful on Windows).
+
+    Use cases:
+    - Detecting whether a command was launched from PowerShell or cmd.exe so
+      the command format can be adjusted.
+    - Parent-process analysis on the Windows platform.
+
+    Args:
+    - none
+
+    Returns:
+    - str|None: the parent process name (lowercase, e.g. "powershell.exe" or
+      "cmd.exe"), or None if it cannot be determined.
+
+    Exceptions:
+    - all exceptions are caught and None is returned.
+    """
+    try:
+        current_process = psutil.Process()
+        while True:
+            parent = current_process.parent()
+            if parent is None:
+                break
+            parent_name = parent.name().lower()
+            if parent_name in ["powershell.exe", "cmd.exe"]:
+                return parent_name
+            current_process = parent
+        return None
+    except Exception:
+        return None
+
+
+def run_cmd_subprocess(command, verbose=False, cwd=None, encoding=sys.stdout.encoding):
+    if verbose:
+        print("Using run_cmd_subprocess:", command)
+
+    try:
+        shell = os.environ.get("SHELL", "/bin/sh")
+        parent_process = None
+
+        # Determine the appropriate shell
+        if platform.system() == "Windows":
+            parent_process = get_windows_parent_process_name()
+            if parent_process == "powershell.exe":
+                command = f"powershell -Command {command}"
+
+        if verbose:
+            print("Running command:", command)
+            print("SHELL:", shell)
+            if platform.system() == "Windows":
+                print("Parent process:", parent_process)
+
+        process = subprocess.Popen(
+            command,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            text=True,
+            shell=True,
+            encoding=encoding,
+            errors="replace",
+            bufsize=0,  # Set bufsize to 0 for unbuffered output
+            universal_newlines=True,
+            cwd=cwd,
+        )
+
+        output = []
+        while True:
+            chunk = process.stdout.read(1)
+            if not chunk:
+                break
+            print(chunk, end="", flush=True)  # Print the chunk in real-time
+            output.append(chunk)  # Store the chunk for later use
+
+        process.wait()
+        return process.returncode, "".join(output)
+    except Exception as e:
+        return 1, str(e)
+
+def run_cmd_subprocess_generator(command, verbose=False, cwd=None, encoding=sys.stdout.encoding):
+    """
+    Run a command with subprocess, yielding its output incrementally as a generator.
+
+    Use cases:
+    - Running commands that need no interaction.
+    - Processing command output incrementally in real time (e.g. log printing,
+      progress monitoring).
+    - Running commands safely across Linux, macOS, Windows, and other platforms.
+
+    Args:
+    - command (str): the command string to execute.
+    - verbose (bool): whether to print verbose debug information; defaults to False.
+    - cwd (str|None): working directory for the command; defaults to None.
+    - encoding (str): character encoding used to decode the output; defaults to
+      the current stdout encoding.
+
+    Returns:
+    - generator: yields the command's output string chunk by chunk.
+
+    Exceptions:
+    - all exceptions are caught; the error message is yielded as a string.
+    """
+    if verbose:
+        print("Using run_cmd_subprocess:", command)
+
+    try:
+        shell = os.environ.get("SHELL", "/bin/sh")
+        parent_process = None
+
+        # Adjust the command on Windows
+        if platform.system() == "Windows":
+            parent_process = get_windows_parent_process_name()
+            if parent_process == "powershell.exe":
+                command = f"powershell -Command {command}"
+
+        if verbose:
+            print("Running command:", command)
+            print("SHELL:", shell)
+            if platform.system() == "Windows":
+                print("Parent process:", parent_process)
+
+        process = subprocess.Popen(
+            command,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            text=True,
+            shell=True,
+            encoding=encoding,
+            errors="replace",
+            bufsize=0,
+            universal_newlines=True,
+            cwd=cwd,
+        )
+
+        while True:
+            chunk = process.stdout.read(1)
+            if not chunk:
+                break
+            # Always yield a string, to avoid failures on bytes or other types
+            if not isinstance(chunk, str):
+                chunk = str(chunk)
+            yield chunk
+
+        process.wait()
+    except Exception as e:
+        # Yield the error message on failure (raising would also be an option)
+        yield f"[run_cmd_subprocess error]: {str(e)}"
+
+
+def run_cmd_pexpect(command, verbose=False, cwd=None):
+    """
+    Run a command interactively with pexpect, capturing the full output.
+
+    Use cases:
+    - Commands that need user interaction (logins, password prompts, etc.).
+    - Emulating terminal operation on Unix systems such as Linux and macOS.
+    - Capturing the complete output of an interactive command.
+
+    Args:
+    - command (str): the command string to execute.
+    - verbose (bool): whether to print verbose debug information; defaults to False.
+    - cwd (str|None): working directory for the command; defaults to None.
+
+    Returns:
+    - tuple: (exit_code, output), where exit_code is the exit status and
+      output is the command's complete output.
+
+    Exceptions:
+    - pexpect-related exceptions are caught and an error message is returned.
+    """
+    if verbose:
+        print("Using run_cmd_pexpect:", command)
+
+    output = BytesIO()
+
+    def output_callback(b):
+        output.write(b)
+        return b
+
+    try:
+        # Use the SHELL environment variable, falling back to /bin/sh if not set
+        shell = os.environ.get("SHELL", "/bin/sh")
+        if verbose:
+            print("With shell:", shell)
+
+        if os.path.exists(shell):
+            # Use the shell from SHELL environment variable
+            if verbose:
+                print("Running pexpect.spawn with shell:", shell)
+            child = pexpect.spawn(shell, args=["-i", "-c", command], encoding="utf-8", cwd=cwd)
+        else:
+            # Fall back to spawning the command directly
+            if verbose:
+                print("Running pexpect.spawn without shell.")
+            child = pexpect.spawn(command, encoding="utf-8", cwd=cwd)
+
+        # Transfer control to the user, capturing output
+        child.interact(output_filter=output_callback)
+
+        # Wait for the command to finish and get the exit status
+        child.close()
+        return child.exitstatus, output.getvalue().decode("utf-8", errors="replace")
+
+    except (pexpect.ExceptionPexpect, TypeError, ValueError) as e:
+        error_msg = f"Error running command {command}: {e}"
+        return 1, error_msg
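
A minimal usage sketch for the new helpers (not part of the diff; the example commands are arbitrary):

from autocoder.common.run_cmd import run_cmd, run_cmd_subprocess_generator

# One-shot execution: uses pexpect on an interactive Unix TTY, subprocess otherwise.
exit_code, output = run_cmd("echo hello")
print(exit_code, output)

# Streaming execution: consume the output chunk by chunk while the command runs.
for chunk in run_cmd_subprocess_generator("ls -la"):
    print(chunk, end="")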