auto-coder 0.1.353__py3-none-any.whl → 0.1.355__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of auto-coder might be problematic.

Files changed (60)
  1. {auto_coder-0.1.353.dist-info → auto_coder-0.1.355.dist-info}/METADATA +1 -1
  2. {auto_coder-0.1.353.dist-info → auto_coder-0.1.355.dist-info}/RECORD +60 -45
  3. autocoder/agent/agentic_filter.py +1 -1
  4. autocoder/auto_coder.py +8 -0
  5. autocoder/auto_coder_rag.py +37 -1
  6. autocoder/auto_coder_runner.py +58 -77
  7. autocoder/chat/conf_command.py +270 -0
  8. autocoder/chat/models_command.py +485 -0
  9. autocoder/chat_auto_coder.py +29 -24
  10. autocoder/chat_auto_coder_lang.py +26 -2
  11. autocoder/commands/auto_command.py +60 -132
  12. autocoder/commands/auto_web.py +1 -1
  13. autocoder/commands/tools.py +1 -1
  14. autocoder/common/__init__.py +3 -1
  15. autocoder/common/command_completer.py +58 -12
  16. autocoder/common/command_completer_v2.py +576 -0
  17. autocoder/common/conversations/__init__.py +52 -0
  18. autocoder/common/conversations/compatibility.py +303 -0
  19. autocoder/common/conversations/conversation_manager.py +502 -0
  20. autocoder/common/conversations/example.py +152 -0
  21. autocoder/common/file_monitor/__init__.py +5 -0
  22. autocoder/common/file_monitor/monitor.py +383 -0
  23. autocoder/common/global_cancel.py +53 -16
  24. autocoder/common/ignorefiles/__init__.py +4 -0
  25. autocoder/common/ignorefiles/ignore_file_utils.py +103 -0
  26. autocoder/common/ignorefiles/test_ignore_file_utils.py +91 -0
  27. autocoder/common/rulefiles/__init__.py +15 -0
  28. autocoder/common/rulefiles/autocoderrules_utils.py +173 -0
  29. autocoder/common/save_formatted_log.py +54 -0
  30. autocoder/common/v2/agent/agentic_edit.py +10 -39
  31. autocoder/common/v2/agent/agentic_edit_tools/list_files_tool_resolver.py +1 -1
  32. autocoder/common/v2/agent/agentic_edit_tools/search_files_tool_resolver.py +73 -43
  33. autocoder/common/v2/code_agentic_editblock_manager.py +9 -9
  34. autocoder/common/v2/code_diff_manager.py +2 -2
  35. autocoder/common/v2/code_editblock_manager.py +31 -18
  36. autocoder/common/v2/code_strict_diff_manager.py +3 -2
  37. autocoder/dispacher/actions/action.py +6 -6
  38. autocoder/dispacher/actions/plugins/action_regex_project.py +2 -2
  39. autocoder/events/event_manager_singleton.py +1 -1
  40. autocoder/index/index.py +3 -3
  41. autocoder/models.py +22 -9
  42. autocoder/rag/api_server.py +14 -2
  43. autocoder/rag/cache/local_byzer_storage_cache.py +1 -1
  44. autocoder/rag/cache/local_duckdb_storage_cache.py +8 -0
  45. autocoder/rag/cache/simple_cache.py +63 -33
  46. autocoder/rag/loaders/docx_loader.py +1 -1
  47. autocoder/rag/loaders/filter_utils.py +133 -76
  48. autocoder/rag/loaders/image_loader.py +15 -3
  49. autocoder/rag/loaders/pdf_loader.py +2 -2
  50. autocoder/rag/long_context_rag.py +11 -0
  51. autocoder/rag/qa_conversation_strategy.py +5 -31
  52. autocoder/rag/utils.py +21 -2
  53. autocoder/utils/_markitdown.py +66 -25
  54. autocoder/utils/auto_coder_utils/chat_stream_out.py +4 -4
  55. autocoder/utils/thread_utils.py +9 -27
  56. autocoder/version.py +1 -1
  57. {auto_coder-0.1.353.dist-info → auto_coder-0.1.355.dist-info}/LICENSE +0 -0
  58. {auto_coder-0.1.353.dist-info → auto_coder-0.1.355.dist-info}/WHEEL +0 -0
  59. {auto_coder-0.1.353.dist-info → auto_coder-0.1.355.dist-info}/entry_points.txt +0 -0
  60. {auto_coder-0.1.353.dist-info → auto_coder-0.1.355.dist-info}/top_level.txt +0 -0
autocoder/chat/models_command.py (new file)
@@ -0,0 +1,485 @@
+ import json
+ import shlex
+ import fnmatch  # Add fnmatch for wildcard matching
+ from typing import Dict, Any
+ from rich.console import Console
+ from rich.table import Table
+ from rich.panel import Panel
+ import byzerllm
+ from typing import Generator
+ from autocoder import models as models_module
+ from autocoder.common.printer import Printer
+ from autocoder.common.result_manager import ResultManager
+ from autocoder.common.model_speed_tester import render_speed_test_in_terminal
+ from autocoder.utils.llms import get_single_llm
+
+ def handle_models_command(query: str, memory: Dict[str, Any]):
+     """
+     Handle /models subcommands:
+       /models /list - List all models (default + custom)
+       /models /add <n> <api_key> - Add model with simplified params
+       /models /add_model name=xxx base_url=xxx ... - Add model with custom params
+       /models /remove <n> - Remove model by name
+       /models /chat <content> - Chat with a model
+     """
+     console = Console()
+     printer = Printer(console=console)
+
+     product_mode = memory.get("product_mode", "lite")
+     if product_mode != "lite":
+         printer.print_in_terminal("models_lite_only", style="red")
+         return
+
+     # Check if the query is empty or only whitespace
+     if not query.strip():
+         printer.print_in_terminal("models_usage")
+         return
+
+     models_data = models_module.load_models()
+     subcmd = ""
+     if "/list" in query:
+         subcmd = "/list"
+         query = query.replace("/list", "", 1).strip()
+
+     if "/add_model" in query:
+         subcmd = "/add_model"
+         query = query.replace("/add_model", "", 1).strip()
+
+     if "/add" in query:
+         subcmd = "/add"
+         query = query.replace("/add", "", 1).strip()
+
+     # alias to /add
+     if "/activate" in query:
+         subcmd = "/add"
+         query = query.replace("/activate", "", 1).strip()
+
+     if "/remove" in query:
+         subcmd = "/remove"
+         query = query.replace("/remove", "", 1).strip()
+
+     if "/speed-test" in query:
+         subcmd = "/speed-test"
+         query = query.replace("/speed-test", "", 1).strip()
+
+     if "/speed_test" in query:
+         subcmd = "/speed-test"
+         query = query.replace("/speed_test", "", 1).strip()
+
+     if "input_price" in query:
+         subcmd = "/input_price"
+         query = query.replace("/input_price", "", 1).strip()
+
+     if "output_price" in query:
+         subcmd = "/output_price"
+         query = query.replace("/output_price", "", 1).strip()
+
+     if "/speed" in query:
+         subcmd = "/speed"
+         query = query.replace("/speed", "", 1).strip()
+
+     if "/chat" in query:
+         subcmd = "/chat"
+         query = query.replace("/chat", "", 1).strip()
+
+
+
+     if not subcmd:
+         printer.print_in_terminal("models_usage")
+
+     result_manager = ResultManager()
+     if subcmd == "/list":
+         pattern = query.strip()  # Get the filter pattern from the query
+         filtered_models_data = models_data
+
+         if pattern:  # Apply filter if a pattern is provided
+             filtered_models_data = [
+                 m for m in models_data if fnmatch.fnmatch(m.get("name", ""), pattern)
+             ]
+
+         if filtered_models_data:
+             # Sort models by speed (average_speed)
+             sorted_models = sorted(filtered_models_data, key=lambda x: float(x.get('average_speed', 0)))
+             sorted_models.reverse()
+
+             table = Table(
+                 title=printer.get_message_from_key("models_title") + (f" (Filtered by: '{pattern}')" if pattern else ""),
+                 expand=True,
+                 show_lines=True
+             )
+             table.add_column("Name", style="cyan", width=40, overflow="fold", no_wrap=False)
+             table.add_column("Model Name", style="magenta", width=30, overflow="fold", no_wrap=False)
+             table.add_column("Base URL", style="white", width=30, overflow="fold", no_wrap=False)
+             table.add_column("Input Price (M)", style="magenta", width=15, overflow="fold", no_wrap=False)
+             table.add_column("Output Price (M)", style="magenta", width=15, overflow="fold", no_wrap=False)
+             table.add_column("Speed (s/req)", style="blue", width=15, overflow="fold", no_wrap=False)
+             for m in sorted_models:
+                 # Check if api_key_path exists and file exists
+                 is_api_key_set = "api_key" in m
+                 name = m.get("name", "")
+                 if is_api_key_set:
+                     api_key = m.get("api_key", "").strip()
+                     if not api_key:
+                         printer.print_in_terminal("models_api_key_empty", style="yellow", name=name)
+                     name = f"{name} *"
+
+                 table.add_row(
+                     name,
+                     m.get("model_name", ""),
+                     m.get("base_url", ""),
+                     f"{m.get('input_price', 0.0):.2f}",
+                     f"{m.get('output_price', 0.0):.2f}",
+                     f"{m.get('average_speed', 0.0):.3f}"
+                 )
+             console.print(table)
+             result_manager.add_result(content=json.dumps(sorted_models, ensure_ascii=False), meta={
+                 "action": "models",
+                 "input": {
+                     "query": query  # Keep original query for logging
+                 }
+             })
+         else:
+             if pattern:
+                 # Use a specific message if filtering resulted in no models
+                 printer.print_in_terminal("models_no_models_matching_pattern", style="yellow", pattern=pattern)
+                 result_manager.add_result(content=f"No models found matching pattern: {pattern}", meta={
+                     "action": "models",
+                     "input": {
+                         "query": query
+                     }
+                 })
+             else:
+                 # Original message if no models exist at all
+                 printer.print_in_terminal("models_no_models", style="yellow")
+                 result_manager.add_result(content="No models found", meta={
+                     "action": "models",
+                     "input": {
+                         "query": query
+                     }
+                 })
+
+     elif subcmd == "/input_price":
+         args = query.strip().split()
+         if len(args) >= 2:
+             name = args[0]
+             try:
+                 price = float(args[1])
+                 if models_module.update_model_input_price(name, price):
+                     printer.print_in_terminal("models_input_price_updated", style="green", name=name, price=price)
+                     result_manager.add_result(content=f"models_input_price_updated: {name} {price}", meta={
+                         "action": "models",
+                         "input": {
+                             "query": query
+                         }
+                     })
+                 else:
+                     printer.print_in_terminal("models_not_found", style="red", name=name)
+                     result_manager.add_result(content=f"models_not_found: {name}", meta={
+                         "action": "models",
+                         "input": {
+                             "query": query
+                         }
+                     })
+             except ValueError as e:
+                 result_manager.add_result(content=f"models_invalid_price: {str(e)}", meta={
+                     "action": "models",
+                     "input": {
+                         "query": query
+                     }
+                 })
+                 printer.print_in_terminal("models_invalid_price", style="red", error=str(e))
+         else:
+             result_manager.add_result(content=printer.get_message_from_key("models_input_price_usage"), meta={
+                 "action": "models",
+                 "input": {
+                     "query": query
+                 }
+             })
+             printer.print_in_terminal("models_input_price_usage", style="red")
+
+     elif subcmd == "/output_price":
+         args = query.strip().split()
+         if len(args) >= 2:
+             name = args[0]
+             try:
+                 price = float(args[1])
+                 if models_module.update_model_output_price(name, price):
+                     printer.print_in_terminal("models_output_price_updated", style="green", name=name, price=price)
+                     result_manager.add_result(content=f"models_output_price_updated: {name} {price}", meta={
+                         "action": "models",
+                         "input": {
+                             "query": query
+                         }
+                     })
+                 else:
+                     printer.print_in_terminal("models_not_found", style="red", name=name)
+                     result_manager.add_result(content=f"models_not_found: {name}", meta={
+                         "action": "models",
+                         "input": {
+                             "query": query
+                         }
+                     })
+             except ValueError as e:
+                 printer.print_in_terminal("models_invalid_price", style="red", error=str(e))
+                 result_manager.add_result(content=f"models_invalid_price: {str(e)}", meta={
+                     "action": "models",
+                     "input": {
+                         "query": query
+                     }
+                 })
+         else:
+             result_manager.add_result(content=printer.get_message_from_key("models_output_price_usage"), meta={
+                 "action": "models",
+                 "input": {
+                     "query": query
+                 }
+             })
+             printer.print_in_terminal("models_output_price_usage", style="red")
+
+     elif subcmd == "/speed":
+         args = query.strip().split()
+         if len(args) >= 2:
+             name = args[0]
+             try:
+                 speed = float(args[1])
+                 if models_module.update_model_speed(name, speed):
+                     printer.print_in_terminal("models_speed_updated", style="green", name=name, speed=speed)
+                     result_manager.add_result(content=f"models_speed_updated: {name} {speed}", meta={
+                         "action": "models",
+                         "input": {
+                             "query": query
+                         }
+                     })
+                 else:
+                     printer.print_in_terminal("models_not_found", style="red", name=name)
+                     result_manager.add_result(content=f"models_not_found: {name}", meta={
+                         "action": "models",
+                         "input": {
+                             "query": query
+                         }
+                     })
+             except ValueError as e:
+                 printer.print_in_terminal("models_invalid_speed", style="red", error=str(e))
+                 result_manager.add_result(content=f"models_invalid_speed: {str(e)}", meta={
+                     "action": "models",
+                     "input": {
+                         "query": query
+                     }
+                 })
+         else:
+             result_manager.add_result(content=printer.get_message_from_key("models_speed_usage"), meta={
+                 "action": "models",
+                 "input": {
+                     "query": query
+                 }
+             })
+             printer.print_in_terminal("models_speed_usage", style="red")
+
+     elif subcmd == "/speed-test":
+         test_rounds = 1  # default number of test rounds
+
+         enable_long_context = False
+         if "/long_context" in query:
+             enable_long_context = True
+             query = query.replace("/long_context", "", 1).strip()
+
+         if "/long-context" in query:
+             enable_long_context = True
+             query = query.replace("/long-context", "", 1).strip()
+
+         # Parse the optional test-rounds argument
+         args = query.strip().split()
+         if args and args[0].isdigit():
+             test_rounds = int(args[0])
+
+         render_speed_test_in_terminal(product_mode, test_rounds, enable_long_context=enable_long_context)
+         ## TODO: pending optimization, return detailed per-model data
+         result_manager.add_result(content="models test success", meta={
+             "action": "models",
+             "input": {
+                 "query": query
+             }
+         })
+
+     elif subcmd == "/add":
+         # Support both simplified and legacy formats
+         args = query.strip().split(" ")
+         if len(args) == 2:
+             # Simplified: /models /add <name> <api_key>
+             name, api_key = args[0], args[1]
+             result = models_module.update_model_with_api_key(name, api_key)
+             if result:
+                 result_manager.add_result(content=f"models_added: {name}", meta={
+                     "action": "models",
+                     "input": {
+                         "query": query
+                     }
+                 })
+                 printer.print_in_terminal("models_added", style="green", name=name)
+             else:
+                 result_manager.add_result(content=f"models_add_failed: {name}", meta={
+                     "action": "models",
+                     "input": {
+                         "query": query
+                     }
+                 })
+                 printer.print_in_terminal("models_add_failed", style="red", name=name)
+         else:
+             models_list = "\n".join([m["name"] for m in models_module.default_models_list])
+             printer.print_in_terminal("models_add_usage", style="red", models=models_list)
+             result_manager.add_result(content=printer.get_message_from_key_with_format("models_add_usage", models=models_list), meta={
+                 "action": "models",
+                 "input": {
+                     "query": query
+                 }
+             })
+
+     elif subcmd == "/add_model":
+         # Parse key=value pairs: /models /add_model name=abc base_url=http://xx ...
+         # Collect key=value pairs
+         kv_pairs = shlex.split(query)
+         data_dict = {}
+         for pair in kv_pairs:
+             if '=' not in pair:
+                 printer.print_in_terminal("models_add_model_params", style="red")
+                 continue
+             k, v = pair.split('=', 1)
+             data_dict[k.strip()] = v.strip()
+
+         # Name is required
+         if "name" not in data_dict:
+             printer.print_in_terminal("models_add_model_name_required", style="red")
+             return
+
+         # Check duplication
+         if any(m["name"] == data_dict["name"] for m in models_data):
+             printer.print_in_terminal("models_add_model_exists", style="yellow", name=data_dict["name"])
+             result_manager.add_result(content=printer.get_message_from_key_with_format("models_add_model_exists", name=data_dict["name"]), meta={
+                 "action": "models",
+                 "input": {
+                     "query": query
+                 }
+             })
+             return
+
+         # Create model with defaults
+         final_model = {
+             "name": data_dict["name"],
+             "model_type": data_dict.get("model_type", "saas/openai"),
+             "model_name": data_dict.get("model_name", data_dict["name"]),
+             "base_url": data_dict.get("base_url", "https://api.openai.com/v1"),
+             "api_key_path": data_dict.get("api_key_path", "api.openai.com"),
+             "description": data_dict.get("description", ""),
+             "is_reasoning": data_dict.get("is_reasoning", "false") in ["true", "True", "TRUE", "1"]
+         }
+
+         models_data.append(final_model)
+         models_module.save_models(models_data)
+         printer.print_in_terminal("models_add_model_success", style="green", name=data_dict["name"])
+         result_manager.add_result(content=f"models_add_model_success: {data_dict['name']}", meta={
+             "action": "models",
+             "input": {
+                 "query": query
+             }
+         })
+
+     elif subcmd == "/remove":
+         args = query.strip().split(" ")
+         if len(args) < 1:
+             printer.print_in_terminal("models_add_usage", style="red")
+             result_manager.add_result(content=printer.get_message_from_key("models_add_usage"), meta={
+                 "action": "models",
+                 "input": {
+                     "query": query
+                 }
+             })
+             return
+         name = args[0]
+         filtered_models = [m for m in models_data if m["name"] != name]
+         if len(filtered_models) == len(models_data):
+             printer.print_in_terminal("models_add_model_remove", style="yellow", name=name)
+             result_manager.add_result(content=printer.get_message_from_key_with_format("models_add_model_remove", name=name), meta={
+                 "action": "models",
+                 "input": {
+                     "query": query
+                 }
+             })
+             return
+         models_module.save_models(filtered_models)
+         printer.print_in_terminal("models_add_model_removed", style="green", name=name)
+         result_manager.add_result(content=printer.get_message_from_key_with_format("models_add_model_removed", name=name), meta={
+             "action": "models",
+             "input": {
+                 "query": query
+             }
+         })
+     elif subcmd == "/chat":
+         if not query.strip():
+             printer.print_in_terminal("Please provide content in format: <model_name> <question>", style="yellow")
+             result_manager.add_result(content="Please provide content in format: <model_name> <question>", meta={
+                 "action": "models",
+                 "input": {
+                     "query": query
+                 }
+             })
+             return
+
+         # Split the model name from the user question
+         parts = query.strip().split(' ', 1)  # split only at the first space
+         if len(parts) < 2:
+             printer.print_in_terminal("Correct format should be: <model_name> <question>, where question can contain spaces", style="yellow")
+             result_manager.add_result(content="Correct format should be: <model_name> <question>, where question can contain spaces", meta={
+                 "action": "models",
+                 "input": {
+                     "query": query
+                 }
+             })
+             return
+
+         model_name = parts[0]
+         user_question = parts[1]  # keeps all remaining text, spaces preserved
+         product_mode = memory.get("product_mode", "lite")
+
+         try:
+             # Get the model
+             llm = get_single_llm(model_name, product_mode=product_mode)
+
+             @byzerllm.prompt()
+             def chat_func(content: str) -> Generator[str, None, None]:
+                 """
+                 {{ content }}
+                 """
+
+             # Support custom llm_config parameters
+             result = chat_func.with_llm(llm).run(user_question)
+             output_text = ""
+             for res in result:
+                 output_text += res
+                 print(res, end="", flush=True)
+             print("\n")
+
+             # Print the result
+
+             result_manager.add_result(content=output_text, meta={
+                 "action": "models",
+                 "input": {
+                     "query": query
+                 }
+             })
+         except Exception as e:
+             error_message = f"Error chatting with model: {str(e)}"
+             printer.print_str_in_terminal(error_message, style="red")
+             result_manager.add_result(content=error_message, meta={
+                 "action": "models",
+                 "input": {
+                     "query": query
+                 }
+             })
+     else:
+         printer.print_in_terminal("models_unknown_subcmd", style="yellow", subcmd=subcmd)
+         result_manager.add_result(content=printer.get_message_from_key_with_format("models_unknown_subcmd", subcmd=subcmd), meta={
+             "action": "models",
+             "input": {
+                 "query": query
+             }
+         })
autocoder/chat_auto_coder.py
@@ -7,7 +7,7 @@ import argparse
  import os
  from prompt_toolkit import PromptSession
  from prompt_toolkit.key_binding import KeyBindings
- from prompt_toolkit.history import InMemoryHistory
+ from prompt_toolkit.history import FileHistory
  from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
  from prompt_toolkit.styles import Style
  from autocoder.version import __version__
@@ -16,13 +16,15 @@ from prompt_toolkit.formatted_text import FormattedText
  from prompt_toolkit.completion import Completer, Completion
  from autocoder.plugins import PluginManager
  from autocoder.events.event_manager_singleton import gengerate_event_file_path
+ from autocoder.common.global_cancel import global_cancel
+ from autocoder.chat.models_command import handle_models_command
  from autocoder.auto_coder_runner import (
      auto_command,
      load_memory,
      save_memory,
-     configure,
-     manage_models,
-     print_conf,
+     configure,  # Keep configure if it's used elsewhere or by handle_conf_command internally (though we adapted handle_conf_command not to)
+     # manage_models,  # Removed
+     # print_conf,  # Removed
      exclude_dirs,
      exclude_files,
      ask,
@@ -52,6 +54,8 @@ from autocoder.auto_coder_runner import (
      get_memory,
      active_context,
  )
+ # Ensure the correct import is present
+ from autocoder.chat.conf_command import handle_conf_command

  # Create a global plugin manager
  plugin_manager = PluginManager()
@@ -377,8 +381,13 @@ def main():
      # Create an enhanced completer that inherits from Completer
      enhanced_completer = EnhancedCompleter(completer, plugin_manager)

+     # Define the path for the history file
+     history_file_path = os.path.join(os.getcwd(), ".auto-coder", "auto-coder.chat", "history", "command_history.txt")
+     # Ensure the directory exists
+     os.makedirs(os.path.dirname(history_file_path), exist_ok=True)
+
      session = PromptSession(
-         history=InMemoryHistory(),
+         history=FileHistory(history_file_path),  # Use FileHistory
          auto_suggest=AutoSuggestFromHistory(),
          enable_history_search=False,
          completer=enhanced_completer,
@@ -482,6 +491,7 @@ def main():
          ):
              event_file, file_id = gengerate_event_file_path()
              configure(f"event_file:{event_file}")
+             global_cancel.register_token(event_file)
              auto_command(user_input)

          elif memory["mode"] == "voice_input" and not user_input.startswith("/"):
@@ -508,6 +518,9 @@ def main():
              index_query(query)

          elif user_input.startswith("/index/build"):
+             event_file, file_id = gengerate_event_file_path()
+             configure(f"event_file:{event_file}")
+             global_cancel.register_token(event_file)
              index_build()

          elif user_input.startswith("/index/export"):
@@ -523,10 +536,7 @@ def main():

          elif user_input.startswith("/models"):
              query = user_input[len("/models") :].strip()
-             if not query:
-                 print("Please enter your query.")
-             else:
-                 manage_models(query)
+             handle_models_command(query, get_memory())

          elif user_input.startswith("/mode"):
              conf = user_input[len("/mode") :].strip()
@@ -538,27 +548,19 @@ def main():
          elif user_input.startswith("/conf/export"):
              from autocoder.common.conf_import_export import export_conf

-             export_path = user_input[len("/conf/export") :].strip()
-             export_conf(os.getcwd(), export_path)
-
-         elif user_input.startswith("/conf/import"):
-             from autocoder.common.conf_import_export import import_conf
-
-             import_path = user_input[len("/conf/import") :].strip()
-             import_conf(os.getcwd(), import_path)
-
          elif user_input.startswith("/plugins"):
              # Extract the command args and hand them to plugin_manager
              args = user_input[len("/plugins") :].strip().split()
              result = plugin_manager.handle_plugins_command(args)
              print(result, end="")

+         # Handle /conf and its subcommands like /conf /export, /conf /import
          elif user_input.startswith("/conf"):
-             conf = user_input[len("/conf") :].strip()
-             if not conf:
-                 print_conf(memory["conf"])
-             else:
-                 configure(conf)
+             # Extract everything after "/conf"
+             command_args = user_input[len("/conf"):].strip()
+             # Call the handler from conf_command.py and print its string result
+             result_message = handle_conf_command(command_args, memory)
+             print(result_message)
          elif user_input.startswith("/revert"):
              revert()
          elif user_input.startswith("/commit"):
@@ -592,14 +594,16 @@ def main():
          elif user_input.startswith("/coding"):
              event_file, file_id = gengerate_event_file_path()
              configure(f"event_file:{event_file}")
+             global_cancel.register_token(event_file)
              query = user_input[len("/coding") :].strip()
              if not query:
                  print("\033[91mPlease enter your request.\033[0m")
                  continue
-             coding(query)
+             coding(query)
          elif user_input.startswith("/chat"):
              event_file, file_id = gengerate_event_file_path()
              configure(f"event_file:{event_file}")
+             global_cancel.register_token(event_file)
              query = user_input[len("/chat") :].strip()
              if not query:
                  print("\033[91mPlease enter your request.\033[0m")
@@ -656,6 +660,7 @@ def main():
          else:
              if command.startswith("/chat"):
                  event_file, file_id = gengerate_event_file_path()
+                 global_cancel.register_token(event_file)
                  configure(f"event_file:{event_file}")
                  command = command[len("/chat") :].strip()
                  gen_and_exec_shell_command(command)
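
Several of the hunks above repeat the same change for long-running commands (/coding, /chat, /index/build and agentic auto_command runs): generate a per-command event file, point the event system at it, then register a cancellation token keyed by that file. Pulled out of the diff as a standalone sketch, not additional code in the package:

    from autocoder.events.event_manager_singleton import gengerate_event_file_path
    from autocoder.common.global_cancel import global_cancel
    from autocoder.auto_coder_runner import configure

    event_file, file_id = gengerate_event_file_path()  # one event file per command invocation
    configure(f"event_file:{event_file}")               # route this command's events to that file
    global_cancel.register_token(event_file)            # new in 0.1.355: a cancel token keyed by the event file,
                                                        # presumably so the run can be interrupted later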
autocoder/chat_auto_coder_lang.py
@@ -375,8 +375,28 @@ MESSAGES = {
          "zh": "管理模型配置,仅在lite模式下可用"
      },
      "models_usage": {
-         "en": "Usage: /models <command>\nAvailable subcommands:\n /list - List all models\n /add <name> <api_key> - Add a built-in model\n /add_model - Add a custom model\n /remove <name> - Remove a model\n /input_price <name> <value> - Set model input price\n /output_price <name> <value> - Set model output price\n /speed <name> <value> - Set model speed\n /speed-test - Test models speed\n /speed-test-long - Test models speed with long context",
-         "zh": "用法: /models <命令>\n可用的子命令:\n /list - 列出所有模型\n /add <名称> <API密钥> - 添加内置模型\n /add_model - 添加自定义模型\n /remove <名称> - 移除模型\n /input_price <名称> <价格> - 设置模型输入价格\n /output_price <名称> <价格> - 设置模型输出价格\n /speed <名称> <速度> - 设置模型速度\n /speed-test - 测试模型速度\n /speed-test-long - 使用长文本上下文测试模型速度"
+         "en": '''Usage: /models <command>
+ Available subcommands:
+ /list - List all configured models (built-in + custom).
+ /add <name> <api_key> - Add or activate a built-in model and set its API key.
+ /add_model - Add a custom model. Provide parameters in 'key=value' format, e.g., name=my_model model_name=gpt-4 base_url=... api_key=...
+ /remove <name> - Remove a configured model by its name.
+ /input_price <name> <value> - Set the input price for a model (unit: Million tokens).
+ /output_price <name> <value> - Set the output price for a model (unit: Million tokens).
+ /speed <name> <value> - Set the average speed for a model (unit: seconds per request).
+ /speed-test [<rounds>] - Test the speed of configured models. Optionally specify the number of rounds.
+ /speed-test /long_context [<rounds>] - Test model speed using a long context. Optionally specify the number of rounds.''',
+         "zh": '''用法: /models <命令>
+ 可用的子命令:
+ /list - 列出所有已配置的模型 (包括内置和自定义)。
+ /add <名称> <API密钥> - 添加或激活一个内置模型,并设置其 API 密钥。
+ /add_model - 添加一个自定义模型。参数使用 'key=value' 格式提供,例如:name=my_model model_name=gpt-4 base_url=... api_key=...
+ /remove <名称> - 根据名称移除一个已配置的模型。
+ /input_price <名称> <价格> - 设置指定模型的输入价格 (单位: 百万 Token)。
+ /output_price <名称> <价格> - 设置指定模型的输出价格 (单位: 百万 Token)。
+ /speed <名称> <速度> - 设置指定模型的平均速度 (单位: 秒/请求)。
+ /speed-test [<轮数>] - 测试已配置模型的速度。可以指定测试轮数 (可选)。
+ /speed-test /long_context [<轮数>] - 使用长文本上下文测试模型速度。可以指定测试轮数 (可选)。'''
      },
      "models_added": {
          "en": "Added/Updated model '{{name}}' successfully.",
@@ -458,6 +478,10 @@ MESSAGES = {
          "en": "No models found.",
          "zh": "未找到任何模型。"
      },
+     "models_no_models_matching_pattern": {
+         "en": "No models found matching pattern: {{pattern}}",
+         "zh": "未找到匹配模式的模型: {{pattern}}"
+     },
      "models_lite_only": {
          "en": "The /models command is only available in lite mode",
          "zh": "/models 命令仅在 lite 模式下可用"