autocoder-nano 0.1.30__py3-none-any.whl → 0.1.34__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- autocoder_nano/agent/agent_base.py +4 -4
- autocoder_nano/agent/agentic_edit.py +1584 -0
- autocoder_nano/agent/agentic_edit_tools/__init__.py +28 -0
- autocoder_nano/agent/agentic_edit_tools/ask_followup_question_tool.py +51 -0
- autocoder_nano/agent/agentic_edit_tools/attempt_completion_tool.py +36 -0
- autocoder_nano/agent/agentic_edit_tools/base_tool_resolver.py +31 -0
- autocoder_nano/agent/agentic_edit_tools/execute_command_tool.py +65 -0
- autocoder_nano/agent/agentic_edit_tools/list_code_definition_names_tool.py +78 -0
- autocoder_nano/agent/agentic_edit_tools/list_files_tool.py +123 -0
- autocoder_nano/agent/agentic_edit_tools/list_package_info_tool.py +42 -0
- autocoder_nano/agent/agentic_edit_tools/plan_mode_respond_tool.py +35 -0
- autocoder_nano/agent/agentic_edit_tools/read_file_tool.py +73 -0
- autocoder_nano/agent/agentic_edit_tools/replace_in_file_tool.py +148 -0
- autocoder_nano/agent/agentic_edit_tools/search_files_tool.py +135 -0
- autocoder_nano/agent/agentic_edit_tools/write_to_file_tool.py +57 -0
- autocoder_nano/agent/agentic_edit_types.py +151 -0
- autocoder_nano/auto_coder_nano.py +159 -700
- autocoder_nano/git_utils.py +63 -1
- autocoder_nano/llm_client.py +170 -3
- autocoder_nano/llm_types.py +72 -16
- autocoder_nano/rules/rules_learn.py +221 -0
- autocoder_nano/templates.py +1 -1
- autocoder_nano/utils/completer_utils.py +616 -0
- autocoder_nano/utils/formatted_log_utils.py +128 -0
- autocoder_nano/utils/printer_utils.py +5 -4
- autocoder_nano/utils/shell_utils.py +85 -0
- autocoder_nano/version.py +1 -1
- {autocoder_nano-0.1.30.dist-info → autocoder_nano-0.1.34.dist-info}/METADATA +3 -2
- {autocoder_nano-0.1.30.dist-info → autocoder_nano-0.1.34.dist-info}/RECORD +34 -16
- autocoder_nano/agent/new/auto_new_project.py +0 -278
- /autocoder_nano/{agent/new → rules}/__init__.py +0 -0
- {autocoder_nano-0.1.30.dist-info → autocoder_nano-0.1.34.dist-info}/LICENSE +0 -0
- {autocoder_nano-0.1.30.dist-info → autocoder_nano-0.1.34.dist-info}/WHEEL +0 -0
- {autocoder_nano-0.1.30.dist-info → autocoder_nano-0.1.34.dist-info}/entry_points.txt +0 -0
- {autocoder_nano-0.1.30.dist-info → autocoder_nano-0.1.34.dist-info}/top_level.txt +0 -0
```diff
--- a/autocoder_nano/auto_coder_nano.py
+++ b/autocoder_nano/auto_coder_nano.py
@@ -5,16 +5,19 @@ import os
 import json
 import shutil
 import subprocess
-import textwrap
 import time
 import uuid
 
+from autocoder_nano.agent.agentic_edit import AgenticEdit
+from autocoder_nano.agent.agentic_edit_types import AgenticEditRequest
 from autocoder_nano.edit import Dispacher
 from autocoder_nano.helper import show_help
 from autocoder_nano.index.entry import build_index_and_filter_files
 from autocoder_nano.index.index_manager import IndexManager
 from autocoder_nano.index.symbols_utils import extract_symbols
 from autocoder_nano.llm_client import AutoLLM
+from autocoder_nano.rules.rules_learn import AutoRulesLearn
+from autocoder_nano.utils.completer_utils import CommandCompleter
 from autocoder_nano.version import __version__
 from autocoder_nano.llm_types import *
 from autocoder_nano.llm_prompt import prompt, extract_code
@@ -26,23 +29,20 @@ from autocoder_nano.project import PyProject, SuffixProject
 from autocoder_nano.utils.printer_utils import Printer
 
 import yaml
-# import tabulate
 from jinja2 import Template
-# from loguru import logger
 from prompt_toolkit import prompt as _toolkit_prompt, PromptSession
-from prompt_toolkit.completion import Completer, Completion
 from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
 from prompt_toolkit.formatted_text import FormattedText
 from prompt_toolkit.history import InMemoryHistory
 from prompt_toolkit.key_binding import KeyBindings
 from prompt_toolkit.shortcuts import confirm
 from prompt_toolkit.styles import Style
-from rich.console import Console
+# from rich.console import Console
 from rich.live import Live
 from rich.markdown import Markdown
 from rich.panel import Panel
 from rich.syntax import Syntax
-from rich.table import Table
+# from rich.table import Table
 from rich.text import Text
 
 
@@ -55,7 +55,8 @@ base_persist_dir = os.path.join(project_root, ".auto-coder", "plugins", "chat-au
 # ".vscode", ".idea", ".hg"]
 commands = [
     "/add_files", "/remove_files", "/list_files", "/conf", "/coding", "/chat", "/revert", "/index/query",
-    "/index/build", "/exclude_dirs", "/exclude_files", "/help", "/shell", "/exit", "/mode", "/models", "/commit",
+    "/index/build", "/exclude_dirs", "/exclude_files", "/help", "/shell", "/exit", "/mode", "/models", "/commit",
+    "/rules", "/auto"
 ]
 
 memory = {
@@ -177,599 +178,6 @@ def find_files_in_project(patterns: List[str]) -> List[str]:
     return list(set(matched_files))
 
 
-COMMANDS = {
-    "/add_files": {
-        "/group": {"/add": "", "/drop": "", "/reset": ""},
-        "/refresh": "",
-    },
-    "/remove_files": {"/all": ""},
-    "/coding": {"/apply": ""},
-    "/chat": {"/history": "", "/new": "", "/review": ""},
-    "/models": {
-        "/add_model": "",
-        "/remove": "",
-        "/list": "",
-        "/check": ""
-    },
-    "/help": {
-        "/add_files": "",
-        "/remove_files": "",
-        "/chat": "",
-        "/coding": "",
-        "/commit": "",
-        "/conf": "",
-        "/mode": "",
-        "/models": ""
-    },
-    "/exclude_files": {"/list": "", "/drop": ""},
-    "/exclude_dirs": {}
-}
-
-
-class CommandTextParser:
-    def __init__(self, text: str, command: str):
-        self.text = text
-        self.pos = -1
-        self.len = len(text)
-        self.is_extracted = False
-        self.current_word_start_pos = 0
-        self.current_word_end_pos = 0
-        self.in_current_sub_command = ""
-        self.completions = []
-        self.command = command
-        self.current_hiararchy = COMMANDS[command]
-        self.sub_commands = []
-        self.tags = []
-
-    def first_sub_command(self):
-        if len(self.sub_commands) == 0:
-            return None
-        return self.sub_commands[0]
-
-    def last_sub_command(self):
-        if len(self.sub_commands) == 0:
-            return None
-        return self.sub_commands[-1]
-
-    def peek(self):
-        if self.pos + 1 < self.len:
-            return self.text[self.pos + 1]
-        return None
-
-    def peek2(self):
-        if self.pos + 2 < self.len:
-            return self.text[self.pos + 2]
-        return None
-
-    def peek3(self):
-        if self.pos + 3 < self.len:
-            return self.text[self.pos + 3]
-        return None
-
-    def next(self):
-        if self.pos < self.len - 1:
-            self.pos += 1
-            char = self.text[self.pos]
-            return char
-        return None
-
-    def consume_blank(self):
-        while self.peek() == "\n" or self.peek() == " " or self.peek() == "\t" or self.peek() == "\r":
-            self.next()
-
-    def is_blank(self) -> bool:
-        return self.peek() == "\n" or self.peek() == " " or self.peek() == "\t" or self.peek() == "\r"
-
-    def is_sub_command(self) -> bool | None:
-        backup_pos = self.pos
-        self.consume_blank()
-        try:
-            if self.peek() == "/":
-                current_sub_command = ""
-                while self.peek() is not None and self.peek() != " " and self.peek() != "\n":
-                    current_sub_command += self.next()
-
-                if current_sub_command.count("/") > 1:
-                    self.pos = backup_pos
-                    return False
-                return True
-            return False
-        finally:
-            self.pos = backup_pos
-
-    def consume_sub_command(self) -> str:
-        # backup_pos = self.pos
-        self.consume_blank()
-        current_sub_command = ""
-        while self.peek() is not None and self.peek() != " " and self.peek() != "\n":
-            current_sub_command += self.next()
-
-        if self.peek() is None:
-            self.is_extracted = True
-            self.current_word_end_pos = self.pos + 1
-            self.current_word_start_pos = self.current_word_end_pos - len(
-                current_sub_command
-            )
-            self.in_current_sub_command = current_sub_command
-        else:
-            if current_sub_command in self.current_hiararchy:
-                self.current_hiararchy = self.current_hiararchy[current_sub_command]
-                self.sub_commands.append(current_sub_command)
-
-        return current_sub_command
-
-    def consume_command_value(self):
-        current_word = ""
-        while self.peek() is not None:
-            v = self.next()
-            if v == " ":
-                current_word = ""
-            else:
-                current_word += v
-        self.is_extracted = True
-        self.current_word_end_pos = self.pos + 1
-        self.current_word_start_pos = self.current_word_end_pos - len(current_word)
-
-    def previous(self):
-        if self.pos > 1:
-            return self.text[self.pos - 1]
-        return None
-
-    def is_start_tag(self) -> bool | None:
-        backup_pos = self.pos
-        tag = ""
-        try:
-            if self.peek() == "<" and self.peek2() != "/":
-                while (
-                    self.peek() is not None
-                    and self.peek() != ">"
-                    and not self.is_blank()
-                ):
-                    tag += self.next()
-                if self.peek() == ">":
-                    tag += self.next()
-                    return True
-                else:
-                    return False
-            return False
-        finally:
-            self.pos = backup_pos
-
-    def consume_tag(self):
-        start_tag = ""
-        content = ""
-        end_tag = ""
-
-        # consume start tag
-        self.current_word_start_pos = self.pos + 1
-        while self.peek() is not None and self.peek() != ">" and not self.is_blank():
-            start_tag += self.next()
-        if self.peek() == ">":
-            start_tag += self.next()
-        self.current_word_end_pos = self.pos + 1
-        tag = Tag(start_tag=start_tag, content=content, end_tag=end_tag)
-        self.tags.append(tag)
-
-        # consume content
-        self.current_word_start_pos = self.pos + 1
-        while self.peek() is not None and not (
-            self.peek() == "<" and self.peek2() == "/"
-        ):
-            content += self.next()
-
-        tag.content = content
-        self.current_word_end_pos = self.pos + 1
-
-        # consume end tag
-        self.current_word_start_pos = self.pos + 1
-        if self.peek() == "<" and self.peek2() == "/":
-            while (
-                self.peek() is not None and self.peek() != ">" and not self.is_blank()
-            ):
-                end_tag += self.next()
-            if self.peek() == ">":
-                end_tag += self.next()
-        tag.end_tag = end_tag
-        self.current_word_end_pos = self.pos + 1
-
-        # check is finished
-        if self.peek() is None:
-            self.is_extracted = True
-
-    def consume_coding_value(self):
-        current_word = ""
-        while self.peek() is not None and not self.is_start_tag():
-            v = self.next()
-            if v == " ":
-                current_word = ""
-            else:
-                current_word += v
-        if self.peek() is None:
-            self.is_extracted = True
-
-        self.current_word_end_pos = self.pos + 1
-        self.current_word_start_pos = self.current_word_end_pos - len(current_word)
-
-    def current_word(self) -> str:
-        return self.text[self.current_word_start_pos: self.current_word_end_pos]
-
-    def get_current_word(self) -> str:
-        return self.current_word()
-
-    def get_sub_commands(self) -> list[str]:
-        if self.get_current_word() and not self.get_current_word().startswith("/"):
-            return []
-
-        if isinstance(self.current_hiararchy, str):
-            return []
-
-        return [item for item in list(self.current_hiararchy.keys()) if item]
-
-    def add_files(self):
-        """
-        for exmaple:
-        /add_files file1 file2 file3
-        /add_files /group/abc/cbd /group/abc/bc2
-        /add_files /group1 /add xxxxx
-        /add_files /group
-        /add_files /group /add <groupname>
-        /add_files /group /drop <groupname>
-        /add_files /group <groupname>,<groupname>
-        /add_files /refresh
-        """
-        while True:
-            if self.pos == self.len - 1:
-                break
-            elif self.is_extracted:
-                break
-            elif self.is_sub_command():
-                self.consume_sub_command()
-            else:
-                self.consume_command_value()
-        return self
-
-    def coding(self):
-        while True:
-            if self.pos == self.len - 1:
-                break
-            elif self.is_extracted:
-                break
-            elif self.is_sub_command():
-                self.consume_sub_command()
-            elif self.is_start_tag():
-                self.consume_tag()
-            else:
-                self.consume_coding_value()
-
-
-class CommandCompleter(Completer):
-    def __init__(self, _commands):
-        self.commands = _commands
-        self.all_file_names = get_all_file_names_in_project()
-        self.all_files = get_all_file_in_project()
-        self.all_dir_names = get_all_dir_names_in_project()
-        self.all_files_with_dot = get_all_file_in_project_with_dot()
-        self.symbol_list = get_symbol_list()
-        self.current_file_names = []
-
-    def get_completions(self, document, complete_event):
-        text = document.text_before_cursor
-        words = text.split()
-
-        if len(words) > 0:
-            if words[0] == "/mode":
-                left_word = text[len("/mode"):]
-                for mode in ["normal", "auto_detect"]:
-                    if mode.startswith(left_word.strip()):
-                        yield Completion(mode, start_position=-len(left_word.strip()))
-
-            if words[0] == "/add_files":
-                new_text = text[len("/add_files"):]
-                parser = CommandTextParser(new_text, words[0])
-                parser.add_files()
-                current_word = parser.current_word()
-
-                if parser.last_sub_command() == "/refresh":
-                    return
-
-                for command in parser.get_sub_commands():
-                    if command.startswith(current_word):
-                        yield Completion(command, start_position=-len(current_word))
-
-                if parser.first_sub_command() == "/group" and (
-                    parser.last_sub_command() == "/group"
-                    or parser.last_sub_command() == "/drop"
-                ):
-                    group_names = memory["current_files"]["groups"].keys()
-                    if "," in current_word:
-                        current_word = current_word.split(",")[-1]
-
-                    for group_name in group_names:
-                        if group_name.startswith(current_word):
-                            yield Completion(
-                                group_name, start_position=-len(current_word)
-                            )
-
-                if parser.first_sub_command() != "/group":
-                    if current_word and current_word.startswith("."):
-                        for file_name in self.all_files_with_dot:
-                            if file_name.startswith(current_word):
-                                yield Completion(file_name, start_position=-len(current_word))
-                    else:
-                        for file_name in self.all_file_names:
-                            if file_name.startswith(current_word):
-                                yield Completion(file_name, start_position=-len(current_word))
-                        for file_name in self.all_files:
-                            if current_word and current_word in file_name:
-                                yield Completion(file_name, start_position=-len(current_word))
-
-            elif words[0] in ["/chat", "/coding"]:
-                image_extensions = (
-                    ".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".tif", ".webp", ".svg", ".ico",
-                    ".heic", ".heif", ".raw", ".cr2", ".nef", ".arw", ".dng", ".orf", ".rw2", ".pef",
-                    ".srw", ".eps", ".ai", ".psd", ".xcf",
-                )
-                new_text = text[len(words[0]):]
-                parser = CommandTextParser(new_text, words[0])
-
-                parser.coding()
-                current_word = parser.current_word()
-
-                if len(new_text.strip()) == 0 or new_text.strip() == "/":
-                    for command in parser.get_sub_commands():
-                        if command.startswith(current_word):
-                            yield Completion(command, start_position=-len(current_word))
-
-                all_tags = parser.tags
-
-                if current_word.startswith("@"):
-                    name = current_word[1:]
-                    target_set = set()
-
-                    for file_name in self.current_file_names:
-                        base_file_name = os.path.basename(file_name)
-                        if name in base_file_name:
-                            target_set.add(base_file_name)
-                            path_parts = file_name.split(os.sep)
-                            display_name = (
-                                os.sep.join(path_parts[-3:])
-                                if len(path_parts) > 3
-                                else file_name
-                            )
-                            relative_path = os.path.relpath(
-                                file_name, project_root)
-                            yield Completion(
-                                relative_path,
-                                start_position=-len(name),
-                                display=f"{display_name} (in active files)",
-                            )
-
-                    for file_name in self.all_file_names:
-                        if file_name.startswith(name) and file_name not in target_set:
-                            target_set.add(file_name)
-
-                            path_parts = file_name.split(os.sep)
-                            display_name = (
-                                os.sep.join(path_parts[-3:])
-                                if len(path_parts) > 3
-                                else file_name
-                            )
-                            relative_path = os.path.relpath(
-                                file_name, project_root)
-
-                            yield Completion(
-                                relative_path,
-                                start_position=-len(name),
-                                display=f"{display_name}",
-                            )
-
-                    for file_name in self.all_files:
-                        if name in file_name and file_name not in target_set:
-                            path_parts = file_name.split(os.sep)
-                            display_name = (
-                                os.sep.join(path_parts[-3:])
-                                if len(path_parts) > 3
-                                else file_name
-                            )
-                            relative_path = os.path.relpath(
-                                file_name, project_root)
-                            yield Completion(
-                                relative_path,
-                                start_position=-len(name),
-                                display=f"{display_name}",
-                            )
-
-                if current_word.startswith("@@"):
-                    name = current_word[2:]
-                    for symbol in self.symbol_list:
-                        if name in symbol.symbol_name:
-                            file_name = symbol.file_name
-                            path_parts = file_name.split(os.sep)
-                            display_name = (
-                                os.sep.join(path_parts[-3:])
-                                if len(path_parts) > 3
-                                else symbol.symbol_name
-                            )
-                            relative_path = os.path.relpath(
-                                file_name, project_root)
-                            yield Completion(
-                                f"{symbol.symbol_name}(location: {relative_path})",
-                                start_position=-len(name),
-                                display=f"{symbol.symbol_name} ({display_name}/{symbol.symbol_type})",
-                            )
-
-                tags = [tag for tag in parser.tags]
-
-                if current_word.startswith("<"):
-                    name = current_word[1:]
-                    for tag in ["<img>", "</img>"]:
-                        if all_tags and all_tags[-1].start_tag == "<img>":
-                            if tag.startswith(name):
-                                yield Completion(
-                                    "</img>", start_position=-len(current_word)
-                                )
-                        elif tag.startswith(name):
-                            yield Completion(tag, start_position=-len(current_word))
-
-                if tags and tags[-1].start_tag == "<img>" and tags[-1].end_tag == "":
-                    raw_file_name = tags[0].content
-                    file_name = raw_file_name.strip()
-                    parent_dir = os.path.dirname(file_name)
-                    file_basename = os.path.basename(file_name)
-                    search_dir = parent_dir if parent_dir else "."
-                    for root, dirs, files in os.walk(search_dir):
-                        # 只处理直接子目录
-                        if root != search_dir:
-                            continue
-
-                        # 补全子目录
-                        for _dir in dirs:
-                            full_path = os.path.join(root, _dir)
-                            if full_path.startswith(file_name):
-                                relative_path = os.path.relpath(full_path, search_dir)
-                                yield Completion(relative_path, start_position=-len(file_basename))
-
-                        # 补全文件
-                        for file in files:
-                            if file.lower().endswith(
-                                image_extensions
-                            ) and file.startswith(file_basename):
-                                full_path = os.path.join(root, file)
-                                relative_path = os.path.relpath(full_path, search_dir)
-                                yield Completion(
-                                    relative_path,
-                                    start_position=-len(file_basename),
-                                )
-
-                        # 只处理一层子目录,然后退出循环
-                        break
-
-            elif words[0] == "/remove_files":
-                new_words = text[len("/remove_files"):].strip().split(",")
-
-                is_at_space = text[-1] == " "
-                last_word = new_words[-2] if len(new_words) > 1 else ""
-                current_word = new_words[-1] if new_words else ""
-
-                if is_at_space:
-                    last_word = current_word
-                    current_word = ""
-
-                # /remove_files /all [cursor] or /remove_files /all p[cursor]
-                if not last_word and not current_word:
-                    if "/all".startswith(current_word):
-                        yield Completion("/all", start_position=-len(current_word))
-                    for file_name in self.current_file_names:
-                        yield Completion(file_name, start_position=-len(current_word))
-
-                # /remove_files /a[cursor] or /remove_files p[cursor]
-                if current_word:
-                    if "/all".startswith(current_word):
-                        yield Completion("/all", start_position=-len(current_word))
-                    for file_name in self.current_file_names:
-                        if current_word and current_word in file_name:
-                            yield Completion(
-                                file_name, start_position=-len(current_word)
-                            )
-
-            elif words[0] == "/exclude_dirs":
-                new_words = text[len("/exclude_dirs"):].strip().split(",")
-                current_word = new_words[-1]
-
-                for file_name in self.all_dir_names:
-                    if current_word and current_word in file_name:
-                        yield Completion(file_name, start_position=-len(current_word))
-
-            elif words[0] == "/exclude_files":
-                new_text = text[len("/exclude_files"):]
-                parser = CommandTextParser(new_text, words[0])
-                parser.add_files()
-                current_word = parser.current_word()
-                for command in parser.get_sub_commands():
-                    if command.startswith(current_word):
-                        yield Completion(command, start_position=-len(current_word))
-
-            elif words[0] == "/models":
-                new_text = text[len("/models"):]
-                parser = CommandTextParser(new_text, words[0])
-                parser.add_files()
-                current_word = parser.current_word()
-                for command in parser.get_sub_commands():
-                    if command.startswith(current_word):
-                        yield Completion(command, start_position=-len(current_word))
-
-            elif words[0] == "/help":
-                new_text = text[len("/help"):]
-                parser = CommandTextParser(new_text, words[0])
-                parser.add_files()
-                current_word = parser.current_word()
-                for command in parser.get_sub_commands():
-                    if command.startswith(current_word):
-                        yield Completion(command, start_position=-len(current_word))
-
-            elif words[0] == "/conf":
-                new_words = text[len("/conf"):].strip().split()
-                is_at_space = text[-1] == " "
-                last_word = new_words[-2] if len(new_words) > 1 else ""
-                current_word = new_words[-1] if new_words else ""
-                completions = []
-
-                if is_at_space:
-                    last_word = current_word
-                    current_word = ""
-
-                # /conf /drop [curor] or /conf /drop p[cursor]
-                if last_word == "/drop":
-                    completions = [
-                        field_name
-                        for field_name in memory["conf"].keys()
-                        if field_name.startswith(current_word)
-                    ]
-                # /conf [curosr]
-                elif not last_word and not current_word:
-                    completions = [
-                        "/drop"] if "/drop".startswith(current_word) else []
-                    completions += [
-                        field_name + ":"
-                        for field_name in AutoCoderArgs.model_fields.keys()
-                        if field_name.startswith(current_word)
-                    ]
-                # /conf p[cursor]
-                elif not last_word and current_word:
-                    completions = [
-                        "/drop"] if "/drop".startswith(current_word) else []
-                    completions += [
-                        field_name + ":"
-                        for field_name in AutoCoderArgs.model_fields.keys()
-                        if field_name.startswith(current_word)
-                    ]
-
-                for completion in completions:
-                    yield Completion(completion, start_position=-len(current_word))
-
-            else:
-                for command in self.commands:
-                    if command.startswith(text):
-                        yield Completion(command, start_position=-len(text))
-        else:
-            for command in self.commands:
-                if command.startswith(text):
-                    yield Completion(command, start_position=-len(text))
-
-    def update_current_files(self, files):
-        self.current_file_names = [f for f in files]
-
-    def refresh_files(self):
-        self.all_file_names = get_all_file_names_in_project()
-        self.all_files = get_all_file_in_project()
-        self.all_dir_names = get_all_dir_names_in_project()
-        self.all_files_with_dot = get_all_file_in_project_with_dot()
-        self.symbol_list = get_symbol_list()
-
-
-completer = CommandCompleter(commands)
-
-
 def save_memory():
     with open(os.path.join(base_persist_dir, "nano-memory.json"), "w") as fp:
         json_str = json.dumps(memory, indent=2, ensure_ascii=False)
@@ -783,7 +191,28 @@ def load_memory():
     if os.path.exists(memory_path):
         with open(memory_path, "r") as f:
             memory = json.load(f)
-
+
+
+def get_memory():
+    load_memory()
+    return memory
+
+
+completer = CommandCompleter(
+    commands=commands,
+    file_system_model=FileSystemModel(
+        project_root=project_root,
+        get_all_file_names_in_project=get_all_file_names_in_project,
+        get_all_file_in_project=get_all_file_in_project,
+        get_all_dir_names_in_project=get_all_dir_names_in_project,
+        get_all_file_in_project_with_dot=get_all_file_in_project_with_dot,
+        get_symbol_list=get_symbol_list
+    ),
+    memory_model=MemoryConfig(
+        get_memory_func=get_memory,
+        save_memory_func=save_memory
+    )
+)
 
 
 def exclude_dirs(dir_names: List[str]):
@@ -929,22 +358,6 @@ def index_import(import_path: str):
     return False
 
 
-def wrap_text_in_table(data, max_width=60):
-    """
-    Wraps text in each cell of the table to a specified width.
-
-    :param data: A list of lists, where each inner list represents a row in the table.
-    :param max_width: The maximum width of text in each cell.
-    :return: A new table data with wrapped text.
-    """
-    wrapped_data = []
-    for row in data:
-        wrapped_row = [textwrap.fill(str(cell), width=max_width) for cell in row]
-        wrapped_data.append(wrapped_row)
-
-    return wrapped_data
-
-
 def index_query_command(query: str, llm: AutoLLM):
     update_config_to_args(query=query, delete_execute_file=True)
 
@@ -1259,6 +672,7 @@ def init_project():
         return
     os.makedirs(os.path.join(args.source_dir, "actions"), exist_ok=True)
     os.makedirs(os.path.join(args.source_dir, ".auto-coder"), exist_ok=True)
+    os.makedirs(os.path.join(args.source_dir, ".auto-coder", "autocoderrules"), exist_ok=True)
     source_dir = os.path.abspath(args.source_dir)
     create_actions(
         source_dir=source_dir,
@@ -1307,7 +721,7 @@ def load_include_files(config, base_path, max_depth=10, current_depth=0):
 
     for include_file in include_files:
         abs_include_path = resolve_include_path(base_path, include_file)
-        printer.print_text(f"正在加载 Include file: {abs_include_path}", style="green")
+        # printer.print_text(f"正在加载 Include file: {abs_include_path}", style="green")
         with open(abs_include_path, "r") as f:
             include_config = yaml.safe_load(f)
             if not include_config:
@@ -1369,14 +783,9 @@ def coding(query: str, llm: AutoLLM):
 
     memory["conversation"].append({"role": "user", "content": query})
     conf = memory.get("conf", {})
-
     current_files = memory["current_files"]["files"]
-    current_groups = memory["current_files"].get("current_groups", [])
-    groups = memory["current_files"].get("groups", {})
-    groups_info = memory["current_files"].get("groups_info", {})
 
     prepare_chat_yaml()  # 复制上一个序号的 yaml 文件, 生成一个新的聊天 yaml 文件
-
     latest_yaml_file = get_last_yaml_file(os.path.join(args.source_dir, "actions"))
 
     if latest_yaml_file:
@@ -1398,19 +807,6 @@ def coding(query: str, llm: AutoLLM):
         yaml_config["urls"] = current_files
         yaml_config["query"] = query
 
-        if current_groups:
-            active_groups_context = "下面是对上面文件按分组给到的一些描述,当用户的需求正好匹配描述的时候,参考描述来做修改:\n"
-            for group in current_groups:
-                group_files = groups.get(group, [])
-                query_prefix = groups_info.get(group, {}).get("query_prefix", "")
-                active_groups_context += f"组名: {group}\n"
-                active_groups_context += f"文件列表:\n"
-                for file in group_files:
-                    active_groups_context += f"- {file}\n"
-                active_groups_context += f"组描述: {query_prefix}\n\n"
-
-            yaml_config["context"] = active_groups_context + "\n"
-
         if is_apply:
             memory_dir = os.path.join(args.source_dir, ".auto-coder", "memory")
             os.makedirs(memory_dir, exist_ok=True)
@@ -1441,6 +837,19 @@ def coding(query: str, llm: AutoLLM):
                 yaml_config["context"] += f"你: {conv['content']}\n"
             yaml_config["context"] += "</history>\n"
 
+        if args.enable_rules:
+            rules_dir_path = os.path.join(project_root, ".auto-coder", "autocoderrules")
+            printer.print_text("已开启 Rules 模式", style="green")
+            yaml_config["context"] += f"下面是我们对代码进行深入分析,提取具有通用价值的功能模式和设计模式,可在其他需求中复用的Rules\n"
+            yaml_config["context"] += "你在编写代码时可以参考以下Rules\n"
+            yaml_config["context"] += "<rules>\n"
+            for rules_name in os.listdir(rules_dir_path):
+                printer.print_text(f"正在加载 Rules:{rules_name}", style="green")
+                rules_file_path = os.path.join(rules_dir_path, rules_name)
+                with open(rules_file_path, "r") as fp:
+                    yaml_config["context"] += f"{fp.read()}\n"
+            yaml_config["context"] += "</rules>\n"
+
         yaml_config["file"] = latest_yaml_file
         yaml_content = convert_yaml_config_to_str(yaml_config=yaml_config)
         execute_file = os.path.join(args.source_dir, "actions", latest_yaml_file)
@@ -1572,6 +981,20 @@ def commit_info(query: str, llm: AutoLLM):
         os.remove(execute_file)
 
 
+def agentic_edit(query: str, llm: AutoLLM):
+    update_config_to_args(query=query, delete_execute_file=True)
+
+    sources = SourceCodeList([])
+    agentic_editor = AgenticEdit(
+        args=args, llm=llm, files=sources, history_conversation=[]
+    )
+
+    query = query.strip()
+    request = AgenticEditRequest(user_input=query)
+
+    agentic_editor.run_in_terminal(request)
+
+
 @prompt()
 def _generate_shell_script(user_input: str) -> str:
     """
@@ -2224,71 +1647,100 @@ def configure_project_model():
     )
 
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-#
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+def rules(query_args: List[str], llm: AutoLLM):
+    """
+    /rules 命令帮助:
+    /rules /list - 列出规则文件
+    /rules /show - 查看规则文件内容
+    /rules /remove - 删除规则文件
+    /rules /analyze - 分析当前文件,可选提供查询内容
+    /rules /commit <提交ID> - 分析特定提交,必须提供提交ID和查询内容
+    """
+    update_config_to_args(query="", delete_execute_file=True)
+    rules_dir_path = os.path.join(project_root, ".auto-coder", "autocoderrules")
+    if query_args[0] == "/list":
+        printer.print_table_compact(
+            data=[[rules_name] for rules_name in os.listdir(rules_dir_path)],
+            title="Rules 列表",
+            headers=["Rules 文件"],
+            center=True
+        )
+
+    if query_args[0] == "/remove":
+        remove_rules_name = query_args[1].strip()
+        remove_rules_path = os.path.join(rules_dir_path, remove_rules_name)
+        if os.path.exists(remove_rules_path):
+            os.remove(remove_rules_path)
+            printer.print_text(f"Rules 文件[{remove_rules_name}]移除成功", style="green")
+        else:
+            printer.print_text(f"Rules 文件[{remove_rules_name}]不存在", style="yellow")
+
+    if query_args[0] == "/show":  # /rules /show 参数检查
+        show_rules_name = query_args[1].strip()
+        show_rules_path = os.path.join(rules_dir_path, show_rules_name)
+        if os.path.exists(show_rules_path):
+            with open(show_rules_path, "r") as fp:
+                printer.print_markdown(text=fp.read(), panel=True)
+        else:
+            printer.print_text(f"Rules 文件[{show_rules_name}]不存在", style="yellow")
+
+    if query_args[0] == "/commit":
+        commit_id = query_args[1].strip()
+        auto_learn = AutoRulesLearn(llm=llm, args=args)
+
+        try:
+            result = auto_learn.analyze_commit_changes(commit_id=commit_id, conversations=[])
+            rules_file = os.path.join(rules_dir_path, f"rules-commit-{uuid.uuid4()}.md")
+            with open(rules_file, "w", encoding="utf-8") as f:
+                f.write(result)
+            printer.print_text(f"代码变更[{commit_id}]生成 Rules 成功", style="green")
+        except Exception as e:
+            printer.print_text(f"代码变更[{commit_id}]生成 Rules 失败: {e}", style="red")
+
+    if query_args[0] == "/analyze":
+        auto_learn = AutoRulesLearn(llm=llm, args=args)
+
+        files = memory.get("current_files", {}).get("files", [])
+        if not files:
+            printer.print_text("当前无活跃文件用于生成 Rules", style="yellow")
+            return
+
+        sources = SourceCodeList([])
+        for file in files:
+            try:
+                with open(file, "r", encoding="utf-8") as f:
+                    source_code = f.read()
+                sources.sources.append(SourceCode(module_name=file, source_code=source_code))
+            except Exception as e:
+                printer.print_text(f"读取文件生成 Rules 失败: {e}", style="yellow")
+                continue
+
+        try:
+            result = auto_learn.analyze_modules(sources=sources, conversations=[])
+            rules_file = os.path.join(rules_dir_path, f"rules-modules-{uuid.uuid4()}.md")
+            with open(rules_file, "w", encoding="utf-8") as f:
+                f.write(result)
+            printer.print_text(f"活跃文件[Files:{len(files)}]生成 Rules 成功", style="green")
+        except Exception as e:
+            printer.print_text(f"活跃文件生成 Rules 失败: {e}", style="red")
+
+    completer.refresh_files()
 
 
 def is_old_version():
-    ""
-
-    不再使用 current_chat_model 和 current_chat_model
-    """
+    # "0.1.26" 开始使用兼容 AutoCoder 的 chat_model, code_model 参数
+    # 不再使用 current_chat_model 和 current_chat_model
     if 'current_chat_model' in memory['conf'] and 'current_code_model' in memory['conf']:
-        printer.print_text(f"
+        printer.print_text(f"0.1.26 新增 chat_model, code_model 参数, 正在进行配置兼容性处理", style="yellow")
         memory['conf']['chat_model'] = memory['conf']['current_chat_model']
         memory['conf']['code_model'] = memory['conf']['current_code_model']
         del memory['conf']['current_chat_model']
        del memory['conf']['current_code_model']
+    # "0.1.31" 在 .auto-coder 目录中新增 autocoderrules 目录
+    rules_dir_path = os.path.join(project_root, ".auto-coder", "autocoderrules")
+    if not os.path.exists(rules_dir_path):
+        printer.print_text(f"0.1.31 .auto-coder 目录中新增 autocoderrules 目录, 正在进行配置兼容性处理", style="yellow")
+        os.makedirs(rules_dir_path, exist_ok=True)
 
 
 def main():
@@ -2301,6 +1753,7 @@ def main():
 
     load_memory()
     is_old_version()
+    completer.update_current_files(memory["current_files"]["files"])
 
     if len(memory["models"]) == 0:
         _model_pass = input(f" 是否跳过模型配置(y/n): ").strip().lower()
@@ -2453,6 +1906,12 @@ def main():
         elif user_input.startswith("/commit"):
             query = user_input[len("/commit"):].strip()
             commit_info(query, auto_llm)
+        elif user_input.startswith("/rules"):
+            query_args = user_input[len("/rules"):].strip().split()
+            if not query_args:
+                printer.print_text("Please enter your request.", style="yellow")
+                continue
+            rules(query_args=query_args, llm=auto_llm)
         elif user_input.startswith("/help"):
             query = user_input[len("/help"):].strip()
             show_help(query)
@@ -2461,15 +1920,15 @@ def main():
         elif user_input.startswith("/coding"):
             query = user_input[len("/coding"):].strip()
             if not query:
-
+                printer.print_text("Please enter your request.", style="yellow")
                 continue
             coding(query=query, llm=auto_llm)
-
-
-
-
-
-
+        elif user_input.startswith("/auto"):
+            query = user_input[len("/auto"):].strip()
+            if not query:
+                print("\033[91mPlease enter your request.\033[0m")
+                continue
+            agentic_edit(query=query, llm=auto_llm)
         elif user_input.startswith("/chat"):
             query = user_input[len("/chat"):].strip()
             if not query:
```